text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import absolute_import
# import apis into api package
from .apis_api import ApisApi
from .apps_api import AppsApi
from .apps_v1beta1_api import AppsV1beta1Api
from .authentication_api import AuthenticationApi
from .authentication_v1_api import AuthenticationV1Api
from .authentication_v1beta1_api import AuthenticationV1beta1Api
from .authorization_api import AuthorizationApi
from .authorization_v1_api import AuthorizationV1Api
from .authorization_v1beta1_api import AuthorizationV1beta1Api
from .autoscaling_api import AutoscalingApi
from .autoscaling_v1_api import AutoscalingV1Api
from .autoscaling_v2alpha1_api import AutoscalingV2alpha1Api
from .batch_api import BatchApi
from .batch_v1_api import BatchV1Api
from .batch_v2alpha1_api import BatchV2alpha1Api
from .certificates_api import CertificatesApi
from .certificates_v1beta1_api import CertificatesV1beta1Api
from .core_api import CoreApi
from .core_v1_api import CoreV1Api
from .extensions_api import ExtensionsApi
from .extensions_v1beta1_api import ExtensionsV1beta1Api
from .logs_api import LogsApi
from .policy_api import PolicyApi
from .policy_v1beta1_api import PolicyV1beta1Api
from .rbac_authorization_api import RbacAuthorizationApi
from .rbac_authorization_v1alpha1_api import RbacAuthorizationV1alpha1Api
from .rbac_authorization_v1beta1_api import RbacAuthorizationV1beta1Api
from .settings_api import SettingsApi
from .settings_v1alpha1_api import SettingsV1alpha1Api
from .storage_api import StorageApi
from .storage_v1_api import StorageV1Api
from .storage_v1beta1_api import StorageV1beta1Api
from .version_api import VersionApi
| {
"content_hash": "d849327b6e1fbab4c4d377e9318dd5ff",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 45.166666666666664,
"alnum_prop": 0.8548585485854858,
"repo_name": "skuda/client-python",
"id": "ca1f5b9bc07f0a82af8450f334cb4dc569fd50da",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/apis/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
import logging
from urllib import quote
import jsonpickle
from Vulnerability import Vulnerability
from VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from tests.CairisTests import CairisTests
__author__ = 'Robin Quetin'
class VulnerabilityTests(CairisTests):
    """Integration tests for the /api/vulnerabilities REST endpoints.

    Uses the test client exposed by CairisTests as ``self.app`` and
    jsonpickle to encode/decode request and response bodies.

    NOTE(review): this module targets Python 2 (``from urllib import quote``
    and ``dict.values()[0]`` indexing in test_get_all); it would need porting
    to run on Python 3.
    """
    # region Class fields
    logger = logging.getLogger(__name__)
    # Fixture values assumed to already exist in the test database.
    existing_vulnerability_id = 145
    existing_vulnerability_name = 'Replay vulnerability'
    existing_environment_name = 'Stroke'
    existing_asset_names = ['Clinical data', 'Data node']
    # Dotted module path + class name of the Vulnerability model.
    vulnerability_class = Vulnerability.__module__+'.'+Vulnerability.__name__
    # endregion
    def test_get_all(self):
        """GET all vulnerabilities and sanity-check the returned dict."""
        method = 'test_get_all'
        rv = self.app.get('/api/vulnerabilities?session_id=test')
        vulnerabilities = jsonpickle.decode(rv.data)
        self.assertIsNotNone(vulnerabilities, 'No results after deserialization')
        self.assertIsInstance(vulnerabilities, dict, 'The result is not a dictionary as expected')
        self.assertGreater(len(vulnerabilities), 0, 'No vulnerabilities in the dictionary')
        self.logger.info('[%s] Vulnerabilities found: %d', method, len(vulnerabilities))
        # Python 2 only: dict.values() is a list and can be indexed directly.
        vulnerability = vulnerabilities.values()[0]
        self.logger.info('[%s] First vulnerability: %s [%d]\n', method, vulnerability['theVulnerabilityName'], vulnerability['theVulnerabilityId'])
    def test_get_by_name(self):
        """GET a single vulnerability by its (URL-quoted) name."""
        method = 'test_get_by_name'
        url = '/api/vulnerabilities/name/%s?session_id=test' % quote(self.existing_vulnerability_name)
        rv = self.app.get(url)
        self.assertIsNotNone(rv.data, 'No response')
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        vulnerability = jsonpickle.decode(rv.data)
        self.assertIsNotNone(vulnerability, 'No results after deserialization')
        self.logger.info('[%s] Vulnerability: %s [%d]\n', method, vulnerability['theVulnerabilityName'], vulnerability['theVulnerabilityId'])
    def test_delete(self):
        """POST a fresh vulnerability, then DELETE it and check the message."""
        method = 'test_delete'
        url = '/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theVulnerabilityName)
        new_vulnerability_body = self.prepare_json()
        # Delete first in case a previous run left the object behind.
        self.app.delete(url)
        self.logger.info('[%s] Object to delete: %s', method, new_vulnerability_body)
        self.app.post('/api/vulnerabilities', content_type='application/json', data=new_vulnerability_body)
        self.logger.info('[%s] URL: %s', method, url)
        rv = self.app.delete(url)
        self.logger.info('[%s] Response data: %s', method, rv.data)
        self.assertIsNotNone(rv.data, 'No response')
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s\n', method, message)
    def test_post(self):
        """POST a new vulnerability and verify a positive ID is returned."""
        method = 'test_post'
        url = '/api/vulnerabilities'
        self.logger.info('[%s] URL: %s', method, url)
        new_vulnerability_body = self.prepare_json()
        # Remove any leftover copy so the POST cannot clash with a fixture.
        self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theVulnerabilityName))
        rv = self.app.post(url, content_type='application/json', data=new_vulnerability_body)
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        env_id = json_resp.get('vulnerability_id', None)
        self.assertIsNotNone(env_id, 'No vulnerability ID returned')
        self.assertGreater(env_id, 0, 'Invalid vulnerability ID returned [%d]' % env_id)
        self.logger.info('[%s] Vulnerability ID: %d\n', method, env_id)
        # Clean up the object created above.
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theVulnerabilityName))
    def test_put(self):
        """POST a vulnerability, PUT an edited version, then verify and clean up."""
        method = 'test_put'
        url = '/api/vulnerabilities'
        self.logger.info('[%s] URL: %s', method, url)
        new_vulnerability_body = self.prepare_json()
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theVulnerabilityName))
        rv = self.app.post(url, content_type='application/json', data=new_vulnerability_body)
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        env_id = json_resp.get('vulnerability_id', None)
        self.assertIsNotNone(env_id, 'No vulnerability ID returned')
        self.assertGreater(env_id, 0, 'Invalid vulnerability ID returned [%d]' % env_id)
        self.logger.info('[%s] Vulnerability ID: %d', method, env_id)
        vulnerability_to_update = self.prepare_new_vulnerability()
        # NOTE(review): theName is assigned here but later requests read
        # theVulnerabilityName -- confirm the model keeps these in sync.
        vulnerability_to_update.theName = 'Edited test vulnerability'
        vulnerability_to_update.theId = env_id
        upd_env_body = self.prepare_json(vulnerability=vulnerability_to_update)
        rv = self.app.put('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theVulnerabilityName), data=upd_env_body, content_type='application/json')
        self.assertIsNotNone(rv.data, 'No response')
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp)
        self.assertIsInstance(json_resp, dict)
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s', method, message)
        self.assertGreater(message.find('successfully updated'), -1, 'The vulnerability was not successfully updated')
        # Fetch the updated object back to confirm the edit took effect.
        rv = self.app.get('/api/vulnerabilities/name/%s?session_id=test' % quote(vulnerability_to_update.theVulnerabilityName))
        upd_vulnerability = jsonpickle.decode(rv.data)
        self.assertIsNotNone(upd_vulnerability, 'Unable to decode JSON data')
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        self.logger.info('[%s] Vulnerability: %s [%d]\n', method, upd_vulnerability['theVulnerabilityName'], upd_vulnerability['theVulnerabilityId'])
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(vulnerability_to_update.theName))
    def prepare_new_vulnerability(self):
        """Build (but do not persist) a Vulnerability instance for the tests."""
        new_vulnerability_prop = VulnerabilityEnvironmentProperties(
            environmentName=self.existing_environment_name,
            severity='Critical',
            assets=self.existing_asset_names
        )
        new_vulnerability = Vulnerability(
            vulId=-1,
            vulName='Test Vulnerability',
            vulDesc='This is a test vulnerability',
            vulType='Design',
            tags=[],
            cProps=[new_vulnerability_prop]
        )
        new_vulnerability.theEnvironmentDictionary = {}
        return new_vulnerability
    def prepare_dict(self, vulnerability=None):
        """Wrap *vulnerability* (default: a fresh one) in a request envelope."""
        if vulnerability is None:
            vulnerability = self.prepare_new_vulnerability()
        else:
            assert isinstance(vulnerability, Vulnerability)
        return {
            'session_id': 'test',
            'object': vulnerability,
        }
    def prepare_json(self, data_dict=None, vulnerability=None):
        """Return the jsonpickle-encoded request body for *data_dict*."""
        if data_dict is None:
            data_dict = self.prepare_dict(vulnerability=vulnerability)
        else:
            assert isinstance(data_dict, dict)
        new_vulnerability_body = jsonpickle.encode(data_dict)
        self.logger.info('JSON data: %s', new_vulnerability_body)
        return new_vulnerability_body
"content_hash": "88eb82ae94462ce7a37003d69b8c3ab2",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 188,
"avg_line_length": 50.63636363636363,
"alnum_prop": 0.6696588868940754,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "3253d80831713a5d8d25a5ef97f1f3664c6a47fd",
"size": "7798",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/tests/VulnerabilityTests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
import tornado.ioloop
import tornado.iostream
from tornado import gen
import socket
import uuid
from urlparse import urlsplit
# Event types passed to the Peer event_handler callback.
JOIN_GROUP = 1
LEAVE_GROUP = 2
NEW_PEER = 3
EXISTING_PEER = 4
REMOVE_PEER = 5
# Peers start at full health; health decays on a timer and is restored by
# advertisements heard from the peer.
_MAX_HEALTH = 5
_PEER_HEALTH_INTERVAL = 1.5  # seconds between health decrements (call_later)
_GROUP_ADDR = ('224.0.0.1', 9999)  # multicast (host, port) all peers use
_ADVERTISE_INTERVAL = 1000 * 1  # PeriodicCallback interval, in milliseconds
_1KB = 1024 * 1  # max datagram size read per advertisement
def gethostname():
    """Return the local IP address of the default outbound interface.

    Connecting a UDP socket sends no packets; it only asks the OS to choose
    the outgoing interface, whose address is then read back via
    ``getsockname()``.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))  # connecting to a UDP address doesn't send packets
        return s.getsockname()[0]
    finally:
        s.close()  # fix: the original leaked this socket
def parse_payload(data):
    """Decode a ``qdpy://id@ip:port/group`` advertisement.

    Returns a ``(group, id, ip, port)`` tuple, or None when the payload does
    not use the qdpy scheme.
    """
    parts = urlsplit(data)
    if parts.scheme != 'qdpy':
        return None
    # group is the path with its leading '/' stripped
    return parts.path[1:], parts.username, parts.hostname, int(parts.port)
def create_payload(peer_group, peer_id, peer_ip, peer_port):
    """Build the ``qdpy://id@ip:port/group`` advertisement string."""
    return 'qdpy://{0}@{1}:{2:d}/{3}'.format(peer_id, peer_ip, peer_port, peer_group)
class Peer(object):
    """A peer in a UDP-multicast discovery mesh.

    Periodically advertises a ``qdpy://id@ip:port/group`` payload on
    _GROUP_ADDR for every joined group and listens for the same from others.
    Known peers carry a health counter that decays on a timer and is
    refreshed by each advertisement heard; peers that reach zero health (or
    whose group we have left) are dropped. State changes are reported through
    ``event_handler(event_type, **kwargs)`` using the module-level
    JOIN_GROUP/LEAVE_GROUP/NEW_PEER/EXISTING_PEER/REMOVE_PEER constants.
    """
    def __init__(self, addr, ioloop=None, groups=[], event_handler=None):
        # NOTE: the mutable default ``groups=[]`` is harmless here because it
        # is copied below and never mutated.
        self.id = str(uuid.uuid4())  # globally unique identity of this peer
        self.groups = groups[:]
        self.peers = {}  # peer_id -> {'group': str, 'addr': (ip, port), 'health': int}
        self.addr = addr  # (host, port) advertised to other peers
        self.ioloop = ioloop if ioloop is not None else tornado.ioloop.IOLoop.instance()
        self._socket = None  # created lazily by the ``socket`` property
        self.event_handler = event_handler
    def notify_event(self, event_type, **kwargs):
        # Forward an event to the user-supplied callback, if any.
        if self.event_handler:
            self.event_handler(event_type, **kwargs)
    def join(self, group):
        """Join ``group``; the mutation runs on the IOLoop thread."""
        def adder():
            if group not in self.groups:
                self.groups.append(group)
                self.notify_event(JOIN_GROUP, group=group)
        self.ioloop.add_callback(adder)
    def leave(self, group):
        """Leave ``group``; the mutation runs on the IOLoop thread."""
        def remover():
            if group in self.groups:
                self.groups.remove(group)
                self.notify_event(LEAVE_GROUP, group=group)
        self.ioloop.add_callback(remover)
    @property
    def socket(self):
        """Lazily-created non-blocking UDP socket, bound to the multicast
        port and joined to the _GROUP_ADDR multicast group."""
        if not self._socket:
            host, port = _GROUP_ADDR
            # Create the socket
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.setblocking(False)
            # Set some options to make it multicast-friendly
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
            s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
            s.bind(('', port))
            # Set some more multicast options
            intf = socket.gethostbyname(socket.gethostname())
            s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(intf))
            s.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(host) + socket.inet_aton(intf))
            self._socket = s
        return self._socket
    def start(self):
        """Begin listening for advertisements and advertising ourselves."""
        self.ioloop.add_handler(self.socket, self.on_peer, tornado.ioloop.IOLoop.READ)
        self._advertiser = tornado.ioloop.PeriodicCallback(self.advertise, _ADVERTISE_INTERVAL)
        self._advertiser.start()
    def track_peer(self, peer_id):
        # Schedule the next health decrement; unhealthy_peer re-arms the
        # timer (by calling track_peer again) while the peer stays alive.
        def tracker():
            self.unhealthy_peer(peer_id, self.track_peer)
        self.ioloop.call_later(_PEER_HEALTH_INTERVAL, tracker)
    def unhealthy_peer(self, peer_id, continuer):
        """Decrement a peer's health; drop it at zero health or if its group
        is no longer ours, otherwise keep tracking it via ``continuer``."""
        def checker():
            if peer_id in self.peers:
                peer = self.peers[peer_id]
                peer['health'] -= 1
                if not peer['group'] in self.groups or peer['health'] == 0:
                    del self.peers[peer_id]
                    self.notify_event(REMOVE_PEER, id=peer_id, peer=peer)
                else:
                    continuer(peer_id)
        self.ioloop.add_callback(checker)
    def on_peer(self, _not, _used):
        """IOLoop read handler: ingest one advertisement datagram."""
        try:
            data, _ = self.socket.recvfrom(_1KB)
        except:
            # TODO: What now?
            return
        # NOTE(review): parse_payload returns None for non-qdpy payloads,
        # which would make this unpacking raise TypeError -- confirm intended.
        peer_group, peer_id, peer_ip, peer_port = parse_payload(data)
        # TODO: Scope each peer to a given group
        if peer_group in self.groups and peer_id != self.id and peer_id not in self.get_peers():
            # TODO: Allow for more than one group per peer since this would actually be bad if a
            # peer belongs more than one group at a time as this call would then overwrite with
            # the last joined group for that peer (or last broadcasted)
            self.peers[peer_id] = {
                'group': peer_group,
                'addr': (peer_ip, peer_port),
                'health': _MAX_HEALTH
            }
            self.track_peer(peer_id)
            self.notify_event(NEW_PEER, id=peer_id, peer=self.peers[peer_id])
        elif peer_id in self.get_peers():
            # Hearing from a known peer restores (up to) full health.
            if self.peers[peer_id]['health'] < _MAX_HEALTH:
                self.peers[peer_id]['health'] += 1
            self.notify_event(EXISTING_PEER, id=peer_id, peer=self.peers[peer_id])
    def get_peers(self):
        """Return only the known peers belonging to groups we are in."""
        return {id: peer for id, peer in self.peers.iteritems() if peer['group'] in self.groups}
    def advertise(self):
        """Broadcast one advertisement per joined group."""
        for group in self.groups:
            host, port = self.addr
            data = create_payload(group, self.id, host, port)
            try:
                self.socket.sendto(data, _GROUP_ADDR)
            except:
                # TODO: Is there anything special to be done here?
                pass
| {
"content_hash": "74ae806ec88a2af947c26a0a11ee1405",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 107,
"avg_line_length": 33.94230769230769,
"alnum_prop": 0.5996222851746931,
"repo_name": "jerluc/qdpy",
"id": "50691ec5268fb26202b825d49f0dfae9a8a5f48c",
"size": "5295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qdpy/peer/discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13984"
}
],
"symlink_target": ""
} |
REDRAW_SCREEN = 'redraw screen'
CURSOR_UP = 'cursor up'
CURSOR_DOWN = 'cursor down'
CURSOR_LEFT = 'cursor left'
CURSOR_RIGHT = 'cursor right'
CURSOR_PAGE_UP = 'cursor page up'
CURSOR_PAGE_DOWN = 'cursor page down'
CURSOR_MAX_LEFT = 'cursor max left'
CURSOR_MAX_RIGHT = 'cursor max right'
ACTIVATE = 'activate'
class CommandMap(object):
    """Mapping from keystroke names to command strings.

    Behaves like a dict with a few conveniences: looking up an unbound
    keystroke yields None instead of raising, the default bindings can be
    restored wholesale, and every keystroke bound to a given command can be
    cleared in one call.

    Default values (key: command)::
        'tab': 'next selectable',
        'ctrl n': 'next selectable',
        'shift tab': 'prev selectable',
        'ctrl p': 'prev selectable',
        'ctrl l': 'redraw screen',
        'esc': 'menu',
        'up': 'cursor up',
        'down': 'cursor down',
        'left': 'cursor left',
        'right': 'cursor right',
        'page up': 'cursor page up',
        'page down': 'cursor page down',
        'home': 'cursor max left',
        'end': 'cursor max right',
        ' ': 'activate',
        'enter': 'activate',
    """
    _command_defaults = {
        'tab': 'next selectable',
        'ctrl n': 'next selectable',
        'shift tab': 'prev selectable',
        'ctrl p': 'prev selectable',
        'ctrl l': REDRAW_SCREEN,
        'esc': 'menu',
        'up': CURSOR_UP,
        'down': CURSOR_DOWN,
        'left': CURSOR_LEFT,
        'right': CURSOR_RIGHT,
        'page up': CURSOR_PAGE_UP,
        'page down': CURSOR_PAGE_DOWN,
        'home': CURSOR_MAX_LEFT,
        'end': CURSOR_MAX_RIGHT,
        ' ': ACTIVATE,
        'enter': ACTIVATE,
    }

    def __init__(self):
        self.restore_defaults()

    def restore_defaults(self):
        """Reset all bindings back to the class defaults."""
        self._command = dict(self._command_defaults)

    def __getitem__(self, key):
        # Unknown keystrokes map to None rather than raising KeyError.
        return self._command.get(key, None)

    def __setitem__(self, key, command):
        self._command[key] = command

    def __delitem__(self, key):
        del self._command[key]

    def clear_command(self, command):
        """Remove every keystroke currently bound to *command*."""
        for key in [k for k, v in self._command.items() if v == command]:
            del self._command[key]

    def copy(self):
        """
        Return a new copy of this CommandMap, likely so we can modify
        it separate from a shared one.
        """
        duplicate = CommandMap()
        duplicate._command = self._command.copy()
        return duplicate
# Module-level default instance; callers share this unless they make a copy.
command_map = CommandMap()  # shared command mappings
| {
"content_hash": "1c0cc8c26a1f4e4cca9222c76ed307af",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 69,
"avg_line_length": 28.951807228915662,
"alnum_prop": 0.5372451102788182,
"repo_name": "AnyMesh/anyMesh-Python",
"id": "15633f8480aefcc2e5d357957bc9650c3f21b3b1",
"size": "3291",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "example/urwid/command_map.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "848265"
}
],
"symlink_target": ""
} |
import asyncio
from urllib.parse import urlparse
from Levenshtein import distance, hamming, jaro, jaro_winkler, median, ratio
# Help texts returned when a request is missing its expected query params.
MESSAGES = {
    "names": ("Send some names to compare. Ej: "
              "?name=oneligin&name=OneLogin%20Inc."),
    "domains": ("Send some domains and URLs to get the canonical one. "
                "Ej: ?domain=google.com&domain=https://www.google.com"),
}
async def similarity_score(name1, name2, algorithm=None, lower=False):
    """Score the similarity of two strings.

    ``algorithm`` selects the Levenshtein-package metric ('levenshtein',
    'jaro', 'ratio' or 'hamming'); any other value falls back to
    jaro_winkler. With ``lower=True`` both strings are lower-cased first.
    """
    if lower:
        str1, str2 = name1.lower(), name2.lower()
    else:
        str1, str2 = name1, name2
    # Dispatch table replaces the elif chain; unknown names use the default.
    scorers = {
        "levenshtein": distance,
        "jaro": jaro,
        "ratio": ratio,
        "hamming": hamming,
    }
    distance_func = scorers.get(algorithm, jaro_winkler)
    return distance_func(str1, str2)
def get_list_response(request, variable):
    """Extract a repeated query parameter from *request*.

    Accepts either repeated ``?variable=a&variable=b`` parameters or a
    comma-separated ``?variables=a,b`` form. Returns ``(values, None)`` on
    success, or ``(None, help_response)`` when the parameter is absent.
    """
    plural = variable + "s"
    if variable in request.GET:
        return request.GET.getall(variable), None
    if plural in request.GET:
        return request.GET.get(plural).split(","), None
    # Parameter missing: point the caller at the relevant usage message.
    response = None
    if variable in ["name", "names"]:
        response = {"message": MESSAGES["names"]}
    elif variable in ["domain", "domains"]:
        response = {"message": MESSAGES["domains"]}
    return None, response
def get_domains(domains):
    """Normalize a mixed list of bare domains and URLs to canonical domains.

    For each entry: URLs are reduced to their host, a leading ``www.`` is
    stripped, and any ``:port`` suffix is removed. Returns the de-duplicated
    set of resulting domains.
    """
    result = set()
    for domain in domains:
        # Only treat values with an explicit scheme as URLs. The previous
        # check, startswith("http"), also matched bare hosts such as
        # "httpd.apache.org" and collapsed them to the empty string.
        if domain.startswith(("http://", "https://")):
            domain = urlparse(domain).netloc
        if domain.startswith("www."):
            domain = domain.replace("www.", "", 1)
        domain = domain.rsplit(":", 1)[0]  # drop any port suffix
        result.add(domain)
    return result
"content_hash": "52a5e545a476b8b8288cc5a7b07b0629",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 32.867924528301884,
"alnum_prop": 0.605625717566016,
"repo_name": "versae/partnermatcher",
"id": "89a031251a695b4162bf2b0ef804c342537d86b2",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1857"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import os
import warnings
import numpy as np
from astropy import log as logger
import six
from ..util.constants import c, pi
from ..util.functions import FreezableClass
from ..dust import SphericalDust
from ..util.otf_hdf5 import on_the_fly_hdf5
from ..grid import CartesianGrid, SphericalPolarGrid, CylindricalPolarGrid, OctreeGrid, AMRGrid, VoronoiGrid
# Index of each Stokes parameter along the Stokes axis of flux arrays.
STOKESD = {}
STOKESD['I'] = 0
STOKESD['Q'] = 1
STOKESD['U'] = 2
STOKESD['V'] = 3
# LaTeX-formatted label text for each Stokes/polarization quantity
# (presumably used for plot axes elsewhere -- not visible in this file).
LABEL = {}
LABEL['I'] = r'$\lambda\, F_\lambda$'
LABEL['Q'] = r'$\lambda\, F_\lambda$ [stokes=Q]'
LABEL['U'] = r'$\lambda\, F_\lambda$ [stokes=U]'
LABEL['V'] = r'$\lambda\, F_\lambda$ [stokes=V]'
LABEL['linpol'] = 'Total linear polarization fraction'
LABEL['circpol'] = 'Total circular polarization fraction'
# Display strings for each supported flux unit.
UNITS_LABEL = {}
UNITS_LABEL['ergs/s'] = '(ergs/s)'
UNITS_LABEL['ergs/cm^2/s'] = r'(ergs/cm$^2$/s)'
UNITS_LABEL['ergs/cm^2/s/Hz'] = r'(ergs/cm$^2$/s/Hz)'
UNITS_LABEL['Jy'] = 'Jy'
UNITS_LABEL['mJy'] = 'mJy'
UNITS_LABEL['MJy/sr'] = 'MJy/sr'
def mc_linear_polarization(I, sigma_I, Q, sigma_Q, U, sigma_U, N=1000):
    """Monte-Carlo estimate of the linear polarization fraction and its error.

    Draws N Gaussian realizations of I, Q and U (given their sigmas),
    computes P = hypot(Q, U) / I for each realization, and returns the
    (mean, std) of P over the N realizations.

    NOTE(review): the random deviates have shape (N, 1, ..., 1), so within a
    single realization one deviate is broadcast across the whole input array
    (fully correlated noise) rather than drawn per element -- confirm this
    is intended.
    """
    # This function is written with in-place operations for performance, which
    # can speed things up by at least a factor of two.
    new_shape = (N,) + I.shape
    ones_shape = (N,) + (1,) * I.ndim
    xi1 = np.random.normal(loc=0, scale=1., size=ones_shape)
    xi2 = np.random.normal(loc=0, scale=1., size=ones_shape)
    xi3 = np.random.normal(loc=0, scale=1., size=ones_shape)
    Is = np.zeros(new_shape)
    Qs = np.zeros(new_shape)
    Us = np.zeros(new_shape)
    # Build sigma * xi + value entirely with in-place broadcasted ops.
    Is += sigma_I
    Qs += sigma_Q
    Us += sigma_U
    Is *= xi1
    Qs *= xi2
    Us *= xi3
    Is += I
    Qs += Q
    Us += U
    np.divide(Qs, Is, out=Qs)
    np.divide(Us, Is, out=Us)
    Ps = np.hypot(Qs, Us)
    return np.mean(Ps, axis=0), np.std(Ps, axis=0)
def mc_circular_polarization(I, sigma_I, V, sigma_V, N=1000):
    """Monte-Carlo estimate of the circular polarization fraction and error.

    Draws N Gaussian realizations of I and V (given their sigmas), computes
    P = |V| / I for each realization, and returns the (mean, std) of P over
    the N realizations.

    NOTE(review): as in mc_linear_polarization, the deviates have shape
    (N, 1, ..., 1), so the noise is fully correlated across the input array
    within each realization -- confirm this is intended.
    """
    # This function is written with in-place operations for performance, which
    # can speed things up by at least a factor of two.
    new_shape = (N,) + I.shape
    ones_shape = (N,) + (1,) * I.ndim
    xi1 = np.random.normal(loc=0, scale=1., size=ones_shape)
    xi2 = np.random.normal(loc=0, scale=1., size=ones_shape)
    Is = np.zeros(new_shape)
    Vs = np.zeros(new_shape)
    # Build sigma * xi + value entirely with in-place broadcasted ops.
    Is += sigma_I
    Vs += sigma_V
    Is *= xi1
    Vs *= xi2
    Is += I
    Vs += V
    np.abs(Vs, out=Vs)
    Ps = np.divide(Vs, Is)
    return np.mean(Ps, axis=0), np.std(Ps, axis=0)
class ModelOutput(FreezableClass):
'''
A class that can be used to access data in the output file from
radiative transfer models.
Parameters
----------
name : str
The name of the model output file (including extension)
'''
def __init__(self, filename):
# Check that file exists
if not os.path.exists(filename):
raise IOError("File not found: %s" % filename)
# Open file and store handle to object
# (but don't read in the contents yet)
self.filename = filename
self.file = None
    def _get_origin_slice(self, group, component, source_id=None, dust_id=None, n_scat=None):
        """Translate a ``component`` name (plus optional source/dust/n_scat
        selectors) into an index -- or a half-open ``(start, end)`` tuple --
        into the origin axis of the SED/image dataset in ``group``.

        The interpretation depends on the file's ``track_origin`` attribute
        ('basic', 'detailed' or 'scatterings').
        """
        track_origin = group.attrs['track_origin'].decode('utf-8')
        # Only the total flux is stored when origin tracking was disabled.
        if track_origin == 'no' and component != 'total':
            raise Exception("cannot extract component=%s - file only contains total flux" % component)
        if track_origin != 'detailed':
            if source_id is not None:
                raise Exception("cannot specify source_id since track_origin was not set to 'detailed'")
            if dust_id is not None:
                raise Exception("cannot specify dust_id since track_origin was not set to 'detailed'")
        if track_origin in ['basic', 'detailed']:
            # Basic layout: one slot per component, in this fixed order.
            if component == 'source_emit':
                io = 0
            elif component == 'dust_emit':
                io = 1
            elif component == 'source_scat':
                io = 2
            elif component == 'dust_scat':
                io = 3
            else:
                raise ValueError("component should be one of total/source_emit/dust_emit/source_scat/dust_scat since track_origin='{0}'".format(track_origin))
            if track_origin == 'detailed':
                # A source may be referred to by name; resolve it to an index.
                if isinstance(source_id, six.string_types) and source_id != 'all':
                    try:
                        source_id = self.get_available_sources().index(source_id)
                    except ValueError:
                        raise ValueError("No source named {0}".format(source_id))
                ns = group.attrs['n_sources']
                nd = group.attrs['n_dust']
                # Detailed layout expands each slot: source components span ns
                # entries and dust components nd entries, i.e.
                # [source_emit*ns][dust_emit*nd][source_scat*ns][dust_scat*nd].
                # This maps io in {0,1,2,3} to the section start offsets
                # {0, ns, ns+nd, 2*ns+nd} respectively.
                io = ((io - (io + 1) % 2 + 1) * ns + (io - io % 2) * nd) // 2
                if component.startswith('source'):
                    if source_id is None or source_id == 'all':
                        # Whole source section as a half-open range.
                        io = (io, io + ns)
                    else:
                        if source_id < 0 or source_id >= ns:
                            raise ValueError("source_id should be between 0 and %i" % (ns - 1))
                        io = io + source_id
                else:
                    if dust_id is None or dust_id == 'all':
                        # Whole dust section as a half-open range.
                        io = (io, io + nd)
                    else:
                        if dust_id < 0 or dust_id >= nd:
                            raise ValueError("dust_id should be between 0 and %i" % (nd - 1))
                        io = io + dust_id
        elif track_origin == 'scatterings':
            if 'track_n_scat' in group.attrs:
                track_n_scat = group.attrs['track_n_scat']
            else:
                track_n_scat = 0
            # Layout: source slots first, then dust slots, each section
            # holding track_n_scat + 2 entries.
            if component == 'source':
                io = 0
            elif component == 'dust':
                io = track_n_scat + 2
            else:
                raise ValueError("component should be one of total/source/dust since track_origin='scatterings'")
            if n_scat is None:
                # We need to remember to take into account the additional slice
                # that contains the remaining flux. The upper bound of the
                # slice is exclusive.
                io = (io, io + track_n_scat + 2)
            else:
                if n_scat >= 0 and n_scat <= track_n_scat:
                    io += n_scat
                else:
                    raise ValueError("n_scat should be between 0 and {0}".format(track_n_scat))
        else:
            raise ValueError("track_origin should be one of basic/detailed/scatterings")
        return io
    @on_the_fly_hdf5
    def get_sed(self, stokes='I', group=0, technique='peeled',
                distance=None, component='total', inclination='all',
                aperture='all', uncertainties=False, units=None,
                source_id=None, dust_id=None, n_scat=None):
        '''
        Retrieve SEDs for a specific image group and Stokes component.
        Parameters
        ----------
        stokes : str, optional
            The Stokes component to return. This can be:
                * 'I': Total intensity [default]
                * 'Q': Q Stokes parameter (linear polarization)
                * 'U': U Stokes parameter (linear polarization)
                * 'V': V Stokes parameter (circular polarization)
                * 'linpol': Total linear polarization fraction
                * 'circpol': Total circular polariation fraction
            Note that if the SED was set up with ``set_stokes(False)``, then
            only the ``I`` component is available.
        technique : str, optional
            Whether to retrieve SED(s) computed with photon peeling-off
            ('peeled') or binning ('binned'). Default is 'peeled'.
        group : int, optional
            The peeloff group (zero-based). If multiple peeloff image groups
            were requested, this can be used to select between them. The
            default is to return the first group. This option is only used if
            technique='peeled'.
        distance : float, optional
            The distance to the observer, in cm.
        component : str, optional
            The component to return based on origin and last interaction.
            This can be:
                * 'total': Total flux
                * 'source_emit': The photons were last emitted from a source
                  and did not undergo any subsequent interactions.
                * 'dust_emit': The photons were last emitted dust and did not
                  undergo any subsequent interactions
                * 'source_scat': The photons were last emitted from a source
                  and were subsequently scattered
                * 'dust_scat': The photons were last emitted from dust and
                  were subsequently scattered
                * 'source': The photons that were last emitted from a source
                * 'dust': The photons that were last emitted from dust
        aperture : int, optional
            The index of the aperture to plot (zero-based). Use 'all' to
            return all apertures, and -1 to show the largest aperture.
        inclination : int, optional
            The index of the viewing angle to plot (zero-based). Use 'all'
            to return all viewing angles.
        uncertainties : bool, optional
            Whether to compute and return uncertainties
        units : str, optional
            The output units for the SED(s). Valid options if a distance is
            specified are:
                * ``'ergs/cm^2/s'``
                * ``'ergs/cm^2/s/Hz'``
                * ``'Jy'``
                * ``'mJy'``
            The default is ``'ergs/cm^2/s'``. If a distance is not specified,
            then this option is ignored, and the output units are ergs/s.
        source_id, dust_id : int or str, optional
            If the output file was made with track_origin='detailed', a
            specific source and dust component can be specified (where 0 is
            the first source or dust type). If 'all' is specified, then all
            components are returned individually. If neither of these are
            not specified, then the total component requested for all
            sources or dust types is returned. For sources, it is also possible
            to specify a source name as a string, if the source name was set
            during the set-up.
        n_scat : int, optional
            If the output file was made with track_origin='scatterings', the
            number of scatterings can be specified here. If specified, the
            image is constructed only from photons that have scattered
            ``n_scat`` times since being last emitted.
        Returns
        -------
        wav : numpy.ndarray
            The wavelengths for which the SEDs are defined, in microns
        flux or degree of polarization : numpy.ndarray
            The flux or degree of polarization. This is a data cube which has
            at most three dimensions (n_inclinations, n_apertures,
            n_wavelengths). If an aperture or inclination is specified, this
            reduces the number of dimensions in the flux cube. If `stokes` is
            one of 'I', 'Q', 'U', or 'V', the flux is either returned in
            ergs/s (if distance is not specified) or in the units specified by
            units= (if distance is specified). If `stokes` is one of 'linpol'
            or 'circpol', the degree of polarization is returned as a fraction
            in the range 0 to 1.
        uncertainty : numpy.ndarray
            The uncertainties on the flux or degree of polarization. This
            has the same dimensions as the flux or degree of polarization
            array. This is only returned if uncertainties were requested.
        '''
        # Check argument types
        if not isinstance(stokes, six.string_types):
            raise ValueError("stokes argument should be a string")
        # Check for inconsistent parameters
        if distance is not None and stokes in ['linpol', 'circpol']:
            raise Exception("Cannot scale linear or circular polarization degree by distance")
        if technique == 'peeled':
            n_groups = len(self.file['Peeled'])
            # Negative group indices count from the end, like Python lists.
            if (group < 0 and -group <= n_groups) or (group >= 0 and group < n_groups):
                g = self.file['Peeled/group_%05i' % (group + 1)]
            else:
                raise ValueError('File only contains %i image/SED group(s)' % n_groups)
        else:
            g = self.file['Binned']
        if not 'seds' in g:
            raise Exception("Group %i does not contain any SEDs" % group)
        # Check that uncertainties are present if requested
        if uncertainties and not 'seds_unc' in g:
            raise Exception("Uncertainties requested but not present in file")
        # NOTE(review): ``io`` is only bound when origin tracking is present
        # and a non-total component is requested; the component-selection
        # code further down relies on this pairing holding.
        if 'track_origin' in g['seds'].attrs and component != 'total':
            io = self._get_origin_slice(g['seds'], component=component,
                                        source_id=source_id, dust_id=dust_id, n_scat=n_scat)
        # Set up wavelength space: filter wavelengths, a numin/numax log
        # grid, or an explicit frequency table, depending on file layout.
        if 'use_filters' in g.attrs and g.attrs['use_filters'].decode('utf-8').lower() == 'yes':
            nu = g['filt_nu0'][()]
            wav = c / nu * 1.e4
        elif 'numin' in g['seds'].attrs:
            numin = g['seds'].attrs['numin']
            numax = g['seds'].attrs['numax']
            wavmin, wavmax = c / numax * 1.e4, c / numin * 1.e4
            # Bin centers of a log-spaced grid from wavmax down to wavmin.
            wav = np.logspace(np.log10(wavmax), np.log10(wavmin), g['seds'].shape[-1] * 2 + 1)[1::2]
            nu = c / wav * 1.e4
        else:
            nu = g['frequencies']['nu']
            wav = c / nu * 1.e4
        flux = g['seds'][()]
        if uncertainties:
            unc = g['seds_unc'][()]
        try:
            inside_observer = g.attrs['inside_observer'].decode('utf-8').lower() == 'yes'
        except:
            # NOTE(review): bare except -- presumably for files lacking the
            # attribute, but it also hides any other failure here.
            inside_observer = False
        if inside_observer and distance is not None:
            raise ValueError("Cannot specify distance for inside observers")
        # Set default units
        if units is None:
            if distance is None and not inside_observer:
                units = 'ergs/s'
            else:
                units = 'ergs/cm^2/s'
        # Optionally scale by distance
        if distance is not None or inside_observer:
            # Convert to the correct units
            if units == 'ergs/cm^2/s':
                scale = 1.
            elif units == 'ergs/cm^2/s/Hz':
                scale = 1. / nu
            elif units == 'Jy':
                scale = 1.e23 / nu
            elif units == 'mJy':
                scale = 1.e26 / nu
            else:
                raise ValueError("Unknown units: %s" % units)
            # Scale by distance (inverse-square dilution over the sphere)
            if distance:
                scale *= 1. / (4. * pi * distance ** 2)
        else:
            if units != 'ergs/s':
                raise ValueError("Since distance= is not specified, units should be set to ergs/s")
            # Units here are not technically ergs/cm^2/s but ergs/s
            scale = 1.
        # If in 32-bit mode, need to convert to 64-bit because of scaling/polarization to be safe
        if flux.dtype == np.float32:
            flux = flux.astype(np.float64)
        if uncertainties and unc.dtype == np.float32:
            unc = unc.astype(np.float64)
        # If a stokes component is requested, scale the images. Frequency is
        # the last dimension, so this compact notation can be used.
        if stokes in STOKESD:
            flux *= scale
            if uncertainties:
                unc *= scale
        # We now slice the SED array to end up with what the user requested.
        # Note that we slice from the last to the first dimension to ensure that
        # we always know what the slices are. In this section, we make use of
        # the fact that when writing array[i] with a multi-dimensional array,
        # the index i applies only to the first dimension. So flux[1] really
        # means flux[1, :, :, :, :].
        if aperture == 'all':
            pass
        else:
            if not isinstance(aperture, int):
                raise TypeError('aperture should be an integer (it should'
                                ' be the index of the aperture, not the '
                                'value itself)')
            flux = flux[:, :, :, aperture]
            if uncertainties:
                unc = unc[:, :, :, aperture]
        if inclination == 'all':
            pass
        else:
            if not isinstance(inclination, int):
                raise TypeError('inclination should be an integer (it should'
                                ' be the index of the inclination, not the '
                                'value itself)')
            flux = flux[:, :, inclination]
            if uncertainties:
                unc = unc[:, :, inclination]
        # Select correct origin component
        if component == 'total':
            flux = np.sum(flux, axis=1)
            if uncertainties:
                # Independent uncertainties add in quadrature.
                unc = np.sqrt(np.sum(unc ** 2, axis=1))
        elif component in ['source_emit', 'dust_emit', 'source_scat', 'dust_scat', 'dust', 'source']:
            # ``io`` is either a (start, end) range over the origin axis or a
            # single index, as computed by _get_origin_slice.
            if type(io) is tuple:
                start, end = io
                flux = flux[:, start:end]
                if uncertainties:
                    unc = unc[:, start:end]
                if (component.startswith('source') and source_id is None) or \
                   (component.startswith('dust') and dust_id is None):
                    flux = np.sum(flux, axis=1)
                    if uncertainties:
                        unc = np.sqrt(np.sum(unc ** 2, axis=1))
            else:
                flux = flux[:, io]
                if uncertainties:
                    unc = unc[:, io]
        else:
            raise Exception("Unknown component: %s" % component)
        # Select correct Stokes component
        if flux.shape[0] == 1 and stokes != 'I':
            raise ValueError("Only the Stokes I value was stored for this SED")
        if stokes in STOKESD:
            flux = flux[STOKESD[stokes]]
            if uncertainties:
                unc = unc[STOKESD[stokes]]
        elif stokes == 'linpol':
            if uncertainties:
                flux, unc = mc_linear_polarization(flux[0], unc[0], flux[1], unc[1], flux[2], unc[2])
            else:
                # sqrt(Q^2 + U^2) / I; NaNs from zero flux are mapped to 0.
                with np.errstate(invalid='ignore'):
                    flux = np.sqrt((flux[1] ** 2 + flux[2] ** 2) / flux[0] ** 2)
                flux[np.isnan(flux)] = 0.
        elif stokes == 'circpol':
            if uncertainties:
                flux, unc = mc_circular_polarization(flux[0], unc[0], flux[3], unc[3])
            else:
                # |V| / I; NaNs from zero flux are mapped to 0.
                with np.errstate(invalid='ignore'):
                    flux = np.abs(flux[3] / flux[0])
                flux[np.isnan(flux)] = 0.
        else:
            raise ValueError("Unknown Stokes parameter: %s" % stokes)
        from .sed import SED
        sed = SED(nu=nu, val=flux, unc=unc if uncertainties else None, units=units)
        # Add aperture information
        sed.ap_min = g['seds'].attrs['apmin']
        sed.ap_max = g['seds'].attrs['apmax']
        # Add depth information
        try:
            sed.d_min = g.attrs['d_min']
            sed.d_max = g.attrs['d_max']
        except KeyError:  # Older versions of Hyperion
            sed.d_min = None
            sed.d_max = None
        # Add distance
        sed.distance = distance
        # Save whether the SED was from an inside observer
        sed.inside_observer = inside_observer
        return sed
@on_the_fly_hdf5
def get_image(self, stokes='I', group=0, technique='peeled',
distance=None, component='total', inclination='all',
uncertainties=False, units=None,
source_id=None, dust_id=None, n_scat=None):
'''
Retrieve images for a specific image group and Stokes component.
Parameters
----------
stokes : str, optional
The Stokes component to return. This can be:
* 'I': Total intensity [default]
* 'Q': Q Stokes parameter (linear polarization)
* 'U': U Stokes parameter (linear polarization)
* 'V': V Stokes parameter (circular polarization)
* 'linpol': Total linear polarization fraction
* 'circpol': Total circular polariation fraction
Note that if the image was set up with ``set_stokes(False)``, then
only the ``I`` component is available.
technique : str, optional
Whether to retrieve an image computed with photon peeling-off
('peeled') or binning ('binned'). Default is 'peeled'.
group : int, optional
The peeloff group (zero-based). If multiple peeloff image groups
were requested, this can be used to select between them. The
default is to return the first group. This option is only used if
technique='peeled'.
distance : float, optional
The distance to the observer, in cm.
component : str, optional
The component to return based on origin and last interaction.
This can be:
* 'total': Total flux
* 'source_emit': The photons were last emitted from a source
and did not undergo any subsequent interactions.
* 'dust_emit': The photons were last emitted dust and did not
undergo any subsequent interactions
* 'source_scat': The photons were last emitted from a source
and were subsequently scattered
* 'dust_scat': The photons were last emitted from dust and
were subsequently scattered
* 'source': The photons that were last emitted from a source
* 'dust': The photons that were last emitted from dust
inclination : int, optional
The index of the viewing angle to plot (zero-based). Use 'all'
to return all viewing angles.
uncertainties : bool, optional
Whether to compute and return uncertainties
units : str, optional
The output units for the image(s). Valid options if a distance is
specified are:
* ``'ergs/cm^2/s'``
* ``'ergs/cm^2/s/Hz'``
* ``'Jy'``
* ``'mJy'``
* ``'MJy/sr'``
The default is ``'ergs/cm^2/s'``. If a distance is not specified,
then this option is ignored, and the output units are ergs/s.
source_id, dust_id : int or str, optional
If the output file was made with track_origin='detailed', a
specific source and dust component can be specified (where 0 is
the first source or dust type). If 'all' is specified, then all
components are returned individually. If neither of these are
not specified, then the total component requested for all
sources or dust types is returned. For sources, it is also possible
to specify a source name as a string, if the source name was set
during the set-up.
n_scat : int, optional
If the output file was made with track_origin='scatterings', the
number of scatterings can be specified here. If specified, the
image is constructed only from photons that have scattered
``n_scat`` times since being last emitted.
Returns
-------
wav : numpy.ndarray
The wavelengths for which the SEDs are defined, in microns
flux or degree of polarization : numpy.ndarray
The flux or degree of polarization. This is a data cube which has
at most three dimensions (n_inclinations, n_wavelengths). If an
aperture or inclination is specified, this reduces the number of
dimensions in the flux cube. If `stokes` is one of 'I', 'Q', 'U',
or 'V', the flux is either returned in ergs/s (if distance is not
specified) or in the units specified by units= (if distance is
specified). If `stokes` is one of 'linpol' or 'circpol', the
degree of polarization is returned as a fraction in the range 0 to
1.
uncertainty : numpy.ndarray
The uncertainties on the flux or degree of polarization. This
has the same dimensions as the flux or degree of polarization
array. This is only returned if uncertainties were requested.
'''
# Check argument types
if not isinstance(stokes, six.string_types):
raise ValueError("stokes argument should be a string")
# Check for inconsistent parameters
if distance is not None and stokes in ['linpol', 'circpol']:
raise Exception("Cannot scale linear or circular polarization degree by distance")
if technique == 'peeled':
n_groups = len(self.file['Peeled'])
if (group < 0 and -group <= n_groups) or (group >= 0 and group < n_groups):
g = self.file['Peeled/group_%05i' % (group + 1)]
else:
raise ValueError('File only contains %i image/SED group(s)' % n_groups)
else:
g = self.file['Binned']
if not 'images' in g:
raise Exception("Group %i does not contain any images" % group)
# Check that uncertainties are present if requested
if uncertainties and not 'images_unc' in g:
raise Exception("Uncertainties requested but not present in file")
if 'track_origin' in g['images'].attrs and component != 'total':
io = self._get_origin_slice(g['images'], component=component,
source_id=source_id, dust_id=dust_id, n_scat=n_scat)
# Set up wavelength space
if 'use_filters' in g.attrs and g.attrs['use_filters'].decode('utf-8').lower() == 'yes':
nu = g['filt_nu0'][()]
wav = c / nu * 1.e4
elif 'numin' in g['images'].attrs:
numin = g['images'].attrs['numin']
numax = g['images'].attrs['numax']
wavmin, wavmax = c / numax * 1.e4, c / numin * 1.e4
wav = np.logspace(np.log10(wavmax), np.log10(wavmin), g['images'].shape[-1] * 2 + 1)[1::2]
nu = c / wav * 1.e4
else:
nu = g['frequencies']['nu']
wav = c / nu * 1.e4
flux = g['images'][()]
if uncertainties:
unc = g['images_unc'][()]
try:
inside_observer = g.attrs['inside_observer'].decode('utf-8').lower() == 'yes'
except:
inside_observer = False
if inside_observer and distance is not None:
raise ValueError("Cannot specify distance for inside observers")
# Set default units
if units is None:
if distance is None and not inside_observer:
units = 'ergs/s'
else:
units = 'ergs/cm^2/s'
# Find pixel dimensions of image
ny, nx = flux.shape[-3:-1]
# Find physical and angular extent, and pixel area in steradians
if inside_observer:
# Physical extent cannot be set
x_min = x_max = y_min = y_max = None
# Find extent of the image
lon_min = g['images'].attrs['xmin']
lon_max = g['images'].attrs['xmax']
lat_min = g['images'].attrs['ymin']
lat_max = g['images'].attrs['ymax']
# Need to construct a mesh since every pixel might have a
# different size
lon = np.linspace(np.radians(lon_min), np.radians(lon_max), nx + 1)
lat = np.cos(np.linspace(np.radians(90. - lat_min), np.radians(90. - lat_max), ny + 1))
dlon = lon[1:] - lon[:-1]
dlat = lat[:-1] - lat[1:]
DLON, DLAT = np.meshgrid(dlon, dlat)
# Find pixel area in steradians
pix_area_sr = DLON * DLAT
else:
# Find physical extent of the image
x_min = g['images'].attrs['xmin']
x_max = g['images'].attrs['xmax']
y_min = g['images'].attrs['ymin']
y_max = g['images'].attrs['ymax']
if distance is not None:
# Find angular extent
lon_min_rad = np.arctan(x_min / distance)
lon_max_rad = np.arctan(x_max / distance)
lat_min_rad = np.arctan(y_min / distance)
lat_max_rad = np.arctan(y_max / distance)
# Find pixel size in arcseconds
pix_dx = abs(lon_max_rad - lon_min_rad) / float(nx)
pix_dy = abs(lat_max_rad - lat_min_rad) / float(ny)
# Find pixel area in steradians
pix_area_sr = pix_dx * pix_dy
# Find angular extent in degrees
lon_min = np.degrees(lon_min_rad)
lon_max = np.degrees(lon_max_rad)
lat_min = np.degrees(lat_min_rad)
lat_max = np.degrees(lat_max_rad)
else:
# Angular extent cannot be set
lon_min = lon_max = lat_min = lat_max = None
# Pixel area in steradians cannot be set
pix_area_sr = None
# Optionally scale by distance
if distance is not None or inside_observer:
# Convert to the correct units
if units == 'ergs/cm^2/s':
scale = 1.
elif units == 'ergs/cm^2/s/Hz':
scale = 1. / nu
elif units == 'Jy':
scale = 1.e23 / nu
elif units == 'mJy':
scale = 1.e26 / nu
elif units == 'MJy/sr':
if inside_observer:
scale = 1.e17 / nu[np.newaxis, np.newaxis, :] / pix_area_sr[:, :, np.newaxis]
else:
scale = 1.e17 / nu / pix_area_sr
else:
raise ValueError("Unknown units: %s" % units)
# Scale by distance
if distance:
scale *= 1. / (4. * pi * distance ** 2)
else:
if units != 'ergs/s':
raise ValueError("Since distance= is not specified, units should be set to ergs/s")
scale = 1.
# If in 32-bit mode, need to convert to 64-bit because of
# scaling/polarization to be safe
if flux.dtype == np.float32:
flux = flux.astype(np.float64)
if uncertainties and unc.dtype == np.float32:
unc = unc.astype(np.float64)
# If a stokes component is requested, scale the images. Frequency is
# the last dimension, so this compact notation can be used.
if stokes in STOKESD:
flux *= scale
if uncertainties:
unc *= scale
# We now slice the image array to end up with what the user requested.
# Note that we slice from the last to the first dimension to ensure that
# we always know what the slices are. In this section, we make use of
# the fact that when writing array[i] with a multi-dimensional array,
# the index i applies only to the first dimension. So flux[1] really
# means flux[1, :, :, :, :].
if inclination == 'all':
pass
else:
if not isinstance(inclination, int):
raise TypeError('inclination should be an integer (it should'
' be the index of the inclination, not the '
'value itself)')
flux = flux[:, :, inclination]
if uncertainties:
unc = unc[:, :, inclination]
# Select correct origin component
if component == 'total':
flux = np.sum(flux, axis=1)
if uncertainties:
unc = np.sqrt(np.sum(unc ** 2, axis=1))
elif component in ['source_emit', 'dust_emit', 'source_scat', 'dust_scat', 'dust', 'source']:
if type(io) is tuple:
start, end = io
flux = flux[:, start:end]
if uncertainties:
unc = unc[:, start:end]
if (component.startswith('source') and source_id is None) or \
(component.startswith('dust') and dust_id is None):
flux = np.sum(flux, axis=1)
if uncertainties:
unc = np.sqrt(np.sum(unc ** 2, axis=1))
else:
flux = flux[:, io]
if uncertainties:
unc = unc[:, io]
else:
raise Exception("Unknown component: %s" % component)
# Select correct Stokes component
if flux.shape[0] == 1 and stokes != 'I':
raise ValueError("Only the Stokes I value was stored for this image")
if stokes in STOKESD:
flux = flux[STOKESD[stokes]]
if uncertainties:
unc = unc[STOKESD[stokes]]
elif stokes == 'linpol':
if uncertainties:
flux, unc = mc_linear_polarization(flux[0], unc[0], flux[1], unc[1], flux[2], unc[2])
else:
with np.errstate(invalid='ignore'):
flux = np.sqrt((flux[1] ** 2 + flux[2] ** 2) / flux[0] ** 2)
flux[np.isnan(flux)] = 0.
elif stokes == 'circpol':
if uncertainties:
flux, unc = mc_circular_polarization(flux[0], unc[0], flux[3], unc[3])
else:
with np.errstate(invalid='ignore'):
flux = np.abs(flux[3] / flux[0])
flux[np.isnan(flux)] = 0.
else:
raise ValueError("Unknown Stokes parameter: %s" % stokes)
from .image import Image
image = Image(nu=nu, val=flux, unc=unc if uncertainties else None, units=units)
# Add physical extent
image.x_min = x_min
image.x_max = x_max
image.y_min = y_min
image.y_max = y_max
# Add depth information
try:
image.d_min = g.attrs['d_min']
image.d_max = g.attrs['d_max']
except KeyError: # Older versions of Hyperion
image.d_min = None
image.d_max = None
# Add angular extent
image.lon_min = lon_min
image.lon_max = lon_max
image.lat_min = lat_min
image.lat_max = lat_max
# Add pixel area in steradians
image.pix_area_sr = pix_area_sr
# Add distance
image.distance = distance
# Save whether the image was from an inside observer
image.inside_observer = inside_observer
return image
@on_the_fly_hdf5
def get_available_sources(self):
"""
Find out what sources are available in the output image (useful if detailed tracking was used)
"""
if self.file['Input'].file != self.file.file:
# Workaround for h5py bug - can't access link directly,
# need to use file attribute
g_sources = self.file['Input'].file[self.file['Input'].name]['Sources']
else:
g_sources = self.file['Input/Sources']
names = []
for source in sorted(g_sources.keys()):
names.append(g_sources[source].attrs['name'].decode('utf-8'))
return names
@on_the_fly_hdf5
def get_available_components(self, iteration=-1):
'''
Find out what physical components are available in the output file
Parameters
----------
iteration : integer, optional
The iteration to retrieve the grid for. The default is to return the components for the last iteration
'''
from .helpers import find_last_iteration
# If iteration is last one, find iteration number
if iteration == -1:
iteration = find_last_iteration(self.file)
# Return components
components = list(self.file['iteration_%05i' % iteration].keys())
if 'specific_energy' in components:
components.append('temperature')
return components
@on_the_fly_hdf5
def get_quantities(self, iteration=-1):
'''
Retrieve one of the physical grids for the model
Parameters
----------
iteration : integer, optional
The iteration to retrieve the quantities for. The default is to return the grid for the last iteration.
Returns
-------
grid : Grid instance
An object containing information about the geometry and quantities
'''
from .helpers import find_last_iteration
if 'Input' in self.file:
if self.file['Input'].file != self.file.file:
# Workaround for h5py bug - can't access link directly,
# need to use file attribute
g_grid = self.file['Input'].file[self.file['Input'].name]['Grid']
else:
g_grid = self.file['Input/Grid']
else:
g_grid = self.file['Grid']
# Find coordinate grid type
coord_type = g_grid['Geometry'].attrs['grid_type'].decode('utf-8')
if coord_type == 'car':
g = CartesianGrid()
elif coord_type == 'cyl_pol':
g = CylindricalPolarGrid()
elif coord_type == 'sph_pol':
g = SphericalPolarGrid()
elif coord_type == 'amr':
g = AMRGrid()
elif coord_type == 'oct':
g = OctreeGrid()
elif coord_type == 'vor':
g = VoronoiGrid()
else:
raise ValueError("Unknown grid type: {0}".format(coord_type))
# Read in geometry and input quantities
g.read_geometry(g_grid['Geometry'])
# If iteration is last one, find iteration number
if iteration == -1:
iteration = find_last_iteration(self.file)
else:
iteration = iteration + 1 # Python value is zero based
# Read in quantities from the requested iteration (if available)
if iteration > 0:
g.read_quantities(self.file['iteration_%05i' % iteration])
if not 'density' in g:
logger.info("No density present in output, reading initial density")
g.read_quantities(g_grid['Quantities'], quantities=['density'])
# Compute the temperature as a derived quantity
if 'specific_energy' in g:
# Open the dust group
n_dust = g.n_dust
g_dust = self.file['Input/Dust']
# Compile a list of specific energy to temperature functions
convert_func = []
for i in range(n_dust):
# Read in dust type
dust = g_dust['dust_%03i' % (i + 1)]
d = SphericalDust(dust)
# Add to conversion function list
convert_func.append(d.specific_energy2temperature)
# Define function to convert different specific energy to
# temperature for different dust types
def specific_energy2temperature(quantities):
quantities['temperature'] = []
for i in range(n_dust):
quantities['temperature'].append(convert_func[i](quantities['specific_energy'][i]))
# Get the grid to add the quantity
g.add_derived_quantity('temperature', specific_energy2temperature)
return g
@on_the_fly_hdf5
def get_physical_grid(self, name, iteration=-1, dust_id='all'):
'''
Retrieve one of the physical grids for the model
Parameters
----------
name : str
The component to retrieve. This should be one of:
'specific_energy' The specific energy absorbed in each cell
'temperature' The dust temperature in each cell (only
available for cells with LTE dust)
'density' The density in each cell (after possible
dust sublimation)
'density_diff' The difference in the final density
compared to the initial density
'n_photons' The number of unique photons that went
through each cell
iteration : integer, optional
The iteration to retrieve the grid for. The default is to return the grid for the last iteration.
dust_id : 'all' or int
If applicable, the ID of the dust type to extract the grid for (does not apply to n_photons)
Returns
-------
array : numpy.array instance for regular grids
The physical grid in cgs.
Notes
-----
At the moment, this method only works on regular grids, not AMR or Oct-tree grids
'''
from .helpers import find_last_iteration
warnings.warn("get_physical_grid is deprecated, use get_quantities instead", DeprecationWarning)
# Check that dust_id was not specified if grid is n_photons
if name == 'n_photons':
if dust_id != 'all':
raise ValueError("Cannot specify dust_id when retrieving n_photons")
# Check name
available_components = self.get_available_components()
if name not in available_components:
raise Exception("name should be one of %s" % '/'.join(available_components))
# If iteration is last one, find iteration number
if iteration == -1:
iteration = find_last_iteration(self.file)
# Extract specific energy grid
if name == 'temperature':
array = np.array(self.file['iteration_%05i' % iteration]['specific_energy'])
g_dust = self.file['Input/Dust']
g_dust = g_dust.file[g_dust.name] # workaround for h5py < 2.1.0
for i in range(array.shape[0]):
dust = g_dust['dust_%03i' % (i + 1)]
d = SphericalDust(dust)
array[i, :, :, :] = d.specific_energy2temperature(array[i, :, :, :])
else:
array = np.array(self.file['iteration_%05i' % iteration][name])
# If required, extract grid for a specific dust type
if name == 'n_photons':
return array
elif dust_id == 'all':
return [array[i, :, :, :] for i in range(array.shape[0])]
else:
return array[dust_id, :, :, :]
| {
"content_hash": "f2f4baca85628e7c2e9fce188a8a05af",
"timestamp": "",
"source": "github",
"line_count": 1136,
"max_line_length": 158,
"avg_line_length": 37.76496478873239,
"alnum_prop": 0.5435537633155404,
"repo_name": "hyperion-rt/hyperion",
"id": "54ad80089aab29d90835a4244642f88c18de604a",
"size": "42901",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hyperion/model/model_output.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "52997"
},
{
"name": "C++",
"bytes": "426981"
},
{
"name": "Fortran",
"bytes": "566390"
},
{
"name": "M4",
"bytes": "6308"
},
{
"name": "Makefile",
"bytes": "8813"
},
{
"name": "Python",
"bytes": "923253"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op migration kept only to preserve the migration history chain."""

    dependencies = [
        ('conf', '0003_v310_JSONField_changes'),
    ]

    operations = [
        # This list is intentionally empty.
        # Tower 3.2 included several data migrations that are no longer
        # necessary (this list is now empty because Tower 3.2 is past EOL and
        # cannot be directly upgraded to modern versions of Tower)
    ]
| {
"content_hash": "2e1a2e3acdfba1e1c03698c8ce5287d7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 28.176470588235293,
"alnum_prop": 0.6701461377870563,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "99fcd7ffcecd32c11cb338599c1630ce7ad06a1d",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/conf/migrations/0004_v320_reencrypt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import pytest
import numpy as np
from collections import defaultdict
from stellargraph.data.unsupervised_sampler import UnsupervisedSampler
from stellargraph.data.explorer import UniformRandomWalk
from ..test_utils.graphs import line_graph
def test_init_parameters(line_graph):
    # each of these constructor calls violates a precondition and must raise
    invalid_kwargs = [
        dict(G=None),                           # a graph must be provided
        dict(G=line_graph, length=1),           # walk length must exceed 1
        dict(G=line_graph, number_of_walks=0),  # need >= 1 walk per root node
        dict(G=line_graph, nodes=1),            # nodes must be an iterable of IDs
    ]
    for kwargs in invalid_kwargs:
        with pytest.raises(ValueError):
            UnsupervisedSampler(**kwargs)

    # when no root nodes are given, all graph nodes are used as roots
    sampler = UnsupervisedSampler(G=line_graph, nodes=None)
    assert sampler.nodes == list(line_graph.nodes())
def test_run_batch_sizes(line_graph):
    batch_size = 4
    sampler = UnsupervisedSampler(G=line_graph, length=2, number_of_walks=2)
    batches = sampler.run(batch_size)

    # 2 walks of length 2 yield 2 positive + 2 negative pairs per node
    total_pairs = len(line_graph.nodes()) * 4
    assert len(batches) == np.ceil(total_pairs / batch_size)

    # every batch except possibly the last is exactly batch_size long
    for pair_ids, pair_labels in batches[:-1]:
        assert len(pair_ids) == len(pair_labels) == batch_size

    # the final batch may be smaller, but ids and labels stay aligned
    final_ids, final_labels = batches[-1]
    assert len(final_ids) == len(final_labels)
    assert len(final_ids) <= batch_size
def test_run_context_pairs(line_graph):
    batch_size = 4
    sampler = UnsupervisedSampler(G=line_graph, length=2, number_of_walks=2)
    batches = sampler.run(batch_size)

    # group every (context, label) pair by its target node
    pairs_per_target = defaultdict(list)
    for pair_ids, pair_labels in batches:
        for (target, context), label in zip(pair_ids, pair_labels):
            pairs_per_target[target].append((context, label))

    # every node of the graph must appear as a target
    assert len(pairs_per_target) == len(line_graph.nodes())

    for target, sampled in pairs_per_target.items():
        # exactly 2 positive and 2 negative context pairs for each target node
        assert len(sampled) == 4

        # walks have length 2, so each positive context is a direct neighbour
        neighbours = set(line_graph.neighbors(target))
        for context, label in sampled:
            if label == 1:
                assert context in neighbours
def test_walker_uniform_random(line_graph):
    walk_length = 3
    walks_per_node = 2
    batch_size = 4

    walker = UniformRandomWalk(line_graph, n=walks_per_node, length=walk_length)
    sampler = UnsupervisedSampler(line_graph, walker=walker)
    batches = sampler.run(batch_size)

    # batch count should reflect the walker's own parameters, not the
    # UnsupervisedSampler defaults
    total_pairs = (
        line_graph.number_of_nodes() * walks_per_node * (walk_length - 1) * 2
    )
    assert len(batches) == np.ceil(total_pairs / batch_size)
class CustomWalker:
    """Trivial walker whose 'walks' just repeat each root node twice."""

    def run(self, nodes):
        # one length-2 self-walk per requested root node
        return [[root, root] for root in nodes]
def test_walker_custom(line_graph):
    sampler = UnsupervisedSampler(line_graph, walker=CustomWalker())
    batches = sampler.run(2)

    assert len(batches) == line_graph.number_of_nodes()

    # the custom walker only walks from a node to itself, so every positive
    # example must be a self loop
    for context_pairs, labels in batches:
        for node, neighbour in context_pairs[labels == 1]:
            assert node == neighbour
def test_ignored_param_warning(line_graph):
    walker = UniformRandomWalk(line_graph, n=2, length=3)

    # walker-specific settings conflict with these standalone parameters
    conflicts = [
        ("length", dict(length=5)),
        ("number_of_walks", dict(number_of_walks=5)),
        ("seed", dict(seed=1)),
    ]
    for param, kwargs in conflicts:
        expected = "cannot specify both 'walker' and '%s'" % param
        with pytest.raises(ValueError, match=expected):
            UnsupervisedSampler(line_graph, walker=walker, **kwargs)
| {
"content_hash": "0100f841ba5b5d87287bf71d465f065f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 99,
"avg_line_length": 34.26890756302521,
"alnum_prop": 0.6907797940166749,
"repo_name": "stellargraph/stellargraph",
"id": "463e52edd657fb08596eb70ef32c2d4b4a7f1286",
"size": "4683",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/data/test_unsupervised_sampler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3274"
},
{
"name": "Python",
"bytes": "1740018"
},
{
"name": "Shell",
"bytes": "18236"
}
],
"symlink_target": ""
} |
"""Collections contain content documents and blueprints."""
from . import documents
from . import formats
from . import messages
from grow.common import structures
from grow.common import utils
from grow.pods import locales
import copy
import json
import logging
import operator
import os
import re
import sys
class Error(Exception):
    """Base class for all collection-related errors."""
    pass
class CollectionNotEmptyError(Error):
    """Raised when deleting a collection that still contains documents."""
    pass
class BadCollectionNameError(Error, ValueError):
    """Raised when a collection name is invalid."""
    pass
class CollectionDoesNotExistError(Error, ValueError):
    """Raised when referencing a collection that does not exist."""
    pass
class CollectionExistsError(Error):
    """Raised when creating a collection that already exists."""
    pass
class BadFieldsError(Error, ValueError):
    """Raised when a collection's fields are malformed."""
    pass
class NoLocalesError(Error):
    """Raised when locales are requested but none are configured."""
    pass
class Collection(object):
    """A collection of content documents, configured by a blueprint file."""

    CONTENT_PATH = '/content'
    BLUEPRINT_PATH = '_blueprint.yaml'

    _content_path_regex = re.compile('^' + CONTENT_PATH + '/?')

    def __init__(self, pod_path, _pod):
        utils.validate_name(pod_path)
        regex = Collection._content_path_regex
        self.pod = _pod
        self.collection_path = regex.sub('', pod_path).strip('/')
        self.pod_path = pod_path
        self.basename = os.path.basename(self.collection_path)
        self._default_locale = _pod.podspec.default_locale
        self._blueprint_path = os.path.join(
            self.pod_path, Collection.BLUEPRINT_PATH)

    def __repr__(self):
        return '<Collection "{}">'.format(self.collection_path)

    def __eq__(self, other):
        return (isinstance(other, Collection)
                and self.collection_path == other.collection_path)

    def __iter__(self):
        for doc in self.list_docs():
            yield doc

    def __getattr__(self, name):
        # Expose blueprint fields as attributes, falling back to normal
        # attribute lookup when the field does not exist.
        try:
            return self.fields[name]
        except KeyError:
            return object.__getattribute__(self, name)

    @utils.cached_property
    def fields(self):
        """Untagged blueprint fields (empty dict when there are none)."""
        fields = utils.untag_fields(self.tagged_fields)
        return {} if not fields else fields

    @property
    def tagged_fields(self):
        return copy.deepcopy(self.yaml)

    @classmethod
    def list(cls, pod):
        """Returns all collections in the pod's content directory."""
        items = []
        for root, dirs, _ in pod.walk(cls.CONTENT_PATH + '/'):
            for dir_name in dirs:
                pod_path = os.path.join(root, dir_name)
                pod_path = pod_path.replace(pod.root, '')
                col_path = os.path.join(pod_path, '_blueprint.yaml')
                if pod.file_exists(col_path):
                    items.append(pod.get_collection(pod_path))
        return items

    def collections(self):
        """Returns collections contained within this collection. Implemented
        as a function to allow future implementation of arguments."""
        items = []
        for root, dirs, _ in self.pod.walk(self.pod_path):
            if root == self.pod.abs_path(self.pod_path):
                for dir_name in dirs:
                    pod_path = os.path.join(self.pod_path, dir_name)
                    items.append(self.pod.get_collection(pod_path))
        return items

    @property
    def exists(self):
        """Returns whether the collection exists, as determined by whether
        the collection's blueprint exists."""
        return self.pod.file_exists(self._blueprint_path)

    @classmethod
    def create(cls, collection_path, fields, pod):
        """Creates a new collection by writing a blueprint."""
        collection = cls.get(collection_path, pod)
        if collection.exists:
            raise CollectionExistsError('{} already exists.'.format(collection))
        fields = utils.dump_yaml(fields)
        pod.write_file(collection._blueprint_path, fields)
        return collection

    @classmethod
    def get(cls, collection_path, _pod):
        """Returns a collection object."""
        return cls(collection_path, _pod)

    def get_doc(self, pod_path, locale=None):
        """Returns a document contained in this collection."""
        if locale is not None:
            # Prefer a locale-specific file (e.g. page@fr.yaml) if present.
            localized_path = formats.Format.localize_path(pod_path, locale)
            if self.pod.file_exists(localized_path):
                pod_path = localized_path
        return documents.Document(pod_path, locale=locale, _pod=self.pod,
                                  _collection=self)

    def create_doc(self, basename, fields=utils.SENTINEL, body=utils.SENTINEL):
        """Creates a document within the collection."""
        doc_pod_path = os.path.join(self.pod_path, basename)
        doc = self.get_doc(doc_pod_path)
        doc.write(fields=fields, body=body)
        return doc

    @property
    @utils.memoize
    def yaml(self):
        """Parsed blueprint contents (empty dict when missing or empty)."""
        if not self.exists:
            return {}
        result = utils.parse_yaml(self.pod.read_file(self._blueprint_path))
        if result is None:
            return {}
        return result

    def _get_builtin_field(self, name):
        """Returns a builtin field, which is a field prefixed with a `$`. To be
        backwards compatible with the legacy recommendation, we return the
        field unprefixed with `$` if a prefixed field cannot be found."""
        return self.fields.get('${}'.format(name), self.fields.get(name))

    def list_categories(self):
        return self._get_builtin_field('categories') or []

    @property
    def title(self):
        return self._get_builtin_field('title')

    def titles(self, title_name=None):
        """Returns a named title from `$titles`, falling back to `$title`."""
        if title_name is None:
            return self.title
        titles = self.fields.get('$titles', {})
        return titles.get(title_name, self.title)

    @property
    def root(self):
        return self._get_builtin_field('root')

    @property
    def view(self):
        return self._get_builtin_field('view')

    @property
    def localization(self):
        return self._get_builtin_field('localization')

    @property
    def order(self):
        # Default to a large sentinel so unordered collections go to the end.
        # sys.maxsize is available on both Python 2.6+ and Python 3
        # (sys.maxint was Python-2-only).
        return self.fields.get('$order', sys.maxsize)

    @property
    def path_format(self):
        return self._get_builtin_field('path')

    def delete(self):
        """Deletes the collection's blueprint; the collection must be empty.

        Raises:
            CollectionNotEmptyError: If the collection still has documents.
        """
        if self.list_docs(include_hidden=True):
            text = 'Collections that are not empty cannot be deleted.'
            raise CollectionNotEmptyError(text)
        self.pod.delete_file(self._blueprint_path)

    def list_docs(self, order_by=None, locale=utils.SENTINEL, reverse=None,
                  include_hidden=False, recursive=True, inject=False):
        """Lists the documents in the collection, optionally filtered by
        locale and sorted by an attribute (default: `order`)."""
        reverse = False if reverse is None else reverse
        order_by = 'order' if order_by is None else order_by
        key = operator.attrgetter(order_by)
        sorted_docs = structures.SortedCollection(key=key)
        if inject:
            injected_docs = self.pod.inject_preprocessors(collection=self)
            if injected_docs is not None:
                sorted_docs = injected_docs
                self.pod.logger.info('Injected collection -> {}'.format(self.pod_path))
                return reversed(sorted_docs) if reverse else sorted_docs
        for path in self.pod.list_dir(self.pod_path, recursive=recursive):
            pod_path = os.path.join(self.pod_path, path.lstrip('/'))
            slug, ext = os.path.splitext(os.path.basename(pod_path))
            # Skip private files (leading underscore) and unknown formats.
            if (slug.startswith('_')
                    or ext not in messages.extensions_to_formats
                    or not pod_path):
                continue
            try:
                _, locale_from_path = \
                    formats.Format.parse_localized_path(pod_path)
                if locale_from_path:
                    if (locale is not None
                            and locale in [utils.SENTINEL, locale_from_path]):
                        new_doc = self.get_doc(pod_path, locale=locale_from_path)
                        if not include_hidden and new_doc.hidden:
                            continue
                        sorted_docs.insert(new_doc)
                    continue
                doc = self.get_doc(pod_path)
                if not include_hidden and doc.hidden:
                    continue
                if locale in [utils.SENTINEL, None]:
                    sorted_docs.insert(doc)
                if locale is None:
                    continue
                if locale == doc.default_locale:
                    sorted_docs.insert(doc)
                else:
                    self._add_localized_docs(sorted_docs, pod_path, locale, doc)
            except Exception:
                logging.error('Error loading doc: {}'.format(pod_path))
                raise
        return reversed(sorted_docs) if reverse else sorted_docs

    # Aliases `collection.docs` to `collection.list_docs`. `collection.docs`
    # should be the public and supported way to retrieve documents from a
    # collection.
    docs = list_docs

    def _add_localized_docs(self, sorted_docs, pod_path, locale, doc):
        """Inserts localized variants of `doc` matching `locale`."""
        for each_locale in doc.locales:
            if each_locale == doc.default_locale and locale != each_locale:
                continue
            base, ext = os.path.splitext(pod_path)
            localized_file_path = '{}@{}{}'.format(base, each_locale, ext)
            # Locale-specific files were already handled by the caller, so
            # only synthesize docs for locales without a dedicated file.
            if (locale in [utils.SENTINEL, each_locale]
                    and not self.pod.file_exists(localized_file_path)):
                new_doc = doc.localize(each_locale)
                sorted_docs.insert(new_doc)

    def list_servable_documents(self, include_hidden=False, locales=None, inject=None):
        """Lists documents that can actually be served (not drafts, have a
        serving path and a view, and match the requested locales)."""
        docs = []
        inject = False if inject is None else inject
        for doc in self.list_docs(include_hidden=include_hidden, inject=inject):
            if (self._get_builtin_field('draft')
                    or not doc.has_serving_path()
                    or not doc.view
                    or (locales and doc.locale not in locales)):
                continue
            docs.append(doc)
        return docs

    @utils.cached_property
    def locales(self):
        """Locales enabled for this collection (inherited from the podspec
        when not set on the blueprint).

        Raises:
            NoLocalesError: If localization is configured but no locales
                can be determined.
        """
        if self.localization:
            if self.localization.get('use_podspec_locales'):
                return self.pod.list_locales()
            try:
                return locales.Locale.parse_codes(self.localization['locales'])
            except KeyError:
                # Locales inherited from podspec.
                podspec = self.pod.get_podspec()
                config = podspec.get_config()
                if ('localization' in config
                        and 'locales' in config['localization']):
                    identifiers = config['localization']['locales']
                    return locales.Locale.parse_codes(identifiers)
                # Bug fix: the message template was never formatted, so the
                # error used to read literally "{} has no locales.".
                raise NoLocalesError('{} has no locales.'.format(self))
        return []

    def to_message(self):
        message = messages.CollectionMessage()
        message.title = self.title
        message.collection_path = self.collection_path
        return message
| {
"content_hash": "cccd933cd022f6d3dc013caa47cc6c6c",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 87,
"avg_line_length": 35.91,
"alnum_prop": 0.5907360995080293,
"repo_name": "denmojo/pygrow",
"id": "c5b36167c0632399dc722a69a24bdc203bb8a7b0",
"size": "10773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/pods/collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2528"
},
{
"name": "HTML",
"bytes": "16228"
},
{
"name": "JavaScript",
"bytes": "3795"
},
{
"name": "Makefile",
"bytes": "4730"
},
{
"name": "Python",
"bytes": "511935"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
"""
Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first ten pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, 70 − 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference are pentagonal and D = |Pk − Pj| is minimised; what is the value of D?
Solution comment:
No brain, just find any solution with n <= 10000. Happens to
include the desired solution, but not smart in any way.
"""
from projecteuler.numbers import pentagonal, ispentagonal
upper = 10000 # upper n in P(n)
pentagonals = [ pentagonal(i) for i in xrange(1, upper+1) ]
pentagonals_set = set(pentagonals)
for i in xrange(1, upper):
Pi = pentagonal(i)
for j in xrange(i, upper+1):
Pj = pentagonal(j)
if ispentagonal(Pj-Pi) and ispentagonal(Pi+Pj):
print i, j, Pi, Pj, Pj-Pi
| {
"content_hash": "5c54a933190589a4ada97ff77984aba9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 153,
"avg_line_length": 36.46153846153846,
"alnum_prop": 0.6782700421940928,
"repo_name": "bsamseth/project-euler",
"id": "0f9ffa33020ce743d6eb9bc9adb14c838c8ab4c8",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "044/44.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "141034"
},
{
"name": "CMake",
"bytes": "15286"
},
{
"name": "Jupyter Notebook",
"bytes": "11147"
},
{
"name": "Python",
"bytes": "116091"
}
],
"symlink_target": ""
} |
'''
Created on Feb 8, 2011
@author: Blodstone
'''
import pymongo
conn = pymongo.Connection()
col = conn['QAnlp_head_whWord']['feature10_Fine']
result = col.find()
count = 0
question = []
for tes in result:
print len(tes)-5
# break
# if len(tes) != 7979:
# count = count + 1
# question.append(tes['Question'])
print count
print question
| {
"content_hash": "d675fc1eb92b375b8cb7e00557d384ef",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 17.59090909090909,
"alnum_prop": 0.5943152454780362,
"repo_name": "blodstone/CCS590v2",
"id": "ddc971f04ec4da42dca59bbfe4894c6f3befe0ff",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/Check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "173"
},
{
"name": "Java",
"bytes": "3073135"
},
{
"name": "Python",
"bytes": "113621"
}
],
"symlink_target": ""
} |
import logging
from gi.repository import Gst, GstNet

__all__ = ['Clock', 'NetTimeProvider']

# Port the GStreamer network time provider listens on.
port = 9998

log = logging.getLogger('Clock')

# One shared system clock for every pipeline in the process.
log.debug("Obtaining System-Clock")
Clock = Gst.SystemClock.obtain()
log.info("Using System-Clock for all pipelines.")
log.info("Starting NetTimeProvider on Port %u", port)
# Serve `Clock` over the network ('::' = all interfaces, IPv6+IPv4) so
# other processes can slave their pipelines to it.
NetTimeProvider = GstNet.NetTimeProvider.new(Clock, '::', port)
| {
"content_hash": "914dd9265454023423b2a217550d332e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 27,
"alnum_prop": 0.7380952380952381,
"repo_name": "voc/voctomix",
"id": "3eb85832cd8445bfd9a4398038900e98ed3fd387",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "voctocore/lib/clock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2621"
},
{
"name": "Dockerfile",
"bytes": "2626"
},
{
"name": "Python",
"bytes": "350063"
},
{
"name": "Shell",
"bytes": "25187"
}
],
"symlink_target": ""
} |
from django.utils.text import slugify

from ...product.models import Attribute, AttributeValue


def attributes_to_hstore(attribute_value_input, attributes_queryset):
    """Transform attributes to the HStore representation.

    Attributes configuration per product is stored in a HStore field as
    a dict of IDs. This function transforms the list of
    `AttributeValueInput` objects to this format.
    """
    attr_ids_by_slug = {attr.slug: attr.id for attr in attributes_queryset}
    hstore = {}
    # NOTE(review): this maps value slugs to IDs across *all* attributes,
    # not just the ones in `attributes_queryset` — verify slugs are unique
    # globally.
    value_ids_by_slug = dict(
        AttributeValue.objects.values_list('slug', 'id'))

    for item in attribute_value_input:
        slug = item.get('slug')
        if slug not in attr_ids_by_slug:
            raise ValueError(
                'Attribute %r doesn\'t belong to given product type.' % (
                    slug,))

        value = item.get('value')
        if not value:
            continue

        value_id = value_ids_by_slug.get(value)
        if value_id is None:
            # Unknown value: create a new AttributeValue for it.
            attribute = Attribute.objects.get(slug=slug)
            value_obj, _created = attribute.values.get_or_create(
                name=value, slug=slugify(value))
            value_id = value_obj.pk

        hstore[str(attr_ids_by_slug[slug])] = str(value_id)

    return hstore
| {
"content_hash": "a9f172117a428175cf34a5724ddaceda",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 34.395348837209305,
"alnum_prop": 0.6355645706558486,
"repo_name": "UITools/saleor",
"id": "4e0442ef562bed5f512d73214670eae61ebff69b",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/product/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 276a647f828a
Revises: None
Create Date: 2015-06-26 15:13:05.579248
"""
# revision identifiers, used by Alembic.
revision = '276a647f828a'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `cre` table with a composite (code, version) primary key."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('cre',
    sa.Column('code', sa.String(), nullable=False),
    sa.Column('version', sa.Integer(), nullable=False),
    sa.Column('template', sa.String(), nullable=True),
    sa.Column('infills', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('code', 'version')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse the migration by dropping the `cre` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('cre')
    ### end Alembic commands ###
| {
"content_hash": "b83415940f3f3aacce8eb252dbfd3471",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 24.75,
"alnum_prop": 0.6666666666666666,
"repo_name": "LandRegistry/register-metadata",
"id": "dea67fca986468d9f50f3f614df6734df1b6208d",
"size": "792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/276a647f828a_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "13325"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
} |
"""Logging
"""
import sys
import os
import logging
from pip._vendor import colorama, pkg_resources
def _color_wrap(*colors):
    """Return a function that surrounds its input with the given ANSI
    color codes and a trailing style reset."""
    prefix = "".join(colors)

    def wrapped(inp):
        return prefix + inp + colorama.Style.RESET_ALL

    return wrapped
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
    """Decide whether output written to `consumer` should be colorized.

    Only the process's stdout/stderr are ever colored, and only when the
    stream is a tty or TERM advertises ANSI support.
    """
    # Unwrap colorama's Windows adapter to compare against the real stream.
    if isinstance(consumer, colorama.AnsiToWin32):
        real_consumer = consumer.wrapped
    else:
        real_consumer = consumer

    # Anything other than stdout/stderr never gets color.
    if real_consumer not in std:
        return False

    # A tty gets color.
    if hasattr(real_consumer, "isatty") and real_consumer.isatty():
        return True

    # Otherwise only an explicitly ANSI-capable terminal does.
    return environ.get("TERM") == "ANSI"
def should_warn(current_version, removal_version):
    """Return True while a deprecation should be a warning, not an error.

    Versions are significant to two components (major.minor); the warning
    window is the single minor release before `removal_version`.
    """
    # Keep only major.minor of the running version.
    current_two = ".".join(current_version.split(".")[:2])

    # Warn threshold: one minor version before removal.
    major, minor = removal_version.split(".")[:2]
    warn_version = "%s.%s" % (major, int(minor) - 1)

    return (pkg_resources.parse_version(current_two)
            < pkg_resources.parse_version(warn_version))
class Logger(object):
    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.

    Messages are fanned out to registered (level, consumer) pairs, where a
    consumer is either a writable stream or a callable.
    """

    VERBOSE_DEBUG = logging.DEBUG - 1
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    # NOTE(review): on Python 3 this is a float (25.0); level comparisons
    # still work, but it is not a standard integer logging level.
    NOTIFY = (logging.INFO + logging.WARN) / 2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL
    LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    # Colorizers applied to rendered messages at these levels when the
    # consumer supports color (see should_color).
    COLORS = {
        WARN: _color_wrap(colorama.Fore.YELLOW),
        ERROR: _color_wrap(colorama.Fore.RED),
        FATAL: _color_wrap(colorama.Fore.RED),
    }

    def __init__(self):
        # List of (level, consumer) pairs.
        self.consumers = []
        self.indent = 0
        self.explicit_levels = False
        # Progress-display state (see start_progress/end_progress).
        self.in_progress = None
        self.in_progress_hanging = False

    def add_consumers(self, *consumers):
        """Register (level, consumer) pairs, skipping exact duplicates."""
        for level, consumer in consumers:
            # Try to check for duplicate consumers before adding them
            for chk_level, chk_consumer in self.consumers:
                # Account for colorama wrapped streams
                if isinstance(chk_consumer, colorama.AnsiToWin32):
                    chk_consumer = chk_consumer.wrapped

                if (level, consumer) == (chk_level, chk_consumer):
                    break
            # If we didn't find a duplicate, then add it
            else:
                # Colorize consumer for Windows
                if sys.platform.startswith('win') \
                        and hasattr(consumer, 'write'):
                    consumer = colorama.AnsiToWin32(consumer)

                self.consumers.append((level, consumer))

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def deprecated(self, removal_version, msg, *args, **kwargs):
        """
        Logs deprecation message which is log level WARN if the
        ``removal_version`` is > 1 minor release away and log level ERROR
        otherwise.

        removal_version should be the version that the deprecated feature is
        expected to be removed in, so something that will not exist in
        version 1.7, but will in 1.6 would have a removal_version of 1.7.
        """
        from pip import __version__

        if should_warn(__version__, removal_version):
            self.warn(msg, *args, **kwargs)
        else:
            self.error(msg, *args, **kwargs)

    def log(self, level, msg, *args, **kw):
        """Render `msg` with %-args and dispatch it to matching consumers."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw

        # render
        if args:
            rendered = msg % args
        else:
            rendered = msg
        rendered = ' ' * self.indent + rendered
        if self.explicit_levels:
            # FIXME: should this be a name, not a level number?
            rendered = '%02i %s' % (level, rendered)

        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                # Terminate a pending progress line before normal output.
                if (self.in_progress_hanging
                        and consumer in (sys.stdout, sys.stderr)):
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()

                if hasattr(consumer, 'write'):
                    write_content = rendered + '\n'
                    if should_color(consumer, os.environ):
                        # We are printing to stdout or stderr and it supports
                        # colors so render our text colored
                        colorizer = self.COLORS.get(level, lambda x: x)
                        write_content = colorizer(write_content)

                    consumer.write(write_content)
                    if hasattr(consumer, 'flush'):
                        consumer.flush()
                else:
                    consumer(rendered)

    def _show_progress(self):
        """Should we display download progress?"""
        return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())

    def start_progress(self, msg):
        """Open a progress scope; subsequent show_progress() calls animate it."""
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self._show_progress():
            sys.stdout.write(' ' * self.indent + msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg
        self.last_message = None

    def end_progress(self, msg='done.'):
        """Close the current progress scope, writing `msg` as the outcome."""
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self._show_progress():
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                # These erase any messages shown with show_progress
                # (besides .'s)
                # BUG FIX: previously called the module-global `logger`
                # instead of `self`, which broke any Logger instance other
                # than the module singleton.
                self.show_progress('')
                self.show_progress('')
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self, message=None):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            if message is None:
                sys.stdout.write('.')
                sys.stdout.flush()
            else:
                # Pad with spaces to fully overwrite the previous message.
                if self.last_message:
                    padding = ' ' * max(
                        0,
                        len(self.last_message) - len(message)
                    )
                else:
                    padding = ''
                sys.stdout.write(
                    '\r%s%s%s%s' %
                    (' ' * self.indent, self.in_progress, message, padding)
                )
                sys.stdout.flush()
                self.last_message = message

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger()
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            # BUG FIX: this condition used `or`, which made every bounded
            # slice return False (contradicting the doctests above);
            # the half-open range check requires `and`.
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    @classmethod
    def level_for_integer(cls, level):
        """Map an integer verbosity index onto a level, clamped to LEVELS."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    def move_stdout_to_stderr(self):
        """Re-point every stdout consumer at stderr (e.g. for piped output)."""
        to_remove = []
        to_add = []
        for consumer_level, consumer in self.consumers:
            if consumer == sys.stdout:
                to_remove.append((consumer_level, consumer))
                to_add.append((consumer_level, sys.stderr))
        for item in to_remove:
            self.consumers.remove(item)
        self.consumers.extend(to_add)
# Module-level singleton; the rest of pip imports and shares this instance.
logger = Logger()
| {
"content_hash": "ef28a73798f473b44e169a24a407e3f2",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 79,
"avg_line_length": 34.19310344827586,
"alnum_prop": 0.5488100040338846,
"repo_name": "blueyed/pip",
"id": "f7dfd144aaba079c8176330923206a3af935b7ae",
"size": "9916",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pip/log.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListLoadBalancer(command.Lister):
    # `cliff` Lister command: display load balancers as a table, optionally
    # filtered by the options below. (No class docstring on purpose — cliff
    # uses it as the command help text.)

    def get_parser(self, prog_name):
        parser = super(ListLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            '--admin_username',
            metavar="admin_username",
            help="filter by admin username")
        parser.add_argument(
            '--availability_zone',
            metavar="availability_zone",
            help="filter by availability zone")
        parser.add_argument(
            '--default_gateway',
            metavar="default_gateway",
            help="filter by default gateway")
        parser.add_argument(
            '--user_username',
            metavar="user_username",
            help="filter by user username")
        parser.add_argument(
            '--load_balancer_plan_id',
            metavar="load_balancer_plan_id",
            help="filter by load balancer plan id")
        parser.add_argument(
            '--id',
            metavar="id",
            help="filter by id")
        parser.add_argument(
            '--name',
            metavar="name",
            help="filter by name")
        parser.add_argument(
            '--status',
            metavar="status",
            help="filter by status")
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network

        columns = (
            'id',
            'name',
            'load_balancer_plan_id',
            'default_gateway',
            'status',
        )
        column_headers = (
            'ID',
            'Name',
            'Load Balancer Plan',
            'Default Gateway',
            'Status',
        )

        # Build the server-side filter set from whichever options were given.
        search_opts = dict()
        if parsed_args.admin_username:
            search_opts.update({"admin_username": parsed_args.admin_username})
        if parsed_args.availability_zone:
            # BUG FIX: previously read the non-existent attribute
            # `parsed_args.availability_zones` (AttributeError whenever the
            # --availability_zone filter was used).
            search_opts.update(
                {"availability_zone": parsed_args.availability_zone})
        if parsed_args.user_username:
            search_opts.update({"user_username": parsed_args.user_username})
        if parsed_args.default_gateway:
            search_opts.update({"default_gateway": parsed_args.default_gateway})
        if parsed_args.load_balancer_plan_id:
            search_opts.update(
                {"load_balancer_plan_id": parsed_args.load_balancer_plan_id})
        if parsed_args.id:
            search_opts.update({"id": parsed_args.id})
        if parsed_args.name:
            search_opts.update({"name": parsed_args.name})
        if parsed_args.status:
            search_opts.update({"status": parsed_args.status})

        # BUG FIX: search_opts was built but never forwarded, so all the
        # filter options were silently ignored.
        data = [
            to_obj.LoadBalancer(loadbalancer)
            for loadbalancer in network_client.list_loadbalancers(
                **search_opts).get('load_balancers')
        ]

        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in data))
class ShowLoadBalancer(command.ShowOne):
    # Display the details of a single load balancer.

    def get_parser(self, prog_name):
        parser = super(ShowLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            'loadbalancer_id',
            metavar="LOAD_BALANCER_ID",
            help="ID of Load Balancer to show."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network

        lb = client.show_loadbalancer(
            parsed_args.loadbalancer_id).get('load_balancer')
        columns = utils.get_columns(lb)
        row = utils.get_item_properties(to_obj.LoadBalancer(lb), columns)
        return columns, row
class CreateLoadBalancer(command.ShowOne):
    # Create a load balancer from a plan and optional metadata.

    def get_parser(self, prog_name):
        parser = super(CreateLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<string>',
            help='Name of load balancer to create.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            help='Description of load balancer to create.')
        parser.add_argument(
            'load_balancer_plan_id',
            metavar='LOAD_BALANCER_PLAN_ID',
            help='Load Balancer Plan ID of load balancer to create')
        parser.add_argument(
            '--availability_zone',
            metavar='<string>',
            help='Availability Zone of load balancer to create.')
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network

        # Copy only the request attributes the user actually supplied.
        body = {'load_balancer': {}}
        utils.update_dict(
            parsed_args,
            body['load_balancer'],
            ['name', 'description', 'load_balancer_plan_id',
             'availability_zone'])

        created = client.create_loadbalancer(body).get('load_balancer')
        columns = utils.get_columns(created)
        row = utils.get_item_properties(to_obj.LoadBalancer(created), columns)
        return columns, row
class SetLoadBalancer(command.ShowOne):
    # Update mutable attributes of an existing load balancer.

    def get_parser(self, prog_name):
        parser = super(SetLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            'loadbalancer_id',
            metavar='LOAD_BALANCER_ID',
            # BUG FIX: help text previously said "ID of Public IP to update."
            # (copy-paste from another command).
            help='ID of Load Balancer to update.')
        parser.add_argument(
            '--name',
            metavar='<string>',
            help='Name of load balancer to update.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            help='Description of load balancer to update.')
        parser.add_argument(
            '--load_balancer_plan_id',
            metavar='LOAD_BALANCER_PLAN_ID',
            help='LoadBalancer Plan ID of load balancer to update')
        parser.add_argument(
            '--default_gateway',
            metavar='<ipv4>',
            help='Default Gateway of load balancer to update.')
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network

        body = {'load_balancer': {}}
        loadbalancer_id = parsed_args.loadbalancer_id
        # Copy only the attributes the user actually supplied.
        utils.update_dict(
            parsed_args,
            body['load_balancer'],
            ['name', 'description',
             'load_balancer_plan_id', 'default_gateway'])

        dic = network_client.update_loadbalancer(
            loadbalancer_id, body).get('load_balancer')
        columns = utils.get_columns(dic)
        obj = to_obj.LoadBalancer(dic)
        data = utils.get_item_properties(
            obj, columns,)
        return columns, data
class DeleteLoadBalancer(command.Command):
    # Delete one or more load balancers by ID.

    def get_parser(self, prog_name):
        parser = super(DeleteLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            'loadbalancer_id',
            metavar="LOAD_BALANCER_ID",
            nargs="+",
            help="ID(s) of Load Balancers to delete."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        for lb_id in parsed_args.loadbalancer_id:
            client.delete_loadbalancer(lb_id)
class RebootLoadBalancer(command.ShowOne):
    # Reboot a load balancer, then display its refreshed details.

    def get_parser(self, prog_name):
        parser = super(RebootLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            'loadbalancer_id',
            metavar="LOAD_BALANCER_ID",
            # BUG FIX: help text previously said "to show" (copy-paste from
            # ShowLoadBalancer).
            help="ID of Load Balancer to reboot."
        )
        parser.add_argument(
            '--type',
            metavar='{SOFT|HARD}',
            default="HARD",
            choices=["SOFT", "HARD"],
            help='Reboot Type: SOFT/HARD.')
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network
        loadbalancer_id = parsed_args.loadbalancer_id
        body = {}
        body.update({"type": parsed_args.type})
        network_client.reboot_loadbalancer(loadbalancer_id, body=body)
        # Re-fetch after the reboot request so the displayed status is fresh.
        dic = network_client.show_loadbalancer(loadbalancer_id).get('load_balancer')
        columns = utils.get_columns(dic)
        obj = to_obj.LoadBalancer(dic)
        data = utils.get_item_properties(
            obj, columns,)
        return columns, data
class ResetPasswordLoadBalancer(command.ShowOne):
    # Reset the password of a load balancer account and show the result.

    def get_parser(self, prog_name):
        parser = super(ResetPasswordLoadBalancer, self).get_parser(prog_name)
        parser.add_argument(
            'loadbalancer_id',
            metavar="LOAD_BALANCER_ID",
            # BUG FIX: help text previously said "to show" (copy-paste from
            # ShowLoadBalancer).
            help="ID of Load Balancer to reset the password on."
        )
        parser.add_argument(
            '--username',
            metavar='USERNAME',
            required=True,
            help='username to reset password of Load Balancer.')
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network
        loadbalancer_id = parsed_args.loadbalancer_id
        body = {}
        body.update({"username": parsed_args.username})
        dic = network_client.reset_password_loadbalancer(loadbalancer_id, body=body)
        columns = utils.get_columns(dic)
        obj = to_obj.LoadBalancer(dic)
        data = utils.get_item_properties(
            obj, columns,)
        return columns, data
| {
"content_hash": "ee1726af0d780882e48a7d95a95724a0",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 92,
"avg_line_length": 34.119565217391305,
"alnum_prop": 0.5737496017840077,
"repo_name": "nttcom/eclcli",
"id": "e09dc8ee3f4fa3b3eb646a623b07a2e2bf486b01",
"size": "9417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eclcli/network/v2/load_balancer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2087533"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(P2PInterface):
    """P2P connection that never initiates traffic.

    Every protocol callback a subclass does not override flags the
    connection as having received an unexpected (pre-handshake) message.
    """

    def __init__(self):
        super().__init__()
        self.unexpected_msg = False
        self.ever_connected = False

    def bad_message(self, message):
        self.unexpected_msg = True
        self.log.info("should not have received message: %s" % message.command)

    def on_open(self):
        self.ever_connected = True

    # Every inbound message type is unexpected unless a subclass says
    # otherwise.
    def on_version(self, message):
        self.bad_message(message)

    def on_verack(self, message):
        self.bad_message(message)

    def on_reject(self, message):
        self.bad_message(message)

    def on_inv(self, message):
        self.bad_message(message)

    def on_addr(self, message):
        self.bad_message(message)

    def on_getdata(self, message):
        self.bad_message(message)

    def on_getblocks(self, message):
        self.bad_message(message)

    def on_tx(self, message):
        self.bad_message(message)

    def on_block(self, message):
        self.bad_message(message)

    def on_getaddr(self, message):
        self.bad_message(message)

    def on_headers(self, message):
        self.bad_message(message)

    def on_getheaders(self, message):
        self.bad_message(message)

    def on_ping(self, message):
        self.bad_message(message)

    def on_mempool(self, message):
        self.bad_message(message)

    def on_pong(self, message):
        self.bad_message(message)

    def on_sendheaders(self, message):
        self.bad_message(message)

    def on_sendcmpct(self, message):
        self.bad_message(message)

    def on_cmpctblock(self, message):
        self.bad_message(message)

    def on_getblocktxn(self, message):
        self.bad_message(message)

    def on_blocktxn(self, message):
        self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
    """Peer that floods veracks before sending a version.

    The misbehavior should accumulate enough banscore to get us
    disconnected.
    NOTE: implementation-specific check here. Remove if biblepayd ban
    behavior changes.
    """

    def on_open(self):
        super().on_open()
        for _ in range(banscore):
            self.send_message(msg_verack())

    def on_reject(self, message):
        pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
    # Peer that never sends a version and just sits idle, hoping to catch
    # any message the node should not send before the handshake.
    def __init__(self):
        # No extra state; behavior is identical to CLazyNode.
        super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
    """Peer that sends a version but deliberately withholds the verack."""

    def __init__(self):
        self.version_received = False
        super().__init__()

    def on_reject(self, message):
        pass

    def on_verack(self, message):
        pass

    def on_version(self, message):
        # Instead of completing the handshake, poke the node with requests
        # it must not answer before receiving our verack. This is not an
        # exhaustive list!
        self.version_received = True
        self.send_message(msg_ping())
        self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.extra_args = [['-banscore=' + str(banscore)]]

    def run_test(self):
        # Three misbehaving peers: one spamming veracks without a version,
        # one silent without a version, one with version but no verack.
        ban_peer = self.nodes[0].add_p2p_connection(
            CNodeNoVersionBan(), send_version=False)
        idle_peer = self.nodes[0].add_p2p_connection(
            CNodeNoVersionIdle(), send_version=False)
        no_verack_peer = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
        network_thread_start()

        wait_until(lambda: ban_peer.ever_connected,
                   timeout=10, lock=mininode_lock)
        wait_until(lambda: idle_peer.ever_connected,
                   timeout=10, lock=mininode_lock)
        wait_until(lambda: no_verack_peer.version_received,
                   timeout=10, lock=mininode_lock)

        # Mine a block and make sure that it's not sent to the connected
        # peers.
        self.nodes[0].generate(1)

        # Give the node enough time to possibly leak out a message.
        time.sleep(5)

        # The verack-spamming peer should have been banned.
        assert not ban_peer.is_connected

        self.nodes[0].disconnect_p2ps()

        # Wait until all connections are closed.
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)

        # Make sure no unexpected messages came in.
        assert not ban_peer.unexpected_msg
        assert not idle_peer.unexpected_msg
        assert not no_verack_peer.unexpected_msg
if __name__ == '__main__':
P2PLeakTest().main()
| {
"content_hash": "e482386ea76bd8fb0f93922d9e8a4537",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 104,
"avg_line_length": 40.184,
"alnum_prop": 0.6938084809874577,
"repo_name": "biblepay/biblepay",
"id": "d50a7380b07e65c1d24a49cacbdc413f494a30fc",
"size": "5025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/p2p_leak.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "82631"
},
{
"name": "C",
"bytes": "1676673"
},
{
"name": "C++",
"bytes": "9008941"
},
{
"name": "CMake",
"bytes": "14553"
},
{
"name": "CSS",
"bytes": "211795"
},
{
"name": "Dockerfile",
"bytes": "655"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "199753"
},
{
"name": "Makefile",
"bytes": "130183"
},
{
"name": "Objective-C++",
"bytes": "6210"
},
{
"name": "PowerShell",
"bytes": "3455"
},
{
"name": "Python",
"bytes": "1428152"
},
{
"name": "QMake",
"bytes": "874"
},
{
"name": "Ruby",
"bytes": "3540"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "93826"
},
{
"name": "TypeScript",
"bytes": "7705936"
}
],
"symlink_target": ""
} |
""""
Problem Statement
=================
Given certain stock values over a period of days (d days) and a number K, the number of transactions allowed, find the
maximum profit that be obtained with at most K transactions.
Video
-----
* https://youtu.be/oDhu5uGq_ic
Complexity
----------
* Space Complexity O(days * transctions)
* Time Complexity: Slow Solution O (days^2 * transactions), Faster Solution O(days * transaction)
"""
def max_profit(prices, K):
    """Maximum profit achievable with at most K buy/sell transactions.

    O(days * K) dynamic program: table[t][d] is the best profit using at
    most t transactions through day d.
    """
    if K == 0 or prices == []:
        return 0

    days = len(prices)
    table = [[0] * days for _ in range(K + 1)]

    for t in range(1, K + 1):
        # Best value of (profit after t-1 transactions minus a buy price)
        # seen so far; lets the sell decision be O(1) per day.
        best_buy = -prices[0]
        for d in range(1, days):
            table[t][d] = max(table[t][d - 1],     # no transaction today
                              prices[d] + best_buy)  # sell today
            best_buy = max(best_buy, table[t - 1][d] - prices[d])

    print_actual_solution(table, prices)
    return table[-1][-1]
def max_profit_slow_solution(prices, K):
    """O(days^2 * K) reference implementation of max_profit.

    Same DP as max_profit, but re-scans all earlier buy days for every
    sell day instead of carrying a running maximum.
    """
    if K == 0 or prices == []:
        return 0

    days = len(prices)
    table = [[0] * days for _ in range(K + 1)]

    for t in range(1, K + 1):
        for d in range(1, days):
            # Best profit from selling on day d after buying on some
            # earlier day m, on top of t-1 transactions through day m.
            best_sale = max(
                prices[d] - prices[m] + table[t - 1][m] for m in range(d))
            table[t][d] = max(table[t][d - 1], best_sale)

    print_actual_solution(table, prices)
    return table[-1][-1]
def print_actual_solution(T, prices):
    """Reconstruct and print the buy/sell days from the filled DP table T.

    Walks T backwards from T[-1][-1], pushing each sell day and its
    matching buy day onto a stack (most recent pair first), then prints
    the pairs oldest-first.
    """
    transaction = len(T) - 1
    day = len(T[0]) - 1
    stack = []

    while True:
        if transaction == 0 or day == 0:
            break
        if T[transaction][day] == T[transaction][day - 1]:  # no sale today
            day -= 1
        else:
            stack.append(day)  # sold on `day`
            max_diff = T[transaction][day] - prices[day]
            for k in range(day - 1, -1, -1):
                if T[transaction - 1][k] - prices[k] == max_diff:
                    stack.append(k)  # bought on `k`
                    transaction -= 1
                    break

    # Stack holds (sell, buy) pairs, newest pair first; print oldest first.
    # BUG FIX: the original indexed both the prices and the sell day with
    # the leftover loop variable `transaction`, printing the wrong
    # days/prices for every pair.
    for entry in range(len(stack) - 1, -1, -2):
        print("Buy on day {day} at price {price}".format(
            day=stack[entry], price=prices[stack[entry]]))
        print("Sell on day {day} at price {price}".format(
            day=stack[entry - 1], price=prices[stack[entry - 1]]))
if __name__ == '__main__':
    # Smoke test: both implementations must agree on the known optimum (10)
    # for this price series with at most 3 transactions.
    prices = [2, 5, 7, 1, 4, 3, 1, 3]
    assert 10 == max_profit(prices, 3)
    assert 10 == max_profit_slow_solution(prices, 3)
| {
"content_hash": "46550c264e155f1af55e4a5187eab7a0",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 120,
"avg_line_length": 33.120879120879124,
"alnum_prop": 0.5630391506303916,
"repo_name": "mission-peace/interview",
"id": "c7070d7a2b9c717846e49b74dc10e00da0449278",
"size": "3014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/dynamic/stockbuysellktransactions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "26234"
},
{
"name": "Java",
"bytes": "1111425"
},
{
"name": "Python",
"bytes": "105849"
}
],
"symlink_target": ""
} |
import os
from data_importers.tests.stubs import BaseStubCsvStationsJsonDistrictsImporter
"""
Define a stub implementation of json importer we can run tests against
uses data that will return a duplicate station on
(council_id, internal_council_id) by design
"""
class Command(BaseStubCsvStationsJsonDistrictsImporter):
    """Stub importer wired to the duplicate-station fixture data."""

    srid = 4326
    districts_name = "test.geojson"
    stations_name = "test.csv"
    # Fixture directory shipped alongside the tests.
    base_folder_path = os.path.join(
        os.path.dirname(__file__), "..", "fixtures", "duplicate_station"
    )
| {
"content_hash": "7aa242d759731a30afab0a31b79f88d2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.7393822393822393,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "7738867ad5a1c7a8373ca73e0f2f686b8479bc79",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_importers/tests/stubs/stub_duplicatestation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
} |
"""\
FuzzPy visualization plugins abstract base class.
Enforce an interface that visualization plugins must follow, namely:
- types: must provide a list of supported object types
- is_supported: must return True if the plugin can run in the current \
environment.
- visualize: must return a tuple (format, payload) that contains the \
visualization format and a string containing the visualization payload.
@author: Xavier Spriet
@contact: linkadmin@gmail.com
@license: LGPL-3
"""
from abc import ABCMeta, abstractmethod
class AbstractPlugin:
    """\
    Base class for visualization plugins.

    Concrete plugins override C{types}, C{is_supported} and C{visualize}.
    """
    __metaclass__ = ABCMeta

    # Supported datatypes for visualization.
    types = []

    @abstractmethod
    def is_supported(self):
        """\
        Report whether this plugin can run in the current environment.

        @rtype: C{bool}
        @return: True when the plugin's requirements are met in the current
            environment, False otherwise.
        """
        return False

    @abstractmethod
    def visualize(self, *args, **kwargs):
        """\
        Render the visualization and return it as an in-memory payload.

        Arbitrary keyword arguments may be passed; they are forwarded to
        the backend object constructor.

        @rtype: C{tuple}
        @return: (format, payload) tuple, where payload is a string holding
            the visualization data.
        """
        return ('', '')
| {
"content_hash": "241776d007240ef588a7548e6a420af6",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 28.925925925925927,
"alnum_prop": 0.6446862996158771,
"repo_name": "cmantas/tiramola_v3",
"id": "6b7e0c238f0d2422b4f06350b6769a021b3568d8",
"size": "1562",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/fuzz/visplugins/abc_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14788"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "PHP",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "237908"
},
{
"name": "Shell",
"bytes": "26636"
}
],
"symlink_target": ""
} |
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    Local fallback for environments without logging.NullHandler.
    """

    def emit(self, record):
        """Drop *record* without writing it anywhere."""
# Module-level logger: default to ERROR with a no-op handler so the host
# application decides whether/where these messages actually appear.
log = logging.getLogger('MotesPages')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import random
from dustWeb import web
from DustLinkData import DataVaultException, \
DustLinkData
from dustWeb import WebPage
from dustWeb import WebPageDyn
from dustWeb import WebHandler
from dustWeb.viz import VizForm
from dustWeb.viz import VizHtml
from dustWeb.viz import VizTable
from dustWeb.viz import VizFields
from dustWeb.thirdparty import gviz_api
#============================ object ==========================================
class MotesPages(WebPageDyn.WebPageDyn):
    """Dynamic web pages that list motes and manage a single mote."""

    #===== web handlers (private classes)

    class pageMotes(WebHandler.WebHandler):

        def getPage(self,subResource,username):
            # Render the top-level /motes page (currently only the
            # "cleanup" form is enabled).
            global webServer
            global thisWebApp

            username = web.ctx.session.username
            currentPath = WebPage.WebPage.urlStringTolist(web.ctx.path)

            visualizations = [
                VizForm.VizForm(
                    webServer = webServer,
                    username = username,
                    resourcePath = currentPath,
                    subResourcePath = 'cleanup',
                    title = 'Cleanup motes',
                ),
            ]

            # enable the following code to be able to add/delete motes by hand.
            '''
            visualizations = [
                VizForm.VizForm(
                    webServer = webServer,
                    username = username,
                    resourcePath = currentPath,
                    subResourcePath = 'add',
                    title = 'Add a Mote',
                ),
                VizForm.VizForm(
                    webServer = webServer,
                    username = username,
                    resourcePath = currentPath,
                    subResourcePath = 'delete',
                    title = 'Delete a Mote',
                ),
            ],
            '''

            page = thisWebApp.createPage(
                username = username,
                currentPath = currentPath,
                visualizations = visualizations,
            )

            return page

        # enable the following code to be able to add/delete motes by hand.
        def getData(self,subResource,username):
            # Return the form-field description for a /motes sub-resource.
            dld = DustLinkData.DustLinkData()

            # enable the following code to be able to add/delete network by hand.
            '''
            elif subResource==['add']:
                return [
                    {
                        'name': 'mac',
                        'value': '',
                        'type': 'text',
                    },
                ]
            elif subResource==['delete']:
                macStrings = [DustLinkData.DustLinkData.macToString(mac) \
                              for mac in dld.getMoteMacs(username=username)]
                return [
                    {
                        'name': 'mac',
                        'value': None,
                        'optionDisplay': macStrings,
                        'optionValue': macStrings,
                        'type': 'select',
                        'editable': True,
                    },
                ]
            '''

            if subResource==['cleanup']:
                return [
                    {
                        'name': 'command',
                        'value': '',
                        'type': 'text',
                    },
                ]
            else:
                raise web.notfound()

        def postData(self,receivedData,subResource,username):
            # Handle form submissions for a /motes sub-resource.
            dld = DustLinkData.DustLinkData()

            # enable the following code to be able to add/delete network by hand.
            '''
            if subResource==['add']:
                assert isinstance(receivedData,dict)
                assert receivedData.keys()==['mac']
                assert isinstance(receivedData['mac'],str)

                mac = DustLinkData.DustLinkData.stringToMac(receivedData['mac'], username=username)

                dld.addMote(mac,username=username)

            elif subResource==['delete']:
                assert isinstance(receivedData,dict)
                assert receivedData.keys()==['mac']
                assert isinstance(receivedData['mac'],str)

                mac = DustLinkData.DustLinkData.stringToMac(receivedData['mac'])

                dld.deleteMote(mac, username=username)
            '''

            if subResource==['cleanup']:
                assert isinstance(receivedData,dict)
                assert receivedData.keys()==['command']
                assert isinstance(receivedData['command'],str)

                # make sure this user had delete privileges on motes
                dld.authorize(username,['motes'],DustLinkData.DustLinkData.ACTION_DELETE)

                # do everything as admin from here on

                command = receivedData['command']

                if command=='cleanup':
                    # get all mote MAC address
                    macs = dld.getMoteMacs()

                    # get all netnames
                    netnames = dld.getNetnames()

                    # for each network, remove all motes from 'macs' list
                    for netname in netnames:
                        netmacs = dld.getNetworkMotes(netname)
                        for mac in netmacs:
                            macs.remove(mac)

                    # delete each mote remaining in 'macs' list
                    # (i.e. motes that belong to no network)
                    for mac in macs:
                        dld.deleteMote(mac)

            else:
                raise web.notfound()

    class pageMotesSub(WebPageDyn.WebHandlerDyn):

        def getPageDyn(self,dynPath,subResource,username):
            # Render the page of one particular mote; dynPath is the
            # mote's MAC address in string form.
            global webServer
            global thisWebApp

            username = web.ctx.session.username
            currentPath = WebPage.WebPage.urlStringTolist(web.ctx.path)

            page = thisWebApp.createPage(
                username = username,
                currentPath = currentPath,
                visualizations = [
                    VizFields.VizFields(
                        webServer = webServer,
                        username = username,
                        resourcePath = currentPath,
                        subResourcePath = 'info',
                        title = 'Info',
                    ),
                    VizTable.VizTable(
                        webServer = webServer,
                        username = username,
                        resourcePath = currentPath,
                        subResourcePath = 'apps',
                        title = 'Apps on this Mote',
                    ),
                    VizForm.VizForm(
                        webServer = webServer,
                        username = username,
                        resourcePath = currentPath,
                        subResourcePath = 'attach',
                        title = 'Attach App',
                    ),
                    VizForm.VizForm(
                        webServer = webServer,
                        username = username,
                        resourcePath = currentPath,
                        subResourcePath = 'detach',
                        title = 'Detach App',
                    ),
                ],
            )

            return page

        def getDataDyn(self,dynPath,subResource,username):
            # Return the data backing each visualization on the mote page.
            dld = DustLinkData.DustLinkData()
            mac = DustLinkData.DustLinkData.stringToMac(dynPath)

            if subResource==['info']:
                moteInfo = dld.getMoteInfo(mac,username=username)
                if moteInfo:
                    returnVal = [
                        {
                            'name': k,
                            'value': v,
                            'type': 'text',
                            'editable': False,
                        }
                        for (k,v) in moteInfo.items()
                    ]
                else:
                    returnVal = []
                return returnVal

            elif subResource==['apps']:
                with dld.dataLock:
                    appnames = dld.getAttachedApps(mac,username=username)
                    appnames.sort()

                    # columnNames
                    # NOTE(review): 'appnane' looks like a typo for
                    # 'appname' — kept as-is since it is a runtime string.
                    columnNames = ['appnane','numreceived','link']

                    # data
                    data = [
                        [
                            appname,
                            dld.getNumDataPoints(mac,appname,username=username),
                            '<a href="/motedata?mac={0}&app={1}">INTERACT</a>'.format(
                                DustLinkData.DustLinkData.macToString(mac),
                                appname
                            ),
                        ]
                        for appname in appnames
                    ]

                return VizTable.VizTable.formatReturnVal(columnNames,data)

            elif subResource==['attach']:
                # Offer only the apps not already attached to this mote.
                with dld.dataLock:
                    appsToAttach = dld.getAppNames(username=username)
                    for app in dld.getAttachedApps(mac,username=username):
                        appsToAttach.remove(app)
                return [
                    {
                        'name': 'appname',
                        'value': None,
                        'optionDisplay': appsToAttach,
                        'optionValue': appsToAttach,
                        'type': 'select',
                        'editable': True,
                    },
                ]

            elif subResource==['detach']:
                appsToDetach = dld.getAttachedApps(mac,username=username)
                return [
                    {
                        'name': 'appname',
                        'value': None,
                        'optionDisplay': appsToDetach,
                        'optionValue': appsToDetach,
                        'type': 'select',
                        'editable': True,
                    },
                ]

            else:
                raise web.notfound()

        def postDataDyn(self,receivedData,dynPath,subResource,username):
            # Attach or detach an application on this mote.
            mac = DustLinkData.DustLinkData.stringToMac(dynPath)
            dld = DustLinkData.DustLinkData()

            if subResource==['attach']:
                assert isinstance(receivedData,dict)
                assert receivedData.keys()==['appname']
                assert isinstance(receivedData['appname'],str)

                dld.attachAppToMote(
                    mac,
                    receivedData['appname'],
                    username=username,
                )

            elif subResource==['detach']:
                assert isinstance(receivedData,dict)
                assert receivedData.keys()==['appname']
                assert isinstance(receivedData['appname'],str)

                dld.detachAppFromMote(
                    mac,
                    receivedData['appname'],
                    username=username,
                )

            else:
                raise web.notfound()

    def subPageLister(self):
        # One subpage per known mote MAC; empty list when the user is
        # not authorized to see motes.
        username = str(web.ctx.session.username)
        try:
            return [
                {
                    'url': DustLinkData.DustLinkData.macToString(u),
                    'title': DustLinkData.DustLinkData.macToString(u),
                }
                for u in DustLinkData.DustLinkData().getMoteMacs(username=username)
            ]
        except DataVaultException.Unauthorized:
            return []

    def __init__(self, webServer_param):

        global webServer
        global thisWebApp

        # local variables
        self.webServer = webServer_param

        # global variables
        # NOTE(review): the handlers above rely on these module-level
        # globals, so only one MotesPages instance can exist at a time.
        webServer = self.webServer
        thisWebApp = self

        # initialize parent class
        WebPageDyn.WebPageDyn.__init__(self,webServer = self.webServer,
                                       url = 'motes',
                                       title = 'Motes',
                                       webHandler = self.pageMotes,
                                       subPageLister = self.subPageLister,
                                       subPageHandler = self.pageMotesSub)
"content_hash": "1665d0808e7ddf5307cfe4bcd2506a57",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 99,
"avg_line_length": 40.58108108108108,
"alnum_prop": 0.37116217116217115,
"repo_name": "twatteyne/dustlink_academy",
"id": "4cbc74dbf445c7987eab26121e1a960e66448ab0",
"size": "15015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "views/web/MotesPages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "103133"
},
{
"name": "HTML",
"bytes": "24681"
},
{
"name": "JavaScript",
"bytes": "179571"
},
{
"name": "Python",
"bytes": "2236777"
}
],
"symlink_target": ""
} |
"""Annotation and rtyping support for the result of os.stat(), os.lstat()
and os.fstat(). In RPython, as in plain Python, the stat result can be
indexed like a tuple, but it also exposes the st_xxx attributes.
"""
import os
import sys
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rlib import rposix
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper import extregistry
from rpython.rtyper.annlowlevel import hlstr
from rpython.rtyper.extfunc import extdef
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rtyper.rtuple import TUPLE_TYPE
from rpython.rtyper.tool import rffi_platform as platform
from rpython.tool.pairtype import pairtype
from rpython.tool.sourcetools import func_renamer
from rpython.translator.tool.cbuild import ExternalCompilationInfo
# Support for float times is here.
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve
# sub-second timestamps.
# - TIMESPEC is defined when the "struct stat" contains st_atim field.
# Probe for "struct timespec" on platforms whose "struct stat" exposes
# nanosecond st_atim/st_mtim/st_ctim fields.
if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'):
    TIMESPEC = platform.Struct('struct timespec',
                               [('tv_sec', rffi.TIME_T),
                                ('tv_nsec', rffi.LONG)])
else:
    TIMESPEC = None
# all possible fields - some of them are not available on all platforms
ALL_STAT_FIELDS = [
    ("st_mode", lltype.Signed),
    ("st_ino", lltype.SignedLongLong),
    ("st_dev", lltype.SignedLongLong),
    ("st_nlink", lltype.Signed),
    ("st_uid", lltype.Signed),
    ("st_gid", lltype.Signed),
    ("st_size", lltype.SignedLongLong),
    ("st_atime", lltype.Float),
    ("st_mtime", lltype.Float),
    ("st_ctime", lltype.Float),
    ("st_blksize", lltype.Signed),
    ("st_blocks", lltype.Signed),
    ("st_rdev", lltype.Signed),
    ("st_flags", lltype.Signed),
    #("st_gen", lltype.Signed),     -- new in CPy 2.5, not implemented
    #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented
]

# The first N_INDEXABLE_FIELDS entries are the ones reachable by
# tuple-style indexing of a stat result.
N_INDEXABLE_FIELDS = 10

# For OO backends, expose only the portable fields (the first 10).
PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS]

STATVFS_FIELDS = [
    ("f_bsize", lltype.Signed),
    ("f_frsize", lltype.Signed),
    ("f_blocks", lltype.Signed),
    ("f_bfree", lltype.Signed),
    ("f_bavail", lltype.Signed),
    ("f_files", lltype.Signed),
    ("f_ffree", lltype.Signed),
    ("f_favail", lltype.Signed),
    ("f_flag", lltype.Signed),
    ("f_namemax", lltype.Signed),
]
# ____________________________________________________________
#
# Annotation support
class SomeStatResult(annmodel.SomeObject):
    """Annotation for the result of os.stat()/os.lstat()/os.fstat()."""
    knowntype = os.stat_result

    def rtyper_makerepr(self, rtyper):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.StatResultRepr(rtyper)

    def rtyper_makekey(self):
        return self.__class__,

    def getattr(self, s_attr):
        # Annotate 'st_xxx' attribute access with the field's low-level type.
        assert s_attr.is_constant(), "non-constant attr name in getattr()"
        attrname = s_attr.const
        TYPE = STAT_FIELD_TYPES[attrname]
        return lltype_to_annotation(TYPE)

    def _get_rmarshall_support_(self):     # for rlib.rmarshal
        # reduce and recreate stat_result objects from 10-tuples
        # (we ignore the extra values here for simplicity and portability)
        def stat_result_reduce(st):
            return (st[0], st[1], st[2], st[3], st[4],
                    st[5], st[6], st[7], st[8], st[9])

        def stat_result_recreate(tup):
            return make_stat_result(tup + extra_zeroes)
        s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE)
                                        for name, TYPE in PORTABLE_STAT_FIELDS])
        extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS))
        return s_reduced, stat_result_reduce, stat_result_recreate
class SomeStatvfsResult(annmodel.SomeObject):
    """Annotation for the result of os.statvfs()."""
    if hasattr(os, 'statvfs_result'):
        knowntype = os.statvfs_result
    else:
        knowntype = None  # will not be used

    def rtyper_makerepr(self, rtyper):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.StatvfsResultRepr(rtyper)

    def rtyper_makekey(self):
        return self.__class__,

    def getattr(self, s_attr):
        # Annotate 'f_xxx' attribute access with the field's low-level type.
        assert s_attr.is_constant()
        TYPE = STATVFS_FIELD_TYPES[s_attr.const]
        return lltype_to_annotation(TYPE)
class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)):
    # Annotate tuple-style indexing of a stat result with a constant index.

    def getitem((s_sta, s_int)):
        assert s_int.is_constant(), "os.stat()[index]: index must be constant"
        index = s_int.const
        assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range"
        name, TYPE = STAT_FIELDS[index]
        return lltype_to_annotation(TYPE)
class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)):
    # Annotate tuple-style indexing of a statvfs result with a constant index.

    def getitem((s_stat, s_int)):
        assert s_int.is_constant()
        name, TYPE = STATVFS_FIELDS[s_int.const]
        return lltype_to_annotation(TYPE)


# Singleton annotation instances used by the extdefs below.
s_StatResult = SomeStatResult()
s_StatvfsResult = SomeStatvfsResult()
def make_stat_result(tup):
    """Turn a tuple into an os.stat_result object."""
    head = tup[:N_INDEXABLE_FIELDS]
    # Fields beyond the indexable ten are passed as keyword entries.
    extra_names = STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]
    kwds = {}
    for offset, fieldname in enumerate(extra_names):
        kwds[fieldname] = tup[N_INDEXABLE_FIELDS + offset]
    return os.stat_result(head, kwds)
def make_statvfs_result(tup):
    """Turn a tuple into an os.statvfs_result object."""
    return os.statvfs_result(tup)
class MakeStatResultEntry(extregistry.ExtRegistryEntry):
    # Teach the annotator/rtyper how to handle make_stat_result().
    _about_ = make_stat_result

    def compute_result_annotation(self, s_tup):
        return s_StatResult

    def specialize_call(self, hop):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.specialize_make_stat_result(hop)


class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry):
    # Teach the annotator/rtyper how to handle make_statvfs_result().
    _about_ = make_statvfs_result

    def compute_result_annotation(self, s_tup):
        return s_StatvfsResult

    def specialize_call(self, hop):
        from rpython.rtyper.module import r_os_stat
        return r_os_stat.specialize_make_statvfs_result(hop)
# ____________________________________________________________
#
# RFFI support
# Pick the C struct name and headers for the current platform.
if sys.platform.startswith('win'):
    _name_struct_stat = '_stati64'
    INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h']
else:
    if sys.platform.startswith('linux'):
        _name_struct_stat = 'stat64'
    else:
        _name_struct_stat = 'stat'
    INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h']

compilation_info = ExternalCompilationInfo(
    # This must be set to 64 on some systems to enable large file support.
    #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'],
    # ^^^ nowadays it's always set in all C files we produce.
    includes=INCLUDES
)

if TIMESPEC is not None:
    class CConfig_for_timespec:
        _compilation_info_ = compilation_info
        TIMESPEC = TIMESPEC
    # Resolve the real struct layout and keep a pointer type to it.
    TIMESPEC = lltype.Ptr(
        platform.configure(CConfig_for_timespec)['TIMESPEC'])
def posix_declaration(try_to_add=None):
    """Configure STAT_STRUCT/STATVFS_STRUCT against the platform headers.

    When *try_to_add* is given, probe whether that optional stat field
    compiles; on success it is appended to STAT_FIELDS, on failure the
    call is a no-op.
    """
    global STAT_STRUCT, STATVFS_STRUCT

    LL_STAT_FIELDS = STAT_FIELDS[:]
    if try_to_add:
        LL_STAT_FIELDS.append(try_to_add)

    if TIMESPEC is not None:

        def _expand(lst, originalname, timespecname):
            for i, (_name, _TYPE) in enumerate(lst):
                if _name == originalname:
                    # replace the 'st_atime' field of type rffi.DOUBLE
                    # with a field 'st_atim' of type 'struct timespec'
                    lst[i] = (timespecname, TIMESPEC.TO)
                    break

        _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim')
        _expand(LL_STAT_FIELDS, 'st_mtime', 'st_mtim')
        _expand(LL_STAT_FIELDS, 'st_ctime', 'st_ctim')

        del _expand
    else:
        # Replace float fields with integers
        for name in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
            for i, (_name, _TYPE) in enumerate(LL_STAT_FIELDS):
                if _name == name:
                    LL_STAT_FIELDS[i] = (_name, lltype.Signed)
                    break

    class CConfig:
        _compilation_info_ = compilation_info
        STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
        STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS)
    try:
        # Errors are only tolerated while probing an optional field.
        config = platform.configure(CConfig, ignore_errors=try_to_add is not None)
    except platform.CompilationError:
        if try_to_add:
            return  # failed to add this field, give up
        raise

    STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT'])
    STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT'])
    if try_to_add:
        STAT_FIELDS.append(try_to_add)
# This lists only the fields that have been found on the underlying platform.
# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the
# following loop.
STAT_FIELDS = PORTABLE_STAT_FIELDS[:]
if sys.platform != 'win32':
    posix_declaration()
    # Probe each optional field individually; those that compile get
    # appended to STAT_FIELDS by posix_declaration().
    for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)):
        posix_declaration(ALL_STAT_FIELDS[_i])
    del _i

# these two global vars only list the fields defined in the underlying platform
STAT_FIELD_TYPES = dict(STAT_FIELDS)  # {'st_xxx': TYPE}
STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS]
del _name, _TYPE

STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS)
STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS]
def build_stat_result(st):
    """Convert a raw "struct stat" into an os.stat_result."""
    # only for LL backends
    # With TIMESPEC, the platform carries nanosecond st_*tim fields;
    # fold them into float seconds.
    if TIMESPEC is not None:
        atim = st.c_st_atim; atime = int(atim.c_tv_sec) + 1E-9 * int(atim.c_tv_nsec)
        mtim = st.c_st_mtim; mtime = int(mtim.c_tv_sec) + 1E-9 * int(mtim.c_tv_nsec)
        ctim = st.c_st_ctim; ctime = int(ctim.c_tv_sec) + 1E-9 * int(ctim.c_tv_nsec)
    else:
        atime = st.c_st_atime
        mtime = st.c_st_mtime
        ctime = st.c_st_ctime

    result = (st.c_st_mode,
              st.c_st_ino,
              st.c_st_dev,
              st.c_st_nlink,
              st.c_st_uid,
              st.c_st_gid,
              st.c_st_size,
              atime,
              mtime,
              ctime)

    # Optional platform-dependent fields, in STAT_FIELDS order.
    if "st_blksize" in STAT_FIELD_TYPES: result += (st.c_st_blksize,)
    if "st_blocks" in STAT_FIELD_TYPES: result += (st.c_st_blocks,)
    if "st_rdev" in STAT_FIELD_TYPES: result += (st.c_st_rdev,)
    if "st_flags" in STAT_FIELD_TYPES: result += (st.c_st_flags,)

    return make_stat_result(result)
def build_statvfs_result(st):
    """Convert a raw "struct statvfs" into an os.statvfs_result."""
    return make_statvfs_result((
        st.c_f_bsize,
        st.c_f_frsize,
        st.c_f_blocks,
        st.c_f_bfree,
        st.c_f_bavail,
        st.c_f_files,
        st.c_f_ffree,
        st.c_f_favail,
        st.c_f_flag,
        st.c_f_namemax
    ))
def register_stat_variant(name, traits):
    """Build the extdef for one of os.stat / os.lstat / os.fstat.

    *name* is 'stat', 'lstat' or 'fstat'; *traits* supplies the
    string helpers for the path type.
    """
    if name != 'fstat':
        arg_is_path = True
        s_arg = traits.str0
        ARG1 = traits.CCHARP
    else:
        # fstat() takes a file descriptor instead of a path.
        arg_is_path = False
        s_arg = int
        ARG1 = rffi.INT

    if sys.platform == 'win32':
        # See Win32 implementation below
        posix_stat_llimpl = make_win32_stat_impl(name, traits)

        return extdef(
            [s_arg], s_StatResult, traits.ll_os_name(name),
            llimpl=posix_stat_llimpl)

    if sys.platform.startswith('linux'):
        # because we always use _FILE_OFFSET_BITS 64 - this helps things work that are not a c compiler
        _functions = {'stat': 'stat64',
                      'fstat': 'fstat64',
                      'lstat': 'lstat64'}
        c_func_name = _functions[name]
    else:
        c_func_name = name

    posix_mystat = rffi.llexternal(c_func_name,
                                   [ARG1, STAT_STRUCT], rffi.INT,
                                   compilation_info=compilation_info,
                                   save_err=rffi.RFFI_SAVE_ERRNO)

    @func_renamer('os_%s_llimpl' % (name,))
    def posix_stat_llimpl(arg):
        # Low-level implementation: call the C function into a raw
        # "struct stat" buffer, then convert it to an os.stat_result.
        stresult = lltype.malloc(STAT_STRUCT.TO, flavor='raw')
        try:
            if arg_is_path:
                arg = traits.str2charp(arg)
            error = rffi.cast(rffi.LONG, posix_mystat(arg, stresult))
            if arg_is_path:
                traits.free_charp(arg)
            if error != 0:
                raise OSError(rposix.get_saved_errno(), "os_?stat failed")
            return build_stat_result(stresult)
        finally:
            lltype.free(stresult, flavor='raw')

    @func_renamer('os_%s_fake' % (name,))
    def posix_fakeimpl(arg):
        # Untranslated fallback: call the host os.stat()/lstat()/fstat()
        # and repack the result into a low-level tuple.
        if s_arg == traits.str0:
            arg = hlstr(arg)
        st = getattr(os, name)(arg)
        fields = [TYPE for fieldname, TYPE in STAT_FIELDS]
        TP = TUPLE_TYPE(fields)
        ll_tup = lltype.malloc(TP.TO)
        for i, (fieldname, TYPE) in enumerate(STAT_FIELDS):
            val = getattr(st, fieldname)
            if isinstance(TYPE, lltype.Number):
                rffi.setintfield(ll_tup, 'item%d' % i, int(val))
            elif TYPE is lltype.Float:
                setattr(ll_tup, 'item%d' % i, float(val))
            else:
                setattr(ll_tup, 'item%d' % i, val)
        return ll_tup

    return extdef(
        [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,),
        llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl)
def register_statvfs_variant(name, traits):
    """Build the extdef for os.statvfs or os.fstatvfs."""
    if name != 'fstatvfs':
        arg_is_path = True
        s_arg = traits.str0
        ARG1 = traits.CCHARP
    else:
        # fstatvfs() takes a file descriptor instead of a path.
        arg_is_path = False
        s_arg = int
        ARG1 = rffi.INT

    posix_mystatvfs = rffi.llexternal(name,
                                      [ARG1, STATVFS_STRUCT], rffi.INT,
                                      compilation_info=compilation_info,
                                      save_err=rffi.RFFI_SAVE_ERRNO)

    @func_renamer('os_%s_llimpl' % (name,))
    def posix_statvfs_llimpl(arg):
        # Call the C function into a raw "struct statvfs" buffer, then
        # convert it to an os.statvfs_result.
        stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw')
        try:
            if arg_is_path:
                arg = traits.str2charp(arg)
            error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult))
            if arg_is_path:
                traits.free_charp(arg)
            if error != 0:
                raise OSError(rposix.get_saved_errno(), "os_?statvfs failed")
            return build_statvfs_result(stresult)
        finally:
            lltype.free(stresult, flavor='raw')

    @func_renamer('os_%s_fake' % (name,))
    def posix_fakeimpl(arg):
        # Untranslated fallback using the host os.statvfs()/fstatvfs().
        if s_arg == traits.str0:
            arg = hlstr(arg)
        st = getattr(os, name)(arg)
        fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS]
        TP = TUPLE_TYPE(fields)
        ll_tup = lltype.malloc(TP.TO)
        for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS):
            val = getattr(st, fieldname)
            rffi.setintfield(ll_tup, 'item%d' % i, int(val))
        return ll_tup

    return extdef(
        [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,),
        llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl
    )
def make_win32_stat_impl(name, traits):
    """Return the Win32 low-level implementation for stat/lstat or fstat."""
    from rpython.rlib import rwin32
    from rpython.rtyper.module.ll_win32file import make_win32_traits
    win32traits = make_win32_traits(traits)

    # The CRT of Windows has a number of flaws wrt. its stat() implementation:
    # - time stamps are restricted to second resolution
    # - file modification times suffer from forth-and-back conversions between
    #   UTC and local time
    # Therefore, we implement our own stat, based on the Win32 API directly.
    from rpython.rtyper.tool import rffi_platform as platform
    from rpython.translator.tool.cbuild import ExternalCompilationInfo
    from rpython.rlib import rwin32

    assert len(STAT_FIELDS) == 10  # no extra fields on Windows

    def attributes_to_mode(attributes):
        # Map Win32 file attributes onto a POSIX-style st_mode value.
        m = 0
        attributes = intmask(attributes)
        if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY:
            m |= win32traits._S_IFDIR | 0111  # IFEXEC for user,group,other
        else:
            m |= win32traits._S_IFREG
        if attributes & win32traits.FILE_ATTRIBUTE_READONLY:
            m |= 0444
        else:
            m |= 0666
        return m

    def attribute_data_to_stat(info):
        # Build a stat result from WIN32_FILE_ATTRIBUTE_DATA; fields not
        # provided by this API (ino, dev, nlink, uid, gid) are zero.
        st_mode = attributes_to_mode(info.c_dwFileAttributes)
        st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
        ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime)
        mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime)
        atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime)

        result = (st_mode,
                  0, 0, 0, 0, 0,
                  st_size,
                  atime, mtime, ctime)

        return make_stat_result(result)

    def by_handle_info_to_stat(info):
        # similar to the one above
        st_mode = attributes_to_mode(info.c_dwFileAttributes)
        st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow)
        ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime)
        mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime)
        atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime)

        # specific to fstat()
        st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow)
        st_nlink = info.c_nNumberOfLinks

        result = (st_mode,
                  st_ino, 0, st_nlink, 0, 0,
                  st_size,
                  atime, mtime, ctime)

        return make_stat_result(result)

    def attributes_from_dir(l_path, data):
        # Fallback used on sharing violations: fetch the attributes via
        # FindFirstFile instead of GetFileAttributesEx.
        filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw')
        try:
            hFindFile = win32traits.FindFirstFile(l_path, filedata)
            if hFindFile == rwin32.INVALID_HANDLE_VALUE:
                return 0
            win32traits.FindClose(hFindFile)
            data.c_dwFileAttributes = filedata.c_dwFileAttributes
            rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime)
            rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime)
            rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime)
            data.c_nFileSizeHigh = filedata.c_nFileSizeHigh
            data.c_nFileSizeLow = filedata.c_nFileSizeLow
            return 1
        finally:
            lltype.free(filedata, flavor='raw')

    def win32_stat_llimpl(path):
        data = lltype.malloc(win32traits.WIN32_FILE_ATTRIBUTE_DATA, flavor='raw')
        try:
            l_path = traits.str2charp(path)
            res = win32traits.GetFileAttributesEx(l_path, win32traits.GetFileExInfoStandard, data)
            errcode = rwin32.GetLastError_saved()
            if res == 0:
                if errcode == win32traits.ERROR_SHARING_VIOLATION:
                    res = attributes_from_dir(l_path, data)
                    errcode = rwin32.GetLastError_saved()
            traits.free_charp(l_path)
            if res == 0:
                raise WindowsError(errcode, "os_stat failed")
            return attribute_data_to_stat(data)
        finally:
            lltype.free(data, flavor='raw')

    def win32_fstat_llimpl(fd):
        handle = rwin32.get_osfhandle(fd)

        filetype = win32traits.GetFileType(handle)
        if filetype == win32traits.FILE_TYPE_CHAR:
            # console or LPT device
            return make_stat_result((win32traits._S_IFCHR,
                                     0, 0, 0, 0, 0,
                                     0, 0, 0, 0))
        elif filetype == win32traits.FILE_TYPE_PIPE:
            # socket or named pipe
            return make_stat_result((win32traits._S_IFIFO,
                                     0, 0, 0, 0, 0,
                                     0, 0, 0, 0))
        elif filetype == win32traits.FILE_TYPE_UNKNOWN:
            error = rwin32.GetLastError_saved()
            if error != 0:
                raise WindowsError(error, "os_fstat failed")
            # else: unknown but valid file

        # normal disk file (FILE_TYPE_DISK)
        info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION,
                             flavor='raw', zero=True)
        try:
            res = win32traits.GetFileInformationByHandle(handle, info)
            if res == 0:
                raise WindowsError(rwin32.GetLastError_saved(),
                                   "os_fstat failed")
            return by_handle_info_to_stat(info)
        finally:
            lltype.free(info, flavor='raw')

    if name == 'fstat':
        return win32_fstat_llimpl
    else:
        return win32_stat_llimpl
#__________________________________________________
# Helper functions for win32
def make_longlong(high, low):
    """Combine the two 32-bit halves of a Win32 64-bit value."""
    high_part = rffi.r_longlong(high) << 32
    return high_part + rffi.r_longlong(low)
# Seconds between 1.1.1601 and 1.1.1970
secs_between_epochs = rffi.r_longlong(11644473600)

def FILE_TIME_to_time_t_float(filetime):
    """Convert a Win32 FILETIME into a Unix timestamp as a float."""
    ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime)
    # FILETIME is in units of 100 nsec
    return float(ft) * (1.0 / 10000000.0) - secs_between_epochs
def time_t_to_FILE_TIME(time, filetime):
    """Fill *filetime* (a Win32 FILETIME) from a Unix timestamp."""
    ft = rffi.r_longlong((time + secs_between_epochs) * 10000000)
    filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32)
    filetime.c_dwLowDateTime = rffi.r_uint(ft)  # masking off high bits
| {
"content_hash": "bb87c5400994af0430dcc5e89d2c5efa",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 103,
"avg_line_length": 35.792229729729726,
"alnum_prop": 0.5988956534050687,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "74e5644538488139d2d4952fb54741eafeaa6ed0",
"size": "21189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/rtyper/module/ll_os_stat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
""" Make build alignments for starting a new HDP
"""
from __future__ import print_function
import glob
import os
import sys
import string
import pandas as pd
import numpy as np
from argparse import ArgumentParser
from random import shuffle
def parse_args():
    """Build the command-line parser for this script and parse sys.argv."""
    arg_parser = ArgumentParser(description=__doc__)
    # query files
    arg_parser.add_argument('--C_alignments', '-C', action='store',
                            dest='C_alns', required=False, type=str, default=None,
                            help="C files")
    arg_parser.add_argument('--mC_alignments', '-mC', action='store',
                            dest='mC_alns', required=False, type=str, default=None,
                            help="mC files")
    arg_parser.add_argument('--hmC_alignments', '-hmC', action='store',
                            dest='hmC_alns', required=False, type=str, default=None,
                            help="hmC files")
    arg_parser.add_argument('--number_of_assignments', '-n', action='store', type=int, default=10000,
                            dest='max_assignments',
                            help='total number of assignments to collect FOR EACH GROUP')
    arg_parser.add_argument('--threshold', '-t', action='store', type=float, default=0.25, dest='threshold')
    arg_parser.add_argument('--out', '-o', action='store', type=str, required=True, dest='out_file')
    return arg_parser.parse_args()
def randomly_select_alignments(path_to_alignments):
    """Expand the glob pattern, drop zero-length files, and shuffle the rest."""
    candidates = glob.glob(path_to_alignments)
    non_empty = [f for f in candidates if os.stat(f).st_size != 0]
    shuffle(non_empty)
    return non_empty
def collect_assignments(alignments, strand, threshold, max_assignments, transtable):
    """Collect (kmer, event_mean) assignments from alignment files.

    Reads columns (strand, kmer, posterior_prob, event_mean) from each
    tab-separated alignment file, keeps rows matching *strand* with
    posterior_prob >= *threshold*, applies *transtable* to the kmers,
    and stops once roughly *max_assignments* rows have been gathered.
    Returns None when *alignments* is None, otherwise a concatenated
    pandas DataFrame with columns 'kmer' and 'event_mean'.
    """
    if alignments is None:
        return None
    assignments_list = []
    add_to_assignments = assignments_list.append
    total = 0
    assert len(alignments) > 0, "Didn't find any alignments"
    for alignment in alignments:
        try:
            data = pd.read_table(alignment, usecols=(4, 9, 12, 13),
                                 # np.str was removed from numpy; the
                                 # builtin str is what it aliased.
                                 dtype={'strand': str,
                                        'kmer': str,
                                        'posterior_prob': np.float64,
                                        'event_mean': np.float64},
                                 header=None,
                                 names=['strand', 'kmer', 'posterior_prob', 'event_mean'])
            # .loc replaces the long-deprecated (and now removed) .ix indexer.
            selected_rows = data.loc[(data['strand'] == strand) & (data['posterior_prob'] >= threshold)]
            total += selected_rows.shape[0]
            assignment_table = pd.DataFrame({"kmer": selected_rows['kmer'].str.translate(transtable),
                                             "event_mean": selected_rows["event_mean"]})
            add_to_assignments(assignment_table)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate; a bad file is reported and skipped.
            print("ERROR: problem with alignment {}".format(alignment))
            continue
        if total >= max_assignments:
            break
    assignments = pd.concat(assignments_list)
    return assignments
def make_build_alignment(c_alns, mc_alns, hmc_alns, strand, threshold, max_assignments):
    """Build the combined assignment table for one strand.

    Args:
        c_alns, mc_alns, hmc_alns: alignment file lists for unmethylated,
            methylated, and hydroxymethylated cytosine (any may be None).
        strand: strand to collect assignments for ('t' or 'c').
        threshold: minimum posterior probability for keeping an assignment.
        max_assignments: maximum number of assignments to collect per group.

    Returns:
        A pandas DataFrame concatenating the non-None group tables.
    """
    # Translation tables mapping cytosine to the character used for each
    # methylation state: C (unmethylated), E (methylated), O (hydroxymethylated).
    # NOTE: string.maketrans is Python 2 only; this script is written for
    # Python 2 (use str.maketrans on Python 3).
    C_trans_table = string.maketrans("C", "C")
    mC_trans_table = string.maketrans("C", "E")
    hmC_trans_table = string.maketrans("C", "O")

    # BUG FIX: the strand argument was previously ignored ("t" was hard-coded
    # in all three calls), so the complement table silently contained
    # template-strand rows. Pass the requested strand through.
    C_table = collect_assignments(c_alns, strand, threshold, max_assignments,
                                  C_trans_table)
    mC_table = collect_assignments(mc_alns, strand, threshold, max_assignments,
                                   mC_trans_table)
    hmC_table = collect_assignments(hmc_alns, strand, threshold, max_assignments,
                                    hmC_trans_table)

    nb_c_assignments = C_table.shape[0] if C_table is not None else "None"
    nb_mc_assignments = mC_table.shape[0] if mC_table is not None else "None"
    nb_hmc_assignments = hmC_table.shape[0] if hmC_table is not None else "None"
    print("[buildAlignments] NOTICE: I found {C} C-assignments, {mC} mC-assignments, and {hmC} hmC-assignments "
          "for strand {strand}"
          "".format(C=nb_c_assignments, mC=nb_mc_assignments,
                    hmC=nb_hmc_assignments, strand=strand),
          file=sys.stderr)
    # Keep only the groups that were actually collected.
    tables = [table for table in (C_table, mC_table, hmC_table)
              if table is not None]
    return pd.concat(tables)
def main(arguments):
    """Entry point: collect assignments for both strands and write the
    build-alignment file named by --out.
    """
    args = parse_args()
    # Gather candidate alignment files for each cytosine state, or None
    # when that state was not supplied on the command line.
    C_alns = randomly_select_alignments(args.C_alns) if args.C_alns is not None else None
    mC_alns = randomly_select_alignments(args.mC_alns) if args.mC_alns is not None else None
    hmC_alns = randomly_select_alignments(args.hmC_alns) if args.hmC_alns is not None else None

    template_build_alignment = make_build_alignment(C_alns, mC_alns, hmC_alns, 't',
                                                    args.threshold,
                                                    args.max_assignments)
    complement_build_alignment = make_build_alignment(C_alns, mC_alns, hmC_alns, 'c',
                                                      args.threshold,
                                                      args.max_assignments)

    # Fifteen-column alignment line with strand, kmer, and event mean filled in.
    entry_line = "blank\t0\tblank\tblank\t{strand}\t0\t0.0\t0.0\t0.0\t{kmer}\t0.0\t0.0\t0.0\t{event}\t0.0\n"
    with open(args.out_file, 'w') as f:
        # Template rows first, then complement rows, as before.
        for strand_label, table in (("t", template_build_alignment),
                                    ("c", complement_build_alignment)):
            for row in table.itertuples():
                # NOTE(review): row[1]/row[2] assume the table's column order
                # is event_mean, kmer (alphabetical, as older pandas produced
                # from a dict literal) -- verify against the pandas version
                # in use.
                f.write(entry_line.format(strand=strand_label,
                                          kmer=row[2], event=row[1]))
if __name__ == "__main__":
sys.exit(main(sys.argv)) | {
"content_hash": "891abe4be7710d3b32ae6a98644dadaf",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 112,
"avg_line_length": 44.153225806451616,
"alnum_prop": 0.5864840182648402,
"repo_name": "ArtRand/cPecan",
"id": "1b53f93fe46c0d4a06e55507265ae519dcd689ba",
"size": "5497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/makeBuildAlignments.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2680114"
},
{
"name": "C++",
"bytes": "113234"
},
{
"name": "HTML",
"bytes": "48463"
},
{
"name": "Makefile",
"bytes": "9295"
},
{
"name": "Objective-C",
"bytes": "4283"
},
{
"name": "Python",
"bytes": "242803"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
"""fix foreign constraints
Revision ID: 06382790fb2c
Create Date: 2016-08-11 14:45:34.416120
"""
from alembic import op
from sqlalchemy.engine import reflection
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '06382790fb2c'
down_revision = '010308b06b49'
def upgrade():
    """Recreate selected sfc foreign keys with ON DELETE CASCADE."""
    # Tables whose foreign key on the named column must cascade on delete.
    cascade_columns = {
        'sfc_flow_classifier_l7_parameters': 'classifier_id',
        'sfc_chain_group_associations': 'portchain_id',
        'sfc_port_chain_parameters': 'chain_id',
        'sfc_service_function_params': 'pair_id',
        'sfc_chain_classifier_associations': 'portchain_id'
    }
    inspector = reflection.Inspector.from_engine(op.get_bind())
    for table_name, column_name in cascade_columns.items():
        constraints = inspector.get_foreign_keys(table_name)
        # Mark the matching constraint(s) to cascade, then rebuild every
        # constraint on the table so the change takes effect.
        for constraint in constraints:
            if column_name in constraint['constrained_columns']:
                constraint['options']['ondelete'] = 'CASCADE'
        migration.remove_foreign_keys(table_name, constraints)
        migration.create_foreign_keys(table_name, constraints)
| {
"content_hash": "2e344a6c7b1b7976e027c1c4b1247deb",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 30.62857142857143,
"alnum_prop": 0.6660447761194029,
"repo_name": "openstack/networking-sfc",
"id": "155978b212f878fb6693089a8ae77faaa655ad09",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_sfc/db/migration/alembic_migrations/versions/newton/contract/06382790fb2c_fix_foreign_constraints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1041"
},
{
"name": "Python",
"bytes": "1334579"
},
{
"name": "Shell",
"bytes": "3076"
}
],
"symlink_target": ""
} |
import sys # For getting generic exception info
import datetime # For getting time deltas for timeouts
import time # For sleep
import json # For packing ros message properties
import random # For picking robot responses and shuffling answer options
import logging # Log messages
import Queue # for queuing messages for the main game loop
from SS_Errors import NoStoryFound # Custom exception when no stories found
from ss_script_parser import ss_script_parser # Parses scripts
from ss_personalization_manager import ss_personalization_manager
from ss_ros import ss_ros # Our ROS connection
class ss_script_handler():
    """ Social stories script handler parses and deals with script lines. Uses
    the script parser to get the next line in a script. We keep loading script
    lines and parsing script lines separate on the offchance that we might want
    to replace how scripts are stored and accessed (e.g., in a database versus
    in text files).
    """

    # Constants for script playback:

    # Time (seconds) to pause after showing answer feedback and playing robot
    # feedback speech before moving on to the next question.
    ANSWER_FEEDBACK_PAUSE_TIME = 2

    # Time (seconds) to wait for the robot to finish speaking or acting before
    # moving on to the next script line.
    WAIT_TIME = 30
    def __init__(self, ros_node, session, participant, script_path,
                 story_script_path, session_script_path, database, queue,
                 percent_correct_to_level):
        """ Save references to ROS connection and logger, get scripts and
        set up to read script lines.

        Args:
            ros_node: ROS node used to publish robot/game messages.
            session: session number, used to pick the session script and
                passed to the personalization manager.
            participant: participant id for personalization.
            script_path: base path that story/session script paths are
                appended to.
            story_script_path: subdirectory for story scripts (may be None).
            session_script_path: subdirectory for session scripts (may be
                None).
            database: database handle for the personalization manager.
            queue: the main game node's queue, used to send it commands
                (e.g. "PAUSE").
            percent_correct_to_level: threshold passed to the
                personalization manager for leveling decisions.

        Raises:
            IOError: if the session script file cannot be opened.
        """
        # Set up logger.
        self._logger = logging.getLogger(__name__)
        self._logger.info("Setting up script handler...")

        # Save reference to our ros node so we can publish messages.
        self._ros_node = ros_node

        # Save script paths so we can load scripts later. Missing
        # subdirectories become "" so simple string concatenation works.
        self._script_path = script_path
        if (story_script_path is None):
            self._story_script_path = ""
        else:
            self._story_script_path = story_script_path
        if (session_script_path is None):
            self._session_script_path = ""
        else:
            self._session_script_path = session_script_path

        # We get a reference to the main game node's queue so we can
        # give it messages.
        self._game_node_queue = queue

        # Set up personalization manager so we can get personalized
        # stories for this participant.
        self._personalization_man = ss_personalization_manager(session,
            participant, database, percent_correct_to_level)

        # Set up script parser.
        self._script_parser = ss_script_parser()
        # These are other script parsers we may use later (story scripts and
        # repeating scripts get their own parser instances).
        self._story_parser = None
        self._repeat_parser = None
        # If we have a repeating script, we will need to save its filename so
        # we can re-load it when we repeat it.
        self._repeating_script_name = ""

        # Get session script from script parser and give to the script
        # parser. Story scripts we will get later from the
        # personalization manager.
        try:
            self._script_parser.load_script(self._script_path
                + self._session_script_path
                + self._script_parser.get_session_script(session))
        except IOError:
            self._logger.exception("Script parser could not open session "
                + "script!")
            # Pass exception up so whoever wanted a script handler knows
            # they didn't get a script.
            raise

        # Initialize flags and counters:
        # Set up counter for how many stories have been told this session.
        self._stories_told = 0

        # When we start, we are not currently telling a story or
        # repeating a script, or at the end of the game.
        self._doing_story = False
        self._repeating = False
        self._end_game = False

        # When we start, we are not asking a question, and so there is no
        # current question type or number.
        self._current_question_type = ""
        self._current_question_num = 0

        # For counting repetitions of a repeating script.
        self._repetitions = 0

        # The script will tell us the max number of repetitions.
        self._max_repetitions = 1

        # The script will tell us the max number of stories.
        self._max_stories = 1

        # The maximum number of incorrect user responses before the
        # game moves on (can also be set in the script).
        self._max_incorrect_responses = 2

        # Set the maximum game time, in minutes. This can also be set
        # in the game script.
        self._max_game_time = datetime.timedelta(minutes=10)

        # Sometimes we may need to know what the last user response we
        # waited for was, and how long we waited.
        self._last_response_to_get = None
        self._last_response_timeout = None

        # Save start time so we can check whether we've run out of time.
        self._start_time = datetime.datetime.now()

        # Initialize total time paused.
        self._total_time_paused = datetime.timedelta(seconds=0)

        # Initialize pause start time in case someone calls the resume
        # game timer function before the pause game function.
        self._pause_start_time = None
def iterate_once(self):
""" Play the next commands from the script """
try:
# We check whether we've reached the game time limit when
# we load new stories or when we are about to start a
# repeating script over again.
# Get next line from story script.
if self._doing_story and self._story_parser is not None:
self._logger.debug("Getting next line from story script.")
line = self._story_parser.next_line()
# If not in a story, get next line from repeating script.
elif self._repeating and self._repeat_parser is not None:
self._logger.debug("Getting next line from repeating script.")
line = self._repeat_parser.next_line()
# If not repeating, get next line from main session script.
else:
self._logger.debug("Getting next line from main session script.")
line = self._script_parser.next_line()
# We didn't read a line!
# If we get a stop iteration exception, we're at the end of the
# file and will stop iterating over lines.
except StopIteration as e:
# If we were doing a story, now we're done, go back to
# the previous script.
if self._doing_story:
self._logger.info("Finished story " + str(self._stories_told + 1)
+ " of " + str(self._max_stories) + "!")
self._doing_story = False
self._stories_told += 1
# If we were repeating a script, increment counter.
elif self._repeating:
self._repetitions += 1
self._logger.info("Finished repetition " + str(self._repetitions)
+ " of " + str(self._max_repetitions) + "!")
# If we've done enough repetitions, or if we've run out
# of game time, go back to the main session script (set
# the repeating flag to false).
if (self._repetitions >= self._max_repetitions) \
or self._end_game \
or ((datetime.datetime.now() - self._start_time) \
- self._total_time_paused >= self._max_game_time):
self._logger.info("Done repeating!")
self._repeating = False
# Otherwise, we need to repeat again. Reload the repeating
# script.
else:
# Create a script parser for the filename provided,
# assume it is in the session_scripts directory.
self._repeat_parser = ss_script_parser()
try:
self._repeat_parser.load_script(self._script_path
+ self._session_script_path
+ self._repeating_script_name)
except IOError:
self._logger.exception("Script parser could not open "
+ "session script to repeat! Skipping REPEAT line.")
sself._repeating = False
return
# Otherwise we're at the end of the main script.
else:
self._logger.info("No more script lines to get!")
# Pass on the stop iteration exception, with additional
# information about the player's performance during the
# game, formatted as a json object.
emotion, tom, order = self._personalization_man. \
get_performance_this_session()
performance = {}
if emotion is not None:
performance["child-emotion-question-accuracy"] = \
emotion
if tom is not None:
performance["child-tom-question-accuracy"] = \
tom
if order is not None:
performance["child-order-question-accuracy"] = \
order
e.performance = json.dumps(performance)
raise
except ValueError:
# We may get this exception if we try to get the next line
# but the script file is closed. If that happens, something
# probably went wrong with ending playback of a story script
# or a repeating script. End repeating and end the current
# story so we go back to the main session script.
if self._doing_story:
self._doing_story = False
if self._repeating:
self._repeating = False
# Oh no got some unexpected error! Raise it again so we can
# figure out what happened and deal with it during debugging.
except Exception as e:
self._logger.exception("Unexpected exception! Error: %s", e)
raise
# We got a line: parse it!
else:
# Make sure we got a line before we try parsing it. We
# might not get a line if the file has closed or if
# next_line has some other problem.
if not line:
self._logger.warning("[iterate_once] Tried to get next line, "
+ "but got None!")
return
# Got a line - print for debugging.
self._logger.debug("LINE: " + repr(line))
# Parse line!
# Split on tabs.
elements = line.rstrip().split('\t')
self._logger.debug("... " + str(len(elements)) + " elements: \n... "
+ str(elements))
if len(elements) < 1:
self._logger.info("Line had no elements! Going to next line...")
return
# Do different stuff depending on what the first element is.
#########################################################
# Some STORY lines have only one part to the command.
elif len(elements) == 1:
# For STORY lines, play back the next story for this
# participant.
if "STORY" in elements[0]:
self._logger.debug("STORY")
# If line indicates we need to start a story, do so.
self._doing_story = True
# Create a script parser for the filename provided,
# assuming it is in the story scripts directory.
self._story_parser = ss_script_parser()
try:
self._story_parser.load_script(self._script_path
+ self._story_script_path
+ self._personalization_man.get_next_story_script())
except IOError:
self._logger.exception("Script parser could not open "
+ "story script! Skipping STORY line.")
self._doing_story = False
except AttributeError:
self._logger.exception("Script parser could not open "
+ "story script because no script was loaded! "
+ "Skipping STORY line.")
self._doing_story = False
except NoStoryFound:
self._logger.exception("Script parser could not get \
the next story script because no script was \
found by the personalization manager! \
Skipping STORY line.")
self._doing_story = False
# Line has 2+ elements, so check the other commands.
#########################################################
# For STORY SETUP lines, pick the next story to play so
# we can load its graphics and play back the story.
elif "STORY" in elements[0] and "SETUP" in elements[1]:
self._logger.debug("STORY SETUP")
# Pick the next story to play.
self._personalization_man.pick_next_story()
#########################################################
# For ROBOT lines, send command to the robot.
elif "ROBOT" in elements[0]:
self._logger.debug("ROBOT")
# Play a randomly selected story intro from the list.
if "STORY_INTRO" in elements[1]:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._story_intros[
random.randint(0,len(self._story_intros)-1)])
# Play a randomly selected story closing from the list.
elif "STORY_CLOSING" in elements[1]:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._story_closings[
random.randint(0,len(self._story_closings)-1)])
# Send a command to the robot, with properties.
elif len(elements) > 2:
self._ros_node.send_robot_command(elements[1],
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=elements[2])
# Send a command to the robot, without properties.
else:
self._ros_node.send_robot_command(elements[1], "")
#########################################################
# For OPAL lines, send command to Opal game
elif "OPAL" in elements[0]:
self._logger.debug("OPAL")
if "LOAD_ALL" in elements[1] and len(elements) >= 3:
# Load all objects listed in file -- the file is
# assumed to have properties for one object on each
# line.
to_load = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
for obj in to_load:
self._ros_node.send_opal_command("LOAD_OBJECT", obj)
# Get the next story and load graphics into game.
elif "LOAD_STORY" in elements[1]:
self._load_next_story()
# Load answers for game.
elif "LOAD_ANSWERS" in elements[1] and len(elements) >= 3:
self._load_answers(elements[2])
# Send an opal command, with properties.
elif len(elements) > 2:
self._ros_node.send_opal_command(elements[1], elements[2])
# Send an opal command, without properties.
else:
self._ros_node.send_opal_command(elements[1])
#########################################################
# For PAUSE lines, sleep for the specified number of
# seconds before continuing script playback.
elif "PAUSE" in elements[0] and len(elements) >= 2:
self._logger.debug("PAUSE")
try:
time.sleep(int(elements[1]))
except ValueError:
self._logger.exception("Not pausing! PAUSE command was "
+ "given an invalid argument (should be an int)!")
#########################################################
# For ADD lines, get a list of robot commands that can be
# used in response to particular triggers from the specified
# file and save them for later use -- all ADD lines should
# have 3 elements.
elif "ADD" in elements[0] and len(elements) >= 3:
self._logger.debug("ADD")
# Read list of responses from the specified file into the
# appropriate variable.
try:
if "INCORRECT_RESPONSES" in elements[1]:
self._incorrect_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._incorrect_responses)))
if "CORRECT_RESPONSES" in elements[1]:
self._correct_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._correct_responses)))
elif "START_RESPONSES" in elements[1]:
self._start_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._start_responses)))
elif "NO_RESPONSES" in elements[1]:
self._no_responses = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._no_responses)))
elif "ANSWER_FEEDBACK" in elements[1]:
self._answer_feedback = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._answer_feedback)))
elif "STORY_INTROS" in elements[1]:
self._story_intros = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._story_intros)))
elif "STORY_CLOSINGS" in elements[1]:
self._story_closings = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._story_closings)))
elif "TIMEOUT_CLOSINGS" in elements[1]:
self._timeout_closings = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("Got "
+ str(len(self._timeout_closings)))
elif "MAX_STORIES_REACHED" in elements[1]:
self._max_stories_reached = self._read_list_from_file(
self._script_path + self._session_script_path +
elements[2])
self._logger.debug("... Got "
+ str(len(self._max_stories_reached)))
except IOError:
self._logger.exception("Failed to add responses!")
else:
self._logger.info("Added " + elements[1])
#########################################################
# For SET lines, set the specified constant.
elif "SET" in elements[0] and len(elements) >= 3:
self._logger.debug("SET")
if "MAX_INCORRECT_RESPONSES" in elements[1]:
self._max_incorrect_responses = int(elements[2])
self._logger.info("Set MAX_INCORRECT_RESPONSES to " +
elements[2])
elif "MAX_GAME_TIME" in elements[1]:
self._max_game_time = datetime.timedelta(minutes=
int(elements[2]))
self._logger.info("Set MAX_GAME_TIME to " + elements[2])
elif "MAX_STORIES" in elements[1]:
self._max_stories = int(elements[2])
self._logger.info("Set MAX_STORIES to " + elements[2])
#########################################################
# For WAIT lines, wait for the specified user response,
# or for a timeout.
elif "WAIT" in elements[0] and len(elements) >= 3:
self._logger.debug("WAIT")
self.wait_for_response(elements[1], int(elements[2]))
#########################################################
# For QUESTION lines, save the question type and question number
# for later use.
elif "QUESTION" in elements[0] and len(elements) >= 3:
self._current_question_type = elements[1]
self._current_question_num = int(elements[2])
self._logger.info("Current question: type " + elements[1]
+ ", num " + elements[2])
#########################################################
# For REPEAT lines, repeat lines in the specified script
# file the specified number of times.
elif "REPEAT" in elements[0] and len(elements) >= 3:
self._logger.debug("REPEAT")
self._repeating = True
self._repetitions = 0
# Create a script parser for the filename provided,
# assume it is in the session_scripts directory.
self._repeat_parser = ss_script_parser()
self._repeating_script_name = elements[2]
try:
self._repeat_parser.load_script(self._script_path
+ self._session_script_path
+ elements[2])
except IOError:
self._logger.exception("Script parser could not open "
+ "session script to repeat! Skipping REPEAT line.")
self._repeating = False
return
# Figure out how many times we should repeat the script.
if "MAX_STORIES" in elements[1]:
try:
self._max_repetitions = self._max_stories
except AttributeError:
self._logger.exception("Tried to set MAX_REPETITIONS to"
+ " MAX_STORIES, but MAX_STORIES has not been "
+ "set . Setting to 1 repetition instead.")
self._max_repetitions = 1
else:
self._max_repetitions = int(elements[1])
self._logger.debug("Going to repeat " + elements[2] + " " +
str(self._max_repetitions) + " time(s).")
def _read_list_from_file(self, filename):
""" Read a list of robot responses from a file, return a list
of the lines from the file
"""
# Open script for reading.
try:
fh = open(filename, "r")
return fh.readlines()
except IOError as e:
self._logger.exception("Cannot open file: " + filename)
# Pass exception up so anyone trying to add a response list
# from a script knows it didn't work.
raise
def wait_for_response(self, response_to_get, timeout):
""" Wait for a user response or wait until the specified time
has elapsed. If the response is incorrect, allow multiple
attempts up to the maximum number of incorrect responses.
"""
for i in range(0, self._max_incorrect_responses):
self._logger.info("Waiting for user response...")
# Save the response we were trying to get in case we need
# to try again.
self._last_response_to_get = response_to_get
self._last_response_timeout = timeout
# Wait for the specified type of response, or until the
# specified time has elapsed.
response, answer = self._ros_node.wait_for_response(response_to_get,
datetime.timedelta(seconds=int(timeout)))
# After waiting for a response, need to play back an
# appropriate robot response.
# If we didn't receive a response, then it was probably
# because we didn't send a valid response to wait for.
# This is different from a TIMEOUT since we didn't time
# out -- we just didn't get a response of any kind.
if not response:
self._logger.info("Done waiting -- did not get valid response!")
return False
# If we received no user response before timing out, send a
# TIMEOUT message and pause the game.
elif "TIMEOUT" in response:
# Announce we timed out.
self._ros_node.send_game_state("TIMEOUT")
# Pause game and wait to be told whether we should try
# waiting again for a response or whether we should
# skip it and move on. Queue up the pause command so the
# main game loop can take action.
self._game_node_queue.put("PAUSE")
# Announce the game is pausing.
self._ros_node.send_game_state("PAUSE")
# Indicate that we did not get a response.
# We don't break and let the user try again because the
# external game monitor deals with TIMEOUT events, and
# will tell us whether to try waiting again or to just
# skip waiting for this response.
return False
# If response was INCORRECT, randomly select a robot
# response to an incorrect user action.
elif "INCORRECT" in response:
# Record incorrect response in the db.
self._personalization_man.record_user_response(
self._current_question_num, self._current_question_type,
answer)
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._incorrect_responses[random.randint(0,
len(self._incorrect_responses)-1)])
except AttributeError:
self._logger.exception("Could not play an incorrect "
+ "response. Maybe none were loaded?")
# Don't break so we allow the user a chance to respond
# again.
# If response was NO, randomly select a robot response to
# the user selecting no.
elif "NO" in response:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._no_responses[random.randint(0,
len(self._no_responses)-1)])
except AttributeError:
self._logger.exception("Could not play a response to "
+ "user's NO. Maybe none were loaded?")
# Don't break so we allow the user a chance to respond
# again.
# If response was CORRECT, randomly select a robot response
# to a correct user action, highlight the correct answer,
# and break out of response loop.
elif "CORRECT" in response:
# Record correct response in the db.
self._personalization_man.record_user_response(
self._current_question_num, self._current_question_type,
answer)
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._correct_responses[random.randint(0,
len(self._correct_responses)-1)])
self._ros_node.send_opal_command("SHOW_CORRECT")
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._answer_feedback[random.randint(0,
len(self._answer_feedback)-1)])
# Pause after speaking before hiding correct again
time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)
self._ros_node.send_opal_command("HIDE_CORRECT")
except AttributeError:
self._logger.exception("Could not play a correct "
+ "response or could not play robot's answer"
+ " feedback. Maybe none were loaded?")
# Break from the for loop so we don't give the user
# a chance to respond again.
break
# If response was START, randomly select a robot response to
# the user selecting START, and break out of response loop.
elif "START" in response:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._start_responses[random.randint(0,
len(self._start_responses)-1)])
except AttributeError:
self._logger.exception("Could not play response to"
+ "user's START. Maybe none were loaded?")
# Break from the for loop so we don't give the user
# a chance to respond again.
break
# We exhausted our allowed number of user responses, so have
# the robot do something instead of waiting more.
else:
# If user was never correct, play robot's correct answer
# feedback and show which answer was correct in the game.
if "CORRECT" in response_to_get:
try:
self._ros_node.send_opal_command("SHOW_CORRECT")
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(
self.WAIT_TIME)),
properties=self._answer_feedback[random.randint(0,
len(self._answer_feedback)-1)])
# Pause after speaking before hiding correct again.
time.sleep(self.ANSWER_FEEDBACK_PAUSE_TIME)
self._ros_node.send_opal_command("HIDE_CORRECT")
except AttributeError:
self._logger.exception("Could not play robot's answer"
+ " feedback! Maybe none were loaded?")
# If user never selects START (which is used to ask the user
# if they are ready to play), stop all stories and repeating
# scripts, continue with main script so we go to the end.
elif "START" in response_to_get:
self._repeating = False
self.story = False
# We got a user response and responded to it!
return True
def skip_wait_for_response(self):
""" Skip waiting for a response; treat the skipped response as
a NO or INCORRECT response.
"""
# If the response to wait for was CORRECT or INCORRECT,
# randomly select a robot response to an incorrect user
# action.
if "CORRECT" in self._last_response_to_get:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
properties=self._incorrect_responses[random.randint(0, \
len(self._incorrect_responses)-1)])
except AttributeError:
self._logger.exception("Could not play an incorrect "
+ "response. Maybe none were loaded?")
# If response to wait for was YES or NO, randomly select a
# robot response for a NO user action.
elif "NO" in self._last_response_to_get:
try:
self._ros_node.send_robot_command("DO",
response="ROBOT_NOT_SPEAKING",
timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
properties=self._no_responses[random.randint(0,
len(self._no_responses)-1)])
except AttributeError:
self._logger.exception("Could not play a response to "
+ "user's NO. Maybe none were loaded?")
def set_end_game(self):
""" End the game gracefully -- stop any stories or repeating
scripts, go back to main session script and finish.
"""
# For now, we just need to set a flag indicating we should end
# the game. When we check whether we should load another story
# or repeat a repeating script, this flag will be used to skip
# back to the main session script, to the end of the game.
self._end_game = True
def set_start_level(self, level):
""" When the game starts, a level to start at can be provided.
Pass this to the personalization manager to deal with, since it
deals with picking the levels of stories to play.
"""
self._personalization_man.set_start_level(level)
def pause_game_timer(self):
""" Track how much time we spend paused so when we check
whether we have reached the max game time, we don't include
time spent paused.
"""
self._pause_start_time = datetime.datetime.now()
def resume_game_timer(self):
""" Add how much time we spent paused to our total time spent
paused.
"""
# Since this function could theoretically be called before we
# get a call to pause_game_timer, we have to check that there
# is a pause start time, and then later, reset it so we can't
# add the same pause length multiple times to our total pause
# time.
if self._pause_start_time is not None:
self._total_time_paused += datetime.datetime.now() \
- self._pause_start_time
# Reset pause start time.
self._pause_start_time = None
def wait_for_last_response_again(self):
""" Wait for the same response that we just waited for again,
with the same parameters for the response and the timeout.
"""
return self.wait_for_response(self._last_response_to_get,
self._last_response_timeout)
def _load_answers(self, answer_list):
""" Load the answer graphics for this story """
# We are given a list of words that indicate what the answer
# options are. By convention, the first word is probably the
# correct answer; the others are incorrect answers. However,
# we won't set this now because this convention may not hold.
# We expect the SET_CORRECT OpalCommand to be used to set
# which answers are correct or incorrect.
# split the list of answers on commas.
answers = answer_list.strip().split(',')
# Shuffle answers to display them in a random order.
random.shuffle(answers)
# Load in the graphic for each answer.
for answer in answers:
toload = {}
# Remove whitespace from name before using it.
toload["name"] = answer.strip()
toload["tag"] = "PlayObject"
toload["slot"] = answers.index(answer) + 1
toload["draggable"] = False
toload["isAnswerSlot"] = True
self._ros_node.send_opal_command("LOAD_OBJECT", json.dumps(toload))
def _load_next_story(self):
    """ Get the next story, set up the game scene with scene and
    answer slots, and load scene graphics.

    If the maximum number of stories has been told, the maximum game
    time (excluding time spent paused) has elapsed, or the game was
    told to end, no story is loaded; instead the robot plays a
    "max stories reached" response and any repeating script is
    stopped.
    """
    # If we've told the max number of stories, or if we've reached
    # the max game time, don't load another story even though we
    # were told to load one -- instead, play error message from
    # robot saying we have to be done now.
    if self._stories_told >= self._max_stories \
            or ((datetime.datetime.now() - self._start_time) \
            - self._total_time_paused >= self._max_game_time) or self._end_game:
        self._logger.info("We were told to load another story, but we've "
            + "already played the maximum number of stories or we ran"
            " out of time! Skipping and ending now.")
        self._doing_story = False
        try:
            # BUGFIX: previously the random index was drawn from
            # len(self._no_responses) but used to index
            # self._max_stories_reached, which raises IndexError when
            # the two lists differ in length. random.choice picks
            # uniformly from the correct list.
            self._ros_node.send_robot_command("DO",
                response="ROBOT_NOT_SPEAKING",
                timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),
                properties=random.choice(self._max_stories_reached))
        except AttributeError:
            self._logger.exception("Could not play a max stories reached "
                + "response. Maybe none were loaded?")
        # We were either told to play another story because a
        # repeating script loads a story and the max number of
        # repetitions is greater than the max number of stories,
        # so more stories were requested than can be played, or
        # because we ran out of time and were supposed to play more
        # stories than we have time for. Either way, stop the
        # repeating script if there is one.
        self._repeating = False
        return

    # Get the details for the next story.
    try:
        scenes, in_order, num_answers = \
            self._personalization_man.get_next_story_details()
    except NoStoryFound:
        # If no story was found, we can't load the story!
        self._logger.exception("Cannot load story - no story to load was" +
            " found!")
        self._doing_story = False
        return

    # Set up the story scene in the game.
    setup = {}
    setup["numScenes"] = len(scenes)
    setup["scenesInOrder"] = in_order
    setup["numAnswers"] = num_answers
    self._ros_node.send_opal_command("SETUP_STORY_SCENE", json.dumps(setup))

    # Load the scene graphics. When scenes are shown out of order the
    # player must drag them into place, so record each scene's correct
    # slot and make the graphic draggable.
    for scene in scenes:
        toload = {}
        toload["name"] = "scenes/" + scene
        toload["tag"] = "PlayObject"
        toload["slot"] = scenes.index(scene) + 1
        if not in_order:
            toload["correctSlot"] = scenes.index(scene) + 1
        toload["draggable"] = False if in_order else True
        toload["isAnswerSlot"] = False
        self._ros_node.send_opal_command("LOAD_OBJECT", json.dumps(toload))

    # Tell the personalization manager that we loaded the story so
    # it can keep track of which stories have been played.
    self._personalization_man.record_story_loaded()
| {
"content_hash": "24d4b768acd5075cacf7ea73929138e3",
"timestamp": "",
"source": "github",
"line_count": 865,
"max_line_length": 81,
"avg_line_length": 48.795375722543355,
"alnum_prop": 0.5238106520090978,
"repo_name": "personal-robots/sar_social_stories",
"id": "297b2e13ec4021fd48b2903dc297b505a6188cf4",
"size": "43375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ss_script_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "180164"
}
],
"symlink_target": ""
} |
import logging
import os
from addons.base.apps import BaseAddonAppConfig
from addons.gitlab.api import GitLabClient, ref_to_params
from addons.gitlab.exceptions import NotFoundError, GitLabError
from addons.gitlab.utils import get_refs, check_permissions
from website.util import rubeus
logger = logging.getLogger(__name__)
def gitlab_hgrid_data(node_settings, auth, **kwargs):
    """Build the hgrid (file-grid) root data for a node's linked GitLab repo.

    Returns a single-element list with the addon root produced by
    ``rubeus.build_addon_root``, or ``None`` (implicitly or explicitly)
    when the node has no complete GitLab config, the repo cannot be
    read, or the refs cannot be resolved.
    """
    # Quit if no repo linked
    if not node_settings.complete:
        return

    connection = GitLabClient(external_account=node_settings.external_account)

    # Initialize repo here in the event that it is set in the privacy check
    # below. This potentially saves an API call in _check_permissions, below.
    repo = None

    # Quit if privacy mismatch and not contributor
    node = node_settings.owner
    if node.is_public or node.is_contributor(auth.user):
        try:
            repo = connection.repo(node_settings.repo_id)
        except NotFoundError:
            logger.error('Could not access GitLab repo')
            return None

    # Resolve the requested branch/sha (falling back to defaults).
    try:
        branch, sha, branches = get_refs(node_settings, branch=kwargs.get('branch'), sha=kwargs.get('sha'), connection=connection)
    except (NotFoundError, GitLabError):
        logger.error('GitLab repo not found')
        return

    if branch is not None:
        ref = ref_to_params(branch, sha)
        can_edit = check_permissions(node_settings, auth, connection, branch, sha, repo=repo)
    else:
        # No branch could be resolved: read-only view with no ref params.
        ref = ''
        can_edit = False

    permissions = {
        'edit': can_edit,
        'view': True,
        'private': node_settings.is_private
    }
    # NOTE(review): external_account.oauth_secret appears to hold the GitLab
    # host name here -- confirm against the external-account model.
    urls = {
        'upload': node_settings.owner.api_url + 'gitlab/file/' + ref,
        'fetch': node_settings.owner.api_url + 'gitlab/hgrid/' + ref,
        'branch': node_settings.owner.api_url + 'gitlab/hgrid/root/' + ref,
        'zip': 'https://{0}/{1}/repository/archive.zip?branch={2}'.format(node_settings.external_account.oauth_secret, repo.path_with_namespace, ref),
        'repo': 'https://{0}/{1}/tree/{2}'.format(node_settings.external_account.oauth_secret, repo.path_with_namespace, ref)
    }

    branch_names = [each.name for each in branches]
    if not branch_names:
        branch_names = [branch]  # if repo un-init-ed then still add default branch to list of branches

    return [rubeus.build_addon_root(
        node_settings,
        repo.path_with_namespace,
        urls=urls,
        permissions=permissions,
        branches=branch_names,
        private_key=kwargs.get('view_only', None),
        default_branch=repo.default_branch,
    )]
# Absolute path of this addon package; used to locate its bundled templates.
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'gitlab_node_settings.mako')
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'gitlab_user_settings.mako')
class GitLabAddonConfig(BaseAddonAppConfig):
    """App config describing the GitLab storage addon."""

    name = 'addons.gitlab'
    label = 'addons_gitlab'
    full_name = 'GitLab'
    short_name = 'gitlab'
    # The addon has both account-level and node-level configuration views.
    configs = ['accounts', 'node']
    categories = ['storage']
    owners = ['user', 'node']
    # Files are browsable through the hgrid file tree.
    has_hgrid_files = True
    max_file_size = 100  # MB
    node_settings_template = NODE_SETTINGS_TEMPLATE
    user_settings_template = USER_SETTINGS_TEMPLATE

    @property
    def get_hgrid_data(self):
        """Return the callable that builds hgrid file-tree data."""
        return gitlab_hgrid_data

    # Log action identifiers recorded for GitLab-related node events.
    FILE_ADDED = 'gitlab_file_added'
    FILE_REMOVED = 'gitlab_file_removed'
    FILE_UPDATED = 'gitlab_file_updated'
    FOLDER_CREATED = 'gitlab_folder_created'
    NODE_AUTHORIZED = 'gitlab_node_authorized'
    NODE_DEAUTHORIZED = 'gitlab_node_deauthorized'
    NODE_DEAUTHORIZED_NO_USER = 'gitlab_node_deauthorized_no_user'
    REPO_LINKED = 'gitlab_repo_linked'

    actions = (
        FILE_ADDED,
        FILE_REMOVED,
        FILE_UPDATED,
        FOLDER_CREATED,
        NODE_AUTHORIZED,
        NODE_DEAUTHORIZED,
        NODE_DEAUTHORIZED_NO_USER,
        REPO_LINKED)

    @property
    def routes(self):
        """Flask route definitions for this addon's API (lazy import to
        avoid a circular dependency at app-registry time)."""
        from . import routes
        return [routes.api_routes]

    @property
    def user_settings(self):
        return self.get_model('UserSettings')

    @property
    def node_settings(self):
        return self.get_model('NodeSettings')
| {
"content_hash": "c872b8f6f8169ef7461910dc1b09ea4a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 150,
"avg_line_length": 33.256,
"alnum_prop": 0.6564830406543181,
"repo_name": "sloria/osf.io",
"id": "7d597ad35f132c3a015ca2308ad73982a2d05b53",
"size": "4157",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "addons/gitlab/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109070"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "263083"
},
{
"name": "JavaScript",
"bytes": "1856674"
},
{
"name": "Mako",
"bytes": "690812"
},
{
"name": "Python",
"bytes": "8397175"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import WebCrawler
import CMSDetector

# Single shared crawler instance used by every step of the recon below.
Crawler = WebCrawler.Crawler()
def request_init():
    """Prompt for a target URL, then fetch and persist the page."""
    target = "http://" + str(input("Enter url to crawl and fuck (e.g. www.noobs.com) : "))
    Crawler.request_set(target)
    print("\nFuck Scrap just started crawling ...\n")
    print("Sending request ...")
    Crawler.request_send()
    Crawler.request_save()
    Crawler.link_get()
def link_list():
    """Enumerate and report all anchor-tag links found on the page."""
    print("\n** enumerating anchor tags ** \n")
    Crawler.link_enumerate()
    print("\n---> Found " + str(Crawler.link_page_counts()) + " links on this page\n")
    print("\n** end enumerating anchor tags **\n")
def linkrel_list():
    """Enumerate and report all <link rel=...> resources on the page."""
    print("\n** enumerating link rel resources **\n")
    Crawler.linkrel_links_get()
    Crawler.linkrel_links_enumerate()
    print("\n---> Found " + str(Crawler.linkrel_links_counts()) + " resources included in this page")
    print("\n** end enumerating link rel resources **\n")
def script_list():
    """Enumerate and report all script sources referenced by the page."""
    print("\n** enumerating scripts sources **\n")
    Crawler.script_links_get()
    Crawler.script_links_enumerate()
    print("\n---> Found " + str(Crawler.script_links_counts()) + " scripts included in this page")
    print("\n** end enumerating scripts sources ** \n")
def detect_cms():
    """Check whether the site runs WordPress and list its well-known paths."""
    print("\n** Detecting CMS **")
    detector = CMSDetector.CMSDetect(Crawler.request_get())
    if detector.is_wordpress():
        print("\n\tFound : The website is running on wordpress")
        print("\n\t-->Enumerating verified URLs\n")
        print("\t Admin Path : " + str(detector.wordpress_admin_path()))
        print("\t Readme Path : " + str(detector.wordpress_readme_path()))
        print("\t xmlrpc path : " + str(detector.wordpress_xmlrpc_path()))
        print("\t Install path : " + str(detector.wordpress_install_path()))
        print("\t Login path : " + str(detector.wordpress_login_path()))
    else:
        print("\n\tThe website may not be running on a CMS ")
    print("\n** end detecting CMS")
def recon_close():
    """Persist the recon results and report where they were written."""
    Crawler.recon_save()
    print("******************************************\n")
    print("ReconResult save at : ReconFolder/ReconResults/ReconResults.html")
    print("\n******************************************\n")
def main():
    """Run the full recon pipeline end to end."""
    request_init()
    link_list()
    linkrel_list()
    script_list()
    detect_cms()
    recon_close()


# Guard the entry point so importing this module (e.g. for reuse or
# testing) no longer triggers an interactive crawl as a side effect.
if __name__ == '__main__':
    main()
"content_hash": "f7544715720bafcdb8a11db1fa65aa70",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 97,
"avg_line_length": 33.8235294117647,
"alnum_prop": 0.6239130434782608,
"repo_name": "stinkymonkeyph/FuckScrap",
"id": "434fe85bb9a352952bf77eb1323088e955695229",
"size": "2300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FuckScrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9856"
},
{
"name": "Python",
"bytes": "10492"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from flask_pluginengine import current_plugin
from indico.core.notifications import email_sender, make_email
from indico.core.plugins import get_plugin_template_module
from indico.util.placeholders import replace_placeholders
from indico.web.flask.templating import get_template_module
def make_email_template(template, agreement, email_body=None):
    """Render an agreement e-mail template module.

    Falls back to the agreement definition's default body when no
    explicit *email_body* is given, expands the 'agreement-email'
    placeholders, and renders through the plugin template loader
    when a plugin is currently active.
    """
    render = get_plugin_template_module if current_plugin else get_template_module
    if not email_body:
        email_body = agreement.definition.get_email_body_template(agreement.event).get_body()
    body = replace_placeholders('agreement-email', email_body,
                                definition=agreement.definition, agreement=agreement)
    return render(template, email_body=body)
@email_sender
def notify_agreement_new(agreement, email_body=None, cc_addresses=None, from_address=None):
    """Send the initial agreement request e-mail to the signatory."""
    tpl = make_email_template('events/agreements/emails/agreement_new.html',
                              agreement, email_body)
    return make_email(agreement.person_email, cc_list=cc_addresses,
                      from_address=from_address, template=tpl, html=True)
@email_sender
def notify_agreement_reminder(agreement, email_body=None, cc_addresses=None, from_address=None):
    """Send a reminder e-mail for an agreement that is still pending."""
    tpl = make_email_template('events/agreements/emails/agreement_reminder.html',
                              agreement, email_body)
    return make_email(agreement.person_email, cc_list=cc_addresses,
                      from_address=from_address, template=tpl, html=True)
@email_sender
def notify_new_signature_to_manager(agreement):
    """Notify all event managers that an agreement was just signed."""
    tpl = get_template_module('events/agreements/emails/new_signature_email_to_manager.txt',
                              agreement=agreement)
    return make_email(agreement.event.all_manager_emails, template=tpl)
| {
"content_hash": "009c9e69b141d3e5ae2644ad9f02d4a9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 118,
"avg_line_length": 48.513513513513516,
"alnum_prop": 0.7520891364902507,
"repo_name": "mic4ael/indico",
"id": "0747c7a493992ce42df9fa591b988578f1924999",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/agreements/notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""TOLO Sauna (non-binary, general) sensors."""
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ToloSaunaCoordinatorEntity, ToloSaunaUpdateCoordinator
from .const import DOMAIN
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up (non-binary, general) sensors for TOLO Sauna."""
    coordinator: ToloSaunaUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    # Register the two diagnostic sensors backed by the shared coordinator.
    entities = [
        ToloWaterLevelSensor(coordinator, entry),
        ToloTankTemperatureSensor(coordinator, entry),
    ]
    async_add_entities(entities)
class ToloWaterLevelSensor(ToloSaunaCoordinatorEntity, SensorEntity):
    """Sensor for tank water level."""

    # Diagnostic entity: shown in the device's diagnostic section.
    _attr_entity_category = EntityCategory.DIAGNOSTIC
    _attr_name = "Water Level"
    _attr_icon = "mdi:waves-arrow-up"
    _attr_state_class = SensorStateClass.MEASUREMENT
    _attr_native_unit_of_measurement = PERCENTAGE

    def __init__(
        self, coordinator: ToloSaunaUpdateCoordinator, entry: ConfigEntry
    ) -> None:
        """Initialize TOLO Sauna tank water level sensor entity."""
        super().__init__(coordinator, entry)
        # Unique ID is derived from the config entry so it stays stable.
        self._attr_unique_id = f"{entry.entry_id}_water_level"

    @property
    def native_value(self) -> int:
        """Return current tank water level."""
        return self.coordinator.data.status.water_level_percent
class ToloTankTemperatureSensor(ToloSaunaCoordinatorEntity, SensorEntity):
    """Sensor for tank temperature."""

    # Diagnostic entity: shown in the device's diagnostic section.
    _attr_entity_category = EntityCategory.DIAGNOSTIC
    _attr_name = "Tank Temperature"
    _attr_device_class = SensorDeviceClass.TEMPERATURE
    _attr_state_class = SensorStateClass.MEASUREMENT
    _attr_native_unit_of_measurement = TEMP_CELSIUS

    def __init__(
        self, coordinator: ToloSaunaUpdateCoordinator, entry: ConfigEntry
    ) -> None:
        """Initialize TOLO Sauna tank temperature sensor entity."""
        super().__init__(coordinator, entry)
        # Unique ID is derived from the config entry so it stays stable.
        self._attr_unique_id = f"{entry.entry_id}_tank_temperature"

    @property
    def native_value(self) -> int:
        """Return current tank temperature."""
        return self.coordinator.data.status.tank_temperature
| {
"content_hash": "60ec3ad46e80c7bb9b87c08d8cf7d2b2",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 33.55263157894737,
"alnum_prop": 0.7082352941176471,
"repo_name": "home-assistant/home-assistant",
"id": "bcdb7db016535b7018eab235ad9ee4a6191cc6f2",
"size": "2550",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tolo/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
'''
Description:
This script grabs web text from the given URL input
It expects an input file with each line in the following format:
[ticker] | [date] (e.g. Fri, 15 Jul 2011 15:25:29 GMT) |
[header] (e.g. Summary of Second Quarter Margins for S&P 500 Stocks by Sectors) | [url]
Dependency:
Python 2.6
Created on:
July 13, 2011
Author:
Clint P. George
'''
import time
import os
import string
import urllib2
from pyparsing import *
from lxml.html import fromstring
from lxml.html.clean import Cleaner
from optparse import OptionParser
def pyparsing_html_cleaner(targetHTML):
    """Strip tags, scripts, comments and entities from HTML via pyparsing.

    Returns plain text with HTML entities translated to characters and
    runs of blank lines collapsed to single newlines.
    """
    erase = replaceWith("")

    # Remove <script>...</script> elements including their bodies.
    script_open, script_close = makeHTMLTags("script")
    script_body = script_open + SkipTo(script_close) + script_close
    script_body.setParseAction(erase)

    # Remove every other opening/closing tag and HTML comments.
    any_open, any_close = makeHTMLTags(Word(alphas, alphanums + ":_"))
    any_open.setParseAction(erase)
    any_close.setParseAction(erase)
    htmlComment.setParseAction(erase)

    # Translate entities such as &amp; into their characters.
    commonHTMLEntity.setParseAction(replaceHTMLEntity)

    # first pass, strip out tags and translate entities
    stripped = (htmlComment | script_body | commonHTMLEntity
                | any_open | any_close).transformString(targetHTML)

    # first pass leaves many blank lines, collapse these down
    blank_run = LineEnd() + OneOrMore(LineEnd())
    blank_run.setParseAction(replaceWith("\n"))
    return blank_run.transformString(stripped)
def lxml_html_cleaner(html):
    """Extract ASCII plain text from an HTML document body using lxml."""
    doc = fromstring(html)
    # Structural/visual tags to drop outright while keeping their text.
    drop_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p',
                 'div', 'span', 'img', 'area', 'map']
    cleaner = Cleaner(meta=True, safe_attrs_only=False, page_structure=False,
                      scripts=True, style=True, links=True,
                      remove_tags=drop_tags)
    body = doc.xpath('/html/body')[0]
    return cleaner.clean_html(body).text_content().encode('ascii', 'ignore')
def make_valid_name(file_name):
    """Return *file_name* with every character outside the filesystem-safe
    set (letters, digits, '-', '_', '(', ')' and space) removed."""
    allowed = set("-_() ") | set(string.ascii_letters) | set(string.digits)
    return ''.join(ch for ch in file_name if ch in allowed)
def download_raw_html(input_file_name, output_dir, verbose):
    """Fetch and save every article listed in *input_file_name*.

    Each input line is 'ticker | date | header | url'. A per-date,
    per-ticker directory is created under *output_dir*; the raw HTML
    and a cleaned plain-text (.txt) copy are written there. Sleeps
    five seconds between articles to throttle requests.
    """
    with open(input_file_name, "r") as fp:
        count = 0
        for article_line in fp:
            count += 1
            # Fields: [0] ticker, [1] date, [2] header, [3] url.
            ss = article_line.strip().split('|')
            # Creates a new directory for date
            tm = time.strptime(ss[1].strip(), "%a, %d %b %Y %H:%M:%S GMT")
            dir_name = time.strftime('%Y%m%d', tm)
            if not os.path.exists(os.path.join(output_dir, dir_name)):
                os.makedirs(os.path.join(output_dir, dir_name))
                if verbose:
                    print 'Created dir: ', os.path.join(output_dir, dir_name)
            # Creates a new directory for ticker
            ticker = ss[0].strip()
            if not os.path.exists(os.path.join(output_dir, dir_name, ticker)):
                os.makedirs(os.path.join(output_dir, dir_name, ticker))
                if verbose:
                    print 'Created dir: ', os.path.join(output_dir, dir_name, ticker)
            try:
                if verbose:
                    print 'Processing ', ss[0], ss[2], '...'
                html = urllib2.urlopen(ss[3]).read()
                file_name = make_valid_name(ss[2].strip())
                # Save the raw HTML alongside a cleaned plain-text copy.
                # NOTE(review): the inner 'fp' shadows the input-file handle;
                # iteration still works because the for loop holds its own
                # reference, but renaming the inner variable would be clearer.
                with open(os.path.join(output_dir, dir_name, ticker, file_name + ".html"), 'w') as fp:
                    fp.write(html)
                with open(os.path.join(output_dir, dir_name, ticker, file_name + ".txt"), 'w') as fc:
                    fc.write(lxml_html_cleaner(html))
                # with open(os.path.join(output_dir, dir_name, ticker, file_name + ".txt2"), 'w') as fc2:
                #     fc2.write(pyparsing_html_cleaner(html))
                if verbose:
                    print 'Finished processing ', ss[0], ss[2], '. Please wait(5s)...'
            except:
                if verbose:
                    print 'Processing ', ss[0], ss[2], 'failed'
            # Throttle: be polite to the server between downloads.
            time.sleep(5)
if __name__ == '__main__':
    '''
    Main function:
    Example:
        python2.6 grab_news_articles.py -- help
        python2.6 grab_news_articles.py -f /home/clint/Yahoo/20110725.txt -d /home/clint/Yahoo/rawdata
    '''
    # Command-line options: -f URL-list file, -d output directory,
    # -v / -q toggle verbose progress output.
    parser = OptionParser()
    parser.add_option("-f", dest="url_file", action="store", help = "URL file, where we store the html links")
    parser.add_option("-d", dest="download_dir", action="store", default = "./rawdata", help = "output directory, default ./rawdata")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose")
    (options, args) = parser.parse_args()
    # Ensure the download directory exists before fetching anything.
    if not os.path.exists(options.download_dir):
        os.makedirs(options.download_dir)
    download_raw_html(options.url_file, options.download_dir, options.verbose)
| {
"content_hash": "1cb482c86655d8f59010b99d8fea2f6c",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 133,
"avg_line_length": 34.3421052631579,
"alnum_prop": 0.5701149425287356,
"repo_name": "clintpgeorge/hornbill",
"id": "5cd27abce37d6d24cec5d7cced7aa3a1152b52b3",
"size": "5264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finance/grab_news_articles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32523"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import pymongo
import json
import bson.json_util as bju
import pandas as pd
from uuid import UUID
# Our imports
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.tests.common as etc
class TestFilterAccuracy(unittest.TestCase):
    """Unit tests for the accuracy-filtering stage of the intake pipeline."""

    def setUp(self):
        """Load the smoothing fixture into the timeseries DB for a test user."""
        # We need to access the database directly sometimes in order to
        # forcibly insert entries for the tests to pass. But we put the import
        # in here to reduce the temptation to use the database directly elsewhere.
        import emission.core.get_database as edb
        self.testUUID = UUID('079e0f1a-c440-3d7c-b0e7-de160f748e35')
        with open("emission/tests/data/smoothing_data/tablet_2015-11-03") as fp:
            self.entries = json.load(fp,
                object_hook=bju.object_hook)
        tsdb = edb.get_timeseries_db()
        for entry in self.entries:
            entry["user_id"] = self.testUUID
            tsdb.insert_one(entry)
        self.ts = esta.TimeSeries.get_time_series(self.testUUID)

    def tearDown(self):
        """Remove everything the test user wrote, including pipeline state."""
        import emission.core.get_database as edb
        edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
        edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID})

    def testEmptyCallToPriorDuplicate(self):
        """check_prior_duplicate on an empty dataframe must return False."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)

        # Check call to check duplicate with a zero length dataframe
        entry = unfiltered_points_df.iloc[5]
        self.assertEqual(eaicf.check_prior_duplicate(pd.DataFrame(), 0, entry), False)

    def testEmptyCall(self):
        """filter_accuracy on an empty timeseries must be a silent no-op."""
        # Check call to the entire filter accuracy with a zero length timeseries
        import emission.core.get_database as edb
        # BUGFIX/consistency: use delete_many (as in tearDown); the old
        # Collection.remove() API is deprecated and removed in pymongo 4.
        edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
        # We expect that this should not throw
        eaicf.filter_accuracy(self.testUUID)
        self.assertEqual(len(self.ts.get_data_df("background/location")), 0)

    def testCheckPriorDuplicate(self):
        """Duplicated rows earlier in the frame are detected; originals are not."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)
        entry = unfiltered_points_df.iloc[5]
        # Prepend five copies of the entry so index 5 has prior duplicates.
        # NOTE(review): DataFrame.append is removed in pandas >= 2.0; this
        # relies on the pinned pandas version -- switch to pd.concat on upgrade.
        unfiltered_appended_df = pd.DataFrame([entry] * 5).append(unfiltered_points_df).reset_index()
        logging.debug("unfiltered_appended_df = %s" % unfiltered_appended_df[["fmt_time"]].head())

        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 0, entry), False)
        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 5, entry), True)
        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_points_df, 5, entry), False)

    def testConvertToFiltered(self):
        """convert_to_filtered re-keys the entry and strips its object id."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)
        entry_from_df = unfiltered_points_df.iloc[5]
        entry_copy = eaicf.convert_to_filtered(self.ts.get_entry_at_ts("background/location",
                                                                      "metadata.write_ts",
                                                                      entry_from_df.metadata_write_ts))
        self.assertNotIn("_id", entry_copy)
        # Fixed deprecated alias: assertEquals -> assertEqual.
        self.assertEqual(entry_copy["metadata"]["key"], "background/filtered_location")

    def testExistingFilteredLocation(self):
        """check_existing_filtered_location flips once a filtered copy is inserted."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)
        entry_from_df = unfiltered_points_df.iloc[5]
        logging.debug("entry_from_df: data.ts = %s, metadata.ts = %s" %
                      (entry_from_df.ts, entry_from_df.metadata_write_ts))
        self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), False)

        entry_copy = self.ts.get_entry_at_ts("background/location", "metadata.write_ts",
                                             entry_from_df.metadata_write_ts)
        self.ts.insert(eaicf.convert_to_filtered(entry_copy))
        self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), True)

    def testFilterAccuracy(self):
        """End-to-end run: 205 raw points filter down to 124 filtered points."""
        unfiltered_points_df = self.ts.get_data_df("background/location", None)
        self.assertEqual(len(unfiltered_points_df), 205)
        pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
        self.assertEqual(len(pre_filtered_points_df), 0)

        eaicf.filter_accuracy(self.testUUID)
        filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
        self.assertEqual(len(filtered_points_df), 124)
if __name__ == '__main__':
    # Configure logging before handing control to unittest.
    etc.configLogging()
    unittest.main()
| {
"content_hash": "66df731920ace625d8305da3bf157ede",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 101,
"avg_line_length": 47.12820512820513,
"alnum_prop": 0.6775480594849475,
"repo_name": "sunil07t/e-mission-server",
"id": "4350a6f0f52505ce69f938919f0f19a51913c10d",
"size": "5514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emission/tests/analysisTests/intakeTests/TestFilterAccuracy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "711874"
},
{
"name": "HTML",
"bytes": "122542"
},
{
"name": "JavaScript",
"bytes": "6962852"
},
{
"name": "Jupyter Notebook",
"bytes": "99521529"
},
{
"name": "Python",
"bytes": "1800632"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
} |
"""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_raises, assert_allclose, assert_
def test_predict_dates():
    """In-sample predictions and out-of-sample forecasts should carry the
    correct date index, including when simple differencing drops leading
    observations."""
    index = pd.date_range(start='1950-01-01', periods=11, freq='D')
    np.random.seed(324328)
    endog = pd.Series(np.random.normal(size=10), index=index[:-1])

    # Basic test
    mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
    res = mod.filter(mod.start_params)

    # In-sample prediction should have the same index
    pred = res.predict()
    assert_equal(len(pred), mod.nobs)
    assert_equal(pred.index.values, index[:-1].values)
    # Out-of-sample forecasting should extend the index appropriately
    fcast = res.forecast()
    assert_equal(fcast.index[0], index[-1])

    # Simple differencing in the SARIMAX model should eliminate dates of
    # series eliminated due to differencing
    mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
    res = mod.filter(mod.start_params)
    pred = res.predict()
    # In-sample prediction should lose the first index value
    assert_equal(mod.nobs, endog.shape[0] - 1)
    assert_equal(len(pred), mod.nobs)
    assert_equal(pred.index.values, index[1:-1].values)
    # Out-of-sample forecasting should still extend the index appropriately
    fcast = res.forecast()
    assert_equal(fcast.index[0], index[-1])

    # Simple differencing again, this time with a more complex differencing
    # structure (d=2 plus one seasonal period of 4 -> 6 observations lost)
    mod = sarimax.SARIMAX(endog, order=(1, 2, 0), seasonal_order=(0, 1, 0, 4),
                          simple_differencing=True)
    res = mod.filter(mod.start_params)
    pred = res.predict()
    # In-sample prediction should lose the first 6 index values
    assert_equal(mod.nobs, endog.shape[0] - (4 + 2))
    assert_equal(len(pred), mod.nobs)
    assert_equal(pred.index.values, index[4 + 2:-1].values)
    # Out-of-sample forecasting should still extend the index appropriately
    fcast = res.forecast()
    assert_equal(fcast.index[0], index[-1])
def test_memory_no_predicted():
    """Setting memory_no_predicted must drop stored predicted states while
    leaving point forecasts and forecast intervals unchanged."""
    endog = [0.5, 1.2, 0.4, 0.6]

    mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
    res1 = mod.filter([0.5, 1.])
    mod.ssm.memory_no_predicted = True
    res2 = mod.filter([0.5, 1.])

    # Make sure we really didn't store all of the values in res2
    assert_equal(res1.predicted_state.shape, (1, 5))
    assert_(res2.predicted_state is None)
    assert_equal(res1.predicted_state_cov.shape, (1, 1, 5))
    assert_(res2.predicted_state_cov is None)

    # Check that we can't do dynamic in-sample prediction
    assert_raises(ValueError, res2.predict, dynamic=True)
    assert_raises(ValueError, res2.get_prediction, dynamic=True)

    # Make sure the point forecasts are the same
    assert_allclose(res1.forecast(10), res2.forecast(10))

    # Make sure the confidence intervals are the same
    fcast1 = res1.get_forecast(10)
    # BUGFIX: compare against the memory-conserving results object; the
    # original compared res1 with itself, making this check vacuous.
    fcast2 = res2.get_forecast(10)
    assert_allclose(fcast1.summary_frame(), fcast2.summary_frame())
| {
"content_hash": "2cd6f99d4edd2f57a86707bd6f94a16d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 36.37931034482759,
"alnum_prop": 0.6849921011058452,
"repo_name": "jseabold/statsmodels",
"id": "8560a7de979863443b8db999ba56c6817170acd6",
"size": "3165",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "statsmodels/tsa/statespace/tests/test_prediction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
import json
from buildercore import cloudformation
from . import base
from unittest.mock import patch, MagicMock
import botocore
def test_troposphere_v2_template_upgraded_to_v3_template():
    "`cloudformation.read_template` will upgrade any Troposphere v2 string-booleans to v3 literal booleans."
    expected = json.loads(base.fixture("cloudformation/project-with-troposphere-v3-template.json"))
    v2_template_path = base.fixture_path("cloudformation/project-with-troposphere-v2-template.json")
    assert cloudformation._read_template(v2_template_path) == expected
class StackCreationContextManager(base.BaseCase):
    """Tests for the cloudformation.stack_creation context manager."""

    def test_catches_already_existing_stack_and_continues(self):
        # Raising AlreadyExistsException inside the context simulates the
        # botocore error for a stack that already exists; the context
        # manager is expected to absorb it rather than propagate.
        with cloudformation.stack_creation('dummy1--test'):
            raise botocore.exceptions.ClientError(
                {
                    'ResponseMetadata': {'RetryAttempts': 0, 'HTTPStatusCode': 400, 'RequestId': '55408f12-64e6-11e8-b06f-8bac24811007', 'HTTPHeaders': {'x-amzn-requestid': '55408f12-64e6-11e8-b06f-8bac24811007', 'date': 'Thu, 31 May 2018 15:21:47 GMT', 'content-length': '297', 'content-type': 'text/xml', 'connection': 'close'}},
                    'Error': {
                        'Message': 'Stack [dummy1--test] already exists',
                        'Code': 'AlreadyExistsException',
                        'Type': 'Sender'
                    }
                },
                'CreateStack'
            )
class StackInformation(base.BaseCase):
    """Tests for reading CloudFormation stack outputs."""

    @patch('buildercore.cloudformation.core.describe_stack')
    def test_read_output(self, describe_stack):
        # read_output should pull the matching OutputValue out of the
        # (mocked) stack description's 'Outputs' list.
        description = MagicMock()
        description.meta.data = {
            'Outputs': [
                {
                    'OutputKey': 'ElasticLoadBalancer',
                    'OutputValue': 'dummy1--t-ElasticL-19CB72BN8E36S',
                    # ...
                }
            ],
        }
        describe_stack.return_value = description
        self.assertEqual(
            cloudformation.read_output('dummy1--test', 'ElasticLoadBalancer'),
            'dummy1--t-ElasticL-19CB72BN8E36S'
        )
class StackUpdate(base.BaseCase):
    """Tests for updating a CloudFormation stack from a delta."""

    def test_no_updates(self):
        # An empty delta must be accepted without raising.
        cloudformation.update_template('dummy1--test', cloudformation.CloudFormationDelta())
class ApplyDelta(base.BaseCase):
    """Tests for cloudformation.apply_delta merging a delta into a template."""

    def test_apply_delta_may_add_edit_and_remove_resources(self):
        template = {
            'Resources': {
                'A': 1,
                'B': 2,
                'C': 3,
            }
        }
        # Delta: add D=4, edit C->30, remove B.
        cloudformation.apply_delta(template, cloudformation.CloudFormationDelta({'Resources': {'D': 4}}, {'Resources': {'C': 30}}, {'Resources': {'B': 2}}))
        self.assertEqual(template, {'Resources': {'A': 1, 'C': 30, 'D': 4}})

    def test_apply_delta_may_add_components_which_werent_there(self):
        template = {
            'Resources': {
                'A': 1,
            }
        }
        # A delta may introduce a whole new top-level section ('Outputs').
        cloudformation.apply_delta(template, cloudformation.CloudFormationDelta({'Outputs': {'B': 2}}, {}, {}))
        self.assertEqual(template, {'Resources': {'A': 1}, 'Outputs': {'B': 2}})
| {
"content_hash": "6965aa81d241b6a1e1da065f9cb7e4b9",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 331,
"avg_line_length": 43.732394366197184,
"alnum_prop": 0.5900161030595813,
"repo_name": "elifesciences/builder",
"id": "93c33a0154c81f789f9d263eb9d28d626ba46371",
"size": "3105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/test_buildercore_cloudformation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "735556"
},
{
"name": "Shell",
"bytes": "33921"
},
{
"name": "Smarty",
"bytes": "142"
},
{
"name": "VCL",
"bytes": "4406"
}
],
"symlink_target": ""
} |
import itertools
from .qt import Qt
# Allocate custom roles well above Qt.UserRole so they cannot collide with
# roles handed out by other code.
_role_counter = itertools.count(Qt.UserRole + 314159)
_name_to_role = {}

# ``basestring`` only exists on Python 2; fall back to ``str`` on Python 3.
# (Previously the type check raised NameError instead of TypeError on Py3.)
try:
    _string_types = basestring
except NameError:
    _string_types = str


def get_role(name, create=True):
    """Return the Qt item-data role registered under ``name``.

    If ``name`` is unknown and ``create`` is true, a fresh role number is
    allocated and memoised; with ``create=False`` the KeyError propagates.

    :raises TypeError: when allocating a role for a non-string name.
    """
    try:
        return _name_to_role[name]
    except KeyError:
        if not create:
            raise
        if not isinstance(name, _string_types):
            raise TypeError('role names must be strings; got %s %r' % (type(name).__name__, name))
        role = _name_to_role[name] = next(_role_counter)
        return role


# Qt's own display role is pre-registered under the obvious name.
_name_to_role['display'] = Qt.DisplayRole
| {
"content_hash": "0aea85303a614466950fb352189bdbe9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 98,
"avg_line_length": 22.565217391304348,
"alnum_prop": 0.6088631984585742,
"repo_name": "westernx/uitools",
"id": "eeb5f8903100900aa658da506b59f6049137671f",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uitools/roles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41289"
}
],
"symlink_target": ""
} |
import os
#QT
import sip
sip.setapi('QVariant',2)
sip.setapi('QString',2)
from PyQt4 import QtCore,QtGui
from .tableDelegate import TableDelegate
from .folderView import FolderView
#------------------------------------------------------------------------------------------------------------
# Worker: Runs continuously in a separate thread
# Can do differents method / method can be interrupt by new method call
#------------------------------------------------------------------------------------------------------------
class Worker(QtCore.QObject):
    """Background worker that runs continuously on a separate QThread.

    Work is requested via requestMethod(); a running method polls the
    _interrupt/_abort flags so a newer request (or abort()) can cut it short.
    Shared state is guarded by self.mutex and signalled via self.condition.
    """
    # Signals: progress percentage, per-row icon completion, and shutdown.
    valueChanged=QtCore.pyqtSignal(int)
    folderDone=QtCore.pyqtSignal(int)
    finished=QtCore.pyqtSignal()
    def __init__(self):
        super(Worker,self).__init__()
        # All attributes below are shared with the GUI thread; access them
        # only while holding self.mutex.
        self._abort=False
        self._interrupt=False
        self._method="none"
        self.mutex=QtCore.QMutex()
        self.condition=QtCore.QWaitCondition()
    def mainLoop(self):
        """Main loop: sleep until woken, then dispatch the requested method."""
        while 1:
            self.mutex.lock()
            # Sleep until requestMethod()/abort() wakes the condition.
            if not self._interrupt and not self._abort:
                self.condition.wait(self.mutex)
            self._interrupt=False
            if self._abort:
                # NOTE(review): this returns while self.mutex is still
                # locked; harmless only because the thread terminates here —
                # confirm nothing else ever locks this mutex afterwards.
                self.finished.emit()
                return
            method=self._method
            self.mutex.unlock()
            if method=="icon_folder":
                self.doMethod_icon_folder()
    def requestMethod(self,method,arg=None):
        """Ask the worker to run ``method`` (interrupting any current work)."""
        locker=QtCore.QMutexLocker(self.mutex)
        self._interrupt=True
        self._method=method
        self._arg=arg
        self.condition.wakeOne()
    def doMethod_icon_folder(self):
        """Refresh each experiment's folder icon, emitting progress signals."""
        expList=self._arg
        i=0
        s=len(expList)
        for exp in expList:
            # Snapshot the control flags under lock so a concurrent
            # abort()/requestMethod() is honoured between items.
            self.mutex.lock()
            abort=self._abort
            interrupt=self._interrupt
            self.mutex.unlock()
            if abort or interrupt:
                # Force the progress display to completion before stopping.
                self.valueChanged.emit(100)
                break
            exp.reset_folder_icon()
            self.folderDone.emit(i)
            i+=1
            self.valueChanged.emit(i*100.0/s)
    def abort(self):
        """Request shutdown of the worker loop (thread-safe)."""
        locker=QtCore.QMutexLocker(self.mutex)
        self._abort=True
        self.condition.wakeOne()
#------------------------------------------------------------------------------------------------------------
# Model
#------------------------------------------------------------------------------------------------------------
class Model(QtCore.QAbstractTableModel):
    """Table model listing experiments; folder icons are computed off-thread."""
    def __init__(self,delegate=None,parent=None):
        super(Model,self).__init__(parent)
        # Background thread + worker that resolve folder icons without
        # blocking the UI; folderDone triggers per-row repaints.
        self.working=False
        self.thread=QtCore.QThread()
        self.worker=Worker()
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.mainLoop)
        self.thread.finished.connect(self.deleteLater)
        self.thread.start()
        self.worker.folderDone.connect(self.icon_done)
        self.worker.finished.connect(self.thread.quit)
        # List of current experiments to display (one table row each).
        self.delegate=delegate
        self.experimentList=[]
    def rowCount(self,QModelIndex):
        # One row per experiment.
        return len(self.experimentList)
    def columnCount(self,QModelIndex):
        # Columns: year-month, day, time, folder name (with icon).
        return 4
    def icon_done(self,row):
        """Worker callback: repaint the icon cell of ``row``."""
        idx=self.index(row,3)
        self.dataChanged.emit(idx,idx)
    def reset_list(self,expList):
        """Replace the experiment list and queue icon loading in the worker."""
        self.beginResetModel()
        expList.sort()
        self.experimentList=expList[:]
        self.reset_horizontal_lines()
        self.worker.requestMethod("icon_folder",self.experimentList)
        self.endResetModel()
    #To draw horizontal line according to date
    def reset_horizontal_lines(self):
        listDate=[exp.dateTime for exp in self.experimentList]
        self.delegate.reset_horizontal_lines(listDate)
    def clear(self):
        """Remove every experiment from the model."""
        self.beginResetModel()
        self.experimentList=[]
        self.endResetModel()
    def data(self,index,role):
        """Qt data accessor: text for the four columns, icon for column 3."""
        col=index.column()
        row=index.row()
        if role==QtCore.Qt.DisplayRole:
            if col==0:
                return self.experimentList[row].yearMonth
            if col==1:
                return self.experimentList[row].day
            if col==2:
                return self.experimentList[row].time
            if col==3:
                return self.experimentList[row].folderName
        if role==QtCore.Qt.DecorationRole:
            if col==3:
                # Icon files live in ../icons relative to this module.
                path=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../icons/')
                path=os.path.realpath(path)+"/"
                return QtGui.QIcon(path+self.experimentList[row].folder.icon)
    def flags(self,index):
        # Only the folder-name column is enabled/selectable.
        if index.column()==3:
            return QtCore.Qt.ItemIsEnabled|QtCore.Qt.ItemIsSelectable
        return QtCore.Qt.NoItemFlags
    def pathLocal_from_index(self,index):
        """Return the local filesystem path of the experiment at ``index``."""
        exp=self.experimentList[index.row()]
        return exp.pathLocal
    def createFiles_onSelection(self,selection,prmModel,prbModel):
        """Create prm/prb files for each selected row and refresh its icon.

        NOTE(review): ``selection[0]``/``selection[-1]`` raise IndexError on
        an empty selection — confirm callers always pass a non-empty list.
        """
        for index in selection:
            self.experimentList[index.row()].create_files(prmModel=prmModel,prbModel=prbModel)
            self.experimentList[index.row()].reset_folder_icon()
        self.dataChanged.emit(selection[0],selection[-1])
    def update_exp(self,exp):
        """Repaint the row showing ``exp``, if it is currently listed."""
        if exp in self.experimentList:
            row=self.experimentList.index(exp)
            index=self.index(row,3)
            self.dataChanged.emit(index,index)
#--------------------------------------------------------------------------------------------------------------
# FileBrowser Widget
#--------------------------------------------------------------------------------------------------------------
class FileBrowser(QtGui.QWidget):
    """Widget combining an animal selector, an experiment table and
    prm/prb model-file tooling (load templates, create per-experiment files).
    """
    def __init__(self,ROOT,parent=None):
        super(FileBrowser,self).__init__(parent)
        #Combo Box: selects which animal's experiments to show
        self.animalComboBox=QtGui.QComboBox()
        #model/view: table of experiments with a custom paint delegate
        self.delegate=TableDelegate(self)
        self.model=Model(self.delegate,self)
        self.view=FolderView(self.model,self)
        self.model.worker.valueChanged.connect(self.display_load)
        self.view.table.setItemDelegate(self.delegate)
        #button
        pathIcon=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../icons/downarrow.png')
        pathIcon=os.path.realpath(pathIcon)
        self.button_add=QtGui.QPushButton(QtGui.QIcon(pathIcon)," ")
        # Disabled until both a prm and a prb template have been loaded.
        self.button_createFiles=QtGui.QPushButton("Create prm/prb")
        self.button_createFiles.clicked.connect(self.createFiles)
        self.button_createFiles.setEnabled(False)
        self.button_loadModels=QtGui.QPushButton("Load models")
        self.button_loadModels.clicked.connect(self.loadModels)
        #label: root path, elided from the left when too long to display
        labelPath=ROOT+os.sep
        if len(labelPath)>20:
            labelPath="..."+labelPath[-17:]
        self.label_path=QtGui.QLabel(labelPath)
        self.label_load=QtGui.QLabel(' ')
        self.label_prmModel=QtGui.QLabel('no prm model')
        self.label_prbModel=QtGui.QLabel('no prb model')
        # Currently loaded template files (empty QFileInfo until loaded).
        self.prmModel=QtCore.QFileInfo()
        self.prbModel=QtCore.QFileInfo()
        #Layout
        hbox1=QtGui.QHBoxLayout()
        hbox1.addWidget(self.label_path)
        hbox1.addWidget(self.animalComboBox)
        hbox1.addStretch()
        hbox2=QtGui.QHBoxLayout()
        hbox2.addWidget(self.view.label_hide)
        hbox2.addWidget(self.view.edit_hide)
        grid=QtGui.QHBoxLayout()
        grid.addWidget(self.button_add)
        grid.addWidget(self.button_loadModels)
        grid.addWidget(self.label_prmModel)
        grid.addWidget(self.label_prbModel)
        grid.addWidget(self.button_createFiles)
        grid.addWidget(self.label_load)
        layout=QtGui.QGridLayout()
        layout.addLayout(hbox1,1,1)
        layout.addLayout(hbox2,1,2)
        layout.addWidget(self.view,2,1,4,2)
        layout.addLayout(grid,6,1,1,2)
        self.setLayout(layout)
    def set_animalComboBox(self,animalList):
        """Populate the animal selector with the given IDs."""
        for animalID in animalList:
            self.animalComboBox.addItem(animalID)
    def get_experiment_selection(self):
        """Return the currently selected table indexes."""
        return self.view.table.selectedIndexes()
    def createFiles(self):
        """Create prm/prb files for the selection (no-op until both
        templates are loaded)."""
        if self.prmModel.exists() and self.prbModel.exists():
            selection=self.get_experiment_selection()
            self.model.createFiles_onSelection(selection,prmModel=self.prmModel,prbModel=self.prbModel)
            self.view.refresh()
    def loadModels(self):
        """Open a file dialog to pick the prm and/or prb template files."""
        filebox=QtGui.QFileDialog(self,"Load model for PRB and PRM files")
        filebox.setFileMode(QtGui.QFileDialog.ExistingFiles)
        filebox.setNameFilters(["PRB/PRM (*.prm *.prb)"])
        filebox.setOptions(QtGui.QFileDialog.DontUseNativeDialog)
        if filebox.exec_():
            # Route each chosen file to the matching template slot by suffix.
            for selectedFile in filebox.selectedFiles():
                if selectedFile.endswith(".prm"):
                    self.prmModel.setFile(selectedFile)
                    self.label_prmModel.setText(self.prmModel.fileName())
                elif selectedFile.endswith(".prb"):
                    self.prbModel.setFile(selectedFile)
                    self.label_prbModel.setText(self.prbModel.fileName())
        # Enable file creation only once both templates are available.
        if self.prmModel.exists() and self.prbModel.exists():
            self.button_createFiles.setEnabled(True)
    def display_load(self,i):
        """Show icon-loading progress; clear the label at 100%."""
        percentage=str(i)+'%'
        if i==100:
            self.label_load.setText("")
        else:
            self.label_load.setText("Loading icons: "+percentage)
    def reset_experimentList(self,experimentInfoList):
        """Replace the table contents with ``experimentInfoList``."""
        self.model.reset_list(experimentInfoList)
        self.view.reset_view()
    def on_close(self):
        """Stop the background worker before the widget goes away."""
        self.model.worker.abort()
        #self.model.thread.wait()
| {
"content_hash": "7d9948d200634919ddde29145ccfc8c7",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 111,
"avg_line_length": 30.413919413919412,
"alnum_prop": 0.6774659761531976,
"repo_name": "tymoreau/klusta_process_manager",
"id": "865037f7a9bdb753ec35ab0e58ffa5e610d8c1ec",
"size": "8303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klusta_process_manager/fileBrowser/fileBrowser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "101103"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from celery import task
import inspect
@task
def do_build_project(project, branch=None):
builder = project.get_builder()
builder.build_project(project, branch=branch)
@task
def do_buildset(buildset):
builder = buildset.project.get_builder()
builder.run_buildset(buildset)
@task
def do_build(build, builder):
if inspect.isclass(builder):
builder = builder()
builder.build(build)
| {
"content_hash": "c1074ba01aa4105052ac7070dfbfe96d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 20.727272727272727,
"alnum_prop": 0.7171052631578947,
"repo_name": "lukaszb/zeusci",
"id": "c2c3cd5ca68e2224a2e3a6d4d27095fc095a45c9",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "zeusci/zeus/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11819"
},
{
"name": "JavaScript",
"bytes": "13360"
},
{
"name": "Python",
"bytes": "112872"
},
{
"name": "Ruby",
"bytes": "1262"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
"""test_maximise.py - test Skip List set implementations against the Hacker
Rank maximise sum problem.
This is a short test to ensure the code passes before we subject it to the
ridiculously sized data.
"""
from .benchmark import maximise
class TestMaximiseMixin(object):
def test_single(self):
self.assertEqual(self.maximise_fn([1], 2), 1)
def test_single_overflow(self):
self.assertEqual(self.maximise_fn([3], 2), 1)
def test_example(self):
self.assertEqual(self.maximise_fn([3, 3, 9, 9, 5], 7), 6)
def test_zeroes(self):
self.assertEqual(self.maximise_fn([0, 0, 0], 1), 0)
self.assertEqual(self.maximise_fn([0, 0, 0], 3), 0)
def test_ones(self):
self.assertEqual(self.maximise_fn([1, 1, 1, 1], 1), 0)
self.assertEqual(self.maximise_fn([1, 1, 1, 1], 2), 1)
self.assertEqual(self.maximise_fn([1, 1, 1, 1], 3), 2)
self.assertEqual(self.maximise_fn([1, 1, 1, 1], 4), 3)
def test_simple(self):
self.assertEqual(self.maximise_fn([5, 4], 7), 5)
self.assertEqual(self.maximise_fn([3, 1, 2], 7), 6)
self.assertEqual(self.maximise_fn([1, 1, 8], 7), 3)
# class TestMaximise(TestMaximiseMixin, unittest.TestCase):
# maximise_fn = lambda self, array, m: maximise(SkipListSet, array, m)
# if __name__ == "__main__":
# unittest.main()
| {
"content_hash": "fa4f4d5a8a1c7d775f3fad908c608ebd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 33.24390243902439,
"alnum_prop": 0.6294937637564196,
"repo_name": "jschaf/skiplist",
"id": "83163101ba6eeaaad901538a7bcbfb11aef04240",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skip_list/benchmarks/test_maximise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12714"
}
],
"symlink_target": ""
} |
"""Tokenization data types and interfaces."""
from abc import ABCMeta, abstractmethod
from functools import reduce
from sys import intern
from typing import Tuple, Optional, TYPE_CHECKING, Iterable, Union, Iterator
if TYPE_CHECKING:
from pyramids.config import ModelConfig
from pyramids.language import Language
__author__ = 'Aaron Hosford'
__all__ = [
'Tokenizer',
'TokenSequence',
]
class TokenSequence:
    """Immutable sequence of interned tokens plus their character spans."""

    def __init__(self, tokens: Iterable[Tuple[str, int, int]]):
        """Build from an iterable of ``(token, start, end)`` triples.

        Raises TypeError when a token is not a string or a span bound is
        not an integer.
        """
        collected_tokens = []
        collected_spans = []
        for token, start, end in tokens:
            if not isinstance(token, str):
                raise TypeError(token, str)
            if not isinstance(start, int):
                raise TypeError(start, int)
            if not isinstance(end, int):
                raise TypeError(end, int)
            # Interning lets equal tokens share one object.
            collected_tokens.append(intern(token))
            collected_spans.append((start, end))
        self._tokens = tuple(collected_tokens)
        self._spans = tuple(collected_spans)
        # Tokens are interned, so XOR-ing their ids gives a cheap,
        # in-process-stable hash component.
        token_hash = reduce(lambda acc, tok: acc ^ id(tok), self._tokens, 0)
        span_hash = reduce(lambda acc, span: acc ^ hash(span), self._spans, 0)
        self._hash = token_hash ^ span_hash

    @property
    def tokens(self) -> Tuple[str, ...]:
        """The interned token strings."""
        return self._tokens

    @property
    def spans(self) -> Tuple[Tuple[int, int], ...]:
        """The (start, end) index span of each token."""
        return self._spans

    def __str__(self) -> str:
        return ' '.join(self._tokens)

    def __repr__(self) -> str:
        return '{}({!r})'.format(type(self).__name__, self._tokens)

    def __hash__(self) -> int:
        return self._hash

    def __eq__(self, other: 'TokenSequence') -> bool:
        if not isinstance(other, TokenSequence):
            return NotImplemented
        if self is other:
            return True
        return (self._hash == other._hash
                and self._tokens == other._tokens
                and self._spans == other._spans)

    def __ne__(self, other: 'TokenSequence') -> bool:
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not self == other

    def __le__(self, other: 'TokenSequence') -> bool:
        # Order by length first, then tokens lexicographically, then spans.
        if not isinstance(other, TokenSequence):
            return NotImplemented
        mine, theirs = self._tokens, other._tokens
        if len(mine) != len(theirs):
            return len(mine) < len(theirs)
        if mine != theirs:
            return mine < theirs
        return self._spans <= other._spans

    def __gt__(self, other: 'TokenSequence') -> bool:
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not self <= other

    def __ge__(self, other: 'TokenSequence') -> bool:
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return other <= self

    def __lt__(self, other: 'TokenSequence') -> bool:
        if not isinstance(other, TokenSequence):
            return NotImplemented
        return not self >= other

    def __getitem__(self, index: Union[int, slice]) -> Union[str, Tuple[str, ...]]:
        return self._tokens[index]

    def __len__(self) -> int:
        return len(self._tokens)

    def __iter__(self) -> Iterator[str]:
        return iter(self._tokens)
class Tokenizer(metaclass=ABCMeta):
    """Abstract interface for Pyramids tokenizers.

    Concrete subclasses are constructed from model configuration via
    ``from_config`` and convert raw text into a ``TokenSequence``.
    """
    @classmethod
    @abstractmethod
    def from_config(cls, config_info: 'ModelConfig') -> 'Tokenizer':
        """Create a tokenizer instance from the given configuration info."""
        raise NotImplementedError()
    @abstractmethod
    def tokenize(self, text: str) -> TokenSequence:
        """Tokenize a piece of text into a TokenSequence."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def language(self) -> 'Optional[Language]':
        """Get the language this tokenizer is designed for, if indicated."""
        raise NotImplementedError()
| {
"content_hash": "a837bce453896d223ff10b416b8e89f9",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 83,
"avg_line_length": 32.983739837398375,
"alnum_prop": 0.5900912003943801,
"repo_name": "hosford42/pyramids",
"id": "c0f6aec36163ff2ab03550a773d6cba31b21e00f",
"size": "4082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramids/tokenization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266486"
}
],
"symlink_target": ""
} |
import argparse
import ConfigParser
import MySQLdb
import sys
import time
from novanet2neutron import common
CONF = ConfigParser.ConfigParser()
def add_port(neutronc, instance, network_id, subnet_id,
mac_address, ip_address):
body_value = {
"port": {
"tenant_id": instance.tenant_id,
"mac_address": mac_address,
"fixed_ips": [
{
"subnet_id": subnet_id,
"ip_address": ip_address,
}],
"network_id": network_id,
}
}
ports = neutronc.list_ports(mac_address=mac_address, network_id=network_id)
if ports['ports']:
port = ports['ports'][0]
print "Not creating port for %s already exists" % mac_address
else:
try:
port = neutronc.create_port(body=body_value)['port']
except Exception, e:
print e
instance_ports = neutronc.list_ports(device_id=instance.id,
network_id=network_id)
if not instance_ports['ports']:
try:
print 'attach interface'
instance.interface_attach(port['id'], "", "")
except Exception, e:
print e
else:
print "Not attaching, already attached %s" % instance.id
def add_ports(neutronc, cursor, mappings, instance, target_zone):
#suspend = False
#if instance.status == "SUSPENDED":
# instance.resume()
# time.sleep(2)
# suspend = True
cursor.execute(
"SELECT * from network_migration_info where uuid = '%s'" % instance.id)
networks = cursor.fetchall()
for network in networks:
zone = network['availability_zone']
if zone is None or zone == 'None':
print "unknown zone for %s" % instance.id
continue
if zone != target_zone:
continue
network_name = network['network_name']
ip_v4 = network['ip_v4']
ip_v6 = network['ip_v6']
mac_address = network['mac_address']
network_info = mappings['network_%s:%s' % (zone, network_name)]
neutron_network = network_info['network_id']
subnet_v4 = network_info['subnet_v4_id']
add_port(neutronc, instance, neutron_network,
subnet_v4, mac_address, ip_v4)
if ip_v6 != "None":
subnet_v6 = network_info['subnet_v6_id']
add_port(neutronc, instance, neutron_network,
subnet_v6, mac_address, ip_v6)
#if suspend:
# instance.suspend()
def create_networks(neutronc):
    """Create neutron networks/subnets for every ``network_*`` config section.

    Returns a mapping of section name -> config options plus the created
    network/subnet IDs, used later when porting instance NICs.
    """
    mappings = {}
    for section in CONF.sections():
        if not section.startswith('network_'):
            continue
        mappings[section] = {}
        for option in CONF.options(section):
            mappings[section][option] = CONF.get(section, option)
        zone = CONF.get(section, 'zone')
        network_name = CONF.get(section, 'name')
        if zone == network_name:
            name = zone
        else:
            name = "%s-%s" % (zone, network_name)
        physnet = CONF.get(section, 'physnet')
        network = common.get_network(neutronc, name)
        if not network:
            network = common.create_network(neutronc, name, physnet)
        mappings[section]['network_id'] = network
        subnet_v4 = common.get_subnet(neutronc, network, 4)
        # gateway_v4 is optional; fall back to None when the option is absent.
        # (Was a bare ``except:`` which also hid unrelated errors and even
        # KeyboardInterrupt.)
        try:
            gateway_v4 = CONF.get(section, 'gateway_v4')
        except ConfigParser.NoOptionError:
            gateway_v4 = None
        if not subnet_v4:
            subnet_v4 = common.create_subnet(
                neutronc, network, 4,
                CONF.get(section, 'cidr_v4'),
                CONF.get(section, 'dns_servers').split(','),
                gateway_v4,
                CONF.get(section, 'dhcp_start'),
                CONF.get(section, 'dhcp_end'))
        mappings[section]['subnet_v4_id'] = subnet_v4
        if 'cidr_v6' in CONF.options(section):
            subnet_v6 = common.create_subnet(
                neutronc, network, 6,
                CONF.get(section, 'cidr_v6'),
                CONF.get(section, 'dns_servers').split(','),
                CONF.get(section, 'gateway_v6'))
            mappings[section]['subnet_v6_id'] = subnet_v6
    return mappings
def check_hypervisors(novac):
print "Checking all hypervisors are running fake driver"
for h in novac.hypervisors.list():
if h.hypervisor_type != 'fake':
print 'Hypervisor %s is not fake' % h.hypervisor_hostname
sys.exit(1)
def collect_args():
    """Return the parsed command-line options (config path and target AZ)."""
    parser = argparse.ArgumentParser(description='novanet2neutron.')
    parser.add_argument(
        '-c', '--config',
        action='store',
        default='novanet2neutron.conf',
        help="Config file",
    )
    parser.add_argument(
        '-z', '--zone',
        action='store',
        help="AZ to migrate",
    )
    return parser.parse_args()
def main():
args = collect_args()
common.load_config(CONF, args.config)
target_zone = args.zone
conn = MySQLdb.connect(
host=CONF.get('db', 'host'),
user=CONF.get('db', 'user'),
passwd=CONF.get('db', 'password'),
db=CONF.get('db', 'name'))
cursor = MySQLdb.cursors.DictCursor(conn)
novac = common.get_nova_client()
check_hypervisors(novac)
neutronc = common.get_neutron_client()
print "creating networks"
mappings = create_networks(neutronc)
print "getting instances"
instances = common.all_servers(novac)
print "adding ports"
for i in instances:
add_ports(neutronc, cursor, mappings, i, target_zone)
cursor.close()
conn.close()
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "a078ab3987b5a4dca7edb272e1cf1e4a",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 32.69942196531792,
"alnum_prop": 0.5635495845854693,
"repo_name": "NeCTAR-RC/novanet2neutron",
"id": "060bedd4a02061ed367c90a88fa95e92909d7e5d",
"size": "5680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrate-control.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37674"
}
],
"symlink_target": ""
} |
import numbers
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, Dtype
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
    """
    Extension dtype for boolean data.

    .. versionadded:: 1.0.0

    .. warning::

       BooleanDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.BooleanDtype()
    BooleanDtype
    """

    name = "boolean"

    # mypy: https://github.com/python/mypy/issues/4125
    @property
    def type(self) -> Type:  # type: ignore[override]
        return np.bool_

    @property
    def kind(self) -> str:
        return "b"

    @property
    def numpy_dtype(self) -> np.dtype:
        return np.dtype("bool")

    @classmethod
    def construct_array_type(cls) -> Type["BooleanArray"]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return BooleanArray

    def __repr__(self) -> str:
        return "BooleanDtype"

    @property
    def _is_boolean(self) -> bool:
        return True

    @property
    def _is_numeric(self) -> bool:
        return True

    def __from_arrow__(
        self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
    ) -> "BooleanArray":
        """
        Construct BooleanArray from pyarrow Array/ChunkedArray.
        """
        import pyarrow

        # Normalise a plain Array to a one-chunk list, then convert each
        # chunk separately and concatenate.
        chunks = [array] if isinstance(array, pyarrow.Array) else array.chunks
        # TODO should optimize this without going through object array
        converted = [BooleanArray._from_sequence(np.array(chunk)) for chunk in chunks]
        return BooleanArray._concat_same_type(converted)
def coerce_to_array(
    values, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Coerce the input values array to numpy arrays with a mask.

    Parameters
    ----------
    values : 1D list-like
    mask : bool 1D array, optional
    copy : bool, default False
        if True, copy the input

    Returns
    -------
    tuple of (values, mask)
    """
    # Fast path: already a BooleanArray -- reuse its internal buffers.
    if isinstance(values, BooleanArray):
        if mask is not None:
            raise ValueError("cannot pass mask for BooleanArray input")
        values, mask = values._data, values._mask
        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask
    mask_values = None
    if isinstance(values, np.ndarray) and values.dtype == np.bool_:
        if copy:
            values = values.copy()
    elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
        # Numeric ndarray: NaNs become mask entries; the remaining values
        # must round-trip through bool unchanged (i.e. be exactly 0 or 1).
        mask_values = isna(values)
        values_bool = np.zeros(len(values), dtype=bool)
        values_bool[~mask_values] = values[~mask_values].astype(bool)
        if not np.all(
            values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
        ):
            raise TypeError("Need to pass bool-like values")
        values = values_bool
    else:
        # Generic path: infer the dtype of an object array, allowing
        # booleans and 0/1-valued integers/floats (with missing values).
        values_object = np.asarray(values, dtype=object)
        inferred_dtype = lib.infer_dtype(values_object, skipna=True)
        integer_like = ("floating", "integer", "mixed-integer-float")
        if inferred_dtype not in ("boolean", "empty") + integer_like:
            raise TypeError("Need to pass bool-like values")
        mask_values = isna(values_object)
        values = np.zeros(len(values), dtype=bool)
        values[~mask_values] = values_object[~mask_values].astype(bool)
        # if the values were integer-like, validate it were actually 0/1's
        if (inferred_dtype in integer_like) and not (
            np.all(
                values[~mask_values].astype(float)
                == values_object[~mask_values].astype(float)
            )
        ):
            raise TypeError("Need to pass bool-like values")
    # Merge the explicit mask (if any) with the NA-derived mask.
    if mask is None and mask_values is None:
        mask = np.zeros(len(values), dtype=bool)
    elif mask is None:
        mask = mask_values
    else:
        if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
            if mask_values is not None:
                mask = mask | mask_values
            else:
                if copy:
                    mask = mask.copy()
        else:
            mask = np.array(mask, dtype=bool)
            if mask_values is not None:
                mask = mask | mask_values
    if values.ndim != 1:
        raise ValueError("values must be a 1D list-like")
    if mask.ndim != 1:
        raise ValueError("mask must be a 1D list-like")
    return values, mask
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
To construct an BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
Create an BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
    def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
        """Validate that ``values`` is a boolean ndarray, then delegate
        storage of (values, mask) to BaseMaskedArray."""
        if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
            raise TypeError(
                "values should be boolean numpy array. Use "
                "the 'pd.array' function instead"
            )
        self._dtype = BooleanDtype()
        super().__init__(values, mask, copy=copy)
    @property
    def dtype(self) -> BooleanDtype:
        """The BooleanDtype instance for this array."""
        return self._dtype
    @classmethod
    def _from_sequence(
        cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
    ) -> "BooleanArray":
        """Construct a BooleanArray from a sequence of bool-like scalars."""
        if dtype:
            # NOTE(review): ``assert`` is stripped under ``python -O`` and this
            # relies on dtype == "boolean" string comparison — confirm intended.
            assert dtype == "boolean"
        values, mask = coerce_to_array(scalars, copy=copy)
        return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls, strings: List[str], *, dtype: Optional[Dtype] = None, copy: bool = False
) -> "BooleanArray":
def map_string(s):
if isna(s):
return s
elif s in ["True", "TRUE", "true", "1", "1.0"]:
return True
elif s in ["False", "FALSE", "false", "0", "0.0"]:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BooleanArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
    def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:
        """Coerce ``value`` to (values, mask) ndarrays via the module-level helper."""
        return coerce_to_array(value)
    def astype(self, dtype, copy: bool = True) -> ArrayLike:
        """
        Cast to a NumPy array or ExtensionArray with 'dtype'.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        copy : bool, default True
            Whether to copy the data, even if not necessary. If False,
            a copy is made only if the old dtype does not match the
            new dtype.

        Returns
        -------
        ndarray or ExtensionArray
            NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.

        Raises
        ------
        TypeError
            if incompatible type with an BooleanDtype, equivalent of same_kind
            casting
        """
        dtype = pandas_dtype(dtype)
        # Extension targets (including other masked dtypes) are handled by base.
        if isinstance(dtype, ExtensionDtype):
            return super().astype(dtype, copy)
        if is_bool_dtype(dtype):
            # astype_nansafe converts np.nan to True
            if self._hasna:
                raise ValueError("cannot convert float NaN to bool")
            else:
                return self._data.astype(dtype, copy=copy)
        # for integer, error if there are missing values
        if is_integer_dtype(dtype) and self._hasna:
            raise ValueError("cannot convert NA to integer")
        # for float dtype, ensure we use np.nan before casting (numpy cannot
        # deal with pd.NA)
        na_value = self._na_value
        if is_float_dtype(dtype):
            na_value = np.nan
        # coerce
        return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
    def _values_for_argsort(self) -> np.ndarray:
        """
        Return values for sorting.

        Returns
        -------
        ndarray
            The transformed values should maintain the ordering between values
            within the array.

        See Also
        --------
        ExtensionArray.argsort : Return the indices that would sort this array.
        """
        data = self._data.copy()
        # NOTE(review): assigning -1 into a bool ndarray stores True, so masked
        # entries sort as True here — confirm this is the intended ordering.
        data[self._mask] = -1
        return data
    def any(self, *, skipna: bool = True, **kwargs):
        """
        Return whether any element is True.

        Returns False unless there is at least one element that is True.
        By default, NAs are skipped. If ``skipna=False`` is specified and
        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
        is used as for logical operations.

        Parameters
        ----------
        skipna : bool, default True
            Exclude NA values. If the entire array is NA and `skipna` is
            True, then the result will be False, as for an empty array.
            If `skipna` is False, the result will still be True if there is
            at least one element that is True, otherwise NA will be returned
            if there are NA's present.
        **kwargs : any, default None
            Additional keywords have no effect but might be accepted for
            compatibility with NumPy.

        Returns
        -------
        bool or :attr:`pandas.NA`

        See Also
        --------
        numpy.any : Numpy version of this method.
        BooleanArray.all : Return whether all elements are True.

        Examples
        --------
        The result indicates whether any element is True (and by default
        skips NAs):

        >>> pd.array([True, False, True]).any()
        True
        >>> pd.array([True, False, pd.NA]).any()
        True
        >>> pd.array([False, False, pd.NA]).any()
        False
        >>> pd.array([], dtype="boolean").any()
        False
        >>> pd.array([pd.NA], dtype="boolean").any()
        False

        With ``skipna=False``, the result can be NA if this is logically
        required (whether ``pd.NA`` is True or False influences the result):

        >>> pd.array([True, False, pd.NA]).any(skipna=False)
        True
        >>> pd.array([False, False, pd.NA]).any(skipna=False)
        <NA>
        """
        kwargs.pop("axis", None)
        nv.validate_any((), kwargs)
        values = self._data.copy()
        # Treat missing entries as False so they cannot force a True result.
        np.putmask(values, self._mask, False)
        result = values.any()
        if skipna:
            return result
        else:
            # Kleene logic: a False result is only trustworthy when no NAs
            # are present; otherwise an NA could have been True.
            if result or len(self) == 0 or not self._mask.any():
                return result
            else:
                return self.dtype.na_value
def all(self, *, skipna: bool = True, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
return BooleanArray(result, mask)
    def _cmp_method(self, other, op):
        """
        Evaluate a comparison op against ``other`` and return a BooleanArray.

        Comparisons with IntegerArray/FloatingArray return NotImplemented so
        those types run their own comparison logic instead.
        """
        from pandas.arrays import FloatingArray, IntegerArray
        if isinstance(other, (IntegerArray, FloatingArray)):
            return NotImplemented
        mask = None
        if isinstance(other, BooleanArray):
            other, mask = other._data, other._mask
        elif is_list_like(other):
            other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")
            if len(self) != len(other):
                raise ValueError("Lengths must match to compare")
        if other is libmissing.NA:
            # numpy does not handle pd.NA well as "other" scalar (it returns
            # a scalar False instead of an array)
            # -> comparison with NA is NA everywhere: dummy values, full mask.
            result = np.zeros_like(self._data)
            mask = np.ones_like(self._data)
        else:
            # numpy will show a DeprecationWarning on invalid elementwise
            # comparisons, this will raise in the future
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                with np.errstate(all="ignore"):
                    result = op(self._data, other)
            # nans propagate
            # (copy so the returned mask never aliases self._mask)
            if mask is None:
                mask = self._mask.copy()
            else:
                mask = self._mask | mask
        return BooleanArray(result, mask, copy=False)
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
other = bool(other)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
return super()._reduce(name, skipna=skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
elif is_bool_dtype(result):
return BooleanArray(result, mask, copy=False)
elif is_integer_dtype(result):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
| {
"content_hash": "9f08c1a108e81ef705fb375806751362",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 85,
"avg_line_length": 31.784203102961918,
"alnum_prop": 0.5646327934324384,
"repo_name": "jreback/pandas",
"id": "bbbc0911b484604507a0097709ab628bf18c6e95",
"size": "22535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/arrays/boolean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
"""
Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
"""
__author__ = 'Daniel'
class Solution(object):
    def hIndex(self, citations):
        """
        Compute the h-index of a citation list sorted in ascending order.

        Binary-search the smallest index ``s`` with ``citations[s] >= n - s``;
        the h-index is then ``n - s``.  O(log n) time, O(1) space.

        :type citations: List[int]
        :rtype: int
        """
        n = len(citations)
        lo = 0
        hi = n
        while lo < hi:
            # Floor division: `(lo + hi) / 2` yields a float on Python 3,
            # which would crash when used as a list index.
            mid = (lo + hi) // 2
            if citations[mid] >= n - mid:
                hi = mid
            else:
                lo = mid + 1
        return n - lo
if __name__ == "__main__":
    # Smoke test: the h-index of [0, 1, 3, 5, 6] is 3.
    assert Solution().hIndex([0, 1, 3, 5, 6]) == 3
"content_hash": "1ed2603c53095e9e1678f21b4220f600",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 115,
"avg_line_length": 22.357142857142858,
"alnum_prop": 0.476038338658147,
"repo_name": "ee08b397/LeetCode-4",
"id": "28b6b8ced0ac39bb16137700508c028617edf33d",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "275 H-Index II.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555639"
}
],
"symlink_target": ""
} |
from typing import MutableMapping, MutableSequence
from google.protobuf import field_mask_pb2 # type: ignore
import proto # type: ignore
from google.cloud.bigquery_migration_v2.types import migration_entities
# Proto module descriptor: registers this file's messages under the
# ``google.cloud.bigquery.migration.v2`` proto package.
__protobuf__ = proto.module(
    package="google.cloud.bigquery.migration.v2",
    manifest={
        "CreateMigrationWorkflowRequest",
        "GetMigrationWorkflowRequest",
        "ListMigrationWorkflowsRequest",
        "ListMigrationWorkflowsResponse",
        "DeleteMigrationWorkflowRequest",
        "StartMigrationWorkflowRequest",
        "GetMigrationSubtaskRequest",
        "ListMigrationSubtasksRequest",
        "ListMigrationSubtasksResponse",
    },
)
class CreateMigrationWorkflowRequest(proto.Message):
    r"""Request to create a migration workflow resource.
    Attributes:
        parent (str):
            Required. The name of the project to which this migration
            workflow belongs. Example: ``projects/foo/locations/bar``
        migration_workflow (google.cloud.bigquery_migration_v2.types.MigrationWorkflow):
            Required. The migration workflow to create.
    """
    # Field numbers are wire-format proto tags; do not renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    migration_workflow: migration_entities.MigrationWorkflow = proto.Field(
        proto.MESSAGE,
        number=2,
        message=migration_entities.MigrationWorkflow,
    )
class GetMigrationWorkflowRequest(proto.Message):
    r"""A request to get a previously created migration workflow.
    Attributes:
        name (str):
            Required. The unique identifier for the migration workflow.
            Example: ``projects/123/locations/us/workflows/1234``
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            The list of fields to be retrieved.
    """
    # Field numbers are wire-format proto tags; do not renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
class ListMigrationWorkflowsRequest(proto.Message):
    r"""A request to list previously created migration workflows.
    Attributes:
        parent (str):
            Required. The project and location of the migration
            workflows to list. Example: ``projects/123/locations/us``
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            The list of fields to be retrieved.
        page_size (int):
            The maximum number of migration workflows to
            return. The service may return fewer than this
            number.
        page_token (str):
            A page token, received from previous
            ``ListMigrationWorkflows`` call. Provide this to retrieve
            the subsequent page.
            When paginating, all other parameters provided to
            ``ListMigrationWorkflows`` must match the call that provided
            the page token.
    """
    # Field numbers are wire-format proto tags; do not renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=4,
    )
class ListMigrationWorkflowsResponse(proto.Message):
    r"""Response object for a ``ListMigrationWorkflows`` call.
    Attributes:
        migration_workflows (MutableSequence[google.cloud.bigquery_migration_v2.types.MigrationWorkflow]):
            The migration workflows for the specified
            project / location.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page. If this field is omitted, there are no subsequent
            pages.
    """
    @property
    def raw_page(self):
        # Tells the client-library paginator this message is itself a page.
        return self
    # Field numbers are wire-format proto tags; do not renumber.
    migration_workflows: MutableSequence[
        migration_entities.MigrationWorkflow
    ] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=migration_entities.MigrationWorkflow,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
class DeleteMigrationWorkflowRequest(proto.Message):
    r"""A request to delete a previously created migration workflow.
    Attributes:
        name (str):
            Required. The unique identifier for the migration workflow.
            Example: ``projects/123/locations/us/workflows/1234``
    """
    # Field number is a wire-format proto tag; do not renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class StartMigrationWorkflowRequest(proto.Message):
    r"""A request to start a previously created migration workflow.
    Attributes:
        name (str):
            Required. The unique identifier for the migration workflow.
            Example: ``projects/123/locations/us/workflows/1234``
    """
    # Field number is a wire-format proto tag; do not renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class GetMigrationSubtaskRequest(proto.Message):
    r"""A request to get a previously created migration subtasks.
    Attributes:
        name (str):
            Required. The unique identifier for the migration subtask.
            Example:
            ``projects/123/locations/us/workflows/1234/subtasks/543``
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            Optional. The list of fields to be retrieved.
    """
    # Field numbers are wire-format proto tags; do not renumber.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
class ListMigrationSubtasksRequest(proto.Message):
    r"""A request to list previously created migration subtasks.
    Attributes:
        parent (str):
            Required. The migration task of the subtasks to list.
            Example: ``projects/123/locations/us/workflows/1234``
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            Optional. The list of fields to be retrieved.
        page_size (int):
            Optional. The maximum number of migration
            tasks to return. The service may return fewer
            than this number.
        page_token (str):
            Optional. A page token, received from previous
            ``ListMigrationSubtasks`` call. Provide this to retrieve the
            subsequent page.
            When paginating, all other parameters provided to
            ``ListMigrationSubtasks`` must match the call that provided
            the page token.
        filter (str):
            Optional. The filter to apply. This can be used to get the
            subtasks of a specific tasks in a workflow, e.g.
            ``migration_task = "ab012"`` where ``"ab012"`` is the task
            ID (not the name in the named map).
    """
    # Field numbers are wire-format proto tags; do not renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    read_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=4,
    )
    filter: str = proto.Field(
        proto.STRING,
        number=5,
    )
class ListMigrationSubtasksResponse(proto.Message):
    r"""Response object for a ``ListMigrationSubtasks`` call.
    Attributes:
        migration_subtasks (MutableSequence[google.cloud.bigquery_migration_v2.types.MigrationSubtask]):
            The migration subtasks for the specified
            task.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page. If this field is omitted, there are no subsequent
            pages.
    """
    @property
    def raw_page(self):
        # Tells the client-library paginator this message is itself a page.
        return self
    # Field numbers are wire-format proto tags; do not renumber.
    migration_subtasks: MutableSequence[
        migration_entities.MigrationSubtask
    ] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=migration_entities.MigrationSubtask,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
# Export every registered message name, sorted for a deterministic order.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "e8a120d789e855586f0568fec435b19f",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 106,
"avg_line_length": 30,
"alnum_prop": 0.6278588807785888,
"repo_name": "googleapis/python-bigquery-migration",
"id": "0b38c79c80a6bea786748f5b2d2143633b2a2a8d",
"size": "8820",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/bigquery_migration_v2/types/migration_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "715104"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
} |
import pyjd # this is dummy in pyjs.
from pyjamas import DOM
from pyjamas.ui.RootPanel import RootPanel, RootPanelCls, manageRootPanel
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.FocusPanel import FocusPanel
from pyjamas.Canvas.GWTCanvas import GWTCanvas
from pyjamas.Canvas.ImageLoader import loadImages
from pyjamas.Canvas import Color
#from pyjamas.Canvas2D import Canvas, CanvasImage, ImageLoadListener
from pyjamas.Timer import Timer
from pyjamas import Window
from pyjamas.ui import Event
from pyjamas.ui import KeyboardListener
from pyjamas.ui.KeyboardListener import KeyboardHandler
from pyjamas.ui.ClickListener import ClickHandler
from pyjamas.ui.Image import Image
import math
import pygwt
import random
# Gameplay tuning constants.
NUM_ASTEROIDS = 2  # starting asteroid count; grows by one after each cleared wave
FPS = 30  # frames per second driven by the Timer loop
ROTATE_SPEED_PER_SEC = math.pi  # ship rotation rate, radians per second
ROTATE_SPEED = ROTATE_SPEED_PER_SEC / FPS  # radians per frame
FRICTION=0.05  # per-frame deceleration applied while not thrusting
THRUST=0.2  # per-frame acceleration while thrusting
SPEED_MAX = 10  # NOTE(review): defined but never referenced in this file
MAX_ASTEROID_SPEED = 2.0  # max absolute initial asteroid velocity component
SHOT_LIFESPAN = 60  # frames before a shot expires
SHOT_COLOR = Color.Color('#fff')
SHOT_SPEED = 7.0  # pixels per frame added along the firing direction
SHOT_DELAY = 10  # cooldown frames between shots
ASTEROID_RADIUS = 45.0  # NOTE(review): unused here; radii come from ASTEROID_SIZES
ASTEROID_IMAGE_SIZE=180.0  # source sprite width/height in pixels
ASTEROID_SIZES = [90.0, 45.0, 22.0, 11.0]  # collision radius by size index
def randfloat(absval):
    """Return a uniform random float in [-absval, +absval]."""
    span = 2 * absval
    return random.random() * span - absval
def distsq(x1, y1, x2, y2):
    """Squared Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return dx * dx + dy * dy
class Asteroid:
    """A drifting, spinning asteroid.

    The size index selects the collision radius from ASTEROID_SIZES.
    When no position is supplied, the asteroid spawns at a random spot
    at least 180px away from the screen centre (the player's start).
    """
    def __init__(self, canvas, x=None, y=None, size=0):
        self.canvas = canvas
        if x is None or y is None:
            # Start at the centre so the rejection loop below runs at
            # least once, then keep re-rolling until far enough away.
            cx = canvas.width/2
            cy = canvas.height/2
            px, py = cx, cy
            while distsq(px, py, canvas.width / 2, canvas.height / 2) < (180*180):
                px = random.randint(0, canvas.width)
                py = random.randint(0, canvas.height)
            self.x = px
            self.y = py
        else:
            self.x = x
            self.y = y
        # Random drift velocity, heading and spin rate.
        self.dx = randfloat(MAX_ASTEROID_SPEED)
        self.dy = randfloat(MAX_ASTEROID_SPEED)
        self.rot = (random.random() * (2 * math.pi)) - math.pi
        self.rotspeed = (random.random() * 0.1) - 0.05
        self.size = size
        self.radius = ASTEROID_SIZES[self.size]
        self.scale = (self.radius / ASTEROID_IMAGE_SIZE) * 2
    def move(self):
        """Advance one frame: bounce off the edges, translate and spin."""
        hit_right = self.dx > 0 and self.x >= self.canvas.width
        hit_left = self.dx < 0 and self.x <= 0
        if hit_right or hit_left:
            self.dx = -self.dx
        hit_bottom = self.dy > 0 and self.y >= self.canvas.height
        hit_top = self.dy < 0 and self.y <= 0
        if hit_bottom or hit_top:
            self.dy = -self.dy
        self.x += self.dx
        self.y += self.dy
        self.rot += self.rotspeed
    def draw(self):
        """Render the sprite translated/rotated/scaled into place."""
        surface = self.canvas
        surface.saveContext()
        surface.translate(self.x, self.y)
        surface.rotate(self.rot)
        surface.scale(self.scale, self.scale)
        half = ASTEROID_IMAGE_SIZE / 2
        surface.drawImage(self.canvas.asteroid, -half, -half)
        surface.restoreContext()
class Shot:
    """A bullet fired from the ship.

    Dies after SHOT_LIFESPAN frames, or on hitting an asteroid (which it
    asks the canvas to destroy).
    """
    def __init__(self, canvas, x, y, dx, dy, dir):
        self.canvas = canvas
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy
        self.dir = dir
        self.lifespan = SHOT_LIFESPAN
    def move(self):
        """Advance one frame; return False when the shot should be removed."""
        self.lifespan -= 1
        if self.lifespan <= 0:
            return False
        # Shot velocity = inherited ship velocity + muzzle velocity along dir.
        self.x = self.x + self.dx + SHOT_SPEED * math.sin(self.dir)
        self.y = self.y + self.dy - SHOT_SPEED * math.cos(self.dir)
        for rock in self.canvas.asteroids:
            if distsq(self.x, self.y, rock.x, rock.y) < rock.radius * rock.radius:
                self.canvas.destroyAsteroid(rock)
                return False
        return True
    def draw(self, ctx):
        """Draw the shot as a 3x3 white square centred on (x, y)."""
        ctx.setFillStyle(SHOT_COLOR)
        ctx.fillRect(int(self.x - 1), int(self.y - 1), 3, 3)
class GameCanvas(GWTCanvas):
    """Main game surface: owns the ship state, asteroids and shots, and
    drives the game loop from a repeating Timer.

    NOTE(review): the player's ship lives directly on this class as
    xx/yy/dx/dy/rot attributes (see reset()); the author's own comment in
    reset() acknowledges this should be factored out.
    """
    def __init__(self, w, h):
        GWTCanvas.__init__(self, w, h)
        self.width = w
        self.height = h
        # Keyboard state flags, maintained by onKeyDown/onKeyUp via setKey().
        self.key_up = self.key_down = self.key_left = self.key_right = self.key_fire = False
        images = ['./images/Ship1.png', './images/Ship2.png', './images/Asteroid.png']
        loadImages(images, self)
        # Frames are no-ops until onImagesLoaded() flips this to True.
        self.run = False
        #self.ship = CanvasImage('images/Ship1.png')
        #self.ship_thrust = CanvasImage('images/Ship2.png')
        #self.asteroid = CanvasImage('images/Asteroid.png')
        #self.loader = ImageLoadListener()
        #self.loader.add(self.ship)
        #self.loader.add(self.ship_thrust)
        #self.loader.add(self.asteroid)
        self.num_asteroids = NUM_ASTEROIDS
        self.sinkEvents(Event.KEYEVENTS)
        # Kick off the timer loop immediately.
        self.onTimer()
    def onImagesLoaded(self, imagesHandles):
        # Callback from loadImages(): store sprite handles and start the game.
        print "loaded images", imagesHandles
        self.ship = imagesHandles[0]
        self.ship_thrust = imagesHandles[1]
        self.asteroid = imagesHandles[2]
        print "resize", self.width, self.height
        self.resize(self.width, self.height)
        self.reset()
        self.run = True
    def addTo(self, panel):
        # Attach to a panel and cache our absolute screen position.
        panel.add(self)
        self.top = DOM.getAbsoluteTop(self.getElement())
        self.left = DOM.getAbsoluteLeft(self.getElement())
    def onTimer(self, t=None):
        """One frame of the game loop, rescheduling itself first."""
        # Re-arm the timer before doing any work so the loop keeps ticking.
        Timer(int(1000/FPS), self)
        if not self.run:
            return
        self.advance()
        self.draw()
        return
        # NOTE(review): everything below is unreachable -- dead demo/drawing
        # code left behind the unconditional return above.
        self.saveContext()
        self.clear()
        self.translate(30, 30)
        self.setFillStyle(Color.Color("#fff"))
        self.setStrokeStyle(Color.Color("#fff"))
        self.beginPath()
        self.moveTo(25, 25)
        self.lineTo(105, 25)
        self.lineTo(25, 105)
        self.closePath()
        self.fill()
        self.beginPath()
        self.moveTo(125, 125)
        self.lineTo(125, 45)
        self.lineTo(45, 125)
        self.closePath()
        self.stroke()
        self.beginPath()
        self.moveTo(265, 265)
        self.lineTo(165, 265)
        self.lineTo(265, 165)
        self.lineTo(265, 265)
        self.fillRect(25, 165, 100, 100)
        self.setFillStyle(Color.BLACK)
        self.fillRect(45, 185, 60, 60)
        self.strokeRect(50, 190, 50, 50)
        self.rect(165, 25, 100, 100)
        self.stroke()
        self.restoreContext()
    def followMouse(self):
        """Steer one unit-step toward the last mouse position.

        NOTE(review): self.mouseX/self.mouseY are never assigned in this
        file and nothing calls this method -- it appears to be vestigial.
        """
        self.dx = self.mouseX - self.xx
        if self.dx != 0:
            self.dx = self.dx / math.fabs(self.dx)
        self.dy = self.mouseY - self.yy
        if self.dy != 0:
            self.dy = self.dy / math.fabs(self.dy)
    def keyboardMotion(self):
        """Translate key flags into rotation, thrust/friction and firing."""
        if self.key_left:
            self.rot -= ROTATE_SPEED
        if self.key_right:
            self.rot += ROTATE_SPEED
        # Keep the heading within [-pi, pi].
        if self.rot < 0-math.pi:
            self.rot += 2*math.pi
        elif self.rot > math.pi:
            self.rot -= 2*math.pi
        if self.key_up:
            # Accelerate along the current heading.
            self.dx += THRUST * math.sin(self.rot)
            self.dy -= THRUST * math.cos(self.rot)
        else:
            # Coasting: apply friction opposite the direction of travel,
            # snapping tiny velocities to zero.
            if math.fabs(self.dx) < 0.001 and math.fabs(self.dy) < 0.001:
                self.dx = 0
                self.dy = 0
            else:
                dir = math.atan2(self.dx, self.dy)
                self.dx -= FRICTION * math.sin(dir)
                self.dy -= FRICTION * math.cos(dir)
        if self.key_fire:
            self.checkAddShot()
    def setMotion(self):
        # Input currently comes only from the keyboard.
        self.keyboardMotion()
    def advance(self):
        """One simulation step: move asteroids and shots, apply input,
        then move and wrap the ship."""
        for a in self.asteroids:
            a.move()
            # Ship/asteroid collision: reset the game.
            if(distsq(self.xx, self.yy, a.x, a.y) < (a.radius * a.radius)):
                self.destroyShip()
        # NOTE(review): removeShot() mutates self.shots while this loop
        # iterates it, which can skip the following shot -- confirm.
        for s in self.shots:
            if not s.move():
                self.removeShot(s)
        self.shot_delay -= 1
        self.setMotion()
        self.xx += self.dx
        self.yy += self.dy
        # Wrap the ship around the screen edges.
        if self.dx > 0 and self.xx >= self.width:
            self.xx -= self.width
        elif self.dx < 0 and self.xx < 0:
            self.xx += self.width
        # NOTE(review): the next test reads self.yy > 0 where the pattern
        # above suggests self.dy > 0 was intended -- confirm.
        if self.yy > 0 and self.yy >= self.height:
            self.yy -= self.height
        elif self.dy < 0 and self.yy < 0:
            self.yy += self.height
    def setKey(self, k, set):
        """Record key state; `set` is True on key-down, False on key-up."""
        DOM.eventPreventDefault(DOM.eventGetCurrentEvent())
        if k == KeyboardListener.KEY_UP:
            self.key_up = set
        elif k == KeyboardListener.KEY_DOWN:
            self.key_down = set
        elif k == KeyboardListener.KEY_LEFT:
            self.key_left = set
        elif k == KeyboardListener.KEY_RIGHT:
            self.key_right = set
        elif k == 32:
            # 32 == space bar: fire.
            self.key_fire = set
    def onKeyPress(self, sender, keyCode, modifiers = None):
        # Key repeat is ignored; state is tracked via down/up instead.
        pass
        #self.setKey(keyCode, True)
    def onKeyDown(self, sender, keyCode, modifiers = None):
        self.setKey(keyCode, True)
    def onKeyUp(self, sender, keyCode, modifiers = None):
        self.setKey(keyCode, False)
    def checkAddShot(self):
        """Spawn a shot at the ship if the fire cooldown has elapsed."""
        if self.shot_delay > 0:
            return
        if self.key_fire:
            s = Shot(self, self.xx, self.yy, self.dx, self.dy, self.rot)
            self.shots.append(s)
            self.shot_delay = SHOT_DELAY
    def destroyAsteroid(self, a):
        """Split `a` into two smaller rocks (or vaporize the smallest);
        start a bigger wave once the field is cleared."""
        self.asteroids.remove(a)
        if a.size < len(ASTEROID_SIZES) - 1:
            for i in range(2):
                self.asteroids.append(Asteroid(self, a.x, a.y, a.size + 1))
        if len(self.asteroids) <= 0:
            self.num_asteroids += 1
            self.reset()
    def removeShot(self, s):
        # Guard against double-removal (the shot may already be gone).
        if s in self.shots:
            self.shots.remove(s)
    def destroyShip(self):
        # Ship destroyed: restart at the initial wave size.
        self.num_asteroids = NUM_ASTEROIDS
        self.reset()
    def reset(self):
        """Start a new wave: fresh asteroids and a centred, motionless ship."""
        self.asteroids = []
        self.shots = []
        self.shot_delay = 0
        for a in range(self.num_asteroids):
            self.asteroids.append(Asteroid(self))
        # The one thing that really needs to change before going any further
        # is the player's ship being defined solely as members of this canvas
        # class. It's bad, and comes from having done this whole thing very
        # organically starting from just noodling around with Pyjamas.
        self.xx = self.width/2
        self.yy = self.height/2
        self.dx = 0
        self.dy = 0
        self.rot = 0
        self.speed = 0
    def draw(self):
        """Repaint the frame: background, asteroids, shots, then the ship."""
        #if not self.loader.isLoaded():
        # return
        self.setFillStyle(Color.Color('#000'))
        self.fillRect(0,0,self.width,self.height)
        for a in self.asteroids:
            a.draw()
        for s in self.shots:
            s.draw(self)
        self.saveContext()
        self.translate(self.xx, self.yy)
        self.rotate(self.rot)
        # Show the thrust sprite while the up key is held.
        if self.key_up:
            img = self.ship_thrust
        else:
            img = self.ship
        self.drawImage(img, -15, -12)
        self.restoreContext()
class RootPanelListener(RootPanelCls, KeyboardHandler, ClickHandler):
    """Root panel that toggles keyboard focus of its parent widget on click.

    NOTE(review): this class does not appear to be instantiated anywhere
    in this file.
    """
    def __init__(self, Parent, *args, **kwargs):
        self.Parent = Parent
        self.focussed = False
        RootPanelCls.__init__(self, *args, **kwargs)
        ClickHandler.__init__(self)
        KeyboardHandler.__init__(self)
        self.addClickListener(self)
    def onClick(self, Sender):
        # Toggle focus so a second click releases the keyboard.
        self.focussed = not self.focussed
        self.Parent.setFocus(self.focussed)
if __name__ == '__main__':
    # pyjd is a no-op stub in the browser build; setup/run only matter
    # when running under pyjamas-desktop.
    pyjd.setup("public/Space.html")
    c = GameCanvas(800, 600)
    # Wrap the canvas in a FocusPanel so it can receive keyboard events.
    panel = FocusPanel(Widget=c)
    RootPanel().add(panel)
    panel.addKeyboardListener(c)
    panel.setFocus(True)
    RootPanel().add(HTML("""
    <hr/>
    Left/Right arrows turn, Up key thrusts, Space bar fires<br/>
    <a href="http://rumsey.org/blog/?p=215">About Space Game</a> by <a href="http://rumsey.org/blog/">Ogre</a><br/>
    Written entirely in Python, using <a href="http://pyj.be/">Pyjamas</a></br>
    Copyright © 2009 Joe Rumsey
    """))
    #c.getElement().focus()
    pyjd.run()
| {
"content_hash": "9ce28f025cbe2da3c4e3821bf02cd95d",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 111,
"avg_line_length": 30.25581395348837,
"alnum_prop": 0.5716969852250405,
"repo_name": "minghuascode/pyj",
"id": "0bcdb9eba73413f4e2a701336cff39d3c9e3f462",
"size": "12324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/asteroids/Space.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "107608"
},
{
"name": "JavaScript",
"bytes": "116371"
},
{
"name": "PHP",
"bytes": "5473"
},
{
"name": "Python",
"bytes": "7572605"
},
{
"name": "Shell",
"bytes": "24231"
}
],
"symlink_target": ""
} |
from tower import ugettext_lazy as _lazy
# Translatable descriptions for each minimum-age rating tier.
DESC_GENERAL = _lazy(u'General Audiences')
DESC_3 = _lazy(u'Not recommended for users younger than 3 years of age')
DESC_6 = _lazy(u'Not recommended for users younger than 6 years of age')
DESC_7 = _lazy(u'Not recommended for users younger than 7 years of age')
DESC_10 = _lazy(u'Not recommended for users younger than 10 years of age')
DESC_12 = _lazy(u'Not recommended for users younger than 12 years of age')
DESC_13 = _lazy(u'Not recommended for users younger than 13 years of age')
DESC_14 = _lazy(u'Not recommended for users younger than 14 years of age')
DESC_16 = _lazy(u'Not recommended for users younger than 16 years of age')
DESC_17 = _lazy(u'Not recommended for users younger than 17 years of age')
DESC_18 = _lazy(u'Not recommended for users younger than 18 years of age')
DESC_REJECTED = _lazy(u'Rejected for All Audiences')
# Lookup: age threshold (as a string) or 'X' (rejected) -> description.
RATING_DESCS = {
    '0': DESC_GENERAL,
    '3': DESC_3,
    '6': DESC_6,
    '7': DESC_7,
    '10': DESC_10,
    '12': DESC_12,
    '13': DESC_13,
    '14': DESC_14,
    '16': DESC_16,
    '17': DESC_17,
    '18': DESC_18,
    'X': DESC_REJECTED,
}
class RATING(object):
    """
    Content rating (base class; concrete ratings set class attributes).

    name -- how we name the rating, for translated display on all pages.
    description -- for general translated display on consumer pages.
    iarc_name -- how IARC names the rating, to talk with IARC.
    slug -- for CSS classes, to create icons. Dynamic, generated for most.
    """
class RATING_BODY(object):
    """
    Content rating body (base class; concrete bodies set class attributes).

    name -- for general translated display on all pages.
    description -- for general translated display on all pages.
    iarc_name -- how IARC names the ratings body, to talk with IARC.
    ratings -- list of RATINGs associated with this body.
    full_name -- in case we ever want to display the full translated name.
    url -- in case we ever want to link to the ratings body page for more info.
    """
# CLASSIND (Brazil) rating tiers; `id` is the ordinal within this body.
class CLASSIND_L(RATING):
    id = 0
    name = '0+'
    description = RATING_DESCS['0']
    iarc_name = '0+'
class CLASSIND_10(RATING):
    id = 1
    name = '10+'
    description = RATING_DESCS['10']
    iarc_name = '10+'
class CLASSIND_12(RATING):
    id = 2
    name = '12+'
    description = RATING_DESCS['12']
    iarc_name = '12+'
class CLASSIND_14(RATING):
    id = 3
    name = '14+'
    description = RATING_DESCS['14']
    iarc_name = '14+'
class CLASSIND_16(RATING):
    id = 4
    name = '16+'
    description = RATING_DESCS['16']
    iarc_name = '16+'
class CLASSIND_18(RATING):
    id = 5
    name = '18+'
    description = RATING_DESCS['18']
    iarc_name = '18+'
class CLASSIND(RATING_BODY):
    """
    The Brazilian game ratings body (aka. DEJUS, DJCTQ).
    """
    id = 0
    name = 'CLASSIND'
    description = _lazy(u'Brazil')
    iarc_name = 'CLASSIND'
    ratings = (CLASSIND_L, CLASSIND_10, CLASSIND_12, CLASSIND_14, CLASSIND_16,
               CLASSIND_18)
    full_name = _lazy(u'Department of Justice, Rating, Titles and '
                      u'Qualification')
    url = ('http://portal.mj.gov.br/classificacao/data/Pages/'
           'MJ6BC270E8PTBRNN.htm')
# Generic rating tiers (used where no country-specific body applies).
class GENERIC_3(RATING):
    id = 0
    name = '3+'
    description = RATING_DESCS['3']
    iarc_name = '3+'
class GENERIC_7(RATING):
    id = 1
    name = '7+'
    description = RATING_DESCS['7']
    iarc_name = '7+'
class GENERIC_12(RATING):
    id = 2
    name = '12+'
    description = RATING_DESCS['12']
    iarc_name = '12+'
class GENERIC_16(RATING):
    id = 3
    name = '16+'
    description = RATING_DESCS['16']
    iarc_name = '16+'
class GENERIC_18(RATING):
    id = 4
    name = '18+'
    description = RATING_DESCS['18']
    iarc_name = '18+'
class GENERIC(RATING_BODY):
    """
    The generic game ratings body (used in Germany, for example).
    """
    id = 1
    name = _lazy('Generic')
    description = ''  # No comment.
    iarc_name = 'Generic'
    ratings = (GENERIC_3, GENERIC_7, GENERIC_12, GENERIC_16, GENERIC_18)
    full_name = _lazy(u'Generic')
# USK (Germany) rating tiers; `id` is the ordinal within this body.
class USK_0(RATING):
    id = 0
    name = '0+'
    description = RATING_DESCS['0']
    iarc_name = '0+'
class USK_6(RATING):
    id = 1
    name = '6+'
    description = RATING_DESCS['6']
    iarc_name = '6+'
class USK_12(RATING):
    id = 2
    name = '12+'
    description = RATING_DESCS['12']
    iarc_name = '12+'
class USK_16(RATING):
    id = 3
    name = '16+'
    description = RATING_DESCS['16']
    iarc_name = '16+'
class USK_18(RATING):
    id = 4
    name = '18+'
    description = RATING_DESCS['18']
    iarc_name = '18+'
class USK_REJECTED(RATING):
    id = 5
    name = _lazy('Rating Rejected')
    description = RATING_DESCS['X']
    iarc_name = 'Rating Rejected'
class USK(RATING_BODY):
    """
    The organization responsible for game ratings in Germany
    (aka. Unterhaltungssoftware Selbstkontrolle).
    """
    id = 2
    name = 'USK'
    description = _lazy(u'Germany')
    iarc_name = 'USK'
    ratings = (USK_0, USK_6, USK_12, USK_16, USK_18, USK_REJECTED)
    full_name = _lazy(u'Entertainment Software Self-Regulation Body')
    url = 'http://www.usk.de/en/'
# ESRB (North America) rating tiers; unlike the other bodies these set an
# explicit `slug` instead of relying on the dynamically generated one.
class ESRB_E(RATING):
    """Everybody."""
    id = 0
    name = _lazy('Everyone')
    description = RATING_DESCS['0']
    iarc_name = 'Everyone'
    slug = '0'
class ESRB_10(RATING):
    id = 1
    name = _lazy('Everyone 10+')  # L10n: `10+` is age ten and over.
    slug = '10'
    description = RATING_DESCS['10']
    iarc_name = 'Everyone 10+'
class ESRB_T(RATING):
    id = 2
    name = _lazy('Teen')
    slug = '13'
    description = RATING_DESCS['13']
    iarc_name = 'Teen'
class ESRB_M(RATING):
    id = 3
    name = _lazy('Mature 17+')  # L10n: `17+` is age seventeen and over.
    slug = '17'
    description = RATING_DESCS['17']
    iarc_name = 'Mature 17+'
class ESRB_A(RATING):
    id = 4
    name = _lazy('Adults Only 18+')  # L10n: `18+` is age eighteen and over.
    slug = '18'
    description = RATING_DESCS['18']
    iarc_name = 'Adults Only'
class ESRB_RP(RATING):
    # NOTE(review): `id = 4` duplicates ESRB_A.id above, and the description
    # reuses the 18+ text for a "Rating Pending" tier. ESRB_RP is also absent
    # from ESRB.ratings below. Confirm whether this is intentional before
    # changing: ids may be persisted elsewhere.
    id = 4
    name = _lazy('Rating Pending')
    slug = 'pending'
    description = RATING_DESCS['18']
    iarc_name = 'Rating Pending'
class ESRB(RATING_BODY):
    """
    The North American game ratings body (i.e. USA, Canada).
    """
    id = 3
    name = 'ESRB'
    description = _lazy(u'N. America')  # L10n: `N.` stands for North.
    iarc_name = 'ESRB'
    ratings = (ESRB_E, ESRB_10, ESRB_T, ESRB_M, ESRB_A)
    full_name = _lazy(u'Entertainment Software Rating Board')
    url = 'http://esrb.org'
class PEGI_3(RATING):
id = 0
name = '3+'
description = RATING_DESCS['3']
iarc_name = '3+'
class PEGI_7(RATING):
    """PEGI rating: suitable for ages seven and up."""
    iarc_name = '7+'
    id = 1
    description = RATING_DESCS['7']
    name = '7+'
class PEGI_12(RATING):
    """PEGI rating: suitable for ages twelve and up."""
    iarc_name = '12+'
    id = 2
    description = RATING_DESCS['12']
    name = '12+'
class PEGI_16(RATING):
    """PEGI rating: suitable for ages sixteen and up."""
    iarc_name = '16+'
    id = 3
    description = RATING_DESCS['16']
    name = '16+'
class PEGI_18(RATING):
    """PEGI rating: adults only (eighteen and up)."""
    iarc_name = '18+'
    id = 4
    description = RATING_DESCS['18']
    name = '18+'
class PEGI(RATING_BODY):
    """European games-rating body (e.g. UK, Poland, Spain)."""
    id = 4
    iarc_name = 'PEGI'
    name = 'PEGI'
    full_name = _lazy(u'Pan European Game Information')
    description = _lazy(u'Europe')
    url = 'http://www.pegi.info'
    ratings = (PEGI_3, PEGI_7, PEGI_12, PEGI_16, PEGI_18)
# Registry of every supported ratings body, keyed by its numeric id.
RATINGS_BODIES = {
    body.id: body for body in (CLASSIND, GENERIC, USK, ESRB, PEGI)
}
# Give every rating class a back-reference to the body that issues it.
for rb in RATINGS_BODIES.values():
    for r in rb.ratings:
        setattr(r, 'ratingsbody', rb)
def ALL_RATINGS():
    """Return the ratings of every enabled body.

    CLASSIND and GENERIC are always included; the IARC bodies are gated
    behind the 'iarc' waffle switch, so this must be evaluated at call
    time rather than at import time.
    """
    import waffle
    collected = []
    for body in RATINGS_BODIES.values():
        if body in (CLASSIND, GENERIC) or waffle.switch_is_active('iarc'):
            collected.extend(body.ratings)
    return collected
def RATINGS_BY_NAME():
    """Build form choices as (index-into-ALL_RATINGS, 'BODY - RATING').

    Done lazily (per call) because it concatenates two lazy translations,
    which must happen after the locale is known, and because the set of
    enabled bodies depends on the 'iarc' waffle switch.
    """
    import waffle
    all_ratings = ALL_RATINGS()
    choices = []
    for body in RATINGS_BODIES.values():
        if body not in (CLASSIND, GENERIC) and not waffle.switch_is_active('iarc'):
            continue
        for rating in body.ratings:
            label = u'%s - %s' % (body.name, rating.name)
            choices.append((all_ratings.index(rating), label))
    return choices
| {
"content_hash": "2e349a299164be3287254deb492ee146",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 79,
"avg_line_length": 22.233766233766232,
"alnum_prop": 0.5974299065420561,
"repo_name": "Joergen/zamboni",
"id": "a751b386f46529e383fccc63433f16dac2c5c9a3",
"size": "8584",
"binary": false,
"copies": "1",
"ref": "refs/heads/uge43",
"path": "mkt/constants/ratingsbodies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "608838"
},
{
"name": "JavaScript",
"bytes": "1750529"
},
{
"name": "Perl",
"bytes": "565"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6063534"
},
{
"name": "Ruby",
"bytes": "1865"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
} |
import ctypes as ct
import pandas as pd
import numpy as np
import random, csv, sys, os
import math
import time
np.set_printoptions(suppress=True)
from statistics import median
import classes_water as ENC
import classes_power as ODC
import classes_interconnection as ICC
import grb_solvers
from comtypes import automation
import win32com.client
# from win32com.client import makepy
# sys.argv = ['makepy', 'OpenDSSEngine.DSS']
# makepy.main()
from operator import itemgetter
def main(dss_debug, write_cols, power_df, water_df, pipe_fail_id):
os_username = os.getlogin()
# --------------
# READ CSV FILES
# --------------
csv_curve = pd.read_csv('./data_water/network-water/2000curve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_junction = pd.read_csv('./data_water/network-water/2100junction.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_reservoir = pd.read_csv('./data_water/network-water/2101reservoir.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_tank = pd.read_csv('./data_water/network-water/2102tank.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pipe = pd.read_csv('./data_water/network-water/2200pipe.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pump = pd.read_csv('./data_water/network-water/2201pump.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_valve = pd.read_csv('./data_water/network-water/2202valve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_xycurve = pd.read_csv('./data_power/network-power/1000xycurve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_regcontrol = pd.read_csv('./data_power/network-power/1100regcontrol.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_wiredata = pd.read_csv('./data_power/network-power/1200wiredata.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_linecode = pd.read_csv('./data_power/network-power/1201linecode.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_bus = pd.read_csv('./data_power/network-power/1300bus.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_vsource = pd.read_csv('./data_power/network-power/1301vsource.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_generator = pd.read_csv('./data_power/network-power/1302generator.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_load = pd.read_csv('./data_power/network-power/1303load.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_solarpv = pd.read_csv('./data_power/network-power/1304solarpv.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_windturbine = pd.read_csv('./data_power/network-power/1305windturbine.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_directconnection = pd.read_csv('./data_power/network-power/1400directconnection.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_cable = pd.read_csv('./data_power/network-power/1401cable.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_overheadline = pd.read_csv('./data_power/network-power/1402overheadline.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_twowindingtransformer = pd.read_csv('./data_power/network-power/1403twowindingtransformer.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_capacitor = pd.read_csv('./data_power/network-power/1404capacitor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_reactor = pd.read_csv('./data_power/network-power/1405reactor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
# csv_allcolumns= pd.read_csv('./data_power/network-power/allcolumns.csv', sep=',', header=1, index_col=None, dtype=np.float64)
# -----------------
# CREATE COMPONENTS
# -----------------
object_curve = ENC.Curve(csv_curve)
object_junction = ENC.Junction(csv_junction)
object_reservoir = ENC.Reservoir(csv_reservoir)
object_tank = ENC.Tank(csv_tank)
object_pipe = ENC.Pipe(csv_pipe)
object_pump = ENC.Pump(csv_pump)
object_valve = ENC.Valve(csv_valve)
object_xycurve = ODC.XYCurve(csv_xycurve)
object_regcontrol = ODC.RegControl(csv_regcontrol)
object_wiredata = ODC.WireData(csv_wiredata)
object_linecode = ODC.LineCode(csv_linecode)
object_bus = ODC.Bus(csv_bus)
object_vsource = ODC.VSource(csv_vsource)
object_generator = ODC.Generator(csv_generator)
object_load = ODC.Load(csv_load)
object_solarpv = ODC.SolarPV(csv_solarpv)
object_windturbine = ODC.WindTurbine(csv_windturbine, object_xycurve)
object_directconnection = ODC.DirectConnection(csv_directconnection)
object_cable = ODC.Cable(csv_cable)
object_overheadline = ODC.OverheadLine(csv_overheadline)
object_twowindingtransformer = ODC.TwoWindingTransformer(csv_twowindingtransformer)
object_capacitor = ODC.Capacitor(csv_capacitor)
object_reactor = ODC.Reactor(csv_reactor)
# -----------------------
# ADD COMPONENTS TO LISTS
# -----------------------
w_object_list = [object_junction, object_reservoir, object_tank, # Water NODES
object_pipe, object_pump, object_valve, # Water LINKS
object_curve] # Water SYSTEM OPS
object_list = [object_vsource, object_bus, object_generator, object_load, object_solarpv, object_windturbine, #NODES
object_xycurve, object_wiredata, object_linecode, #OTHERS
object_directconnection, object_cable, object_overheadline, object_twowindingtransformer, object_capacitor, object_reactor, # CONNECTIONS
object_regcontrol] # CONTROLS
interconn_dict = {'generator': object_generator, 'load': object_load, 'pump': object_pump, 'junction': object_junction}
# ---------
# RUN EPANET and OPENDSS
# ---------
def run_EPANET():
    """Serialize the current water-network state to an EPANET .inp file,
    run a hydraulic analysis through the EPANET toolkit DLL, read the
    results back into the water objects, and return the flattened
    input/output lists and tensors.

    Uses from the enclosing scope: os_username, w_object_list,
    interconn_dict.
    """
    # --- Build the EPANET input deck, section by section. ---
    filedir = 'C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/en-inputs/en-input.inp'
    with open(filedir, 'w', newline='\n') as csvfile:
        writer = csv.writer(csvfile, delimiter=' ')
        templist = ['[TITLE]']
        writer.writerow(templist)
        writer.writerow('')
        # Every component (junctions, pipes, pumps, ...) writes its own
        # .inp stanzas.
        for water_object in w_object_list:
            water_object.createAllEN(writer, interconn_dict)
        templist = ['[ENERGY]']
        writer.writerow(templist)
        templist = ['Global', 'Efficiency', 75]
        writer.writerow(templist)
        templist = ['Global', 'Price', 0]
        writer.writerow(templist)
        templist = ['Demand', 'Charge', 0]
        writer.writerow(templist)
        writer.writerow('')
        templist = ['[REACTIONS]']
        writer.writerow(templist)
        templist = ['Order', 'Bulk', 1]
        writer.writerow(templist)
        templist = ['Order', 'Tank', 1]
        writer.writerow(templist)
        templist = ['Order', 'Wall', 1]
        writer.writerow(templist)
        templist = ['Global', 'Bulk', 0]
        writer.writerow(templist)
        templist = ['Global', 'Wall', 0]
        writer.writerow(templist)
        templist = ['Limiting', 'Potential', 0]
        writer.writerow(templist)
        templist = ['Roughness', 'Correlation', 0]
        writer.writerow(templist)
        writer.writerow('')
        templist = ['[TIMES]']
        writer.writerow(templist)
        # Duration 0:00:00 => a single-period (steady-state) hydraulic run.
        templist = ['Duration', '0:00:00']
        writer.writerow(templist)
        templist = ['Hydraulic', 'Timestep', '0:00:01']
        writer.writerow(templist)
        templist = ['Quality', 'Timestep', '0:06']
        writer.writerow(templist)
        templist = ['Pattern', 'Timestep', '1:00']
        writer.writerow(templist)
        templist = ['Pattern', 'Start', '0:00']
        writer.writerow(templist)
        templist = ['Report', 'Timestep', '1:00']
        writer.writerow(templist)
        templist = ['Report', 'Start', '0:00']
        writer.writerow(templist)
        templist = ['Start', 'ClockTime', 12, 'am']
        writer.writerow(templist)
        templist = ['Statistic', 'NONE']
        writer.writerow(templist)
        writer.writerow('')
        templist = ['[REPORT]']
        writer.writerow(templist)
        templist = ['Status', 'No']
        writer.writerow(templist)
        templist = ['Summary', 'No']
        writer.writerow(templist)
        templist = ['Page', 0]
        writer.writerow(templist)
        writer.writerow('')
        templist = ['[OPTIONS]']
        writer.writerow(templist)
        templist = ['Units', 'GPM'] #GPM is US Customary units
        writer.writerow(templist)
        templist = ['Headloss', 'C-M']
        writer.writerow(templist)
        templist = ['Specific', 'Gravity', 1]
        writer.writerow(templist)
        templist = ['Viscosity', 1]
        writer.writerow(templist)
        templist = ['Trials', 40]
        writer.writerow(templist)
        templist = ['Accuracy', 0.001]
        writer.writerow(templist)
        templist = ['CHECKFREQ', 2]
        writer.writerow(templist)
        templist = ['MAXCHECK', 10]
        writer.writerow(templist)
        templist = ['DAMPLIMIT', 0]
        writer.writerow(templist)
        templist = ['Unbalanced', 'Continue', 10]
        writer.writerow(templist)
        templist = ['Pattern', 1]
        writer.writerow(templist)
        templist = ['Demand', 'Multiplier', 1.0]
        writer.writerow(templist)
        templist = ['Emitter', 'Exponent', 0.5]
        writer.writerow(templist)
        templist = ['Quality', 'None', 'mg/L']
        writer.writerow(templist)
        templist = ['Diffusivity', 1]
        writer.writerow(templist)
        templist = ['Tolerance', 0.01]
        writer.writerow(templist)
        writer.writerow('')
        templist=['[END]']
        writer.writerow(templist)
    # --- Run the hydraulic analysis through the EPANET 2 toolkit DLL. ---
    epalib = ct.cdll.LoadLibrary('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/epanet2mingw64.dll')
    # Byte objects
    en_input_file = ct.c_char_p(filedir.encode('utf-8'))
    en_report_file = ct.c_char_p(str('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/en-outputs/out.rpt').encode('utf-8'))
    en_byte_file = ct.c_char_p(''.encode('utf-8'))
    # Send strings as char* to the epalib function
    errorcode = epalib.ENopen(en_input_file, en_report_file, en_byte_file)
    if errorcode != 0:
        print(1, 'ERRORCODE is', errorcode)
    errorcode = epalib.ENopenH()
    if errorcode != 0:
        print(2, 'ERRORCODE is', errorcode)
    init_flag = ct.c_int(0)
    errorcode = epalib.ENinitH(init_flag)
    if errorcode != 0:
        print(3, 'ERRORCODE is', errorcode)
    time = ct.pointer(ct.c_long(1))
    timestep = ct.pointer(ct.c_long(1))
    # Step the hydraulic solver; ENnextH reports 0 remaining time when the
    # simulation is complete.
    while True:
        errorcode = epalib.ENrunH(time)
        if errorcode != 0:
            pass
            # print(4, 'ERRORCODE is', errorcode)
        errorcode = epalib.ENnextH(timestep)
        if errorcode != 0:
            print(5, 'ERRORCODE is', errorcode)
        if timestep.contents.value == 0:
            break
    # Pull node/link results from the toolkit into the Python objects.
    for water_object in w_object_list:
        water_object.readAllENoutputs(epalib)
    errorcode = epalib.ENcloseH()
    if errorcode != 0:
        print(6, 'ERRORCODE is', errorcode)
    errorcode = epalib.ENclose()
    if errorcode != 0:
        print(7, 'ERRORCODE is', errorcode)
    # --- Flatten every object's state into input/output lists/tensors. ---
    input_list_continuous = []
    input_list_categorical = []
    input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
    input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
    for object in w_object_list:
        list_continuous, list_categorical, tensor_continuous, tensor_categorical = object.convertToInputTensor()
        input_list_continuous = input_list_continuous + list_continuous
        input_list_categorical = input_list_categorical + list_categorical
        input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
        input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)
    output_list = []
    output_tensor = np.empty([0,0], dtype=np.float64).flatten()
    for object in w_object_list:
        o_list, o_tensor = object.convertToOutputTensor()
        output_list = output_list + o_list
        output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
    return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
def run_OpenDSS(dss_debug, solverFlag):
    """Build the OpenDSS circuit from current component state over COM,
    solve the power flow, and read results back into the power objects.

    When solverFlag is True, return only the circuit's total real losses
    in kW; otherwise return the flattened input/output lists and tensors.

    Uses from the enclosing scope: os_username, object_list,
    interconn_dict.
    """
    # SET SOURCEBUS
    # VsourceClass.sourcebus = vsourceobj.id[1]
    dssObj = win32com.client.Dispatch('OpenDSSEngine.DSS') # OPENDSS COMPORT
    dssObj.AllowForms = False
    dssText = dssObj.Text
    dssCkt = dssObj.ActiveCircuit
    dssSolution = dssCkt.Solution
    dssActvElem = dssCkt.ActiveCktElement
    dssActvBus = dssCkt.ActiveBus
    dssText.Command = 'Clear'
    dssText.Command = 'Set DataPath=\'C:\\Users\\'+os_username+'\\Documents\\OpenDSS'
    dssText.Command = 'Set DefaultBaseFrequency=60'
    # Each component emits its own OpenDSS definition commands.
    for object in object_list:
        object.createAllDSS(dssText, interconn_dict, dss_debug)
    # Collect every distinct voltage level present in the network.
    set_voltagebase = set()
    for object in object_list:
        set_voltagebase = set_voltagebase | object.voltagesToSets()
    dssText.Command = 'Set VoltageBases={}'.format(list(set_voltagebase))
    dssText.Command = 'CalcVoltageBases'
    dssText.Command = 'Solve BaseFrequency=60 MaxIter=300'
    # Reusable VARIANT buffers for the COM read-back calls.
    variant_buses = automation.VARIANT()
    variant_voltages_mag = automation.VARIANT()
    variant_voltages_pu = automation.VARIANT()
    variant_currents = automation.VARIANT()
    variant_powers = automation.VARIANT()
    for object in object_list:
        object.readAllDSSOutputs(dssCkt, dssActvElem, dssActvBus, variant_buses, variant_voltages_mag, variant_voltages_pu, variant_currents, variant_powers)
    if solverFlag == False:
        # dssText.Command = 'Save Circuit'
        # dssText.Command = 'Export Summary (summary.csv)'
        # dssText.Command = 'Export Currents (currents.csv)'
        # dssText.Command = 'Export Voltages (voltages.csv)'
        # dssText.Command = 'Export Overloads (overloads.csv)'
        # dssText.Command = 'Export Powers kVA (powers.csv)'
        # Flatten every object's state into input/output lists/tensors.
        input_list_continuous = []
        input_list_categorical = []
        input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
        input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
        for object in object_list:
            list_continuous, list_categorical, tensor_continuous, tensor_categorical = object.convertToInputTensor()
            input_list_continuous = input_list_continuous + list_continuous
            input_list_categorical = input_list_categorical + list_categorical
            input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
            input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)
        output_list = []
        output_tensor = np.empty([0,0], dtype=np.float64).flatten()
        for object in object_list:
            o_list, o_tensor = object.convertToOutputTensor()
            output_list = output_list + o_list
            output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
        return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
    else:
        # Losses come back as (real_W, reactive_var); convert W -> kW.
        losses = dssCkt.Losses
        return float(losses[0])*0.001 # kW
# SIM STEP 0: SOLVE PUMP POWER CONSUMPTION
# ----------------------------------------
for pipe in object_pipe.matrix:
if pipe[ENC.Pipe.ID] in [pipe_fail_id]:
pipe[ENC.Pipe.OPERATIONAL_STATUS] = 0.0
base_curve_matrix = np.array(object_curve.matrix, copy=True)
base_junction_matrix = np.array(object_junction.matrix, copy=True)
base_reservoir_matrix = np.array(object_reservoir.matrix, copy=True)
base_tank_matrix = np.array(object_tank.matrix, copy=True)
base_pipe_matrix = np.array(object_pipe.matrix, copy=True)
base_pump_matrix = np.array(object_pump.matrix, copy=True)
base_valve_matrix = np.array(object_valve.matrix, copy=True)
artificial_reservoir_id_shift = 1000.0
max_groundwater_flow = 12399.0 # GPM
groundwater_id_shift = 2000.0
# Scale reservoir heads using water_df
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] == 21.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 864.92*water_df + 817.08)
elif reservoir[ENC.Reservoir.ID] == 22.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 951.11*water_df + 898.89)
elif reservoir[ENC.Reservoir.ID] == 23.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 668.35*water_df + 631.65)
groundwater_list = []
map_to_groundwater_reservoir = {}
map_to_groundwater_pipe = {}
# Track real reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] >= 3000.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
if reservoir[ENC.Reservoir.ID] == pipe[ENC.Pipe.ID]:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# WARNING THIS IS HARDCODED
elif reservoir[ENC.Reservoir.ID] == 23.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
# WARNING THIS IS HARDCODED
if pipe[ENC.Pipe.ID] == 36.0:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# Loop real reservoirs, turn off the ones with water inflow
while len(groundwater_list) > 0:
# Initialize the relevant demand junctions
demand_list = []
map_to_junction = {}
map_to_junction_groundwater = {}
map_to_reservoir = {}
map_to_pipe = {}
# Track demand junctions
for junction in object_junction.matrix:
if junction[ENC.Junction.BASE_DEMAND_AVERAGE] > 0.0:
demand_list.append(junction[ENC.Junction.ID])
for junction in object_junction.matrix:
if junction[ENC.Junction.ID] in demand_list:
map_to_junction[junction[ENC.Junction.ID]] = junction
found_junction_groundwater = 0
for junction_groundwater in object_junction.matrix:
if junction[ENC.Junction.ID] + groundwater_id_shift == junction_groundwater[ENC.Junction.ID]:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction_groundwater
found_junction_groundwater = 1
if found_junction_groundwater == 0:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction
# Track artificial reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift in demand_list:
map_to_reservoir[reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift] = reservoir
# Track pipes for artificial reservoirs
for pipe in object_pipe.matrix:
if pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift in demand_list:
map_to_pipe[pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift] = pipe
# Reset demand junction demands to 0
for junction_id in demand_list:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = 0.0
# Begin EPANET pressure-driven analysis for artificial reservoirs
while len(demand_list) > 0:
# Close artificial reservoirs' pipes
for junction_id in demand_list:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
# Set valve pressure loss to 0
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 2.0
valve[ENC.Valve.SETTING] = 0.0
run_EPANET()
# Open demand junctions with positive pressure ratio
# Can take multiple iterations
pos_pres_bool = True
while pos_pres_bool:
pos_pres_bool = False
max_pres_id = demand_list[0]
for junction_id in demand_list:
if max(map_to_junction[junction_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[junction_id][ENC.Junction.PRESSURE]) > map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] and map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
max_pres_id = junction_id
# this uses the MINIMUM ALLOWABLE PRESSURE
if max(map_to_junction[max_pres_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[max_pres_id][ENC.Junction.PRESSURE]) > (map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] - 0.01) and map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] = 1.0
pos_pres_bool = True
run_EPANET()
# Set flow control valves to maximum amount of groundwater flow
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 3.0
valve[ENC.Valve.SETTING] = max_groundwater_flow
run_EPANET()
# Close artificial reservoirs with inflows
# Can take multiple iterations
neg_dem_bool = True
while neg_dem_bool:
neg_dem_bool = False
for junction_id in demand_list:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] < 0.0:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
neg_dem_bool = True
run_EPANET()
run_EPANET()
# Set base_demand to maximum if possible
pda_count = 0
demand_list_copy = demand_list.copy()
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE]:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
pda_count += 1
# Set base_demand to greater than 0 and less than maximum if there are no maximums
if pda_count == 0:
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= -0.01:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = map_to_reservoir[junction_id][ENC.Reservoir.DEMAND]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
# End inner loop
run_EPANET()
# Close real reservoirs with inflows if possible
pda_count = 0
groundwater_list_copy = groundwater_list.copy()
for groundwater_id in groundwater_list_copy:
if map_to_groundwater_reservoir[groundwater_id][ENC.Reservoir.DEMAND] > 0.0:
map_to_groundwater_pipe[groundwater_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
groundwater_list.remove(groundwater_id)
pda_count += 1
# Delete real reservoirs from being tracked if no real reservoirs have inflows
if pda_count == 0:
for groundwater_id in groundwater_list_copy:
groundwater_list.remove(groundwater_id)
# End middle loop
run_EPANET()
# SIM STEP 1: SET LOAD AND DEMAND CURVES
# ------------------------------
power_factor = 0.0
power_load_factor = power_df
object_load.multiplyLoadFactor(power_load_factor, power_factor)
print('power load factor', power_load_factor)
# object_junction.multiplyLoadFactor(water_df)
print('water demand factor', water_df)
# SIM STEP 2: SET LOAD INTERCONNECTIONS
# ----------------------------------
object_load.setInterconnectionLoad(interconn_dict)
# SIM STEP 3: SET GENERATOR DISPATCH
# ----------------------------------
exports = 0.0 # kW
losses = 0.0 # kW
def fun_set_power_dispatch(object_load, object_generator, losses, exports):
    """Iterate unit commitment (Gurobi) against the OpenDSS power flow
    until the assumed system losses agree (within 1 kW) with the losses
    the power flow actually produces.

    Returns (need_reserves, actual_reserves, nominal_reserves_dict).

    Escalation schedule when not converging: after 50 iterations start
    recording commitment vectors; after 100/150 pin the commitment to a
    recorded vector and iterate only on losses; after 199 abort.
    """
    counter = 0
    lost_min = 10000000.0  # smallest-magnitude loss observed so far
    while True:
        need_reserves, actual_reserves, nominal_reserves_dict = grb_solvers.unit_commitment_priority_list_n2(object_load, object_generator, losses, exports) # unit commitment is variable
        new_loss = run_OpenDSS(0, True)
        counter += 1
        if math.fabs(losses - new_loss) > 1.0:
            if counter > 199:
                print('Dispatcher - Losses/Exports did not converge')
                sys.exit()
            elif counter > 150:
                # Pin commitment to the recorded "max" vector.
                # NOTE(review): dispatcher_max is only assigned in the
                # counter > 50 branch below; if that assignment never ran
                # this raises NameError -- confirm intended.
                while True:
                    object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = dispatcher_max
                    need_reserves, actual_reserves, nominal_reserves_dict = grb_solvers.unit_commitment_priority_list_2_n2(object_load, object_generator, losses, exports) # unit commitment is input
                    new_loss = run_OpenDSS(0, True)
                    counter +=1
                    if math.fabs(losses - new_loss) < 1.0:
                        return need_reserves, actual_reserves, nominal_reserves_dict
                    else:
                        losses += 0.8 * (new_loss - losses)
            elif counter > 100:
                # Pin commitment to the recorded "min" vector.
                while True:
                    object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = dispatcher_min
                    need_reserves, actual_reserves, nominal_reserves_dict = grb_solvers.unit_commitment_priority_list_2_n2(object_load, object_generator, losses, exports) # unit commitment is input
                    new_loss = run_OpenDSS(0, True)
                    counter +=1
                    if math.fabs(losses - new_loss) < 1.0:
                        return need_reserves, actual_reserves, nominal_reserves_dict
                    else:
                        losses += 0.8 * (new_loss - losses)
            elif counter > 50:
                # Record the commitment vectors for the pinned retries above.
                if math.fabs(new_loss) < math.fabs(lost_min):
                    lost_min = new_loss
                    dispatcher_min = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
                else:
                    dispatcher_max = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
            # Relax the assumed losses toward the observed value and retry.
            losses += 0.8*(new_loss - losses)
        else:
            return need_reserves, actual_reserves, nominal_reserves_dict
need_reserves, actual_reserves, nominal_reserves_dict = fun_set_power_dispatch(object_load, object_generator, losses, exports)
for generator in object_generator.matrix:
if generator[ODC.Generator.ID] in [101.0, 102.0, 201.0, 202.0]:
pass
else:
if math.fabs(nominal_reserves_dict[generator[ODC.Generator.ID]]) > 1.0 and generator[ODC.Generator.OPERATIONAL_STATUS] == 0.0:
print('*********************** YOU GOOFED *******************************')
print('exports #1', 0.5 * (object_cable.matrix[33, ODC.Cable.REAL_POWER_2] - object_cable.matrix[33, ODC.Cable.REAL_POWER_1]))
print('')
# SIM STEP 4: SET JUNCTION INTERCONNECTIONS
# -----------------------------------------
# SIM STEP 5:
# Set water
# ----------------------------
object_curve.matrix = np.array(base_curve_matrix, copy=True)
object_junction.matrix = np.array(base_junction_matrix, copy=True)
object_reservoir.matrix = np.array(base_reservoir_matrix, copy=True)
object_tank.matrix = np.array(base_tank_matrix, copy=True)
object_pipe.matrix = np.array(base_pipe_matrix, copy=True)
object_pump.matrix = np.array(base_pump_matrix, copy=True)
object_valve.matrix = np.array(base_valve_matrix, copy=True)
object_junction.setInterconnectionDemand(interconn_dict, nominal_reserves_dict)
artificial_reservoir_id_shift = 1000.0
max_groundwater_flow = 12399.0 # GPM
groundwater_id_shift = 2000.0
# Scale reservoir heads using water_df
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] == 21.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 864.92*water_df + 817.08)
elif reservoir[ENC.Reservoir.ID] == 22.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 951.11*water_df + 898.89)
elif reservoir[ENC.Reservoir.ID] == 23.0:
reservoir[ENC.Reservoir.TOTAL_HEAD] = max(reservoir[ENC.Reservoir.TOTAL_HEAD], 668.35*water_df + 631.65)
groundwater_list = []
map_to_groundwater_reservoir = {}
map_to_groundwater_pipe = {}
# Track real reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID] >= 3000.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
if reservoir[ENC.Reservoir.ID] == pipe[ENC.Pipe.ID]:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# WARNING THIS IS HARDCODED
elif reservoir[ENC.Reservoir.ID] == 23.0:
groundwater_list.append(reservoir[ENC.Reservoir.ID])
map_to_groundwater_reservoir[reservoir[ENC.Reservoir.ID]] = reservoir
for pipe in object_pipe.matrix:
# WARNING THIS IS HARDCODED
if pipe[ENC.Pipe.ID] == 36.0:
map_to_groundwater_pipe[reservoir[ENC.Reservoir.ID]] = pipe
# Loop real reservoirs, turn off the ones with water inflow
while len(groundwater_list) > 0:
# Initialize the relevant demand junctions
demand_list = []
map_to_junction = {}
map_to_junction_groundwater = {}
map_to_reservoir = {}
map_to_pipe = {}
# Track demand junctions
for junction in object_junction.matrix:
if junction[ENC.Junction.BASE_DEMAND_AVERAGE] + junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + junction[ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND] > 0.0:
demand_list.append(junction[ENC.Junction.ID])
for junction in object_junction.matrix:
if junction[ENC.Junction.ID] in demand_list:
map_to_junction[junction[ENC.Junction.ID]] = junction
found_junction_groundwater = 0
for junction_groundwater in object_junction.matrix:
if junction[ENC.Junction.ID] + groundwater_id_shift == junction_groundwater[ENC.Junction.ID]:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction_groundwater
found_junction_groundwater = 1
if found_junction_groundwater == 0:
map_to_junction_groundwater[junction[ENC.Junction.ID]] = junction
# Track artificial reservoirs
for reservoir in object_reservoir.matrix:
if reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift in demand_list:
map_to_reservoir[reservoir[ENC.Reservoir.ID]-artificial_reservoir_id_shift] = reservoir
# Track pipes for artificial reservoirs
for pipe in object_pipe.matrix:
if pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift in demand_list:
map_to_pipe[pipe[ENC.Pipe.ID]-artificial_reservoir_id_shift] = pipe
# Reset demand junction demands to 0
for junction_id in demand_list:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = 0.0
# Begin EPANET pressure-driven analysis for artificial reservoirs
while len(demand_list) > 0:
# Close artificial reservoirs' pipes
for junction_id in demand_list:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
# Set valve pressure loss to 0
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 2.0
valve[ENC.Valve.SETTING] = 0.0
run_EPANET()
# Open demand junctions with positive pressure ratio
# Can take multiple iterations
pos_pres_bool = True
while pos_pres_bool:
pos_pres_bool = False
max_pres_id = demand_list[0]
for junction_id in demand_list:
if max(map_to_junction[junction_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[junction_id][ENC.Junction.PRESSURE]) > map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] and map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
max_pres_id = junction_id
# this uses the MINIMUM ALLOWABLE PRESSURE
if max(map_to_junction[max_pres_id][ENC.Junction.PRESSURE], map_to_junction_groundwater[max_pres_id][ENC.Junction.PRESSURE]) > (map_to_junction[max_pres_id][ENC.Junction.MIN_PRESSURE] - 0.01) and map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] == 0.0:
map_to_pipe[max_pres_id][ENC.Pipe.OPERATIONAL_STATUS] = 1.0
pos_pres_bool = True
run_EPANET()
# Set flow control valves to maximum amount of groundwater flow
for valve in object_valve.matrix:
valve[ENC.Valve.MODEL] = 3.0
valve[ENC.Valve.SETTING] = max_groundwater_flow
run_EPANET()
# Close artificial reservoirs with inflows
# Can take multiple iterations
neg_dem_bool = True
while neg_dem_bool:
neg_dem_bool = False
for junction_id in demand_list:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] < 0.0:
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
neg_dem_bool = True
run_EPANET()
run_EPANET()
# Set base_demand to maximum if possible
pda_count = 0
demand_list_copy = demand_list.copy()
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE] + map_to_junction[junction_id][ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + map_to_junction[junction_id][ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND]:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = water_df * map_to_junction[junction_id][ENC.Junction.BASE_DEMAND_AVERAGE] + map_to_junction[junction_id][ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + map_to_junction[junction_id][ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
pda_count += 1
# Set base_demand to greater than 0 and less than maximum if there are no maximums
if pda_count == 0:
for junction_id in demand_list_copy:
if map_to_reservoir[junction_id][ENC.Reservoir.DEMAND] >= -0.01:
map_to_junction[junction_id][ENC.Junction.BASE_DEMAND] = map_to_reservoir[junction_id][ENC.Reservoir.DEMAND]
map_to_pipe[junction_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
demand_list.remove(junction_id)
# End inner loop
run_EPANET()
# Close real reservoirs with inflows if possible
pda_count = 0
groundwater_list_copy = groundwater_list.copy()
for groundwater_id in groundwater_list_copy:
if map_to_groundwater_reservoir[groundwater_id][ENC.Reservoir.DEMAND] > 0.0:
map_to_groundwater_pipe[groundwater_id][ENC.Pipe.OPERATIONAL_STATUS] = 0.0
groundwater_list.remove(groundwater_id)
pda_count += 1
# Delete real reservoirs from being tracked if no real reservoirs have inflows
if pda_count == 0:
for groundwater_id in groundwater_list_copy:
groundwater_list.remove(groundwater_id)
# End middle loop
run_EPANET()
# SIM STEP 6: RUN POWER-WATER SIMULATION
# --------------------------------------
genid_to_genobject = {}
juncid_to_genid_water = {}
reduced_reserves_dict = {}
for generator in object_generator.matrix:
genid_to_genobject[generator[ODC.Generator.ID]] = generator
for junction in object_junction.matrix:
juncid_to_genid_water[junction[ENC.Junction.ID]] = []
for generator in object_generator.matrix:
if junction[ENC.Junction.ID] == generator[ODC.Generator.JUNCTION_ID]:
juncid_to_genid_water[junction[ENC.Junction.ID]].append((generator[ODC.Generator.ID], generator[ODC.Generator.WATER_CONSUMPTION]))
for junction in object_junction.matrix:
if junction[ENC.Junction.BASE_DEMAND_AVERAGE] + junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + junction[ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND] > 0.0:
# Water demand is met for nominal reserves
if junction[ENC.Junction.BASE_DEMAND] >= junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + junction[ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND]:
for genid, _ in juncid_to_genid_water[junction[ENC.Junction.ID]]:
reduced_reserves_dict[genid] = nominal_reserves_dict[genid]
elif junction[ENC.Junction.BASE_DEMAND] <= junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND]:
for genid, _ in juncid_to_genid_water[junction[ENC.Junction.ID]]:
reduced_reserves_dict[genid] = 0.0
elif junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] < junction[ENC.Junction.BASE_DEMAND] < junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + junction[ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND]:
def getWaterConsumption(genid_genwater):
return genid_genwater[1]
demand_for_reserves = junction[ENC.Junction.INTERCONNECTION_DISPATCH_DEMAND] + junction[ENC.Junction.INTERCONNECTION_RESPONSE_DEMAND] - junction[ENC.Junction.BASE_DEMAND]
for genid, genwater in sorted(juncid_to_genid_water[junction[ENC.Junction.ID]], key=getWaterConsumption, reverse=True):
if demand_for_reserves > 0.0:
if nominal_reserves_dict[genid] * genid_to_genobject[genid][ODC.Generator.WATER_CONSUMPTION] * 0.001 <= demand_for_reserves:
reduced_reserves_dict[genid] = 0.0
demand_for_reserves -= nominal_reserves_dict[genid] * genid_to_genobject[genid][ODC.Generator.WATER_CONSUMPTION] * 0.001
else:
fraction = 1.0 - (demand_for_reserves/(nominal_reserves_dict[genid]*0.001*genid_to_genobject[genid][ODC.Generator.WATER_CONSUMPTION]))
reduced_reserves_dict[genid] = fraction * nominal_reserves_dict[genid]
demand_for_reserves -= (nominal_reserves_dict[genid] - reduced_reserves_dict[genid]) * genid_to_genobject[genid][ODC.Generator.WATER_CONSUMPTION] * 0.001
else:
reduced_reserves_dict[genid] = nominal_reserves_dict[genid]
else:
print("ERROR IN CALCULATING REDUCED RESERVES!")
nominal_reserves_list = []
reduced_reserves_list = []
for generator in object_generator.matrix:
nominal_reserves_list.append(nominal_reserves_dict.get(generator[ODC.Generator.ID], 0.0))
reduced_reserves_list.append(nominal_reserves_dict.get(generator[ODC.Generator.ID], 0.0) - reduced_reserves_dict.get(generator[ODC.Generator.ID], 0.0))
with open('C:\\Users\\' + os_username + '\\Documents\\git\\RISE-power-water-ss-1phase\\model_outputs\\analysis_power_water\\power_water_pipe_n2_{}.csv'.format(int(pipe_fail_id)), 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([water_df, power_df, need_reserves, actual_reserves, sum(reduced_reserves_dict.values())] + nominal_reserves_list + reduced_reserves_list)
# POWER N-1 CONTINGENCY ANALYSIS
# ------------------------------
for row in object_generator.matrix:
reduced_reserves_val = nominal_reserves_dict.get(row[ODC.Generator.ID], 0.0) - reduced_reserves_dict.get(row[ODC.Generator.ID], 0.0)
if reduced_reserves_val > 0.001:
row[ODC.Generator.REAL_GENERATION_MAX_RATING] = row[ODC.Generator.REAL_GENERATION] + reduced_reserves_dict.get(row[ODC.Generator.ID], 0.0)
for row in object_generator.matrix:
if row[ODC.Generator.REAL_GENERATION_MAX_RATING] < row[ODC.Generator.REAL_GENERATION_MIN_RATING]:
row[ODC.Generator.OPERATIONAL_STATUS] = 0.0
row[ODC.Generator.REAL_GENERATION] = 0.0
row[ODC.Generator.REAL_GENERATION_MIN_RATING] = 0.0
row[ODC.Generator.REAL_GENERATION_MAX_RATING] = 0.0
base_gen_commitment = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
base_gen_dispatch = np.array(object_generator.matrix[:, ODC.Generator.REAL_GENERATION], copy=True)
base_gen_dispatch_min = np.array(object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MIN_RATING], copy=True)
base_gen_dispatch_max = np.array(object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING], copy=True)
list_gen_mint = []
base_branch_commitment = np.array(object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A], copy=True)
list_branch_mint = []
print('Generators')
for row in object_generator.matrix:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = np.array(base_gen_commitment, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = np.array(base_gen_dispatch, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MIN_RATING] = np.array(base_gen_dispatch_min, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING] = np.array(base_gen_dispatch_max, copy=True)
run_OpenDSS(0, True)
if row[ODC.Generator.REAL_GENERATION] != 0.0 or row[ODC.Generator.ID] in [101.0, 102.0, 201.0, 202.0]:
row[ODC.Generator.OPERATIONAL_STATUS] = 0.0
row[ODC.Generator.REAL_GENERATION] = 0.0
row[ODC.Generator.REAL_GENERATION_MIN_RATING] = 0.0
row[ODC.Generator.REAL_GENERATION_MAX_RATING] = 0.0
run_OpenDSS(0, True)
try:
list_gen_mint.append(grb_solvers.contingency_response(object_load, object_generator, object_cable))
except:
list_gen_mint.append(10000)
else:
list_gen_mint.append(0)
print('Cables')
for row in object_cable.matrix:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = np.array(base_gen_commitment, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = np.array(base_gen_dispatch, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MIN_RATING] = np.array(base_gen_dispatch_min, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION_MAX_RATING] = np.array(base_gen_dispatch_max, copy=True)
object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = np.array(base_branch_commitment, copy=True)
run_OpenDSS(0, True)
if row[ODC.Cable.ID] not in [10.0, 100.0]:
if row[ODC.Cable.OPERATIONAL_STATUS_A] == 1.0:
row[ODC.Cable.OPERATIONAL_STATUS_A] = 0.0
run_OpenDSS(0, True)
try:
list_branch_mint.append(grb_solvers.contingency_response(object_load, object_generator, object_cable))
except:
list_branch_mint.append(10000)
else:
list_branch_mint.append(0)
object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = np.array(base_branch_commitment, copy=True)
print('')
with open('C:\\Users\\' + os_username + '\\Documents\\git\\RISE-power-water-ss-1phase\\model_outputs\\analysis_power_water\\power_water_gen_response_n2_{}.csv'.format(int(pipe_fail_id)), 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([water_df, power_df] + list_gen_mint)
with open('C:\\Users\\' + os_username + '\\Documents\\git\\RISE-power-water-ss-1phase\\model_outputs\\analysis_power_water\\power_water_branch_response_n2_{}.csv'.format(int(pipe_fail_id)), 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([water_df, power_df] + list_branch_mint)
# Interconnections have no effect
# input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor = run_OpenDSS(dss_debug, False)
# input_list_continuous1, input_list_categorical1, output_list1, input_tensor_continuous1, input_tensor_categorical1, output_tensor1 = run_EPANET()
# Interconnections have an effect
# _, _, output_list, _, _, output_tensor = run_OpenDSS(dss_debug, False)
# _, _, output_list1, _, _, output_tensor1 = run_EPANET()
# RESULTS STEP 1: FORMAT INPUT/OUTPUT TENSORS
# -------------------------------------------
# input_list_continuous = input_list_continuous + input_list_continuous1
# input_list_categorical = input_list_categorical + input_list_categorical1
# output_list = output_list + output_list1
# input_tensor_continuous = np.concatenate((input_tensor_continuous, input_tensor_continuous1), axis=0)
# input_tensor_categorical = np.concatenate((input_tensor_categorical, input_tensor_categorical1), axis=0)
# output_tensor = np.concatenate((output_tensor, output_tensor1), axis=0)
# RESULTS STEP 2: WRITE INPUT/OUTPUT TENSORS TO FILE
# --------------------------------------------------
# if write_cols:
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_list_continuous_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_continuous)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_list_categorical_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_categorical)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/output_list_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(output_list)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_tensor_continuous.csv', 'ab') as f:
# np.savetxt(f, input_tensor_continuous[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_tensor_categorical.csv', 'ab') as f:
# np.savetxt(f, input_tensor_categorical[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/output_tensor.csv', 'ab') as f:
# np.savetxt(f, output_tensor[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# END SIMULATION
# --------------
if __name__ == '__main__':
    # CLI entry point: argv = [power demand factor, water demand factor, failed pipe id]
    write_cols = False  # write column names to a separate file
    dss_debug = 0
    power_df, water_df, pipe_fid = (float(arg) for arg in sys.argv[1:4])
    main(dss_debug, write_cols, power_df, water_df, pipe_fid)
"content_hash": "6c03bb03062811bcdcc1d91a00259ab0",
"timestamp": "",
"source": "github",
"line_count": 949,
"max_line_length": 290,
"avg_line_length": 45.463645943098,
"alnum_prop": 0.7089813419863252,
"repo_name": "btgorman/RISE-power-water-ss-1phase",
"id": "a0f07062ff2f5cfc2f59ddd73f8b63aee2d3bffc",
"size": "43751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis_power_water_n2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1215714"
}
],
"symlink_target": ""
} |
from typing import Tuple, Optional, Sequence, List
import numpy as np
import pandas as pd
import scipy as sp
from anndata import AnnData
from natsort import natsorted
from .. import logging as logg
from ..neighbors import Neighbors, OnFlySymMatrix
def _diffmap(adata, n_comps=15, neighbors_key=None, random_state=0):
    """Compute a diffusion-map embedding and store it on ``adata``.

    Writes ``adata.obsm['X_diffmap']`` (the eigen basis of the transition
    matrix) and ``adata.uns['diffmap_evals']`` (its eigenvalues).
    """
    start = logg.info(f'computing Diffusion Maps using n_comps={n_comps}(=n_dcs)')
    dmap = DPT(adata, neighbors_key=neighbors_key)
    dmap.compute_transitions()
    dmap.compute_eigen(n_comps=n_comps, random_state=random_state)
    adata.obsm['X_diffmap'] = dmap.eigen_basis
    adata.uns['diffmap_evals'] = dmap.eigen_values
    logg.info(
        '    finished',
        time=start,
        deep=(
            'added\n'
            "    'X_diffmap', diffmap coordinates (adata.obsm)\n"
            "    'diffmap_evals', eigenvalues of transition matrix (adata.uns)"
        ),
    )
def dpt(
    adata: AnnData,
    n_dcs: int = 10,
    n_branchings: int = 0,
    min_group_size: float = 0.01,
    allow_kendall_tau_shift: bool = True,
    neighbors_key: Optional[str] = None,
    copy: bool = False,
) -> Optional[AnnData]:
    """\
    Infer progression of cells through geodesic distance along the graph
    [Haghverdi16]_ [Wolf19]_.

    Reconstruct the progression of a biological process from snapshot
    data. `Diffusion Pseudotime` has been introduced by [Haghverdi16]_ and
    implemented within Scanpy [Wolf18]_. Here, we use a further developed
    version, which is able to deal with disconnected graphs [Wolf19]_ and can
    be run in a `hierarchical` mode by setting the parameter
    `n_branchings>1`. We recommend, however, to only use
    :func:`~scanpy.tl.dpt` for computing pseudotime (`n_branchings=0`) and
    to detect branchings via :func:`~scanpy.tl.paga`. For pseudotime, you need
    to annotate your data with a root cell. For instance::

        adata.uns['iroot'] = np.flatnonzero(adata.obs['cell_types'] == 'Stem')[0]

    This requires to run :func:`~scanpy.pp.neighbors`, first. In order to
    reproduce the original implementation of DPT, use `method=='gauss'` in
    this. Using the default `method=='umap'` only leads to minor quantitative
    differences, though.

    .. versionadded:: 1.1

    :func:`~scanpy.tl.dpt` also requires to run
    :func:`~scanpy.tl.diffmap` first. As previously,
    :func:`~scanpy.tl.dpt` came with a default parameter of ``n_dcs=10`` but
    :func:`~scanpy.tl.diffmap` has a default parameter of ``n_comps=15``,
    you need to pass ``n_comps=10`` in :func:`~scanpy.tl.diffmap` in order
    to exactly reproduce previous :func:`~scanpy.tl.dpt` results.

    Parameters
    ----------
    adata
        Annotated data matrix.
    n_dcs
        The number of diffusion components to use.
    n_branchings
        Number of branchings to detect.
    min_group_size
        During recursive splitting of branches ('dpt groups') for `n_branchings`
        > 1, do not consider groups that contain less than `min_group_size` data
        points. If a float, `min_group_size` refers to a fraction of the total
        number of data points.
    allow_kendall_tau_shift
        If a very small branch is detected upon splitting, shift away from
        maximum correlation in Kendall tau criterion of [Haghverdi16]_ to
        stabilize the splitting.
    neighbors_key
        If not specified, dpt looks .uns['neighbors'] for neighbors settings
        and .obsp['connectivities'], .obsp['distances'] for connectivities and
        distances respectively (default storage places for pp.neighbors).
        If specified, dpt looks .uns[neighbors_key] for neighbors settings and
        .obsp[.uns[neighbors_key]['connectivities_key']],
        .obsp[.uns[neighbors_key]['distances_key']] for connectivities and distances
        respectively.
    copy
        Copy instance before computation and return a copy.
        Otherwise, perform computation inplace and return `None`.

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with the following fields.
    If `n_branchings==0`, no field `dpt_groups` will be written.

    `dpt_pseudotime` : :class:`pandas.Series` (`adata.obs`, dtype `float`)
        Array of dim (number of samples) that stores the pseudotime of each
        cell, that is, the DPT distance with respect to the root cell.
    `dpt_groups` : :class:`pandas.Series` (`adata.obs`, dtype `category`)
        Array of dim (number of samples) that stores the subgroup id ('0',
        '1', ...) for each cell. The groups typically correspond to
        'progenitor cells', 'undecided cells' or 'branches' of a process.

    Notes
    -----
    The tool is similar to the R package `destiny` of [Angerer16]_.
    """
    # standard errors, warnings etc.
    adata = adata.copy() if copy else adata
    if neighbors_key is None:
        neighbors_key = 'neighbors'
    if neighbors_key not in adata.uns:
        raise ValueError('You need to run `pp.neighbors` and `tl.diffmap` first.')
    # without a root cell only branching detection is possible; warn but continue
    if 'iroot' not in adata.uns and 'xroot' not in adata.var:
        logg.warning(
            'No root cell found. To compute pseudotime, pass the index or '
            'expression vector of a root cell, one of:\n'
            '    adata.uns[\'iroot\'] = root_cell_index\n'
            '    adata.var[\'xroot\'] = adata[root_cell_name, :].X'
        )
    if 'X_diffmap' not in adata.obsm.keys():
        # NOTE(review): the fallback uses _diffmap's default n_comps=15, not
        # this function's n_dcs — see the versionadded note in the docstring.
        logg.warning(
            'Trying to run `tl.dpt` without prior call of `tl.diffmap`. '
            'Falling back to `tl.diffmap` with default parameters.'
        )
        _diffmap(adata, neighbors_key=neighbors_key)
    # start with the actual computation
    dpt = DPT(
        adata,
        n_dcs=n_dcs,
        min_group_size=min_group_size,
        n_branchings=n_branchings,
        allow_kendall_tau_shift=allow_kendall_tau_shift,
        neighbors_key=neighbors_key,
    )
    start = logg.info(f'computing Diffusion Pseudotime using n_dcs={n_dcs}')
    if n_branchings > 1:
        logg.info('    this uses a hierarchical implementation')
    if dpt.iroot is not None:
        dpt._set_pseudotime()  # pseudotimes are distances from root point
        adata.uns[
            'iroot'
        ] = dpt.iroot  # update iroot, might have changed when subsampling, for example
        adata.obs['dpt_pseudotime'] = dpt.pseudotime
    # detect branchings and partition the data into segments
    if n_branchings > 0:
        dpt.branchings_segments()
        # segment labels as strings, categories in natural sort order
        adata.obs['dpt_groups'] = pd.Categorical(
            values=dpt.segs_names.astype('U'),
            categories=natsorted(np.array(dpt.segs_names_unique).astype('U')),
        )
        # the "change points" separate segments in the ordering above
        adata.uns['dpt_changepoints'] = dpt.changepoints
        # the tip points of segments
        adata.uns['dpt_grouptips'] = dpt.segs_tips
        # the ordering according to segments and pseudotime
        # invert dpt.indices: ordering_id[cell] = rank of cell in the ordering
        ordering_id = np.zeros(adata.n_obs, dtype=int)
        for count, idx in enumerate(dpt.indices):
            ordering_id[idx] = count
        adata.obs['dpt_order'] = ordering_id
        adata.obs['dpt_order_indices'] = dpt.indices
    logg.info(
        '    finished',
        time=start,
        deep=(
            'added\n'
            + (
                "    'dpt_pseudotime', the pseudotime (adata.obs)"
                if dpt.iroot is not None
                else ''
            )
            + (
                "\n    'dpt_groups', the branching subgroups of dpt (adata.obs)"
                "\n    'dpt_order', cell order (adata.obs)"
                if n_branchings > 0
                else ''
            )
        ),
    )
    return adata if copy else None
class DPT(Neighbors):
"""\
Hierarchical Diffusion Pseudotime.
"""
def __init__(
    self,
    adata,
    n_dcs=None,
    min_group_size=0.01,
    n_branchings=0,
    allow_kendall_tau_shift=False,
    neighbors_key=None,
):
    """Set up hierarchical DPT on top of the neighbors graph of ``adata``."""
    super().__init__(adata, n_dcs=n_dcs, neighbors_key=neighbors_key)
    self.flavor = 'haghverdi16'
    self.n_branchings = n_branchings
    # a fractional min_group_size is interpreted relative to the number of cells
    if min_group_size >= 1:
        self.min_group_size = min_group_size
    else:
        self.min_group_size = int(min_group_size * self._adata.shape[0])
    self.passed_adata = adata  # just for debugging purposes
    self.choose_largest_segment = False
    self.allow_kendall_tau_shift = allow_kendall_tau_shift
def branchings_segments(self):
    """\
    Detect branchings and partition the data into corresponding segments.

    Detects all branchings up to `n_branchings`, then post-processes the
    results and orders cells by segment and pseudotime.

    Writes
    ------
    segs : np.ndarray
        Array of dimension (number of segments) × (number of data
        points). Each row stores a mask array that defines a segment.
    segs_tips : np.ndarray
        Array of dimension (number of segments) × 2. Each row stores the
        indices of the two tip points of each segment.
    segs_names : np.ndarray
        Array of dimension (number of data points). Stores an integer label
        for each segment.
    """
    self.detect_branchings()
    self.postprocess_segments()
    self.set_segs_names()
    self.order_pseudotime()
def detect_branchings(self):
    """\
    Detect all branchings up to `n_branchings`.

    Writes Attributes
    -----------------
    segs : np.ndarray
        List of integer index arrays.
    segs_tips : np.ndarray
        List of indices of the tips of segments.
    """
    logg.debug(
        f'    detect {self.n_branchings} '
        f'branching{"" if self.n_branchings == 1 else "s"}',
    )
    # a segment is a subset of points of the data set (defined by the
    # indices of the points in the segment)
    # initialize the search for branchings with a single segment,
    # that is, get the indices of the whole data set
    indices_all = np.arange(self._adata.shape[0], dtype=int)
    # let's keep a list of segments, the first segment to add is the
    # whole data set
    segs = [indices_all]
    # a segment can as well be defined by the two points that have maximal
    # distance in the segment, the "tips" of the segment
    #
    # the rest of the points in the segment is then defined by demanding
    # them to "be close to the line segment that connects the tips", that
    # is, for such a point, the normalized added distance to both tips is
    # smaller than one:
    #     (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1
    # of course, this condition is fulfilled by the full cylindrical
    # subspace surrounding that line segment, where the radius of the
    # cylinder can be infinite
    #
    # if D denotes a euclidian distance matrix, a line segment is a linear
    # object, and the name "line" is justified. if we take the
    # diffusion-based distance matrix Dchosen, which approximates geodesic
    # distance, with "line", we mean the shortest path between two points,
    # which can be highly non-linear in the original space
    #
    # let us define the tips of the whole data set
    if False:  # this is safe, but not compatible with on-the-fly computation
        tips_all = np.array(
            np.unravel_index(
                np.argmax(self.distances_dpt), self.distances_dpt.shape
            )
        )
    else:
        # approximate the most distant pair: start from the root (or cell 0),
        # take the farthest point, then the point farthest from that one
        if self.iroot is not None:
            tip_0 = np.argmax(self.distances_dpt[self.iroot])
        else:
            tip_0 = np.argmax(self.distances_dpt[0])
        tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])
    # we keep a list of the tips of each segment
    segs_tips = [tips_all]
    # per-segment bookkeeping, updated in place by detect_branching below
    segs_connects = [[]]
    segs_undecided = [True]
    segs_adjacency = [[]]
    logg.debug(
        '    do not consider groups with less than '
        f'{self.min_group_size} points for splitting'
    )
    for ibranch in range(self.n_branchings):
        iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)
        if iseg == -1:
            logg.debug('    partitioning converged')
            break
        logg.debug(
            f'    branching {ibranch + 1}: split group {iseg}',
        )  # [third start end]
        # detect branching and update segs and segs_tips
        self.detect_branching(
            segs,
            segs_tips,
            segs_connects,
            segs_undecided,
            segs_adjacency,
            iseg,
            tips3,
        )
    # store as class members
    self.segs = segs
    self.segs_tips = segs_tips
    self.segs_undecided = segs_undecided
    # the following is a bit too much, but this allows easy storage
    self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)
    self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)
    for i, seg_adjacency in enumerate(segs_adjacency):
        self.segs_connects[i, seg_adjacency] = segs_connects[i]
    # adjacency weight (i, j) = dpt distance between the connecting cells
    for i in range(len(segs)):
        for j in range(len(segs)):
            self.segs_adjacency[i, j] = self.distances_dpt[
                self.segs_connects[i, j], self.segs_connects[j, i]
            ]
    self.segs_adjacency = self.segs_adjacency.tocsr()
    self.segs_connects = self.segs_connects.tocsr()
def check_adjacency(self):
    """Debug helper: print, for each segment, its closest other segments.

    Segments are visited in order of increasing edge count. For each one,
    prints the segment index, the distances to the closest points of all
    segments, and the indices of the `n_edges` closest segments. An
    in-place adjacency update was left commented out in the original;
    this method only reports.
    """
    n_edges_per_seg = np.sum(self.segs_adjacency > 0, axis=1).A1
    for n_edges in range(1, np.max(n_edges_per_seg) + 1):
        for iseg in range(self.segs_adjacency.shape[0]):
            if n_edges_per_seg[iseg] != n_edges:
                continue
            neighbor_segs = (  # noqa: F841 kept for parity with the disabled update code
                self.segs_adjacency[iseg].todense().A1
            )
            # for every segment, its point closest to this segment's first tip
            closest_points_other_segs = [
                other[np.argmin(self.distances_dpt[self.segs_tips[iseg][0], other])]
                for other in self.segs
            ]
            seg = self.segs[iseg]
            # for every segment's first tip, the closest point of this segment
            closest_points_in_segs = [
                seg[np.argmin(self.distances_dpt[tips[0], seg])]
                for tips in self.segs_tips
            ]
            distance_segs = [
                self.distances_dpt[closest_points_other_segs[ipoint], point]
                for ipoint, point in enumerate(closest_points_in_segs)
            ]
            # exclude position 0 of the argsort: the segment itself
            closest_segs = np.argsort(distance_segs)[1 : n_edges + 1]
            print(iseg, distance_segs, closest_segs)
def select_segment(self, segs, segs_tips, segs_undecided) -> Tuple[int, int]:
    """\
    Out of a list of line segments, choose segment that has the most
    distant second data point.

    Assume the distance matrix Ddiff is sorted according to seg_idcs.
    Compute all the distances.

    Returns
    -------
    iseg
        Index identifying the position within the list of line segments;
        -1 if no segment is splittable.
    tips3
        Positions of tips within chosen segment (int array of length 3);
        None if no segment is splittable.
    """
    # column 0: score; columns 1-3: the three tip positions
    scores_tips = np.zeros((len(segs), 4))
    allindices = np.arange(self._adata.shape[0], dtype=int)
    for iseg, seg in enumerate(segs):
        # do not consider too small segments
        if segs_tips[iseg][0] == -1:
            continue
        # restrict distance matrix to points in segment
        if not isinstance(self.distances_dpt, OnFlySymMatrix):
            Dseg = self.distances_dpt[np.ix_(seg, seg)]
        else:
            Dseg = self.distances_dpt.restrict(seg)
        third_maximizer = None
        if segs_undecided[iseg]:
            # check that none of our tips "connects" with a tip of the
            # other segments
            for jseg in range(len(segs)):
                if jseg != iseg:
                    # take the inner tip, the "second tip" of the segment
                    for itip in range(2):
                        if (
                            self.distances_dpt[
                                segs_tips[jseg][1], segs_tips[iseg][itip]
                            ]
                            < 0.5
                            * self.distances_dpt[
                                segs_tips[iseg][~itip], segs_tips[iseg][itip]
                            ]
                        ):
                            # this tip connects with segment jseg: do not
                            # use it for the "triangulation" below
                            third_maximizer = itip
        # map the global position to the position within the segment
        tips = [np.where(allindices[seg] == tip)[0][0] for tip in segs_tips[iseg]]
        # find the third point on the segment that has maximal
        # added distance from the two tip points
        dseg = Dseg[tips[0]] + Dseg[tips[1]]
        if not np.isfinite(dseg).any():
            continue
        # add this point to tips, it's a third tip, we store it at the first
        # position in an array called tips3
        third_tip = np.argmax(dseg)
        if third_maximizer is not None:
            # find a fourth point that has maximal distance to all three
            dseg += Dseg[third_tip]
            fourth_tip = np.argmax(dseg)
            if fourth_tip != tips[0] and fourth_tip != third_tip:
                tips[1] = fourth_tip
                dseg -= Dseg[tips[1]]
            else:
                dseg -= Dseg[third_tip]
        tips3 = np.append(tips, third_tip)
        # compute the score as ratio of the added distance to the third tip,
        # to what it would be if it were on the straight line between the
        # two first tips, given by Dseg[tips[:2]]
        # if we did not normalize, there would be a danger of simply
        # assigning the highest score to the longest segment
        score = dseg[tips3[2]] / Dseg[tips3[0], tips3[1]]
        score = (
            len(seg) if self.choose_largest_segment else score
        )  # simply the number of points
        # BUG FIX: the original `logg.debug(f'...' + '(too small)' if cond
        # else '')` bound the conditional around the whole concatenation, so
        # groups >= min_group_size logged an empty message; build the
        # message explicitly instead.
        msg = f'    group {iseg} score {score} n_points {len(seg)}'
        if len(seg) < self.min_group_size:
            msg += ' (too small)'
        logg.debug(msg)
        if len(seg) <= self.min_group_size:
            score = 0
        # write result
        scores_tips[iseg, 0] = score
        scores_tips[iseg, 1:] = tips3
    iseg = np.argmax(scores_tips[:, 0])
    if scores_tips[iseg, 0] == 0:
        return -1, None
    tips3 = scores_tips[iseg, 1:].astype(int)
    return iseg, tips3
def postprocess_segments(self):
    """Convert the format of the segment class members.

    Turns each index-array segment into a boolean mask over all cells and
    converts both `segs` and `segs_tips` into numpy arrays, which have an
    hdf5 equivalent and are therefore easier to store.
    """
    n_obs = self._adata.shape[0]
    masks = []
    for member_indices in self.segs:
        mask = np.zeros(n_obs, dtype=bool)
        mask[member_indices] = True
        masks.append(mask)
    self.segs = np.array(masks)
    self.segs_tips = np.array(self.segs_tips)
def set_segs_names(self):
    """Store one integer segment label per cell.

    Writes `segs_names` (int8 label array over all cells) and
    `segs_names_unique` (list of the labels in use).
    """
    labels = np.zeros(self._adata.shape[0], dtype=np.int8)
    unique_labels = []
    for label, mask in enumerate(self.segs):
        labels[mask] = label
        unique_labels.append(label)
    self.segs_names_unique = unique_labels
    self.segs_names = labels
def order_pseudotime(self):
    """\
    Define indices that reflect segment and pseudotime order.

    Writes
    ------
    indices : np.ndarray
        Index array of shape n, which stores an ordering of the data points
        with respect to increasing segment index and increasing pseudotime.
    changepoints : np.ndarray
        Index array of shape len(ssegs)-1, which stores the indices of
        points where the segment index changes, with respect to the ordering
        of indices.
    """
    # within segs_tips, order tips according to pseudotime
    if self.iroot is not None:
        for pos, tips in enumerate(self.segs_tips):
            if tips[0] != -1:
                tip_order = np.argsort(self.pseudotime[tips])
                self.segs_tips[pos] = self.segs_tips[pos][tip_order]
            else:
                logg.debug(f'    group {pos} is very small')
    # primary sort key: segment label
    order = np.argsort(self.segs_names)
    labels_sorted = self.segs_names[order]
    # changepoints: positions where the sorted segment label steps up by one
    changepoints = np.flatnonzero(np.diff(labels_sorted) == 1) + 1
    # secondary sort key: pseudotime within each segment
    if self.iroot is not None:
        pseudotime_sorted = self.pseudotime[order]
        for mask in self.segs:
            # only consider one segment, it's already ordered by segment
            mask_sorted = mask[order]
            # reorder this segment's slice of `order` by its pseudotime
            within = np.argsort(pseudotime_sorted[mask_sorted])
            order[mask_sorted] = order[mask_sorted][within]
    # define class members
    self.indices = order
    self.changepoints = changepoints
def detect_branching(
    self,
    segs: Sequence[np.ndarray],
    segs_tips: Sequence[np.ndarray],
    segs_connects,
    segs_undecided,
    segs_adjacency,
    iseg: int,
    tips3: np.ndarray,
):
    """\
    Detect branching on given segment.

    Updates all list parameters inplace.

    Call function _detect_branching and perform bookkeeping on segs and
    segs_tips.

    Parameters
    ----------
    segs
        Dchosen distance matrix restricted to segment.
    segs_tips
        Stores all tip points for the segments in segs.
    iseg
        Position of segment under study in segs.
    tips3
        The three tip points. They form a 'triangle' that contains the data.
    """
    seg = segs[iseg]
    # restrict distance matrix to points in segment
    if not isinstance(self.distances_dpt, OnFlySymMatrix):
        Dseg = self.distances_dpt[np.ix_(seg, seg)]
    else:
        Dseg = self.distances_dpt.restrict(seg)
    # given the three tip points and the distance matrix detect the
    # branching on the segment, return the list ssegs of segments that
    # are defined by splitting this segment
    result = self._detect_branching(Dseg, tips3, seg)
    ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk = result
    # map back to global indices
    for iseg_new, seg_new in enumerate(ssegs):
        ssegs[iseg_new] = seg[seg_new]
        ssegs_tips[iseg_new] = seg[ssegs_tips[iseg_new]]
        ssegs_connects[iseg_new] = list(seg[ssegs_connects[iseg_new]])
    # remove previous segment
    segs.pop(iseg)
    segs_tips.pop(iseg)
    # insert trunk/undecided_cells at same position
    segs.insert(iseg, ssegs[trunk])
    segs_tips.insert(iseg, ssegs_tips[trunk])
    # append other segments
    segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]
    segs_tips += [
        seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk
    ]
    if len(ssegs) == 4:
        # insert undecided cells at same position
        segs_undecided.pop(iseg)
        segs_undecided.insert(iseg, True)
    # correct edges in adjacency matrix
    n_add = len(ssegs) - 1
    prev_connecting_segments = segs_adjacency[iseg].copy()
    if self.flavor == 'haghverdi16':
        # each new segment is initially adjacent only to the trunk at iseg
        segs_adjacency += [[iseg] for i in range(n_add)]
        segs_connects += [
            seg_connects
            for iseg, seg_connects in enumerate(ssegs_connects)
            if iseg != trunk
        ]
        prev_connecting_points = segs_connects[  # noqa: F841 TODO Evaluate whether to assign the variable or not
            iseg
        ]
        # re-attach each previously connected segment to whichever new
        # sub-segment contains its old connection point
        for jseg_cnt, jseg in enumerate(prev_connecting_segments):
            iseg_cnt = 0
            for iseg_new, seg_new in enumerate(ssegs):
                if iseg_new != trunk:
                    pos = segs_adjacency[jseg].index(iseg)
                    connection_to_iseg = segs_connects[jseg][pos]
                    if connection_to_iseg in seg_new:
                        # kseg: global index of this new sub-segment
                        kseg = len(segs) - n_add + iseg_cnt
                        segs_adjacency[jseg][pos] = kseg
                        pos_2 = segs_adjacency[iseg].index(jseg)
                        segs_adjacency[iseg].pop(pos_2)
                        idx = segs_connects[iseg].pop(pos_2)
                        segs_adjacency[kseg].append(jseg)
                        segs_connects[kseg].append(idx)
                        break
                    iseg_cnt += 1
        segs_adjacency[iseg] += list(
            range(len(segs_adjacency) - n_add, len(segs_adjacency))
        )
        segs_connects[iseg] += ssegs_connects[trunk]
    else:
        import networkx as nx

        segs_adjacency += [[] for i in range(n_add)]
        segs_connects += [[] for i in range(n_add)]
        kseg_list = [iseg] + list(range(len(segs) - n_add, len(segs)))
        # re-attach previously connected segments to the closest new segment
        for jseg in prev_connecting_segments:
            pos = segs_adjacency[jseg].index(iseg)
            distances = []
            closest_points_in_jseg = []
            closest_points_in_kseg = []
            for kseg in kseg_list:
                reference_point_in_k = segs_tips[kseg][0]
                closest_points_in_jseg.append(
                    segs[jseg][
                        np.argmin(
                            self.distances_dpt[reference_point_in_k, segs[jseg]]
                        )
                    ]
                )
                # do not use the tip in the large segment j, instead, use the closest point
                reference_point_in_j = closest_points_in_jseg[
                    -1
                ]  # segs_tips[jseg][0]
                closest_points_in_kseg.append(
                    segs[kseg][
                        np.argmin(
                            self.distances_dpt[reference_point_in_j, segs[kseg]]
                        )
                    ]
                )
                distances.append(
                    self.distances_dpt[
                        closest_points_in_jseg[-1], closest_points_in_kseg[-1]
                    ]
                )
            # print(jseg, '(', segs_tips[jseg][0], closest_points_in_jseg[-1], ')',
            #       kseg, '(', segs_tips[kseg][0], closest_points_in_kseg[-1], ') :', distances[-1])
            idx = np.argmin(distances)
            kseg_min = kseg_list[idx]
            segs_adjacency[jseg][pos] = kseg_min
            segs_connects[jseg][pos] = closest_points_in_kseg[idx]
            pos_2 = segs_adjacency[iseg].index(jseg)
            segs_adjacency[iseg].pop(pos_2)
            segs_connects[iseg].pop(pos_2)
            segs_adjacency[kseg_min].append(jseg)
            segs_connects[kseg_min].append(closest_points_in_jseg[idx])
        # if we split two clusters, we need to check whether the new segments connect to any of the other
        # old segments
        # if not, we add a link between the new segments, if yes, we add two links to connect them at the
        # correct old segments
        do_not_attach_kseg = False
        for kseg in kseg_list:
            distances = []
            closest_points_in_jseg = []
            closest_points_in_kseg = []
            jseg_list = [
                jseg
                for jseg in range(len(segs))
                if jseg != kseg and jseg not in prev_connecting_segments
            ]
            for jseg in jseg_list:
                reference_point_in_k = segs_tips[kseg][0]
                closest_points_in_jseg.append(
                    segs[jseg][
                        np.argmin(
                            self.distances_dpt[reference_point_in_k, segs[jseg]]
                        )
                    ]
                )
                # do not use the tip in the large segment j, instead, use the closest point
                reference_point_in_j = closest_points_in_jseg[
                    -1
                ]  # segs_tips[jseg][0]
                closest_points_in_kseg.append(
                    segs[kseg][
                        np.argmin(
                            self.distances_dpt[reference_point_in_j, segs[kseg]]
                        )
                    ]
                )
                distances.append(
                    self.distances_dpt[
                        closest_points_in_jseg[-1], closest_points_in_kseg[-1]
                    ]
                )
            idx = np.argmin(distances)
            jseg_min = jseg_list[idx]
            if jseg_min not in kseg_list:
                # only attach if the graph stays acyclic: check via shortest
                # paths on the current segment adjacency graph
                segs_adjacency_sparse = sp.sparse.lil_matrix(
                    (len(segs), len(segs)), dtype=float
                )
                for i, seg_adjacency in enumerate(segs_adjacency):
                    segs_adjacency_sparse[i, seg_adjacency] = 1
                G = nx.Graph(segs_adjacency_sparse)
                paths_all = nx.single_source_dijkstra_path(G, source=kseg)
                if jseg_min not in paths_all:
                    segs_adjacency[jseg_min].append(kseg)
                    segs_connects[jseg_min].append(closest_points_in_kseg[idx])
                    segs_adjacency[kseg].append(jseg_min)
                    segs_connects[kseg].append(closest_points_in_jseg[idx])
                    logg.debug(f'    attaching new segment {kseg} at {jseg_min}')
                    # if we split the cluster, we should not attach kseg
                    do_not_attach_kseg = True
                else:
                    logg.debug(
                        f'    cannot attach new segment {kseg} at {jseg_min} '
                        '(would produce cycle)'
                    )
                    if kseg != kseg_list[-1]:
                        logg.debug('        continue')
                        continue
                    else:
                        logg.debug('        do not add another link')
                        break
            if jseg_min in kseg_list and not do_not_attach_kseg:
                segs_adjacency[jseg_min].append(kseg)
                segs_connects[jseg_min].append(closest_points_in_kseg[idx])
                segs_adjacency[kseg].append(jseg_min)
                segs_connects[kseg].append(closest_points_in_jseg[idx])
                break
    segs_undecided += [False for i in range(n_add)]
    def _detect_branching(
        self,
        Dseg: np.ndarray,
        tips: np.ndarray,
        seg_reference=None,  # accepted for interface compatibility; unused here
    ) -> Tuple[
        List[np.ndarray],
        List[np.ndarray],
        List[List[int]],
        List[List[int]],
        int,
    ]:
        """\
        Detect branching on given segment.
        Call function __detect_branching three times for all three orderings of
        tips. Points that do not belong to the same segment in all three
        orderings are assigned to a fourth segment. The latter is, by Haghverdi
        et al. (2016) referred to as 'undecided cells'.
        Parameters
        ----------
        Dseg
            Dchosen distance matrix restricted to segment.
        tips
            The three tip points. They form a 'triangle' that contains the data.
        Returns
        -------
        ssegs
            List of segments obtained from splitting the single segment defined
            via the first two tip cells.
        ssegs_tips
            List of tips of segments in ssegs.
        ssegs_adjacency
            For each new segment, the list of indices of the segments it
            connects to.
        ssegs_connects
            For each new segment, the cell indices (within this segment)
            through which it connects to its adjacent segments.
        trunk
            Index of the segment all other segments attach to (the undecided
            group if present, otherwise chosen to minimize added distance).
        """
        # compute candidate splits according to the configured flavor
        if self.flavor == 'haghverdi16':
            ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)
        elif self.flavor == 'wolf17_tri':
            ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)
        elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':
            ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)
        else:
            raise ValueError(
                '`flavor` needs to be in {"haghverdi16", "wolf17_tri", "wolf17_bi"}.'
            )
        # make sure that each data point has a unique association with a segment
        masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)
        for iseg, seg in enumerate(ssegs):
            masks[iseg][seg] = True
        # cells claimed by more than one candidate split are "undecided"
        nonunique = np.sum(masks, axis=0) > 1
        ssegs = []
        for iseg, mask in enumerate(masks):
            mask[nonunique] = False
            ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])
        # compute new tips within new segments
        ssegs_tips = []
        for inewseg, newseg in enumerate(ssegs):
            if len(np.flatnonzero(newseg)) <= 1:
                logg.warning(f'detected group with only {np.flatnonzero(newseg)} cells')
            # second tip: the cell farthest from the segment's first tip
            secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]
            ssegs_tips.append([tips[inewseg], secondtip])
        undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]
        if len(undecided_cells) > 0:
            # undecided cells form a fourth segment that acts as the trunk
            ssegs.append(undecided_cells)
            # establish the connecting points with the other segments
            ssegs_connects = [[], [], [], []]
            for inewseg, newseg_tips in enumerate(ssegs_tips):
                reference_point = newseg_tips[0]
                # closest cell to the new segment within undecided cells
                closest_cell = undecided_cells[
                    np.argmin(Dseg[reference_point][undecided_cells])
                ]
                ssegs_connects[inewseg].append(closest_cell)
                # closest cell to the undecided cells within new segment
                closest_cell = ssegs[inewseg][
                    np.argmin(Dseg[closest_cell][ssegs[inewseg]])
                ]
                ssegs_connects[-1].append(closest_cell)
            # also compute tips for the undecided cells
            tip_0 = undecided_cells[
                np.argmax(Dseg[undecided_cells[0]][undecided_cells])
            ]
            tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]
            ssegs_tips.append([tip_0, tip_1])
            ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]
            trunk = 3
        elif len(ssegs) == 3:
            # three clean segments: pick the trunk symmetrically
            reference_point = np.zeros(3, dtype=int)
            reference_point[0] = ssegs_tips[0][0]
            reference_point[1] = ssegs_tips[1][0]
            reference_point[2] = ssegs_tips[2][0]
            # closest_points[i, j]: cell of segment j closest to segment i's tip
            closest_points = np.zeros((3, 3), dtype=int)
            # this is another strategy than for the undecided_cells
            # here it's possible to use the more symmetric procedure
            # shouldn't make much of a difference
            closest_points[0, 1] = ssegs[1][
                np.argmin(Dseg[reference_point[0]][ssegs[1]])
            ]
            closest_points[1, 0] = ssegs[0][
                np.argmin(Dseg[reference_point[1]][ssegs[0]])
            ]
            closest_points[0, 2] = ssegs[2][
                np.argmin(Dseg[reference_point[0]][ssegs[2]])
            ]
            closest_points[2, 0] = ssegs[0][
                np.argmin(Dseg[reference_point[2]][ssegs[0]])
            ]
            closest_points[1, 2] = ssegs[2][
                np.argmin(Dseg[reference_point[1]][ssegs[2]])
            ]
            closest_points[2, 1] = ssegs[1][
                np.argmin(Dseg[reference_point[2]][ssegs[1]])
            ]
            # added_dist[i]: total connection distance if segment i is the trunk
            added_dist = np.zeros(3)
            added_dist[0] = (
                Dseg[closest_points[1, 0], closest_points[0, 1]]
                + Dseg[closest_points[2, 0], closest_points[0, 2]]
            )
            added_dist[1] = (
                Dseg[closest_points[0, 1], closest_points[1, 0]]
                + Dseg[closest_points[2, 1], closest_points[1, 2]]
            )
            added_dist[2] = (
                Dseg[closest_points[1, 2], closest_points[2, 1]]
                + Dseg[closest_points[0, 2], closest_points[2, 0]]
            )
            trunk = np.argmin(added_dist)
            ssegs_adjacency = [
                [trunk] if i != trunk else [j for j in range(3) if j != trunk]
                for i in range(3)
            ]
            ssegs_connects = [
                [closest_points[i, trunk]]
                if i != trunk
                else [closest_points[trunk, j] for j in range(3) if j != trunk]
                for i in range(3)
            ]
        else:
            # only two segments: connect them through their mutually closest cells
            trunk = 0
            ssegs_adjacency = [[1], [0]]
            reference_point_in_0 = ssegs_tips[0][0]
            closest_point_in_1 = ssegs[1][
                np.argmin(Dseg[reference_point_in_0][ssegs[1]])
            ]
            reference_point_in_1 = closest_point_in_1 # ssegs_tips[1][0]
            closest_point_in_0 = ssegs[0][
                np.argmin(Dseg[reference_point_in_1][ssegs[0]])
            ]
            ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]
        return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk
def _detect_branching_single_haghverdi16(self, Dseg, tips):
"""Detect branching on given segment."""
# compute branchings using different starting points the first index of
# tips is the starting point for the other two, the order does not
# matter
ssegs = []
# permutations of tip cells
ps = [
[0, 1, 2], # start by computing distances from the first tip
[1, 2, 0], # -"- second tip
[2, 0, 1], # -"- third tip
]
for i, p in enumerate(ps):
ssegs.append(self.__detect_branching_haghverdi16(Dseg, tips[p]))
return ssegs
def _detect_branching_single_wolf17_tri(self, Dseg, tips):
# all pairwise distances
dist_from_0 = Dseg[tips[0]]
dist_from_1 = Dseg[tips[1]]
dist_from_2 = Dseg[tips[2]]
closer_to_0_than_to_1 = dist_from_0 < dist_from_1
closer_to_0_than_to_2 = dist_from_0 < dist_from_2
closer_to_1_than_to_2 = dist_from_1 < dist_from_2
masks = np.zeros((2, Dseg.shape[0]), dtype=bool)
masks[0] = closer_to_0_than_to_1
masks[1] = closer_to_0_than_to_2
segment_0 = np.sum(masks, axis=0) == 2
masks = np.zeros((2, Dseg.shape[0]), dtype=bool)
masks[0] = ~closer_to_0_than_to_1
masks[1] = closer_to_1_than_to_2
segment_1 = np.sum(masks, axis=0) == 2
masks = np.zeros((2, Dseg.shape[0]), dtype=bool)
masks[0] = ~closer_to_0_than_to_2
masks[1] = ~closer_to_1_than_to_2
segment_2 = np.sum(masks, axis=0) == 2
ssegs = [segment_0, segment_1, segment_2]
return ssegs
def _detect_branching_single_wolf17_bi(self, Dseg, tips):
dist_from_0 = Dseg[tips[0]]
dist_from_1 = Dseg[tips[1]]
closer_to_0_than_to_1 = dist_from_0 < dist_from_1
ssegs = [closer_to_0_than_to_1, ~closer_to_0_than_to_1]
return ssegs
def __detect_branching_haghverdi16(
self, Dseg: np.ndarray, tips: np.ndarray
) -> np.ndarray:
"""\
Detect branching on given segment.
Compute point that maximizes kendall tau correlation of the sequences of
distances to the second and the third tip, respectively, when 'moving
away' from the first tip: tips[0]. 'Moving away' means moving in the
direction of increasing distance from the first tip.
Parameters
----------
Dseg
Dchosen distance matrix restricted to segment.
tips
The three tip points. They form a 'triangle' that contains the data.
Returns
-------
Segments obtained from "splitting away the first tip cell".
"""
# sort distance from first tip point
# then the sequence of distances Dseg[tips[0]][idcs] increases
idcs = np.argsort(Dseg[tips[0]])
# consider now the sequence of distances from the other
# two tip points, which only increase when being close to `tips[0]`
# where they become correlated
# at the point where this happens, we define a branching point
if True:
imax = self.kendall_tau_split(
Dseg[tips[1]][idcs],
Dseg[tips[2]][idcs],
)
if False:
# if we were in euclidian space, the following should work
# as well, but here, it doesn't because the scales in Dseg are
# highly different, one would need to write the following equation
# in terms of an ordering, such as exploited by the kendall
# correlation method above
imax = np.argmin(
Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]
)
# init list to store new segments
ssegs = [] # noqa: F841 # TODO Look into this
# first new segment: all points until, but excluding the branching point
# increasing the following slightly from imax is a more conservative choice
# as the criterion based on normalized distances, which follows below,
# is less stable
if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift:
# if "everything" is correlated (very large value of imax), a more
# conservative choice amounts to reducing this
logg.warning(
'shifting branching point away from maximal kendall-tau '
'correlation (suppress this with `allow_kendall_tau_shift=False`)'
)
ibranch = int(0.95 * imax)
else:
# otherwise, a more conservative choice is the following
ibranch = imax + 1
return idcs[:ibranch]
def kendall_tau_split(self, a, b) -> int:
"""Return splitting index that maximizes correlation in the sequences.
Compute difference in Kendall tau for all splitted sequences.
For each splitting index i, compute the difference of the two
correlation measures kendalltau(a[:i], b[:i]) and
kendalltau(a[i:], b[i:]).
Returns the splitting index that maximizes
kendalltau(a[:i], b[:i]) - kendalltau(a[i:], b[i:])
Parameters
----------
a, b : np.ndarray
One dimensional sequences.
Returns
-------
Splitting index according to above description.
"""
if a.size != b.size:
raise ValueError('a and b need to have the same size')
if a.ndim != b.ndim != 1:
raise ValueError('a and b need to be one-dimensional arrays')
import scipy as sp
min_length = 5
n = a.size
idx_range = np.arange(min_length, a.size - min_length - 1, dtype=int)
corr_coeff = np.zeros(idx_range.size)
pos_old = sp.stats.kendalltau(a[:min_length], b[:min_length])[0]
neg_old = sp.stats.kendalltau(a[min_length:], b[min_length:])[0]
for ii, i in enumerate(idx_range):
if True:
# compute differences in concordance when adding a[i] and b[i]
# to the first subsequence, and removing these elements from
# the second subsequence
diff_pos, diff_neg = self._kendall_tau_diff(a, b, i)
pos = pos_old + self._kendall_tau_add(i, diff_pos, pos_old)
neg = neg_old + self._kendall_tau_subtract(n - i, diff_neg, neg_old)
pos_old = pos
neg_old = neg
if False:
# computation using sp.stats.kendalltau, takes much longer!
# just for debugging purposes
pos = sp.stats.kendalltau(a[: i + 1], b[: i + 1])[0]
neg = sp.stats.kendalltau(a[i + 1 :], b[i + 1 :])[0]
if False:
# the following is much slower than using sp.stats.kendalltau,
# it is only good for debugging because it allows to compute the
# tau-a version, which does not account for ties, whereas
# sp.stats.kendalltau computes tau-b version, which accounts for
# ties
pos = sp.stats.mstats.kendalltau(a[:i], b[:i], use_ties=False)[0]
neg = sp.stats.mstats.kendalltau(a[i:], b[i:], use_ties=False)[0]
corr_coeff[ii] = pos - neg
iimax = np.argmax(corr_coeff)
imax = min_length + iimax
corr_coeff_max = corr_coeff[iimax]
if corr_coeff_max < 0.3:
logg.debug(' is root itself, never obtain significant correlation')
return imax
def _kendall_tau_add(self, len_old: int, diff_pos: int, tau_old: float):
"""Compute Kendall tau delta.
The new sequence has length len_old + 1.
Parameters
----------
len_old
The length of the old sequence, used to compute tau_old.
diff_pos
Difference between concordant and non-concordant pairs.
tau_old
Kendall rank correlation of the old sequence.
"""
return 2.0 / (len_old + 1) * (float(diff_pos) / len_old - tau_old)
def _kendall_tau_subtract(self, len_old: int, diff_neg: int, tau_old: float):
"""Compute Kendall tau delta.
The new sequence has length len_old - 1.
Parameters
----------
len_old
The length of the old sequence, used to compute tau_old.
diff_neg
Difference between concordant and non-concordant pairs.
tau_old
Kendall rank correlation of the old sequence.
"""
return 2.0 / (len_old - 2) * (-float(diff_neg) / (len_old - 1) + tau_old)
def _kendall_tau_diff(self, a: np.ndarray, b: np.ndarray, i) -> Tuple[int, int]:
"""Compute difference in concordance of pairs in split sequences.
Consider splitting a and b at index i.
Parameters
----------
a
?
b
?
Returns
-------
diff_pos
Difference between concordant pairs for both subsequences.
diff_neg
Difference between non-concordant pairs for both subsequences.
"""
# compute ordering relation of the single points a[i] and b[i]
# with all previous points of the sequences a and b, respectively
a_pos = np.zeros(a[:i].size, dtype=int)
a_pos[a[:i] > a[i]] = 1
a_pos[a[:i] < a[i]] = -1
b_pos = np.zeros(b[:i].size, dtype=int)
b_pos[b[:i] > b[i]] = 1
b_pos[b[:i] < b[i]] = -1
diff_pos = np.dot(a_pos, b_pos).astype(float)
# compute ordering relation of the single points a[i] and b[i]
# with all later points of the sequences
a_neg = np.zeros(a[i:].size, dtype=int)
a_neg[a[i:] > a[i]] = 1
a_neg[a[i:] < a[i]] = -1
b_neg = np.zeros(b[i:].size, dtype=int)
b_neg[b[i:] > b[i]] = 1
b_neg[b[i:] < b[i]] = -1
diff_neg = np.dot(a_neg, b_neg)
return diff_pos, diff_neg
| {
"content_hash": "d0ebd45e5e3872f42da3bd742ff889a3",
"timestamp": "",
"source": "github",
"line_count": 1144,
"max_line_length": 131,
"avg_line_length": 43.33391608391609,
"alnum_prop": 0.5398596038245854,
"repo_name": "theislab/scanpy",
"id": "2d298fd7f36ea816622132f19d1e3bd0ce00fc6f",
"size": "49576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scanpy/tools/_dpt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1255713"
},
{
"name": "R",
"bytes": "2315"
}
],
"symlink_target": ""
} |
'''
Simple Model Railway Automation
Hall-effect Sensor Support Module
Author : Peter Wallen
Created : 21/1/13
Version 1.0
This code encapsulates hardware associated with sensors used to detect the location of trains.
The hardware supported comprises:
One or more Microchip MCP23017 16-Bit I/O Expanders acting as sensor controllers.
Each sensor controller can be connected to a maximum of 16 hall-effect sensors.
This module requires python-smbus
'''
import smbus
import time
bus = 0
def i2Cbus_open():
    '''
    This function must be called once by the automation script to open the I2C bus between
    the Rpi and the sensor controller(s).
    '''
    global bus
    try:
        # NOTE(review): SMBus(0) selects I2C bus 0 -- verify the bus number
        # for the target Raspberry Pi board revision
        bus = smbus.SMBus(0)
    except EnvironmentError as e:
        # report the low-level error, then fail loudly for the caller
        print e
        raise RuntimeError("Unable to open I2C bus")
def config(address):
    '''
    This function must be called once by the automation script for each sensor controller.
    The address of the controller is determined by the A0,A1,A2 pins on the MCP23017 chip.
    eg. If A0,A1 and A2 are LOW then the address should be 0x20.
    For information about configuring the sensor controller see the Microchip MCP23017 datasheet.
    For example to connect sensors to GPA0 - GPA7, use GPB0 - GPB7 to drive LED indicators and
    enable interupts to allow the last sensor triggered to be stored in the interupt capture register,
    configure as follows:
    bus.write_byte_data(address,IODIRA,0xff) # set all ports in bank A to input
    bus.write_byte_data(address,IODIRB,0x00) # set all ports in bank B to output
    bus.write_byte_data(address,GPPUA,0xff) # enable pullup resistors for bank A
    bus.write_byte_data(address,GPINTENA,0xff) # enable interupts on port A
    '''
    global bus
    # MCP23017 register constants
    # (only a few are written below; the full map is kept as documentation)
    IODIRA = 0x00
    IODIRB = 0x01
    GPINTENA = 0X04
    GPINTENB = 0x05
    GPPUA = 0x0c
    GPPUB = 0x0d
    INTCAPA= 0x10
    INTCAPB= 0x11
    GPIOA = 0x12
    GPIOB = 0x13
    # bank A: hall sensors (inputs with pull-ups, interrupt capture enabled)
    # bank B: LED indicators (outputs)
    bus.write_byte_data(address,IODIRA,0xff) # set all ports in bank A to input
    bus.write_byte_data(address,IODIRB,0x00) # set all ports in bank B to output
    bus.write_byte_data(address,GPPUA,0xff) # enable pullup resistors for bank A
    bus.write_byte_data(address,GPINTENA,0xff) # enable interupts on port A
class Sensor(object):
    '''
    The class describing a sensor object.
    A sensor object is associated with each train detection sensor.
    '''
    def __init__(self, address, bank, port):
        '''
        The class constructor is called with the following parameters:
        address : the address of the sensor controller on the I2C bus eg. 0X20
        bank : the register group the sensor is connected to: 'A' for GPA0 - GPA7 and 'B' for GPB0 - GPB7
        port : the port on the sensor controller the sensor is connected to (1 - 8).
        NB. port 1 corresponds to pin GPx0 and port 8 corresponds to pin GPx7
        where x = A or B
        '''
        global bus
        # bit mask for each port number; index 0 is unused padding
        mask_table = [0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80]
        if bus == 0:
            raise RuntimeError("I2C bus has not been opened")
        self.address = address
        self.port = 0
        # per-bank register addresses (see the MCP23017 datasheet)
        if bank == "A":
            self.iodir = 0x00
            self.gpinten = 0x04
            self.gppu = 0x0c
            self.intcap = 0x10
            self.gpio = 0x12
        elif bank == "B":
            self.iodir = 0x01
            self.gpinten = 0x05
            self.gppu = 0x0d
            self.intcap = 0x11
            self.gpio = 0x13
        else:
            raise RuntimeError("Invalid bank must be A or B")
        if port > 8 or port < 1:
            raise RuntimeError("Invalid port must be between 1 and 8")
        else:
            self.port |= mask_table[port]

    def wait(self):
        '''
        This method will poll the interupt capture register for the sensor until it is triggered.
        In addition, it will control a status LED connected to the corresponding port on
        bank B (register 0x13 / GPIOB, which config() sets up as outputs).
        '''
        x = bus.read_byte_data(self.address, self.intcap)
        # switch off the indicator for the appropriate port
        # BUGFIX: the original used `status &= self.port`, which clears every
        # *other* LED bit and leaves this one unchanged; switching one bit
        # off requires ANDing with the inverted mask.
        status = bus.read_byte_data(self.address, 0x13)
        status &= ~self.port & 0xff
        bus.write_byte_data(self.address, 0x13, status)
        # NOTE(review): assumes the sensor line idles high (pull-ups enabled
        # in config()) and is pulled low when triggered -- confirm with hardware
        while (x & self.port):
            x = bus.read_byte_data(self.address, self.intcap)
            time.sleep(1)
        # switch on indicator for appropriate port
        status = bus.read_byte_data(self.address, 0x13)
        status |= self.port
        bus.write_byte_data(self.address, 0x13, status)
| {
"content_hash": "e0a9739714197034db7b301859d8486a",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 103,
"avg_line_length": 33.775193798449614,
"alnum_prop": 0.6844158824879504,
"repo_name": "phwallen/smrc",
"id": "45ee3fd83832501c88f451f983f031776f8826cc",
"size": "4357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heSensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "4447"
},
{
"name": "Python",
"bytes": "19951"
}
],
"symlink_target": ""
} |
__all__ = ("mesh")
def bu_to_inches(d):
    """Return Blender unit d in inches.
    Accepts a ``mathutils.Vector`` (converted component-wise) or a scalar
    ``int``/``float``.  Any other type falls through and returns ``None``
    (behavior kept for compatibility with existing callers).
    """
    import bpy
    import mathutils
    # 1 bu = 1 / 0.3048 ft; scale_length is the scene's unit scale and
    # the factor 12 converts feet to inches
    def convert(v):
        # a named def instead of the original lambda assignment (PEP 8 E731)
        return (12.0 * bpy.context.scene.unit_settings.scale_length * v) / 0.3048
    if isinstance(d, mathutils.Vector):
        return mathutils.Vector((convert(d.x), convert(d.y), convert(d.z)))
    if isinstance(d, (int, float)):
        return convert(d)
def inches_to_bu(d):
    """Return d (inches) in Blender units."""
    # invert the scene-dependent inches-per-blender-unit factor
    return d / bu_to_inches(1)
def feet_to_bu(d):
    """Return d (feet) in Blender units."""
    # 12 inches per foot, then reuse the inch conversion
    return 12.0 * inches_to_bu(d)
def approximately(left, right, value):
    """Return True if *value* lies within [left, right], padded by a small epsilon."""
    EPSILON = 1.0e-05
    lower_bound = left - EPSILON
    upper_bound = right + EPSILON
    return lower_bound <= value <= upper_bound
def cleanup_data():
    """Remove cameras and meshes no longer referenced by any scene object."""
    import bpy

    def _is_referenced(datablock, obj_type):
        # True if some object in the current scene still uses this datablock
        for obj in bpy.context.scene.objects:
            if obj.type == obj_type and obj.data == datablock:
                return True
        return False

    # iterate over copies since entries are removed while scanning
    for camera in bpy.data.cameras[:]:
        if not _is_referenced(camera, 'CAMERA'):
            bpy.data.cameras.remove(camera)
    for mesh in bpy.data.meshes[:]:
        if not _is_referenced(mesh, 'MESH'):
            bpy.data.meshes.remove(mesh)
class DuplicateScene():
    """Context manager that works on a saved copy of the current .blend file.
    ``__enter__`` returns a context-override dict (window/screen/VIEW_3D
    area/space/region) for operators that need one; ``__exit__`` reopens the
    file on top of the stack and deletes temporary copies.  The bottom stack
    entry is always the user's real file and is never deleted.
    """
    # Stack of .blend file paths; index 0 is the user's original file.
    _file_stack = []
    # Monotonic counter used to build unique copy file names.
    _unique_id = 0

    def __init__(self):
        import os
        import bpy
        if not bpy.data.is_saved:
            raise ValueError("Save .blend file before exporting")
        # Push filepath onto stack
        if len(DuplicateScene._file_stack) == 0:
            # Outermost use: remember and save the real file itself.
            DuplicateScene._file_stack.append(bpy.data.filepath)
            bpy.ops.wm.save_mainfile(filepath = bpy.data.filepath,
                                     check_existing = False)
        else:
            # Nested use: derive a unique copy name from the current file.
            filepath, fileext = os.path.splitext(bpy.data.filepath)
            DuplicateScene._file_stack.append("%s.copy.%02i.blend" % (filepath, DuplicateScene._unique_id))
            DuplicateScene._unique_id += 1
        # Save a working copy at the path now on top of the stack.
        # (A redundant re-assignment of `filepath` after this call was removed.)
        filepath = DuplicateScene._file_stack[-1]
        bpy.ops.wm.save_as_mainfile(filepath = filepath,
                                    check_existing = False,
                                    copy = True)

    def __enter__(self):
        import bpy
        # Build a context override pointing at the first VIEW_3D area.
        context = bpy.context
        blend_data = bpy.data
        window = context.window_manager.windows[0]
        screen = window.screen
        scene = context.scene
        area_view3d = [area for area in screen.areas if area.type == 'VIEW_3D'][0]
        space_view3d = [space for space in area_view3d.spaces if space.type == 'VIEW_3D'][0]
        region_window_view3d = [region for region in area_view3d.regions if region.type == 'WINDOW'][0]
        return {'window': window,
                'screen': screen,
                'context': context,
                'blend_data': blend_data,
                'scene': scene,
                'area': area_view3d,
                'space': space_view3d,
                'region': region_window_view3d}

    def __exit__(self, type, value, traceback):
        import os
        import bpy
        # Reopen the file on top of the stack; if it was a nested copy
        # (entries remain below it), delete it from disk afterwards.
        # NOTE(review): the file is removed while Blender has it open --
        # presumably the data stays in memory; confirm intended.
        filepath = DuplicateScene._file_stack.pop()
        bpy.ops.wm.open_mainfile(filepath = filepath,
                                 load_ui = True,
                                 use_scripts = False)
        if len(DuplicateScene._file_stack) > 0:
            os.remove(filepath)
| {
"content_hash": "f4713c838151201f72efd66846b723dd",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 114,
"avg_line_length": 36.27835051546392,
"alnum_prop": 0.5603864734299517,
"repo_name": "ijacquez/blender-tools",
"id": "5077f2b85f16a1b22980a9427fbf51dfaf9b317b",
"size": "3519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/helper_utils/lib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "17264"
}
],
"symlink_target": ""
} |
import mock
from nova.network import model as network_model
from nova import test
from nova import utils
from nova.virt.vmwareapi import fake as vmwareapi_fake
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
class fake_session(object):
    """Minimal stand-in for a VMware API session used by these tests."""

    def __init__(self, ret=None):
        # canned value that _call_method hands back to the code under test
        self.ret = ret

    def _get_vim(self):
        return vmwareapi_fake.FakeVim()

    def _call_method(self, module, method, *args, **kwargs):
        # ignore all arguments; always answer with the canned value
        return self.ret

    def _wait_for_task(self, task_ref):
        # tasks complete immediately in the fake session
        return None
class VMwareVMOpsTestCase(test.NoDBTestCase):
    """Unit tests for nova.virt.vmwareapi.vmops."""

    def setUp(self):
        super(VMwareVMOpsTestCase, self).setUp()
        # network fixture: a single VIF on a network with one IPv4 and one
        # IPv6 subnet, used by the machine-id tests below
        subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
                                        dns=[network_model.IP('192.168.0.1')],
                                        gateway=
                                            network_model.IP('192.168.0.1'),
                                        ips=[
                                            network_model.IP('192.168.0.100')],
                                        routes=None)
        subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
                                        dns=None,
                                        gateway=
                                            network_model.IP('dead:beef::1'),
                                        ips=[network_model.IP(
                                            'dead:beef::dcad:beff:feef:0')],
                                        routes=None)
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        subnets=[subnet_4, subnet_6],
                                        vlan=None,
                                        bridge_interface=None,
                                        injected=True)
        self.network_info = network_model.NetworkInfo([
                network_model.VIF(id=None,
                                  address='DE:AD:BE:EF:00:00',
                                  network=network,
                                  type=None,
                                  devname=None,
                                  ovs_interfaceid=None,
                                  rxtx_cap=3)
        ])
        # clear the cached neutron/nova-network decision between tests
        utils.reset_is_neutron()
        self._session = fake_session()

    def test_get_machine_id_str(self):
        result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
        self.assertEqual(result,
                         'DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
                         '192.168.0.1;192.168.0.255;192.168.0.1#')

    def test_is_neutron_nova(self):
        self.flags(network_api_class='nova.network.api.API')
        ops = vmops.VMwareVMOps(None, None, None)
        self.assertFalse(ops._is_neutron)

    def test_is_neutron_neutron(self):
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        ops = vmops.VMwareVMOps(None, None, None)
        self.assertTrue(ops._is_neutron)

    def test_is_neutron_quantum(self):
        self.flags(network_api_class='nova.network.quantumv2.api.API')
        ops = vmops.VMwareVMOps(None, None, None)
        self.assertTrue(ops._is_neutron)

    # decide_linked_clone(image_value, global_value): the image-level
    # metadata value overrides the global default when present

    def test_use_linked_clone_override_nf(self):
        value = vmops.VMwareVMOps.decide_linked_clone(None, False)
        self.assertFalse(value, "No overrides present but still overridden!")

    def test_use_linked_clone_override_nt(self):
        value = vmops.VMwareVMOps.decide_linked_clone(None, True)
        self.assertTrue(value, "No overrides present but still overridden!")

    def test_use_linked_clone_override_ny(self):
        value = vmops.VMwareVMOps.decide_linked_clone(None, "yes")
        self.assertTrue(value, "No overrides present but still overridden!")

    def test_use_linked_clone_override_ft(self):
        value = vmops.VMwareVMOps.decide_linked_clone(False, True)
        self.assertFalse(value,
                         "image level metadata failed to override global")

    def test_use_linked_clone_override_no_true(self):
        # BUGFIX: this test was previously also named
        # test_use_linked_clone_override_nt, which silently shadowed the
        # (None, True) test above; renamed so both tests actually run.
        value = vmops.VMwareVMOps.decide_linked_clone("no", True)
        self.assertFalse(value,
                         "image level metadata failed to override global")

    def test_use_linked_clone_override_yf(self):
        value = vmops.VMwareVMOps.decide_linked_clone("yes", False)
        self.assertTrue(value,
                        "image level metadata failed to override global")

    def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
        # exercises the datastore->datacenter lookup; when ds_ref_exists is
        # False the fake result carries a token so pagination is exercised too
        instance_ds_ref = mock.Mock()
        instance_ds_ref.value = "ds-1"
        _vcvmops = vmops.VMwareVCVMOps(self._session, None, None)
        if ds_ref_exists:
            ds_ref = mock.Mock()
            ds_ref.value = "ds-1"
        else:
            ds_ref = None

        def fake_call_method(module, method, *args, **kwargs):
            fake_object1 = vmwareapi_fake.FakeRetrieveResult()
            fake_object1.add_object(vmwareapi_fake.Datacenter(
                ds_ref=ds_ref))
            if not ds_ref:
                # Token is set for the fake_object1, so it will continue to
                # fetch the next object.
                setattr(fake_object1, 'token', 'token-0')
                if method == "continue_to_get_objects":
                    fake_object2 = vmwareapi_fake.FakeRetrieveResult()
                    fake_object2.add_object(vmwareapi_fake.Datacenter())
                    return fake_object2
            return fake_object1

        with mock.patch.object(self._session, '_call_method',
                               side_effect=fake_call_method) as fake_call:
            dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
            if ds_ref:
                self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
                fake_call.assert_called_once_with(vim_util, "get_objects",
                        "Datacenter", ["name", "datastore", "vmFolder"])
                self.assertEqual("ha-datacenter", dc_info.name)
            else:
                calls = [mock.call(vim_util, "get_objects", "Datacenter",
                                   ["name", "datastore", "vmFolder"]),
                         mock.call(vim_util, "continue_to_get_objects",
                                   "token-0")]
                fake_call.assert_has_calls(calls)
                self.assertIsNone(dc_info)

    def test_get_datacenter_ref_and_name(self):
        self._test_get_datacenter_ref_and_name(ds_ref_exists=True)

    def test_get_datacenter_ref_and_name_with_no_datastore(self):
        self._test_get_datacenter_ref_and_name()
| {
"content_hash": "04b593ef5770b8482762ce3c3f089be6",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 43.4,
"alnum_prop": 0.5272781328972796,
"repo_name": "SUSE-Cloud/nova",
"id": "3bd3ba3ddee5f3428da9de03669bbfa765f49147",
"size": "7409",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/havana",
"path": "nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13441452"
},
{
"name": "Shell",
"bytes": "20579"
}
],
"symlink_target": ""
} |
import pywikibot
from ddt import ddt, file_data
from pywikibot import Site
from testfixtures import compare
from service.ws_re.scanner.tasks.check_redirect_links import CHRETask
from service.ws_re.scanner.tasks.test_base_task import TaskTestCase
from service.ws_re.template.re_page import RePage
from tools.test import real_wiki_test
class TestCHRETaskRealWiki(TaskTestCase):
    # Integration tests that talk to the live de.wikisource.org wiki;
    # they only run when @real_wiki_test enables them.
    @real_wiki_test
    def test_get_backlinks(self):
        # backlinks of a redirect lemma, as filtered by the task
        WS_WIKI = Site(code="de", fam="wikisource", user="THEbotIT")
        task = CHRETask(WS_WIKI, self.logger)
        compare(["Benutzer:S8w4/Spielwiese/Lemmata06kurz",
                 "Paulys Realencyclopädie der classischen Altertumswissenschaft/Register/PD 2013"],
                task.get_backlinks(pywikibot.Page(WS_WIKI, "RE:ho epi bomo hiereus")))

    @real_wiki_test
    def test_integration(self):
        # end-to-end smoke test: run the task against a real RE page
        WS_WIKI = Site(code="de", fam="wikisource", user="THEbotIT")
        task = CHRETask(WS_WIKI, self.logger)
        task.re_page = RePage(pywikibot.Page(WS_WIKI, "RE:Ulpius 1a"))
        task.task()
@ddt
class TestCHRETaskUnittests(TaskTestCase):
    # Data-driven unit tests for CHRETask; no network access needed.
    @file_data("test_data/test_check_redirect_links.yml")
    def test_replace_redirect_links(self, text, redirect, target, expect):
        # each YAML case supplies text, redirect, target and the expected result
        task = CHRETask(None, self.logger)
        replaced_text = task.replace_redirect_links(text, redirect, target)
        compare(expect, replaced_text)

    def test_filter_link_list(self):
        # per the expectation below, maintenance/user/registry pages are
        # dropped and only regular article links survive
        link_list = [
            "Literatur",
            "RE:Querverweis",
            "Wikisource:RE-Werkstatt/Zeug in der Werkstatt",
            "Benutzer:S8w4/Spielwiese/Lemmata06kurz",
            "Benutzer Diskussion:S8w4",
            "Benutzer:THEbotIT/some_logging_page",
            "RE:Wartung:Strukturfehler",
            "Paulys Realencyclopädie der classischen Altertumswissenschaft/Register/PD 2013",
        ]
        task = CHRETask(None, self.logger)
        compare(["Literatur", "RE:Querverweis"], task.filter_link_list(link_list))
| {
"content_hash": "9eea347c38486939e4609b63ba39b02c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 99,
"avg_line_length": 40.53061224489796,
"alnum_prop": 0.675226586102719,
"repo_name": "the-it/WS_THEbotIT",
"id": "9deaf058a746ce8a310851bfae688c546f34dedb",
"size": "2023",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "service/ws_re/scanner/tasks/test_check_redirect_links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "3121"
},
{
"name": "Makefile",
"bytes": "3017"
},
{
"name": "Python",
"bytes": "785189"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
} |
'''
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
A: a1 → a2
↘
c1 → c2 → c3
↗
B: b1 → b2 → b3
begin to intersect at node c1.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
'''
# Definition for singly-linked list.
class ListNode(object):
    # Singly-linked list node: holds a value and a pointer to the next node.
    def __init__(self, x):
        self.val = x  # node payload
        self.next = None  # next node, or None at the tail
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        # Two-pointer technique: each cursor walks its own list and then
        # switches to the other list's head.  Both traverse lenA + lenB
        # nodes, so they meet at the first shared node, or at None when
        # the lists are disjoint.
        first, second = headA, headB
        while first is not second:
            first = headB if first is None else first.next
            second = headA if second is None else second.next
        return first

    def getIntersectionNode_diff(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        # Length-difference technique: advance the head of the longer list
        # by the length difference, then walk both lists in lockstep and
        # return the first node they share.
        def measure(node):
            count = 0
            while node is not None:
                node = node.next
                count += 1
            return count

        size_a = measure(headA)
        size_b = measure(headB)
        if size_a > size_b:
            for _ in range(size_a - size_b):
                headA = headA.next
        else:
            for _ in range(size_b - size_a):
                headB = headB.next
        while headA is not None:
            if headA is headB:
                return headA
            headA = headA.next
            headB = headB.next
        return None
if __name__ == "__main__":
    # No demo/driver code: the module is meant to be imported for its classes.
    # (The original used a bare `None` expression; `pass` is the idiomatic no-op.)
    pass
"content_hash": "063ff95699d45fa7232ff6ce9fe9ee6f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 93,
"avg_line_length": 26.875,
"alnum_prop": 0.5209302325581395,
"repo_name": "gavinfish/leetcode-share",
"id": "5f6fb82a7536bd32d88f8d38cbb052326d854591",
"size": "1949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/160 Intersection of Two Linked Lists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "81458"
},
{
"name": "Python",
"bytes": "222883"
}
],
"symlink_target": ""
} |
'''
Reduces BED regions to overlap them. The BED file *must* be sorted in order
to merge them.
'''
import os
import sys
from ngsutils.bed import BedStreamer, BedRegion
def usage():
    # Print the module docstring followed by the CLI option summary,
    # then exit with a non-zero status (Python 2 print statements).
    print __doc__
    print """\
Usage: bedutils reduce {opts} bedfile
-extend num{,num} Extend the BED region {num} bases 5' and 3'
Can either a single number (extend the same in both
direction) or a comma delimited pair of numbers where the
first number extends the region in the 5' direction and
the second number extends the region in the 3' direction.
-clip Only extend the reads to find overlapping regions, don't
extend the edges of the regions.
-c Output the number of regions merged as the score (count).
Otherwise, the scores for all of the regions are added
together.
-nostrand Ignore strand information when merging regions
"""
    sys.exit(1)
class MergedRegion(object):
    '''
    Manages regions to be merged together.
    '''
    def __init__(self, extend=(0, 0), clip=False, count=False, out=sys.stdout):
        '''
        extend is a tuple or list. The first is the 5' extension,
        the last is the 3' extension. These are strand specific.
        clip: detect overlap with the extended coordinates but emit the
        unextended region boundaries.
        count: score output regions by the number of merged members
        instead of summing the member scores.
        out: file-like object that merged regions are written to.
        '''
        self.extend = extend
        self.clip = clip
        self.count = count
        self.out = out
        self._reset()

    def _reset(self):
        # clear all accumulated state so the next add() starts a new merge
        self.chrom = None
        self.extended_start = 0
        self.extended_end = 0
        self.score = 0
        self.strand = None
        self.region_start = 0
        self.region_end = 0
        self.members = []

    def add(self, region):
        # extension is strand specific: extend[0] is applied on the 5' side
        # and extend[1] on the 3' side relative to the region's strand
        if not region.strand or region.strand == '+':
            newstart = region.start - self.extend[0]
            newend = region.end + self.extend[1]
        else:
            newstart = region.start - self.extend[1]
            newend = region.end + self.extend[0]
        if newstart < 0:
            newstart = 0
        # flush the pending merged region when the chromosome changes or the
        # new (extended) region no longer overlaps the accumulated span;
        # requires input sorted by chrom/start
        if self.chrom != region.chrom:
            self.write()
            self._reset()
        elif newstart >= self.extended_end:
            self.write()
            self._reset()
        # NOTE(review): `not self.extended_start` treats coordinate 0 as
        # "unset"; a region legitimately starting at base 0 is
        # indistinguishable from the reset state here -- confirm intended
        if not self.extended_start:
            self.extended_start = newstart
        if self.clip:
            # keep the unextended coordinates for output
            if not self.region_start:
                self.region_start = region.start
            self.region_end = region.end
        else:
            if not self.region_start:
                self.region_start = newstart
            self.region_end = newend
        self.chrom = region.chrom
        if not self.strand:
            self.strand = region.strand
        elif self.strand != region.strand:
            # mixed-strand merges collapse to '+'
            self.strand = '+'
        self.extended_start = min(self.extended_start, newstart)
        self.extended_end = max(self.extended_end, newend)
        if region.name:
            self.members.append(region.name)
        if self.count:
            self.score += 1
        else:
            self.score += region.score

    def write(self):
        # only emit when a region has actually been accumulated
        if self.chrom and self.region_start and self.region_end:
            region = BedRegion(self.chrom, self.region_start, self.region_end, ','.join(sorted(set(self.members))), self.score, self.strand)
            region.write(self.out)
        self.region_start = 0
        self.region_end = 0
def bed_reduce(bed, extend=(0, 0), stranded=True, count=False, clip=False, out=sys.stdout):
    '''
    Stream BED regions from `bed` (must be coordinate-sorted) and write the
    merged, overlapping regions to `out`.

    bed      - iterable of BED regions (e.g. a BedStreamer)
    extend   - (5', 3') extension applied to each region when testing overlap
    stranded - if True, '+' and '-' regions are merged separately
    count    - report the number of merged members as the score
    clip     - use extensions only for overlap detection, not for output
    out      - destination file-like object

    Exits via sys.exit(1) if the input turns out not to be sorted.
    '''
    plus_region = MergedRegion(extend, clip, count, out)
    minus_region = MergedRegion(extend, clip, count, out)
    # these are just for checking that the file is sorted
    lchrom = None
    lstart = None
    lend = None
    lregion = None
    for region in bed:
        if lchrom == region.chrom:
            # within a chromosome, (start, end) must be non-decreasing
            if region.start < lstart or (region.start == lstart and region.end < lend):
                print 'last    ', lregion
                print 'current ', region
                sys.stderr.write('BED file is not sorted!\n')
                sys.stderr.write('chrom: %s\t%s (= %s)\n' % (lchrom, region.chrom, (region.chrom == lchrom)))
                sys.stderr.write('start: %s\t%s (< %s)\n' % (lstart, region.start, (lstart < region.start)))
                sys.stderr.write('end: %s\t%s\n' % (lend, region.end))
                sys.exit(1)
        lchrom = region.chrom
        lstart = region.start
        lend = region.end
        lregion = region
        # with -nostrand (stranded=False) everything funnels through plus_region
        if not stranded or region.strand == '+':
            plus_region.add(region)
        else:
            minus_region.add(region)
    # flush whatever is still pending in each accumulator
    plus_region.write()
    minus_region.write()
if __name__ == '__main__':
    # option defaults
    fname = None
    extend = (0, 0)
    stranded = True
    count = False
    last = None  # holds an option (e.g. '-extend') that still expects a value
    clip = False
    for arg in sys.argv[1:]:
        if arg == '-h':
            usage()
        if last == '-extend':
            # value for -extend: either 'n' (both directions) or "five,three"
            if ',' in arg:
                extend = [int(x) for x in arg.split(',')]
            else:
                extend = [int(arg), ] * 2
            last = None
        elif arg in ['-extend']:
            last = arg
        elif arg == '-clip':
            clip = True
        elif arg == '-nostrand':
            stranded = False
        elif arg == '-c':
            count = True
        elif not fname and (arg == '-' or os.path.exists(arg)):
            # '-' means read from stdin
            fname = arg
        else:
            print "Unknown option: %s" % arg
            usage()
    if not fname:
        usage()
    bed_reduce(BedStreamer(fname), extend, stranded, count, clip)
| {
"content_hash": "e1b65d46211fe075602269d4f8d1a0b7",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 140,
"avg_line_length": 29.893048128342247,
"alnum_prop": 0.5391771019677997,
"repo_name": "ngsutils/ngsutils",
"id": "6c3db207b35af5fd1ccc80b32cf9993664c6f5e9",
"size": "5671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ngsutils/bed/reduce.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "749841"
},
{
"name": "R",
"bytes": "1013"
},
{
"name": "Shell",
"bytes": "8231"
}
],
"symlink_target": ""
} |
'''
Created on 2015/11/08
@author: wildberry
'''
from __builtin__ import basestring
import types
def raiseException(excp):
    '''
    Raise excp when it is an exception instance, otherwise hand it back.

    @param excp: any value, possibly an error/exception instance
    @return: excp unchanged, if and only if excp is not an instance of
             BaseException (otherwise excp is raised instead)
    '''
    if not isinstance(excp, BaseException):
        return excp
    raise excp
def toStrTuple(*args):
    '''
    Converts every positional argument to its str() form.

    @param args: varargs to convert
    @return: a tuple-of-str
    '''
    return tuple(str(arg) for arg in args)
def switch(cases):
    '''
    C-like switch-case statement (no fall-through; matching is a plain
    dict lookup, not ordinal).

    The key None, when present, supplies the default handler and is
    removed from the cases dict.

    example)
        nth = switch({
            1: lambda v: "first",
            2: lambda v: "second",
            3: lambda v: "third",
            None: lambda v: str(v) + "-th"})  # key == None is default
        print map(nth, range(5))

    @param cases: an instance of dict(key = any value, value = 1-ary function)
    @return: a 1-ary dispatch function
    '''
    if not isinstance(cases, dict):
        raise ValueError("%s is not an instance of dict" % str(cases))
    # split the default handler off; fall back to a constant-None handler
    defaulted = cases.pop(None, None)
    if defaulted is None:
        defaulted = lambda _: None

    def find(value):
        if value in cases:
            return cases[value](value)
        return defaulted(value)
    return find
def _match_split_defaulted(cases):
'''
Private function for match(cases), regmatch(cases) and switch(cases) function
@param cases: an instance of dict
@return: a tuple of (defaulted, cases)
'''
if not isinstance(cases, dict):
raise ValueError("%s is not an instance of dict" % str(cases))
if None in cases:
defaulted = cases[None]
cases.pop(None)
else:
defaulted = lambda _: None
return defaulted, cases
def match(cases):
    '''
    Creates a dispatch function which tries each predicate key in turn and
    evaluates the handler of the first predicate accepting the value.

    The key None, when present, supplies the default handler.

    example)
        # ['0 == 0', '0 < 1 <= 3', '0 < 2 <= 3', '0 < 3 <= 3', '4 is not in 0 to 3']
        print map(match({
            (lambda v: v == 0): (lambda v: str(v) + " == 0"),
            (lambda v: 0 < v and v <= 3): (lambda v: "0 < " + str(v) + " <= 3"),
            None: (lambda v: str(v) + " is not in 0 to 3")}), (i for i in range(5)))

    @param cases: an instance of dict(key = 1-ary boolean function, value = 1-ary function)
    @return: a 1-ary dispatch function
    '''
    defaulted, cases = _match_split_defaulted(cases)

    def dispatch(value):
        for predicate, handler in cases.items():
            if predicate(value):
                return handler(value)
        return defaulted(value) if defaulted else None
    return dispatch
try:
    import re

    def regmatch(cases):
        '''
        Creates a function which can search and evaluate a value<br />
        <pre>
        example)
            # ['Starts with x: xyz', 'Starts with y: yzx', 'Starts with z: zxy', 'Prefix is not x, y or z: abc']
            finder = regmatch({
                r"^x.*$": lambda v: "Starts with x: " + v,
                r"^y.*$": lambda v: "Starts with y: " + v,
                re.compile(r"^z.*$"): lambda v: "Starts with z: " + v,
                None: lambda v: "Prefix is not x, y or z: " + v}) # key == None is default
            print map(finder, ["xyz", "yzx", "zxy", "abc"])
        </pre>
        @param cases: a dictionary object(key = basestring or compiled regular expression(re.compile(..)), value = 1-ary function)
        @return: a function
        '''
        defaulted, cases = _match_split_defaulted(cases)
        # Wrap each key in a search callable.  keyregex is bound as a default
        # argument so each lambda captures its own key (avoids the late-binding
        # closure pitfall); plain strings go through re.search with DOTALL,
        # precompiled patterns use their own .search().
        cases = {(lambda target, keyregex=key: re.search(keyregex, target, re.DOTALL) if isinstance(keyregex, basestring) else keyregex.search(target))
                 : lmd for key, lmd in cases.items()}
        if defaulted:
            cases[None] = defaulted
        return match(cases)
except ImportError:
    # Only the absence of the re module should make regmatch unavailable.
    # The original bare `except:` also silently hid real programming errors
    # (e.g. a NameError) raised while defining regmatch.
    pass
def select(values, pred=(lambda _: True)):
    '''
    Select the values which satisfy pred.

    @param values: iterable object
    @param pred: either a 1-ary predicate function, or a plain value that
                 each element is compared to with ==
    @return: a generator yielding the elements that satisfy pred

    Bug fix: the original default predicate took no arguments, so calling
    select(values) without an explicit pred raised TypeError on the first
    element; the default now accepts (and ignores) the element.
    '''
    if isinstance(pred, types.FunctionType):
        for v in values:
            if pred(v):
                yield v
    else:
        # non-function pred: keep the elements equal to it
        for v in values:
            if v == pred:
                yield v
| {
"content_hash": "4fd80aa34c7bbafe6d6ccef9315b9e2c",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 151,
"avg_line_length": 31.257861635220127,
"alnum_prop": 0.5505030181086519,
"repo_name": "gnomeberry/pyth2",
"id": "ff7b1168387657c4c4db1dd44949ae0bd42cf950",
"size": "4970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyth2/PythUtil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "48"
},
{
"name": "Python",
"bytes": "122508"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
} |
"""
Unittests for table management
"""
__revision__ = '$Id: unittest_table.py,v 1.13 2006-04-09 22:30:53 nico Exp $'
import sys
import os
from cStringIO import StringIO
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.table import Table, TableStyleSheet, DocbookTableWriter, \
DocbookRenderer, TableStyle, TableWriter, TableCellRenderer
from logilab.common.compat import set
class TableTC(TestCase):
    """Table TestCase class"""
    def setUp(self):
        """Creates a default table"""
        # from logilab.common import table
        # reload(table)
        self.table = Table()
        self.table.create_rows(['row1', 'row2', 'row3'])
        self.table.create_columns(['col1', 'col2'])
    def test_valeur_scalaire(self):
        """tests appending single-cell rows and scalar cell access"""
        tab = Table()
        tab.create_columns(['col1'])
        tab.append_row([1])
        self.assertEquals(tab, [[1]])
        tab.append_row([2])
        self.assertEquals(tab[0,0], 1)
        self.assertEquals(tab[1,0], 2)
    def test_valeur_ligne(self):
        """tests appending a full row at once"""
        tab = Table()
        tab.create_columns(['col1','col2'])
        tab.append_row([1,2])
        self.assertEquals(tab, [[1,2]])
    def test_valeur_colonne(self):
        """tests whole-column access with the [:, index] syntax"""
        tab = Table()
        tab.create_columns(['col1'])
        tab.append_row([1])
        tab.append_row([2])
        self.assertEquals(tab, [[1],[2]])
        self.assertEquals(tab[:,0], [1,2])
    def test_indexation(self):
        """we should be able to use [] to access rows"""
        self.assert_(self.table[0] == self.table.data[0])
        self.assert_(self.table[1] == self.table.data[1])
    def test_iterable(self):
        """test iter(table)"""
        it = iter(self.table)
        self.assert_(it.next() == self.table.data[0])
        self.assert_(it.next() == self.table.data[1])
    def test_get_rows(self):
        """tests Table.get_rows()"""
        self.assertEquals(self.table, [[0, 0], [0, 0], [0, 0]])
        self.assertEquals(self.table[:], [[0, 0], [0, 0], [0, 0]])
        self.table.insert_column(1, range(3), 'supp')
        self.assertEquals(self.table, [[0, 0, 0], [0, 1, 0], [0, 2, 0]])
        self.assertEquals(self.table[:], [[0, 0, 0], [0, 1, 0], [0, 2, 0]])
    def test_get_cells(self):
        """tests cell access by index pair and by (row id, col id)"""
        self.table.insert_column(1, range(3), 'supp')
        self.assertEquals(self.table[0,1], 0)
        self.assertEquals(self.table[1,1], 1)
        self.assertEquals(self.table[2,1], 2)
        self.assertEquals(self.table['row1', 'supp'], 0)
        self.assertEquals(self.table['row2', 'supp'], 1)
        self.assertEquals(self.table['row3', 'supp'], 2)
        self.assertRaises(KeyError, self.table.__getitem__, ('row1', 'foo'))
        self.assertRaises(KeyError, self.table.__getitem__, ('foo', 'bar'))
    def test_shape(self):
        """tests table shape"""
        self.assertEquals(self.table.shape, (3, 2))
        self.table.insert_column(1, range(3), 'supp')
        self.assertEquals(self.table.shape, (3, 3))
    def test_set_column(self):
        """Tests that table.set_column() works fine.
        """
        self.table.set_column(0, range(3))
        self.assertEquals(self.table[0,0], 0)
        self.assertEquals(self.table[1,0], 1)
        self.assertEquals(self.table[2,0], 2)
    def test_set_column_by_id(self):
        """Tests that table.set_column_by_id() works fine.
        """
        self.table.set_column_by_id('col1', range(3))
        self.assertEquals(self.table[0,0], 0)
        self.assertEquals(self.table[1,0], 1)
        self.assertEquals(self.table[2,0], 2)
        self.assertRaises(KeyError, self.table.set_column_by_id, 'col123', range(3))
    def test_cells_ids(self):
        """tests that we can access cells by giving row/col ids"""
        self.assertRaises(KeyError, self.table.set_cell_by_ids, 'row12', 'col1', 12)
        self.assertRaises(KeyError, self.table.set_cell_by_ids, 'row1', 'col12', 12)
        self.assertEquals(self.table[0,0], 0)
        self.table.set_cell_by_ids('row1', 'col1', 'DATA')
        self.assertEquals(self.table[0,0], 'DATA')
        self.assertRaises(KeyError, self.table.set_row_by_id, 'row12', [])
        self.table.set_row_by_id('row1', ['1.0', '1.1'])
        self.assertEquals(self.table[0,0], '1.0')
    def test_insert_row(self):
        """tests a row insertion"""
        tmp_data = ['tmp1', 'tmp2']
        self.table.insert_row(1, tmp_data, 'tmprow')
        self.assertEquals(self.table[1], tmp_data)
        self.assertEquals(self.table['tmprow'], tmp_data)
        self.table.delete_row_by_id('tmprow')
        self.assertRaises(KeyError, self.table.delete_row_by_id, 'tmprow')
        self.assertEquals(self.table[1], [0, 0])
        self.assertRaises(KeyError, self.table.__getitem__, 'tmprow')
    def test_get_column(self):
        """Tests that table.get_column() works fine.
        """
        self.table.set_cell(0, 1, 12)
        self.table.set_cell(2, 1, 13)
        self.assertEquals(self.table[:,1], [12,0,13])
        self.assertEquals(self.table[:,'col2'], [12,0,13])
    def test_get_columns(self):
        """Tests if table.get_columns() works fine.
        """
        self.table.set_cell(0, 1, 12)
        self.table.set_cell(2, 1, 13)
        self.assertEquals(self.table.get_columns(), [[0,0,0], [12,0,13]])
    def test_insert_column(self):
        """Tests that table.insert_column() works fine.
        """
        self.table.insert_column(1, range(3), "inserted_column")
        self.assertEquals(self.table[:,1], [0,1,2])
        self.assertEquals(self.table.col_names,
                          ['col1', 'inserted_column', 'col2'])
    def test_delete_column(self):
        """Tests that table.delete_column() works fine.
        """
        self.table.delete_column(1)
        self.assertEquals(self.table.col_names, ['col1'])
        self.assertEquals(self.table[:,0], [0,0,0])
        self.assertRaises(KeyError, self.table.delete_column_by_id, 'col2')
        self.table.delete_column_by_id('col1')
        self.assertEquals(self.table.col_names, [])
    def test_transpose(self):
        """Tests that table.transpose() works fine.
        """
        self.table.append_column(range(5,8), 'col3')
        ttable = self.table.transpose()
        self.assertEquals(ttable.row_names, ['col1', 'col2', 'col3'])
        self.assertEquals(ttable.col_names, ['row1', 'row2', 'row3'])
        self.assertEquals(ttable.data, [[0,0,0], [0,0,0], [5,6,7]])
    def test_sort_table(self):
        """Tests the table sort by column
        """
        self.table.set_column(0, [3, 1, 2])
        self.table.set_column(1, [1, 2, 3])
        self.table.sort_by_column_index(0)
        self.assertEquals(self.table.row_names, ['row2', 'row3', 'row1'])
        self.assertEquals(self.table.data, [[1, 2], [2, 3], [3, 1]])
        self.table.sort_by_column_index(1, 'desc')
        self.assertEquals(self.table.row_names, ['row3', 'row2', 'row1'])
        self.assertEquals(self.table.data, [[2, 3], [1, 2], [3, 1]])
    def test_sort_by_id(self):
        """tests sort_by_column_id()"""
        self.table.set_column_by_id('col1', [3, 1, 2])
        self.table.set_column_by_id('col2', [1, 2, 3])
        self.table.sort_by_column_id('col1')
        self.assertRaises(KeyError, self.table.sort_by_column_id, 'col123')
        self.assertEquals(self.table.row_names, ['row2', 'row3', 'row1'])
        self.assertEquals(self.table.data, [[1, 2], [2, 3], [3, 1]])
        self.table.sort_by_column_id('col2', 'desc')
        self.assertEquals(self.table.row_names, ['row3', 'row2', 'row1'])
        self.assertEquals(self.table.data, [[2, 3], [1, 2], [3, 1]])
    def test_pprint(self):
        """only tests pprint doesn't raise an exception"""
        self.table.pprint()
        str(self.table)
class GroupByTC(TestCase):
    """specific test suite for groupby()"""
    def setUp(self):
        """builds a sample table of (date, res, task, usage) records"""
        t = Table()
        t.create_columns(['date', 'res', 'task', 'usage'])
        t.append_row(['date1', 'ing1', 'task1', 0.3])
        t.append_row(['date1', 'ing2', 'task2', 0.3])
        t.append_row(['date2', 'ing3', 'task3', 0.3])
        t.append_row(['date3', 'ing4', 'task2', 0.3])
        t.append_row(['date1', 'ing1', 'task3', 0.3])
        t.append_row(['date3', 'ing1', 'task3', 0.3])
        self.table = t
    def test_single_groupby(self):
        """tests groupby() on a single column"""
        grouped = self.table.groupby('date')
        self.assertEquals(len(grouped), 3)
        self.assertEquals(len(grouped['date1']), 3)
        self.assertEquals(len(grouped['date2']), 1)
        self.assertEquals(len(grouped['date3']), 2)
        self.assertEquals(grouped['date1'], [
            ('date1', 'ing1', 'task1', 0.3),
            ('date1', 'ing2', 'task2', 0.3),
            ('date1', 'ing1', 'task3', 0.3),
            ])
        self.assertEquals(grouped['date2'], [('date2', 'ing3', 'task3', 0.3)])
        self.assertEquals(grouped['date3'], [
            ('date3', 'ing4', 'task2', 0.3),
            ('date3', 'ing1', 'task3', 0.3),
            ])
    def test_multiple_groupby(self):
        """tests groupby() on several columns"""
        grouped = self.table.groupby('date', 'task')
        self.assertEquals(len(grouped), 3)
        self.assertEquals(len(grouped['date1']), 3)
        self.assertEquals(len(grouped['date2']), 1)
        self.assertEquals(len(grouped['date3']), 2)
        self.assertEquals(grouped['date1']['task1'], [('date1', 'ing1', 'task1', 0.3)])
        self.assertEquals(grouped['date2']['task3'], [('date2', 'ing3', 'task3', 0.3)])
        self.assertEquals(grouped['date3']['task2'], [('date3', 'ing4', 'task2', 0.3)])
        date3 = grouped['date3']
        self.assertRaises(KeyError, date3.__getitem__, 'task1')
    def test_select(self):
        """tests Table.select() method"""
        rows = self.table.select('date', 'date1')
        self.assertEquals(rows, [
            ('date1', 'ing1', 'task1', 0.3),
            ('date1', 'ing2', 'task2', 0.3),
            ('date1', 'ing1', 'task3', 0.3),
            ])
class TableStyleSheetTC(TestCase):
    """The Stylesheet test case
    """
    def setUp(self):
        """Builds a simple table to test the stylesheet
        """
        self.table = Table()
        self.table.create_row('row1')
        self.table.create_columns(['a','b','c'])
        self.stylesheet = TableStyleSheet()
        # We don't want anything to be printed
        self.stdout_backup = sys.stdout
        sys.stdout = StringIO()
    def tearDown(self):
        """restores the original sys.stdout"""
        sys.stdout = self.stdout_backup
    def test_add_rule(self):
        """Tests that the regex pattern works as expected.
        """
        rule = '0_2 = sqrt(0_0**2 + 0_1**2)'
        self.stylesheet.add_rule(rule)
        self.table.set_row(0, [3,4,0])
        self.table.apply_stylesheet(self.stylesheet)
        self.assertEquals(self.table[0], [3,4,5])
        self.assertEquals(len(self.stylesheet.rules), 1)
        self.stylesheet.add_rule('some bad rule with bad syntax')
        self.assertEquals(len(self.stylesheet.rules), 1, "Ill-formed rule mustn't be added")
        self.assertEquals(len(self.stylesheet.instructions), 1, "Ill-formed rule mustn't be added")
    def test_stylesheet_init(self):
        """tests Stylesheet.__init__"""
        rule = '0_2 = 1'
        sheet = TableStyleSheet([rule, 'bad rule'])
        self.assertEquals(len(sheet.rules), 1, "Ill-formed rule mustn't be added")
        self.assertEquals(len(sheet.instructions), 1, "Ill-formed rule mustn't be added")
    def test_rowavg_rule(self):
        """Tests that add_rowavg_rule works as expected
        """
        self.table.set_row(0, [10,20,0])
        self.stylesheet.add_rowavg_rule((0,2), 0, 0, 1)
        self.table.apply_stylesheet(self.stylesheet)
        val = self.table[0,2]
        self.assert_(int(val) == 15)
    def test_rowsum_rule(self):
        """Tests that add_rowsum_rule works as expected
        """
        self.table.set_row(0, [10,20,0])
        self.stylesheet.add_rowsum_rule((0,2), 0, 0, 1)
        self.table.apply_stylesheet(self.stylesheet)
        val = self.table[0,2]
        self.assert_(val == 30)
    def test_colavg_rule(self):
        """Tests that add_colavg_rule works as expected
        """
        self.table.set_row(0, [10,20,0])
        self.table.append_row([12,8,3], 'row2')
        self.table.create_row('row3')
        self.stylesheet.add_colavg_rule((2,0), 0, 0, 1)
        self.table.apply_stylesheet(self.stylesheet)
        val = self.table[2,0]
        self.assert_(int(val) == 11)
    def test_colsum_rule(self):
        """Tests that add_colsum_rule works as expected
        """
        self.table.set_row(0, [10,20,0])
        self.table.append_row([12,8,3], 'row2')
        self.table.create_row('row3')
        self.stylesheet.add_colsum_rule((2,0), 0, 0, 1)
        self.table.apply_stylesheet(self.stylesheet)
        val = self.table[2,0]
        self.assert_(val == 22)
class TableStyleTC(TestCase):
    """Test suite for TableStyle"""
    def setUp(self):
        """builds a default table, its style, and the attributes to test"""
        self.table = Table()
        self.table.create_rows(['row1', 'row2', 'row3'])
        self.table.create_columns(['col1', 'col2'])
        self.style = TableStyle(self.table)
        # (attribute name, expected default value) pairs
        self._tested_attrs = (('size', '1*'),
                              ('alignment', 'right'),
                              ('unit', ''))
    def test_getset(self):
        """tests style's get and set methods"""
        for attrname, default_value in self._tested_attrs:
            getter = getattr(self.style, 'get_%s' % attrname)
            setter = getattr(self.style, 'set_%s' % attrname)
            self.assertRaises(KeyError, getter, 'badcol')
            self.assertEquals(getter('col1'), default_value)
            setter('FOO', 'col1')
            self.assertEquals(getter('col1'), 'FOO')
    def test_getset_index(self):
        """tests style's get and set by index methods"""
        for attrname, default_value in self._tested_attrs:
            getter = getattr(self.style, 'get_%s' % attrname)
            setter = getattr(self.style, 'set_%s' % attrname)
            igetter = getattr(self.style, 'get_%s_by_index' % attrname)
            isetter = getattr(self.style, 'set_%s_by_index' % attrname)
            # index 0 is the row-header pseudo column '__row_column__'
            self.assertEquals(getter('__row_column__'), default_value)
            isetter('FOO', 0)
            self.assertEquals(getter('__row_column__'), 'FOO')
            self.assertEquals(igetter(0), 'FOO')
            self.assertEquals(getter('col1'), default_value)
            isetter('FOO', 1)
            self.assertEquals(getter('col1'), 'FOO')
            self.assertEquals(igetter(1), 'FOO')
class RendererTC(TestCase):
    """Test suite for DocbookRenderer"""
    def setUp(self):
        """creates renderers plus a default table/style to render"""
        self.renderer = DocbookRenderer(alignment = True)
        self.table = Table()
        self.table.create_rows(['row1', 'row2', 'row3'])
        self.table.create_columns(['col1', 'col2'])
        self.style = TableStyle(self.table)
        self.base_renderer = TableCellRenderer()
    def test_cell_content(self):
        """test how alignment is rendered"""
        entry_xml = self.renderer._render_cell_content('data', self.style, 1)
        self.assertEquals(entry_xml, "<entry align='right'>data</entry>\n")
        self.style.set_alignment_by_index('left', 1)
        entry_xml = self.renderer._render_cell_content('data', self.style, 1)
        self.assertEquals(entry_xml, "<entry align='left'>data</entry>\n")
    def test_default_content_rendering(self):
        """tests that default rendering just prints the cell's content"""
        rendered_cell = self.base_renderer._render_cell_content('data', self.style, 1)
        self.assertEquals(rendered_cell, "data")
    def test_replacement_char(self):
        """tests that 0 is replaced when asked for"""
        cell_content = self.base_renderer._make_cell_content(0, self.style, 1)
        self.assertEquals(cell_content, 0)
        self.base_renderer.properties['skip_zero'] = '---'
        cell_content = self.base_renderer._make_cell_content(0, self.style, 1)
        self.assertEquals(cell_content, '---')
    def test_unit(self):
        """tests if units are added"""
        self.base_renderer.properties['units'] = True
        self.style.set_unit_by_index('EUR', 1)
        cell_content = self.base_renderer._make_cell_content(12, self.style, 1)
        self.assertEquals(cell_content, '12 EUR')
class DocbookTableWriterTC(TestCase):
    """TestCase for table's writer"""
    def setUp(self):
        """creates a DocbookTableWriter rendering into an in-memory stream"""
        self.stream = StringIO()
        self.table = Table()
        self.table.create_rows(['row1', 'row2', 'row3'])
        self.table.create_columns(['col1', 'col2'])
        self.writer = DocbookTableWriter(self.stream, self.table, None)
        self.writer.set_renderer(DocbookRenderer())
    def test_write_table(self):
        """make sure write_table() doesn't raise any exception"""
        self.writer.write_table()
    def test_abstract_writer(self):
        """tests that Abstract Writers can't be used !"""
        writer = TableWriter(self.stream, self.table, None)
        self.assertRaises(NotImplementedError, writer.write_table)
if __name__ == '__main__':
    # unittest_main is logilab's enhanced unittest entry point
    unittest_main()
| {
"content_hash": "eca5ea8cfc656b8cad9a5bdc78315925",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 99,
"avg_line_length": 40.009280742459396,
"alnum_prop": 0.580723729993041,
"repo_name": "dbbhattacharya/kitsune",
"id": "86b868c4f99ea73bed38e5f26f61264060c53614",
"size": "18085",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/logilab-common/test/unittest_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
import numbers
from collections import defaultdict
import numpy as np
from coremltools import _logger as logger
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
from .block import Function, curr_block
from .input_type import (InternalInputType, ListOrTensorInputType,
TensorInputType, TupleInputType)
from .program import Placeholder, Program
from .var import InternalVar, Var
def is_python_value(val):
    """
    Return True when ``val`` is a plain Python/numpy scalar, a string, a
    bool, a numpy array, or a (possibly nested) tuple/list of such values.
    """
    if isinstance(val, (np.generic, np.ndarray, numbers.Number, str, bool)):
        return True
    if isinstance(val, (tuple, list)):
        # nested sequences count only if every element does
        return all(is_python_value(element) for element in val)
    return False
class Builder:
    """
    This class is a singleton builder to construct a MIL program. For more
    information, see `Create a MIL program <https://coremltools.readme.io/docs/model-intermediate-language#create-a-mil-program>`_.

    Importing ``.ops`` triggers the installation of all MIL ops into the Builder.
    For details on each op, see `MIL ops <https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html>`_.

    Examples
    --------
    >>> from coremltools.converters.mil.mil import Builder as mb
    >>> from coremltools.converters.mil.mil import Program, Function
    >>> prog = Program()
    >>> func_inputs = {"x": mb.placeholder(shape=[2,3]),
    >>>                "y": mb.placeholder(shape=[2,3])}
    >>> with Function(func_inputs) as ssa_fun:
    >>>     x, y = ssa_fun.inputs['x'], ssa_fun.inputs['y']
    >>>     res_var = mb.add(x=x, y=y) # created within ssa_fun block
    >>>     ssa_fun.set_outputs([res_var])
    >>> prog.add_function("main", ssa_fun)
    >>> # Importing ops triggers installation of all ops into Builder.
    >>> from .ops import defs as _ops
    """
    # Per-base-name counters used to generate unique op/var names.
    name_count = defaultdict(int)
    @classmethod
    def _get_free_name(cls, name):
        """Return a unique name: ``name`` + "_" + running counter for ``name``."""
        new_name = name + "_" + str(cls.name_count[name])
        cls.name_count[name] += 1
        return new_name
    @classmethod
    def _maybe_set_name(cls, kwargs, op_type):
        """Default kwargs["name"] to a fresh name derived from the op type."""
        if "name" not in kwargs:
            kwargs["name"] = cls._get_free_name(op_type)
        return kwargs
    @classmethod
    def _add_const(cls, val, name, before_op):
        """
        Wrap the python/numpy value ``val`` in a const op inserted before
        ``before_op`` and return the const's output Var.
        """
        if not is_python_value(val):
            raise ValueError("Cannot add const {}".format(val))
        if any_symbolic(val):
            # NOTE(review): the concatenation below drops the spaces between
            # segments ("...areoperation...", "...feedingsymbolic...") — the
            # emitted message is garbled.
            msg = (
                "Python native vals (list, tuple), np.array that are"
                + "operation inputs cannot have symbolic values. Consider feeding"
                + "symbolic shape in through placeholder and use mb.shape() "
                + "operator. Input {}: {}"
            )
            raise ValueError(msg.format(name, val))
        const_name = cls._get_free_name(name)
        logger.debug("Adding const op '{}'".format(const_name))
        output_var = cls.const(val=val, name=const_name,
                               before_op=before_op)
        return output_var
    @classmethod
    def _create_vars(cls, input_spec, op_name, before_op,
                     candidate_kv):
        """
        For each key K in `candidate_kv`, create a Var if the
        followings are satisfied:

        - K exists in input_spec and is not an InternalInputType
        - candidate_kv[K] is not already a Var

        Inputs
        ------
        - candidate_kv: Dict[str, Any]
          Key-values may be inputs to an op (whose inputs is defined by
          input_spec)

        Returns
        -------
        - var_kv: Dict[str, Var]
          For the K satisfying the above, var_kv[K] is the newly
          created Var
        """
        update_dict = {}
        for k, val in candidate_kv.items():
            if isinstance(val, Var):
                continue  # already a Var
            if k not in input_spec.input_types:
                continue  # k is not an op input
            in_type = input_spec.input_types[k]
            if isinstance(in_type, InternalInputType):
                new_var_name = op_name + "_" + k
                var = InternalVar(val, name=new_var_name)
                curr_block().add_internal_var(var)
                update_dict[k] = var
                continue  # Not a regular Var
            new_var_name = op_name + "_" + k
            if isinstance(in_type, TupleInputType):
                var = []
                for i, v in enumerate(val):
                    if isinstance(v, Var):
                        var.append(v)
                        continue
                    # non-Var tuple elements each become their own const
                    var.append(
                        cls._add_const(v, new_var_name + str(i),
                                       before_op)
                    )
                update_dict[k] = var
                continue
            if isinstance(in_type, (TensorInputType, ListOrTensorInputType)):
                var = cls._add_const(val, new_var_name, before_op)
                update_dict[k] = var
        return update_dict
    @classmethod
    def _add_op(cls, op_cls, **kwargs):
        """
        Add an op of type `op_cls` (e.g., convolution) to current block.
        """
        kwargs = cls._maybe_set_name(kwargs, op_cls.__name__)
        logger.info(
            "Adding op '{}' of type {}".format(kwargs["name"], op_cls.__name__)
        )
        before_op = kwargs.get("before_op", None)
        # Shallow copy list inputs to ensure op inputs are immutable
        kwargs = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in kwargs.items() if v is not None}
        kwargs.update(cls._create_vars(
            input_spec=op_cls.input_spec,
            op_name=kwargs["name"], before_op=before_op,
            candidate_kv=kwargs))
        new_op = op_cls(**kwargs)
        # Initialize optional input Vars if it wasn't in kwargs
        default_inputs = new_op.default_inputs()
        # Shallow copy list inputs to ensure op inputs are immutable
        missing_optional_vals = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in default_inputs.items()
            if k not in kwargs and v is not None}
        missing_optional_vars = cls._create_vars(
            input_spec=op_cls.input_spec,
            op_name=kwargs["name"], before_op=before_op,
            candidate_kv=missing_optional_vals)
        new_op.set_inputs(type_inference=False,
                          **missing_optional_vars)
        curr_block()._insert_op_before(new_op, before_op=before_op)
        new_op.build_nested_blocks()
        new_op.type_value_inference()
        if len(new_op.outputs) == 1:
            return new_op.outputs[0]
        return new_op.outputs
    @staticmethod
    def placeholder(shape, dtype=None, allow_rank0_input=False):
        """Create a Placeholder describing a function input tensor."""
        return Placeholder(shape, dtype, allow_rank0_input=allow_rank0_input)
    @staticmethod
    def TensorSpec(shape, dtype=None):
        """Describe a tensor input; returns a Placeholder (no rank-0 option)."""
        return Placeholder(shape, dtype)
    @staticmethod
    def program(input_specs=None, opset_version=None):
        """
        The ``mb.program`` decorator creates a MIL program with a single
        function (``main``). The input to ``main`` is a tensor.

        Parameters
        ----------
        input_specs: TensorSpec
            Describes a tensor.

        opset_version: AvailableTarget enum
            Describes the opset version of the program

        Examples
        --------
        >>> import coremltools as ct
        >>> @mb.program(input_specs=[mb.TensorSpec(shape=(1,2))], opset_version=ct.target.iOS16)
        >>> def prog(a):
        >>>     return mb.add(x=a, y=2)
        """
        if input_specs is None:
            input_specs = []
        def wrapper(main_block):
            # The decorated function's positional parameters define the
            # program's input names, matched 1:1 against input_specs.
            program = Program()
            num_args = main_block.__code__.co_argcount
            arg_names = list(main_block.__code__.co_varnames)[:num_args]
            if len(input_specs) != num_args:
                msg = "{} expects {} inputs: {}. Got {} input_specs."
                raise ValueError(
                    msg.format(
                        main_block.__name__, num_args, arg_names, len(input_specs)
                    )
                )
            input_spec_dict = {k: v for k, v in zip(arg_names, input_specs)}
            with Function(input_spec_dict, opset_version) as func:
                input_vars = [func.inputs[a] for a in arg_names]
                outputs = main_block(*input_vars)
                if isinstance(outputs, tuple):
                    outputs = list(outputs)
                elif not isinstance(outputs, list):
                    outputs = [outputs]
                func.set_outputs(outputs)
                program.add_function("main", func)
            return program
        return wrapper
| {
"content_hash": "530cc753b235118c8b2812d0801e184a",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 133,
"avg_line_length": 36.045643153526974,
"alnum_prop": 0.5607229193047082,
"repo_name": "apple/coremltools",
"id": "2f782c27fac661704a965f228a0f1a2b29cbe687",
"size": "8906",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "coremltools/converters/mil/mil/builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79917"
},
{
"name": "C++",
"bytes": "1420033"
},
{
"name": "CMake",
"bytes": "20418"
},
{
"name": "Makefile",
"bytes": "4258"
},
{
"name": "Mustache",
"bytes": "2676"
},
{
"name": "Objective-C",
"bytes": "4061"
},
{
"name": "Objective-C++",
"bytes": "28933"
},
{
"name": "Python",
"bytes": "5004520"
},
{
"name": "Shell",
"bytes": "19662"
}
],
"symlink_target": ""
} |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
from os import environ
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Caracas'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-ES'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'static'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/assets/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'assets')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"*ed!)9oq8-^v77g@k!u^!#p0g77lgb4_8^3yg0a=cux16@ir)a"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'grappelli',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'apps.users',
'apps.surveys',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION
# User profile
#AUTH_PROFILE_MODULE = 'apps.users.UserProfile'
# URL of the login page.
LOGIN_URL = '/login/'
GRAPPELLI_ADMIN_TITLE = 'Sistema de Encuestas C-NUTRA' | {
"content_hash": "1bcd1a65068b7910bf32841886e95c00",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 100,
"avg_line_length": 29.40530303030303,
"alnum_prop": 0.6845291768646142,
"repo_name": "dmallcott/C-Nutra",
"id": "835783aea29104060de625c025b3dac57510addb",
"size": "7763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c_nutra/config/settings/base.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54060"
},
{
"name": "JavaScript",
"bytes": "141536"
},
{
"name": "Python",
"bytes": "82103"
},
{
"name": "Ruby",
"bytes": "853"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
class Retrier(object):
    """
    Models a Retrier for a "Retry" field in a Task or Parallel state

    :param ErrorNameList: [Required] The set of error names that this ``Retrier`` will handle.
    :type ErrorNameList: list of str
    :param IntervalSeconds: [Optional] The interval in seconds before retrying the state.  Default is 1 second.
    :type IntervalSeconds: int
    :param MaxAttempts: [Optional] The maximum number of retry attempts.  Default is 3.
    :type MaxAttempts: int
    :param BackoffRate: [Optional] The growth rate in retry interval.  Must be >= 1.0.  Default is 2.0.
    :type BackoffRate: float
    """

    def __init__(self, ErrorNameList=None, IntervalSeconds=1, MaxAttempts=3, BackoffRate=2.0):
        """
        Initializer for this instance.

        :param ErrorNameList: [Required] The set of error names that this ``Retrier`` will handle.
        :type ErrorNameList: list of str
        :param IntervalSeconds: [Optional] The interval in seconds before retrying the state.  Default is 1 second.
        :type IntervalSeconds: int
        :param MaxAttempts: [Optional] The maximum number of retry attempts.  Default is 3.
        :type MaxAttempts: int
        :param BackoffRate: [Optional] The growth rate in retry interval.  Must be >= 1.0.  Default is 2.0.
        :type BackoffRate: float
        """
        self._error_name_list = None
        self._interval_seconds = 1
        self._max_attempts = 3
        self._back_off_rate = 2.0
        self.set_error_name_list(ErrorNameList)
        self.set_interval_seconds(IntervalSeconds)
        self.set_max_attempts(MaxAttempts)
        self.set_backoff_rate(BackoffRate)

    def get_error_name_list(self):
        """
        Returns the ``list`` of error names that this instance will handle.

        :returns: list of str -- The list of error names
        """
        return self._error_name_list

    def set_error_name_list(self, ErrorNameList):
        """
        Sets the ``list`` of error names that this instance will handle.
        ``ErrorNameList`` must not be ``None``, and must be a non-empty ``list`` of ``str``.

        :param ErrorNameList: [Required] The set of error names that this ``Retrier`` will handle.
        :type ErrorNameList: list of str
        """
        # An empty list is rejected by the same truthiness check as None.
        if not ErrorNameList:
            raise Exception("ErrorNameList must not be None for a Retrier")
        if not isinstance(ErrorNameList, list):
            raise Exception("ErrorNameList must be a list for a Retrier")
        for o in ErrorNameList:
            if not isinstance(o, str):
                raise Exception("ErrorNameList must only contain strings")
        self._error_name_list = ErrorNameList

    def get_interval_seconds(self):
        """
        Returns the interval in seconds before the state machine will retry the associated failed state.

        :returns: int -- The interval in seconds before retrying.
        """
        return self._interval_seconds

    def set_interval_seconds(self, IntervalSeconds=1):
        """
        Sets the interval in seconds before the state machine will retry the associated failed state.
        The interval must be >= 1 second.  Default is 1 second.

        :param IntervalSeconds: [Optional] The interval in seconds before retrying the state.
        :type IntervalSeconds: int
        """
        # BUG FIX: use an explicit None test so an invalid 0 gets the
        # accurate range message below rather than the "not None" message.
        if IntervalSeconds is None:
            raise Exception("IntervalSeconds must not be None for a Retrier")
        if not isinstance(IntervalSeconds, int):
            raise Exception("IntervalSeconds must be an integer value")
        if IntervalSeconds < 1:
            # Message corrected: the check enforces >= 1, not > 1.
            raise Exception("IntervalSeconds must be at least 1 second")
        self._interval_seconds = IntervalSeconds

    def get_max_attempts(self):
        """
        Returns the maximum number of retry attempts of the associated failed state.

        :returns: int -- The maximum number of retry attempts.
        """
        return self._max_attempts

    def set_max_attempts(self, MaxAttempts=3):
        """
        Sets the maximum number of retry attempts of the associated failed state.
        The max attempts must be greater than or equal to zero.  A value of zero
        indicates that no retry will be attempted.  The default is 3.

        :param MaxAttempts: [Optional] The maximum number of retry attempts.
        :type MaxAttempts: int
        """
        # BUG FIX: the previous "if not MaxAttempts" rejected the documented
        # valid value 0; only None must be rejected here.
        if MaxAttempts is None:
            raise Exception("MaxAttempts must not be None for a Retrier")
        if not isinstance(MaxAttempts, int):
            raise Exception("MaxAttempts must be an integer value")
        if MaxAttempts < 0:
            raise Exception("MaxAttempts must be 0 or greater")
        self._max_attempts = MaxAttempts

    def get_backoff_rate(self):
        """
        Returns the backoff rate that will be applied to the ``IntervalSeconds`` on each retry.

        :returns: float -- The backoff rate for the ``IntervalSeconds``.
        """
        return self._back_off_rate

    def set_backoff_rate(self, BackoffRate=2.0):
        """
        Sets the backoff rate that will be applied to the ``IntervalSeconds`` after the first retry.
        The backoff rate must be >= 1.0.  Default is 2.0.

        :param BackoffRate: [Optional] The growth rate in retry interval.
        :type BackoffRate: float
        """
        if BackoffRate is None:
            raise Exception("BackoffRate must not be None for a Retrier")
        # Generalized: integer rates (e.g. 2) are now accepted and stored
        # as float, which is backward compatible with float-only callers.
        if not isinstance(BackoffRate, (int, float)):
            raise Exception("BackoffRate must be a numeric value")
        if BackoffRate < 1.0:
            raise Exception("BackoffRate must be greater or equal to 1.0")
        self._back_off_rate = float(BackoffRate)

    def validate(self):
        """
        Validates this instance is correctly specified.
        Raises ``Exception`` with details of the error, if the state machine is incorrectly defined.
        """
        if not self.get_error_name_list():
            raise Exception("Retrier must have an ErrorNameList")

    def to_json(self):
        """
        Returns the JSON representation of this instance.

        :returns: dict -- The JSON representation
        """
        return {
            "ErrorEquals": self.get_error_name_list(),
            "IntervalSeconds": self.get_interval_seconds(),
            "MaxAttempts": self.get_max_attempts(),
            "BackoffRate": self.get_backoff_rate()
        }

    def clone(self):
        """
        Returns a clone of this instance.

        :returns: ``Retrier`` -- A new instance of this instance and any other instances in its branch.
        """
        # BUG FIX: the previous implementation constructed Retrier() without
        # an ErrorNameList, which always raised in __init__ before the list
        # could be copied in.  Pass a copy of the list to the constructor.
        return Retrier(
            ErrorNameList=list(self.get_error_name_list()),
            IntervalSeconds=self.get_interval_seconds(),
            MaxAttempts=self.get_max_attempts(),
            BackoffRate=self.get_backoff_rate())
| {
"content_hash": "ccd7880b722cbf2bbce9dff1d6db91ff",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 136,
"avg_line_length": 33.67567567567568,
"alnum_prop": 0.720545746388443,
"repo_name": "gford1000/awssl",
"id": "16c768757c70a3753c80b81e6fe7b40bceb10e90",
"size": "6230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awssl/retrier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165950"
}
],
"symlink_target": ""
} |
NAME = 'ntpdshm'
VERSION = '0.2.1'
LICENSE = 'BSD License'
AUTHOR = 'Markus Juenemann'
EMAIL = 'markus@juenemann.net'
DESCRIPTION = 'Python interface to NTP Shared Memory'
URL = 'https://github.com/mjuenema/python-ntpdshm'
from setuptools import setup, Extension
# C extension wrapping the ntpd shared-memory segment (SWIG-generated wrapper).
ntpdshm_module = Extension('ntpdshm._shm', sources=['ntpdshm/shm.c', 'ntpdshm/shm_wrap.c'],)
from os.path import join, dirname


def _read(filename):
    """Return the contents of *filename*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed promptly instead of
    leaking until garbage collection (the previous open(...).read() calls
    never closed their handles).
    """
    with open(join(dirname(__file__), filename)) as f:
        return f.read()


readme = _read('README.rst')
# Consistency fix: HISTORY.rst was previously opened relative to the current
# working directory while every sibling file used dirname(__file__).
history = _read('HISTORY.rst').replace('.. :changelog:', '')
requirements = _read('requirements.txt').split()
test_requirements = _read('test_requirements.txt').split()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=readme + '\n\n' + history,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    packages=[
        NAME,
    ],
    package_dir={'ntpdshm':
                 'ntpdshm'},
    include_package_data=True,
    install_requires=requirements,
    ext_modules = [ntpdshm_module],
    py_modules = ['ntpdshm'],
    license=LICENSE,
    zip_safe=False,
    keywords='ntp, shared memory',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: Unix',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Networking :: Time Synchronization',
        'Programming Language :: Python :: C',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| {
"content_hash": "c380328fad2e78c4f4bf7f63bcab4829",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 92,
"avg_line_length": 33.40677966101695,
"alnum_prop": 0.6230339928970066,
"repo_name": "mjuenema/python-ntpdshm",
"id": "1cb3e9dbff0ce95e028997c9295613ede9722260",
"size": "1996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5485"
},
{
"name": "Makefile",
"bytes": "2172"
},
{
"name": "Python",
"bytes": "14629"
}
],
"symlink_target": ""
} |
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import get_git_version
#
# setup(
# version=get_git_version()[0],
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
from __future__ import absolute_import, division, print_function

# BUG FIX: __all__ must be a sequence of names.  ("get_git_version") without
# a trailing comma is just a parenthesized string, which makes
# ``from version import *`` iterate the string's characters and fail.
__all__ = ("get_git_version",)

import os
import sys
import traceback
from subprocess import Popen, PIPE
def call_git_describe(abbrev=4):
    """Return ``(version, source_hash)`` derived from ``git describe``.

    Returns ``(None, None)`` when this checkout has no ``.git`` directory or
    when any git invocation fails; diagnostics are written to stderr in the
    failure case.

    :param abbrev: number of characters to keep of the abbreviated hash.
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(repo_dir, '.git')):
        return None, None
    p = None
    try:
        p = Popen(['git', 'describe', '--abbrev=%d' % abbrev],
                  stdout=PIPE, stderr=PIPE,
                  cwd=repo_dir,
                  universal_newlines=True)
        p.stderr.close()
        describe_line = p.stdout.readlines()[0].strip()
        # BUG FIX: the rev-parse call previously ran without cwd (wrong
        # directory) and without universal_newlines (bytes on Python 3).
        p = Popen(['git', 'rev-parse', 'HEAD'],
                  stdout=PIPE, stderr=PIPE,
                  cwd=repo_dir,
                  universal_newlines=True)
        p.stderr.close()
        source_hash = p.stdout.readlines()[0].strip()
        source_hash = source_hash[:abbrev]
        # rsplit so tags that themselves contain '-' still parse as
        # <tag>-<commits-since-tag>-<g-prefixed hash>.
        parts = describe_line.rsplit('-', 2)
        if len(parts) == 1:
            version = parts[0]
        else:
            ver, rel, source_hash = parts
            version_parts = ver.split('.')
            lasti = len(version_parts) - 1
            # increment whatever the last part of this a.b.c.d.yadda
            version_parts[lasti] = str(int(version_parts[lasti]) + 1)
            version = '{}.dev{}'.format('.'.join(version_parts), rel)
        return version, source_hash
    except Exception:
        # BUG FIX: traceback.format_exc() takes a line *limit*, not the
        # exception object; passing the exception raised a TypeError here.
        # The vestigial, never-assigned 'line' diagnostic was removed.
        sys.stderr.write(traceback.format_exc())
        try:
            sys.stderr.write('p.stderr.read()=%s\n' % p.stderr.read())
        except Exception:
            sys.stderr.write(traceback.format_exc())
        try:
            sys.stderr.write('os.getcwd()=%s\n' % os.getcwd())
        except Exception:
            sys.stderr.write(traceback.format_exc())
        return None, None
def read_release_version():
    """Read ``[version, source_hash]`` from the RELEASE-VERSION file.

    The file holds a single ``version,source_hash`` line.  Returns the tuple
    ``(None, None)`` when the file is missing, empty, or unreadable.
    """
    try:
        # Context manager replaces the manual open/close pair and guarantees
        # the handle is released even if the read raises.
        with open("RELEASE-VERSION", "r") as f:
            return f.readlines()[0].strip().split(',')
    except Exception:
        # No usable RELEASE-VERSION file; the caller falls back to git.
        # (Narrowed from a bare "except:", which also swallowed SystemExit.)
        return None, None
def write_release_version(version, source_hash):
    """Persist *version* and *source_hash* to the RELEASE-VERSION file.

    Writes a single ``version,source_hash`` line; the companion
    read_release_version() parses this format back.
    """
    # Context manager ensures the handle is flushed and closed even on error.
    with open("RELEASE-VERSION", "w") as f:
        f.write("%s,%s\n" % (version, source_hash))
def get_git_version(abbrev=4):
    """Return ``(version, source_hash)``, preferring git over the cache file.

    Tries ``git describe`` first, falls back to the RELEASE-VERSION file,
    and finally to the sentinel ('0.0.0', '').  As a side effect, rewrites
    RELEASE-VERSION whenever the computed value differs from the cached one.

    :param abbrev: number of hash characters to keep (passed through to git).
    """
    # Read in the version that's currently in RELEASE-VERSION.
    release_version, release_source_hash = read_release_version()
    # First try to get the current version using “git describe”.
    version, source_hash = call_git_describe(abbrev)
    # If that doesn't work, fall back on the value that's in
    # RELEASE-VERSION.
    if version is None:
        version = release_version
        source_hash = release_source_hash
    # If we still don't have anything, that's an error.
    if version is None:
        # raise ValueError("Cannot find the version number!")
        version = '0.0.0'
        source_hash = ''
    # If the current version is different from what's in the
    # RELEASE-VERSION file, update the file to be current.
    if version != release_version or source_hash != release_source_hash:
        write_release_version(version, source_hash)
    # Finally, return the current version.
    return version, source_hash
if __name__ == "__main__":
    print(get_git_version())
| {
"content_hash": "da2f2d639b6caba840417c1c24f6acbb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 72,
"avg_line_length": 29.94557823129252,
"alnum_prop": 0.6119945479327579,
"repo_name": "diffeo/pytest-diffeo",
"id": "5e2283c5cca68cfe2eb157e64e4caea9ce9848bb",
"size": "4534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15553"
}
],
"symlink_target": ""
} |
import os
import sys
import gzip
import subprocess
# Command-line arguments: <num_topics> <num_iterations>.
# NOTE(review): parsed at import time, so importing or running this module
# without both arguments raises IndexError.  'os' appears unused in this file.
topics = int(sys.argv[1])
iterations = int(sys.argv[2])
def main():
    """Train a MALLET topic model iteratively, then export the final artifacts.

    Runs one initial train-topics pass, (iterations - 1) incremental passes
    that each resume from the previous model, and a final pass that writes
    the reports/diagnostics, then unzips state.gz to state.csv.
    """
    samples_directory = r'C:\mallet\clinton\data\globals\samples'
    samples_output_directory = r'C:\mallet\clinton\data\globals\samples\output'

    def _run(command):
        """Run *command* in a shell, wait for completion, report exit code."""
        print('running:', command)
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        process.wait()
        print('process return code:', process.returncode)
        print()

    # Initial pass builds the first model from the corpus.
    # BUG FIX: this command (and the final one below) hard-coded
    # "--num-topics 8" while the incremental passes used the command-line
    # 'topics' argument; all passes now use 'topics' consistently.
    command = 'mallet train-topics --num-topics {} '.format(topics)
    command += r'--input C:\mallet\clinton\data\corpus\clinton.mallet '
    command += r'--output-model {}\output-1.model '.format(samples_output_directory)
    _run(command)
    # Each subsequent pass continues training from the previous model.
    for i in range(2, iterations+1):
        command = r'mallet train-topics --num-topics {} '.format(topics)
        command += r'--input-model {}\output-{}.model '.format(samples_output_directory, i-1)
        command += r'--output-model {}\output-{}.model'.format(samples_output_directory, i)
        _run(command)
    # Final pass emits the finished model plus every report/diagnostic file.
    command = 'mallet train-topics --num-topics {} '.format(topics)
    command += r'--input-model {}\output-{}.model '.format(samples_output_directory, iterations)
    command += r'--output-model {}\output-final.model '.format(samples_directory)
    command += r'--evaluator-filename {}\evaluator.model '.format(samples_directory)
    command += r'--output-topic-keys {}\topic-keys.txt '.format(samples_directory)
    command += r'--output-doc-topics {}\doc-topics.txt '.format(samples_directory)
    command += r'--output-state {}\state.gz '.format(samples_directory)
    command += r'--xml-topic-report {}\topic-report.xml '.format(samples_directory)
    command += r'--diagnostics-file {}\diagnostics-file.txt '.format(samples_directory)
    command += r'--word-topic-counts-file {}\word-topic-counts.txt '.format(samples_directory)
    command += r'--topic-word-weights-file {}\topic-word-weights.txt '.format(samples_directory)
    command += r'--inferencer-filename {}\inferencer.mallet '.format(samples_directory)
    _run(command)
    print('unzipping state.gz file...')
    content = ''
    # Renamed the handle: the original 'zip' shadowed the builtin.
    with gzip.open(r'{}\state.gz'.format(samples_directory), 'rb') as archive:
        content = archive.read()
    with open(r'{}\state.csv'.format(samples_directory), 'w+') as output:
        output.write(content.decode('utf8'))
    print('state.gz unzipped to state.csv')
if __name__ == "__main__":
main() | {
"content_hash": "246aa3c3ea4e763ed9f9093a40954c58",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 93,
"avg_line_length": 35.66216216216216,
"alnum_prop": 0.7032967032967034,
"repo_name": "pepper-johnson/Erudition",
"id": "42f18a061ca61bd18f06d2140181d8fcbb4320e1",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Mallet/clinton/post-processing/train_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12450"
},
{
"name": "Jupyter Notebook",
"bytes": "4661320"
},
{
"name": "Python",
"bytes": "133457"
}
],
"symlink_target": ""
} |
def decimal_to_binary_fraction(x=0.5):
    """
    Input: x, a float between 0 and 1
    Returns binary representation of x
    """
    # Find the smallest power of two that turns x into a whole number.
    # Every Python float is a dyadic rational, so this loop terminates.
    shift = 0
    while (x * (2 ** shift)) % 1 != 0:
        shift += 1
    numerator = int(x * (2 ** shift))
    # Convert the whole-number numerator to a string of binary digits.
    bits = '' if numerator else '0'
    value = numerator
    while value > 0:
        bits = str(value % 2) + bits
        value //= 2
    # Left-pad with zeros so there are exactly `shift` fractional digits
    # (zfill never truncates, so longer strings pass through unchanged).
    bits = bits.zfill(shift)
    # Insert the binary point `shift` digits from the right.
    return bits[0:-shift] + '.' + bits[-shift:]
# If there is no integer p with x * 2**p a whole number, the internal float
# representation could only ever be an approximation.
# Practical tip: test float equality with abs(x - y) < some small epsilon
# rather than x == y.
# Why does print(0.1) show 0.1 if the stored value is not exact?
# Because Python rounds the displayed representation automatically.
| {
"content_hash": "e22eaa294865f1e81fd6e877be04eb1b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 97,
"avg_line_length": 28.3125,
"alnum_prop": 0.5629139072847682,
"repo_name": "Mdlkxzmcp/various_python",
"id": "43a74cac582bdf300bc81daa9bedf7b376e2c024",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Alpha & Beta/wootMath/decimalToBinaryFraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1713"
},
{
"name": "HTML",
"bytes": "10923"
},
{
"name": "JavaScript",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "262310"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from typing import Any
from dataproperty import DataProperty
from typepy import Typecode
def bool_to_str(value) -> Any:
    """Map the boolean singletons True/False to "true"/"false".

    Any other value is returned unchanged, so the return type is not
    necessarily ``str`` (the previous ``-> str`` annotation was incorrect).
    """
    # Identity checks deliberately match only the bool singletons; 1 and 0
    # compare equal to True/False but are passed through untouched.
    if value is True:
        return "true"
    if value is False:
        return "false"
    return value
def serialize_dp(dp: DataProperty) -> Any:
    """Convert a DataProperty into a JSON-friendly value."""
    numeric_codes = (Typecode.REAL_NUMBER, Typecode.INFINITY, Typecode.NAN)
    # Decimal is not JSON-serializable; downcast numeric Decimals to float.
    if isinstance(dp.data, Decimal) and dp.typecode in numeric_codes:
        return float(dp.data)
    # Datetimes are rendered through the property's own string formatter;
    # everything else passes through untouched.
    return dp.to_str() if dp.typecode == Typecode.DATETIME else dp.data
| {
"content_hash": "6dc0519fd9e99755e21edd0243edf8f7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 93,
"avg_line_length": 21.115384615384617,
"alnum_prop": 0.6666666666666666,
"repo_name": "thombashi/pytablewriter",
"id": "980f2eaa18039b441cfc4757575e626923ccffba",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytablewriter/writer/text/_common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "649545"
}
],
"symlink_target": ""
} |
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, Length, EqualTo
class LoginForm(Form):
    """Sign-in form: requires a syntactically valid email and a password."""
    email = StringField('Email Address', [DataRequired(), Email()])
    password = PasswordField('Password', [DataRequired()])
class RegisterForm(Form):
    """Sign-up form with email validation and password confirmation."""
    # Email must parse as an address and be 6-40 characters long.
    email = StringField(
        'Email Address',
        validators=[DataRequired(), Email(message=None), Length(min=6, max=40)])
    # Password length is bounded at 6-25 characters.
    password = PasswordField(
        'Password',
        validators=[DataRequired(), Length(min=6, max=25)]
    )
    # Must match the 'password' field exactly.
    confirm = PasswordField(
        'Confirm password',
        validators=[
            DataRequired(),
            EqualTo('password', message='Passwords must match.')
        ]
    )
| {
"content_hash": "79ac725ba19cc56f4a99ac8025f8b137",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 30.2,
"alnum_prop": 0.6476821192052981,
"repo_name": "JeromeErasmus/browserstack_automate",
"id": "f75e8981ceac1313988f6fcee9bbd2c862bfa2b4",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automate/server/user/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56"
},
{
"name": "HTML",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "90"
},
{
"name": "Python",
"bytes": "25193"
}
],
"symlink_target": ""
} |
'''
usage: metadata.py <first.pdf> [<next.pdf> ...]
Creates output.pdf
This file demonstrates two features:
1) Concatenating multiple input PDFs.
2) adding metadata to the PDF.
If you do not need to add metadata, look at subset.py, which
has a simpler interface to PdfWriter.
'''
import sys
import os
import find_pdfrw
from pdfrw import PdfReader, PdfWriter, IndirectPdfDict
inputs = sys.argv[1:]
assert inputs
outfn = 'output.pdf'
writer = PdfWriter()
for inpfn in inputs:
    # BUG FIX: was "writer.addpages(PdfReader(inpfn.pages)" -- an unbalanced
    # parenthesis (SyntaxError), and .pages belongs to the reader object,
    # not to the filename string.
    writer.addpages(PdfReader(inpfn).pages)
writer.trailer.Info = IndirectPdfDict(
    Title = 'your title goes here',
    Author = 'your name goes here',
    Subject = 'what is it all about?',
    Creator = 'some script goes here',
)
writer.write(outfn)
| {
"content_hash": "baf364c6559e6eb007258a3393d24f19",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 60,
"avg_line_length": 20.027027027027028,
"alnum_prop": 0.717948717948718,
"repo_name": "lamby/pkg-pdfrw",
"id": "9c691087e9d890f746f0b9f4d8aac2167ca043d8",
"size": "764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/metadata.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103701"
}
],
"symlink_target": ""
} |
"""
A simple cache management utility for daisy.
"""
from __future__ import print_function
import functools
import optparse
import os
import sys
import time
from oslo_utils import timeutils
from daisy.common import utils
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
SUCCESS = 0
FAILURE = 1
def catch_error(action):
    """Decorator to provide sensible default error handling for actions.

    Wraps a CLI action function: a None return is mapped to SUCCESS, known
    service errors are reported and mapped to FAILURE, and unexpected errors
    are printed (or re-raised when --debug is set).  *action* is the human
    readable description used in the generic failure message.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                ret = func(*args, **kwargs)
                # Actions may return an explicit exit code; None means OK.
                return SUCCESS if ret is None else ret
            except exception.NotFound:
                # By CLI convention args[0] is the parsed options object.
                options = args[0]
                print("Cache management middleware not enabled on host %s" %
                      options.host)
                return FAILURE
            except exception.Forbidden:
                print("Not authorized to make this request.")
                return FAILURE
            except Exception as e:
                options = args[0]
                if options.debug:
                    # In debug mode surface the full traceback to the user.
                    raise
                print("Failed to %s. Got error:" % action)
                pieces = utils.exception_to_str(e).split('\n')
                for piece in pieces:
                    print(piece)
                return FAILURE
        return wrapper
    return wrap
@catch_error('show cached images')
def list_cached(options, args):
    """%(prog)s list-cached [options]
List all images currently cached.
    """
    cache_client = get_client(options)
    cached = cache_client.get_cached_images()
    if not cached:
        print("No cached images.")
        return SUCCESS
    print("Found %d cached images..." % len(cached))
    table = utils.PrettyTable()
    table.add_column(36, label="ID")
    table.add_column(19, label="Last Accessed (UTC)")
    table.add_column(19, label="Last Modified (UTC)")
    # 1 TB takes 13 characters to display: len(str(2**40)) == 13
    table.add_column(14, label="Size", just="r")
    table.add_column(10, label="Hits", just="r")
    print(table.make_header())
    for entry in cached:
        modified_iso = timeutils.iso8601_from_timestamp(entry['last_modified'])
        accessed = entry['last_accessed']
        # A zero timestamp means the image has never been accessed.
        accessed_iso = ("N/A" if accessed == 0
                        else timeutils.iso8601_from_timestamp(accessed))
        print(table.make_row(
            entry['image_id'],
            accessed_iso,
            modified_iso,
            entry['size'],
            entry['hits']))
@catch_error('show queued images')
def list_queued(options, args):
    """%(prog)s list-queued [options]
List all images currently queued for caching.
    """
    queued = get_client(options).get_queued_images()
    if not queued:
        print("No queued images.")
        return SUCCESS
    print("Found %d queued images..." % len(queued))
    # Single-column table: queued entries are just image IDs.
    table = utils.PrettyTable()
    table.add_column(36, label="ID")
    print(table.make_header())
    for image_id in queued:
        print(table.make_row(image_id))
@catch_error('queue the specified image for caching')
def queue_image(options, args):
    """%(prog)s queue-image <IMAGE_ID> [options]

    Queues an image for caching
    """
    # Exactly one positional argument (the image ID) is required.
    if len(args) != 1:
        print("Please specify one and only ID of the image you wish to ")
        print("queue from the cache as the first argument")
        return FAILURE
    image_id = args.pop()

    # --force skips the interactive confirmation.
    if not (options.force or
            user_confirm("Queue image %(image_id)s for caching?" %
                         {'image_id': image_id}, default=False)):
        return SUCCESS

    get_client(options).queue_image_for_caching(image_id)

    if options.verbose:
        print("Queued image %(image_id)s for caching" %
              {'image_id': image_id})

    return SUCCESS
@catch_error('delete the specified cached image')
def delete_cached_image(options, args):
    """
    %(prog)s delete-cached-image <IMAGE_ID> [options]

    Deletes an image from the cache
    """
    # Exactly one positional argument (the image ID) is required.
    if len(args) != 1:
        print("Please specify one and only ID of the image you wish to ")
        print("delete from the cache as the first argument")
        return FAILURE
    image_id = args.pop()

    # --force skips the interactive confirmation.
    if not (options.force or
            user_confirm("Delete cached image %(image_id)s?" %
                         {'image_id': image_id}, default=False)):
        return SUCCESS

    get_client(options).delete_cached_image(image_id)

    if options.verbose:
        print("Deleted cached image %(image_id)s" % {'image_id': image_id})

    return SUCCESS
@catch_error('Delete all cached images')
def delete_all_cached_images(options, args):
    """%(prog)s delete-all-cached-images [options]

    Remove all images from the cache.
    """
    # --force skips the interactive confirmation.
    if not (options.force or
            user_confirm("Delete all cached images?", default=False)):
        return SUCCESS

    num_deleted = get_client(options).delete_all_cached_images()

    if options.verbose:
        print("Deleted %(num_deleted)s cached images" %
              {'num_deleted': num_deleted})

    return SUCCESS
@catch_error('delete the specified queued image')
def delete_queued_image(options, args):
    """
    %(prog)s delete-queued-image <IMAGE_ID> [options]

    Deletes an image from the cache
    """
    # Exactly one positional argument (the image ID) is required.
    if len(args) != 1:
        print("Please specify one and only ID of the image you wish to ")
        print("delete from the cache as the first argument")
        return FAILURE
    image_id = args.pop()

    # --force skips the interactive confirmation.
    if not (options.force or
            user_confirm("Delete queued image %(image_id)s?" %
                         {'image_id': image_id}, default=False)):
        return SUCCESS

    get_client(options).delete_queued_image(image_id)

    if options.verbose:
        print("Deleted queued image %(image_id)s" % {'image_id': image_id})

    return SUCCESS
@catch_error('Delete all queued images')
def delete_all_queued_images(options, args):
    """%(prog)s delete-all-queued-images [options]

    Remove all images from the cache queue.
    """
    # --force skips the interactive confirmation.
    if not (options.force or
            user_confirm("Delete all queued images?", default=False)):
        return SUCCESS

    num_deleted = get_client(options).delete_all_queued_images()

    if options.verbose:
        print("Deleted %(num_deleted)s queued images" %
              {'num_deleted': num_deleted})

    return SUCCESS
def get_client(options):
    """Return a new image-cache client for the Glance server specified
    by the --host and --port options supplied to the CLI.

    :param options: parsed optparse options (host, port, credentials, ...)
    :returns: client object from daisy.image_cache.client
    """
    # All connection and auth parameters come straight off the parsed
    # CLI options; see create_options() for their defaults.
    return daisy.image_cache.client.get_client(
        host=options.host,
        port=options.port,
        username=options.os_username,
        password=options.os_password,
        tenant=options.os_tenant_name,
        auth_url=options.os_auth_url,
        auth_strategy=options.os_auth_strategy,
        auth_token=options.os_auth_token,
        region=options.os_region_name,
        insecure=options.insecure)
def env(*vars, **kwargs):
    """Search for the first defined of possibly many env vars.

    Returns the first environment variable defined in vars, or
    returns the default defined in kwargs (empty string if absent).
    """
    # Empty-string values count as undefined, matching os.environ truthiness.
    return next(
        (os.environ[name] for name in vars if os.environ.get(name)),
        kwargs.get('default', ''))
def create_options(parser):
    """Register all CLI options understood by this program's commands.

    :param parser: The option parser (optparse.OptionParser)
    """
    # General behavior flags.
    parser.add_option('-v', '--verbose', default=False, action="store_true",
                      help="Print more verbose output.")
    parser.add_option('-d', '--debug', default=False, action="store_true",
                      help="Print debugging output.")
    # Server connection options.
    parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0",
                      help="Address of Glance API host. "
                           "Default: %default.")
    parser.add_option('-p', '--port', dest="port", metavar="PORT",
                      type=int, default=9292,
                      help="Port the Glance API host listens on. "
                           "Default: %default.")
    parser.add_option('-k', '--insecure', dest="insecure",
                      default=False, action="store_true",
                      help="Explicitly allow glance to perform \"insecure\" "
                           "SSL (https) requests. The server's certificate will "
                           "not be verified against any certificate authorities. "
                           "This option should be used with caution.")
    parser.add_option('-f', '--force', dest="force", metavar="FORCE",
                      default=False, action="store_true",
                      help="Prevent select actions from requesting "
                           "user confirmation.")
    # Auth options: each has a preferred dashed form defaulting from the
    # environment, plus a hidden legacy underscore alias
    # (optparse.SUPPRESS_HELP keeps the alias out of --help output).
    parser.add_option('--os-auth-token',
                      dest='os_auth_token',
                      default=env('OS_AUTH_TOKEN'),
                      help='Defaults to env[OS_AUTH_TOKEN].')
    parser.add_option('-A', '--os_auth_token', '--auth_token',
                      dest='os_auth_token',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('--os-username',
                      dest='os_username',
                      default=env('OS_USERNAME'),
                      help='Defaults to env[OS_USERNAME].')
    parser.add_option('-I', '--os_username',
                      dest='os_username',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('--os-password',
                      dest='os_password',
                      default=env('OS_PASSWORD'),
                      help='Defaults to env[OS_PASSWORD].')
    parser.add_option('-K', '--os_password',
                      dest='os_password',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('--os-region-name',
                      dest='os_region_name',
                      default=env('OS_REGION_NAME'),
                      help='Defaults to env[OS_REGION_NAME].')
    parser.add_option('-R', '--os_region_name',
                      dest='os_region_name',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('--os-tenant-id',
                      dest='os_tenant_id',
                      default=env('OS_TENANT_ID'),
                      help='Defaults to env[OS_TENANT_ID].')
    parser.add_option('--os_tenant_id',
                      dest='os_tenant_id',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('--os-tenant-name',
                      dest='os_tenant_name',
                      default=env('OS_TENANT_NAME'),
                      help='Defaults to env[OS_TENANT_NAME].')
    parser.add_option('-T', '--os_tenant_name',
                      dest='os_tenant_name',
                      help=optparse.SUPPRESS_HELP)
    # NOTE(review): no explicit dest here; optparse derives os_auth_url
    # from the first long option string (hyphens become underscores).
    parser.add_option('--os-auth-url',
                      default=env('OS_AUTH_URL'),
                      help='Defaults to env[OS_AUTH_URL].')
    parser.add_option('-N', '--os_auth_url',
                      dest='os_auth_url',
                      help=optparse.SUPPRESS_HELP)
    parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy",
                      metavar="STRATEGY",
                      help="Authentication strategy (keystone or noauth).")
def parse_options(parser, cli_args):
    """
    Returns the parsed CLI options, command to run and its arguments, merged
    with any same-named options found in a configuration file

    :param parser: The option parser
    :param cli_args: command-line argument list (typically sys.argv[1:])
    :returns: (options, command callable, remaining args) tuple
    """
    if not cli_args:
        cli_args.append('-h')  # Show options in usage output...

    (options, args) = parser.parse_args(cli_args)

    # HACK(sirp): Make the parser available to the print_help method
    # print_help is a command, so it only accepts (options, args); we could
    # one-off have it take (parser, options, args), however, for now, I think
    # this little hack will suffice
    options.__parser = parser

    # No command supplied: show usage and exit successfully.
    if not args:
        parser.print_usage()
        sys.exit(0)

    command_name = args.pop(0)
    command = lookup_command(parser, command_name)

    return (options, command, args)
def print_help(options, args):
    """
    Print help specific to a command
    """
    if len(args) != 1:
        sys.exit("Please specify a command")
    # The parser was stashed on the options object by parse_options().
    parser = options.__parser
    command = lookup_command(parser, args.pop())
    # Command docstrings are user-facing help templates keyed on %(prog)s.
    print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(parser, command_name):
    """Map a command name to its handler, exiting on unknown commands."""
    BASE_COMMANDS = {'help': print_help}

    CACHE_COMMANDS = {
        'list-cached': list_cached,
        'list-queued': list_queued,
        'queue-image': queue_image,
        'delete-cached-image': delete_cached_image,
        'delete-all-cached-images': delete_all_cached_images,
        'delete-queued-image': delete_queued_image,
        'delete-all-queued-images': delete_all_queued_images,
    }

    commands = dict(BASE_COMMANDS)
    commands.update(CACHE_COMMANDS)

    if command_name not in commands:
        parser.print_usage()
        sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name})

    return commands[command_name]
def user_confirm(prompt, default=False):
    """Yes/No question dialog with user.

    :param prompt: question/statement to present to user (string)
    :param default: boolean value to return if empty string
                    is received as response to prompt
    :returns: True only for a "y"/"yes" answer (case-insensitive)
    """
    suffix = "[Y/n]" if default else "[y/N]"
    answer = raw_input("%s %s " % (prompt, suffix))
    if not answer:
        return default
    return answer.lower() in ("yes", "y")
def main():
    """Entry point: build the option parser, dispatch the command, exit."""
    usage = """
%prog <command> [options] [args]

Commands:

    help <command>  Output help for one of the commands below

    list-cached                 List all images currently cached

    list-queued                 List all images currently queued for caching

    queue-image                 Queue an image for caching

    delete-cached-image         Purges an image from the cache

    delete-all-cached-images    Removes all images from the cache

    delete-queued-image         Deletes an image from the cache queue

    delete-all-queued-images    Deletes all images from the cache queue
"""

    version_string = version.cached_version_string()
    oparser = optparse.OptionParser(version=version_string,
                                    usage=usage.strip())
    create_options(oparser)
    (options, command, args) = parse_options(oparser, sys.argv[1:])

    try:
        start_time = time.time()
        result = command(options, args)
        end_time = time.time()
        if options.verbose:
            print("Completed in %-0.4f sec." % (end_time - start_time))
        # Command return value (SUCCESS/FAILURE) becomes the exit status.
        sys.exit(result)
    except (RuntimeError, NotImplementedError) as e:
        # NOTE(review): this path prints the error but then falls off the
        # end of main(), exiting with status 0 — confirm whether a
        # non-zero exit status is expected here.
        print("ERROR: ", e)
# Script entry point when run directly (not imported).
if __name__ == '__main__':
    main()
| {
"content_hash": "83f165ab4781d187d8fa0d96b53e763f",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 78,
"avg_line_length": 31.64870259481038,
"alnum_prop": 0.5824924318869829,
"repo_name": "OpenDaisy/daisy-api",
"id": "8f3eb7ed8893fbc90dc5105ce44953f0c48d3d4e",
"size": "16515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisy/cmd/cache_manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1475450"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
import time
import pygame
import const
from os import sys
import RPi.GPIO as GPIO
from threading import Thread
from multiprocessing import Process
import subprocess
import serial
print("Raspberry Pi Master")

# Serial links to the drive and steering motor controllers.
# NOTE(review): /dev/ttyUSB0 and /dev/ttyUSB1 depend on USB enumeration
# order — confirm the assignment is stable on the target hardware.
driveSer = serial.Serial('/dev/ttyUSB0', 250000, timeout=1)
turnSer = serial.Serial('/dev/ttyUSB1', 250000, timeout=1)

# Shared state mutated by the drive/turn/distance loops below.
stopped = False        # set True to make every loop exit
currentSpeed = 0       # last speed sent to the drive controller
currentTurn = 0        # last steering position sent to the turn controller
manual = True          # True = manual drive, False = cruise control
frontDistance = 400    # latest ultrasonic reading, in cm
TRIG = 20              # BCM GPIO pin driving the ultrasonic trigger
ECHO = 21              # BCM GPIO pin reading the ultrasonic echo
incr = 0               # NOTE(review): incremented in setSpeed but never read
jValue = 0             # latest joystick axis value

# Initialise pygame and attach the first joystick (PS3 controller).
pygame.init()
j = pygame.joystick.Joystick(0)
j.init()
# setup GPIO and variables before starting
def setup():
    """Configure the ultrasonic GPIO pins and reset all shared state."""
    global stopped, currentSpeed, currentTurn, manual, frontDistance, jValue

    GPIO.setmode(GPIO.BCM)
    GPIO.setup(TRIG, GPIO.OUT)
    GPIO.setup(ECHO, GPIO.IN)

    stopped = False
    manual = True
    currentSpeed = 0
    currentTurn = 0
    frontDistance = 400
    jValue = 0
# measure distance between ultrasonic sensor and object
def distance():
    """Return the distance (cm) measured by the ultrasonic sensor.

    Emits a 10us trigger pulse, times the echo pulse width, and converts
    it to centimetres using ~340 m/s for the speed of sound (halved for
    the round trip).
    NOTE(review): both waits are unbounded busy loops — this blocks
    forever if the echo line never toggles; confirm that is acceptable.
    """
    GPIO.output(TRIG, 0)
    time.sleep(0.000002)
    GPIO.output(TRIG, 1)
    time.sleep(0.00001)  # 10 microsecond trigger pulse
    GPIO.output(TRIG, 0)

    start = time.time()
    while GPIO.input(ECHO) == 0:  # wait for the echo pulse to begin
        pass
    start = time.time()  # re-stamp at the actual pulse start
    while GPIO.input(ECHO) == 1:  # wait for the echo pulse to end
        pass
    stop = time.time()

    elapsed = stop - start
    return elapsed * 340 / 2 * 100  # seconds -> cm, halved for round trip
# set motor speed
def setSpeed(speed):
    """Send a drive-speed byte to the drive controller over serial.

    The value is offset by 48 before transmission.
    NOTE(review): bytes(chr(speed + 48)) only works on Python 2 (where
    bytes is str) — this raises on Python 3; confirm the target runtime.
    """
    global incr
    incr += 1  # NOTE(review): incremented but never read anywhere visible
    print("-------------------")
    print(speed)
    try:
        theByte = bytes(chr(speed + 48))
        driveSer.write(theByte)
        print(speed)
    except IOError:
        # Serial link dropped; skip this update rather than crash.
        print("disconnected")
    time.sleep(0.15)  # throttle the serial write rate
# set steering position (original comment said "set motor speed" — copy/paste)
def setTurn(turn):
    """Send a steering-position byte to the turn controller over serial.

    The value is offset by 48 before transmission.
    NOTE(review): bytes(chr(...)) only works on Python 2; see setSpeed.
    """
    print("-------------------")
    print(turn)
    try:
        theByte = bytes(chr((turn) + 48))
        turnSer.write(theByte)
    except IOError:
        # Serial link dropped; skip this update rather than crash.
        print("disconnected")
    time.sleep(0.15)  # throttle the serial write rate
# get PS3 joystick value
def getJoystickXValue():
    """Return the latest joystick axis-1 (throttle) value.

    Also handles mode buttons while draining the event queue:
    button 11 switches to cruise, button 10 back to manual,
    button 16 stops the vehicle.
    """
    global manual
    global jValue
    jBefore = jValue
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.JOYAXISMOTION:
            if event.axis == 1:
                jValue = event.value
        if j.get_button(11) and manual:
            print("Cruise")
            manual = False
        if j.get_button(10) and not manual:
            print("Manual")
            manual = True
        elif j.get_button(16):
            stopDrive()
    # NOTE(review): "is not 0" is an identity test, not equality — for a
    # float 0.0 it is True while "!= 0" would be False, so a falsy
    # non-int-zero jValue returns the previous reading. Confirm intended.
    if not jValue and jValue is not 0:
        return jBefore
    return jValue
def getJoystickYValue():
    """Return the latest joystick axis-2 (steering) value.

    Button 16 stops the vehicle while draining the event queue.
    """
    global manual
    global jValue
    jBefore = jValue
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.JOYAXISMOTION:
            if event.axis == 2:
                jValue = event.value
        elif j.get_button(16):
            stopDrive()
    # NOTE(review): same identity-vs-equality caveat as getJoystickXValue —
    # "is not 0" differs from "!= 0" for float 0.0; confirm intended.
    if not jValue and jValue is not 0:
        return jBefore
    return jValue
# enable manual driving and return speed
def manualDrive():
    """Translate the joystick throttle axis into a motor speed value."""
    # Axis is inverted: pushing forward yields negative values.
    raw = int(getJoystickXValue() * 100) * -1
    if raw > 128:
        raw = 128
    if raw < 0:
        # Backward deflection maps to a standstill, not reverse.
        raw = const.motorZeroSpeed
    return raw
# enable cruise control and return speed
def cruiseControl():
    """Return a motor speed for cruise mode based on frontDistance.

    NOTE(review): 14.8148148148 appears to map speed-above-zero to a
    stopping distance in cm — confirm the calibration source.
    """
    driveV = 0
    # NOTE(review): local jValue shadows the global of the same name; the
    # call is kept for its button-handling side effects.
    jValue = getJoystickXValue()
    stopDif = const.cruiseMaxStopDistance - const.cruiseMinStopDistance  # NOTE(review): unused
    stopDistance = (currentSpeed - const.motorZeroSpeed) * 14.8148148148
    # Clamp the stopping distance into the configured window.
    if stopDistance < const.cruiseMinStopDistance:
        stopDistance = const.cruiseMinStopDistance
    if stopDistance > const.cruiseMaxStopDistance:
        stopDistance = const.cruiseMaxStopDistance
    if frontDistance < stopDistance:
        # Obstacle inside the stopping distance: halt.
        driveV = const.motorZeroSpeed
    elif frontDistance <= 400 and frontDistance > stopDistance:
        # Obstacle in sensing range: scale speed down with distance.
        driveV = int(frontDistance / 30) + const.motorZeroSpeed
    else:
        # Clear road: ramp toward the cruise top speed.
        if currentSpeed + const.cruiseSpeedIncrement < const.cruiseTopSpeed:
            driveV = currentSpeed
            driveV += const.cruiseSpeedIncrement
        else:
            driveV = currentSpeed
    # print driveV, " driveV"
    return driveV
# stop drive and close program
def stopDrive():
    """Halt the vehicle, release every resource and exit the process.

    Order matters: stop commands are sent while the serial ports are
    still open, then ports, GPIO and pygame are torn down.
    """
    global stopped
    stopped = True  # signal all worker loops to exit
    setSpeed(-1)    # NOTE(review): -1 encodes as chr(47) — presumably the
                    # drive controller's stop command; confirm protocol.
    setTurn(50)     # NOTE(review): 50 appears to be centred steering
                    # (turnLoop maps the axis to (turnP/2)+50); confirm.
    print("Stopping ... ")
    driveSer.close()
    turnSer.close()
    GPIO.cleanup()
    j.quit()
    pygame.quit()
    print("Stopped")
    sys.exit()
# repeatedly return distance values until stopped
def distanceLoop():
    """Poll the ultrasonic sensor every 0.3s, updating frontDistance."""
    global frontDistance
    try:
        while not stopped:
            frontDistance = distance()
            # print frontDistance,"cm"
            time.sleep(0.3)
    except KeyboardInterrupt:
        # Ctrl-C triggers a full, orderly shutdown.
        stopDrive()
# repeatedly apply voltage to motor based on drive type until stopped
def driveLoop():
    """Apply motor speed from the active drive mode until stopped."""
    global stopped
    global manual
    global currentSpeed
    try:
        while not stopped:
            print("drive")
            currentSpeed = manualDrive() if manual else cruiseControl()
            # Only forward speeds inside the valid motor range are sent.
            if const.motorZeroSpeed <= currentSpeed <= const.motorMaxSpeed:
                setSpeed(currentSpeed)
    except KeyboardInterrupt:
        stopDrive()
def turnLoop():
    """Apply the steering position from the joystick until stopped.

    Maps the axis value (scaled by 100) onto a 0..100 servo position
    centred at 50, records it in currentTurn and sends it out.
    """
    global currentTurn
    global stopped
    try:
        while not stopped:
            print("turn")
            turnP = getJoystickYValue() * 100
            # Compute the position once and reuse it: the original
            # evaluated int((turnP/2) + 50) twice, so currentTurn and the
            # transmitted value could drift apart if the expression changed.
            currentTurn = int((turnP / 2) + 50)
            setTurn(currentTurn)
    except KeyboardInterrupt:
        stopDrive()
# start drive and multiple threads and main method
def startDrive():
    """Initialise shared state and launch the drive/turn workers."""
    setup()
    driveProc = Process(target=driveLoop)
    distanceThread = Thread(target=distanceLoop)  # created but not started
    turnProc = Process(target=turnLoop)
    driveProc.start()
    # distanceThread.start()
    turnProc.start()
| {
"content_hash": "f2b9ec9df1330157d05a9eafafabfea9",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 92,
"avg_line_length": 22.42292490118577,
"alnum_prop": 0.6227745460955403,
"repo_name": "maanitm/MadMobile",
"id": "36c8554a93fcfd22d25593bacfaf4f3989d27eff",
"size": "5673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run/raspberrypi/raspberryMaster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3771"
},
{
"name": "PHP",
"bytes": "6676"
},
{
"name": "Python",
"bytes": "28508"
}
],
"symlink_target": ""
} |
# Django entry point: names the AppConfig class for the invoices app
# (legacy default_app_config convention).
default_app_config = 'waldur_mastermind.invoices.apps.InvoiceConfig'
| {
"content_hash": "713ccf24496b23fc066f759c521253c6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 68,
"avg_line_length": 69,
"alnum_prop": 0.8260869565217391,
"repo_name": "opennode/waldur-mastermind",
"id": "9af7789fc780de7b2c0d65f8ff91aff05b4d4923",
"size": "69",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/invoices/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
"""Test the Smart Meter Texas config flow."""
import asyncio
from aiohttp import ClientError
import pytest
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant import config_entries, setup
from homeassistant.components.smart_meter_texas.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
# Credentials submitted to the config flow by every test below.
TEST_LOGIN = {CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password"}
async def test_form(hass):
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Initial step presents the login form with no errors.
    assert result["type"] == "form"
    assert result["errors"] == {}

    # Patch out authentication and the integration's setup hooks so the
    # flow completes without network access or loading the real component.
    with patch("smart_meter_texas.Client.authenticate", return_value=True), patch(
        "homeassistant.components.smart_meter_texas.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.smart_meter_texas.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], TEST_LOGIN
        )

    # Successful auth creates the entry, titled with the username.
    assert result2["type"] == "create_entry"
    assert result2["title"] == TEST_LOGIN[CONF_USERNAME]
    assert result2["data"] == TEST_LOGIN

    await hass.async_block_till_done()
    # Both setup hooks fire exactly once for the new entry.
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # An auth failure from the client maps to the "invalid_auth" form error.
    auth_target = "smart_meter_texas.Client.authenticate"
    with patch(auth_target, side_effect=SmartMeterTexasAuthError):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], TEST_LOGIN,
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
@pytest.mark.parametrize(
    "side_effect", [asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError]
)
async def test_form_cannot_connect(hass, side_effect):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Every transport-level failure maps to the same "cannot_connect" error.
    with patch(
        "smart_meter_texas.Client.authenticate", side_effect=side_effect,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], TEST_LOGIN
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass):
    """Test base exception is handled."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Any unanticipated exception maps to the generic "unknown" form error.
    auth_target = "smart_meter_texas.Client.authenticate"
    with patch(auth_target, side_effect=Exception):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], TEST_LOGIN,
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_form_duplicate_account(hass):
    """Test that a duplicate account cannot be configured."""
    # Seed an existing entry for the same unique_id (username).
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="user123",
        data={"username": "user123", "password": "password123"},
    ).add_to_hass(hass)

    with patch(
        "smart_meter_texas.Client.authenticate", return_value=True,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_USER},
            data={"username": "user123", "password": "password123"},
        )

    # The flow aborts instead of creating a second entry.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
| {
"content_hash": "c96ec74019d11af326ace23bca09aa39",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 86,
"avg_line_length": 33.03333333333333,
"alnum_prop": 0.6612008072653885,
"repo_name": "titilambert/home-assistant",
"id": "d1e88df8a801541e4a64c9890622e07ff96af54d",
"size": "3964",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/smart_meter_texas/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
import os
from contextlib import contextmanager
from OpenSSL import crypto, SSL
import synapse.common as s_common
from synapse.tests.common import *
import synapse.lib.certdir as s_certdir
class CertDirTest(SynTest):
    @contextmanager
    def getCertDir(self):
        '''
        Get a test CertDir object.

        Yields:
            s_certdir.CertDir: A certdir object based out of a temp directory.
        '''
        # create a temp folder and make it a cert dir
        with self.getTestDir() as dirname:
            # publish the temp dir so other test helpers can find it
            s_scope.set('testdir', dirname)
            cdir = s_certdir.CertDir(path=dirname)
            yield cdir
    def basic_assertions(self, cdir, cert, key, cacert=None):
        '''
        test basic certificate assumptions

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        self.nn(cert)
        self.nn(key)

        # Make sure the certs were generated with the expected number of bits
        self.eq(cert.get_pubkey().bits(), cdir.crypto_numbits)
        self.eq(key.bits(), cdir.crypto_numbits)

        # Make sure the certs were generated with the correct version number
        # (version 2 is the zero-based encoding of an X.509v3 certificate)
        self.eq(cert.get_version(), 2)

        # ensure we can sign / verify data with our keypair
        buf = b'The quick brown fox jumps over the lazy dog.'
        sig = crypto.sign(key, buf, 'sha256')
        sig2 = crypto.sign(key, buf + b'wut', 'sha256')
        self.none(crypto.verify(cert, sig, buf, 'sha256'))
        # a signature over different data must NOT verify
        self.raises(crypto.Error, crypto.verify, cert, sig2, buf, 'sha256')

        # ensure that a ssl context using both cert/key match
        sslcontext = SSL.Context(SSL.TLSv1_2_METHOD)
        sslcontext.use_certificate(cert)
        sslcontext.use_privatekey(key)
        self.none(sslcontext.check_privatekey())

        if cacert:

            # Make sure the cert was signed by the CA
            self.eq(cert.get_issuer().der(), cacert.get_subject().der())

            store = crypto.X509Store()
            ctx = crypto.X509StoreContext(store, cert)

            # OpenSSL should NOT be able to verify the certificate if its CA is not loaded
            store.add_cert(cert)
            self.raises(crypto.X509StoreContextError, ctx.verify_certificate)  # unable to get local issuer certificate

            # Generate a separate CA that did not sign the certificate
            # (DupFileName just means an earlier call already created it)
            try:
                cdir.genCaCert('otherca')
            except DupFileName:
                pass

            # OpenSSL should NOT be able to verify the certificate if its CA is not loaded
            store.add_cert(cdir.getCaCert('otherca'))
            self.raises(crypto.X509StoreContextError, ctx.verify_certificate)  # unable to get local issuer certificate

            # OpenSSL should be able to verify the certificate, once its CA is loaded
            store.add_cert(cacert)
            self.none(ctx.verify_certificate())  # valid
    def p12_assertions(self, cdir, cert, key, p12, cacert=None):
        '''
        test basic p12 certificate bundle assumptions

        Args:
            cdir (s_certdir.CertDir): certdir object
            cert (crypto.X509): Cert to test
            key (crypto.PKey): Key for the certification
            p12 (crypto.PKCS12): PKCS12 object to test
            cacert (crypto.X509): Corresponding CA cert (optional)
        '''
        self.nn(p12)

        # Pull out the CA cert and keypair data
        p12_cacert = None
        if cacert:
            p12_cacert = p12.get_ca_certificates()
            self.nn(p12_cacert)
            self.len(1, p12_cacert)
            p12_cacert = p12_cacert[0]
            # Bundled CA cert must match the standalone CA cert (DER compare)
            self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cacert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cacert))

        p12_cert = p12.get_certificate()
        p12_key = p12.get_privatekey()
        # The bundled keypair must satisfy the same invariants as the files
        self.basic_assertions(cdir, p12_cert, p12_key, cacert=p12_cacert)

        # Make sure that the CA cert and keypair files are the same as the CA cert and keypair contained in the p12 file
        self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cert))
        self.eq(crypto.dump_privatekey(crypto.FILETYPE_ASN1, key), crypto.dump_privatekey(crypto.FILETYPE_ASN1, p12_key))
def user_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'client')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.notin(b'subjectAltName', exts)
def host_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'server')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.isin(b'subjectAltName', exts)
    def test_certdir_cas(self):
        """Generate a root CA and an intermediate CA signed by it."""
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            inter_name = 'testsyn-intermed'
            base = cdir._getPathJoin()

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getCaCert(caname))
            self.none(cdir.getCaKey(caname))
            self.false(cdir.isCaCert(caname))
            self.none(cdir.getCaCertPath(caname))
            self.none(cdir.getCaKeyPath(caname))

            # Generate a self-signed CA =======================================
            cdir.genCaCert(caname)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getCaCert(caname), crypto.X509)
            self.isinstance(cdir.getCaKey(caname), crypto.PKey)
            self.true(cdir.isCaCert(caname))
            self.eq(cdir.getCaCertPath(caname), base + '/cas/' + caname + '.crt')
            self.eq(cdir.getCaKeyPath(caname), base + '/cas/' + caname + '.key')

            # Run basic assertions on the CA keypair
            cacert = cdir.getCaCert(caname)
            cakey = cdir.getCaKey(caname)
            self.basic_assertions(cdir, cacert, cakey)

            # Generate intermediate CA ========================================
            cdir.genCaCert(inter_name, signas=caname)

            # Run basic assertions, make sure that it was signed by the root CA
            inter_cacert = cdir.getCaCert(inter_name)
            inter_cakey = cdir.getCaKey(inter_name)
            self.basic_assertions(cdir, inter_cacert, inter_cakey, cacert=cacert)
    def test_certdir_hosts(self):
        """Exercise self-signed and CA-signed host certificates."""
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            hostname = 'visi.vertex.link'
            hostname_unsigned = 'unsigned.vertex.link'
            base = cdir._getPathJoin()

            cdir.genCaCert(caname)
            cacert = cdir.getCaCert(caname)

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getHostCert(hostname_unsigned))
            self.none(cdir.getHostKey(hostname_unsigned))
            self.false(cdir.isHostCert(hostname_unsigned))
            self.none(cdir.getHostCertPath(hostname_unsigned))
            self.none(cdir.getHostKeyPath(hostname_unsigned))
            self.none(cdir.getHostCaPath(hostname_unsigned))

            # Generate a self-signed host keypair =============================
            cdir.genHostCert(hostname_unsigned)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getHostCert(hostname_unsigned), crypto.X509)
            self.isinstance(cdir.getHostKey(hostname_unsigned), crypto.PKey)
            self.true(cdir.isHostCert(hostname_unsigned))
            self.eq(cdir.getHostCertPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.crt')
            self.eq(cdir.getHostKeyPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.key')
            self.none(cdir.getHostCaPath(hostname_unsigned))  # the cert is self-signed, so there is no ca cert

            # Run basic assertions on the host keypair
            cert = cdir.getHostCert(hostname_unsigned)
            key = cdir.getHostKey(hostname_unsigned)
            self.basic_assertions(cdir, cert, key)
            self.host_assertions(cdir, cert, key)

            # Generate a signed host keypair ==================================
            cdir.genHostCert(hostname, signas=caname)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getHostCert(hostname), crypto.X509)
            self.isinstance(cdir.getHostKey(hostname), crypto.PKey)
            self.true(cdir.isHostCert(hostname))
            self.eq(cdir.getHostCertPath(hostname), base + '/hosts/' + hostname + '.crt')
            self.eq(cdir.getHostKeyPath(hostname), base + '/hosts/' + hostname + '.key')
            self.eq(cdir.getHostCaPath(hostname), base + '/cas/' + caname + '.crt')  # the cert is signed, so there is a ca cert

            # Run basic assertions on the host keypair
            cert = cdir.getHostCert(hostname)
            key = cdir.getHostKey(hostname)
            self.basic_assertions(cdir, cert, key, cacert=cacert)
            self.host_assertions(cdir, cert, key, cacert=cacert)
    def test_certdir_users(self):
        """Exercise user certificates and p12 client bundles."""
        with self.getCertDir() as cdir:  # type: s_certdir.CertDir
            caname = 'syntest'
            username = 'visi@vertex.link'
            username_unsigned = 'unsigned@vertex.link'
            base = cdir._getPathJoin()

            cdir.genCaCert(caname)
            cacert = cdir.getCaCert(caname)

            # Test that all the methods for loading the certificates return correct values for non-existant files
            self.none(cdir.getUserCert(username_unsigned))
            self.none(cdir.getUserKey(username_unsigned))
            self.none(cdir.getClientCert(username_unsigned))
            self.false(cdir.isUserCert(username_unsigned))
            self.false(cdir.isClientCert(username_unsigned))
            self.none(cdir.getUserCertPath('nope'))
            self.none(cdir.getUserKeyPath('nope'))
            self.none(cdir.getUserCaPath('nope'))
            self.none(cdir.getUserForHost('nope', 'host.vertex.link'))

            # Generate a self-signed user keypair =============================
            cdir.genUserCert(username_unsigned)
            # client (p12) generation should fail without CA-signed pieces
            self.raises(NoSuchFile, cdir.genClientCert, username_unsigned)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getUserCert(username_unsigned), crypto.X509)
            self.isinstance(cdir.getUserKey(username_unsigned), crypto.PKey)
            self.none(cdir.getClientCert(username_unsigned))
            self.true(cdir.isUserCert(username_unsigned))
            self.false(cdir.isClientCert(username_unsigned))
            self.eq(cdir.getUserCertPath(username_unsigned), base + '/users/' + username_unsigned + '.crt')
            self.eq(cdir.getUserKeyPath(username_unsigned), base + '/users/' + username_unsigned + '.key')
            self.none(cdir.getUserCaPath(username_unsigned))  # no CA
            self.eq(cdir.getUserForHost('unsigned', 'host.vertex.link'), username_unsigned)

            # Run basic assertions on the user keypair
            cert = cdir.getUserCert(username_unsigned)
            key = cdir.getUserKey(username_unsigned)
            self.basic_assertions(cdir, cert, key)
            self.user_assertions(cdir, cert, key)

            # Generate a signed user keypair ==================================
            cdir.genUserCert(username, signas=caname)
            cdir.genClientCert(username)

            # Test that all the methods for loading the certificates work
            self.isinstance(cdir.getUserCert(username), crypto.X509)
            self.isinstance(cdir.getUserKey(username), crypto.PKey)
            self.isinstance(cdir.getClientCert(username), crypto.PKCS12)
            self.true(cdir.isUserCert(username))
            self.true(cdir.isClientCert(username))
            self.eq(cdir.getUserCertPath(username), base + '/users/' + username + '.crt')
            self.eq(cdir.getUserKeyPath(username), base + '/users/' + username + '.key')
            self.eq(cdir.getUserCaPath(username), base + '/cas/' + caname + '.crt')
            self.eq(cdir.getUserForHost('visi', 'host.vertex.link'), username)

            # Run basic assertions on the user keypair and p12 bundle
            cert = cdir.getUserCert(username)
            key = cdir.getUserKey(username)
            p12 = cdir.getClientCert(username)
            self.basic_assertions(cdir, cert, key, cacert=cacert)
            self.user_assertions(cdir, cert, key, cacert=cacert)
            self.p12_assertions(cdir, cert, key, p12, cacert=cacert)

            # Test missing files for generating a client cert
            os.remove(base + '/users/' + username + '.key')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # user key
            os.remove(base + '/cas/' + caname + '.crt')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # ca crt
            os.remove(base + '/users/' + username + '.crt')
            self.raises(NoSuchFile, cdir.genClientCert, username)  # user crt
def test_certdir_hosts_sans(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
cdir.genCaCert(caname)
# Host cert with multiple SANs ====================================
hostname = 'visi.vertex.link'
sans = 'DNS:vertex.link,DNS:visi.vertex.link,DNS:vertex.link'
cdir.genHostCert(hostname, signas=caname, sans=sans)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x1f\x82\x0bvertex.link\x82\x10visi.vertex.link') # ASN.1 encoded subjectAltName data
# Host cert with no specified SANs ================================
hostname = 'visi2.vertex.link'
cdir.genHostCert(hostname, signas=caname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi2.vertex.link') # ASN.1 encoded subjectAltName data
# Self-signed Host cert with no specified SANs ====================
hostname = 'visi3.vertex.link'
cdir.genHostCert(hostname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi3.vertex.link') # ASN.1 encoded subjectAltName data
def test_certdir_hosts_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
hostname = 'visi.vertex.link'
# Generate CA cert and host CSR
cdir.genCaCert(caname)
cdir.genHostCsr(hostname)
path = cdir._getPathJoin('hosts', hostname + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signHostCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_users_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
username = 'visi@vertex.link'
# Generate CA cert and user CSR
cdir.genCaCert(caname)
cdir.genUserCsr(username)
path = cdir._getPathJoin('users', username + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signUserCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getUserCert(username)
key = cdir.getUserKey(username)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_importfile(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
with self.getTestDir() as testpath:
# File doesn't exist
fpath = s_common.genpath(testpath, 'not_real.crt')
self.raises(NoSuchFile, cdir.importFile, fpath, 'cas')
# File has unsupported extension
fpath = s_common.genpath(testpath, 'coolpic.bmp')
with s_common.genfile(fpath) as fd:
self.raises(BadFileExt, cdir.importFile, fpath, 'cas')
tests = (
('cas', 'coolca.crt'),
('cas', 'coolca.key'),
('hosts', 'coolhost.crt'),
('hosts', 'coolhost.key'),
('users', 'cooluser.crt'),
('users', 'cooluser.key'),
('users', 'cooluser.p12'),
)
data = b'arbitrary data'
for ftype, fname in tests:
srcpath = s_common.genpath(testpath, fname)
dstpath = s_common.genpath(cdir.path, ftype, fname)
with s_common.genfile(srcpath) as fd:
fd.write(b'arbitrary data')
fd.seek(0)
# Make sure the file is not there
self.raises(NoSuchFile, s_common.reqfile, dstpath)
# Import it and make sure it exists
self.none(cdir.importFile(srcpath, ftype))
with s_common.reqfile(dstpath) as dstfd:
self.eq(dstfd.read(), b'arbitrary data')
# Make sure it can't be overwritten
self.raises(FileExists, cdir.importFile, srcpath, ftype)
def test_certdir_valUserCert(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
base = cdir._getPathJoin()
cdir.genCaCert('syntest')
cdir.genCaCert('newp')
cacerts = cdir.getCaCerts()
syntestca = cdir.getCaCert('syntest')
newpca = cdir.getCaCert('newp')
self.raises(crypto.Error, cdir.valUserCert, b'')
cdir.genUserCert('cool')
path = cdir.getUserCertPath('cool')
byts = cdir._getPathBytes(path)
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts)
cdir.genUserCert('cooler', signas='syntest')
path = cdir.getUserCertPath('cooler')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(syntestca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(newpca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
cdir.genUserCert('coolest', signas='newp')
path = cdir.getUserCertPath('coolest')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(newpca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(syntestca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
| {
"content_hash": "7b779cc352ad56f62e355a8dd717e47f",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 143,
"avg_line_length": 46.14462809917355,
"alnum_prop": 0.5997134413898093,
"repo_name": "vivisect/synapse",
"id": "1eca8a057ff0f3fe4b6e78cc2e0abd8c8b687a70",
"size": "22334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/tests/test_lib_certdir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
} |
from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
class AlSq(Monument):
    """Monument subclass for the Albanian (sq) cultural heritage dataset.

    Each ``set_*`` / ``update_*`` method fills in one Wikidata statement or
    label; ``__init__`` drives them in a fixed pipeline order.
    """

    def set_special_is(self):
        """Refine the generic "is" statement from the raw 'type' column.

        Only substitutes when the mapping yields exactly one match;
        ambiguous or missing matches are recorded in the report instead.
        """
        if self.has_non_empty_attribute("type"):
            spec_is_raw = self.type.lower()
            is_dict = self.data_files["is"]["mappings"]
            matches = utils.get_matching_items_from_dict(spec_is_raw, is_dict)
            if len(matches) == 1:
                self.substitute_statement("is", matches[0])
            else:
                # No unique mapping -- flag for manual review.
                self.add_to_report("type", self.type, "is")

    def set_location(self):
        """Set Location based on mapping file."""
        self.set_from_dict_match(
            lookup_dict=self.data_files["settlements"],
            dict_label="itemLabel",
            value_label="place",
            prop="location"
        )

    def set_adm_location(self):
        """Set Admin Location based on mapping file."""
        self.set_from_dict_match(
            lookup_dict=self.data_files["municipalities"],
            dict_label="itemLabel",
            value_label="municipality",
            prop="located_adm"
        )

    def update_labels(self):
        """Add the (markup-stripped) monument name as the Albanian label."""
        albanian = utils.remove_markup(self.name)
        self.add_label("sq", albanian)

    def update_descriptions(self):
        """Add a generic English description."""
        english = "cultural heritage monument of Albania"
        self.add_description("en", english)

    def set_heritage_id(self):
        """Build the WLM id as TABLENAME-idno and use idno as disambiguator."""
        wlm = "{}-{}".format(self.mapping["table_name"].upper(), self.idno)
        self.add_statement("wlm_id", wlm)
        self.add_disambiguator(self.idno)

    def __init__(self, db_row_dict, mapping, data_files, existing, repository):
        # NOTE(review): the order of these calls follows the shared Monument
        # pipeline (e.g. set_special_is refines what set_is produced) --
        # do not reorder without checking the base class.
        Monument.__init__(self, db_row_dict, mapping,
                          data_files, existing, repository)
        self.set_monuments_all_id("idno")
        self.set_changed()
        self.set_wlm_source()
        self.set_country()
        self.set_heritage_id()
        self.set_heritage()
        self.set_is()
        self.set_special_is()
        self.set_adm_location()
        self.set_location()
        self.set_coords()
        self.set_image()
        self.update_descriptions()
        self.update_labels()
        # there's no commonscat in dataset
        self.set_wd_item(self.find_matching_wikidata(mapping))
if __name__ == "__main__":
    """Point of entrance for importer."""
    # Parse CLI arguments and run the Albanian (sq) monuments dataset
    # through the shared importer pipeline.
    args = importer.handle_args()
    dataset = Dataset("al", "sq", AlSq)
    # Static mapping files used by set_location / set_adm_location.
    dataset.data_files = {"settlements": "albania_settlements.json",
                          "municipalities": "albania_municipalities.json"}
    # Lookup tables fetched remotely (monument type -> "is" mapping).
    dataset.lookup_downloads = {"is": "al_(sq)/type"}
    importer.main(args, dataset)
| {
"content_hash": "24b0814ff71cb39c24568260d019d108",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 34.62337662337662,
"alnum_prop": 0.5817704426106527,
"repo_name": "Vesihiisi/COH-tools",
"id": "9fd71e00b4f77fc09fc235b45ffe94a7d71186e9",
"size": "2666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importer/AlSq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "291218"
}
],
"symlink_target": ""
} |
from django.utils.safestring import mark_safe
from django.utils.html import escape, conditional_escape
from django.utils.encoding import force_unicode
from django.forms.widgets import ClearableFileInput as oldWidget, CheckboxInput, FileInput, Input
from nginx_filter_image.templatetags.pimage import pimage_single, pimage_sizes
class ClearableFileInput(oldWidget, FileInput):
    """File input widget that renders the current value as a resized
    preview image (via the nginx_filter_image ``pimage`` template helpers)
    instead of a plain link.

    ``pimage`` is the image-filter profile name passed through to
    ``pimage_single`` / ``pimage_sizes`` when building the ``<img>`` tag.
    """
    template_with_initial = u'%(initial)s %(clear_template)s %(input_text)s: %(input)s<br clear="all"/>'
    template_with_clear = u'<label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s %(clear)s </label>'

    def __init__(self, pimage, attrs=None):
        # Bug fix: the original signature used a mutable default (attrs={})
        # which is shared across every instance of the class; ``None`` is
        # handled identically by the Django Widget base class.
        super(ClearableFileInput, self).__init__(attrs=attrs)
        self.pimage = pimage

    def render(self, name, value, attrs=None):
        """Render the input element, a preview image for the current value,
        and (for optional fields) a "clear" checkbox."""
        substitutions = {
            'initial_text': self.initial_text,
            'input_text': self.input_text,
            'clear_template': '',
            'clear_checkbox_label': self.clear_checkbox_label}
        template = u'%(input)s'
        # Intentionally skips FileInput.render and uses the plain Input
        # rendering (super(FileInput, ...) resolves past FileInput in the MRO).
        substitutions['input'] = super(FileInput, self).render(name, value, attrs)

        if value and hasattr(value, "url"):
            template = self.template_with_initial
            # Build the preview <img> tag from the filtered image URL plus
            # width/height attributes for the chosen pimage profile.
            substitutions['initial'] = (u'<img src="%s" %s>'
                                        % (
                                            pimage_single(escape(value.url), self.pimage),
                                            pimage_sizes(value, self.pimage),
                                        )
                                        )
            if not self.is_required:
                # Optional field: offer a checkbox to clear the stored file.
                checkbox_name = self.clear_checkbox_name(name)
                checkbox_id = self.clear_checkbox_id(checkbox_name)
                substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
                substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
                substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
                substitutions['clear_template'] = self.template_with_clear % substitutions

        return mark_safe(template % substitutions)
| {
"content_hash": "888f73d9f80ba840829c7e7e5052fe8b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 112,
"avg_line_length": 54.142857142857146,
"alnum_prop": 0.5958663148636764,
"repo_name": "BlackWizard/django-misc-base",
"id": "1241aefdfc588ad4f37e529c77f1b23009f1fbce",
"size": "2298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc_base/widgets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12343"
}
],
"symlink_target": ""
} |
from .controllers import organization_count, organization_follow, organization_follow_ignore, \
organization_stop_following, voter_count
from ballot.controllers import ballot_item_options_retrieve_for_api, choose_election_from_existing_data, \
voter_ballot_items_retrieve_for_api
from candidate.controllers import candidate_retrieve_for_api, candidates_retrieve_for_api
from config.base import get_environment_variable
from django.http import HttpResponse, HttpResponseRedirect
from geoip.controllers import voter_location_retrieve_from_ip_for_api
from import_export_facebook.controllers import facebook_disconnect_for_api, facebook_sign_in_for_api
from import_export_google_civic.controllers import voter_ballot_items_retrieve_from_google_civic_for_api
from import_export_twitter.controllers import twitter_sign_in_start_for_api, \
twitter_sign_in_request_access_token_for_api, twitter_sign_in_request_voter_info_for_api
import json
from measure.controllers import measure_retrieve_for_api
from office.controllers import office_retrieve_for_api
from organization.controllers import organization_retrieve_for_api, organization_save_for_api, \
organization_search_for_api, organizations_followed_retrieve_for_api
from position.controllers import position_list_for_ballot_item_for_api, position_list_for_opinion_maker_for_api, \
position_retrieve_for_api, position_save_for_api, voter_all_positions_retrieve_for_api, \
voter_position_retrieve_for_api, voter_position_comment_save_for_api, voter_position_visibility_save_for_api
from position.models import ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING, \
FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC
from position_like.controllers import position_like_count_for_api, voter_position_like_off_save_for_api, \
voter_position_like_on_save_for_api, voter_position_like_status_retrieve_for_api
from quick_info.controllers import quick_info_retrieve_for_api
from ballot.controllers import choose_election_and_prepare_ballot_data
from ballot.models import OFFICE, CANDIDATE, MEASURE, VoterBallotSavedManager
from rest_framework.response import Response
from rest_framework.views import APIView
from search.controllers import search_all_for_api
from star.controllers import voter_all_stars_status_retrieve_for_api, voter_star_off_save_for_api, \
voter_star_on_save_for_api, voter_star_status_retrieve_for_api
from support_oppose_deciding.controllers import position_oppose_count_for_ballot_item_for_api, \
position_support_count_for_ballot_item_for_api, \
position_public_oppose_count_for_ballot_item_for_api, \
position_public_support_count_for_ballot_item_for_api, positions_count_for_all_ballot_items_for_api, \
positions_count_for_one_ballot_item_for_api, \
voter_opposing_save, voter_stop_opposing_save, voter_stop_supporting_save, voter_supporting_save_for_api
from twitter.controllers import twitter_identity_retrieve_for_api
from voter.controllers import voter_address_retrieve_for_api, voter_create_for_api, \
voter_photo_save_for_api, voter_retrieve_for_api, voter_retrieve_list_for_api, voter_sign_out_for_api
from voter.models import BALLOT_ADDRESS, fetch_voter_id_from_voter_device_link, VoterAddress, VoterAddressManager, \
VoterDeviceLink, VoterDeviceLinkManager, voter_has_authority, VoterManager
from voter.serializers import VoterSerializer
from voter_guide.controllers import voter_guide_possibility_retrieve_for_api, voter_guide_possibility_save_for_api, \
voter_guides_followed_retrieve_for_api, voter_guides_to_follow_retrieve_for_api
from voter_guide.models import ORGANIZATION, PUBLIC_FIGURE
import wevote_functions.admin
from wevote_functions.functions import convert_to_bool, convert_to_int, generate_voter_device_id, get_voter_device_id, \
is_voter_device_id_valid, positive_value_exists
# Module-level logger for the apis_v1 views.
logger = wevote_functions.admin.get_logger(__name__)

# Base URL of this We Vote API server, read from environment configuration.
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def ballot_item_options_retrieve_view(request):  # ballotItemOptionsRetrieve
    """Return the ballot item options for one election as a JSON response."""
    election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    retrieve_results = ballot_item_options_retrieve_for_api(election_id)
    return HttpResponse(json.dumps(retrieve_results['json_data']),
                        content_type='application/json')
def ballot_item_retrieve_view(request):  # ballotItemRetrieve
    """Retrieve one ballot item (office, candidate or measure) by id.

    Cleanup: the original duplicated the error payload in a final ``else``
    branch that was unreachable (the up-front kind check already returned
    for anything outside OFFICE/CANDIDATE/MEASURE); a dispatch table
    replaces the chain.
    :param request:
    :return: HttpResponse with JSON for the requested ballot item
    """
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)

    # Dispatch table keyed by the kind of ballot item.
    retrieve_by_kind = {
        OFFICE: office_retrieve_for_api,
        CANDIDATE: candidate_retrieve_for_api,
        MEASURE: measure_retrieve_for_api,
    }
    retrieve_function = retrieve_by_kind.get(kind_of_ballot_item)
    if retrieve_function is None:
        json_data = {
            'status': 'VALID_BALLOT_ITEM_TYPE_MISSING',
            'success': False,
            'kind_of_ballot_item': kind_of_ballot_item,
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    return retrieve_function(ballot_item_id, ballot_item_we_vote_id)
def candidate_retrieve_view(request):  # candidateRetrieve
    """Retrieve one candidate by internal id or we_vote_id."""
    return candidate_retrieve_for_api(
        request.GET.get('candidate_id', 0),
        request.GET.get('candidate_we_vote_id', None))
def candidates_retrieve_view(request):  # candidatesRetrieve
    """Retrieve the candidates for one office."""
    return candidates_retrieve_for_api(
        request.GET.get('office_id', 0),
        request.GET.get('office_we_vote_id', ''))
def device_id_generate_view(request):  # deviceIdGenerate
    """
    This API call is used by clients to generate a transient unique identifier (device_id - stored on client)
    which ties the device to a persistent voter_id (mapped together and stored on the server).
    Note: This call does not create a voter account -- that must be done in voterCreate.
    :param request:
    :return: JSON containing a unique device id that can be stored in a cookie
    """
    voter_device_id = generate_voter_device_id()  # Stored in cookie elsewhere
    logger.debug("apis_v1/views.py, device_id_generate-voter_device_id: {voter_device_id}".format(
        voter_device_id=voter_device_id
    ))

    # Bug fix: the success/failure status strings were swapped -- the code
    # previously reported DEVICE_ID_GENERATE_VALUE_DOES_NOT_EXIST when a
    # value WAS generated, and VALUE_EXISTS when it was not.
    if positive_value_exists(voter_device_id):
        success = True
        status = "DEVICE_ID_GENERATE_VALUE_EXISTS"
    else:
        success = False
        status = "DEVICE_ID_GENERATE_VALUE_DOES_NOT_EXIST"

    json_data = {
        'voter_device_id': voter_device_id,
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def facebook_disconnect_view(request):
    """
    Disconnect this We Vote account from current Facebook account
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    disconnect_results = facebook_disconnect_for_api(device_id)
    json_data = {
        'status': disconnect_results['status'],
        'success': disconnect_results['success'],
        'voter_device_id': device_id,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def facebook_sign_in_view(request):  # facebookSignIn
    """
    Saving the results of signing in with Facebook
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    sign_in_results = facebook_sign_in_for_api(
        device_id,
        request.GET.get('facebook_id', None),
        request.GET.get('facebook_email', None))
    json_data = {
        'status': sign_in_results['status'],
        'success': sign_in_results['success'],
        'voter_device_id': device_id,
        'facebook_id': sign_in_results['facebook_id'],
        'facebook_email': sign_in_results['facebook_email'],
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def measure_retrieve_view(request):  # measureRetrieve
    """Retrieve one measure by internal id or we_vote_id."""
    return measure_retrieve_for_api(
        request.GET.get('measure_id', 0),
        request.GET.get('measure_we_vote_id', None))
def office_retrieve_view(request):  # officeRetrieve
    """Retrieve one office by internal id or we_vote_id."""
    return office_retrieve_for_api(
        request.GET.get('office_id', 0),
        request.GET.get('office_we_vote_id', None))
def organization_count_view(request):
    """Thin wrapper delegating to organization_count()."""
    return organization_count()
def organization_follow_api_view(request):
    """Record that this voter is following the given organization."""
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    return organization_follow(
        voter_device_id=device_id,
        organization_id=request.GET.get('organization_id', 0),
        organization_we_vote_id=request.GET.get('organization_we_vote_id', ''))
def organization_stop_following_api_view(request):
    """Record that this voter stopped following the given organization."""
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    return organization_stop_following(
        voter_device_id=device_id,
        organization_id=request.GET.get('organization_id', 0),
        organization_we_vote_id=request.GET.get('organization_we_vote_id', ''))
def organization_follow_ignore_api_view(request):
    """Record that this voter is ignoring the given organization."""
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    return organization_follow_ignore(
        voter_device_id=device_id,
        organization_id=request.GET.get('organization_id', 0),
        organization_we_vote_id=request.GET.get('organization_we_vote_id', ''))
def organization_retrieve_view(request):
    """
    Retrieve a single organization based on unique identifier
    :param request:
    :return:
    """
    return organization_retrieve_for_api(
        organization_id=request.GET.get('organization_id', 0),
        organization_we_vote_id=request.GET.get('organization_we_vote_id', ''))
def organization_save_view(request):  # organizationSave
    """
    Save a single organization based on unique identifier.

    Saving is only permitted when the voter owns the organization's Twitter
    handle, or is an admin / verified volunteer.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    organization_id = request.GET.get('organization_id', 0)
    organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
    # Values default to False (not '') so downstream code can tell
    # "not provided" apart from "provided but empty".
    organization_name = request.GET.get('organization_name', False)
    organization_email = request.GET.get('organization_email', False)
    organization_website = request.GET.get('organization_website', False)
    organization_twitter_handle = request.GET.get('organization_twitter_handle', False)
    organization_facebook = request.GET.get('organization_facebook', False)
    organization_image = request.GET.get('organization_image', False)
    refresh_from_twitter = request.GET.get('refresh_from_twitter', False)

    # We only want to allow save if either this is your organization (i.e., you have the Twitter handle)
    voter_owns_twitter_handle = False
    # or if you are a verified volunteer or admin
    authority_required = {'admin', 'verified_volunteer'}  # admin, verified_volunteer
    voter_is_admin_or_verified_volunteer = False
    if voter_has_authority(request, authority_required):
        voter_is_admin_or_verified_volunteer = True
    else:
        voter_manager = VoterManager()
        voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
        if voter_results['voter_found']:
            voter = voter_results['voter']
            # Does this voter have the same Twitter handle as this organization? If so, link this organization to
            # this particular voter.
            # Bug fix: organization_twitter_handle defaults to False (and the
            # voter's twitter_screen_name may be empty), so the original
            # unconditional .lower() calls raised AttributeError whenever the
            # handle was not passed in. Guard both values first.
            if positive_value_exists(voter.twitter_screen_name) \
                    and positive_value_exists(organization_twitter_handle) \
                    and voter.twitter_screen_name.lower() == organization_twitter_handle.lower():
                voter_owns_twitter_handle = True

    if not voter_is_admin_or_verified_volunteer:
        if not voter_owns_twitter_handle:
            # Only refuse entry if *both* conditions are not met
            results = {
                'status': "VOTER_LACKS_AUTHORITY_TO_SAVE_ORGANIZATION",
                'success': False,
                'organization_id': organization_id,
                'organization_we_vote_id': organization_we_vote_id,
                'new_organization_created': False,
                'organization_name': organization_name,
                'organization_email': organization_email,
                'organization_website': organization_website,
                'organization_facebook': organization_facebook,
                'organization_photo_url': organization_image,
                'organization_twitter_handle': organization_twitter_handle,
                'refresh_from_twitter': refresh_from_twitter,
                'twitter_followers_count': 0,
                'twitter_description': "",
            }
            return HttpResponse(json.dumps(results), content_type='application/json')

    results = organization_save_for_api(
        voter_device_id=voter_device_id, organization_id=organization_id,
        organization_we_vote_id=organization_we_vote_id,
        organization_name=organization_name, organization_email=organization_email,
        organization_website=organization_website, organization_twitter_handle=organization_twitter_handle,
        organization_facebook=organization_facebook, organization_image=organization_image,
        refresh_from_twitter=refresh_from_twitter)
    return HttpResponse(json.dumps(results), content_type='application/json')
def organization_search_view(request):
    """
    Search for organizations based on a few search terms
    :param request:
    :return:
    """
    params = request.GET
    return organization_search_for_api(
        organization_name=params.get('organization_name', ''),
        organization_twitter_handle=params.get('organization_twitter_handle', ''),
        organization_website=params.get('organization_website', ''),
        organization_email=params.get('organization_email', ''))
def organizations_followed_retrieve_api_view(request):
    """Retrieve the organizations this voter follows."""
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    max_to_retrieve = get_maximum_number_to_retrieve_from_request(request)
    return organizations_followed_retrieve_for_api(
        voter_device_id=device_id,
        maximum_number_to_retrieve=max_to_retrieve)
def get_maximum_number_to_retrieve_from_request(request):
    """Parse 'maximum_number_to_retrieve' from the querystring.

    Returns 40 when the parameter is absent or empty, otherwise the value
    converted to int.

    Bug fix: the original tested ``maximum_number_to_retrieve is ""`` -- an
    identity comparison against a string literal, which only "works" by
    accident of CPython string interning and is undefined across
    interpreters. A falsy check is used instead.
    :param request:
    :return: int
    """
    maximum_number_to_retrieve = request.GET.get('maximum_number_to_retrieve', '')
    if not maximum_number_to_retrieve:
        return 40
    return convert_to_int(maximum_number_to_retrieve)
def position_list_for_ballot_item_view(request):  # positionListForBallotItem
    """
    Return the list of positions for one ballot item
    (office, candidate, or measure).
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id

    # Clamp the requested stance to the known set; anything else means ANY.
    stance_we_are_looking_for = request.GET.get('stance', ANY_STANCE)
    if stance_we_are_looking_for not in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE,
                                         OPPOSE, PERCENT_RATING):
        stance_we_are_looking_for = ANY_STANCE

    # Clamp the visibility filter the same way.
    friends_vs_public = request.GET.get('friends_vs_public', FRIENDS_AND_PUBLIC)
    if friends_vs_public not in (FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC):
        friends_vs_public = FRIENDS_AND_PUBLIC

    show_positions_this_voter_follows = request.GET.get('show_positions_this_voter_follows', True)

    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', "")

    # Route the incoming id pair into the slot matching the ballot item kind;
    # every other slot stays zeroed out.
    office_id, office_we_vote_id = 0, ''
    candidate_id, candidate_we_vote_id = 0, ''
    measure_id, measure_we_vote_id = 0, ''
    if kind_of_ballot_item == OFFICE:
        office_id, office_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == CANDIDATE:
        candidate_id, candidate_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id, measure_we_vote_id = ballot_item_id, ballot_item_we_vote_id

    return position_list_for_ballot_item_for_api(voter_device_id=voter_device_id,
                                                 friends_vs_public=friends_vs_public,
                                                 office_id=office_id,
                                                 office_we_vote_id=office_we_vote_id,
                                                 candidate_id=candidate_id,
                                                 candidate_we_vote_id=candidate_we_vote_id,
                                                 measure_id=measure_id,
                                                 measure_we_vote_id=measure_we_vote_id,
                                                 stance_we_are_looking_for=stance_we_are_looking_for,
                                                 show_positions_this_voter_follows=show_positions_this_voter_follows)
def position_list_for_opinion_maker_view(request):  # positionListForOpinionMaker
    """
    Return the list of positions held by one opinion maker
    (organization or public figure).
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id

    stance = request.GET.get('stance', ANY_STANCE)
    if stance in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
        stance_we_are_looking_for = stance
    else:
        stance_we_are_looking_for = ANY_STANCE

    # Consistency fix: the default here was ANY_STANCE -- a stance constant,
    # not a visibility constant -- which always fell through to
    # FRIENDS_AND_PUBLIC in the check below. Defaulting to FRIENDS_AND_PUBLIC
    # directly matches position_list_for_ballot_item_view without changing
    # behavior.
    friends_vs_public_incoming = request.GET.get('friends_vs_public', FRIENDS_AND_PUBLIC)
    if friends_vs_public_incoming in (FRIENDS_ONLY, PUBLIC_ONLY, FRIENDS_AND_PUBLIC):
        friends_vs_public = friends_vs_public_incoming
    else:
        friends_vs_public = FRIENDS_AND_PUBLIC

    kind_of_opinion_maker = request.GET.get('kind_of_opinion_maker', "")
    opinion_maker_id = request.GET.get('opinion_maker_id', 0)
    opinion_maker_we_vote_id = request.GET.get('opinion_maker_we_vote_id', "")
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    state_code = request.GET.get('state_code', "")
    filter_for_voter = convert_to_bool(request.GET.get('filter_for_voter', True))
    filter_out_voter = convert_to_bool(request.GET.get('filter_out_voter', False))
    # Make sure filter_for_voter is reset to False if filter_out_voter is true
    filter_for_voter = False if filter_out_voter else filter_for_voter

    # Route the id pair into the organization or public-figure slot; the
    # other slot (and both, for unknown kinds) stays zeroed out.
    organization_id, organization_we_vote_id = 0, ''
    public_figure_id, public_figure_we_vote_id = 0, ''
    if kind_of_opinion_maker in (ORGANIZATION, "ORGANIZATION"):
        organization_id, organization_we_vote_id = opinion_maker_id, opinion_maker_we_vote_id
    elif kind_of_opinion_maker in (PUBLIC_FIGURE, "PUBLIC_FIGURE"):
        public_figure_id, public_figure_we_vote_id = opinion_maker_id, opinion_maker_we_vote_id

    return position_list_for_opinion_maker_for_api(voter_device_id=voter_device_id,
                                                   organization_id=organization_id,
                                                   organization_we_vote_id=organization_we_vote_id,
                                                   public_figure_id=public_figure_id,
                                                   public_figure_we_vote_id=public_figure_we_vote_id,
                                                   friends_vs_public=friends_vs_public,
                                                   stance_we_are_looking_for=stance_we_are_looking_for,
                                                   filter_for_voter=filter_for_voter,
                                                   filter_out_voter=filter_out_voter,
                                                   google_civic_election_id=google_civic_election_id,
                                                   state_code=state_code)
def position_retrieve_view(request):
    """
    Retrieve all of the details about a single position based on unique identifier (positionRetrieve)
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # standardized voter_device_id intake
    we_vote_id = request.GET.get('position_we_vote_id', '')
    return position_retrieve_for_api(position_we_vote_id=we_vote_id,
                                     voter_device_id=device_id)
def position_save_view(request):  # positionSave
    """
    Save a single position.

    Values not passed in default to False so downstream code can tell
    "not provided" apart from a real (possibly empty) value; only the
    provided fields are changed.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    get = request.GET.get
    results = position_save_for_api(
        voter_device_id=voter_device_id,
        position_we_vote_id=get('position_we_vote_id', False),
        organization_we_vote_id=get('organization_we_vote_id', False),
        public_figure_we_vote_id=get('public_figure_we_vote_id', False),
        voter_we_vote_id=get('voter_we_vote_id', False),
        google_civic_election_id=get('google_civic_election_id', False),
        ballot_item_display_name=get('ballot_item_display_name', False),
        office_we_vote_id=get('office_we_vote_id', False),
        candidate_we_vote_id=get('candidate_we_vote_id', False),
        measure_we_vote_id=get('measure_we_vote_id', False),
        stance=get('stance', False),
        set_as_public_position=get('set_as_public_position', True),
        statement_text=get('statement_text', False),
        statement_html=get('statement_html', False),
        more_info_url=get('more_info_url', False),
    )
    return HttpResponse(json.dumps(results), content_type='application/json')
def position_oppose_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and friends that oppose this (positionOpposeCountForBallotItem)
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Route the generic ballot_item identifiers into the matching candidate/measure slots,
    # leaving the other pair empty.
    candidate_id, candidate_we_vote_id = 0, None
    measure_id, measure_we_vote_id = 0, None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id, candidate_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id, measure_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    return position_oppose_count_for_ballot_item_for_api(
        voter_device_id=device_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def position_support_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and friends that support this (positionSupportCountForBallotItem)
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Route the generic ballot_item identifiers into the matching candidate/measure slots,
    # leaving the other pair empty.
    candidate_id, candidate_we_vote_id = 0, None
    measure_id, measure_we_vote_id = 0, None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id, candidate_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id, measure_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    return position_support_count_for_ballot_item_for_api(
        voter_device_id=device_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def position_public_oppose_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and public figures that publicly oppose this (positionPublicOpposeCountForBallotItem)
    :param request:
    :return:
    """
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Route the generic ballot_item identifiers into the matching candidate/measure slots,
    # leaving the other pair empty. No voter_device_id: this count is for public positions only.
    candidate_id, candidate_we_vote_id = 0, None
    measure_id, measure_we_vote_id = 0, None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id, candidate_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id, measure_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    return position_public_oppose_count_for_ballot_item_for_api(
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def position_public_support_count_for_ballot_item_view(request):
    """
    Retrieve the number of orgs and public figures that publicly support this (positionPublicSupportCountForBallotItem)
    :param request:
    :return:
    """
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Route the generic ballot_item identifiers into the matching candidate/measure slots,
    # leaving the other pair empty. No voter_device_id: this count is for public positions only.
    candidate_id, candidate_we_vote_id = 0, None
    measure_id, measure_we_vote_id = 0, None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id, candidate_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id, measure_we_vote_id = ballot_item_id, ballot_item_we_vote_id
    return position_public_support_count_for_ballot_item_for_api(
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def positions_count_for_all_ballot_items_view(request):  # positionsCountForAllBallotItems
    """
    Retrieve the number of support/oppose positions from the voter's network
    :param request:
    :return:
    """
    api_results = positions_count_for_all_ballot_items_for_api(
        voter_device_id=get_voter_device_id(request),  # Standardized voter_device_id intake
        google_civic_election_id=request.GET.get('google_civic_election_id', 0))
    # Pass through only the fields the client expects, preserving key order
    json_data = {key: api_results[key] for key in ('status', 'success', 'position_counts_list')}
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def positions_count_for_one_ballot_item_view(request):  # positionsCountForOneBallotItem
    """
    Retrieve the number of support/oppose positions from the voter's network for one ballot item.
    We return results in the same format as positions_count_for_all_ballot_items_view.
    :param request:
    :return:
    """
    api_results = positions_count_for_one_ballot_item_for_api(
        voter_device_id=get_voter_device_id(request),  # Standardized voter_device_id intake
        ballot_item_we_vote_id=request.GET.get('ballot_item_we_vote_id', ""))
    # Pass through only the fields the client expects, preserving key order
    json_data = {key: api_results[key] for key in ('status', 'success', 'position_counts_list')}
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def quick_info_retrieve_view(request):
    """
    Retrieve the information necessary to populate a bubble next to a ballot item.
    :param request:
    :return:
    """
    return quick_info_retrieve_for_api(
        kind_of_ballot_item=request.GET.get('kind_of_ballot_item', ""),
        ballot_item_we_vote_id=request.GET.get('ballot_item_we_vote_id', ""))
def search_all_view(request):
    """
    Find information anywhere in the We Vote universe.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    text_from_search_field = request.GET.get('text_from_search_field', '')
    # Guard clause: nothing to search for
    if not positive_value_exists(text_from_search_field):
        json_data = {
            'status': 'MISSING_TEXT_FROM_SEARCH_FIELD',
            'success': False,
            'text_from_search_field': text_from_search_field,
            'voter_device_id': voter_device_id,
            'search_results': [],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    results = search_all_for_api(text_from_search_field, voter_device_id)
    if results['search_results_found']:
        search_results = results['search_results']
        status = results['status']
    else:
        search_results = []
        # Keep the "unable to find" prefix in front of the API's status
        status = "UNABLE_TO_FIND_ANY_SEARCH_RESULTS " + results['status']
    json_data = {
        'status': status,
        'success': True,
        'text_from_search_field': text_from_search_field,
        'voter_device_id': voter_device_id,
        'search_results': search_results,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def twitter_identity_retrieve_view(request):
    """
    Find the kind of owner and unique id of this twitter handle. We use this to take an incoming URI like
    https://wevote.guide/RepBarbaraLee and return the owner of 'RepBarbaraLee'. (twitterIdentityRetrieve)
    :param request:
    :return:
    """
    twitter_handle = request.GET.get('twitter_handle', '')
    # Guard clause: a twitter handle is required
    if not positive_value_exists(twitter_handle):
        json_data = {
            'status': 'VALID_TWITTER_HANDLE_MISSING',
            'success': False,
            'twitter_handle': twitter_handle,
            'owner_found': False,
            'kind_of_owner': '',
            'owner_we_vote_id': '',
            'owner_id': 0,
            'google_civic_election_id': 0,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    voter_device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    results = twitter_identity_retrieve_for_api(twitter_handle, voter_device_id)
    # Pass the API result fields straight through, preserving key order.
    # The twitter_* fields are only filled in by the API when
    # kind_of_owner == TWITTER_HANDLE_NOT_FOUND_IN_WE_VOTE.
    json_data = {key: results[key] for key in (
        'status', 'success', 'twitter_handle', 'owner_found', 'kind_of_owner',
        'owner_we_vote_id', 'owner_id', 'google_civic_election_id',
        'twitter_description', 'twitter_followers_count', 'twitter_photo_url',
        'twitter_user_website', 'twitter_name')}
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def twitter_sign_in_start_view(request):
    """
    Start off the process of signing in with Twitter (twitterSignInStart)
    :param request:
    :return:
    """
    incoming_voter_device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    results = twitter_sign_in_start_for_api(
        incoming_voter_device_id, request.GET.get('return_url', ''))
    json_data = {
        'status': results['status'],
        'success': results['success'],
        'voter_device_id': incoming_voter_device_id,
    }
    # If switch_accounts is true, a new voter_device_id was returned by the API
    for key in ('twitter_redirect_url', 'voter_info_retrieved', 'switch_accounts'):
        json_data[key] = results[key]
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def twitter_sign_in_request_access_token_view(request):
    """
    Step 2 of the Twitter Sign In Process (twitterSignInRequestAccessToken)
    Passes the incoming oauth_token/oauth_verifier to
    twitter_sign_in_request_access_token_for_api, then redirects to step 3
    (twitterSignInRequestVoterInfo) when the API returns a return_url.
    :param request:
    :return: redirect to step 3, or JSON with the API results
    """
    from urllib.parse import quote  # stdlib; local import so the file header stays untouched
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    incoming_request_token = request.GET.get('oauth_token', '')
    incoming_oauth_verifier = request.GET.get('oauth_verifier', '')
    return_url = request.GET.get('return_url', '')
    results = twitter_sign_in_request_access_token_for_api(voter_device_id,
                                                           incoming_request_token, incoming_oauth_verifier,
                                                           return_url)
    if positive_value_exists(results['return_url']):
        next_step_url = WE_VOTE_SERVER_ROOT_URL + "/apis/v1/twitterSignInRequestVoterInfo/"
        next_step_url += "?voter_device_id=" + voter_device_id
        # BUGFIX: return_url is itself a URL and may contain '&', '?' or '=' characters that
        # would corrupt this query string if appended raw -- percent-encode it. The receiving
        # view reads it back via request.GET, which decodes it again.
        next_step_url += "&return_url=" + quote(results['return_url'], safe='')
        return HttpResponseRedirect(next_step_url)
    json_data = {
        'status': results['status'],
        'success': results['success'],
        'voter_device_id': voter_device_id,
        'access_token_and_secret_returned': results['access_token_and_secret_returned'],
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def twitter_sign_in_request_voter_info_view(request):
    """
    Step 3 of the Twitter Sign In Process (twitterSignInRequestVoterInfo)
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    return_url = request.GET.get('return_url', '')
    results = twitter_sign_in_request_voter_info_for_api(voter_device_id, return_url)
    if positive_value_exists(results['return_url']):
        destination_url = results['return_url']
        if results['twitter_handle_found']:
            # Append the retrieved twitter handle after the "signinswitchend" path segment
            destination_url = destination_url.replace(
                "signinswitchend", "signinswitchend/" + results['twitter_handle'])
        return HttpResponseRedirect(destination_url)
    # No redirect requested: pass the API result fields straight through, preserving key order
    json_data = {key: results[key] for key in (
        'status', 'success', 'voter_device_id', 'twitter_handle',
        'twitter_handle_found', 'voter_info_retrieved', 'switch_accounts')}
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_address_retrieve_view(request):  # voterAddressRetrieveView
    """
    Retrieve an address for this voter so we can figure out which ballot to display.
    Three outcomes: (1) a saved address is found and returned; (2) no saved address and
    guess_if_no_address_saved is falsy, so an empty address is returned; (3) no saved address,
    so we guess the voter's location from their IP address, save it, and fetch a fresh ballot.
    :param request:
    :return: HttpResponse with JSON describing the address (or the empty/guessed address)
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    # GET values arrive as strings, so map the common "false" spellings to a real False.
    # Default is True: guess from IP when no address is saved.
    guess_if_no_address_saved = request.GET.get('guess_if_no_address_saved', True)
    if guess_if_no_address_saved == 'false':
        guess_if_no_address_saved = False
    elif guess_if_no_address_saved == 'False':
        guess_if_no_address_saved = False
    elif guess_if_no_address_saved == '0':
        guess_if_no_address_saved = False
    status = ''
    voter_address_manager = VoterAddressManager()
    voter_device_link_manager = VoterDeviceLinkManager()
    voter_address_retrieve_results = voter_address_retrieve_for_api(voter_device_id)
    # Outcome 1: an address is already saved for this voter -- return it, making sure we
    # also have a google_civic_election_id to go with it
    if voter_address_retrieve_results['address_found']:
        status += voter_address_retrieve_results['status']
        if positive_value_exists(voter_address_retrieve_results['google_civic_election_id']):
            google_civic_election_id = voter_address_retrieve_results['google_civic_election_id']
        else:
            # This block of code helps us if the google_civic_election_id hasn't been saved in the voter_address table
            # We retrieve voter_device_link
            google_civic_election_id = 0
            voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
            if voter_device_link_results['voter_device_link_found']:
                voter_device_link = voter_device_link_results['voter_device_link']
            else:
                voter_device_link = VoterDeviceLink()
            # Retrieve the voter_address
            voter_address_results = voter_address_manager.retrieve_ballot_address_from_voter_id(voter_id)
            if voter_address_results['voter_address_found']:
                voter_address = voter_address_results['voter_address']
            else:
                voter_address = VoterAddress()
            results = choose_election_and_prepare_ballot_data(voter_device_link, google_civic_election_id,
                                                              voter_address)
            status += results['status']
            if results['voter_ballot_saved_found']:
                google_civic_election_id = results['google_civic_election_id']
        json_data = {
            'voter_device_id': voter_address_retrieve_results['voter_device_id'],
            'address_type': voter_address_retrieve_results['address_type'],
            'text_for_map_search': voter_address_retrieve_results['text_for_map_search'],
            'google_civic_election_id': google_civic_election_id,
            'latitude': voter_address_retrieve_results['latitude'],
            'longitude': voter_address_retrieve_results['longitude'],
            'normalized_line1': voter_address_retrieve_results['normalized_line1'],
            'normalized_line2': voter_address_retrieve_results['normalized_line2'],
            'normalized_city': voter_address_retrieve_results['normalized_city'],
            'normalized_state': voter_address_retrieve_results['normalized_state'],
            'normalized_zip': voter_address_retrieve_results['normalized_zip'],
            'success': voter_address_retrieve_results['success'],
            'status': voter_address_retrieve_results['status'],
            'address_found': voter_address_retrieve_results['address_found'],
            'guess_if_no_address_saved': guess_if_no_address_saved,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    status += voter_address_retrieve_results['status'] + ", "
    # If we are here, then an address wasn't found, and we either want to return that info, or take a guess
    # at the voter's location by looking it up by IP address
    if not positive_value_exists(guess_if_no_address_saved):
        # Outcome 2: do not guess at an address -- return an empty address payload
        status += 'DO_NOT_GUESS_IF_NO_ADDRESS_SAVED'
        json_data = {
            'voter_device_id': voter_device_id,
            'address_type': '',
            'text_for_map_search': '',
            'google_civic_election_id': 0,
            'latitude': '',
            'longitude': '',
            'normalized_line1': '',
            'normalized_line2': '',
            'normalized_city': '',
            'normalized_state': '',
            'normalized_zip': '',
            'success': voter_address_retrieve_results['success'],
            'status': voter_address_retrieve_results['status'],
            'address_found': voter_address_retrieve_results['address_found'],
            'guess_if_no_address_saved': guess_if_no_address_saved,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    else:
        status += 'GUESS_IF_NO_ADDRESS_SAVED' + ", "
    # If here, we are going to guess at the voter's location based on IP address
    voter_location_results = voter_location_retrieve_from_ip_for_api(request)
    # Outcome 3: no saved address, but we located the voter from their IP address
    if voter_location_results['voter_location_found']:
        status += 'VOTER_ADDRESS_RETRIEVE-VOTER_LOCATION_FOUND_FROM_IP '
        # Since a new location was found, we need to save the address and then reach out to Google Civic
        text_for_map_search = voter_location_results['voter_location']
        status += '*** ' + text_for_map_search + ' ***, '
        google_civic_election_id = 0
        voter_address_save_results = voter_address_manager.update_or_create_voter_address(
            voter_id, BALLOT_ADDRESS, text_for_map_search)
        status += voter_address_save_results['status'] + ", "
        if voter_address_save_results['success'] and voter_address_save_results['voter_address_found']:
            voter_address = voter_address_save_results['voter_address']
            use_test_election = False
            # Reach out to Google and populate ballot items in the database with fresh ballot data
            # NOTE: 2016-05-26 Google civic NEVER returns a ballot for City, State ZIP, so we could change this
            google_retrieve_results = voter_ballot_items_retrieve_from_google_civic_for_api(
                voter_device_id, text_for_map_search, use_test_election)
            status += google_retrieve_results['status'] + ", "
            if positive_value_exists(google_retrieve_results['google_civic_election_id']):
                # Update voter_address with the google_civic_election_id retrieved from Google Civic
                # and clear out ballot_saved information
                google_civic_election_id = google_retrieve_results['google_civic_election_id']
                voter_address.google_civic_election_id = google_civic_election_id
                voter_address_update_results = voter_address_manager.update_existing_voter_address_object(
                    voter_address)
                if voter_address_update_results['success']:
                    # Replace the former google_civic_election_id from this voter_device_link
                    voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(
                        voter_device_id)
                    if voter_device_link_results['voter_device_link_found']:
                        voter_device_link = voter_device_link_results['voter_device_link']
                        voter_device_link_manager.update_voter_device_link_with_election_id(
                            voter_device_link, google_retrieve_results['google_civic_election_id'])
            else:
                # This block of code helps us if the google_civic_election_id wasn't found when we reached out
                # to the Google Civic API, following finding the voter's location from IP address.
                google_civic_election_id = 0
                # We retrieve voter_device_link
                voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
                if voter_device_link_results['voter_device_link_found']:
                    voter_device_link = voter_device_link_results['voter_device_link']
                else:
                    voter_device_link = VoterDeviceLink()
                # Retrieve the voter_address
                voter_address_results = voter_address_manager.retrieve_ballot_address_from_voter_id(voter_id)
                if voter_address_results['voter_address_found']:
                    voter_address = voter_address_results['voter_address']
                else:
                    voter_address = VoterAddress()
                results = choose_election_and_prepare_ballot_data(voter_device_link, google_civic_election_id,
                                                                  voter_address)
                status += results['status']
                if results['voter_ballot_saved_found']:
                    google_civic_election_id = results['google_civic_election_id']
        # Re-read the address now that it has (possibly) been saved, so the response
        # reflects the stored/normalized values
        voter_address_retrieve_results = voter_address_retrieve_for_api(voter_device_id)
        status += voter_address_retrieve_results['status']
        if voter_address_retrieve_results['address_found']:
            json_data = {
                'voter_device_id': voter_device_id,
                'address_type': voter_address_retrieve_results['address_type'],
                'text_for_map_search': voter_address_retrieve_results['text_for_map_search'],
                'google_civic_election_id': google_civic_election_id,
                'latitude': voter_address_retrieve_results['latitude'],
                'longitude': voter_address_retrieve_results['longitude'],
                'normalized_line1': voter_address_retrieve_results['normalized_line1'],
                'normalized_line2': voter_address_retrieve_results['normalized_line2'],
                'normalized_city': voter_address_retrieve_results['normalized_city'],
                'normalized_state': voter_address_retrieve_results['normalized_state'],
                'normalized_zip': voter_address_retrieve_results['normalized_zip'],
                'success': voter_address_retrieve_results['success'],
                'status': status,
                'address_found': voter_address_retrieve_results['address_found'],
                'guess_if_no_address_saved': guess_if_no_address_saved,
            }
        else:
            # Address not found from IP address
            status += 'VOTER_ADDRESS_RETRIEVE_PART2_NO_ADDRESS'
            json_data = {
                'voter_device_id': voter_device_id,
                'address_type': '',
                'text_for_map_search': '',
                'google_civic_election_id': 0,
                'latitude': '',
                'longitude': '',
                'normalized_line1': '',
                'normalized_line2': '',
                'normalized_city': '',
                'normalized_state': '',
                'normalized_zip': '',
                'success': voter_address_retrieve_results['success'],
                'status': voter_address_retrieve_results['status'],
                'address_found': voter_address_retrieve_results['address_found'],
                'guess_if_no_address_saved': guess_if_no_address_saved,
            }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    else:
        # Could not locate the voter from their IP address either -- return an empty address payload
        status += 'VOTER_ADDRESS_RETRIEVE-VOTER_LOCATION_NOT_FOUND_FROM_IP: '
        status += voter_location_results['status']
        json_data = {
            'voter_device_id': voter_device_id,
            'address_type': '',
            'text_for_map_search': '',
            'google_civic_election_id': 0,
            'latitude': '',
            'longitude': '',
            'normalized_line1': '',
            'normalized_line2': '',
            'normalized_city': '',
            'normalized_state': '',
            'normalized_zip': '',
            'success': False,
            'status': status,
            'address_found': False,
            'guess_if_no_address_saved': guess_if_no_address_saved,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_address_save_view(request):  # voterAddressSave
    """
    Save or update an address for this voter. Once the address is saved, update the ballot information.
    Ends by returning the freshly retrieved ballot for this voter and election.
    :param request:
    :return: HttpResponse with the JSON ballot data from voter_ballot_items_retrieve_for_api
    """
    google_civic_election_id = 0
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    # The address is a required GET variable; distinguish "missing" from merely "empty"
    try:
        text_for_map_search = request.GET['text_for_map_search']
        text_for_map_search = text_for_map_search.strip()
        address_variable_exists = True
    except KeyError:
        text_for_map_search = ''
        address_variable_exists = False
    # Guard clause: reject an invalid voter_device_id before doing any database work
    device_id_results = is_voter_device_id_valid(voter_device_id)
    if not device_id_results['success']:
        json_data = {
            'status': device_id_results['status'],
            'success': False,
            'voter_device_id': voter_device_id,
            'text_for_map_search': text_for_map_search,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # Guard clause: the address GET variable must be present (even if empty)
    if not address_variable_exists:
        json_data = {
            'status': "MISSING_GET_VARIABLE-ADDRESS",
            'success': False,
            'voter_device_id': voter_device_id,
            'text_for_map_search': text_for_map_search,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # We retrieve voter_device_link
    voter_device_link_manager = VoterDeviceLinkManager()
    voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
    if voter_device_link_results['voter_device_link_found']:
        voter_device_link = voter_device_link_results['voter_device_link']
        voter_id = voter_device_link.voter_id
    else:
        json_data = {
            'status': "VOTER_DEVICE_LINK_NOT_FOUND_FROM_DEVICE_ID",
            'success': False,
            'voter_device_id': voter_device_id,
            'text_for_map_search': text_for_map_search,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # Guard clause: the device link must resolve to a real voter
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VOTER_NOT_FOUND_FROM_DEVICE_ID",
            'success': False,
            'voter_device_id': voter_device_id,
            'text_for_map_search': text_for_map_search,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # Save the address value, and clear out ballot_saved information
    voter_address_manager = VoterAddressManager()
    voter_address_save_results = voter_address_manager.update_or_create_voter_address(
        voter_id, BALLOT_ADDRESS, text_for_map_search)
    if voter_address_save_results['success'] and voter_address_save_results['voter_address_found']:
        # # Remove the former google_civic_election_id from this voter_device_id
        # voter_device_link_manager.update_voter_device_link_with_election_id(voter_device_link, 0)
        voter_address = voter_address_save_results['voter_address']
        use_test_election = False
        # Reach out to Google and populate ballot items in the database with fresh ballot data
        google_retrieve_results = voter_ballot_items_retrieve_from_google_civic_for_api(
            voter_device_id, text_for_map_search, use_test_election)
        # Update voter_address with the google_civic_election_id retrieved from Google Civic
        # and clear out ballot_saved information IFF we got a valid google_civic_election_id back
        google_civic_election_id = convert_to_int(google_retrieve_results['google_civic_election_id'])
        # At this point proceed to update google_civic_election_id whether it is a positive integer or zero
        voter_address.google_civic_election_id = google_civic_election_id
        voter_address_update_results = voter_address_manager.update_existing_voter_address_object(voter_address)
        if voter_address_update_results['success']:
            # Replace the former google_civic_election_id from this voter_device_link
            voter_device_link_manager = VoterDeviceLinkManager()
            voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
            if voter_device_link_results['voter_device_link_found']:
                voter_device_link = voter_device_link_results['voter_device_link']
                voter_device_link_manager.update_voter_device_link_with_election_id(
                    voter_device_link, google_civic_election_id)
    # Return the ballot for this voter; google_civic_election_id stays 0 if the save failed
    json_data = voter_ballot_items_retrieve_for_api(voter_device_id, google_civic_election_id)
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_ballot_items_retrieve_view(request):
    """
    (voterBallotItemsRetrieve) Request a skeleton of ballot data for this voter location,
    so that the web_app has all of the ids it needs to make more requests for data about each ballot item.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    # If passed in, we want to look at this specific election
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    # GET values arrive as strings; normalize the "false" spellings to a real False
    use_test_election = request.GET.get('use_test_election', False)
    if use_test_election in ('false', 'False'):
        use_test_election = False
    if use_test_election:
        google_civic_election_id = 2000  # The Google Civic test election
    json_data = voter_ballot_items_retrieve_for_api(voter_device_id, google_civic_election_id)
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_ballot_items_retrieve_from_google_civic_view(request):
    """
    Retrieve a fresh ballot for the given address via
    voter_ballot_items_retrieve_from_google_civic_for_api. When a real (non-test) election
    comes back and we can resolve the voter, record a VoterBallotSaved entry so we remember
    which ballot this voter received.
    :param request:
    :return: HttpResponse with the JSON results from the API call
    """
    voter_device_id = get_voter_device_id(request)
    text_for_map_search = request.GET.get('text_for_map_search', '')
    # GET values arrive as strings; normalize the "false" spellings to a real False
    use_test_election = request.GET.get('use_test_election', False)
    use_test_election = False if use_test_election == 'false' else use_test_election
    use_test_election = False if use_test_election == 'False' else use_test_election
    voter_id = 0
    results = voter_ballot_items_retrieve_from_google_civic_for_api(
        voter_device_id, text_for_map_search, use_test_election)
    if results['google_civic_election_id'] and not use_test_election:
        # After the ballot is retrieved from google we want to save some info about it for the voter
        if positive_value_exists(voter_device_id):
            voter_device_link_manager = VoterDeviceLinkManager()
            voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
            if voter_device_link_results['voter_device_link_found']:
                voter_device_link = voter_device_link_results['voter_device_link']
                voter_id = voter_device_link.voter_id
        if positive_value_exists(voter_id):
            voter_ballot_saved_manager = VoterBallotSavedManager()
            is_from_substituted_address = False
            substituted_address_nearby = ''
            is_from_test_address = False
            # We don't update the voter_address because this view might be used independent of the voter_address
            # Save the meta information for this ballot data. If it fails, ignore the failure
            voter_ballot_saved_manager.create_voter_ballot_saved(
                voter_id,
                results['google_civic_election_id'],
                results['election_date_text'],
                results['election_description_text'],
                results['text_for_map_search'],
                substituted_address_nearby,
                is_from_substituted_address,
                is_from_test_address
            )
    return HttpResponse(json.dumps(results), content_type='application/json')
def voter_count_view(request):
    """
    Thin wrapper that returns the voter_count() response.
    :param request: unused
    :return: whatever voter_count() returns
    """
    return voter_count()
def voter_create_view(request):  # voterCreate
    """
    Create a voter for this voter_device_id via voter_create_for_api.
    :param request:
    :return: whatever voter_create_for_api returns
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    return voter_create_for_api(voter_device_id)
def voter_guide_possibility_retrieve_view(request):
    """
    Retrieve a previously saved website that may contain a voter guide (voterGuidePossibilityRetrieve)
    :param request:
    :return:
    """
    return voter_guide_possibility_retrieve_for_api(
        voter_device_id=get_voter_device_id(request),  # Standardized voter_device_id intake
        voter_guide_possibility_url=request.GET.get('voter_guide_possibility_url', ''))
def voter_guide_possibility_save_view(request):
    """
    Save a website that may contain a voter guide (voterGuidePossibilitySave)
    :param request:
    :return:
    """
    return voter_guide_possibility_save_for_api(
        voter_device_id=get_voter_device_id(request),  # Standardized voter_device_id intake
        voter_guide_possibility_url=request.GET.get('voter_guide_possibility_url', ''))
def voter_guides_followed_retrieve_view(request):
    """
    Retrieve the voter guides this voter is following, up to the requested maximum.
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # Standardized voter_device_id intake
    max_to_retrieve = get_maximum_number_to_retrieve_from_request(request)
    return voter_guides_followed_retrieve_for_api(
        voter_device_id=device_id, maximum_number_to_retrieve=max_to_retrieve)
def voter_guides_to_follow_retrieve_view(request):  # voterGuidesToFollowRetrieve
    """
    Retrieve a list of voter_guides that a voter might want to follow (voterGuidesToFollow)
    :param request:
    :return: HttpResponse with the json_data from voter_guides_to_follow_retrieve_for_api
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', '')
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', '')
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    search_string = request.GET.get('search_string', '')
    # GET values arrive as strings; normalize the "false" spellings to a real False
    use_test_election = request.GET.get('use_test_election', False)
    use_test_election = False if use_test_election == 'false' else use_test_election
    use_test_election = False if use_test_election == 'False' else use_test_election
    maximum_number_to_retrieve = get_maximum_number_to_retrieve_from_request(request)
    if positive_value_exists(ballot_item_we_vote_id):
        # We don't need both ballot_item and google_civic_election_id
        google_civic_election_id = 0
    else:
        if positive_value_exists(use_test_election):
            google_civic_election_id = 2000  # The Google Civic API Test election
        elif positive_value_exists(google_civic_election_id) or google_civic_election_id == 0:
            # If an election was specified, we can skip down to retrieving the voter_guides
            # NOTE(review): google_civic_election_id went through convert_to_int above, so this
            # condition looks always-true, which would make the cache-lookup branch below
            # unreachable -- confirm intent
            pass
        else:
            # If here we don't have either a ballot_item or a google_civic_election_id.
            # Look in the places we cache google_civic_election_id
            google_civic_election_id = 0
            voter_device_link_manager = VoterDeviceLinkManager()
            voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id)
            voter_device_link = voter_device_link_results['voter_device_link']
            if voter_device_link_results['voter_device_link_found']:
                voter_id = voter_device_link.voter_id
                voter_address_manager = VoterAddressManager()
                voter_address_results = voter_address_manager.retrieve_address(0, voter_id)
                if voter_address_results['voter_address_found']:
                    voter_address = voter_address_results['voter_address']
                else:
                    voter_address = VoterAddress()
            else:
                voter_address = VoterAddress()
            results = choose_election_from_existing_data(voter_device_link, google_civic_election_id, voter_address)
            google_civic_election_id = results['google_civic_election_id']
    # In order to return voter_guides that are independent of an election or ballot_item, we need to pass in
    # google_civic_election_id as 0
    results = voter_guides_to_follow_retrieve_for_api(voter_device_id, kind_of_ballot_item, ballot_item_we_vote_id,
                                                      google_civic_election_id, search_string,
                                                      maximum_number_to_retrieve)
    return HttpResponse(json.dumps(results['json_data']), content_type='application/json')
def voter_location_retrieve_from_ip_view(request):  # GeoIP geo location
    """
    Take the IP address and return a location (voterLocationRetrieveFromIP)
    :param request:
    :return: HttpResponse with a JSON payload describing the looked-up location
    """
    ip_address = request.GET.get('ip_address', '')
    location_results = voter_location_retrieve_from_ip_for_api(request, ip_address)
    # Echo back exactly these keys from the lookup results, in this order.
    response_keys = ('success', 'status', 'voter_location_found', 'voter_location',
                     'ip_address', 'x_forwarded_for', 'http_x_forwarded_for')
    json_data = {key: location_results[key] for key in response_keys}
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_photo_save_view(request):
    """
    Save or update a photo for this voter
    :param request:
    :return: HttpResponse with a JSON payload echoing the photo URL and save status
    """
    status = ''
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    # Distinguish "parameter absent" from "parameter present but empty".
    if 'facebook_profile_image_url_https' in request.GET:
        facebook_profile_image_url_https = request.GET['facebook_profile_image_url_https'].strip()
        facebook_photo_variable_exists = True
    else:
        facebook_profile_image_url_https = ''
        facebook_photo_variable_exists = False
    results = voter_photo_save_for_api(voter_device_id,
                                       facebook_profile_image_url_https, facebook_photo_variable_exists)
    voter_photo_saved = bool(results['success'])
    json_data = {
        'status': results['status'],
        'success': results['success'],
        'voter_device_id': voter_device_id,
        'facebook_profile_image_url_https': facebook_profile_image_url_https,
        'voter_photo_saved': voter_photo_saved,
    }
    if not positive_value_exists(facebook_profile_image_url_https):
        # No photo URL supplied; return the API status unchanged.
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # If here, we saved a valid photo
    status += results['status'] + ", "
    json_data['status'] = status
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_position_retrieve_view(request):
    """
    Retrieve all of the details about a single position based on unique identifier. voterPositionRetrieve
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    # ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Route the single incoming we_vote_id to the slot matching its kind;
    # the other slots stay as empty strings.
    office_we_vote_id = ''
    candidate_we_vote_id = ''
    measure_we_vote_id = ''
    if kind_of_ballot_item == OFFICE:
        office_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == CANDIDATE:
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_position_retrieve_for_api(
        voter_device_id=voter_device_id,
        office_we_vote_id=office_we_vote_id,
        candidate_we_vote_id=candidate_we_vote_id,
        measure_we_vote_id=measure_we_vote_id
    )
def voter_position_visibility_save_view(request):  # voterPositionVisibilitySave
    """
    Change the visibility (between public vs. friends-only) for a single measure or candidate for one voter
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    visibility_setting = request.GET.get('visibility_setting', False)
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Only the identifier matching kind_of_ballot_item is populated; the rest stay None.
    office_we_vote_id = None
    candidate_we_vote_id = None
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == OFFICE:
        office_we_vote_id = ballot_item_we_vote_id
    results = voter_position_visibility_save_for_api(
        voter_device_id=voter_device_id,
        office_we_vote_id=office_we_vote_id,
        candidate_we_vote_id=candidate_we_vote_id,
        measure_we_vote_id=measure_we_vote_id,
        visibility_setting=visibility_setting,
    )
    return HttpResponse(json.dumps(results), content_type='application/json')
def voter_all_positions_retrieve_view(request):  # voterAllPositionsRetrieve
    """
    Retrieve a list of all positions for one voter, including "is_support", "is_oppose" and "statement_text".
    Note that these can either be public positions or private positions.
    :param request:
    :return:
    """
    # Standardized retrieval of the voter_device_id from the request.
    return voter_all_positions_retrieve_for_api(
        voter_device_id=get_voter_device_id(request),
        google_civic_election_id=request.GET.get('google_civic_election_id', 0))
def voter_position_like_off_save_view(request):
    """
    Un-mark the position_like for a single position for one voter (voterPositionLikeOffSave)
    :param request:
    :return:
    """
    # Standardized retrieval of the voter_device_id from the request.
    return voter_position_like_off_save_for_api(
        voter_device_id=get_voter_device_id(request),
        position_like_id=request.GET.get('position_like_id', 0),
        position_entered_id=request.GET.get('position_entered_id', 0))
def voter_position_like_on_save_view(request):
    """
    Mark the position_like for a single position for one voter (voterPositionLikeOnSave)
    :param request:
    :return:
    """
    # Standardized retrieval of the voter_device_id from the request.
    return voter_position_like_on_save_for_api(
        voter_device_id=get_voter_device_id(request),
        position_entered_id=request.GET.get('position_entered_id', 0))
def voter_position_like_status_retrieve_view(request):
    """
    Retrieve whether or not a position_like is marked for position (voterPositionLikeStatusRetrieve)
    :param request:
    :return:
    """
    # Standardized retrieval of the voter_device_id from the request.
    return voter_position_like_status_retrieve_for_api(
        voter_device_id=get_voter_device_id(request),
        position_entered_id=request.GET.get('position_entered_id', 0))
def position_like_count_view(request):
    """
    Retrieve the total number of Likes that a position has received, either from the perspective of the voter's
    network of friends, or the entire network. (positionLikeCount)
    :param request:
    :return:
    """
    # Standardized retrieval of the voter_device_id from the request.
    return position_like_count_for_api(
        voter_device_id=get_voter_device_id(request),
        position_entered_id=request.GET.get('position_entered_id', 0),
        limit_to_voters_network=request.GET.get('limit_to_voters_network', False))
def voter_position_comment_save_view(request):  # voterPositionCommentSave
    """
    Save comment for a single measure or candidate for one voter
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    position_we_vote_id = request.GET.get('position_we_vote_id', "")
    statement_text = request.GET.get('statement_text', False)
    statement_html = request.GET.get('statement_html', False)
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Only the identifier matching kind_of_ballot_item is populated; the rest stay None.
    office_we_vote_id = None
    candidate_we_vote_id = None
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == OFFICE:
        office_we_vote_id = ballot_item_we_vote_id
    results = voter_position_comment_save_for_api(
        voter_device_id=voter_device_id,
        position_we_vote_id=position_we_vote_id,
        office_we_vote_id=office_we_vote_id,
        candidate_we_vote_id=candidate_we_vote_id,
        measure_we_vote_id=measure_we_vote_id,
        statement_text=statement_text,
        statement_html=statement_html,
    )
    return HttpResponse(json.dumps(results), content_type='application/json')
def voter_opposing_save_view(request):
    """
    Save an oppose position for a single measure or candidate for one voter (voterOpposingSave)
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default both identifier pairs to "not specified", then fill the matching pair.
    candidate_id = 0
    candidate_we_vote_id = None
    measure_id = 0
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_opposing_save(voter_device_id=voter_device_id,
                               candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
                               measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
class VoterExportView(APIView):
    """
    Export raw voter data to JSON format
    """
    def get(self, request):  # Removed: , format=None
        """Return the serialized voter list, or the prepared error payload on failure."""
        voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
        results = voter_retrieve_list_for_api(voter_device_id)
        # A missing or falsy 'success' flag means an error payload was prepared.
        if not results.get('success'):
            return HttpResponse(json.dumps(results['json_data']), content_type='application/json')
        serializer = VoterSerializer(results['voter_list'], many=True)
        return Response(serializer.data)
def voter_retrieve_view(request):  # voterRetrieve
    """
    Retrieve a single voter based on voter_device
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    json_payload = json.dumps(voter_retrieve_for_api(voter_device_id=voter_device_id))
    return HttpResponse(json_payload, content_type='application/json')
def voter_sign_out_view(request):  # voterSignOut
    """
    Sign out from this device. (Delete this voter_device_id from the database, OR if sign_out_all_devices is True,
    sign out from all devices.)
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    sign_out_all_devices = request.GET.get('sign_out_all_devices', 0)
    if positive_value_exists(voter_device_id):
        results = voter_sign_out_for_api(voter_device_id=voter_device_id,
                                         sign_out_all_devices=sign_out_all_devices)
        success = results['success']
        status = results['status']
    else:
        # No device id supplied: nothing to sign out of.
        success = False
        status = "VOTER_SIGN_OUT_VOTER_DEVICE_ID_DOES_NOT_EXIST"
    json_data = {
        'voter_device_id': voter_device_id,
        'sign_out_all_devices': sign_out_all_devices,
        'success': success,
        'status': status,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_stop_opposing_save_view(request):
    """
    Remove an oppose position for a single measure or candidate for one voter (voterStopOpposingSave)
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default both identifier pairs to "not specified", then fill the matching pair.
    candidate_id = 0
    candidate_we_vote_id = None
    measure_id = 0
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_stop_opposing_save(voter_device_id=voter_device_id,
                                    candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
                                    measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_stop_supporting_save_view(request):
    """
    Remove a support position for a single measure or candidate for one voter (voterStopSupportingSave)
    Default to set this as a position for your friends only.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default both identifier pairs to "not specified", then fill the matching pair.
    candidate_id = 0
    candidate_we_vote_id = None
    measure_id = 0
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_stop_supporting_save(voter_device_id=voter_device_id,
                                      candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
                                      measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_supporting_save_view(request):
    """
    Save support for a single measure or candidate for one voter (voterSupportingSave)
    Default to set this as a position for your friends only.
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default both identifier pairs to "not specified", then fill the matching pair.
    candidate_id = 0
    candidate_we_vote_id = None
    measure_id = 0
    measure_we_vote_id = None
    if kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_supporting_save_for_api(voter_device_id=voter_device_id,
                                         candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
                                         measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_star_off_save_view(request):
    """
    Un-mark the star for a single measure, office or candidate for one voter (voterStarOffSave)
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default every identifier to "not specified", then fill in the pair that
    # matches the kind of ballot item passed in.
    office_id = 0
    office_we_vote_id = ''
    candidate_id = 0
    candidate_we_vote_id = ''
    measure_id = 0
    measure_we_vote_id = ''
    if kind_of_ballot_item == OFFICE:
        office_id = ballot_item_id
        office_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_star_off_save_for_api(
        voter_device_id=voter_device_id,
        office_id=office_id, office_we_vote_id=office_we_vote_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_star_on_save_view(request):
    """
    Mark the star for a single measure, office or candidate for one voter (voterStarOnSave)
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default every identifier to "not specified", then fill in the pair that
    # matches the kind of ballot item passed in.
    office_id = 0
    office_we_vote_id = ''
    candidate_id = 0
    candidate_we_vote_id = ''
    measure_id = 0
    measure_we_vote_id = ''
    if kind_of_ballot_item == OFFICE:
        office_id = ballot_item_id
        office_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_star_on_save_for_api(
        voter_device_id=voter_device_id,
        office_id=office_id, office_we_vote_id=office_we_vote_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_star_status_retrieve_view(request):
    """
    Retrieve whether or not a star is marked for an office, candidate or measure based on unique identifier
    :param request:
    :return:
    """
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', "")
    ballot_item_id = request.GET.get('ballot_item_id', 0)
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
    # Default every identifier to "not specified", then fill in the pair that
    # matches the kind of ballot item passed in.
    office_id = 0
    office_we_vote_id = ''
    candidate_id = 0
    candidate_we_vote_id = ''
    measure_id = 0
    measure_we_vote_id = ''
    if kind_of_ballot_item == OFFICE:
        office_id = ballot_item_id
        office_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == CANDIDATE:
        candidate_id = ballot_item_id
        candidate_we_vote_id = ballot_item_we_vote_id
    elif kind_of_ballot_item == MEASURE:
        measure_id = ballot_item_id
        measure_we_vote_id = ballot_item_we_vote_id
    return voter_star_status_retrieve_for_api(
        voter_device_id=voter_device_id,
        office_id=office_id, office_we_vote_id=office_we_vote_id,
        candidate_id=candidate_id, candidate_we_vote_id=candidate_we_vote_id,
        measure_id=measure_id, measure_we_vote_id=measure_we_vote_id)
def voter_all_stars_status_retrieve_view(request):  # voterAllStarsStatusRetrieve
    """
    A list of all of the stars that the voter has marked.
    :param request:
    :return:
    """
    device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    return voter_all_stars_status_retrieve_for_api(voter_device_id=device_id)
def voter_update_view(request):  # voterUpdate
    """
    Update profile-related information for this voter
    :param request:
    :return: HttpResponse with a JSON payload echoing the parsed inputs plus
             status / success / voter_updated
    """
    voter_updated = False
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id

    def string_param_or_false(name):
        # If we have an incoming GET value for a variable, use it. If we don't, return
        # False as a signal to voter update code not to change the variable.
        # (To set a variable to False, clients pass in the string "False".)
        try:
            value = request.GET[name].strip()
        except KeyError:
            return False
        return False if value.lower() == 'false' else value

    facebook_email = string_param_or_false('facebook_email')
    facebook_profile_image_url_https = string_param_or_false('facebook_profile_image_url_https')
    first_name = string_param_or_false('first_name')
    middle_name = string_param_or_false('middle_name')
    last_name = string_param_or_false('last_name')
    twitter_profile_image_url_https = string_param_or_false('twitter_profile_image_url_https')

    def json_response(status, success, updated):
        # Every exit path returns the same payload shape, echoing the parsed inputs.
        json_data = {
            'status': status,
            'success': success,
            'voter_device_id': voter_device_id,
            'facebook_email': facebook_email,
            'facebook_profile_image_url_https': facebook_profile_image_url_https,
            'first_name': first_name,
            'middle_name': middle_name,
            'last_name': last_name,
            'twitter_profile_image_url_https': twitter_profile_image_url_https,
            'voter_updated': updated,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    device_id_results = is_voter_device_id_valid(voter_device_id)
    if not device_id_results['success']:
        return json_response(device_id_results['status'], False, voter_updated)

    # Bug fix: twitter_profile_image_url_https was previously omitted from this check,
    # so a request that passed ONLY a twitter image was rejected as "no variables".
    at_least_one_variable_has_changed = any([
        facebook_email, facebook_profile_image_url_https,
        first_name, middle_name, last_name,
        twitter_profile_image_url_https,
    ])
    if not at_least_one_variable_has_changed:
        # Not an error: there is simply nothing to save (success stays True).
        return json_response("MISSING_VARIABLE-NO_VARIABLES_PASSED_IN_TO_CHANGE", True, voter_updated)

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if voter_id < 0:
        # NOTE(review): preserves the original "< 0" sentinel; confirm the fetch helper
        # really returns a negative value (and not 0) when no voter is found.
        return json_response("VOTER_NOT_FOUND_FROM_DEVICE_ID", False, voter_updated)

    # At this point, we have a valid voter
    voter_manager = VoterManager()
    results = voter_manager.update_voter(voter_id, facebook_email, facebook_profile_image_url_https,
                                         first_name, middle_name, last_name, twitter_profile_image_url_https)
    return json_response(results['status'], results['success'], results['voter_updated'])
| {
"content_hash": "305235722d56a123bab1c273f02681cc",
"timestamp": "",
"source": "github",
"line_count": 2062,
"max_line_length": 120,
"avg_line_length": 45.95392822502425,
"alnum_prop": 0.6321116118070432,
"repo_name": "wevote/WebAppPublic",
"id": "c2ef1b82a400cc4ab8f328e9fa580f50f9290b32",
"size": "94839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apis_v1/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
} |
"""
byceps.services.orga_presence.transfer.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from ...party.transfer.models import Party
from ...user.transfer.models import User
from ....util.datetime.range import DateTimeRange
class TimeSlotType(Enum):
    """Discriminates the kinds of time slots shown on the presence timeline."""

    # Explicit values mirror the 1-based numbering the functional Enum API assigns.
    party = 1
    presence = 2
    task = 3
@dataclass(frozen=True)
class TimeSlot:
    """Base class for a span of time on the orga presence timeline."""

    # Discriminator; each concrete subclass sets its own member of TimeSlotType.
    type: TimeSlotType
    starts_at: datetime
    ends_at: datetime

    @property
    def range(self) -> DateTimeRange:
        """Return the slot's start/end pair as a DateTimeRange."""
        return DateTimeRange(self.starts_at, self.ends_at)
@dataclass(frozen=True)
class PartyTimeSlot(TimeSlot):
    """A time slot spanning an entire party."""

    party: Party

    @classmethod
    def from_party(cls, party: Party) -> PartyTimeSlot:
        """Derive the slot directly from the party's own schedule."""
        return cls(
            party=party,
            type=TimeSlotType.party,
            starts_at=party.starts_at,
            ends_at=party.ends_at,
        )
@dataclass(frozen=True)
class PresenceTimeSlot(TimeSlot):
    """A time slot during which one organizer is present."""

    orga: User

    @classmethod
    def from_(
        cls, orga: User, starts_at: datetime, ends_at: datetime
    ) -> PresenceTimeSlot:
        """Build a presence slot for *orga* between *starts_at* and *ends_at*."""
        return cls(
            orga=orga,
            type=TimeSlotType.presence,
            starts_at=starts_at,
            ends_at=ends_at,
        )
@dataclass(frozen=True)
class TaskTimeSlot(TimeSlot):
    """A time slot occupied by a titled task."""

    title: str

    @classmethod
    def from_(
        cls, title: str, starts_at: datetime, ends_at: datetime
    ) -> TaskTimeSlot:
        """Build a task slot named *title* between *starts_at* and *ends_at*."""
        return cls(
            title=title,
            type=TimeSlotType.task,
            starts_at=starts_at,
            ends_at=ends_at,
        )
| {
"content_hash": "1c58f2324375cec594bc93d83bcccca0",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 66,
"avg_line_length": 22.792207792207794,
"alnum_prop": 0.6210826210826211,
"repo_name": "m-ober/byceps",
"id": "c6a116a9d976dda738b316c4d8f5fe0d6ac53973",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/services/orga_presence/transfer/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
from openerp.modules.registry import RegistryManager
def migrate(cr, version):
    """Post-migration step: re-apply tax tags on existing taxes for this database."""
    # Local import mirrors the original; the helper lives in the account addon.
    from openerp.addons.account.models.chart_template import migrate_tags_on_taxes
    migrate_tags_on_taxes(cr, RegistryManager.get(cr.dbname))
| {
"content_hash": "1265c10282aa155a905d727ef72372a4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 82,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.78,
"repo_name": "vileopratama/vitech",
"id": "a37770d2c1c8db1cbf8b121823c2bbcd05fd4aa9",
"size": "250",
"binary": false,
"copies": "88",
"ref": "refs/heads/master",
"path": "src/addons/l10n_nl/migrations/2.0/post-migrate_tags_on_taxes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from django.db import transaction
from backend0bit.models import StaticPage
@transaction.atomic
def set_staticpage_order(order):
    """Persist *order* (a list of StaticPage ids) as 0-based ``order`` values.

    Runs in a single transaction; validates the list and first shifts all
    existing orders out of the way to avoid collisions while renumbering.
    """
    _sanitize_order_list(order)
    _set_safe_order_values()
    for position, page_id in enumerate(order):
        page = StaticPage.objects.get(id=page_id)
        page.order = position
        page.save()
def _sanitize_order_list(order):
    """Raise ReorderException unless *order* covers every existing page id."""
    if len(order) != StaticPage.objects.count():
        raise ReorderException("Incorrect number of orders")
    for page in StaticPage.objects.all().values("id"):
        if page["id"] not in order:
            raise ReorderException('Id ' + str(page["id"]) + ' not in passed order')
def _set_safe_order_values():
    """Shift every page's order above the current maximum so renumbering cannot collide."""
    offset = StaticPage.get_max_order_or_zero() + 1
    for page in StaticPage.objects.all():
        page.order += offset
        page.save()
class ReorderException(Exception):
    """Raised when a requested page ordering is invalid.

    Fix: inherits from Exception (not BaseException) so generic
    ``except Exception`` handlers and framework error middleware treat it
    as an ordinary application error; existing ``except ReorderException``
    handlers are unaffected.
    """

    cause = None  # class-level default kept for backward compatibility

    def __init__(self, cause):
        # Pass the message to Exception so str(err) is meaningful.
        super().__init__(cause)
        self.cause = cause

    def get_cause(self):
        """Return the human-readable reason for the failure."""
        return self.cause
| {
"content_hash": "9f334bec97b5ce7aa405bd84b31c7b30",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 24.785714285714285,
"alnum_prop": 0.6282420749279539,
"repo_name": "piotrb5e3/ComfyMS-backend",
"id": "303b516cadd06a84114094826211bf0ffe3e8e16",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend0bit/reorder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43788"
}
],
"symlink_target": ""
} |
import pickle, sys
# Loading bar for progress on the corpus
# http://stackoverflow.com/questions/2122385/dynamic-terminal-printing-with-python
# Print iterations progress
def printProgress(iteration, total, prefix='', suffix='', decimals=2, barLength=100):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    # Expressions kept in the original operand order so rounding is unchanged.
    filled = int(round(barLength * iteration / float(total)))
    percent = round(100.00 * (iteration / float(total)), decimals)
    progress = '#' * filled + '-' * (barLength - filled)
    # '\r' rewinds to the start of the line so the bar redraws in place.
    sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, progress, percent, '%', suffix))
    sys.stdout.flush()
    if iteration == total:
        print("\n")
#http://stackoverflow.com/questions/5306729/how-do-markov-chain-chatbots-work
#lukebot-trainer.py
# Python 2 script: builds a word -> following-words lookup table ("lexicon")
# from a text corpus and pickles it for later Markov-chain text generation.
corpus = raw_input('enter a corpus: ')
corpus_name = 'lexicons/lexicon-' + corpus
filename = 'books/' + corpus + '.txt'
b = open(filename)
text = []
# Flatten the corpus file into one list of whitespace-separated words.
for line in b:
    for word in line.split():
        text.append (word)
b.close()
# Unique words; set() means iteration order here is arbitrary.
textset = list(set(text))
follow = {}
textset_length = len(textset)
for l in range(textset_length):
    working = []
    check = textset[l]
    printProgress(l, textset_length, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    # Collect every word that directly follows `check`, skipping occurrences
    # whose last character is punctuation (treated as a clause boundary).
    for w in range(len(text)-1):
        if check == text[w] and text[w][-1] not in '(),.?!':
            working.append(str(text[w+1]))
    follow[check] = working
# Pickle with protocol 2 (binary) for compact output readable by Python 2.
a = open(corpus_name,'wb')
pickle.dump(follow,a,2)
a.close()
print ''
print corpus_name + ' complete!'
"content_hash": "c6234dcb76d31dc73a5302dac0e031a0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 95,
"avg_line_length": 37.68627450980392,
"alnum_prop": 0.627471383975026,
"repo_name": "jbobrow/pangramGenerator",
"id": "de839ad2c814f88464f899777e125c06aae6376e",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pangram-trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "233"
},
{
"name": "HTML",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "5515"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.views.decorators.csrf import csrf_exempt
from django.conf.urls import url
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotAllowed
import json
import logging
import time
from ..exceptions import JSONRPCInvalidRequestException
from ..jsonrpc import JSONRPCRequest
from ..manager import JSONRPCResponseManager
from ..utils import DatetimeDecimalEncoder
from ..dispatcher import Dispatcher
logger = logging.getLogger(__name__)
def response_serialize(obj):
""" Serializes response's data object to JSON. """
return json.dumps(obj, cls=DatetimeDecimalEncoder)
class JSONRPCAPI(object):
def __init__(self, dispatcher=None):
self.dispatcher = dispatcher if dispatcher is not None \
else Dispatcher()
@property
def urls(self):
urls = [
url(r'^$', self.jsonrpc, name='endpoint'),
]
if getattr(settings, 'JSONRPC_MAP_VIEW_ENABLED', settings.DEBUG):
urls.append(
url(r'^map$', self.jsonrpc_map, name='map')
)
return urls
@csrf_exempt
def jsonrpc(self, request):
""" JSON-RPC 2.0 handler."""
def inject_request(jsonrpc_request):
jsonrpc_request.params = jsonrpc_request.params or {}
if isinstance(jsonrpc_request.params, dict):
jsonrpc_request.params.update(request=request)
if isinstance(jsonrpc_request.params, list):
jsonrpc_request.params.insert(0, request)
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
request_str = request.body.decode('utf8')
try:
jsonrpc_request = JSONRPCRequest.from_json(request_str)
except (TypeError, ValueError, JSONRPCInvalidRequestException):
response = JSONRPCResponseManager.handle(
request_str, self.dispatcher)
else:
requests = getattr(jsonrpc_request, 'requests', [jsonrpc_request])
for jsonrpc_req in requests:
inject_request(jsonrpc_req)
response = JSONRPCResponseManager.handle_request(
jsonrpc_request, self.dispatcher)
if response:
response.serialize = response_serialize
response = response.json
return HttpResponse(response, content_type="application/json")
def jsonrpc_map(self, request):
""" Map of json-rpc available calls.
:return str:
"""
result = "<h1>JSON-RPC map</h1><pre>{0}</pre>".format("\n\n".join([
"{0}: {1}".format(fname, f.__doc__)
for fname, f in self.dispatcher.items()
]))
return HttpResponse(result)
api = JSONRPCAPI()
| {
"content_hash": "7a9d2225909b1e25170a06dd37e4ff8a",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 30.736263736263737,
"alnum_prop": 0.6313907758312478,
"repo_name": "pavlov99/json-rpc",
"id": "0708e46c780214809677dcdc7868298802117a16",
"size": "2797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonrpc/backend/django.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "469"
},
{
"name": "Python",
"bytes": "138537"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import os
import subprocess
import sys
################################################################################
# install_dependencies.py
#
# Download and install Basset dependencies.
################################################################################
################################################################################
# main
################################################################################
def main():
    """Install Basset's Torch/Lua dependencies via luarocks."""
    usage = 'usage: %prog [options] arg'
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()

    # Confirm luarocks is on the PATH. `which` exits non-zero when the binary
    # is missing, which makes check_output raise CalledProcessError, so the
    # original `== ''` test could never fire; catch the exception instead.
    try:
        luarocks_which = subprocess.check_output('which luarocks', shell=True)
    except subprocess.CalledProcessError:
        luarocks_which = b''
    if not luarocks_which.strip():
        sys.stderr.write('Please install Torch7 first.\n')
        exit(1)

    ############################################################
    # luarocks database
    ############################################################
    # Install each rock from the public luarocks repository, in order.
    for rock in ['luafilesystem', 'dpnn', 'inn', 'dp']:
        cmd = 'luarocks install %s' % rock
        subprocess.call(cmd, shell=True)

    ############################################################
    # luarocks from github
    ############################################################
    os.chdir('src')

    # install torch-hdf5 from source
    cmd = 'git clone https://github.com/davek44/torch-hdf5.git'
    subprocess.call(cmd, shell=True)
    os.chdir('torch-hdf5')
    cmd = 'luarocks make'
    subprocess.call(cmd, shell=True)
    os.chdir('..')

    # back to the repository root
    os.chdir('..')
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
    # Run the installer only when executed directly, not on import.
    main()
| {
"content_hash": "b025ae3e31fe43329bfe70eb334982c6",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 27.680555555555557,
"alnum_prop": 0.4014049172102358,
"repo_name": "davek44/Basset",
"id": "a9089aee5a7761c74f86ac4549db7f7f345abaef",
"size": "2015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install_dependencies.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3545083"
},
{
"name": "Lua",
"bytes": "98951"
},
{
"name": "Python",
"bytes": "308400"
},
{
"name": "Shell",
"bytes": "668"
}
],
"symlink_target": ""
} |
# URI 1048: read a salary and apply a percentage raise that depends on the
# bracket the salary falls into; the final print (below) reports the new
# salary, the amount gained, and the raise as a whole-number percentage.
salary = float(input())

# (inclusive upper bound, raise rate) brackets in ascending order; salaries
# above the last bound get the default 4% raise.  This replaces five
# duplicated if/elif arms that each recomputed earned/salary, and it also
# gives non-positive input a defined rate instead of a NameError at print.
brackets = [(400.0, 0.15), (800.0, 0.12), (1200.0, 0.10), (2000.0, 0.07)]

percent = 0.04
for upper_bound, rate in brackets:
    if salary <= upper_bound:
        percent = rate
        break

earned = salary * percent   # amount gained by the raise
salary = salary + earned    # new salary after the raise
percent = percent * 100     # rate as a whole-number percentage for output
print("Novo salario: {0:.2f}\nReajuste ganho: {1:.2f}\nEm percentual: {2:.0f} %".format(salary,earned,percent)) | {
"content_hash": "891f3d4e8d734a657c5aa7f56060f4ba",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 111,
"avg_line_length": 29.043478260869566,
"alnum_prop": 0.6856287425149701,
"repo_name": "henrywm/URI",
"id": "e17a1b5bb60fd17ffc67dc7737b20bd26eab55bd",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/beginner/1048.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "188"
},
{
"name": "Java",
"bytes": "11630"
},
{
"name": "Python",
"bytes": "18710"
}
],
"symlink_target": ""
} |
def f(x):  # fixture: while-loops with variable, falsy and truthy constant conditions
    while x:
        print (x)
    while 0:  # constant-false: body is deliberately unreachable
        asgn = unreachable()
    while False:  # constant-false: body is deliberately unreachable
        return unreachable()
    while 7:  # constant-true: body is reachable (and loops forever)
        print(x)
def g(x):  # fixture: constant-false `if` leaves only the else branch live
    if False:
        unreachable()
    else:
        reachable()
    print(x)
    return 5
for x in first_unreachable_stmt():  # module-level statements used by the fixture
    raise more_unreachable()
def h(a,b):  # fixture: constant-true `if` leaves the else branch dead
    if True:
        reachable()
    else:
        unreachable()
def intish(n):  # regression: the `except` handler must be treated as reachable
    """Regression test - the 'except' statement is reachable"""
    test = 0
    try:
        test += n  # may raise TypeError for non-numeric n
    except:
        return False
    return True
#ODASA-2033
def unexpected_return_result():  # regression: return from an except handler
    try:
        assert 0, "async.Return with argument inside async.generator function"
    except AssertionError:
        return (None, sys.exc_info())
#Yield may raise -- E.g. in a context manager
def handle_yield_exception():  # regression: except around a yield is reachable
    resources = get_resources()
    try:
        yield resources  # the consumer can throw() into the generator here
    except Exception as exc:
        log(exc)
#ODASA-ODASA-3790
def isnt_iter(seq):  # regression: code after a try/for is reachable
    got_exc = False
    try:
        for x in seq:  # raises TypeError when seq is not iterable
            pass
    except Exception:
        got_exc = True
    return got_exc
class Odasa3686(object):  # regression: return inside try, followed by except
    def is_iterable(self, obj):
        #special case string
        if not object:  # NOTE(review): tests the builtin `object` (always truthy), not `obj`
            return False
        if isinstance(obj, str):
            return False
        #Test for iterability
        try:
            None in obj  # raises TypeError for non-containers
            return True
        except TypeError:
            return False
def odasa5387():  # regression: NameError reachability depends on the name
    try:
        str
    except NameError: # Unreachable 'str' is always defined
        pass
    try:
        unicode
    except NameError: # Reachable as 'unicode' is undefined in Python 3
        pass
#This is OK as type-hints require it
if False:
    from typing import Any  # typing-only import guard; deliberately dead
def foo():
    # type: () -> None
    return
#ODASA-6483
def deliberate_name_error(cond):  # regression: NameError used as control flow
    if cond:
        x = 0
    try:
        x  # raises NameError when cond was falsy
    except NameError:
        x = 1
    return x
#ODASA-6783
def emtpy_gen():  # generator that never yields; its body is deliberately dead
    if False:
        yield None
def foo(x):  # redefines earlier foo; constant-true `if` with live nested tests
    if True:
        if x < 3:
            print(x, "< 3")
        if x == 0:
            print(x, "== 0")
# Unreachable catch-all case
def unreachable_catch_all_assert_false(x):  # else arm dead for real numbers (NaN aside)
    if x < 0:
        return "negative"
    elif x >= 0:
        return "positive"
    else:
        assert False, x
def unreachable_catch_all_raise(x):  # else arm dead for real numbers (NaN aside)
    if x < 0:
        pass
    elif x >= 0:
        pass
    else:
        raise ValueError(x)
| {
"content_hash": "47018ce55e0b4e170169552520cede5f",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 78,
"avg_line_length": 18.154411764705884,
"alnum_prop": 0.5455650060753341,
"repo_name": "github/codeql",
"id": "493dcc24dd15d8a6af528f1441743dd3ff2e634c",
"size": "2489",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/test/query-tests/Statements/unreachable/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the tasks app.

    Adds the boolean ``Task.needs_motivation`` flag, alters the
    ``Task.deadline_to_apply`` field definition, and moves
    ``TaskMember.resume`` onto a PrivateFileField upload path.
    """
    dependencies = [
        ('tasks', '0024_merge_20170529_1436'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='needs_motivation',
            field=models.BooleanField(default=False, help_text='Indicates if a task candidate needs to submit a motivation', verbose_name='Needs motivation'),
        ),
        migrations.AlterField(
            model_name='task',
            name='deadline_to_apply',
            field=models.DateTimeField(help_text='Deadline to apply', verbose_name='Deadline to apply'),
        ),
        migrations.AlterField(
            model_name='taskmember',
            name='resume',
            # NOTE(review): repeated 'private/' path segments come from the
            # generator; kept as-is since migrations must stay stable.
            field=bluebottle.utils.fields.PrivateFileField(blank=True, upload_to=b'private/private/private/task-members/resume'),
        ),
    ]
| {
"content_hash": "50fe59eca2c55ef1b52996d7e35b2e9f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 158,
"avg_line_length": 33.58620689655172,
"alnum_prop": 0.6293634496919918,
"repo_name": "onepercentclub/bluebottle",
"id": "f0dd924ed9fdb4964598ce74209d0aadc87f1398",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/tasks/migrations/0025_auto_20170601_1540.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
"""
Bibliographic database backed by the ADS API.
The intention is to have classes that the API of BibTexDB and BibTexPub.
.. todo:: Add a caching mechanism; maybe with MongoDB?
2014-12-07 - Created by Jonathan Sick
"""
import re
import ads
# import texutils
from .base import BasePub
# Matches modern (NNNN.NNNN[NN]) and legacy (astro-ph/NNNNNNN) arXiv IDs.
# Raw string fixes the invalid '\d' / '\-' escape sequences of the original
# (a DeprecationWarning, and a SyntaxError in future Python versions).
ARXIV_PATTERN = re.compile(r'(\d{4,6}\.\d{4,6}|astro-ph/\d{7})')
class ADSBibDB(object):
    """Bibliographic database derived from the NASA/SAO ADS API.

    Parameters
    ----------
    cache : :class:`starlit.bib.adscache.ADSCacheDB`
        Optional cache instance; when provided, publications are served
        from (and inserted into) the cache around ADS API queries.
    """
    def __init__(self, cache=None):
        super(ADSBibDB, self).__init__()
        self._ads_cache = cache

    def __getitem__(self, bibcode):
        """Return the :class:`ADSPub` for *bibcode*, via cache when possible."""
        cache = self._ads_cache
        # Serve straight from the cache on a hit.
        if cache is not None and bibcode in cache:
            return cache[bibcode]
        # Cache miss (or no cache at all): query the ADS API.
        publication = ADSPub(ads.query(query=bibcode).next())
        if cache is not None:
            cache.insert(publication)
        return publication
class ADSPub(BasePub):
    """A publication record obtained from the NASA/SAO ADS API.

    Parameters
    ----------
    ads_article : :class:`ads.Article`
        The article instance obtained from ``ads``.
    """
    def __init__(self, ads_article):
        super(ADSPub, self).__init__()
        self._article = ads_article

    @property
    def authors(self):
        """Parsed list of authors; each author is a ``(Last, First)`` tuple.

        Author strings without exactly one comma are skipped.
        """
        authors = []
        for author in self._article.author:
            try:
                a_last, a_first = author.split(',')
            except ValueError:
                # Only malformed names should be skipped.  This was a bare
                # ``except``, which also swallowed unrelated errors
                # (including KeyboardInterrupt).
                continue
            else:
                authors.append((a_last.strip(), a_first.strip()))
        return authors

    @property
    def title(self):
        """Title (unicode), or ``None`` if the article has no title field."""
        # ADS returns the title as a single-element list.
        try:
            return self._article['title'][0]
        except KeyError:
            return None

    @property
    def abstract(self):
        """Abstract text (unicode)."""
        return self._article.abstract

    @property
    def bibcode(self):
        """The ADS bibcode for this publication."""
        return self._article.bibcode

    @property
    def references(self):
        """Publications referenced by this publication."""
        return [ADSPub(ref) for ref in self._article.references]

    @property
    def reference_bibcodes(self):
        """Bibcodes of the publications this one references."""
        return [pub.bibcode for pub in self._article.references]

    @property
    def citations(self):
        """Publications that cite this publication."""
        return [ADSPub(ref) for ref in self._article.citations]

    @property
    def citation_bibcodes(self):
        """Bibcodes of the publications citing this one."""
        return [pub.bibcode for pub in self._article.citations]

    @property
    def doi(self):
        """DOI for paper (first entry of the article's doi list)."""
        return self._article.doi[0]

    @property
    def arxiv_id(self):
        """Arxiv identifier for article, or ``None`` when none is listed."""
        # Scan the identifier fields for something shaped like an arxiv ID.
        for ident in self._article.identifier:
            arxiv_matches = ARXIV_PATTERN.findall(ident)
            if len(arxiv_matches) == 1:
                return arxiv_matches[0]
        return None
| {
"content_hash": "01c1dd52892872261a3f94eba06168bb",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 26.3953488372093,
"alnum_prop": 0.5791483113069016,
"repo_name": "jonathansick/starlit",
"id": "6b43c640bf10d9e08417a8457ed0b3efe263195f",
"size": "3445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starlit/bib/adsdb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "227291"
}
],
"symlink_target": ""
} |
from migrate.versioning.shell import main

if __name__ == '__main__':
    # The generated call previously embedded a repr() of the six module as a
    # keyword argument (six=<module 'six' from '...'>), which is not valid
    # Python and made this script crash with a SyntaxError.  Invoke the
    # sqlalchemy-migrate versioning shell without it.
    main()
| {
"content_hash": "5a7c76290cad361bcb184b2dc9fc7f33",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 124,
"avg_line_length": 48.75,
"alnum_prop": 0.7076923076923077,
"repo_name": "vivianli32/TravelConnect",
"id": "38065bc25d1ef4b5d53764edf1f395f7c75b1e2f",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_repository/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "11367"
},
{
"name": "JavaScript",
"bytes": "22141"
},
{
"name": "Python",
"bytes": "10568185"
},
{
"name": "Shell",
"bytes": "3712"
}
],
"symlink_target": ""
} |
import datetime
from celery.schedules import crontab
from celery.task import periodic_task
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.users.models import CommCareUser
from custom.ilsgateway.models import SupplyPointStatusValues, SupplyPointStatusTypes
from custom.ilsgateway.tanzania.reminders import REMINDER_STOCKONHAND, update_statuses
from casexml.apps.stock.models import StockTransaction
from dimagi.utils.dates import get_business_day_of_month
from custom.ilsgateway.utils import send_for_all_domains, send_translated_message
import settings
def send_soh_reminder(domain, date, test_list=None):
    """Send stock-on-hand reminder messages to facility users in *domain*.

    Only active users attached to a FACILITY location whose supply point has
    no 'stockonhand' stock transaction reported since *date* are messaged.
    When *test_list* is given, those users are messaged instead and no
    supply point statuses are recorded.
    """
    sp_ids = set()
    users = CommCareUser.by_domain(domain) if not test_list else test_list
    for user in users:
        if user.is_active and user.location and user.location.location_type == 'FACILITY':
            sp = SupplyPointCase.get_by_location(user.location)
            # Skip facilities that already reported stock on hand since `date`.
            if sp and not StockTransaction.objects.filter(case_id=sp._id, report__date__gte=date,
                                                          type='stockonhand').exists():
                result = send_translated_message(user, REMINDER_STOCKONHAND)
                # Only real (non-test) successful sends are recorded below.
                if not test_list and result:
                    sp_ids.add(sp._id)
    update_statuses(sp_ids, SupplyPointStatusTypes.SOH_FACILITY, SupplyPointStatusValues.REMINDER_SENT)
def get_last_and_nth_business_day(date, n):
    """Return a pair of business days around *date*'s month.

    The first element is the last business day of the previous month; the
    second is the *n*-th business day of *date*'s own month.
    """
    # Day 1 of date's month minus one day always lands in the previous month.
    first_of_month = datetime.datetime(date.year, date.month, 1)
    previous_month = first_of_month - datetime.timedelta(days=1)
    previous_month_last_day = get_business_day_of_month(
        month=previous_month.month, year=previous_month.year, count=-1
    )
    nth_business_day = get_business_day_of_month(
        month=date.month, year=date.year, count=n
    )
    return previous_month_last_day, nth_business_day
@periodic_task(run_every=crontab(day_of_month="26-31", hour=14, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def first_soh_task():
    """First SOH reminder: runs on the last business day of the month."""
    now = datetime.datetime.utcnow()
    last_business_day = get_business_day_of_month(month=now.month, year=now.year, count=-1)
    # The crontab fires on days 26-31; only act on the actual last business day.
    if now.day == last_business_day.day:
        send_for_all_domains(last_business_day, send_soh_reminder)
@periodic_task(run_every=crontab(day_of_month="1-3", hour=9, minute=0), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def second_soh_task():
    """Second SOH reminder: runs on the first business day of the month."""
    now = datetime.datetime.utcnow()
    last_month_last_day, first_business_day = get_last_and_nth_business_day(now, 1)
    # The crontab fires on days 1-3; only act on the actual first business day.
    if now.day == first_business_day.day:
        send_for_all_domains(last_month_last_day, send_soh_reminder)
@periodic_task(run_every=crontab(day_of_month="5-7", hour=8, minute=15), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
def third_soh_task():
    """Third SOH reminder: runs on the fifth business day of the month."""
    now = datetime.datetime.utcnow()
    last_month_last_day, fifth_business_day = get_last_and_nth_business_day(now, 5)
    # The crontab fires on days 5-7; only act on the actual fifth business day.
    if now.day == fifth_business_day.day:
        send_for_all_domains(last_month_last_day, send_soh_reminder)
| {
"content_hash": "97a575ea21e51773494bf72772cc02a6",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 134,
"avg_line_length": 51.19298245614035,
"alnum_prop": 0.7145305003427005,
"repo_name": "puttarajubr/commcare-hq",
"id": "cb897472ed975df1fb70dd9ea3c4e4c1f4d5c40e",
"size": "2918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/ilsgateway/tanzania/reminders/stockonhand.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
"""
A script that generates the database.
"""
################################################################################
class JsonJit(object):
    """
    JsonJit is a class for Just In Time instantiation of JSON resources.
    The __lazy__ method downloads the JSON resource from the server.
    But the __lazy__ method is called only when the first attribute is either get or set.
    You can use it like this:
    assemblies = JsonJit('http://bbcftools.vital-it.ch/genrep/assemblies.json', 'assembly')
    :param url: Location of the JSON to load
    :param list_key: Optional dictionary key to unpack the elements of JSON with
    """
    def __init__(self, url, list_key=None):
        """Save the passed parameters"""
        # Write through __dict__ so the custom __setattr__ below does not
        # trigger a premature __lazy__ fetch.
        self.__dict__['url'] = url
        self.__dict__['list_key'] = list_key
        self.__dict__['obj'] = None
    def __lazy__(self):
        """Fetch resource and instantiate object."""
        import json, urllib2
        try:
            content = urllib2.urlopen(self.url).read()
            # Create the child object #
            self.__dict__['obj'] = json.loads(content)
        except urllib2.URLError as err:
            # NOTE(review): on failure the exception object itself becomes
            # `obj`; any later iteration/unpacking of it will then fail.
            self.__dict__['obj'] = err
        # Unpack the child object #
        if self.list_key:
            for num, item in enumerate(self.obj):
                self.obj[num] = item[self.list_key]
    def get(self, value):
        """Retrieve an item from the JSON
        by searching all attributes of all items
        for *value*"""
        if not self.obj: self.__lazy__()
        for x in self.obj:
            if [k for k,v in x.items() if v == value]: return x
    def filter(self, key, value):
        """Retrieve the items from the JSON
        whose *key* attribute equals *value*"""
        if not self.obj: self.__lazy__()
        return [x for x in self.obj for k,v in x.items() if v == value and k == key]
    def by(self, name):
        """Return a list of attributes present
        in every element of the JSON"""
        if not self.obj: self.__lazy__()
        # NOTE(review): due to operator precedence this returns each value
        # unchanged when truthy; the encode/isinstance tail looks like a
        # leftover ascii-coercion attempt and is probably never meaningful.
        return [x or x.encode('ascii') and isinstance(x, basestring) for x in [x.get(name) for x in self.obj]]
    def make(self, name):
        """Return an object whose attributes are the
        keys of the element's dictionary"""
        if not self.obj: self.__lazy__()
        class JsonObject(object): pass
        obj = JsonObject()
        obj.__dict__.update(self.get(name))
        return obj
    def __getattr__(self, name):
        """Method called when an attribute is
        not found in __dict__."""
        if not self.obj: self.__lazy__()
        # Search in the child object #
        try: return getattr(self.obj, name)
        except AttributeError:
            # Search in the parent object #
            if name in self.__dict__: return self.__dict__[name]
            else: return self.make(name)
    def __setattr__(self, name, value):
        """Method called when an attribute is
        assigned to."""
        if not self.obj: self.__lazy__()
        try: setattr(self.obj, name, value)
        except AttributeError: self.__dict__[name] = value
    def __len__(self):
        if not self.obj: self.__lazy__()
        return self.obj.__len__()
    def __iter__(self):
        if not self.obj: self.__lazy__()
        return self.obj.__iter__()
    def __repr__(self):
        if not self.obj: self.__lazy__()
        return self.obj.__repr__()
    def __getitem__(self, key):
        if not self.obj: self.__lazy__()
        return self.obj[key]
    def __setitem__(self, key, item):
        if not self.obj: self.__lazy__()
        self.obj[key] = item
    def __delitem__(self, key):
        if not self.obj: self.__lazy__()
        del self.obj[key]
################################################################################
# Constants #
# Base URL of the GenRep service every resource below is fetched from.
url = "http://bbcftools.vital-it.ch/genrep/"
# Expose base resources #
# Each JsonJit defers its HTTP fetch until first use (see class above).
organisms = JsonJit(url + "organisms.json", 'organism')
genomes = JsonJit(url + "genomes.json", 'genome')
nr_assemblies = JsonJit(url + "nr_assemblies.json", 'nr_assembly')
assemblies = JsonJit(url + "assemblies.json", 'assembly')
sources = JsonJit(url + "sources.json", 'source')
chromosomes = JsonJit(url + "chromosomes.json", 'chromosome')
################################################################################
# Make a db #
# Build a fresh sqlite database: one `assemblies` table plus one table per
# assembly listing its chromosomes.  (Python 2 script: uses xrange.)
import os, sqlite3
path = 'genomes.db'
if os.path.exists(path): os.remove(path)
connection = sqlite3.connect(path)
cursor = connection.cursor()
# Make the assemblies table #
col_names = assemblies[0].keys()
col_string = '(' + ','.join(col_names) + ')'
question_marks = '(' + ','.join(['?' for x in xrange(len(col_names))]) + ')'
cursor.execute("CREATE table assemblies " + col_string)
command = "INSERT into assemblies " + col_string + ' values ' + question_marks
generator = ([a[k] for k in col_names] for a in assemblies)
cursor.executemany(command, generator)
# Make one table per assembly #
base_keys = chromosomes[0].keys()
base_keys.remove('chr_names')
col_names = ['label'] + base_keys
col_string = '(' + ','.join(col_names) + ')'
question_marks = '(' + ','.join(['?' for x in xrange(len(col_names))]) + ')'
for a in assemblies:
    # Collect all chromosomes belonging to this assembly #
    chrs = []
    for ch in chromosomes:
        for data in ch['chr_names']:
            if data['chr_name']['assembly_id'] == a['id']:
                chrs.append([data['chr_name']['value']] + [ch[k] for k in base_keys])
    # Write it #
    # NOTE(review): table names are interpolated from remote JSON; acceptable
    # for this trusted source, but not safe for untrusted input.
    cursor.execute("CREATE table '%s' %s" % (a['name'], col_string))
    command = ("INSERT into '%s' " + col_string + ' values ' + question_marks) % a['name']
    cursor.executemany(command, chrs)
# Commit the final batch of inserts and close the connection.  Without an
# explicit commit, the rows from the last executemany() were silently rolled
# back when the script exited.
connection.commit()
connection.close()
| {
"content_hash": "552ebd0de109ef37c0636aa02467cc94",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 110,
"avg_line_length": 36.782051282051285,
"alnum_prop": 0.554897176716626,
"repo_name": "xapple/genomes",
"id": "c0eddb30e90ed363e835ddbe807ca6e4314ff0c6",
"size": "5738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/make_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4584"
},
{
"name": "Python",
"bytes": "19378"
}
],
"symlink_target": ""
} |
"""
WSGI config for Dsystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Dsystem.settings")
application = get_wsgi_application()
| {
"content_hash": "b268f8b56a59527c010da66785a809d8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.4375,
"alnum_prop": 0.7698209718670077,
"repo_name": "yangfeiffei/Dsystem",
"id": "3ca7e0b1efd8ad3e1b4c575656f3dbb90597ad55",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dsystem/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21061"
},
{
"name": "Python",
"bytes": "41507"
}
],
"symlink_target": ""
} |
from typing import Optional
from models.PricewarsObject import PricewarsObject
from models import Product
class Offer(PricewarsObject):
    """A marketplace offer for a product.

    ``shipping_time`` falls back to a standard delivery of 3 when no
    (truthy) mapping is supplied.
    """
    def __init__(self, offer_id=-1, uid=-1, product_id=-1, quality=0, merchant_id: Optional[str] = None, amount=1,
                 price=0.0, shipping_time=None, prime=False, signature=''):
        self.offer_id = offer_id
        self.uid = uid
        self.product_id = product_id
        self.quality = quality
        self.merchant_id = merchant_id
        self.amount = amount
        self.price = price
        # A falsy shipping_time (None or {}) selects the default mapping.
        self.shipping_time = {'standard': 3} if not shipping_time else shipping_time
        self.prime = prime
        self.signature = signature

    @staticmethod
    def from_product(product: Product, price: float, shipping_time: dict, prime: bool = False) -> 'Offer':
        """Build an Offer from a Product plus pricing/shipping details."""
        offer_fields = dict(
            uid=product.uid,
            product_id=product.product_id,
            quality=product.quality,
            amount=product.quantity,
            price=price,
            shipping_time=shipping_time,
            prime=prime,
            signature=product.signature,
        )
        return Offer(**offer_fields)
| {
"content_hash": "5c6e8f1ad05673b3c9954ad42225d585",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 114,
"avg_line_length": 34.65625,
"alnum_prop": 0.6086564472497745,
"repo_name": "hpi-epic/pricewars-merchant",
"id": "d82db75af7b6665f94ce57ef7cdb2b8f3eb1474c",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/Offer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "143"
},
{
"name": "Python",
"bytes": "25647"
}
],
"symlink_target": ""
} |
from ..cw_model import CWModel
class Opportunity(CWModel):
    """ConnectWise sales Opportunity model.

    Fields are initialized to None and populated from ``json_dict`` by the
    CWModel base initializer.  The trailing comments give the ConnectWise
    schema type for each field; the leading ``*`` appears to mark fields
    required by the API -- confirm against the ConnectWise schema docs.
    """
    def __init__(self, json_dict=None):
        self.id = None  # (Integer)
        self.name = None  # *(String(100))
        self.expectedCloseDate = None  # (String)
        self.type = None  # (OpportunityTypeReference)
        self.stage = None  # (OpportunityStageReference)
        self.status = None  # (OpportunityStatusReference)
        self.priority = None  # (OpportunityPriorityReference)
        self.notes = None  # (String)
        self.probability = None  # (OpportunityProbabilityReference)
        self.source = None  # (String(50))
        self.rating = None  # (OpportunityRatingReference)
        self.campaign = None  # (CampaignReference)
        self.primarySalesRep = None  # *(MemberReference)
        self.secondarySalesRep = None  # (MemberReference)
        self.locationId = None  # (Integer)
        self.businessUnitId = None  # (Integer)
        self.company = None  # *(CompanyReference)
        self.contact = None  # *(ContactReference)
        self.site = None  # *(SiteReference)
        self.customerPO = None  # (String(25))
        self.pipelineChangeDate = None  # (String)
        self.dateBecameLead = None  # (String)
        self.closedDate = None  # (String)
        self.closedBy = None  # (MemberReference)
        self.totalSalesTax = None  # (Number)
        self.shipToCompany = None  # (CompanyReference)
        self.shipToContact = None  # (ContactReference)
        self.shipToSite = None  # (SiteReference)
        self.billToCompany = None  # (CompanyReference)
        self.billToContact = None  # (ContactReference)
        self.billToSite = None  # (SiteReference)
        self.billingTerms = None  # (BillingTermsReference)
        self.taxCode = None  # (TaxCodeReference)
        self._info = None  # (Metadata)
        self.customFields = None  # (CustomFieldValue[])
        # initialize object with json dict
        super().__init__(json_dict)
| {
"content_hash": "c2a72c7dfc1e2c6921ebb80af3dd07b4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 45.93181818181818,
"alnum_prop": 0.6091044037605146,
"repo_name": "joshuamsmith/ConnectPyse",
"id": "cfd57908d99051b70ff99858db47ecce63d101c9",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sales/opportunity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158372"
}
],
"symlink_target": ""
} |
"""
rohmu - content encryption
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import io
import logging
import os
import struct
import cryptography
import cryptography.hazmat.backends.openssl.backend
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.hashes import SHA1, SHA256
from cryptography.hazmat.primitives.hmac import HMAC
from . import IO_BLOCK_SIZE
from .filewrap import FileWrap, Sink, Stream
if cryptography.__version__ < "1.6":
    # workaround for deadlock https://github.com/pyca/cryptography/issues/2911
    # NOTE(review): lexicographic string comparison; adequate for the 1.x/2.x
    # versions this targets, but not a general version comparison.
    cryptography.hazmat.backends.openssl.backend.activate_builtin_random()
# Magic prefix identifying rohmu-encrypted files.
FILEMAGIC = b"pghoa1"
AES_BLOCK_SIZE = 16
class EncryptorError(Exception):
    """Raised when encrypting or decrypting a stream fails."""
class Encryptor:
    """Streaming encryptor.

    Output layout: ``FILEMAGIC`` + 2-byte big-endian key length + RSA-OAEP
    encrypted session secrets (16-byte AES key, 16-byte CTR nonce, 32-byte
    HMAC key), followed by AES-CTR ciphertext and -- from finalize() -- a
    trailing HMAC-SHA256 over the whole ciphertext.
    """
    def __init__(self, rsa_public_key_pem):
        # Accept the recipient's PEM public key as either str or bytes.
        if not isinstance(rsa_public_key_pem, bytes):
            rsa_public_key_pem = rsa_public_key_pem.encode("ascii")
        self.rsa_public_key = serialization.load_pem_public_key(rsa_public_key_pem, backend=default_backend())
        # Created lazily on the first update() so empty input yields empty output.
        self.cipher = None
        self.authenticator = None
    def update(self, data):
        """Encrypt *data*; the first call also returns the file header."""
        ret = b""
        if self.cipher is None:
            # First chunk: generate fresh per-file session secrets...
            key = os.urandom(16)
            nonce = os.urandom(16)
            auth_key = os.urandom(32)
            self.cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=default_backend()).encryptor()
            self.authenticator = HMAC(auth_key, SHA256(), backend=default_backend())
            # ...and wrap them with the recipient's RSA public key (OAEP).
            pad = padding.OAEP(mgf=padding.MGF1(algorithm=SHA1()), algorithm=SHA1(), label=None)
            cipherkey = self.rsa_public_key.encrypt(key + nonce + auth_key, pad)
            ret = FILEMAGIC + struct.pack(">H", len(cipherkey)) + cipherkey
        cur = self.cipher.update(data)
        # Authenticate the ciphertext (encrypt-then-MAC).
        self.authenticator.update(cur)
        if ret:
            return ret + cur
        else:
            return cur
    def finalize(self):
        """Flush the cipher and append the 32-byte HMAC trailer."""
        if self.cipher is None:
            return b""  # empty plaintext input yields empty encrypted output
        ret = self.cipher.finalize()
        self.authenticator.update(ret)
        ret += self.authenticator.finalize()
        # Drop the state so a second finalize() returns b"".
        self.cipher = None
        self.authenticator = None
        return ret
class EncryptorFile(FileWrap):
    """File-like wrapper that encrypts everything written through it."""
    def __init__(self, next_fp, rsa_public_key_pem):
        super().__init__(next_fp)
        self.key = rsa_public_key_pem
        self.encryptor = Encryptor(self.key)
        self.offset = 0  # plaintext bytes accepted so far
        self.state = "OPEN"
    def flush(self):
        """Flush the underlying file; pending cipher state is unaffected."""
        self._check_not_closed()
        self.next_fp.flush()
    def close(self):
        """Write the encryption trailer and close the wrapper (idempotent)."""
        if self.state == "CLOSED":
            return
        # Flush the final cipher block and HMAC trailer before closing.
        final = self.encryptor.finalize()
        self.encryptor = None
        self.next_fp.write(final)
        super().close()  # presumably updates self.state -- see FileWrap
    def writable(self):
        """True if this stream supports writing"""
        self._check_not_closed()
        return True
    def write(self, data):
        """Encrypt and write the given bytes; return the plaintext length."""
        self._check_not_closed()
        if not data:
            return 0
        enc_data = self.encryptor.update(data)
        self.next_fp.write(enc_data)
        self.offset += len(data)
        return len(data)
class EncryptorStream(Stream):
    """Non-seekable stream of data that adds encryption on top of given source stream"""

    def __init__(self, src_fp, rsa_public_key_pem):
        super().__init__(src_fp)
        self._encryptor = Encryptor(rsa_public_key_pem)

    def _process_chunk(self, data):
        # Encrypt each chunk as it passes through the stream.
        return self._encryptor.update(data)

    def _finalize(self):
        # Emit the cipher/HMAC trailer once the source is exhausted.
        return self._encryptor.finalize()
class Decryptor:
    """Streaming counterpart of Encryptor.

    Parses the FILEMAGIC header, RSA-decrypts the session key blob, then
    decrypts AES-CTR data while feeding an HMAC-SHA256 authenticator that
    is verified against the file footer in finalize().
    """

    def __init__(self, rsa_private_key_pem):
        if not isinstance(rsa_private_key_pem, bytes):
            rsa_private_key_pem = rsa_private_key_pem.encode("ascii")
        self.rsa_private_key = serialization.load_pem_private_key(
            data=rsa_private_key_pem, password=None, backend=default_backend()
        )
        self.cipher = None
        self.authenticator = None
        # Length of the RSA-encrypted key blob, learned from the 8-byte prefix.
        self._cipher_key_len = None
        # Total header size (8-byte prefix + key blob); set by process_header.
        self._header_size = None
        # HMAC-SHA256 digest appended at the end of the file.
        self._footer_size = 32

    def expected_header_bytes(self):
        """Number of header bytes still required; 0 once the header is complete."""
        if self._header_size is not None:
            return 0
        # First the 8-byte magic+length prefix, then the key blob itself.
        return self._cipher_key_len or 8

    def header_size(self):
        return self._header_size

    def footer_size(self):
        return self._footer_size

    def process_header(self, data):
        """Consume one header chunk: the magic/length prefix, then the key blob."""
        if self._cipher_key_len is None:
            if data[0:6] != FILEMAGIC:
                raise EncryptorError("Invalid magic bytes")
            self._cipher_key_len = struct.unpack(">H", data[6:8])[0]
        else:
            pad = padding.OAEP(mgf=padding.MGF1(algorithm=SHA1()), algorithm=SHA1(), label=None)
            try:
                plainkey = self.rsa_private_key.decrypt(data, pad)
            except AssertionError:
                raise EncryptorError("Decrypting key data failed")
            if len(plainkey) != 64:
                raise EncryptorError("Integrity check failed")
            # Layout matches Encryptor: 16-byte AES key, 16-byte CTR nonce,
            # 32-byte HMAC key.
            key = plainkey[0:16]
            nonce = plainkey[16:32]
            auth_key = plainkey[32:64]
            self._header_size = 8 + len(data)
            self.cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=default_backend()).decryptor()
            self.authenticator = HMAC(auth_key, SHA256(), backend=default_backend())

    def process_data(self, data):
        """Decrypt a ciphertext chunk and return plaintext."""
        if not data:
            return b""
        # Encrypt-then-MAC: the HMAC covers ciphertext, so update it first.
        self.authenticator.update(data)
        return self.cipher.update(data)

    def finalize(self, footer):
        """Verify the HMAC *footer*; return any remaining plaintext and reset."""
        if footer != self.authenticator.finalize():
            raise EncryptorError("Integrity check failed")
        result = self.cipher.finalize()
        self.cipher = None
        self.authenticator = None
        return result
class DecryptorFile(FileWrap):
    """Read-only, seekable file wrapper that decrypts an encrypted file lazily.

    Seeking backwards restarts decryption from the beginning of the file
    (CTR decryption here is forward-only); seeking forwards decrypts and
    discards intermediate data on the next read.
    """

    def __init__(self, next_fp, rsa_private_key_pem):
        super().__init__(next_fp)
        self._key = rsa_private_key_pem
        self.log = logging.getLogger(self.__class__.__name__)
        self._decryptor = None
        # Size of the encrypted (on-disk) file, including header and footer.
        self._crypted_size = None
        # Holds one full decrypted AES block when serving sub-block reads.
        self._boundary_block = None
        self._plaintext_size = None
        # Our actual plain-text read offset. seek may change self.offset to something
        # else temporarily but we keep _decrypt_offset intact until we actually do a
        # read in case the caller just called seek in order to then immediately seek back
        self._decrypt_offset = None
        self.offset = None
        self._reset()

    def _reset(self):
        """(Re-)initialize all decryption state; used at open and on negative seeks."""
        self._decryptor = Decryptor(self._key)
        # NOTE: _file_size also leaves next_fp positioned at offset 0 (see below),
        # which this reset path relies on to re-read the header from the start.
        self._crypted_size = self._file_size(self.next_fp)
        self._boundary_block = None
        self._plaintext_size = None
        self._decrypt_offset = 0
        # Plaintext offset
        self.offset = 0
        self.state = "OPEN"

    @classmethod
    def _file_size(cls, file):
        """Return the size of *file* in bytes.

        seek(0, SEEK_SET) returns 0, so current_offset is always 0 and the
        final seek rewinds the file to the beginning rather than restoring
        any prior position — _reset depends on this rewind.
        """
        current_offset = file.seek(0, os.SEEK_SET)
        file_end_offset = file.seek(0, os.SEEK_END)
        file.seek(current_offset, os.SEEK_SET)
        return file_end_offset

    def _initialize_decryptor(self):
        """Read/process the header if not yet done and compute the plaintext size."""
        if self._plaintext_size is not None:
            return
        while True:
            required_bytes = self._decryptor.expected_header_bytes()
            if not required_bytes:
                break
            self._decryptor.process_header(self._read_raw_exactly(required_bytes))
        self._plaintext_size = self._crypted_size - self._decryptor.header_size() - self._decryptor.footer_size()

    def _read_raw_exactly(self, required_bytes):
        """Read exactly *required_bytes* from the raw file or raise EncryptorError."""
        data = self.next_fp.read(required_bytes)
        while data and len(data) < required_bytes:
            next_chunk = self.next_fp.read(required_bytes - len(data))
            if not next_chunk:
                break
            data += next_chunk
        if not data or len(data) != required_bytes:
            raise EncryptorError("Failed to read {} bytes of header or footer data".format(required_bytes))
        return data

    def _move_decrypt_offset_to_plaintext_offset(self):
        """Bring the real decrypt position in line with self.offset set by seek()."""
        if self._decrypt_offset == self.offset:
            return
        seek_to = self.offset
        if self._decrypt_offset > self.offset:
            # Can't rewind the stream cipher; start over from the beginning.
            self.log.warning("Negative seek from %d to %d, must re-initialize decryptor", self._decrypt_offset, self.offset)
            self._reset()
            self._initialize_decryptor()
        # Decrypt and throw away everything between here and the target offset.
        discard_bytes = seek_to - self._decrypt_offset
        self.offset = self._decrypt_offset
        while discard_bytes > 0:
            data = self._read_block(discard_bytes)
            discard_bytes -= len(data)

    def _read_all(self):
        """Read and return all remaining plaintext."""
        full_data = bytearray()
        while True:
            data = self._read_block(IO_BLOCK_SIZE)
            if not data:
                return bytes(full_data)
            full_data.extend(data)

    def _read_block(self, size):
        """Read up to *size* plaintext bytes, keeping raw reads AES-block aligned."""
        if self._crypted_size == 0:
            return b""
        self._initialize_decryptor()
        if self.offset == self._plaintext_size:
            return b""
        self._move_decrypt_offset_to_plaintext_offset()
        # If we have an existing boundary block, fulfil the read entirely from that
        if self._boundary_block:
            size = min(size, len(self._boundary_block) - self.offset % AES_BLOCK_SIZE)
            data = self._boundary_block[self.offset % AES_BLOCK_SIZE:self.offset % AES_BLOCK_SIZE + size]
            if self.offset % AES_BLOCK_SIZE + size == len(self._boundary_block):
                # Block fully consumed; drop it.
                self._boundary_block = None
            data_len = len(data)
            self.offset += data_len
            self._decrypt_offset += data_len
            return data
        # Only serve multiples of AES_BLOCK_SIZE whenever possible to keep things simpler
        read_size = size
        if self.offset + max(AES_BLOCK_SIZE, size) >= self._plaintext_size:
            # Final stretch of the file: read exactly what remains.
            read_size = self._plaintext_size - self.offset
        elif size > AES_BLOCK_SIZE and size % AES_BLOCK_SIZE != 0 and self.offset + size < self._plaintext_size:
            # Round large unaligned reads down to a block multiple.
            read_size = size - size % AES_BLOCK_SIZE
        elif size < AES_BLOCK_SIZE:
            # Small reads are served via a buffered boundary block (see below).
            read_size = AES_BLOCK_SIZE
        encrypted = self._read_raw_exactly(read_size)
        decrypted = self._decryptor.process_data(encrypted)
        if self.offset + read_size == self._plaintext_size:
            # Reached end of payload: read and verify the HMAC footer.
            footer = self._read_raw_exactly(self._decryptor.footer_size())
            last_part = self._decryptor.finalize(footer)
            if last_part:
                decrypted += last_part
        if size < AES_BLOCK_SIZE:
            # Stash the whole block and recurse to serve the sub-block request.
            self._boundary_block = decrypted
            return self._read_block(size)
        decrypted_len = len(decrypted)
        self.offset += decrypted_len
        self._decrypt_offset += decrypted_len
        return decrypted

    def close(self):
        super().close()
        self._decryptor = None

    def read(self, size=-1):
        """Read up to size decrypted bytes"""
        self._check_not_closed()
        if self.state == "EOF" or size == 0:
            return b""
        elif size < 0:
            return self._read_all()
        else:
            return self._read_block(size)

    def readable(self):
        """True if this stream supports reading"""
        self._check_not_closed()
        return self.state in ["OPEN", "EOF"]

    def seek(self, offset, whence=0):
        """Seek within the plaintext.

        Only absolute seeks (SEEK_SET) to arbitrary positions are supported;
        relative/end seeks must use offset 0. Only the logical offset moves
        here — actual decryption catches up lazily on the next read.
        """
        self._check_not_closed()
        self._initialize_decryptor()
        if whence == os.SEEK_SET:
            if offset != self.offset:
                if offset > self._plaintext_size:
                    raise io.UnsupportedOperation("DecryptorFile does not support seeking beyond EOF")
                if offset < 0:
                    raise ValueError("negative seek position")
                self.offset = offset
            return self.offset
        elif whence == os.SEEK_CUR:
            if offset != 0:
                raise io.UnsupportedOperation("can't do nonzero cur-relative seeks")
            return self.offset
        elif whence == os.SEEK_END:
            if offset != 0:
                raise io.UnsupportedOperation("can't do nonzero end-relative seeks")
            self.offset = self._plaintext_size
            return self.offset
        else:
            raise ValueError("Invalid whence value")

    def seekable(self):
        """True if this stream supports random access"""
        self._check_not_closed()
        return True
class DecryptSink(Sink):
    """Sink wrapper that decrypts bytes written through it before forwarding."""

    def __init__(self, next_sink, file_size, encryption_key_data):
        super().__init__(next_sink)
        if file_size <= 0:
            raise ValueError("Invalid file_size: " + str(file_size))
        # Ciphertext payload bytes received so far (header/footer excluded).
        self.data_bytes_received = 0
        # Payload size; starts as the full file size, shrinks once the header
        # is parsed (see _process_encryption_header).
        self.data_size = file_size
        self.decryptor = Decryptor(encryption_key_data)
        self.file_size = file_size
        # Accumulates trailing HMAC footer bytes as they arrive.
        self.footer = b""
        # Buffers partial header bytes until a complete header chunk exists.
        self.header = b""

    def _extract_encryption_footer_bytes(self, data):
        """Split trailing footer bytes off *data* into self.footer."""
        expected_data_bytes = self.data_size - self.data_bytes_received
        if len(data) > expected_data_bytes:
            self.footer += data[expected_data_bytes:]
            data = data[:expected_data_bytes]
        return data

    def _process_encryption_header(self, data):
        """Feed header bytes to the decryptor; return remaining payload bytes."""
        if not data or not self.decryptor.expected_header_bytes():
            return data
        if self.header:
            # Prepend previously buffered partial header bytes.
            data = self.header + data
            self.header = None
        offset = 0
        while self.decryptor.expected_header_bytes() > 0:
            header_bytes = self.decryptor.expected_header_bytes()
            if header_bytes + offset > len(data):
                # Not enough data for the next header chunk yet; buffer and wait.
                self.header = data[offset:]
                return b""
            self.decryptor.process_header(data[offset:offset + header_bytes])
            offset += header_bytes
        data = data[offset:]
        # Header fully parsed: payload size excludes header and footer.
        self.data_size = self.file_size - self.decryptor.header_size() - self.decryptor.footer_size()
        return data

    def write(self, data):
        """Decrypt *data*, forward plaintext downstream; returns bytes consumed."""
        written = len(data)
        data = self._process_encryption_header(data)
        if not data:
            return written
        data = self._extract_encryption_footer_bytes(data)
        self.data_bytes_received += len(data)
        if data:
            data = self.decryptor.process_data(data)
        if len(self.footer) == self.decryptor.footer_size():
            # Full footer received: verify integrity and flush remaining plaintext.
            final_data = self.decryptor.finalize(self.footer)
            if final_data:
                data += final_data
        if not data:
            return written
        self._write_to_next_sink(data)
        return written
| {
"content_hash": "215f66982bd87a63f3efa84af39b0e36",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 124,
"avg_line_length": 36.13970588235294,
"alnum_prop": 0.5976941336046118,
"repo_name": "ohmu/pghoard",
"id": "695c76d3ff9a7f0b5413508d45299ebc0c68f855",
"size": "14745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pghoard/rohmu/encryptor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4937"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "Python",
"bytes": "467043"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ChannelTwitterConfig(AppConfig):
    """Django application configuration for the channel_twitter app."""
    name = 'channel_twitter'
| {
"content_hash": "79812ea6c30d1477724b983cbd7233ff",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 20.8,
"alnum_prop": 0.7788461538461539,
"repo_name": "daisychainme/daisychain",
"id": "09f35d96d63855f4fd5dc246db83202ca57cfce9",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisychain/channel_twitter/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33038"
},
{
"name": "HTML",
"bytes": "69989"
},
{
"name": "JavaScript",
"bytes": "22115"
},
{
"name": "Makefile",
"bytes": "995"
},
{
"name": "Python",
"bytes": "610321"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
def home(request):
    '''
    Render the site home page.

    Address: /
    '''
    return render(request, 'home.html')
| {
"content_hash": "e1197f609f18f2d30f75140f223a4b71",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6124031007751938,
"repo_name": "prsevero/django-template",
"id": "99314d109494a6d57bfc1417de8a30c05f1ae2fc",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/website/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2250"
},
{
"name": "JavaScript",
"bytes": "2499"
},
{
"name": "Python",
"bytes": "5199"
}
],
"symlink_target": ""
} |
from utils import run
# `run` does most of the work. It creates a tempdir, copys over input data,
# Snakefile, and wrapper, runs the Snakefile, and runs a user-provided test
# function against the output.
from utils import dpath
# `dpath` figures out the path the wrapper even when in a tempdir
from utils import symlink_in_tempdir
# `symlink_in_tempdir` is a decorator function that lets us easily map fixtures
# to input files expected by our Snakefile.
# A note on fixtures
# ------------------
#
# py.test implicitly does a `from conftest import *`, so we will have the
# fixtures from that package available here.
#
# py.test also includes a built-in `tmpdir` fixture which we use here to have
# a nicely-named tmpdir for running the test.
#
# See http://doc.pytest.org/en/latest/fixture.html for more info.
# Our first test. The test function names must start with `test_` in order for
# py.test to find them.
def test_demo(sample1_se_fq, tmpdir):
    """Run the demo wrapper on single-end data and verify output == input.

    ``sample1_se_fq`` is a py.test fixture (see conftest) pointing at
    downloaded example data; ``tmpdir`` is py.test's built-in per-test
    temporary directory.  ``run()`` copies the wrapper into a "wrapper"
    subdirectory of the tempdir, stages the input files, executes the
    Snakefile, and finally calls the check function inside the tempdir.
    """
    # Snakefile to execute; run() dedents it, so indentation is free-form.
    # The wrapper is available under the name "wrapper" inside the tempdir.
    snakefile = '''
    rule demo:
        input: 'a.fastq.gz'
        output: 'b.fastq.gz'
        wrapper: "file:wrapper"
    '''

    # Map the downloaded fixture file onto the input path the Snakefile
    # expects (keys: fixture paths; values: paths relative to the Snakefile).
    input_data_func = symlink_in_tempdir(
        {
            sample1_se_fq: 'a.fastq.gz'
        }
    )

    def check():
        # The demo wrapper simply copies input to output, so the two files
        # must be byte-identical.
        with open('a.fastq.gz', 'rb') as fin, open('b.fastq.gz', 'rb') as fout:
            assert fin.read() == fout.read()

    # run() needs the managed tmpdir passed in explicitly since it lives in
    # the utils module, outside py.test's per-test tempdir management.
    run(dpath('../wrappers/demo'), snakefile, check, input_data_func, tmpdir)
# This test function shows how to use downloaded paired-end data from
# a different fixture.
def test_demo_pe(sample1_pe_fq, tmpdir):
    """Run the demo wrapper on paired-end data and verify outputs == inputs.

    ``sample1_pe_fq`` is a tuple of two fixture paths (R1, R2); see
    conftest.sample1_pe_fq().  The Snakefile matches what the wrapper
    expects for paired-end input (see wrappers/demo/README.md).
    """
    snakefile = '''
    rule demo:
        input:
            R1='a1.fastq.gz',
            R2='a2.fastq.gz'
        output:
            R1='b1.fastq.gz',
            R2='b2.fastq.gz'
        wrapper: "file:wrapper"
    '''

    # Map both mates of the paired-end fixture to the expected input names.
    input_data_func = symlink_in_tempdir(
        {
            sample1_pe_fq[0]: 'a1.fastq.gz',
            sample1_pe_fq[1]: 'a2.fastq.gz',
        }
    )

    def check():
        # Each output mate must be a byte-for-byte copy of its input mate.
        for src, dst in (('a1.fastq.gz', 'b1.fastq.gz'), ('a2.fastq.gz', 'b2.fastq.gz')):
            with open(src, 'rb') as fin, open(dst, 'rb') as fout:
                assert fin.read() == fout.read()

    run(dpath('../wrappers/demo'), snakefile, check, input_data_func, tmpdir)
| {
"content_hash": "09592ea55b01b5c13c2140a59880109f",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 83,
"avg_line_length": 35.614814814814814,
"alnum_prop": 0.6366472545757071,
"repo_name": "lcdb/lcdb-wrapper-tests",
"id": "3c097d4882606dd300d203382d988528aa26e7b3",
"size": "4962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75551"
},
{
"name": "Shell",
"bytes": "596"
}
],
"symlink_target": ""
} |
import copy
import os
import shutil
from tempfile import mkdtemp
from .test_utils import TestCase
from dwight_chroot.cache import Cache
from dwight_chroot.resources import CacheableResource
class DummyCachedItem(CacheableResource):
    """Minimal CacheableResource used to exercise the cache in tests."""

    def __init__(self, key, src_path):
        super(DummyCachedItem, self).__init__()
        self.key = key
        self.src_path = src_path
        # Number of times refresh() has been called; asserted on by tests.
        self.refresh_count = 0

    def get_cache_key(self):
        # The cache key is whatever the test supplied verbatim.
        return self.key

    def refresh(self, path):
        # Only counts invocations; no actual refreshing is performed.
        self.refresh_count += 1

    def fetch(self, path):
        # Replace the target directory with a copy of the source tree.
        shutil.rmtree(path)
        shutil.copytree(self.src_path, path)
class DummyEnvironment(object):
    """Stand-in environment exposing only a Cache rooted in a fresh tempdir."""

    def __init__(self):
        super(DummyEnvironment, self).__init__()
        self.cache = Cache(mkdtemp())
class CacheTest(TestCase):
    """Tests for Cache fetch/refresh/persistence and size-based cleanup."""

    def setUp(self):
        super(CacheTest, self).setUp()
        self.env = DummyEnvironment()
        # Source directory with one known file that DummyCachedItem fetches.
        self.src_path = mkdtemp()
        with open(os.path.join(self.src_path, "somefile.txt"), "w") as f:
            f.write("hello")
        self.item = DummyCachedItem(dict(a=1, b="some_attr"), self.src_path)

    def test__fetching_from_scratch(self):
        # First get_path() must fetch: the returned dir mirrors src_path.
        path = self.item.get_path(self.env)
        self.assertTrue(os.path.isdir(path))
        for filename in os.listdir(path):
            self.assertIn(filename, os.listdir(self.src_path))
        # Returned so other tests can reuse the fetched path.
        return path

    def test__refreshing(self):
        old_path = self.test__fetching_from_scratch()
        self.assertEquals(self.item.refresh_count, 0)
        # Second get_path() refreshes in place rather than fetching anew.
        new_path = self.item.get_path(self.env)
        self.assertEquals(self.item.refresh_count, 1)
        self.assertEquals(new_path, old_path)

    def test__saving_and_reloading(self):
        # Populate the cache (return value unused; the side effect matters).
        path = self.test__fetching_from_scratch()
        old_state = copy.deepcopy(self.env.cache._state)
        # Re-creating the Cache over the same root must reload identical state.
        self.env.cache = Cache(self.env.cache.root)
        self.assertEquals(self.env.cache._state, old_state)

    def test__new_item_on_already_existing_directory(self):
        # A pre-existing "0" directory must push the next allocation to "1".
        os.makedirs(os.path.join(self.env.cache.root, "items", "0"))
        self.assertEquals(self.env.cache.create_new_path(), os.path.join(self.env.cache.root, "items", "1"))

    def test__cache_cleanup(self):
        cache = Cache(mkdtemp())
        size = 1000
        p1 = self._create_cache_item(cache, 1, size)
        p2 = self._create_cache_item(cache, 2, size)
        # Within budget: nothing is evicted.
        cache.cleanup(size * 2, [])
        self.assertTrue(os.path.exists(p1))
        self.assertTrue(os.path.exists(p2))
        p3 = self._create_cache_item(cache, 3, size)
        # Over budget: the oldest item (p1) is evicted first.
        cache.cleanup(size * 2, [])
        self.assertFalse(os.path.exists(p1))
        self.assertTrue(os.path.exists(p2))
        self.assertTrue(os.path.exists(p3))

    def test__cleanup_used_keys(self):
        cache = Cache(mkdtemp())
        p1 = self._create_cache_item(cache, 1, 1000)
        # Keys listed in skip_keys survive cleanup even when over budget.
        cache.cleanup(10, skip_keys=[1])
        self.assertTrue(os.path.exists(p1))

    def _create_cache_item(self, cache, key, size):
        """Register a cache entry of *size* zero-bytes under *key*; return its file path."""
        # NOTE(review): root_path is allocated but unused — presumably to
        # advance the path counter; verify against Cache.create_new_path.
        root_path = cache.create_new_path()
        p = cache.create_new_path()
        file_path = os.path.join(p, "file")
        with open(file_path, "wb") as f:
            f.write("\x00".encode("utf-8") * size)
        cache.register_new_path(p, key)
        return file_path
| {
"content_hash": "e06cd3ed1b978e99ec74e27154dab0b2",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 108,
"avg_line_length": 39.573170731707314,
"alnum_prop": 0.6206471494607088,
"repo_name": "vmalloc/dwight",
"id": "33a2c40929c70e657315bca12012e81e1d1f2ddc",
"size": "3245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test__cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39194"
},
{
"name": "Ruby",
"bytes": "840"
},
{
"name": "Shell",
"bytes": "2678"
}
],
"symlink_target": ""
} |
import random
from django.test import TestCase
from tx_salaries.utils.transformers import base
class TestOfBaseTransformedRecord(TestCase):
    """Tests for BaseTransformedRecord's MAP-driven attribute resolution."""

    def test_returns_mapped_value_as_an_attribute(self):
        # Bug fix: random.randint was interpolated as a *function object*
        # ('some-key-<built-in method ...>'), so the key was never random.
        # Call it with a range instead.
        some_random_key = 'some-key-{0}'.format(random.randint(0, 1000000))

        class MyRecord(base.BaseTransformedRecord):
            MAP = {
                'compensation': some_random_key
            }

        instance = MyRecord()
        # The generated *_key attribute exposes the raw mapped key string.
        self.assertEqual(instance.compensation_key, some_random_key)

    def test_returns_actual_data_if_mapped(self):
        # Same bug fix as above: actually call random.randint.
        some_random_key = 'some-key-{0}'.format(random.randint(0, 1000000))
        some_random_value = random.randint(100000, 2000000)
        data = {some_random_key: some_random_value}

        class MyRecord(base.BaseTransformedRecord):
            MAP = {
                'compensation': some_random_key
            }

        instance = MyRecord(data)
        # Accessing the mapped attribute resolves through MAP into the data dict.
        self.assertEqual(instance.compensation, some_random_value)
| {
"content_hash": "7679df5c28f2ebf68d8cf927edf11a88",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 31.096774193548388,
"alnum_prop": 0.6441908713692946,
"repo_name": "texastribune/tx_salaries",
"id": "7269f41d03acbda32325d4447d125e33260f1961",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/app/tests/utils/transformers/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "309280"
},
{
"name": "Ruby",
"bytes": "191"
}
],
"symlink_target": ""
} |
'''
Main handler for all the incoming bot events
'''
import logging
from concurrent.futures import ThreadPoolExecutor
import telegram
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters, CallbackQueryHandler
from telegram.ext.inlinequeryhandler import InlineQueryHandler
from telegram.ext.choseninlineresulthandler import ChosenInlineResultHandler
import boto3
from boto3.dynamodb.conditions import Key, Attr
import config
from consts import RESPONSES, COMMANDS
from db_actions import (update_users_followers, follow_user, unfollow_user,
create_user, update_user_photo, update_user, get_followers_list)
# Root logging goes to DEBUG; this module's own logger is capped at INFO.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Create a connection to the database
# Module-level DynamoDB Table handle; assigned via globals() in
# lambda_handler()/main() before any handler runs.
table = None
def message_handler(bot, update):
    """Handler for plain text messages; currently only logs the update."""
    logger.info(update)
    logger.info('message_handler')
def contact_handler(bot, update):
    """Handler for messages carrying a contact.

    Follows the shared contact's user on behalf of the sender and, when a
    new follow relationship was created, notifies the followed user.
    """
    username = str(update['message']['chat']['id'])
    if not update['message']['contact']['user_id']:
        # Contact without a Telegram account attached — nothing to follow.
        bot.send_message(username, RESPONSES['empty_contact'])
        return
    user_to_follow = str(update['message']['contact']['user_id'])
    new_follower = follow_user(username, user_to_follow, table)
    if new_follower:
        bot.send_message(user_to_follow, RESPONSES['new_follower'])
    # Bug fix: log message typo, was 'contat_handler'.
    logger.info('contact_handler')
def start_command_handler(bot, update):
    """Handler for the /start command.

    Registers the current user in the Users table and stores their
    profile photo when one exists.
    """
    # Avoid duplication of the existing users (handled inside create_user).
    username = str(update['message']['chat']['id'])
    create_user(update, table)
    # Robustness fix: ['photos'][0] raised IndexError for accounts with no
    # profile photo; only store a photo when one is available.
    photos = bot.getUserProfilePhotos(update.message.from_user.id)['photos']
    if photos:
        update_user_photo(photos[0], username, table)
    logger.info('start_command_handler')
def update_command_handler(bot, update):
    """Handler for the /update command.

    Refreshes the user's stored info and profile photo in the database.
    """
    username = str(update['message']['chat']['id'])
    update_user(update, table)
    # Robustness fix: ['photos'][0] raised IndexError for accounts with no
    # profile photo; only store a photo when one is available.
    photos = bot.getUserProfilePhotos(update.message.from_user.id)['photos']
    if photos:
        update_user_photo(photos[0], username, table)
    logger.info('update_command_handler')
def send_all_command_handler(bot, update):
    """Handler for /send_all (admin only): broadcast a message to every user."""
    username = str(update['message']['chat']['id'])
    if not username == config.MAIN_USER:
        # Only the configured main user may broadcast.
        return
    all_users = table.scan()['Items']
    if not all_users:
        # Robustness fix (and consistency with the other broadcast handlers):
        # ThreadPoolExecutor(max_workers=0) raises ValueError.
        return
    message = update['message']['text'][len('/send_all'):]
    with ThreadPoolExecutor(max_workers=min(len(all_users), config.MAX_THREADS)) as Executor:
        list(Executor.map(lambda x: bot.send_message(*x),
                          [(int(user['username']), RESPONSES['important_message'].format(message))
                           for user in all_users]))
def remove_command_handler(bot, update):
    """Handler for /remove: present an inline keyboard of users to unfollow."""
    chat_id = update['message']['chat']['id']
    username = str(chat_id)
    users = get_followers_list(username, table)
    if not users:
        bot.send_message(chat_id, RESPONSES['empty_remove_command'])
        return
    logger.info(users)
    # One button per followed user; callback_data carries the target's id.
    keyboard = []
    for user in users:
        label = '%s %s' % (user.get('first_name', ''), user.get('last_name', ''))
        button = telegram.InlineKeyboardButton(text=label,
                                               callback_data=str(user['username']))
        keyboard.append([button])
    bot.sendMessage(chat_id=chat_id,
                    text=RESPONSES['remove_keyboard_message'],
                    reply_markup=telegram.InlineKeyboardMarkup(keyboard))
    logger.info('remove_command_handler')
def remove_user_callback(bot, update):
    """Inline-keyboard callback for /remove: unfollow the selected user."""
    logger.info('=' * 80)
    username = str(update['callback_query']['message']['chat']['id'])
    unfollower_id = str(update['callback_query']['data'])
    logger.info("remove users %s %s" % (username, unfollower_id))
    unfollow_user(username, unfollower_id, table)
def send_command_handler(bot, update):
    """Handler for /send: relay a message to everyone following the sender."""
    message = update['message']['text'][len('/send'):]
    if not message:
        bot.send_message(update['message']['chat']['id'], RESPONSES['empty_send_command'])
        return
    username = str(update['message']['chat']['id'])
    users_to_send = table.scan(FilterExpression=Attr('follow').contains(username))['Items']
    if not users_to_send:
        return
    # Pre-build (chat_id, text) pairs, then fan the sends out over a pool.
    payload = [(int(user['username']), RESPONSES['message_boilerplate'].format(message))
               for user in users_to_send]
    with ThreadPoolExecutor(max_workers=min(len(payload), config.MAX_THREADS)) as executor:
        list(executor.map(lambda args: bot.send_message(*args), payload))
    logger.info('send_command_handler')
def photo_handler(bot, update):
    """Broadcast an incoming photo to everyone following the sender."""
    username = str(update['message']['chat']['id'])
    followers = table.scan(FilterExpression=Attr('follow').contains(username))['Items']
    if not followers:
        return
    # Telegram supplies several photo sizes; the last entry is used here.
    photo_to_send = update['message']['photo'][-1]['file_id']
    jobs = [(int(user['username']), photo_to_send, RESPONSES['photo_caption'])
            for user in followers]
    with ThreadPoolExecutor(max_workers=min(len(jobs), config.MAX_THREADS)) as executor:
        list(executor.map(lambda args: bot.send_photo(*args), jobs))
    logger.info('send_photo_handler')
def document_handler(bot, update):
    """Broadcast an incoming document to everyone following the sender."""
    username = str(update['message']['chat']['id'])
    document = update['message']['document']['file_id']
    followers = table.scan(FilterExpression=Attr('follow').contains(username))['Items']
    if not followers:
        return
    jobs = [(int(user['username']), document) for user in followers]
    with ThreadPoolExecutor(max_workers=min(len(jobs), config.MAX_THREADS)) as executor:
        list(executor.map(lambda args: bot.send_document(*args), jobs))
    logger.info('send_document_handler')
def sticker_handler(bot, update):
    """Broadcast an incoming sticker to everyone following the sender."""
    username = str(update['message']['chat']['id'])
    sticker = update['message']['sticker']['file_id']
    followers = table.scan(FilterExpression=Attr('follow').contains(username))['Items']
    if not followers:
        return

    def send_message_and_sticker(chat_id):
        # Send the intro message first so it arrives before the sticker.
        bot.send_message(chat_id, RESPONSES['before_sticker_send'])
        bot.send_sticker(chat_id, sticker)

    with ThreadPoolExecutor(max_workers=min(len(followers), config.MAX_THREADS)) as executor:
        list(executor.map(send_message_and_sticker,
                          [int(user['username']) for user in followers]))
    logger.info('send_sticker_handler')
def inline_query_handler(bot, update):
    """Answer inline queries with users whose name contains the query text."""
    query = update.inline_query.query
    inline_query_id = update.inline_query.id
    if len(query) < 3:
        # Too short to search meaningfully; answer with no results.
        bot.answerInlineQuery(inline_query_id, [])
        return
    matches = table.scan(FilterExpression=Attr('first_name').contains(query.upper()) |
                                          Attr('last_name').contains(query.upper()))['Items']
    articles = []
    for item in matches:
        title = '%s %s' % (item['first_name'] or '', item['last_name'] or '')
        content = telegram.InputTextMessageContent('%s %s' % (item['first_name'], item['last_name']))
        articles.append(telegram.InlineQueryResultArticle(item['username'], title, content))
    bot.answerInlineQuery(inline_query_id, articles)
def inline_query_result_handler(bot, update):
    """Follow the user chosen from an inline-query result list."""
    follower = str(update.chosen_inline_result.from_user.id)
    chosen = str(update.chosen_inline_result.result_id)
    if follow_user(follower, chosen, table):
        # Only notify when a new follow relationship was actually created.
        bot.send_message(chosen, RESPONSES['new_follower'])
    logger.info('Query result handler')
def lambda_handler(event, context):
    """AWS Lambda entry point: process one Telegram update from *event*."""
    dynamodb = boto3.resource('dynamodb', region_name=config.DB_REGION)
    # Publish the table handle for the module-level handlers.
    globals()['table'] = dynamodb.Table(config.DB_NAME)
    logger.info(f'HANDLER UPDATE {event} {context}')
    updater = Updater(config.BOT_TOKEN)
    bot = telegram.Bot(config.BOT_TOKEN)
    dispatcher = updater.dispatcher
    register_handlers(dispatcher)
    dispatcher.process_update(telegram.Update.de_json(event, bot))
def main():
    """Local entry point: long-poll Telegram against the configured DB endpoint."""
    dynamodb = boto3.resource('dynamodb', endpoint_url=config.DB_HOST)
    # Publish the table handle for the module-level handlers.
    globals()['table'] = dynamodb.Table(config.DB_NAME)
    updater = Updater(config.TEST_BOT_TOKEN)
    register_handlers(updater.dispatcher)
    updater.start_polling()
    updater.idle()
def register_handlers(dp):
    """Attach all command, media, inline and callback handlers to *dp*."""
    # Each entry in COMMANDS maps to a module-level `<command>_command_handler`.
    for command in COMMANDS:
        dp.add_handler(CommandHandler(command, globals()[f'{command}_command_handler']))
    media_handlers = [
        (Filters.contact, contact_handler),
        (Filters.photo, photo_handler),
        (Filters.document, document_handler),
        (Filters.sticker, sticker_handler),
    ]
    for message_filter, handler in media_handlers:
        dp.add_handler(MessageHandler(message_filter, handler))
    dp.add_handler(InlineQueryHandler(inline_query_handler))
    dp.add_handler(ChosenInlineResultHandler(inline_query_result_handler))
    dp.add_handler(CallbackQueryHandler(remove_user_callback))
if __name__ == '__main__':
    # Local long-polling entry point; AWS deployments use lambda_handler instead.
    main()
| {
"content_hash": "808a94661b1d8160e7e4ab2d8222c5de",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 153,
"avg_line_length": 37.5886524822695,
"alnum_prop": 0.6424528301886793,
"repo_name": "vz10/secretBot",
"id": "c7045759634b4f9e4aa99c10314da8d8ec103ded",
"size": "10600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15142"
}
],
"symlink_target": ""
} |
import re
from genshi.filters.transform import Transformer
from trac.core import Component, implements
from trac.web import IRequestHandler
from trac.web.api import ITemplateStreamFilter
from trac.web.chrome import Chrome
class AdsenseForSearch(Component):
    """Trac component that swaps the regular search form for a Google Custom
    Search form and serves the page hosting the search results."""

    # Placeholders for attributes provided by the Trac component framework.
    config = env = log = None

    implements(ITemplateStreamFilter, IRequestHandler)

    # ITemplateStreamFilter method
    def filter_stream(self, req, method, filename, stream, data):
        """Replace the form with id ``search_form_id`` by the Google search form.

        Returns the unmodified stream when the feature is disabled or any of
        the required config values (form id, FORID, client id) is missing.
        """
        if not self.config.getbool('google.search',
                                   'google_search_active', True):
            self.log.debug('Google search disabled. Returning regular stream.')
            return stream
        search_form_id = self.config.get('google.search',
                                         'search_form_id', 'search')
        forid = self.config.get('google.search', 'search_form_forid', None)
        client_id = self.config.get('google.search',
                                    'search_form_client_id', None)
        if not search_form_id:
            self.log.warn('The value of the search form id is empty. Returning '
                          'regular template stream')
            return stream
        elif not forid:
            self.log.warn('The value of "FORID" for the search form is empty. '
                          'Returning regular template stream')
            return stream
        elif not client_id:
            self.log.warn('The value of "Client ID" for the search form is '
                          'empty. Returning regular template stream')
            return stream
        template = Chrome(self.env).load_template('google_search_form.html')
        # NOTE: rebinding `data` shadows the incoming template-data argument,
        # which is not otherwise used by this filter.
        data = dict(
            req=req,
            search_form_id=search_form_id,
            input_width=self.config.get('google.search',
                                        'search_form_text_input_width', 31),
            charset=self.config.get('trac', 'default_charset', 'utf-8'),
            forid=forid,
            client_id=client_id
        )
        # Replace the original search <form> element with the rendered template.
        return stream | Transformer('//div/form[@id="%s"]' % search_form_id) \
            .replace(template.generate(**data))

    # IRequestHandler methods
    def match_request(self, req):
        # Handles /gsearch and /gsearch/ URLs.
        return re.match(r'/gsearch/?', req.path_info) is not None

    def process_request(self, req):
        # Render the results page hosting the Google search iframe.
        data = dict(
            iframe_initial_width=self.config.getint('google.search',
                                                    'iframe_initial_width'))
        return 'google_search_results.html', data, None
| {
"content_hash": "11da666b4d06d5ee8a11d3a0ff2fafdb",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 42.26229508196721,
"alnum_prop": 0.5663304887509697,
"repo_name": "UfSoft/trac-google-search",
"id": "938b3ec097d3b018cd6391b24797eec850c9f6b2",
"size": "2634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracext/google/search/search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "294"
},
{
"name": "HTML",
"bytes": "7746"
},
{
"name": "Python",
"bytes": "10346"
}
],
"symlink_target": ""
} |
import praw
from createMatchThread import createMatchThreadWrapper
from getFixturesInfo import getFixturesDictionary
from createSidebar import updateSidebar
from datetime import datetime
import time
import logging
from requests.exceptions import HTTPError
from praw.errors import ExceptionList, APIException, InvalidCaptcha, InvalidUser, RateLimitExceeded
from socket import timeout
import sqlite3 as sql
from emailGlobals import sendEmail
from inboxHandler import readInbox
from liveScoreHandler import updateLiveScores
from lineupHandler import updateLineups
from OrangeRed import OrangeRed
if __name__=="__main__":
    # Entry point for the /r/cricket bot: logs in two reddit accounts,
    # then loops forever -- score/sidebar/inbox updates roughly every
    # minute, and a fixture refresh + status email every few hours.
    #One time setup
    r = praw.Reddit('/r/cricket sidebar updating and match thread creating bot by /u/rreyv. This does match updates every minute. Version 2.0') #reddit stuff
    subredditName='cricket'
    # NOTE(review): r.login() is old cookie-based PRAW auth -- presumably
    # credentials come from praw.ini; confirm before upgrading PRAW.
    r.login() #sign in!
    MTB = praw.Reddit('MegaThreadBot by /u/rreyv. Checkout https://bitbucket.org/rreyv/megathreadbot for details. Currently only live on /r/india. Version 1.0')
    MTB.login()
    fixturesData={}
    fixturesData=getFixturesDictionary(5)   # initial fixture list (5 = presumably days ahead; confirm)
    sendEmail("Bot has begun","Yep it has")
    # SQL table init
    i=0                                     # iteration counter for the inner loop
    #one time setup ends
    while True:
        #things that happen every four hours
        while True:
            #things that happen every 50 seconds
            updateLiveScores(r)
            #updateLineups(r)
            updateSidebar(fixturesData,r,subredditName)
            readInbox(r,subredditName)
            OrangeRed(MTB)
            time.sleep(50)
            i+=1;
            # 240 iterations of >= 50s each, i.e. roughly every 3.5-4 hours,
            # break out to refresh the fixture list below.
            if i%240==0:
                break
        #End of 50 second loop
        fixturesData={}
        fixturesData=getFixturesDictionary(5)
        sendEmail("Grabbing fixtures from Cricinfo","Grabbed fixtures from Cricinfo")
        i=0
#end of four hour loop#
| {
"content_hash": "de0cb69039ad4b19d8c4f5c0a96b17f9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 157,
"avg_line_length": 32.9,
"alnum_prop": 0.780547112462006,
"repo_name": "rreyv/r-Cricket-Bot",
"id": "d4aee203507a44bd87c5de4c1b37685b149890e9",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/startBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47673"
}
],
"symlink_target": ""
} |
from __future__ import division
import datetime
__author__ = 'James Robert Lloyd'
__description__ = 'Miscellaneous utility functions'
import os
import shutil
from glob import glob
import tempfile
import string
import random
import matplotlib.pyplot as pl
import numpy as np
import scipy.io
from sklearn.cross_validation import KFold
import psutil
from constants import SAVE_DIR
import signal
from automl_lib.data_manager import DataManager
from agent import TerminationEx
import time
def move_make(outdir):
    """Guarantee a fresh, empty outdir.

    An existing non-empty outdir is first renamed to
    ``outdir + '_' + <timestamp>`` so its contents are preserved; an
    existing empty outdir is left as-is.
    """
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
        return
    if os.listdir(outdir):  # non-empty: archive old contents, then recreate
        stamp = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
        os.rename(outdir, '%s_%s' % (outdir, stamp))
        os.makedirs(outdir)
def move_make_file(flnm):
    """Guarantee that flnm exists as a fresh, empty file.

    An existing file is first renamed to ``flnm + '_' + <timestamp>``;
    otherwise the parent directory is created if it is missing.
    """
    parent = os.path.dirname(flnm)
    if os.path.exists(flnm):
        stamp = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
        os.rename(flnm, '%s_%s' % (flnm, stamp))
    elif not os.path.isdir(parent):
        os.makedirs(parent)
    open(flnm, 'a').close()  # touch: create the new empty file
# def init_temp_dir():
# if os.path.isdir(TEMP_DIR):
# shutil.rmtree(TEMP_DIR)
# os.mkdir(TEMP_DIR)
def signal_handler(signum, frame):
    """Handlers for signals (the numbers are the usual Linux values).

    SIGTSTP (20) parks only the calling thread via signal.pause();
    SIGCONT (25) is a no-op whose mere presence lets signal.pause()
    return; SIGTERM (15) is converted into TerminationEx so the agent
    can shut down cleanly.
    """
    if signum == 20:    # SIGTSTP: sleep this thread until the next signal
        signal.pause()  # only the calling thread sleeps, not all threads
    elif signum == 25:  # SIGCONT: handled so signal.pause() can return
        pass
    elif signum == 15:  # SIGTERM: raise for clean shutdown
        raise TerminationEx
def mkstemp_safe(directory, suffix):
    """Create a unique temporary file in `directory` and return its path.

    Immediately closes the OS-level handle returned by tempfile.mkstemp,
    which avoids a file-handle leak seen on some operating systems.
    """
    handle, path = tempfile.mkstemp(dir=directory, suffix=suffix)
    os.close(handle)
    return path
def callback_1d(model, bounds, info, x, index, ftrue):
    """
    Plot the current posterior, the index, and the value of the current
    recommendation.

    Parameters
    ----------
    model : object exposing posterior(X) -> (mu, s2), e.g. a GP surrogate.
    bounds : sequence of (lo, hi) pairs; only bounds[0] is used (1-d case).
    info : record with observed 'x', 'y' and a per-iteration 'xbest'
        history (info[-1]['xbest'] is the latest recommendation).
    x : latest input location selected by the optimizer.
    index : acquisition function, callable on an (n, 1) grid.
    ftrue : true objective; only used for the performance subplot.
    """
    xmin, xmax = bounds[0]
    xx_ = np.linspace(xmin, xmax, 500)                  # define grid
    xx = xx_[:, None]
    # ff = ftrue(xx)                                    # compute true function
    acq = index(xx)                                     # compute acquisition
    mu, s2 = model.posterior(xx)                        # compute posterior and
    lo = mu - 2 * np.sqrt(s2)                           # quantiles
    hi = mu + 2 * np.sqrt(s2)
    # ymin, ymax = ff.min(), ff.max()                   # get plotting ranges
    # Plot range comes from the posterior bands, not the true function.
    ymin, ymax = lo.min(), hi.max()  # get plotting ranges FIXME - remember observed function values
    ymin -= 0.2 * (ymax - ymin)
    ymax += 0.2 * (ymax - ymin)
    kwplot = {'lw': 2, 'alpha': 0.5}                    # common plotting kwargs
    fig = pl.figure(1)
    fig.clf()
    # Top-left: posterior mean, +-2 sigma band, data, latest pick (red)
    # and current recommendation (green).
    pl.subplot(221)
    # pl.plot(xx, ff, 'k:', **kwplot)                   # plot true function
    pl.plot(xx, mu, 'b-', **kwplot)                     # plot the posterior and
    pl.fill_between(xx_, lo, hi, color='b', alpha=0.1)  # uncertainty bands
    pl.scatter(info['x'], info['y'],                    # plot data
               marker='o', facecolor='none', zorder=3)
    pl.axvline(x, color='r', **kwplot)                  # latest selection
    pl.axvline(info[-1]['xbest'], color='g', **kwplot)  # current recommendation
    pl.axis((xmin, xmax, ymin, ymax))
    pl.ylabel('posterior')
    # Bottom-left: acquisition surface with the latest selection marked.
    pl.subplot(223)
    pl.fill_between(xx_, acq.min(), acq,                # plot acquisition
                    color='r', alpha=0.1)
    pl.axis('tight')
    pl.axvline(x, color='r', **kwplot)                  # plot latest selection
    pl.xlabel('input')
    pl.ylabel('acquisition')
    # Top-right: true objective value of each recommendation over time.
    pl.subplot(222)
    pl.plot(ftrue(info['xbest']), 'g', **kwplot)        # plot performance
    pl.axis((0, len(info['xbest']), ymin, ymax))
    pl.xlabel('iterations')
    pl.ylabel('value of recommendation')
    # for ax in fig.axes:                               # remove tick labels
    #     ax.set_xticklabels([])
    #     ax.set_yticklabels([])
    pl.draw()
    pl.show(block=False)
def mkdir(d):
    """Create directory d (including parents) if it does not already exist.

    Fix: uses try/except instead of an exists-then-create check, so two
    processes racing to create the same directory cannot crash (TOCTOU).
    Raises if d exists but is not a directory.
    """
    import errno  # local import keeps this file's top-level imports untouched
    try:
        os.makedirs(d)
    except OSError as e:
        # Ignore only "already exists as a directory"; re-raise the rest.
        if e.errno != errno.EEXIST or not os.path.isdir(d):
            raise
def ls(filename):
    """Return the sorted list of paths matching the glob pattern `filename`."""
    matches = glob(filename)
    matches.sort()
    return matches
def colorbrew(i):
    """Return an RGB tuple (components in [0, 1]) from the ColorBrewer Set1
    palette (http://colorbrewer2.org/). Indices wrap around.

    Bug fix: the original wrote several components with leading zeros
    (055, 077, 000, 051, 040), which Python 2 parses as OCTAL literals
    (055 == 45, 077 == 63, ...), silently corrupting the palette. The
    values below are the intended decimals.
    """
    rgbs = [(228, 26, 28),
            (55, 126, 184),
            (77, 175, 74),
            (152, 78, 163),
            (255, 127, 0),
            (255, 255, 51),
            (166, 86, 40),
            (247, 129, 191),
            (153, 153, 153),
            (0, 0, 0)]
    # Select with wrap-around, then scale to [0, 1].
    r, g, b = rgbs[i % len(rgbs)]
    return (r / 255.0, g / 255.0, b / 255.0)
def convert_automl_into_automl_folds(folder, save_folder_root, n_folds=5,
                                     random_state=0, usage='testing'):
    """Convert a dataset in automl format into several folds of automl format.

    Loads X/Y from `folder` via DataManager, then for each of `n_folds`
    KFold splits writes *_train.data, *_test.data, the matching .solution
    files, a _public.info key=value file and a copy of the _feat.type file
    under <save_folder_root>_fold_XX/<basename>/.

    `usage` overwrites the dataset's 'usage' info field unless it is None.
    """
    # Load data
    input_dir, basename = os.path.split(folder)
    D = DataManager(basename, input_dir, replace_missing=True, filter_features=True)
    X = D.data['X_train']
    y = D.data['Y_train']
    info = D.info
    if not usage is None:
        info['usage'] = usage
    # Now split into folds and save
    # NOTE(review): (n=..., n_folds=...) is the pre-0.18
    # sklearn.cross_validation.KFold API -- confirm the pinned version.
    folds = KFold(n=X.shape[0], n_folds=n_folds, shuffle=True, random_state=random_state)
    for (fold, (train_index, test_index)) in enumerate(folds):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        fold_folder = os.path.join(save_folder_root + '_fold_%02d' % (fold + 1), basename)
        mkdir(fold_folder)
        fmt = '%f'
        np.savetxt(os.path.join(fold_folder, basename + '_train.data'), X_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, basename + '_test.data'), X_test, fmt=fmt, delimiter=' ')
        # Solutions are written as integers for classification tasks.
        if info['task'] == 'binary.classification':
            fmt = '%d'
        np.savetxt(os.path.join(fold_folder, basename + '_train.solution'), y_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, basename + '_test.solution'), y_test, fmt=fmt, delimiter=' ')
        info['train_num'] = X_train.shape[0]
        info['test_num'] = X_test.shape[0]
        # Dump the (updated) info dict as "key = value" lines.
        with open(os.path.join(fold_folder, basename + '_public.info'), 'w') as info_file:
            for (key, value) in info.iteritems():
                info_file.write('%s = %s\n' % (key, value))
        shutil.copy(os.path.join(folder, basename + '_feat.type'), os.path.join(fold_folder, basename + '_feat.type'))
def convert_automl_into_automl_folds_folder(input_folder, save_folder_root, *args, **kwargs):
    """Run convert_automl_into_automl_folds on every dataset sub-folder of input_folder."""
    for entry in sorted(os.listdir(input_folder)):
        path = os.path.join(input_folder, entry)
        if not os.path.isdir(path):
            continue  # skip stray files
        print('Processing ' + entry)
        convert_automl_into_automl_folds(path, save_folder_root, *args, **kwargs)
def convert_mat_into_automl_folds(filename, save_folder_root, time_budget=300, n_folds=5, input_type='Numerical',
                                  random_state=0, metric='auc_metric', usage='testing', task='binary.classification',
                                  target_type='Binary'):
    """Convert a dataset in .mat format into several folds of automl format.

    Loads X and y from `filename`, relabels y to {0, 1} for binary
    classification, then for each KFold split writes *_train/_test data
    and solution files, a _public.info key=value file and a _feat.type
    file under <save_folder_root>_fold_XX/<data_name>/.
    """
    # Load data
    data = scipy.io.loadmat(filename)
    X = data['X']
    y = data['y']
    data_name = os.path.splitext(os.path.split(filename)[-1])[0]
    # Relabel targets to {0, 1} for binary classification.
    if task == 'binary.classification':
        # Bug fix: compute the positive mask BEFORE mutating y. The old
        # code did y[y == y_max] = 1 and then y[y < y_max] = 0, which
        # zeroed the freshly assigned 1s whenever y_max > 1 (e.g. labels
        # {1, 2} all collapsed to 0).
        positive = (y == y.max())
        y[positive] = 1
        y[~positive] = 0
    # If input_type is 'infer' we now infer input types
    if input_type == 'infer':
        raise Exception('I do not know how to infer input types yet')
    else:
        input_type_list = [input_type] * X.shape[1]
    # Create info dictionary
    # TODO - some of these defaults need to be changed
    info = dict(usage=usage, name=data_name, task=task, target_type=target_type,
                feat_type='Numerical', metric=metric, feat_num=X.shape[1],
                target_num=1, label_num=0, has_categorical=0, has_missing=0, is_sparse=0,
                time_budget=time_budget, valid_num=0)
    # Now split into folds and save
    folds = KFold(n=X.shape[0], n_folds=n_folds, shuffle=True, random_state=random_state)
    for (fold, (train_index, test_index)) in enumerate(folds):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        fold_folder = os.path.join(save_folder_root + '_fold_%02d' % (fold + 1), data_name)
        mkdir(fold_folder)
        fmt = '%f'
        np.savetxt(os.path.join(fold_folder, data_name + '_train.data'), X_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, data_name + '_test.data'), X_test, fmt=fmt, delimiter=' ')
        # Solutions are written as integers for classification tasks.
        if task == 'binary.classification':
            fmt = '%d'
        np.savetxt(os.path.join(fold_folder, data_name + '_train.solution'), y_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, data_name + '_test.solution'), y_test, fmt=fmt, delimiter=' ')
        info['train_num'] = X_train.shape[0]
        info['test_num'] = X_test.shape[0]
        with open(os.path.join(fold_folder, data_name + '_public.info'), 'w') as info_file:
            for (key, value) in info.iteritems():
                info_file.write('%s = %s\n' % (key, value))
        with open(os.path.join(fold_folder, data_name + '_feat.type'), 'w') as feature_file:
            for feat_type in input_type_list:
                feature_file.write('%s\n' % feat_type)
def convert_mat_into_automl_folds_folder(mat_folder, save_folder_root, *args, **kwargs):
    """Run convert_mat_into_automl_folds on every .mat file in mat_folder."""
    for entry in sorted(os.listdir(mat_folder)):
        if not entry.endswith('.mat'):
            continue  # only .mat datasets are converted
        print('Processing ' + entry)
        convert_mat_into_automl_folds(os.path.join(mat_folder, entry),
                                      save_folder_root, *args, **kwargs)
def create_synthetic_classification_problems(mat_folder, save_folder_root, synth_kwargs_list):
    """Generate synthetic classification datasets (unimplemented stub).

    NOTE(review): placeholder only -- the body is `pass` and all
    arguments are currently ignored.
    """
    pass
def VmB(pid, VmKey):
    """Read one memory field (e.g. 'VmRSS:') from /proc/<pid>/status.

    Returns the value in bytes, -1 when the status file cannot be read
    (non-Linux platform or no such process), or 0.0 when the field is
    missing or malformed.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt)
    is narrowed to I/O errors, and a missing key or unknown unit no
    longer raises ValueError/KeyError.
    """
    scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0,
             'KB': 1024.0, 'MB': 1024.0 * 1024.0}
    try:
        with open('/proc/%d/status' % pid) as status_file:
            contents = status_file.read()
    except (IOError, OSError):
        return -1  # non-Linux, or the process does not exist
    # Locate e.g. 'VmRSS:  9999 kB\n...' and take the next two fields.
    try:
        start = contents.index(VmKey)
    except ValueError:
        return 0.0  # field absent for this kernel/process
    fields = contents[start:].split(None, 3)  # [key, value, unit, rest]
    if len(fields) < 3 or fields[2] not in scale:
        return 0.0  # invalid format
    return float(fields[1]) * scale[fields[2]]
def memory_usage(pid, since=0.0):
    """Return virtual memory size (VmSize) of `pid` in bytes, minus `since`.

    Delegates to VmB, so the result is negative on platforms without
    /proc (VmB returns -1 there).
    """
    return VmB(pid, 'VmSize:') - since
def resident_memory_usage(pid, since=0.0):
    """Return resident set size (VmRSS) of `pid` in bytes, minus `since`.

    Delegates to VmB, so the result is negative on platforms without
    /proc (VmB returns -1 there).
    """
    return VmB(pid, 'VmRSS:') - since
def random_string(N=20):
    """Return a string of N characters drawn (cryptographically securely)
    from ASCII letters and digits."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(N))
def random_temp_file_name(suffix='.file', dirnm=SAVE_DIR, N=20):
    """Return a random file path under `dirnm` (the file is NOT created).

    The name is an N-character random stem plus `suffix`; dirnm defaults
    to the project-wide SAVE_DIR constant, captured at import time.
    """
    return os.path.join(dirnm, random_string(N=N) + suffix)
def pack_list(a_list):
    """Return a_list with all None entries removed.

    Other falsy values (0, '', empty containers) are kept. Uses the
    idiomatic `is not None` instead of the original `not ... is None`.
    """
    return [element for element in a_list if element is not None]
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a brand-new dict.

    Later arguments win on key collisions; the inputs are left untouched.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
def dict_of_lists_to_arrays(a_dict):
    """Convert every value of a_dict to a numpy array, in place.

    Fix: uses items() instead of the Python-2-only iteritems() so the
    helper also runs on Python 3. Reassigning values of existing keys
    while iterating is safe, since no keys are added or removed.

    Returns the same (mutated) dict for convenience.
    """
    for key, value in a_dict.items():
        a_dict[key] = np.array(value)
    return a_dict
def identity(i):
    """Identity function: return the argument unchanged (useful as a
    default transform or no-op callback)."""
    return i
def classes_to_1_of_k(classes, k):
    """One-hot encode `classes` into a (len(classes), k) array of 0/1 floats.

    classes[r] is used as the column index of the single 1 in row r.
    """
    n = classes.shape[0]
    encoding = np.zeros((n, k))
    for row in range(n):
        encoding[row, classes[row]] = 1
    return encoding
def append_1_of_k_encoded_data(D):
    """Add 1-of-k encoded copies of the target arrays in D.data (for scoring).

    For every split present ('Y_train', 'Y_valid', 'Y_test') the encoding
    is stored under the same key with a '_1_of_k' suffix; the number of
    classes k comes from D.info['label_num'].
    """
    k = D.info['label_num']
    for split in ('Y_train', 'Y_valid', 'Y_test'):
        if split in D.data:
            D.data[split + '_1_of_k'] = classes_to_1_of_k(classes=D.data[split], k=k)
def load_1_of_k_data(folder, basename, D):
    """Load pre-computed 1-of-k solution files into D.data.

    For every split already present in D.data, reads
    <folder>/<basename>/<basename>_{train,valid,test}.solution and stores
    it under the corresponding '<split>_1_of_k' key as an int8 array.
    """
    for split, stem in (('Y_train', '_train.solution'),
                        ('Y_valid', '_valid.solution'),
                        ('Y_test', '_test.solution')):
        if split in D.data:
            path = os.path.join(folder, basename, basename + stem)
            D.data[split + '_1_of_k'] = np.loadtxt(path, dtype=np.int8)
def murder_family(pid=None, pgid=None, killall=False, sig=signal.SIGTERM):
    """Send signal to all processes in the process group pgid, apart from pid.
    If killall is true, signal to pid as well.
    SIGTERM - die nicely
    SIGKILL - die right now
    SIGSTOP - uncatchable stop signal - might break queues
    SIGTSTP - makes main thread sleep (with signal.pause()) until sigcont received
    SIGCONT - start anything that is paused or stopped"""
    if pid is None:
        pid = os.getpid()  # pid of current process
    if pgid is None:
        pgid = os.getpgid(pid)  # process group of pid
    # Signal every other process that shares the target process group.
    for ps in psutil.process_iter():
        # NOTE(review): os.getpgid may raise OSError if ps exits between
        # enumeration and this call -- confirm whether that matters here.
        if os.getpgid(ps.pid) == pgid and ps.pid != pid:
            try:
                ps.send_signal(sig)
            except (psutil.NoSuchProcess, psutil.AccessDenied, IOError):
                pass  # process vanished or is protected - best effort
    if killall is True:
        # Finally, signal the designated pid itself.
        try:
            ps = psutil.Process(pid=pid)
            ps.send_signal(sig)
        except (psutil.NoSuchProcess, psutil.AccessDenied, IOError):
            pass
def ensure_2d(an_array):
    """Return an_array as a 2-D numpy array.

    Inputs that come out as a single row (1-D sequences, scalars) are
    transposed into a column vector; 2-D inputs are returned unchanged
    in orientation.
    """
    arr = np.array(an_array, ndmin=2)
    return arr.T if arr.shape[0] == 1 else arr
def csv_file_size(file_name, delimiter=' '):
    """Return (n_row, n_col) of a delimited text file without loading it.

    n_col is taken from the first line, ignoring the empty fields that
    leading/repeated delimiters produce; n_row counts all lines.
    Returns (0, 0) for an empty file.

    Fixes: opens in 'r' instead of 'rU' -- the 'U' flag was deprecated
    and is rejected by Python >= 3.11 (universal newlines are the
    default) -- and reads the file in a single pass instead of two opens.
    """
    n_row = 0
    n_col = 0
    with open(file_name, 'r') as data_file:
        for n_row, line in enumerate(data_file, 1):
            if n_row == 1:
                # Strip empty strings left by repeated delimiters.
                n_col = len([el for el in line.split(delimiter) if el != ''])
    return n_row, n_col
class NotAnArray(object):
    """Array stand-in: records a delimited file's location and dimensions
    without loading its contents into memory."""

    def __init__(self, file_name, delimiter=' '):
        # Remember where the data lives, then probe its size on disk.
        self.file_name = file_name
        self.n_row, self.n_col = csv_file_size(file_name=file_name,
                                               delimiter=delimiter)

    @property
    def shape(self):
        """(rows, columns) -- mirrors numpy's ndarray.shape."""
        return self.n_row, self.n_col
def is_sorted(a_list):
    """Return True if a_list is in non-decreasing order.

    Vacuously True for lists of length 0 or 1. Fix: the original used
    the Python-2-only xrange; the zip-of-adjacent-pairs form works on
    both Python 2 and 3.
    """
    return all(a <= b for a, b in zip(a_list, a_list[1:]))
def waste_cpu_time(a_time):
    """Busy-wait for approximately a_time seconds, keeping the CPU busy.

    Fix: time.clock() was deprecated since Python 3.3 and removed in 3.8;
    use time.perf_counter() when available, falling back to time.clock()
    on Python 2.
    """
    try:
        timer = time.perf_counter  # Python >= 3.3
    except AttributeError:
        timer = time.clock  # Python 2 fallback
    start_time = timer()
    while timer() < start_time + a_time:
        pass  # spin
if __name__ == '__main__':
convert_automl_into_automl_folds_folder('../data/phase_1', '../data/phase_1_cv') | {
"content_hash": "852dca480f2ad0b13d0afa5d1b89f4d8",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 119,
"avg_line_length": 36.61200923787529,
"alnum_prop": 0.5848735255156753,
"repo_name": "jamesrobertlloyd/automl-phase-2",
"id": "9963a0efeebce008000e6eabed757ffae00dfc45",
"size": "15853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "439711"
},
{
"name": "TeX",
"bytes": "153266"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class DurationValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for layout.slider.transition.duration
    (non-negative; min defaults to 0, edit_type to 'arraydraw')."""

    def __init__(
        self, plotly_name="duration", parent_name="layout.slider.transition", **kwargs
    ):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "arraydraw")
        kwargs.setdefault("min", 0)
        super(DurationValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "c7604c650a5782e9645ab7770d341b03",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 33.857142857142854,
"alnum_prop": 0.6033755274261603,
"repo_name": "plotly/plotly.py",
"id": "9dbb57cb487586cb2c9e7cf658689301b30544e3",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/slider/transition/_duration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import read_stsp
from graph import Graph
from node import Node
from edge import Edge
from algoMST import kruskal, prim
from TSPSolve import solveTSP
import sys
# Récupération de la liste de tous les fichiers stsp
TSPpath = "./instances/stsp/"
from os import listdir
from os.path import isfile, join
files = [f for f in listdir(TSPpath) if isfile(join(TSPpath, f))]
# Récupération des valeurs des chemins optimaux
best_ones = {}
with open("./res/bestones.txt") as fd:
for line in fd:
aux = line.split()
best_ones[aux[0]] = int(aux[2])
# Boucle sur tous les fichiers
for file in files:
with open(TSPpath + file, "r") as fd:
header = read_stsp.read_header(fd)
print 'Header: ', header
dim = header['DIMENSION']
edge_weight_format = header['EDGE_WEIGHT_FORMAT']
print "Reading nodes"
nodes = read_stsp.read_nodes(header, fd)
print "Reading edges"
edges = read_stsp.read_edges(header, fd)
edge_list = []
for k in range(dim):
edge_list.append([])
for e in edges:
if edge_weight_format in ['UPPER_ROW', 'LOWER_COL', \
'UPPER_DIAG_ROW', 'LOWER_DIAG_COL']:
edge_list[e[0]].append((e[1], int(e[2])))
else:
edge_list[e[1]].append((e[0], int(e[2])))
for k in range(dim):
edge_list[k].sort()
g = Graph(header['NAME'])
if not nodes:
for i in range(dim):
g.add_node(Node(str(i)))
else:
for i in range(dim):
g.add_node(Node(str(i), nodes[i]))
for i in range(dim):
for pair in edge_list[i]:
if i == pair[0] and pair[1] == 0:
continue
g.add_edge(Edge(g.get_nodes()[i], g.get_nodes()[pair[0]], pair[1]))
min = 2*[sys.maxsize]
travel = [None, None]
for i in range(2):
for node in g.get_nodes():
aux = solveTSP(g, node, i==0)
if(aux.get_weight() < min):
travel[i] = aux
min[i] = aux.get_weight()
filename = file.split('.')[0]
optimal = best_ones[filename]
g.plot_graph(mst=travel[0], title='Prim: ' + str(travel[0].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[0].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_prim")
g.plot_graph(mst=travel[1], title='Kruskal: ' + str(travel[1].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[1].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_kruskal") | {
"content_hash": "dc02ef1f61ce8d193f20b40dea2b013f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 227,
"avg_line_length": 33.39506172839506,
"alnum_prop": 0.5386321626617375,
"repo_name": "Amathlog/MTH6412B",
"id": "6df3de89551607952aae8c1517a732c3a1fdcd0f",
"size": "2734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TP1/finalTPScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30577"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.