"""Provides the speed releasing table."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import json
import urllib
from google.appengine.ext import ndb
from dashboard import alerts
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import table_config
# These represent the revision ranges per milestone. For Clank, this is a
# point id; for Chromium, it is a Chromium commit position.
CLANK_MILESTONES = {
54: (1473196450, 1475824394),
55: (1475841673, 1479536199),
56: (1479546161, 1485025126),
57: (1486119399, 1488528859),
58: (1488538235, 1491977185),
59: (1492542658, 1495792284),
60: (1495802833, 1500610872),
61: (1500628339, 1504160258),
62: (1504294629, 1507887190),
63: (1507887190, 1512141580),
64: (1512154460, 1516341121),
65: (1516353162, 1519951206),
66: (1519963059, 1523653340),
67: (1523629648, None),
}
CHROMIUM_MILESTONES = {
54: (416640, 423768),
55: (433391, 433400),
56: (433400, 445288),
57: (447949, 454466),
58: (454523, 463842),
59: (465221, 474839),
60: (474952, 488392),
61: (488576, 498621),
62: (499187, 508578),
63: (508578, 520719),
64: (520917, 530282),
65: (530373, 540240),
66: (540302, 550534),
67: (554148, None),
}
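# Example (illustrative, not part of the original file): a revision maps to
# milestone M when it falls in [start, end) of M's tuple. Chromium commit
# position 460000, for instance, lies in CHROMIUM_MILESTONES[58] ==
# (454523, 463842), so it belongs to M58. The None in the newest entry is
# filled in later by _UpdateNewestRevInMilestoneDict.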
CURRENT_MILESTONE = max(CHROMIUM_MILESTONES.keys())
class SpeedReleasingHandler(request_handler.RequestHandler):
"""Request handler for requests for speed releasing page."""
def get(self, *args): # pylint: disable=unused-argument
"""Renders the UI for the speed releasing page."""
self.RenderStaticHtml('speed_releasing.html')
def post(self, *args):
"""Returns dynamic data for /speed_releasing.
Args:
args: May contain the table_name for the requested Speed Releasing
report. If args is empty, user is requesting the Speed Releasing
landing page.
Requested parameters:
anomalies: A boolean that is set if the POST request is for the Release
Notes alerts-table. Note, the table_name must also be passed
in (via args) to retrieve the correct set of data.
Outputs:
JSON for the /speed_releasing page XHR request.
"""
anomalies = self.request.get('anomalies')
if args[0] and not anomalies:
self._OutputTableJSON(args[0])
elif args[0]:
self._OutputAnomaliesJSON(args[0])
else:
self._OutputHomePageJSON()
def _OutputTableJSON(self, table_name):
"""Obtains the JSON values that comprise the table.
Args:
table_name: The name of the requested report.
"""
table_entity = ndb.Key('TableConfig', table_name).get()
if not table_entity:
self.response.out.write(json.dumps({'error': 'Invalid table name.'}))
return
rev_a = self.request.get('revA')
rev_b = self.request.get('revB')
milestone_param = self.request.get('m')
if milestone_param:
milestone_param = int(milestone_param)
if milestone_param not in CHROMIUM_MILESTONES:
self.response.out.write(json.dumps({
'error': 'No data for that milestone.'}))
return
master_bot_pairs = _GetMasterBotPairs(table_entity.bots)
rev_a, rev_b, milestone_dict = _GetRevisionsFromParams(
rev_a, rev_b, milestone_param, table_entity, master_bot_pairs)
revisions = [rev_b, rev_a] # In reverse intentionally. This is to support
# the format of the Chrome Health Dashboard which compares 'Current' to
# 'Reference', in that order. The ordering here is for display only.
display_a = _GetDisplayRev(master_bot_pairs, table_entity.tests, rev_a)
display_b = _GetDisplayRev(master_bot_pairs, table_entity.tests, rev_b)
display_milestone_a, display_milestone_b = _GetMilestoneForRevs(
rev_a, rev_b, milestone_dict)
navigation_milestone_a, navigation_milestone_b = _GetNavigationMilestones(
display_milestone_a, display_milestone_b, milestone_dict)
values = {}
self.GetDynamicVariables(values)
self.response.out.write(json.dumps({
'xsrf_token': values['xsrf_token'],
'table_bots': master_bot_pairs,
'table_tests': table_entity.tests,
'table_layout': json.loads(table_entity.table_layout),
'name': table_entity.key.string_id(),
'values': _GetRowValues(revisions, master_bot_pairs,
table_entity.tests),
'units': _GetTestToUnitsMap(master_bot_pairs, table_entity.tests),
'revisions': revisions,
'categories': _GetCategoryCounts(json.loads(table_entity.table_layout)),
'urls': _GetDashboardURLMap(master_bot_pairs, table_entity.tests,
rev_a, rev_b),
'display_revisions': [display_b, display_a], # Similar to revisions.
'display_milestones': [display_milestone_a, display_milestone_b],
'navigation_milestones': [navigation_milestone_a,
navigation_milestone_b]
}))
def _OutputHomePageJSON(self):
"""Returns a list of reports a user has permission to see."""
all_entities = table_config.TableConfig.query().fetch()
list_of_entities = []
for entity in all_entities:
list_of_entities.append(entity.key.string_id())
self.response.out.write(json.dumps({
'show_list': True,
'list': list_of_entities
}))
def _OutputAnomaliesJSON(self, table_name):
"""Obtains the entire alert list specified.
Args:
table_name: The name of the requested report.
"""
table_entity = ndb.Key('TableConfig', table_name).get()
if not table_entity:
self.response.out.write(json.dumps({'error': 'Invalid table name.'}))
return
rev_a = self.request.get('revA')
rev_b = self.request.get('revB')
milestone_param = self.request.get('m')
if milestone_param:
milestone_param = int(milestone_param)
if milestone_param not in CHROMIUM_MILESTONES:
self.response.out.write(json.dumps({
'error': 'No data for that milestone.'}))
return
master_bot_pairs = _GetMasterBotPairs(table_entity.bots)
rev_a, rev_b, _ = _GetRevisionsFromParams(rev_a, rev_b, milestone_param,
table_entity, master_bot_pairs)
revisions = [rev_b, rev_a]
anomalies = _FetchAnomalies(table_entity, rev_a, rev_b)
anomaly_dicts = alerts.AnomalyDicts(anomalies)
values = {}
self.GetDynamicVariables(values)
self.response.out.write(json.dumps({
'xsrf_token': values['xsrf_token'],
'revisions': revisions,
'anomalies': anomaly_dicts
}))
def _GetRevisionsFromParams(rev_a, rev_b, milestone_param, table_entity,
master_bot_pairs):
milestone_dict = _GetUpdatedMilestoneDict(master_bot_pairs,
table_entity.tests)
if milestone_param:
rev_a, rev_b = milestone_dict[milestone_param]
if not rev_a or not rev_b: # If no milestone param and <2 revs passed in.
rev_a, rev_b = _GetEndRevOrCurrentMilestoneRevs(
rev_a, rev_b, milestone_dict)
rev_a, rev_b = _CheckRevisions(rev_a, rev_b)
return rev_a, rev_b, milestone_dict
def _GetMasterBotPairs(bots):
master_bot_pairs = []
for bot in bots:
master_bot_pairs.append(bot.parent().string_id() + '/' + bot.string_id())
return master_bot_pairs
def _GetRowValues(revisions, bots, tests):
"""Builds a nested dict organizing values by rev/bot/test.
Args:
revisions: The revisions to get values for.
bots: The Master/Bot pairs the tables cover.
tests: The tests that go in each table.
Returns:
A dict with the following structure:
revisionA: {
bot1: {
test1: value,
test2: value,
...
}
...
}
revisionB: {
...
}
"""
row_values = {}
for rev in revisions:
bot_values = {}
for bot in bots:
test_values = {}
for test in tests:
test_values[test] = _GetRow(bot, test, rev)
bot_values[bot] = test_values
row_values[rev] = bot_values
return row_values
def _GetTestToUnitsMap(bots, tests):
"""Grabs the units on each test for only one bot."""
units_map = {}
if bots:
bot = bots[0]
for test in tests:
test_path = bot + '/' + test
test_entity = utils.TestMetadataKey(test_path).get()
if test_entity:
units_map[test] = test_entity.units
return units_map
def _GetRow(bot, test, rev):
test_path = bot + '/' + test
test_key = utils.TestKey(test_path)
row_key = utils.GetRowKey(test_key, rev)
row = row_key.get()
if row:
return row.value
return None
def _CheckRevisions(rev_a, rev_b):
"""Checks to ensure the revisions are valid."""
rev_a = int(rev_a)
rev_b = int(rev_b)
if rev_b < rev_a:
rev_a, rev_b = rev_b, rev_a
return rev_a, rev_b
def _GetCategoryCounts(layout):
  categories = collections.defaultdict(int)
for test in layout:
categories[layout[test][0]] += 1
return categories
def _GetDashboardURLMap(bots, tests, rev_a, rev_b):
"""Get the /report links appropriate for the bot and test."""
url_mappings = {}
for bot in bots:
for test in tests:
test_parts = test.split('/')
bot_parts = bot.split('/')
# Checked should be the last part of the test path, if available.
checked = 'all'
if len(test_parts) > 1:
        checked = test_parts[-1]
url_args = {
'masters': bot_parts[0],
'bots': bot_parts[1],
'tests': test,
'checked': checked,
'start_rev': rev_a,
'end_rev': rev_b,
}
url_mappings[bot + '/' + test] = '?%s' % urllib.urlencode(url_args)
return url_mappings
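# Example (illustrative, not part of the original file): for a hypothetical
# bot 'ChromiumPerf/linux' and test 'speedometer/Total', the key
# 'ChromiumPerf/linux/speedometer/Total' maps to a query string like
#   ?masters=ChromiumPerf&bots=linux&tests=speedometer%2FTotal
#    &checked=Total&start_rev=433400&end_rev=445288
# (parameter order may vary; urllib.urlencode escapes '/' as %2F).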
def _GetDisplayRev(bots, tests, rev):
"""Creates a user friendly commit position to display.
For V8 and ChromiumPerf masters, this will just be the passed in rev.
"""
if bots and tests:
test_path = bots[0] + '/' + tests[0]
test_key = utils.TestKey(test_path)
row_key = utils.GetRowKey(test_key, rev)
row = row_key.get()
if row and hasattr(row, 'r_commit_pos'): # Rule out masters like V8
if rev != row.r_commit_pos: # Rule out ChromiumPerf
if hasattr(row, 'a_default_rev') and hasattr(row, row.a_default_rev):
return row.r_commit_pos + '-' + getattr(row, row.a_default_rev)[:3]
return rev
def _UpdateNewestRevInMilestoneDict(bots, tests, milestone_dict):
"""Updates the most recent rev in the milestone dict.
The global milestone dicts are declared with 'None' for the end of the
current milestone range. If we might be using the last milestone, update
the end of the current milestone range to be the most recent revision.
"""
if bots and tests:
test_path = bots[0] + '/' + tests[0]
test_key = utils.TestKey(test_path)
    # Need to set this request as privileged in order to bypass datastore
# hooks. This is okay here because table_config is internal_only protected
# and will ensure that only the correct users can see internal_only data.
datastore_hooks.SetSinglePrivilegedRequest()
query = graph_data.Row.query()
query = query.filter(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
query = query.order(-graph_data.Row.revision)
row = query.get()
if row:
milestone_dict[CURRENT_MILESTONE] = (
milestone_dict[CURRENT_MILESTONE][0], row.revision)
else:
milestone_dict[CURRENT_MILESTONE] = (
milestone_dict[CURRENT_MILESTONE][0],
milestone_dict[CURRENT_MILESTONE][0])
def _GetEndOfMilestone(rev, milestone_dict):
"""Finds the end of the milestone that 'rev' is in.
Check that 'rev' is between [beginning, end) of the tuple. In case an end
'rev' is passed in, return corresponding beginning rev. But since revs can
double as end and beginning, favor returning corresponding end rev if 'rev'
is a beginning rev.
"""
beginning_rev = 0
for _, value_tuple in milestone_dict.items():
if value_tuple[0] <= int(rev) < value_tuple[1]: # 'rev' is a beginning rev.
return value_tuple[1] # Favor by returning here.
if value_tuple[1] == int(rev): # 'rev' is an end rev.
beginning_rev = value_tuple[0]
if beginning_rev:
return beginning_rev
return milestone_dict[CURRENT_MILESTONE][1]
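# Example (illustrative, not part of the original file): with the Chromium
# dict above, rev 508578 is both the end of M62 (499187, 508578) and the
# beginning of M63 (508578, 520719). The half-open [start, end) check
# matches M63 and returns 520719 immediately, favoring the beginning-rev
# interpretation exactly as the docstring describes.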
def _GetEndRevOrCurrentMilestoneRevs(rev_a, rev_b, milestone_dict):
"""If one/both of the revisions are None, change accordingly.
If both are None, return most recent milestone, present.
If one is None, return the other, end of that milestone.
"""
if not rev_a and not rev_b:
return milestone_dict[CURRENT_MILESTONE]
return (rev_a or rev_b), _GetEndOfMilestone((rev_a or rev_b), milestone_dict)
def _GetUpdatedMilestoneDict(master_bot_pairs, tests):
"""Gets the milestone_dict with the newest rev.
Checks to see which milestone_dict to use (Clank/Chromium), and updates
the 'None' to be the newest revision for one of the specified tests.
"""
masters = set([m.split('/')[0] for m in master_bot_pairs])
if 'ClankInternal' in masters:
milestone_dict = CLANK_MILESTONES.copy()
else:
milestone_dict = CHROMIUM_MILESTONES.copy()
# If we might access the end of the milestone_dict, update it to
# be the newest revision instead of 'None'.
_UpdateNewestRevInMilestoneDict(master_bot_pairs,
tests, milestone_dict)
return milestone_dict
def _FetchAnomalies(table_entity, rev_a, rev_b):
"""Finds anomalies that have the given benchmark/master, in a given range."""
if table_entity.bots and table_entity.tests:
master_list = []
benchmark_list = []
for bot in table_entity.bots:
if bot.parent().string_id() not in master_list:
master_list.append(bot.parent().string_id())
for test in table_entity.tests:
if test.split('/')[0] not in benchmark_list:
benchmark_list.append(test.split('/')[0])
else:
return []
anomalies_futures = []
for benchmark in benchmark_list:
for master in master_list:
anomalies_futures.append(anomaly.Anomaly.QueryAsync(
min_end_revision=rev_a,
max_end_revision=rev_b,
test_suite_name=benchmark,
master_name=master))
ndb.Future.wait_all(anomalies_futures)
all_anomalies = [future.get_result()[0] for future in anomalies_futures]
# Flatten list of lists.
all_anomalies = [a for future_list in all_anomalies for a in future_list]
anomalies = []
for anomaly_entity in all_anomalies:
for test in table_entity.tests:
if test in utils.TestPath(anomaly_entity.test):
anomalies.append(anomaly_entity)
break
anomalies = [a for a in anomalies if not a.is_improvement]
return anomalies
def _GetMilestoneForRevs(rev_a, rev_b, milestone_dict):
"""Determines which milestone each revision is part of. Returns a tuple."""
rev_a_milestone = CURRENT_MILESTONE
rev_b_milestone = CURRENT_MILESTONE
for key, milestone in milestone_dict.items():
if milestone[0] <= rev_a < milestone[1]:
rev_a_milestone = key
if milestone[0] < rev_b <= milestone[1]:
rev_b_milestone = key
return rev_a_milestone, rev_b_milestone
def _GetNavigationMilestones(rev_a_milestone, rev_b_milestone, milestone_dict):
"""Finds the next/previous milestones for navigation, if available.
Most often, the milestones will be the same (e.g. the report for M57 will
have both rev_a_milestone and rev_b_milestone as 57; the navigation in this
case is 56 for back and 58 for forward). If the milestone is at either the
lower or upper bounds of the milestones that we support, return None (so
users can't navigate to an invalid milestone). In the case that the
revisions passed in cover multiple milestones (e.g. a report from
M55 -> M57), the correct navigation is 54 (back) and 57 (forward).
"""
min_milestone = min(milestone_dict)
if rev_a_milestone == min_milestone:
navigation_milestone_a = None
else:
navigation_milestone_a = rev_a_milestone - 1
if rev_b_milestone == CURRENT_MILESTONE:
navigation_milestone_b = None
elif rev_a_milestone != rev_b_milestone: # In the multiple milestone case.
navigation_milestone_b = rev_b_milestone
else:
navigation_milestone_b = rev_b_milestone + 1
return navigation_milestone_a, navigation_milestone_b
| {
"content_hash": "c8bb3529b478e9f827e790fe7883557a",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 80,
"avg_line_length": 34.672916666666666,
"alnum_prop": 0.6596166556510245,
"repo_name": "endlessm/chromium-browser",
"id": "8edd09a36b537ff60063560c74420153dc9fec50",
"size": "16806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/dashboard/dashboard/speed_releasing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
class PumpScheduler(object):
"""A wrapper for managing pump scheduling."""
def __init__(self, clock, sleep_windows):
"""Creates new PumpScheduler wrapper.
Args:
clock: A clock interface
sleep_windows: A tuple of tuples, each consisting of a sleep
window. Time zone should match that of the clock interface.
"""
self._clock = clock
self._sleep_windows = sleep_windows
def is_running_pump_allowed(self):
"""Returns True if OK to run pump, otherwise False."""
current_hour = self._clock.now().hour
for sleep_hour, wake_hour in self._sleep_windows:
if wake_hour < sleep_hour:
if current_hour >= sleep_hour or current_hour < wake_hour:
return False
else:
if sleep_hour <= current_hour < wake_hour:
return False
return True
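# --- Example (sketch, not part of the original file) ---
# A minimal usage sketch, assuming a hypothetical clock object exposing
# now() -> datetime. The window (22, 8) wraps past midnight, exercising
# the wake_hour < sleep_hour branch above.
#
#   import datetime
#
#   class FixedClock(object):
#     def now(self):
#       return datetime.datetime(2017, 1, 1, 23, 0)  # 11 PM
#
#   scheduler = PumpScheduler(FixedClock(), ((22, 8),))
#   scheduler.is_running_pump_allowed()  # False: 23:00 falls in 22:00-08:00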
| {
"content_hash": "88696e7f0306a18f50b95e35db0fed9f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 34.925925925925924,
"alnum_prop": 0.5673382820784729,
"repo_name": "mtlynch/GreenPiThumb",
"id": "a1a3d74318f28f78213c82d178a9e96e097cd54e",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greenpithumb/pump_scheduler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81302"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
"""Main Nvim interface."""
import os
from msgpack import ExtType
from .buffer import Buffer
from .common import (DecodeHook, Remote, RemoteMap, RemoteSequence,
SessionFilter, SessionHook, walk)
from .tabpage import Tabpage
from .window import Window
from ..compat import IS_PYTHON3
__all__ = ('Nvim',)
os_chdir = os.chdir
class Nvim(object):
"""Class that represents a remote Nvim instance.
    This class is the main entry point to the Nvim remote API; it is a thin
    wrapper around Session instances.
The constructor of this class must not be called directly. Instead, the
`from_session` class method should be used to create the first instance
from a raw `Session` instance.
Subsequent instances for the same session can be created by calling the
`with_hook` instance method and passing a SessionHook instance. This can
be useful to have multiple `Nvim` objects that behave differently without
one affecting the other.
"""
@classmethod
def from_session(cls, session):
"""Create a new Nvim instance for a Session instance.
This method must be called to create the first Nvim instance, since it
queries Nvim metadata for type information and sets a SessionHook for
creating specialized objects from Nvim remote handles.
"""
session.error_wrapper = lambda e: NvimError(e[1])
channel_id, metadata = session.request(b'vim_get_api_info')
encoding = session.request(b'vim_get_option', b'encoding')
session._async_session._msgpack_stream.set_packer_encoding(encoding)
if IS_PYTHON3:
hook = DecodeHook()
# decode all metadata strings for python3
metadata = walk(hook.from_nvim, metadata, None, None, None)
types = {
metadata['types']['Buffer']['id']: Buffer,
metadata['types']['Window']['id']: Window,
metadata['types']['Tabpage']['id']: Tabpage,
}
return cls(session, channel_id, metadata).with_hook(ExtHook(types))
def __init__(self, session, channel_id, metadata):
"""Initialize a new Nvim instance. This method is module-private."""
self._session = session
self.channel_id = channel_id
self.metadata = metadata
self.vars = RemoteMap(session, 'vim_get_var', 'vim_set_var')
self.vvars = RemoteMap(session, 'vim_get_vvar', None)
self.options = RemoteMap(session, 'vim_get_option', 'vim_set_option')
self.buffers = RemoteSequence(session, 'vim_get_buffers')
self.windows = RemoteSequence(session, 'vim_get_windows')
self.tabpages = RemoteSequence(session, 'vim_get_tabpages')
self.current = Current(session)
self.error = NvimError
def with_hook(self, hook):
"""Initialize a new Nvim instance."""
return Nvim(SessionFilter(self.session, hook), self.channel_id,
self.metadata)
@property
def session(self):
"""Return the Session or SessionFilter for a Nvim instance."""
return self._session
def ui_attach(self, width, height, rgb):
"""Register as a remote UI.
After this method is called, the client will receive redraw
notifications.
"""
return self._session.request('ui_attach', width, height, rgb)
def ui_detach(self):
"""Unregister as a remote UI."""
return self._session.request('ui_detach')
def ui_try_resize(self, width, height):
"""Notify nvim that the client window has resized.
If possible, nvim will send a redraw request to resize.
"""
return self._session.request('ui_try_resize', width, height)
def subscribe(self, event):
"""Subscribe to a Nvim event."""
return self._session.request('vim_subscribe', event)
def unsubscribe(self, event):
"""Unsubscribe to a Nvim event."""
return self._session.request('vim_unsubscribe', event)
def command(self, string, async=False):
"""Execute a single ex command."""
return self._session.request('vim_command', string, async=async)
def command_output(self, string):
"""Execute a single ex command and return the output."""
return self._session.request('vim_command_output', string)
def eval(self, string, async=False):
"""Evaluate a vimscript expression."""
return self._session.request('vim_eval', string, async=async)
def strwidth(self, string):
"""Return the number of display cells `string` occupies.
Tab is counted as one cell.
"""
return self._session.request('vim_strwidth', string)
def list_runtime_paths(self):
"""Return a list of paths contained in the 'runtimepath' option."""
return self._session.request('vim_list_runtime_paths')
def foreach_rtp(self, cb):
"""Invoke `cb` for each path in 'runtimepath'.
        Call the given callable for each path in 'runtimepath' until either
        the callable returns something other than None, an exception is
        raised, or there are no more paths. (Unlike Vim's foreach_rtp, this
        implementation stops iterating without propagating the callable's
        return value.)
"""
for path in self._session.request('vim_list_runtime_paths'):
try:
if cb(path) is not None:
break
except Exception:
break
def chdir(self, dir_path):
"""Run os.chdir, then all appropriate vim stuff."""
os_chdir(dir_path)
return self._session.request('vim_change_directory', dir_path)
def feedkeys(self, keys, options='', escape_csi=True):
"""Push `keys` to Nvim user input buffer.
Options can be a string with the following character flags:
- 'm': Remap keys. This is default.
- 'n': Do not remap keys.
- 't': Handle keys as if typed; otherwise they are handled as if coming
from a mapping. This matters for undo, opening folds, etc.
"""
return self._session.request('vim_feedkeys', keys, options, escape_csi)
def input(self, bytes):
"""Push `bytes` to Nvim low level input buffer.
Unlike `feedkeys()`, this uses the lowest level input buffer and the
call is not deferred. It returns the number of bytes actually
        written (which can be less than what was requested if the buffer is
full).
"""
return self._session.request('vim_input', bytes)
def replace_termcodes(self, string, from_part=False, do_lt=True,
special=True):
r"""Replace any terminal code strings by byte sequences.
The returned sequences are Nvim's internal representation of keys,
for example:
<esc> -> '\x1b'
<cr> -> '\r'
<c-l> -> '\x0c'
<up> -> '\x80ku'
The returned sequences can be used as input to `feedkeys`.
"""
return self._session.request('vim_replace_termcodes', string,
from_part, do_lt, special)
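    # Example (sketch, not part of the original file): combining
    # replace_termcodes with feedkeys to send a literal Escape followed by
    # ':q<CR>' without remapping (the 'n' flag):
    #
    #   keys = nvim.replace_termcodes('<Esc>:q<CR>')
    #   nvim.feedkeys(keys, 'n')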
def out_write(self, msg):
"""Print `msg` as a normal message."""
return self._session.request('vim_out_write', msg)
def err_write(self, msg):
"""Print `msg` as an error message."""
return self._session.request('vim_err_write', msg)
def quit(self, quit_command='qa!'):
"""Send a quit command to Nvim.
By default, the quit command is 'qa!' which will make Nvim quit without
saving anything.
"""
try:
self.command(quit_command)
except IOError:
# sending a quit command will raise an IOError because the
# connection is closed before a response is received. Safe to
# ignore it.
pass
class Current(object):
"""Helper class for emulating vim.current from python-vim."""
def __init__(self, session):
self._session = session
self.range = None
@property
def line(self):
return self._session.request('vim_get_current_line')
@line.setter
def line(self, line):
return self._session.request('vim_set_current_line', line)
@property
def buffer(self):
return self._session.request('vim_get_current_buffer')
@buffer.setter
def buffer(self, buffer):
return self._session.request('vim_set_current_buffer', buffer)
@property
def window(self):
return self._session.request('vim_get_current_window')
@window.setter
def window(self, window):
return self._session.request('vim_set_current_window', window)
@property
def tabpage(self):
return self._session.request('vim_get_current_tabpage')
@tabpage.setter
def tabpage(self, tabpage):
return self._session.request('vim_set_current_tabpage', tabpage)
class ExtHook(SessionHook):
def __init__(self, types):
self.types = types
super(ExtHook, self).__init__(from_nvim=self.from_ext,
to_nvim=self.to_ext)
def from_ext(self, obj, session, method, kind):
if type(obj) is ExtType:
cls = self.types[obj.code]
return cls(session, (obj.code, obj.data))
return obj
def to_ext(self, obj, session, method, kind):
if isinstance(obj, Remote):
return ExtType(*obj.code_data)
return obj
class NvimError(Exception):
pass
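# --- Example (sketch, not part of the original file) ---
# A minimal end-to-end sketch using only names defined above; obtaining the
# raw `session` object is assumed to happen elsewhere (e.g. via the
# package's session helpers).
#
#   nvim = Nvim.from_session(session)
#   nvim.command('vsplit')           # run an ex command
#   nvim.current.line = 'hello'      # set the current line
#   nvim.vars['my_var'] = [1, 2, 3]  # set g:my_var
#   cells = nvim.strwidth('abc')     # number of display cells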
| {
"content_hash": "cc2229ab4588310eb643012fe5e70eb1",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 79,
"avg_line_length": 34.204301075268816,
"alnum_prop": 0.620873939012889,
"repo_name": "0x90sled/python-client",
"id": "f12287a3dc981c17991458e4eec4f93d8168be4b",
"size": "9543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neovim/api/nvim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "142576"
},
{
"name": "Shell",
"bytes": "330"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class AutotypenumbersValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="autotypenumbers", parent_name="layout.xaxis", **kwargs
):
super(AutotypenumbersValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["convert types", "strict"]),
**kwargs,
)
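# --- Example (sketch, not part of the original file) ---
# A hedged usage sketch: plotly's enumerated validators expose
# validate_coerce(), which returns the value when it is one of the allowed
# enum values and raises ValueError otherwise (behavior assumed from
# _plotly_utils.basevalidators.EnumeratedValidator).
#
#   v = AutotypenumbersValidator()
#   v.validate_coerce("strict")         # -> "strict"
#   v.validate_coerce("convert types")  # -> "convert types"
#   v.validate_coerce("bogus")          # raises ValueError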
| {
"content_hash": "f406d1b82051b8514161f940ed5db2f4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 36.714285714285715,
"alnum_prop": 0.6186770428015564,
"repo_name": "plotly/plotly.py",
"id": "5862fc718f33c7af2d9d3e1ec615a7df0dfae625",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/xaxis/_autotypenumbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Tests for the volume system implementation using pyvshadow."""
import unittest
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import vshadow_path_spec
from dfvfs.volume import vshadow_volume_system
from tests import test_lib as shared_test_lib
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
class VShadowVolumeSystemTest(shared_test_lib.BaseTestCase):
"""The unit test for the Volume Shadow Snapshot (VSS) volume system object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
path_spec = os_path_spec.OSPathSpec(location=test_file)
path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
self._vshadow_path_spec = vshadow_path_spec.VShadowPathSpec(
location=u'/', parent=path_spec)
# qcowmount test_data/vsstest.qcow2 fuse/
# vshadowinfo fuse/qcow1
#
# Volume Shadow Snapshot information:
# Number of stores: 2
#
# Store: 1
# ...
# Identifier : 600f0b69-5bdf-11e3-9d6c-005056c00008
# Shadow copy set ID : 0a4e3901-6abb-48fc-95c2-6ab9e38e9e71
# Creation time : Dec 03, 2013 06:35:09.736378700 UTC
# Shadow copy ID : 4e3c03c2-7bc6-4288-ad96-c1eac1a55f71
# Volume size : 1073741824 bytes
# Attribute flags : 0x00420009
#
# Store: 2
# Identifier : 600f0b6d-5bdf-11e3-9d6c-005056c00008
# Shadow copy set ID : 8438a0ee-0f06-443b-ac0c-2905647ca5d6
# Creation time : Dec 03, 2013 06:37:48.919058300 UTC
# Shadow copy ID : 18f1ac6e-959d-436f-bdcc-e797a729e290
# Volume size : 1073741824 bytes
# Attribute flags : 0x00420009
def testIterateVolumes(self):
"""Test the iterate volumes functionality."""
volume_system = vshadow_volume_system.VShadowVolumeSystem()
volume_system.Open(self._vshadow_path_spec)
self.assertEqual(volume_system.number_of_volumes, 2)
volume = volume_system.GetVolumeByIndex(1)
self.assertIsNotNone(volume)
self.assertEqual(volume.number_of_extents, 1)
self.assertEqual(volume.number_of_attributes, 4)
self.assertEqual(volume.identifier, u'vss2')
expected_value = u'600f0b6d-5bdf-11e3-9d6c-005056c00008'
volume_attribute = volume.GetAttribute(u'identifier')
self.assertIsNotNone(volume_attribute)
self.assertEqual(volume_attribute.value, expected_value)
expected_value = u'18f1ac6e-959d-436f-bdcc-e797a729e290'
volume_attribute = volume.GetAttribute(u'copy_identifier')
self.assertIsNotNone(volume_attribute)
self.assertEqual(volume_attribute.value, expected_value)
expected_value = u'8438a0ee-0f06-443b-ac0c-2905647ca5d6'
volume_attribute = volume.GetAttribute(u'copy_set_identifier')
self.assertIsNotNone(volume_attribute)
self.assertEqual(volume_attribute.value, expected_value)
expected_value = 130305262689190583
volume_attribute = volume.GetAttribute(u'creation_time')
self.assertIsNotNone(volume_attribute)
self.assertEqual(volume_attribute.value, expected_value)
volume = volume_system.GetVolumeByIndex(7)
self.assertIsNone(volume)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "c5640a16d61f7236956a88211fbe59f7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 37.65909090909091,
"alnum_prop": 0.7027761013880507,
"repo_name": "dc3-plaso/dfvfs",
"id": "3807ae286d28267affdb5009e38ddc098b97f165",
"size": "3356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/volume/vshadow_volume_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "1397977"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
} |
from base import BaseGraph
from reg import RegressionLine
from ..graphics import Canvas
from ..graphics.shapes import Circle, Line, Rectangle, Sector, Path, Square
from ..graphics.group import Group, Grouping
from ..graphics.utils import ViewBox
from ..graphics.color import white
from ..utils.struct import Vector as V, Matrix
from ..utils.dictionary import DefaultDictionary
from re import match
from copy import deepcopy
from math import pi
class GraphCanvas (Canvas):
def __init__ (self, **attr):
Canvas.__init__ (self, **attr)
        if 'regLine' in attr:
self.regLine = attr['regLine']
else:
self.regLine = True
def graph (self):
parent = self
while not isinstance (parent, BaseGraph):
parent = parent.parent
return parent
def changeSize (self, dx, dy):
self.width += dx
self.height += dy
def move (self, dx, dy):
self.x += dx
self.y += dy
def makeTransform (self, minX, maxX, minY, maxY):
matrix1 = Matrix (3, 3)
matrix1.set (-minX, 0, 2)
matrix1.set (-minY, 1, 2)
currentRangeX = float (maxX - minX)
currentRangeY = float (maxY - minY)
matrix2 = Matrix (3, 3)
matrix2.set (self.width / currentRangeX, 0, 0)
matrix2.set (self.height / currentRangeY, 1, 1)
matrix3 = Matrix (3, 3)
matrix3.set (self.height, 1, 2)
matrix3.set (-1.0, 1, 1)
return (matrix3 * (matrix2 * matrix1))
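    # Note (illustrative, not part of the original file): makeTransform
    # composes three 3x3 affine matrices: matrix1 translates the data so
    # (minX, minY) sits at the origin, matrix2 scales the data range to the
    # canvas width/height, and matrix3 flips the Y axis (SVG's Y grows
    # downward) via y -> height - y. Hence (minX, minY) lands at
    # (0, height) and (maxX, maxY) at (width, 0).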
def setSVG (self):
attr = Canvas.setSVG (self)
attr['viewBox'] = ViewBox (0, 0, attr['width'], attr['height'])
return attr
class ScatterCanvas (GraphCanvas):
def __init__ (self, **attr):
GraphCanvas.__init__ (self, **attr)
self.data = Grouping ()
self.dataPoints = Group (id = 'data')
def drawPoint (self, name, x, y):
settings = self.graph ().settings
self.drawPointAux (self.dataPoints, name, x, y, settings.markerType, settings.markerSize)
def drawPointAux (self, group, name, x, y, pointType, pointSize):
if pointType == 'circle':
m = Circle (radius = pointSize, x = x, y = y)
elif pointType == 'square':
half = pointSize / 2.0
m = Square (pointSize,
x = x,
y = y,
worldDeltaX = -half,
worldDeltaY = -half,
absoluteSize = True)
if name:
m.xml['has-tooltip'] = True
m.xml['tooltip-text'] = name
m.xml['has-highlight'] = True
m.xml['highlight-fill'] = 'red'
group.draw (m)
def addColor (self):
settings = self.graph ().settings
if settings.colorScheme == 'tripleAxis':
self.setTripleColor (self.dataPoints, settings.color1, settings.color2, settings.color3)
elif settings.colorScheme == 'solid':
self.setSolidColor (self.dataPoints, settings.color1)
def setSolidColor (self, group, color):
for item in group:
item.style.fill = color
item.xml['highlight-fill'] = color.interpolate (white, .35)
def setTripleColor (self, group, color1, color2, color3):
for item in group:
perX = (item.x - self.minX) / (self.maxX - self.minX)
perY = (item.y - self.minY) / (self.maxY - self.minY)
c1 = color2.interpolate (color3, perX)
c2 = color2.interpolate (color1, perY)
per = (perY + (1 - perX)) / 2.0
c = c1.interpolate (c2, per)
item.style.fill = c
item.xml['highlight-fill'] = c.interpolate (white, .35)
def setBounds (self):
self.xlist = []
self.ylist = []
for child in self.dataPoints:
self.xlist.append (child.x)
self.ylist.append (child.y)
minX = min (self.xlist)
maxX = max (self.xlist)
minY = min (self.ylist)
maxY = max (self.ylist)
rangeX = maxX - minX
rangeY = maxY - minY
self.minX = minX - rangeX * .05
self.maxX = maxX + rangeX * .05
self.minY = minY - rangeY * .05
self.maxY = maxY + rangeY * .05
self.xml['minX'] = self.minX
self.xml['maxX'] = self.maxX
self.xml['minY'] = self.minY
self.xml['maxY'] = self.maxY
def setRegLine (self):
settings = self.graph().settings
if self.regLine:
self.regLineAux (self.data, self.xlist, self.ylist,
(self.minX, self.maxX), (self.minY, self.maxY),
settings.regLineColor, settings.regLineWidth)
def regLineAux (self, group, xlist, ylist, xbounds, ybounds, color, width):
r = RegressionLine (xlist, ylist, xbounds, ybounds)
r.style.strokeWidth = width
r.style.strokeColor = color
group.draw (r)
def finalize (self):
self.draw (self.data)
if len (self.dataPoints) > 0:
self.data.draw (self.dataPoints)
self.setRegLine ()
self.addColor ()
self.data.transform = self.makeTransform (self.minX, self.maxX, self.minY, self.maxY)
class DoubleScatterCanvas (ScatterCanvas):
def __init__ (self, **attr):
ScatterCanvas.__init__ (self, **attr)
self.data2 = Grouping ()
self.dataPoints2 = Group (id = 'data2')
def drawPoint (self, name, x, y):
settings = self.graph ().settings
self.drawPointAux (self.dataPoints, name, x, y, settings.g1MarkerType, settings.g1MarkerSize)
def drawPoint2 (self, name, x, y):
settings = self.graph ().settings
self.drawPointAux (self.dataPoints2, name, x, y, settings.g2MarkerType, settings.g2MarkerSize)
def addColor (self):
settings = self.graph ().settings
if settings.g1ColorScheme == 'tripleAxis':
self.setTripleColor (self.dataPoints, settings.g1Color1, settings.g1Color2, settings.g1Color3)
elif settings.g1ColorScheme == 'solid':
self.setSolidColor (self.dataPoints, settings.g1Color1)
if settings.g2ColorScheme == 'tripleAxis':
self.setTripleColor (self.dataPoints2, settings.g2Color1, settings.g2Color2, settings.g2Color3)
elif settings.g2ColorScheme == 'solid':
self.setSolidColor (self.dataPoints2, settings.g2Color1)
#for child in self.data:
# child.style.fill = self.color1
#for child in self.data2:
# child.style.fill = self.color2
def setBounds (self):
self.xlist = []
self.x2list = []
self.ylist = []
self.y2list = []
for child in self.dataPoints:
self.xlist.append (child.x)
self.ylist.append (child.y)
for child in self.dataPoints2:
self.x2list.append (child.x)
self.y2list.append (child.y)
minX = min (self.xlist + self.x2list)
maxX = max (self.xlist + self.x2list)
minY = min (self.ylist)
maxY = max (self.ylist)
minY2 = min (self.y2list)
maxY2 = max (self.y2list)
rangeX = maxX - minX
rangeY = maxY - minY
rangeY2 = maxY2 - minY2
self.minX = minX - rangeX * .05
self.maxX = maxX + rangeX * .05
        self.minY = minY - rangeY * .05
self.maxY = maxY + rangeY * .05
self.minY2 = minY2 - rangeY2 * .05
self.maxY2 = maxY2 + rangeY2 * .05
self.xml['minX'] = self.minX
self.xml['maxX'] = self.maxX
self.xml['minY'] = self.minY
self.xml['maxY'] = self.maxY
self.xml['minY2'] = self.minY2
self.xml['maxY2'] = self.maxY2
def setRegLine (self):
settings = self.graph ().settings
if settings.g1RegLine:
self.regLineAux (self.data, self.xlist, self.ylist,
(self.minX, self.maxX), (self.minY, self.maxY),
settings.g1RegLineColor, settings.g1RegLineWidth)
def setRegLine2 (self):
settings = self.graph ().settings
if settings.g2RegLine:
self.regLineAux (self.data2, self.x2list, self.y2list,
(self.minX, self.maxX), (self.minY2, self.maxY2),
settings.g2RegLineColor, settings.g2RegLineWidth)
def finalize (self):
ScatterCanvas.finalize (self)
self.draw (self.data2)
if len (self.dataPoints2) > 0:
self.data2.draw(self.dataPoints2)
self.setRegLine2 ()
self.data2.transform = self.makeTransform (self.minX, self.maxX, self.minY2, self.maxY2)
class LineCanvas (GraphCanvas):
def __init__ (self, **attr):
GraphCanvas.__init__ (self, **attr)
self.data = Grouping ()
self.dataPoints = Group (id = 'data')
self.colors = {}
self.seriesLength = 1
def setBounds (self):
ylist = []
for group in self.data:
points = group.getElementByClassName ('point-group')
for child in points:
ylist.append (child.y)
self.minX = 0
self.maxX = self.seriesLength - 1
self.minY = min (ylist)
self.maxY = max (ylist)
def addColor (self):
for group in self.data:
try:
color = self.colors[group.id]
except KeyError:
color = 'black'
for child in group.getElementByClassName ('point-group'):
child.style.fill = color
group.getElementByName ('path').style.strokeColor = color
def addData (self, name, *data):
group = Group (id = name)
pointGroup = Group (className = 'point-group')
path = None
if len (data) > self.seriesLength:
self.seriesLength = len (data)
for i, val in enumerate (data):
if val is None:
continue
if not path:
path = Path ()
path.move (i, val)
else:
path.line (i, val)
c = Circle (radius = 2, x = i, y = val)
pointGroup.draw (c)
if path:
path.style.fill = 'none'
group.draw (path)
if len (pointGroup) > 0:
group.draw (pointGroup)
self.data.draw (group)
def finalize (self):
self.draw (self.data)
if len (self.dataPoints) > 0:
self.draw (self.dataPoints)
self.addColor ()
self.data.transform = self.makeTransform (self.minX, self.maxX, self.minY, self.maxY)
class BarCanvas (GraphCanvas):
def __init__ (self, **attr):
GraphCanvas.__init__ (self, **attr)
self.data = Group (id = 'data')
self.counter = 0.0
self.lastBar = 0
self.colors = {}
def addBar (self, group, name, val):
settings = self.graph ().settings
rect = Rectangle (x = self.counter, y = 0, height = val, width = 1)
self.data.draw (rect)
rect.xml['has-tooltip'] = True
rect.xml['name'] = name
rect.xml['group'] = group
if group and name:
rect.xml['data'] = str(group) + ': ' + str(name)
elif group:
rect.xml['data'] = str(group)
elif name:
rect.xml['data'] = str(name)
else:
rect.xml['data'] = None
        rounded = match (r'-?\d*(\.\d{2})?', str (val))
        strVal = rounded.group (0)
rect.xml['tooltip-text'] = 'Value: ' + strVal
self.lastBar = self.counter + settings.barWidth
self.counter += (settings.barWidth + settings.barSpacing)
def addSpace (self):
self.counter += self.graph ().settings.blankSpace
def setBounds (self):
ylist = []
for child in self.data:
ylist.append (child.height)
if len (ylist):
minY = min (ylist)
maxY = max (ylist)
rangeY = maxY - minY
if rangeY:
self.minY = minY - rangeY * .05
self.maxY = maxY + rangeY * .05
else:
self.minY = minY - 1.0
self.maxY = maxY + 1.0
if self.lastBar:
self.minX = 0
self.maxX = self.lastBar
else:
self.minX = 0
self.maxX = 1
else:
self.minX = 0.0
self.maxX = 1.0
self.minY = 0.0
self.maxY = 1.0
self.xml['minX'] = self.minX
self.xml['maxX'] = self.maxX
self.xml['minY'] = self.minY
self.xml['maxY'] = self.maxY
self.barHeights ()
def addColor (self):
for child in self.data:
try:
key = child.xml['name']
child.style.fill = self.colors[key]
except KeyError:
child.style.fill = self.graph ().settings.barColor
if child.xml['has-tooltip']:
child.xml['has-highlight'] = True
child.xml['default-fill'] = child.style.fill
child.xml['highlight-fill'] = child.style.fill.interpolate (white, .35)
def barHeights (self):
for child in self.data:
child.y = self.minY
child.height -= self.minY
def finalize (self):
self.addColor ()
if len (self.data) > 0:
self.draw (self.data)
self.data.transform = self.makeTransform (self.minX, self.maxX, self.minY, self.maxY)
class HorizontalBarCanvas (BarCanvas):
def addBar (self, group, name, val):
settings = self.graph ().settings
rect = Rectangle (y = self.counter, x = 0, height = 1, width = val)
self.data.draw (rect)
rect.xml['has-tooltip'] = True
rect.xml['name'] = name
rect.xml['group'] = group
if group and name:
rect.xml['data'] = str(group) + ': ' + str(name)
elif group:
rect.xml['data'] = str(group)
elif name:
rect.xml['data'] = str(name)
else:
rect.xml['data'] = None
rect.xml['tooltip-text'] = 'Value: ' + str (val)
self.lastBar = self.counter + settings.barWidth
self.counter += (settings.barWidth + settings.barSpacing)
def setBounds (self):
xlist = []
for child in self.data:
xlist.append (child.width)
minX = min (xlist)
maxX = max (xlist)
self.minX = 0
self.maxX = maxX + maxX * .05
#self.minY = minY - minY * .05
self.minY = 0
self.maxY = self.lastBar
self.xml['minX'] = self.minX
self.xml['maxX'] = self.maxX
self.xml['minY'] = self.minY
self.xml['maxY'] = self.maxY
self.barHeights ()
def barHeights (self):
for child in self.data:
child.x = self.minX
child.width -= self.minX
class PieCanvas (GraphCanvas):
def __init__ (self, **attr):
GraphCanvas.__init__ (self, **attr)
self.values = []
self.names = []
def addData (self, name, value):
self.values.append (value)
self.names.append (name)
def finalize (self):
radius = min (self.width, self.height) / 2.0 - 10.0
x = self.x + self.width / 2.0
y = self.y + self.height / 2.0
total = sum (self.values)
data = zip (self.values, self.names)
data.sort ()
current = pi / 2.0
for value, name in data:
rad = (value / total) * (2 * pi)
s = Sector (radius, current, rad, x = x, y = y)
s.xml ['name'] = name
s.style.fill = 'red'
s.style.strokeWidth = .5
s.style.strokeColor = 'black'
self.draw (s)
current += rad
| {
"content_hash": "58e49af9b415ff74f8ff6fd30d8935c6",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 107,
"avg_line_length": 32.97314049586777,
"alnum_prop": 0.5368130835265368,
"repo_name": "ksetyadi/Sahana-Eden",
"id": "0ad60bf3d04b991f9bd2ff43e14a5417acb78142",
"size": "15959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/savage/graph/canvas.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Description : This file implements the LogMine algorithm for log parsing
Author : LogPAI team
License : MIT
"""
import sys
import re
import os
import alignment
import copy
import hashlib
import pandas as pd
from datetime import datetime
from collections import defaultdict
class partition():
def __init__(self, idx, log="", lev=-1):
self.logs_idx = [idx]
self.patterns = [log]
self.level = lev
class LogParser():
def __init__(self, indir, outdir, log_format, max_dist=0.001, levels=2, k=1, k1=1, k2=1, alpha=100, rex=[]):
self.logformat = log_format
self.path = indir
self.savePath = outdir
self.rex = rex
self.levels = levels
self.max_dist = max_dist
self.k = k
self.k1 = k1
self.k2 = k2
self.alpha = alpha
self.df_log = None
self.logname = None
self.level_clusters = {}
def parse(self, logname):
print('Parsing file: ' + os.path.join(self.path, logname))
self.logname = logname
starttime = datetime.now()
self.load_data()
for lev in range(self.levels):
if lev == 0:
# Clustering
self.level_clusters[0] = self.get_clusters(self.df_log['Content_'], lev)
else:
# Clustering
patterns = [c.patterns[0] for c in self.level_clusters[lev-1]]
self.max_dist *= self.alpha
clusters = self.get_clusters(patterns, lev, self.level_clusters[lev-1])
# Generate patterns
for cluster in clusters:
cluster.patterns = [self.sequential_merge(cluster.patterns)]
self.level_clusters[lev] = clusters
self.dump()
print('Parsing done. [Time taken: {!s}]'.format(datetime.now() - starttime))
def dump(self):
if not os.path.isdir(self.savePath):
os.makedirs(self.savePath)
templates = [0] * self.df_log.shape[0]
ids = [0] * self.df_log.shape[0]
templates_occ = defaultdict(int)
for cluster in self.level_clusters[self.levels-1]:
EventTemplate = cluster.patterns[0]
EventId = hashlib.md5(' '.join(EventTemplate).encode('utf-8')).hexdigest()[0:8]
Occurences = len(cluster.logs_idx)
templates_occ[EventTemplate] += Occurences
for idx in cluster.logs_idx:
ids[idx] = EventId
templates[idx]= EventTemplate
self.df_log['EventId'] = ids
self.df_log['EventTemplate'] = templates
occ_dict = dict(self.df_log['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = self.df_log['EventTemplate'].unique()
df_event['Occurrences'] = self.df_log['EventTemplate'].map(occ_dict)
df_event['EventId'] = self.df_log['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])
self.df_log.drop("Content_", inplace=True, axis=1)
self.df_log.to_csv(os.path.join(self.savePath, self.logname + '_structured.csv'), index=False)
df_event.to_csv(os.path.join(self.savePath, self.logname + '_templates.csv'), index=False, columns=["EventId","EventTemplate","Occurrences"])
def get_clusters(self, logs, lev, old_clusters=None):
clusters = []
old_clusters = copy.deepcopy(old_clusters)
for logidx, log in enumerate(logs):
match = False
for cluster in clusters:
dis = self.msgDist(log, cluster.patterns[0]) if lev == 0 else self.patternDist(log, cluster.patterns[0])
if dis and dis < self.max_dist:
if lev == 0:
cluster.logs_idx.append(logidx)
else:
cluster.logs_idx.extend(old_clusters[logidx].logs_idx)
cluster.patterns.append(old_clusters[logidx].patterns[0])
match = True
if not match:
if lev == 0:
clusters.append(partition(logidx, log, lev)) # generate new cluster
else:
old_clusters[logidx].level = lev
clusters.append(old_clusters[logidx]) # keep old cluster
return clusters
def sequential_merge(self, logs):
log_merged = logs[0]
for log in logs[1:]:
log_merged = self.pair_merge(log_merged, log)
return log_merged
def pair_merge(self, loga, logb):
loga, logb = alignment.water(loga.split(), logb.split())
logn = []
for idx, value in enumerate(loga):
logn.append('<*>' if value != logb[idx] else value)
return " ".join(logn)
def print_cluster(self, cluster):
print "------start------"
print "level: {}".format(cluster.level)
print "idxs: {}".format(cluster.logs_idx)
print "patterns: {}".format(cluster.patterns)
print "count: {}".format(len(cluster.patterns))
for idx in cluster.logs_idx:
print self.df_log.iloc[idx]['Content_']
print "------end------"
def msgDist(self, seqP, seqQ):
dis = 1
seqP = seqP.split()
seqQ = seqQ.split()
maxlen = max(len(seqP), len(seqQ))
minlen = min(len(seqP), len(seqQ))
for i in range(minlen):
            dis -= (self.k if seqP[i] == seqQ[i] else 0) * 1.0 / maxlen
return dis
def patternDist(self, seqP, seqQ):
dis = 1
seqP = seqP.split()
seqQ = seqQ.split()
maxlen = max(len(seqP), len(seqQ))
minlen = min(len(seqP), len(seqQ))
for i in range(minlen):
if seqP[i] == seqQ[i]:
if seqP[i] == "<*>":
dis -= self.k2 * 1.0 / maxlen
else:
dis -= self.k1 * 1.0 / maxlen
return dis
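    # Example (illustrative, not part of the original file): with k=1 and
    # two 4-token messages sharing 2 tokens at the same positions, msgDist
    # returns 1 - 2 * (1.0 / 4) = 0.5; identical messages give 0 and fully
    # distinct ones give 1. patternDist additionally weights positions
    # where both patterns already hold the '<*>' wildcard by k2 instead of
    # k1, so stable wildcards count differently from stable literals.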
def load_data(self):
def preprocess(line):
for currentRex in self.rex:
line = re.sub(currentRex, '', line)
return line
headers, regex = self.generate_logformat_regex(self.logformat)
self.df_log = self.log_to_dataframe(os.path.join(self.path, self.logname), regex, headers, self.logformat)
self.df_log['Content_'] = self.df_log['Content'].map(preprocess)
def log_to_dataframe(self, log_file, regex, headers, logformat):
''' Function to transform log file to dataframe '''
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(self, logformat):
'''
Function to generate regular expression to split log messages
'''
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
splitter = re.sub(' +', '\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
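    # Example (illustrative, not part of the original file): the format
    # string '<Date> <Time> <Content>' yields headers
    # ['Date', 'Time', 'Content'] and the compiled pattern
    # ^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Content>.*?)$, which
    # log_to_dataframe() then matches against each raw log line.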
| {
"content_hash": "12a634ec6f1f288e9c323d77407d47b1",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 149,
"avg_line_length": 38.66990291262136,
"alnum_prop": 0.5293748430831032,
"repo_name": "logpai/logparser",
"id": "0221703ed9726bb5ea314936e4e173ceb590238b",
"size": "7966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logparser/LogMine/LogMine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44628"
},
{
"name": "Makefile",
"bytes": "207"
},
{
"name": "Perl",
"bytes": "67264"
},
{
"name": "Python",
"bytes": "328270"
},
{
"name": "Shell",
"bytes": "1041"
}
],
"symlink_target": ""
} |
import unittest
import os
import sys
import time
import traceback
from precip import AzureExperiment
from azure_config import AzureConfig
class TestAzureExperiment(unittest.TestCase):
def setUp(self):
try:
home = os.path.expanduser('~')
filepath = os.path.join(home, 'git', 'azure_config', 'config')
self.config = AzureConfig(filepath)
except Exception as e:
print "ERROR: %s" % e
traceback.print_exc(file=sys.stdout)
def test_simple_provision(self):
test_tag = 'simple_provision'
result = False
exp = None
try:
exp = AzureExperiment(
self.config.subscription_id,
self.config.username,
self.config.password,
self.config.admin_username,
self.config.group_name,
self.config.storage_name,
self.config.virtual_network_name,
self.config.subnet_name,
self.config.region,
skip_setup = True
)
exp.provision(
self.config.image_publisher,
self.config.image_offer,
self.config.image_sku,
self.config.image_version,
tags=[test_tag],
count=1,
boot_timeout=600
)
exp.wait()
            exp.run([test_tag], "echo 'Hello world from an experiment instance'")
result = True
except Exception as e:
print "ERROR: %s" % e
traceback.print_exc(file=sys.stdout)
finally:
if exp is not None:
exp.deprovision()
self.assertTrue(result)
def test_conc_provision(self):
test_tag = 'conc_provision'
result = False
exp = None
try:
exp = AzureExperiment(
self.config.subscription_id,
self.config.username,
self.config.password,
self.config.admin_username,
self.config.group_name,
self.config.storage_name,
self.config.virtual_network_name,
self.config.subnet_name,
self.config.region,
skip_setup = True
)
exp.provision(
self.config.image_publisher,
self.config.image_offer,
self.config.image_sku,
self.config.image_version,
tags=[test_tag],
count=2,
boot_timeout=600
)
exp.wait()
            exp.run([test_tag], "echo 'Hello world from an experiment instance'")
result = True
except Exception as e:
print "ERROR: %s" % e
traceback.print_exc(file=sys.stdout)
finally:
if exp is not None:
exp.deprovision()
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "47de3fffe4eeae7dca402f4d3826eaf0",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.5046022353714661,
"repo_name": "rika/precip",
"id": "7b2e65535c8d3e90cf44b672bdc7baef0e70e351",
"size": "3064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_azure_precip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71434"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
import argparse, json, sys
import numpy as np
import kabsch
from utilities import *
def get_spec_sol_rot(spec_file, sol_csv):
if spec_file.rfind('.csv') != -1:
spec_pts = read_csv_points(spec_file)
elif spec_file.rfind('.json') != -1:
with open(spec_file, 'r') as file:
spec_pts = np.asarray(json.load(file)['coms'])
    else:
        sys.exit('Unknown spec file format')
solPts = read_csv_points(sol_csv)
# Centre both pts to their ends
centred_spec = spec_pts - spec_pts[-1]
centred_sol = solPts - solPts[-1]
# Equalise sample points
sol_up_pts = upsample(centred_spec, centred_sol)
sol_up_pts = sol_up_pts - sol_up_pts[-1]
# Find Kabsch rotation for solution -> spec
    rot = kabsch.kabsch(sol_up_pts, centred_spec)
return gen_pymol_txm(rot)
def main():
ap = argparse.ArgumentParser(
description='Generate spec to solution rotation string for Pymol')
ap.add_argument('spec_file')
ap.add_argument('sol_file')
args = ap.parse_args()
print(get_spec_sol_rot(args.spec_file, args.sol_file))
if __name__ == '__main__':
main() | {
"content_hash": "d20743e7468e5ad03de8dd3674eaa289",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.6422764227642277,
"repo_name": "joy13975/elfin",
"id": "b1741942171c07883d1b3bd9a523c970bd4341ca",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elfinpy/obsolete/pymol_helpers/gen_spec_sol_rot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "307"
},
{
"name": "Python",
"bytes": "143639"
},
{
"name": "Shell",
"bytes": "5129"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import time
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.testing.sqlutils import ReusedSQLTestCase
class StreamingTests(ReusedSQLTestCase):
def test_stream_trigger(self):
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime="5 seconds")
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(processingTime="5 seconds", continuous="1 second")
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger("5 seconds")
self.fail("Should have thrown an exception")
except TypeError:
pass
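        # Example (sketch, not part of the original file): the keyword-only
        # forms the API accepts, one at a time, are:
        #
        #   df.writeStream.trigger(processingTime="5 seconds")
        #   df.writeStream.trigger(once=True)
        #   df.writeStream.trigger(continuous="1 second")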
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = (
self.spark.readStream.format("text")
.option("path", "python/test_support/sql/streaming")
.schema(schema)
.load()
)
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
# SPARK-32516 disables the overwrite behavior by default.
with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
df = (
self.spark.readStream.format("csv")
.option("path", "python/test_support/sql/fake")
.schema(bad_schema)
.load(path="python/test_support/sql/streaming", schema=schema, format="text")
)
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = (
self.spark.readStream.format("text")
.load("python/test_support/sql/streaming")
.withColumn("id", lit(1))
)
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, "out")
chk = os.path.join(tmpPath, "chk")
q = (
df.writeStream.option("checkpointLocation", chk)
.queryName("this_query")
.format("parquet")
.partitionBy("id")
.outputMode("append")
.option("path", out)
.start()
)
try:
self.assertEqual(q.name, "this_query")
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith(".")])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, "out")
chk = os.path.join(tmpPath, "chk")
fake1 = os.path.join(tmpPath, "fake1")
fake2 = os.path.join(tmpPath, "fake2")
# SPARK-32516 disables the overwrite behavior by default.
with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
q = (
df.writeStream.option("checkpointLocation", fake1)
.format("memory")
.option("path", fake2)
.queryName("fake_query")
.outputMode("append")
.start(path=out, format="parquet", queryName="this_query", checkpointLocation=chk)
)
try:
self.assertEqual(q.name, "this_query")
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith(".")])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, "out")
chk = os.path.join(tmpPath, "chk")
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias("value")).writeStream.start(
path=out, format="parquet", queryName="this_query", checkpointLocation=chk
)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress["name"], q.name)
self.assertEqual(lastProgress["id"], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and "isDataAvailable" in status and "isTriggerActive" in status
)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, "out")
chk = os.path.join(tmpPath, "chk")
q = df.writeStream.start(
path=out, format="parquet", queryName="this_query", checkpointLocation=chk
)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.processAllAvailable()
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
sq = sdf.writeStream.format("memory").queryName("query_explain").start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = (
sdf.select(bad_udf(col("value")))
.writeStream.format("memory")
.queryName("this_query")
.start()
)
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self._assert_exception_tree_contains_msg(e, "ZeroDivisionError")
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self._assert_exception_tree_contains_msg(sq.exception(), "ZeroDivisionError")
def _assert_exception_tree_contains_msg(self, exception, msg):
e = exception
contains = msg in e.desc
while e.cause is not None and not contains:
e = e.cause
contains = msg in e.desc
self.assertTrue(contains, "Exception tree doesn't contain the expected message: %s" % msg)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, "out")
chk = os.path.join(tmpPath, "chk")
q = df.writeStream.start(
path=out, format="parquet", queryName="this_query", checkpointLocation=chk
)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.processAllAvailable()
q.stop()
shutil.rmtree(tmpPath)
class ForeachWriterTester:
def __init__(self, spark):
self.spark = spark
def write_open_event(self, partitionId, epochId):
self._write_event(self.open_events_dir, {"partition": partitionId, "epoch": epochId})
def write_process_event(self, row):
self._write_event(self.process_events_dir, {"value": "text"})
def write_close_event(self, error):
self._write_event(self.close_events_dir, {"error": str(error)})
def write_input_file(self):
self._write_event(self.input_dir, "text")
def open_events(self):
return self._read_events(self.open_events_dir, "partition INT, epoch INT")
def process_events(self):
return self._read_events(self.process_events_dir, "value STRING")
def close_events(self):
return self._read_events(self.close_events_dir, "error STRING")
def run_streaming_query_on_writer(self, writer, num_files):
self._reset()
try:
sdf = self.spark.readStream.format("text").load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
for i in range(num_files):
self.write_input_file()
sq.processAllAvailable()
finally:
self.stop_all()
def assert_invalid_writer(self, writer, msg=None):
self._reset()
try:
sdf = self.spark.readStream.format("text").load(self.input_dir)
sq = sdf.writeStream.foreach(writer).start()
self.write_input_file()
sq.processAllAvailable()
self.fail("invalid writer %s did not fail the query" % str(writer)) # not expected
except Exception as e:
if msg:
assert msg in str(e), "%s not in %s" % (msg, str(e))
finally:
self.stop_all()
def stop_all(self):
for q in self.spark._wrapped.streams.active:
q.stop()
def _reset(self):
self.input_dir = tempfile.mkdtemp()
self.open_events_dir = tempfile.mkdtemp()
self.process_events_dir = tempfile.mkdtemp()
self.close_events_dir = tempfile.mkdtemp()
def _read_events(self, dir, json):
rows = self.spark.read.schema(json).json(dir).collect()
dicts = [row.asDict() for row in rows]
return dicts
def _write_event(self, dir, event):
import uuid
with open(os.path.join(dir, str(uuid.uuid4())), "w") as f:
f.write("%s\n" % str(event))
def __getstate__(self):
return (self.open_events_dir, self.process_events_dir, self.close_events_dir)
def __setstate__(self, state):
self.open_events_dir, self.process_events_dir, self.close_events_dir = state
# These foreach tests fail on macOS High Sierra because of the Objective-C fork-safety
# rules described at http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
# To work around this, set OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES.
def test_streaming_foreach_with_simple_function(self):
tester = self.ForeachWriterTester(self.spark)
def foreach_func(row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(foreach_func, 2)
self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partitionId, epochId):
tester.write_open_event(partitionId, epochId)
return True
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
open_events = tester.open_events()
self.assertEqual(len(open_events), 2)
self.assertSetEqual(set([e["epoch"] for e in open_events]), {0, 1})
self.assertEqual(len(tester.process_events()), 2)
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e["error"] for e in close_events]), {"None"})
def test_streaming_foreach_with_open_returning_false(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return False
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2)
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 2)
self.assertSetEqual(set([e["error"] for e in close_events]), {"None"})
def test_streaming_foreach_without_open_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
def close(self, error):
tester.write_close_event(error)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def open(self, partition_id, epoch_id):
tester.write_open_event(partition_id, epoch_id)
return True
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 2) # open() was called for each epoch
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_without_open_and_close_methods(self):
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
tester.write_process_event(row)
tester.run_streaming_query_on_writer(ForeachWriter(), 2)
self.assertEqual(len(tester.open_events()), 0) # no open events
self.assertEqual(len(tester.process_events()), 2)
self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise RuntimeError("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
def test_streaming_foreach_with_invalid_writers(self):
tester = self.ForeachWriterTester(self.spark)
def func_with_iterator_input(iter):
for x in iter:
print(x)
tester.assert_invalid_writer(func_with_iterator_input)
class WriterWithoutProcess:
def open(self, partition):
pass
tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")
class WriterWithNonCallableProcess:
process = True
tester.assert_invalid_writer(
WriterWithNonCallableProcess(), "'process' in provided object is not callable"
)
class WriterWithNoParamProcess:
def process(self):
pass
tester.assert_invalid_writer(WriterWithNoParamProcess())
# Abstract class for tests below
class WithProcess:
def process(self, row):
pass
class WriterWithNonCallableOpen(WithProcess):
open = True
tester.assert_invalid_writer(
WriterWithNonCallableOpen(), "'open' in provided object is not callable"
)
class WriterWithNoParamOpen(WithProcess):
def open(self):
pass
tester.assert_invalid_writer(WriterWithNoParamOpen())
class WriterWithNonCallableClose(WithProcess):
close = True
tester.assert_invalid_writer(
WriterWithNonCallableClose(), "'close' in provided object is not callable"
)
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
self.assertEqual(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise RuntimeError("this should fail the query")
try:
df = self.spark.readStream.format("text").load("python/test_support/sql/streaming")
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_streaming_read_from_table(self):
with self.table("input_table", "this_query"):
self.spark.sql("CREATE TABLE input_table (value string) USING parquet")
self.spark.sql("INSERT INTO input_table VALUES ('aaa'), ('bbb'), ('ccc')")
df = self.spark.readStream.table("input_table")
self.assertTrue(df.isStreaming)
q = df.writeStream.format("memory").queryName("this_query").start()
q.processAllAvailable()
q.stop()
result = self.spark.sql("SELECT * FROM this_query ORDER BY value").collect()
self.assertEqual(
set([Row(value="aaa"), Row(value="bbb"), Row(value="ccc")]), set(result)
)
def test_streaming_write_to_table(self):
with self.table("output_table"), tempfile.TemporaryDirectory() as tmpdir:
df = self.spark.readStream.format("rate").option("rowsPerSecond", 10).load()
q = df.writeStream.toTable("output_table", format="parquet", checkpointLocation=tmpdir)
self.assertTrue(q.isActive)
time.sleep(10)
q.stop()
result = self.spark.sql("SELECT value FROM output_table").collect()
self.assertTrue(len(result) > 0)
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_streaming import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "c3f62933664f6d6108c49eaa4a20c036",
"timestamp": "",
"source": "github",
"line_count": 613,
"max_line_length": 100,
"avg_line_length": 37.582381729200655,
"alnum_prop": 0.5893306710651967,
"repo_name": "xuanyuanking/spark",
"id": "87e35641f648a0ffbc26866210ba4fc288979c24",
"size": "23823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests/test_streaming.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "54336"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26221"
},
{
"name": "Dockerfile",
"bytes": "9711"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1872438"
},
{
"name": "Java",
"bytes": "4519872"
},
{
"name": "JavaScript",
"bytes": "222664"
},
{
"name": "Jupyter Notebook",
"bytes": "4310516"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352963"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "7388289"
},
{
"name": "R",
"bytes": "1272682"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "31791"
},
{
"name": "Scala",
"bytes": "40053974"
},
{
"name": "Shell",
"bytes": "230591"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "98156"
}
],
"symlink_target": ""
} |
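The foreach tests above all exercise the same writer contract: an optional open(partition_id, epoch_id) that returns whether the partition should be processed, a required process(row), and an optional close(error). A minimal conforming writer, sketched against a local SparkSession with the built-in rate source — the session setup, option values, and class name here are illustrative assumptions, not part of the test suite:
from pyspark.sql import SparkSession

class PrintingWriter:
    def open(self, partition_id, epoch_id):
        # Returning False tells Spark to skip process() for this partition/epoch,
        # as test_streaming_foreach_with_open_returning_false verifies above.
        return True

    def process(self, row):
        # On a real cluster this prints in the executor logs, not the driver console.
        print(row)

    def close(self, error):
        # error is None on success; otherwise it carries the failure.
        pass

spark = SparkSession.builder.getOrCreate()
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()
query = sdf.writeStream.foreach(PrintingWriter()).start()
query.processAllAvailable()
query.stop()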
import requests, json, argparse, sys, time, datetime
import codecs
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="Host to connect to", default="localhost")
parser.add_argument("--port", type=int, help="Port to connect to", default=8080)
parser.add_argument("--debug", action="store_true", help="Display more debug information")
parser.add_argument("--list-switches", action="store_true", help="List all switches and their state")
parser.add_argument("--list-sensors", action="store_true", help="List all sensors and their value")
parser.add_argument("--list-sensors-graphite", action="store_true", help="List all sensors in Graphite readable format")
parser.add_argument("--list-sensors-librato", action="store_true", help="Report all sensors to Librato")
parser.add_argument("--report-prefix", metavar='NAME', required=False, help="If set, use this string as prefix for all Graphite and Librato data")
parser.add_argument("--list-scenes", action="store_true", help="List all scenes")
parser.add_argument("--list-groups", action="store_true", help="List all groups")
parser.add_argument("--toggle-switch", action="store", help="Toggle a switch", metavar='NAME')
parser.add_argument("--run-scene", action="store", help="Run a scene", metavar='NAME')
parser.add_argument("--get-sunrise", action="store_true", help="Get and display time for sunrise and sunset")
parser.add_argument("--librato-user", help="Librato user name. If also Librato token is set, report sensors to Librato.", required=False)
parser.add_argument("--librato-token", help="Librato token. If also Librato user is set, report sensors to Librato.", required=False)
args = parser.parse_args()
debug = args.debug
def _get_request(cmd):
global debug
url = "http://"+args.host+":"+str(args.port)+"/json.htm?"+cmd
if debug: print url
r = requests.get(url)
if r.status_code != 200 or r.json()["status"] != "OK":
print "Problem talking with domoticz, exit..."
print r.status_code
print r.text
sys.exit(1)
return r.json()
if args.get_sunrise:
data = _get_request("type=command¶m=getSunRiseSet")
print "Sunrise is at "+data["Sunrise"]+" and sunset is at "+data["Sunset"]
if args.list_sensors:
data = _get_request("type=devices&filter=all&used=true&order=Name")
for result in data["result"]:
if result["Type"] not in ["Lighting 2","Scene", "Group"]:
print u"%-30s %-20s %20s" % (result["Name"], result["Data"], result["LastUpdate"])
if args.list_sensors_graphite:
data = _get_request("type=devices&filter=all&used=true&order=Name")
for result in data["result"]:
graphite_path = result["Name"].lower().replace(" ","-").encode("ascii","ignore")
#graphite_path = result["Name"].lower().replace(" ","-")
if args.report_prefix:
graphite_path = args.report_prefix+"."+graphite_path
timestamp = int(time.mktime(datetime.datetime.strptime(result["LastUpdate"], "%Y-%m-%d %H:%M:%S").timetuple()))
if result["Type"] == "Temp":
print u"%s.temperature %.1f %d" % (graphite_path, result["Temp"], timestamp)
if result["Type"] == "Temp + Humidity":
print u"%s.temperature %.1f %d" % (graphite_path, result["Temp"], timestamp)
print u"%s.humidity %.1f %d" % (graphite_path, result["Humidity"], timestamp)
if result["Type"] == "Usage":
print u"%s.energy %.1f %d" % (graphite_path, float(result["Data"].split(" ")[0]), timestamp)
if result["Type"] == "Value":
print u"%s.value %d %d" % (graphite_path, int(result["Data"]), timestamp)
if result["Type"] == "RFXMeter":
print u"%s.counter %d %d" % (graphite_path, int(result["Data"].split(" ")[0].replace(".","")), timestamp)
if args.list_sensors_librato and args.librato_user and args.librato_token:
import librato
librato_api = librato.connect(args.librato_user, args.librato_token)
q = librato_api.new_queue()
# Sample metrics from the Librato docs; commented out so that only the real
# sensor values collected below are submitted.
# q.add('temperature', 23.1, source='downstairs')
# q.add('num_requests', 100, type='counter', source='server1')
# q.add('num_requests', 102, type='counter', source='server2')
data = _get_request("type=devices&filter=all&used=true&order=Name")
nbr_values = 0
for result in data["result"]:
librato_path = result["Name"].lower().replace(" ","-").encode("ascii","ignore")
if args.report_prefix:
librato_path = args.report_prefix+"."+librato_path
if result["Type"] == "Temp":
q.add(u"%s.temperature" % librato_path, result["Temp"])
nbr_values += 1
if result["Type"] == "Temp + Humidity":
q.add(u"%s.temperature" % librato_path, result["Temp"])
q.add(u"%s.humidity" % librato_path, result["Temp"])
nbr_values += 2
if result["Type"] == "Usage":
q.add(u"%s.energy" % librato_path, float(result["Data"].split(" ")[0]))
nbr_values += 1
if result["Type"] == "Value":
q.add(u"%s.value" % librato_path, int(result["Data"]))
nbr_values += 1
if result["Type"] == "RFXMeter":
q.add(u"%s.counter" % librato_path, float(result["Data"].split(" ")[0].replace(".","")), type = "counter")
nbr_values += 1
print "Reported %d values to Librato." % nbr_values
q.submit()
if args.list_sensors_librato and (not args.librato_user or not args.librato_token):
print "If reporting metrics to Librato you must also provide --librato-user and --librato-token"
sys.exit(1)
if args.list_switches:
data = _get_request("type=devices&filter=all&used=true&order=Name")
for result in data["result"]:
if result["Type"] == "Lighting 2":
if debug:
print u"%-30s %-20s %20s idx: %s" % (result["Name"], result["Data"], result["LastUpdate"], str(result["idx"]))
else:
print u"%-30s %-20s %20s" % (result["Name"], result["Data"], result["LastUpdate"])
if args.list_scenes or args.list_groups:
data = _get_request("type=scenes")
for result in data["result"]:
if args.list_scenes and result["Type"] == "Scene" or args.list_groups and result["Type"] == "Group":
if debug:
print u"%-30s %-20s %20s idx: %s" % (result["Name"], result["Status"], result["LastUpdate"], str(result["idx"]))
else:
print u"%-30s %-20s %20s" % (result["Name"], result["Status"], result["LastUpdate"])
if args.toggle_switch:
data = _get_request("type=devices&filter=all&used=true&order=Name")
for result in data["result"]:
if result["Type"] == "Lighting 2":
if result["Name"] == args.toggle_switch.decode("utf-8"):
print "Switch is "+result["Status"]+", toggling..."
if result["Status"] == "On":
data = _get_request("type=command¶m=switchlight&idx=%d&switchcmd=Off" % int(result["idx"]))
else:
data = _get_request("type=command¶m=switchlight&idx=%d&switchcmd=On" % int(result["idx"]))
if args.run_scene:
data = _get_request("type=scenes")
for result in data["result"]:
if args.run_scene.decode("utf-8") == result["Name"]:
data = _get_request("type=command¶m=switchscene&idx=%d&switchcmd=On" % int(result["idx"]))
| {
"content_hash": "bbec159dc450f4e6b33efad0f77f0162",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 146,
"avg_line_length": 46.32098765432099,
"alnum_prop": 0.6143390191897654,
"repo_name": "magapp/domoticz-cli",
"id": "0a35c21e461850d780f1e013045634538a80b359",
"size": "7576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domoticz-cli.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7576"
}
],
"symlink_target": ""
} |
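The script above is Python 2; for reference, here is the same _get_request pattern translated to Python 3, with the host, port, and command string as illustrative assumptions rather than anything mandated by the Domoticz API:
import sys
import requests

def get_request(cmd, host="localhost", port=8080):
    # Same JSON endpoint convention as the script above.
    url = "http://%s:%d/json.htm?%s" % (host, port, cmd)
    r = requests.get(url)
    if r.status_code != 200 or r.json().get("status") != "OK":
        print("Problem talking with domoticz, exiting...", file=sys.stderr)
        sys.exit(1)
    return r.json()

data = get_request("type=command&param=getSunRiseSet")
print("Sunrise is at %s and sunset is at %s" % (data["Sunrise"], data["Sunset"]))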
import datetime
import mongoengine as mongo
import httplib2
import pickle
import base64
from StringIO import StringIO
from oauth2client.client import Error as OAuthError
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from lxml import etree
from django.db import models
from django.contrib.auth.models import User
from mongoengine.queryset import OperationError
import vendor.opml as opml
from apps.rss_feeds.models import Feed, DuplicateFeed, MStarredStory
from apps.reader.models import UserSubscription, UserSubscriptionFolders
from utils import json_functions as json, urlnorm
from utils import log as logging
from utils.feed_functions import timelimit
from utils.feed_functions import add_object_to_folder
#from south.modelsinspector import add_introspection_rules
#add_introspection_rules([], ["^oauth2client\.django_orm\.FlowField"])
#add_introspection_rules([], ["^oauth2client\.django_orm\.CredentialsField"])
class OAuthToken(models.Model):
user = models.OneToOneField(User, null=True, blank=True)
session_id = models.CharField(max_length=50, null=True, blank=True)
uuid = models.CharField(max_length=50, null=True, blank=True)
remote_ip = models.CharField(max_length=50, null=True, blank=True)
request_token = models.CharField(max_length=50)
request_token_secret = models.CharField(max_length=50)
access_token = models.CharField(max_length=50)
access_token_secret = models.CharField(max_length=50)
credential = models.TextField(null=True, blank=True)
created_date = models.DateTimeField(default=datetime.datetime.now)
class Importer:
def clear_feeds(self):
UserSubscription.objects.filter(user=self.user).delete()
def clear_folders(self):
UserSubscriptionFolders.objects.filter(user=self.user).delete()
def get_folders(self):
self.usf, _ = UserSubscriptionFolders.objects.get_or_create(user=self.user,
defaults={'folders': '[]'})
return json.decode(self.usf.folders)
class OPMLExporter(Importer):
def __init__(self, user):
self.user = user
self.fetch_feeds()
def process(self, verbose=False):
now = str(datetime.datetime.now())
root = Element('opml')
root.set('version', '1.1')
root.append(Comment('Generated by PyTune - www.pytune.com'))
head = SubElement(root, 'head')
title = SubElement(head, 'title')
title.text = 'PyTune Feeds'
dc = SubElement(head, 'dateCreated')
dc.text = now
dm = SubElement(head, 'dateModified')
dm.text = now
folders = self.get_folders()
body = SubElement(root, 'body')
self.process_outline(body, folders, verbose=verbose)
return tostring(root)
def process_outline(self, body, folders, verbose=False):
for obj in folders:
if isinstance(obj, int) and obj in self.feeds:
feed = self.feeds[obj]
if verbose:
print " ---> Adding feed: %s - %s" % (feed['id'],
feed['feed_title'][:30])
feed_attrs = self.make_feed_row(feed)
body.append(Element('outline', feed_attrs))
elif isinstance(obj, dict):
for folder_title, folder_objs in obj.items():
if verbose:
print " ---> Adding folder: %s" % folder_title
folder_element = Element('outline', {'text': folder_title, 'title': folder_title})
body.append(self.process_outline(folder_element, folder_objs, verbose=verbose))
return body
def make_feed_row(self, feed):
feed_attrs = {
'text': feed['feed_title'],
'title': feed['feed_title'],
'type': 'rss',
'version': 'RSS',
'htmlUrl': feed['feed_link'] or "",
'xmlUrl': feed['feed_address'] or "",
}
return feed_attrs
def fetch_feeds(self):
subs = UserSubscription.objects.filter(user=self.user)
self.feeds = []
for sub in subs:
try:
self.feeds.append((sub.feed_id, sub.canonical()))
except Feed.DoesNotExist:
continue
self.feeds = dict(self.feeds)
class OPMLImporter(Importer):
def __init__(self, opml_xml, user):
self.user = user
self.opml_xml = opml_xml
@timelimit(10)
def try_processing(self):
folders = self.process()
return folders
def process(self):
# self.clear_feeds()
outline = opml.from_string(self.opml_xml)
folders = self.get_folders()
try:
folders = self.process_outline(outline, folders)
except AttributeError:
folders = None
else:
# self.clear_folders()
self.usf.folders = json.encode(folders)
self.usf.save()
return folders
def process_outline(self, outline, folders, in_folder=''):
for item in outline:
if (not hasattr(item, 'xmlUrl') and
(hasattr(item, 'text') or hasattr(item, 'title'))):
folder = item
title = getattr(item, 'text', None) or getattr(item, 'title', None)
# if hasattr(folder, 'text'):
# logging.info(' ---> [%s] ~FRNew Folder: %s' % (self.user, folder.text))
obj = {title: []}
folders = add_object_to_folder(obj, in_folder, folders)
folders = self.process_outline(folder, folders, title)
elif hasattr(item, 'xmlUrl'):
feed = item
if not hasattr(feed, 'htmlUrl'):
setattr(feed, 'htmlUrl', None)
# If feed title matches what's in the DB, don't override it on subscription.
feed_title = getattr(feed, 'title', None) or getattr(feed, 'text', None)
if not feed_title:
setattr(feed, 'title', feed.htmlUrl or feed.xmlUrl)
user_feed_title = None
else:
setattr(feed, 'title', feed_title)
user_feed_title = feed.title
feed_address = urlnorm.normalize(feed.xmlUrl)
feed_link = urlnorm.normalize(feed.htmlUrl)
if len(feed_address) > Feed._meta.get_field('feed_address').max_length:
continue
if feed_link and len(feed_link) > Feed._meta.get_field('feed_link').max_length:
continue
# logging.info(' ---> \t~FR%s - %s - %s' % (feed.title, feed_link, feed_address,))
feed_data = dict(feed_address=feed_address, feed_link=feed_link, feed_title=feed.title)
# feeds.append(feed_data)
# See if it exists as a duplicate first
duplicate_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
if duplicate_feed:
feed_db = duplicate_feed[0].feed
else:
feed_data['active_subscribers'] = 1
feed_data['num_subscribers'] = 1
feed_db, _ = Feed.find_or_create(feed_address=feed_address,
feed_link=feed_link,
defaults=dict(**feed_data))
if user_feed_title == feed_db.feed_title:
user_feed_title = None
us, _ = UserSubscription.objects.get_or_create(
feed=feed_db,
user=self.user,
defaults={
'needs_unread_recalc': True,
'mark_read_date': datetime.datetime.utcnow() - datetime.timedelta(days=1),
'active': self.user.profile.is_premium,
'user_title': user_feed_title
}
)
if self.user.profile.is_premium and not us.active:
us.active = True
us.save()
if not us.needs_unread_recalc:
us.needs_unread_recalc = True
us.save()
folders = add_object_to_folder(feed_db.pk, in_folder, folders)
return folders
def count_feeds_in_opml(self):
opml_count = len(opml.from_string(self.opml_xml))
sub_count = UserSubscription.objects.filter(user=self.user).count()
return max(sub_count, opml_count)
class UploadedOPML(mongo.Document):
user_id = mongo.IntField()
opml_file = mongo.StringField()
upload_date = mongo.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
return "%s: %s characters" % (user.username, len(self.opml_file))
meta = {
'collection': 'uploaded_opml',
'allow_inheritance': False,
'order': '-upload_date',
'indexes': ['user_id', '-upload_date'],
}
class GoogleReaderImporter(Importer):
def __init__(self, user, xml=None):
self.user = user
self.scope = "http://www.google.com/reader/api"
self.xml = xml
self.auto_active = False
@timelimit(10)
def try_import_feeds(self, auto_active=False):
code = 0
try:
self.import_feeds(auto_active=auto_active)
self.import_starred_items(count=10)
except AssertionError:
code = -1
else:
code = 1
return code
def import_feeds(self, auto_active=False):
self.auto_active = auto_active
sub_url = "%s/0/subscription/list" % self.scope
if not self.xml:
feeds_xml = self.send_request(sub_url)
else:
feeds_xml = self.xml
if feeds_xml:
self.process_feeds(feeds_xml)
def send_request(self, url):
if not self.user.is_authenticated():
return
user_tokens = OAuthToken.objects.filter(user=self.user)
if user_tokens.count():
user_token = user_tokens[0]
if user_token.credential:
credential = pickle.loads(base64.b64decode(user_token.credential))
http = httplib2.Http()
http = credential.authorize(http)
content = http.request(url)
return content and content[1]
def process_feeds(self, feeds_xml):
# self.clear_feeds()
# self.clear_folders()
folders = self.get_folders()
self.feeds = self.parse(feeds_xml)
for item in self.feeds:
folders = self.process_item(item, folders)
logging.user(self.user, "~BB~FW~SBGoogle Reader import: ~BT~FW%s" % (folders))
self.usf.folders = json.encode(folders)
self.usf.save()
def parse(self, feeds_xml):
parser = etree.XMLParser(recover=True)
tree = etree.parse(StringIO(feeds_xml), parser)
feeds = tree.xpath('/object/list/object')
return feeds
def process_item(self, item, folders):
feed_title = item.xpath('./string[@name="title"]') and \
item.xpath('./string[@name="title"]')[0].text
feed_address = item.xpath('./string[@name="id"]') and \
item.xpath('./string[@name="id"]')[0].text.replace('feed/', '')
feed_link = item.xpath('./string[@name="htmlUrl"]') and \
item.xpath('./string[@name="htmlUrl"]')[0].text
category = item.xpath('./list[@name="categories"]/object/string[@name="label"]') and \
item.xpath('./list[@name="categories"]/object/string[@name="label"]')[0].text
if not feed_address:
feed_address = feed_link
try:
feed_link = urlnorm.normalize(feed_link)
feed_address = urlnorm.normalize(feed_address)
if len(feed_address) > Feed._meta.get_field('feed_address').max_length:
return folders
# See if it exists as a duplicate first
duplicate_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
if duplicate_feed:
feed_db = duplicate_feed[0].feed
else:
feed_data = dict(feed_title=feed_title)
feed_data['active_subscribers'] = 1
feed_data['num_subscribers'] = 1
feed_db, _ = Feed.find_or_create(feed_address=feed_address, feed_link=feed_link,
defaults=dict(**feed_data))
us, _ = UserSubscription.objects.get_or_create(
feed=feed_db,
user=self.user,
defaults={
'needs_unread_recalc': True,
'mark_read_date': datetime.datetime.utcnow() - datetime.timedelta(days=1),
'active': self.user.profile.is_premium or self.auto_active,
}
)
if not us.needs_unread_recalc:
us.needs_unread_recalc = True
us.save()
if not category: category = ""
if category:
obj = {category: []}
folders = add_object_to_folder(obj, '', folders)
folders = add_object_to_folder(feed_db.pk, category, folders)
# if feed_db.pk not in folders[category]:
# folders[category].append(feed_db.pk)
except Exception, e:
logging.info(' *** -> Exception: %s: %s' % (e, item))
return folders
def test(self):
sub_url = "%s/0/token" % (self.scope)
try:
resp = self.send_request(sub_url)
except OAuthError:
return False
return resp
@timelimit(10)
def try_import_starred_stories(self):
self.import_starred_items(count=1000)
starred_count = MStarredStory.objects.filter(user_id=self.user.pk).count()
return starred_count
def import_starred_items(self, count=10):
sub_url = "%s/0/stream/contents/user/-/state/com.google/starred?n=%s" % (self.scope, count)
stories_str = self.send_request(sub_url)
try:
stories = json.decode(stories_str)
except:
logging.user(self.user, "~BB~FW~SBGoogle Reader starred stories: ~BT~FWNo stories")
stories = None
if stories:
logging.user(self.user, "~BB~FW~SBGoogle Reader starred stories: ~BT~FW%s stories" % (len(stories['items'])))
self.process_starred_items(stories['items'])
starred_count = MStarredStory.objects.filter(user_id=self.user.pk).count()
return starred_count
def process_starred_items(self, stories):
counts = {
'created': 0,
'existed': 0,
'failed': 0,
}
logging.user(self.user, "~FCBeginning starring...")
for story in stories:
try:
original_feed = Feed.get_feed_from_url(story['origin']['streamId'], create=False, fetch=False)
if not original_feed:
original_feed = Feed.get_feed_from_url(story['origin']['htmlUrl'], create=False, fetch=False)
content = story.get('content') or story.get('summary')
story_db = {
"user_id": self.user.pk,
"starred_date": datetime.datetime.fromtimestamp(story['updated']),
"story_date": datetime.datetime.fromtimestamp(story['published']),
"story_title": story.get('title', story.get('origin', {}).get('title', '[Untitled]')),
"story_permalink": story['alternate'][0]['href'],
"story_guid": story['id'],
"story_content": content.get('content'),
"story_author_name": story.get('author'),
"story_feed_id": original_feed and original_feed.pk,
"story_tags": [tag for tag in story.get('categories', []) if 'user/' not in tag]
}
# logging.user(self.user, "~FCStarring: ~SB%s~SN in ~SB%s" % (story_db['story_title'][:50], original_feed and original_feed))
MStarredStory.objects.create(**story_db)
counts['created'] += 1
except OperationError:
# logging.user(self.user, "~FCAlready starred: ~SB%s" % (story_db['story_title'][:50]))
counts['existed'] += 1
except Exception:
# logging.user(self.user, "~FC~BRFailed to star: ~SB%s / %s" % (story, e))
counts['failed'] += 1
logging.user(self.user, "~FCStarred: ~SB%s~SN/~SB%s%s~SN/~SB%s%s~SN" % (
counts['created'],
'~FM' if counts['existed'] else '~SN', counts['existed'],
'~FR' if counts['failed'] else '~SN', counts['failed']))
return counts
| {
"content_hash": "f0a4f6f2265db79d851f1447fdef7714",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 141,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.5417860827696893,
"repo_name": "Einsteinish/PyTune3",
"id": "d36c8e8b441715ab2c4e8292172fdcfdf799ee99",
"size": "17446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/feed_import/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "569697"
},
{
"name": "CoffeeScript",
"bytes": "6745"
},
{
"name": "HTML",
"bytes": "281641"
},
{
"name": "JavaScript",
"bytes": "1547332"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "4530"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "1549865"
},
{
"name": "R",
"bytes": "523"
},
{
"name": "Shell",
"bytes": "40404"
}
],
"symlink_target": ""
} |
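OPMLExporter.process above builds a head element plus nested outline elements for folders and feeds. A stand-alone sketch of that document shape using only the standard library — the folder and feed data are made up for illustration:
from xml.etree.ElementTree import Element, SubElement, tostring

root = Element('opml', {'version': '1.1'})
head = SubElement(root, 'head')
SubElement(head, 'title').text = 'Example Feeds'
body = SubElement(root, 'body')
# One outline per folder, with one nested outline per feed inside it.
folder = SubElement(body, 'outline', {'text': 'News', 'title': 'News'})
SubElement(folder, 'outline', {
    'text': 'Example Feed',
    'title': 'Example Feed',
    'type': 'rss',
    'xmlUrl': 'http://example.com/rss',
    'htmlUrl': 'http://example.com/',
})
print(tostring(root, encoding='unicode'))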
import os, lxml, io, re, subprocess, tempfile
import lxml.etree as etree
from net.ecromedos.error import ECMDSPluginError
def getInstance(config):
"""Returns a plugin instance."""
return Plugin(config)
#end function
class Plugin():
def __init__(self, config):
# init counter
self.counter = 1
self.nodelist = []
# look for latex executable
try:
self.latex_bin = config['latex_bin']
except KeyError:
msg = "Location of the 'latex' executable unspecified."
raise ECMDSPluginError(msg, "math")
#end try
if not os.path.isfile(self.latex_bin):
msg = "Could not find latex executable '%s'." % (self.latex_bin,)
raise ECMDSPluginError(msg, "math")
#end if
# look for conversion tool
try:
self.dvipng_bin = ""
self.dvipng_bin = config['dvipng_bin']
except KeyError:
msg = "Location of the 'dvipng' executable unspecified."
raise ECMDSPluginError(msg, "math")
#end try
if not os.path.isfile(self.dvipng_bin):
msg = "Could not find 'dvipng' executable '%s'." % (self.dvipng_bin,)
raise ECMDSPluginError(msg, "math")
#end if
# temporary directory
self.tmp_dir = config['tmp_dir']
# conversion dpi
try:
self.dvipng_dpi = config['dvipng_dpi']
except KeyError:
self.dvipng_dpi = "100"
#end try
# output document
self.out = io.StringIO()
#end function
def process(self, node, format):
"""Prepare @node for target @format."""
if format.endswith("latex"):
result = self.LaTeX_ProcessMath(node)
else:
result = self.XHTML_ProcessMath(node)
#end if
return result
#end function
def flush(self):
"""If target format is XHTML, generate GIFs from formulae."""
# generate bitmaps of formulae
if self.out.tell() > 0:
self.out.write("\\end{document}\n")
self.__LaTeX2Dvi2Gif()
self.out.close()
self.out = io.StringIO()
#end if
#reset counter
self.counter = 1
self.nodelist = []
#end function
def LaTeX_ProcessMath(self, node):
"""Mark node, to be copied 1:1 to output document."""
math_node = etree.Element("m")
parent = node.getparent()
math_node.tail = node.tail
parent.replace(node, math_node)
node.tag = "copy"
node.tail = ""
math_node.append(node)
return math_node
#end function
def XHTML_ProcessMath(self, node):
"""Call LaTeX and ImageMagick to produce a GIF."""
if self.out.tell() == 0:
self.out.write("""\
\\documentclass[12pt]{scrartcl}
\\usepackage{courier}
\\usepackage{helvet}
\\usepackage{mathpazo}
\\usepackage{amsmath}
\\usepackage[active,displaymath,textmath]{preview}
\\frenchspacing{}
\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}
\\usepackage[T1]{autofe}
\\PrerenderUnicode{äöüß}
\\pagestyle{empty}
\\begin{document}""")
#end if
# save TeX markup
#formula = etree.tostring(node, method="text", encoding="unicode")
# give each formula one page
self.out.write("$%s$\n\\clearpage{}\n" % node.text)
copy_node = etree.Element("copy")
img_node = etree.Element("img")
img_node.attrib["src"] = "m%06d.gif" % (self.counter,)
img_node.attrib["alt"] = "formula"
img_node.attrib["class"] = "math"
copy_node.tail = node.tail
copy_node.append(img_node)
copy_node.tail = node.tail
node.getparent().replace(node, copy_node)
# keep track of images for flush
self.nodelist.append(img_node)
self.counter += 1
return copy_node
#end function
# PRIVATE
def __LaTeX2Dvi2Gif(self):
"""Write formulae to LaTeX file, compile and extract images."""
# open a temporary file for TeX output
tmpfp, tmpname = tempfile.mkstemp(suffix=".tex", dir=self.tmp_dir)
try:
with os.fdopen(tmpfp, "w", encoding="utf-8") as texfile:
texfile.write(self.out.getvalue())
except IOError:
msg = "Error while writing temporary TeX file."
raise ECMDSPluginError(msg, "math")
#end try
# compile LaTeX file
with open(os.devnull, "wb") as devnull:
cmd = [self.latex_bin, "-interaction", "nonstopmode", tmpname]
# run LaTeX twice
for i in range(2):
proc = subprocess.Popen(cmd, stdout=devnull, stderr=devnull,
cwd=self.tmp_dir)
rval = proc.wait()
# test exit code
if rval != 0:
msg = "Could not compile temporary TeX file."
raise ECMDSPluginError(msg, "math")
#end if
#end for
#end with
# determine dvi file name
dvifile = self.tmp_dir + os.sep + \
''.join(tmpname.split(os.sep)[-1].split('.')[:-1]) + ".dvi"
# we need to log the output
logfp, logname = tempfile.mkstemp(suffix=".log", dir=self.tmp_dir)
# convert dvi file to GIF image
with os.fdopen(logfp, "w", encoding="utf-8") as dvilog:
cmd = [self.dvipng_bin, "-D", self.dvipng_dpi, "--depth",
"-gif", "-T", "tight", "-o", "m%06d.gif", dvifile]
proc = subprocess.Popen(cmd, stdout=dvilog, stderr=dvilog)
rval = proc.wait()
# test exit code
if rval != 0:
msg = "Could not convert dvi file to GIF images."
raise ECMDSPluginError(msg, "math")
#end if
#end with
# read dvipng's log output
try:
with open(logname, "r", encoding="utf-8") as logfp:
string = logfp.read()
except IOError:
msg = "Could not read dvipng's log output from '%s'" % logname
raise ECMDSPluginError(msg, "math")
#end try
# look for [??? depth=???px]
rexpr = re.compile("\\[[0-9]* depth=[0-9]*\\]")
# add style property to node
i = 0
for match in rexpr.finditer(string):
align = match.group().split("=")[1].strip(" []")
node = self.nodelist[i]
node.attrib["style"] = "vertical-align: -" + align + "px;"
i += 1
#end for
#end function
#end class
| {
"content_hash": "fab905aac5200c12b2dbe33ec3b4bae6",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 81,
"avg_line_length": 29.68,
"alnum_prop": 0.5450733752620545,
"repo_name": "tobijk/ecromedos",
"id": "8851efab7b6703053d1e209f7e5cb687396e9832",
"size": "6878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/net/ecromedos/plugins/math.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10914"
},
{
"name": "Python",
"bytes": "180602"
},
{
"name": "Shell",
"bytes": "1247"
},
{
"name": "XSLT",
"bytes": "267254"
}
],
"symlink_target": ""
} |
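Stripped of the plugin machinery, __LaTeX2Dvi2Gif drives two external tools: latex compiles the formulae to DVI, then dvipng rasterizes each page. A condensed sketch of that pipeline, assuming both binaries are on PATH and rendering a single PNG rather than the plugin's numbered GIFs; the formula and file names are illustrative:
import os
import subprocess
import tempfile

tex = (r"\documentclass{article}\pagestyle{empty}"
       r"\begin{document}$e^{i\pi}+1=0$\end{document}")
workdir = tempfile.mkdtemp()
with open(os.path.join(workdir, "formula.tex"), "w", encoding="utf-8") as f:
    f.write(tex)

# Compile to DVI; nonstopmode keeps latex from prompting on errors.
subprocess.run(["latex", "-interaction", "nonstopmode", "formula.tex"],
               cwd=workdir, check=True, capture_output=True)
# Rasterize the DVI; -T tight crops to the formula's bounding box.
subprocess.run(["dvipng", "-D", "100", "-T", "tight",
                "-o", "formula.png", "formula.dvi"],
               cwd=workdir, check=True, capture_output=True)
print("wrote", os.path.join(workdir, "formula.png"))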
import os
import logging
import redis
import urlparse
from flask import Flask
from flask import render_template
from raven.contrib.flask import Sentry
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.basicauth import BasicAuth
from flask.ext.login import LoginManager
from lrutils import dateformat, datetimeformat, currency
from lrutils.audit import Audit
from lrutils.errorhandler.errorhandler_utils import ErrorHandler, eh_after_request
from health import Health
app = Flask('application.frontend')
app.config.from_object(os.environ.get('SETTINGS'))
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
app.jinja_env.filters['datetimeformat'] = datetimeformat
app.jinja_env.filters['dateformat'] = dateformat
app.jinja_env.filters['currency'] = currency
login_manager = LoginManager()
login_manager.init_app(app)
def health(self):
try:
with self.engine.connect() as c:
c.execute('select 1=1').fetchall()
return True, 'DB'
except:
return False, 'DB'
db = SQLAlchemy(app)
SQLAlchemy.health = health
from flask_kvsession import KVSessionExtension
from simplekv.memory.redisstore import RedisStore
redis_url = urlparse.urlparse(app.config.get('REDIS_URL'))
redis_server = redis.StrictRedis(
host=redis_url.hostname,
port=redis_url.port,
password=redis_url.password
)
store = RedisStore(redis_server)
kv_store = KVSessionExtension(store, app)
Health(app, checks=[db.health])
# Audit, error handling and after_request headers all handled by lrutils
Audit(app)
ErrorHandler(app)
app.after_request(eh_after_request)
if not app.debug:
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
if app.config.get('BASIC_AUTH_USERNAME'):
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)
# Sentry exception reporting
if 'SENTRY_DSN' in os.environ:
sentry = Sentry(app, dsn=os.environ['SENTRY_DSN'])
app.logger.debug("\nConfiguration\n%s\n" % app.config)
# import and register auth blueprint
from .auth.views import auth
app.register_blueprint(auth)
login_manager.login_view = 'auth.login'
login_manager.login_message = ''
from .relationship.views import relationship
app.register_blueprint(relationship)
@app.context_processor
def asset_path_context_processor():
return {
'asset_path': '/static/build/',
'landregistry_asset_path': '/static/build/'
}
@app.context_processor
def address_processor():
from lrutils import build_address
def process_address_json(address_json):
return build_address(address_json)
return dict(formatted=process_address_json)
| {
"content_hash": "af28a435438b6fdbfcbb0ff70dcb41cb",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 82,
"avg_line_length": 27.336734693877553,
"alnum_prop": 0.746173945502053,
"repo_name": "LandRegistry/service-frontend-alpha",
"id": "34c1981e3ff85ddee83799a370fa3f09bb120d5c",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15795"
},
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "95367"
},
{
"name": "Ruby",
"bytes": "45795"
},
{
"name": "Shell",
"bytes": "8487"
}
],
"symlink_target": ""
} |
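The SQLAlchemy.health = health line above monkey-patches a plain function onto a class so that every instance gains a bound health() method, which is what lets Health(app, checks=[db.health]) call it later. The same pattern in isolation, with a stand-in Database class invented purely for illustration:
class Database:
    def connect(self):
        return True

def health(self):
    # Probe the connection; report a (status, label) pair either way.
    try:
        self.connect()
        return True, 'DB'
    except Exception:
        return False, 'DB'

# Attach the function to the class; instances now see it as a method.
Database.health = health
db = Database()
print(db.health())  # (True, 'DB')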
"""Extracts features for different models."""
import functools
import tensorflow as tf
from deeplab.core import xception
slim = tf.contrib.slim
# A map from network name to network function.
networks_map = {
'xception_65': xception.xception_65,
}
# A map from network name to network arg scope.
arg_scopes_map = {
'xception_65': xception.xception_arg_scope,
}
# Names for end point features.
DECODER_END_POINTS = 'decoder_end_points'
# A dictionary from network name to a map of end point features.
networks_to_feature_maps = {
'xception_65': {
DECODER_END_POINTS: [
'entry_flow/block2/unit_1/xception_module/'
'separable_conv2_pointwise',
],
}
}
# A map from feature extractor name to the network name scope used in the
# ImageNet pretrained versions of these models.
name_scope = {
'xception_65': 'xception_65',
}
# Mean pixel value.
_MEAN_RGB = [123.15, 115.90, 103.06]
def _preprocess_subtract_imagenet_mean(inputs):
"""Subtract Imagenet mean RGB value."""
mean_rgb = tf.reshape(_MEAN_RGB, [1, 1, 1, 3])
return inputs - mean_rgb
def _preprocess_zero_mean_unit_range(inputs):
"""Map image values from [0, 255] to [-1, 1]."""
return (2.0 / 255.0) * tf.to_float(inputs) - 1.0
_PREPROCESS_FN = {
'xception_65': _preprocess_zero_mean_unit_range,
}
def mean_pixel(model_variant=None):
"""Gets mean pixel value.
This function returns different mean pixel value, depending on the input
model_variant which adopts different preprocessing functions. We currently
handle the following preprocessing functions:
(1) _preprocess_subtract_imagenet_mean. We simply return mean pixel value.
(2) _preprocess_zero_mean_unit_range. We return [127.5, 127.5, 127.5].
The return values are used in a way that the padded regions after
pre-processing will contain value 0.
Args:
model_variant: Model variant (string) for feature extraction. For
backwards compatibility, model_variant=None returns _MEAN_RGB.
Returns:
Mean pixel value.
"""
if model_variant is None:
return _MEAN_RGB
else:
return [127.5, 127.5, 127.5]
def extract_features(images,
output_stride=8,
multi_grid=None,
model_variant=None,
weight_decay=0.0001,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
regularize_depthwise=False,
preprocess_images=True,
num_classes=None,
global_pool=False):
"""Extracts features by the parituclar model_variant.
Args:
images: A tensor of size [batch, height, width, channels].
output_stride: The ratio of input to output spatial resolution.
multi_grid: Employ a hierarchy of different atrous rates within network.
model_variant: Model variant for feature extraction.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
regularize_depthwise: Whether or not apply L2-norm regularization on the
depthwise convolution weights.
preprocess_images: Performs preprocessing on images or not. Defaults to
True. Set to False if preprocessing will be done by other functions. We
support two types of preprocessing: (1) Mean pixel subtraction and (2)
Pixel values normalization to be [-1, 1].
num_classes: Number of classes for image classification task. Defaults
to None for dense prediction tasks.
global_pool: Global pooling for image classification task. Defaults to
False, since dense prediction tasks do not use this.
Returns:
features: A tensor of size [batch, feature_height, feature_width,
feature_channels], where feature_height/feature_width are determined
by the images height/width and output_stride.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Unrecognized model variant.
"""
if 'xception' in model_variant:
arg_scope = arg_scopes_map[model_variant](
weight_decay=weight_decay,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3,
batch_norm_scale=True,
regularize_depthwise=regularize_depthwise)
features, end_points = get_network(
model_variant, preprocess_images, arg_scope)(
inputs=images,
num_classes=num_classes,
is_training=(is_training and fine_tune_batch_norm),
global_pool=global_pool,
output_stride=output_stride,
regularize_depthwise=regularize_depthwise,
multi_grid=multi_grid,
reuse=reuse,
scope=name_scope[model_variant])
elif 'mobilenet' in model_variant:
raise ValueError('MobileNetv2 support is coming soon.')
else:
raise ValueError('Unknown model variant %s.' % model_variant)
return features, end_points
def get_network(network_name, preprocess_images, arg_scope=None):
"""Gets the network.
Args:
network_name: Network name.
preprocess_images: Preprocesses the images or not.
arg_scope: Optional, arg_scope to build the network. If not provided the
default arg_scope of the network would be used.
Returns:
A network function that is used to extract features.
Raises:
ValueError: network is not supported.
"""
if network_name not in networks_map:
raise ValueError('Unsupported network %s.' % network_name)
arg_scope = arg_scope or arg_scopes_map[network_name]()
def _identity_function(inputs):
return inputs
if preprocess_images:
preprocess_function = _PREPROCESS_FN[network_name]
else:
preprocess_function = _identity_function
func = networks_map[network_name]
@functools.wraps(func)
def network_fn(inputs, *args, **kwargs):
with slim.arg_scope(arg_scope):
return func(preprocess_function(inputs), *args, **kwargs)
return network_fn
| {
"content_hash": "112710cc363d87d72febb8c707968624",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 33.42622950819672,
"alnum_prop": 0.6764753964361615,
"repo_name": "jiaphuan/models",
"id": "1ef93568d95d1ddd4127b5f0bc6f46b4b0c4d17d",
"size": "6806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/deeplab/core/feature_extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
} |
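A quick numeric check of the mapping _preprocess_zero_mean_unit_range performs, written in plain Python so it runs without TensorFlow. It also shows why mean_pixel returns [127.5, 127.5, 127.5] for this variant: zero-padding the preprocessed tensor corresponds to the value 127.5 in the original [0, 255] range.
def zero_mean_unit_range(pixel):
    # Mirrors (2.0 / 255.0) * x - 1.0 from the extractor above.
    return (2.0 / 255.0) * pixel - 1.0

for value, expected in [(0.0, -1.0), (127.5, 0.0), (255.0, 1.0)]:
    assert abs(zero_mean_unit_range(value) - expected) < 1e-9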
__all__ = [
"BatfishAssertException",
"BatfishAssertWarning",
"BatfishException",
"QuestionValidationException",
]
class BatfishException(Exception):
"""Base exception for Batfish-related errors."""
class BatfishAssertException(BatfishException):
"""Raised if a pybatfish assertion fails.
.. seealso:: :py:module:`~pybatfish.client.assert`
"""
class BatfishAssertWarning(UserWarning):
"""Used for soft assertions instead of an exception.
.. seealso:: :py:module:`~pybatfish.client.assert`
"""
class QuestionValidationException(BatfishException):
"""Raised when an invalid Batfish question is encountered."""
| {
"content_hash": "59c4213fe17fdacb691596393042149a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 65,
"avg_line_length": 23.785714285714285,
"alnum_prop": 0.7102102102102102,
"repo_name": "batfish/pybatfish",
"id": "9ef5c392764e0a0ca2acccdddbb0e0d05bdc7674",
"size": "1297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybatfish/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1219681"
},
{
"name": "Python",
"bytes": "684750"
}
],
"symlink_target": ""
} |
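A short sketch of how the hierarchy above is meant to be consumed: catch the specific subclass first and fall back to BatfishException as the umbrella type. The failing check here is invented for illustration; only the import path comes from the module above.
from pybatfish.exception import BatfishAssertException, BatfishException

def run_check():
    # Stand-in for a pybatfish assertion that fails.
    raise BatfishAssertException("assertion failed")

try:
    run_check()
except BatfishAssertException as e:
    print("assertion failure: %s" % e)
except BatfishException as e:
    print("other Batfish error: %s" % e)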
"""
Support for Modbus switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.modbus/
"""
import logging
import blumate.components.modbus as modbus
from blumate.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Read configuration and create Modbus devices."""
switches = []
slave = config.get("slave", None)
if modbus.TYPE == "serial" and not slave:
_LOGGER.error("No slave number provided for serial Modbus")
return False
registers = config.get("registers")
if registers:
for regnum, register in registers.items():
bits = register.get("bits")
for bitnum, bit in bits.items():
if bit.get("name"):
switches.append(ModbusSwitch(bit.get("name"),
slave,
regnum,
bitnum))
coils = config.get("coils")
if coils:
for coilnum, coil in coils.items():
switches.append(ModbusSwitch(coil.get("name"),
slave,
coilnum,
0,
coil=True))
add_devices(switches)
class ModbusSwitch(ToggleEntity):
"""Representation of a Modbus switch."""
# pylint: disable=too-many-arguments
def __init__(self, name, slave, register, bit, coil=False):
"""Initialize the switch."""
self._name = name
self.slave = int(slave) if slave else 1
self.register = int(register)
self.bit = int(bit)
self._coil = coil
self._is_on = None
self.register_value = None
def __str__(self):
"""String representation of Modbus switch."""
return "%s: %s" % (self.name, self.state)
@property
def should_poll(self):
"""Poling needed.
Slaves are not allowed to initiate communication on Modbus networks.
"""
return True
@property
def unique_id(self):
"""Return a unique ID."""
return "MODBUS-SWITCH-{}-{}-{}".format(self.slave,
self.register,
self.bit)
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Set switch on."""
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, True)
else:
val = self.register_value | (0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def turn_off(self, **kwargs):
"""Set switch off."""
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, False)
else:
val = self.register_value & ~(0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def update(self):
"""Update the state of the switch."""
if self._coil:
result = modbus.NETWORK.read_coils(self.register, 1)
self.register_value = result.bits[0]
self._is_on = self.register_value
else:
result = modbus.NETWORK.read_holding_registers(
unit=self.slave, address=self.register,
count=1)
val = 0
for i, res in enumerate(result.registers):
val += res * (2**(i*16))
self.register_value = val
self._is_on = (val & (0x0001 << self.bit) > 0)
| {
"content_hash": "eb04bd11377e7c8d38dc3cec40994594",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 76,
"avg_line_length": 33.58267716535433,
"alnum_prop": 0.5097303634232122,
"repo_name": "bdfoster/blumate",
"id": "e1ec45690550350cee073eea676ae785dd4aa61d",
"size": "4265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blumate/components/switch/modbus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1309487"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2460958"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6407"
}
],
"symlink_target": ""
} |
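turn_on, turn_off, and update above are plain bit masking on a 16-bit holding register. The same arithmetic in isolation, with the register value and bit positions chosen arbitrarily for illustration:
def set_bit(register_value, bit):
    return register_value | (0x0001 << bit)

def clear_bit(register_value, bit):
    return register_value & ~(0x0001 << bit)

def read_bit(register_value, bit):
    return (register_value & (0x0001 << bit)) > 0

value = 0b0000
value = set_bit(value, 2)    # 0b0100
assert read_bit(value, 2)
value = clear_bit(value, 2)  # 0b0000
assert not read_bit(value, 2)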
import sys
from collections import defaultdict
import hasher
import in_generator
import name_utilities
import template_expander
from in_file import InFile
def _symbol(tag):
# FIXME: Remove this special case for the ugly x-webkit-foo attributes.
if tag['name'].startswith('-webkit-'):
return tag['name'].replace('-', '_')[1:]
return name_utilities.cpp_name(tag).replace('-', '_')
class MakeElementTypeHelpersWriter(in_generator.Writer):
defaults = {
'Conditional': None,
'ImplementedAs': None,
'JSInterfaceName': None,
'constructorNeedsCreatedByParser': None,
'interfaceName': None,
'noConstructor': None,
'noTypeHelpers': None,
'runtimeEnabled': None,
}
default_parameters = {
'attrsNullNamespace': None,
'export': '',
'fallbackInterfaceName': '',
'fallbackJSInterfaceName': '',
'namespace': '',
'namespacePrefix': '',
'namespaceURI': '',
}
filters = {
'hash': hasher.hash,
'symbol': _symbol,
}
def __init__(self, in_file_path):
super(MakeElementTypeHelpersWriter, self).__init__(in_file_path)
self.namespace = self.in_file.parameters['namespace'].strip('"')
self.fallbackInterface = self.in_file.parameters['fallbackInterfaceName'].strip('"')
assert self.namespace, 'A namespace is required.'
self._outputs = {
(self.namespace + "ElementTypeHelpers.h"): self.generate_helper_header,
(self.namespace + "ElementTypeHelpers.cpp"): self.generate_helper_implementation,
}
self._template_context = {
'namespace': self.namespace,
'tags': self.in_file.name_dictionaries,
'elements': set(),
}
tags = self._template_context['tags']
elements = self._template_context['elements']
interface_counts = defaultdict(int)
for tag in tags:
tag['interface'] = self._interface(tag)
interface_counts[tag['interface']] += 1
elements.add(tag['interface'])
for tag in tags:
tag['multipleTagNames'] = (interface_counts[tag['interface']] > 1 or tag['interface'] == self.fallbackInterface)
@template_expander.use_jinja("ElementTypeHelpers.h.tmpl", filters=filters)
def generate_helper_header(self):
return self._template_context
@template_expander.use_jinja("ElementTypeHelpers.cpp.tmpl", filters=filters)
def generate_helper_implementation(self):
return self._template_context
def _interface(self, tag):
if tag['interfaceName']:
return tag['interfaceName']
name = name_utilities.upper_first(tag['name'])
# FIXME: We shouldn't hard-code HTML here.
if name == 'HTML':
name = 'Html'
dash = name.find('-')
while dash != -1:
name = name[:dash] + name[dash + 1].upper() + name[dash + 2:]
dash = name.find('-')
return '%s%sElement' % (self.namespace, name)
if __name__ == "__main__":
in_generator.Maker(MakeElementTypeHelpersWriter).main(sys.argv)
| {
"content_hash": "051fbf3d1b087bf2f7c69c0f10919c3e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 124,
"avg_line_length": 33.37894736842105,
"alnum_prop": 0.6035950804162725,
"repo_name": "google-ar/WebARonARCore",
"id": "c6b4063eccf44e5e715889d12024eb7b0abfdcc4",
"size": "3356",
"binary": false,
"copies": "2",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "third_party/WebKit/Source/build/scripts/make_element_type_helpers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sys
import os
print('Creating database tables for AllSpeak...')
if os.path.abspath(os.curdir) not in sys.path:
sys.path.append(os.path.abspath(os.curdir))
# Create the database tables and commit the changes to the database
from project import db
# Importing the models registers them with SQLAlchemy so create_all() sees them
from project.models import User
# Drop all of the existing database tables
db.drop_all()
# Create the database and the database table
db.create_all()
# Commit the changes for the users
db.session.commit()
print('...done!')
| {
"content_hash": "fa4234960ce38e77a3110a5a9eb4533f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 20.875,
"alnum_prop": 0.7465069860279441,
"repo_name": "allspeak/api.allspeak.eu",
"id": "f0a12a8e33ea0f6cda09983286213610fca6bed4",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/instance/scripts/create_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "542"
},
{
"name": "Dockerfile",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "15330"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "132944"
},
{
"name": "Shell",
"bytes": "697"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type # make all classes new-style in python2
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='stringsearching',
version='0.1.0',
description='String searching algorithms',
long_description=readme,
url='https://github.com/enerqi/string-searching',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
| {
"content_hash": "5cc7d7132d466074082e3545d668b07f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 26.523809523809526,
"alnum_prop": 0.6929982046678635,
"repo_name": "enerqi/string-searching",
"id": "abf45d15374b8f93cfaa5e5ff56dc15d69dee0d4",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4023"
}
],
"symlink_target": ""
} |
"""
roseninputs.py -- inputs for testing the rosenbrock function for testsolvers_pyre.py
"""
from mystic.models import rosen as cost
from mystic.termination import *
ND = 3
# for Differential Evolution:
NP = 30
from numpy import inf
from mystic.tools import random_seed
random_seed(123)
x0 = [0.8, 1.2, 0.5]
# used with SetStrictRanges
#min_bounds = [-0.999, -0.999, 0.999]
#max_bounds = [200.001, 100.001, inf]
termination = CandidateRelativeTolerance()
#termination = VTR()
#termination = ChangeOverGeneration()
#termination = NormalizedChangeOverGeneration()
# End of file
| {
"content_hash": "6219956c041cb2030c19cf5483995f15",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 84,
"avg_line_length": 21.214285714285715,
"alnum_prop": 0.7255892255892256,
"repo_name": "jcfr/mystic",
"id": "2306dfd542ed446a4c07afcf3db0d30290b3bf29",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples_other/roseninputs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Coq",
"bytes": "11400"
},
{
"name": "GAP",
"bytes": "545"
},
{
"name": "Logos",
"bytes": "11400"
},
{
"name": "Mathematica",
"bytes": "1044812"
},
{
"name": "Matlab",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "1666666"
},
{
"name": "Yacc",
"bytes": "11400"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Match
from .models import Tip
def delete_tips(modeladmin, request, queryset):
for match in queryset:
tips = Tip.objects.filter(match = match)
for tip in tips:
tip.score = 0
tip.scoring_field = ""
tip.is_score_calculated = False
tip.save()
delete_tips.short_description = "Delete calculated scores for tips for these matches"
class MatchAdmin(admin.ModelAdmin):
actions = [delete_tips]
admin.site.register(Match, MatchAdmin)
admin.site.register(Tip)
| {
"content_hash": "8afebee6322b6355af203c03f843d6b6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.660377358490566,
"repo_name": "leventebakos/football-ech",
"id": "19c2249a2894599f90b71c587737d891ee06b11c",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matches/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "HTML",
"bytes": "24328"
},
{
"name": "Python",
"bytes": "40002"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
AUTHOR = u'Kai Blin'
SITENAME = u'PhDOps'
SITESUBTITLE = u'Tales from the underfunded cousin of DevOps, while trying to get research done.'
SITEURL = u'//phdops.kblin.org'
PATH = 'content'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
THEME = u'pelican-bootstrap3'
BOOTSTRAP_THEME = u'simplex'
CC_LICENSE = u'CC-BY'
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('twitter', 'https://twitter.com/kaiblin'),
('github', 'https://github.com/kblin'),)
TWITTER_USERNAME = u'kaiblin'
DISQUS_SITENAME = u'phdops'
USE_OPEN_GRAPH = True
TWITTER_CARDS = True
TWITTER_WIDGET_ID = u'591317301112270848'
DEFAULT_PAGINATION = 10
SHOW_DATE_MODIFIED = True
# Use document-relative URLs when developing
RELATIVE_URLS = True
| {
"content_hash": "8ba0144fb851578ccabaae2b3b263d79",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 97,
"avg_line_length": 25.804347826086957,
"alnum_prop": 0.6950294860994103,
"repo_name": "kblin/phdops",
"id": "568529119f4950ae620eaa755d60afeb2a795ebf",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pelicanconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1771"
}
],
"symlink_target": ""
} |
import os
import sys
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Util import strxor
from django.conf import settings
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_bytes
from taggit.managers import TaggableManager
from extras.models import CustomFieldModel, TaggedItem
from utilities.models import ChangeLoggedModel
from .exceptions import InvalidKey
from .hashers import SecretValidationHasher
from .querysets import UserKeyQuerySet
def generate_random_key(bits=256):
"""
    Generate a random encryption key. Size is given in bits and must be in increments of 32.
"""
if bits % 32:
raise Exception("Invalid key size ({}). Key sizes must be in increments of 32 bits.".format(bits))
return os.urandom(int(bits / 8))
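# Sketch of the helper above: generate_random_key() returns 32 random bytes
# (256 bits); generate_random_key(128) returns 16. Any size not divisible
# by 32 bits raises an Exception, per the check above.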
def encrypt_master_key(master_key, public_key):
"""
Encrypt a secret key with the provided public RSA key.
"""
key = RSA.importKey(public_key)
cipher = PKCS1_OAEP.new(key)
return cipher.encrypt(master_key)
def decrypt_master_key(master_key_cipher, private_key):
"""
Decrypt a secret key with the provided private RSA key.
"""
key = RSA.importKey(private_key)
cipher = PKCS1_OAEP.new(key)
return cipher.decrypt(master_key_cipher)
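# Round-trip sketch for the two helpers above, assuming a PEM-encoded RSA
# keypair (public_pem/private_pem are hypothetical names):
#
#   master_key = generate_random_key()
#   cipher = encrypt_master_key(master_key, public_pem)
#   assert decrypt_master_key(cipher, private_pem) == master_key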
class UserKey(models.Model):
"""
A UserKey stores a user's personal RSA (public) encryption key, which is used to generate their unique encrypted
copy of the master encryption key. The encrypted instance of the master key can be decrypted only with the user's
matching (private) decryption key.
"""
created = models.DateField(
auto_now_add=True
)
last_updated = models.DateTimeField(
auto_now=True
)
user = models.OneToOneField(
to=User,
on_delete=models.CASCADE,
related_name='user_key',
editable=False
)
public_key = models.TextField(
verbose_name='RSA public key'
)
master_key_cipher = models.BinaryField(
max_length=512,
blank=True,
null=True,
editable=False
)
objects = UserKeyQuerySet.as_manager()
class Meta:
ordering = ['user__username']
permissions = (
('activate_userkey', "Can activate user keys for decryption"),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Store the initial public_key and master_key_cipher to check for changes on save().
self.__initial_public_key = self.public_key
self.__initial_master_key_cipher = self.master_key_cipher
def __str__(self):
return self.user.username
def clean(self, *args, **kwargs):
if self.public_key:
# Validate the public key format
try:
pubkey = RSA.import_key(self.public_key)
except ValueError:
raise ValidationError({
'public_key': "Invalid RSA key format."
})
except Exception:
raise ValidationError("Something went wrong while trying to save your key. Please ensure that you're "
"uploading a valid RSA public key in PEM format (no SSH/PGP).")
# Validate the public key length
pubkey_length = pubkey.size_in_bits()
if pubkey_length < settings.SECRETS_MIN_PUBKEY_SIZE:
raise ValidationError({
'public_key': "Insufficient key length. Keys must be at least {} bits long.".format(
settings.SECRETS_MIN_PUBKEY_SIZE
)
})
# We can't use keys bigger than our master_key_cipher field can hold
if pubkey_length > 4096:
raise ValidationError({
'public_key': "Public key size ({}) is too large. Maximum key size is 4096 bits.".format(
pubkey_length
)
})
super().clean()
def save(self, *args, **kwargs):
# Check whether public_key has been modified. If so, nullify the initial master_key_cipher.
if self.__initial_master_key_cipher and self.public_key != self.__initial_public_key:
self.master_key_cipher = None
# If no other active UserKeys exist, generate a new master key and use it to activate this UserKey.
if self.is_filled() and not self.is_active() and not UserKey.objects.active().count():
master_key = generate_random_key()
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
# If Secrets exist and this is the last active UserKey, prevent its deletion. Deleting the last UserKey will
# result in the master key being destroyed and rendering all Secrets inaccessible.
if Secret.objects.count() and [uk.pk for uk in UserKey.objects.active()] == [self.pk]:
raise Exception("Cannot delete the last active UserKey when Secrets exist! This would render all secrets "
"inaccessible.")
super().delete(*args, **kwargs)
def is_filled(self):
"""
Returns True if the UserKey has been filled with a public RSA key.
"""
return bool(self.public_key)
is_filled.boolean = True
def is_active(self):
"""
Returns True if the UserKey has been populated with an encrypted copy of the master key.
"""
return self.master_key_cipher is not None
is_active.boolean = True
def get_master_key(self, private_key):
"""
        Given the User's private key, decrypt and return the master key.
"""
        if not self.is_active():
raise ValueError("Unable to retrieve master key: UserKey is inactive.")
try:
return decrypt_master_key(force_bytes(self.master_key_cipher), private_key)
except ValueError:
return None
def activate(self, master_key):
"""
Activate the UserKey by saving an encrypted copy of the master key to the database.
"""
if not self.public_key:
raise Exception("Cannot activate UserKey: Its public key must be filled first.")
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
self.save()
class SessionKey(models.Model):
"""
A SessionKey stores a User's temporary key to be used for the encryption and decryption of secrets.
"""
userkey = models.OneToOneField(
to='secrets.UserKey',
on_delete=models.CASCADE,
related_name='session_key',
editable=False
)
cipher = models.BinaryField(
max_length=512,
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
created = models.DateTimeField(
auto_now_add=True
)
key = None
class Meta:
ordering = ['userkey__user__username']
def __str__(self):
return self.userkey.user.username
def save(self, master_key=None, *args, **kwargs):
if master_key is None:
raise Exception("The master key must be provided to save a session key.")
# Generate a random 256-bit session key if one is not already defined
if self.key is None:
self.key = generate_random_key()
# Generate SHA256 hash using Django's built-in password hashing mechanism
self.hash = make_password(self.key)
# Encrypt master key using the session key
self.cipher = strxor.strxor(self.key, master_key)
super().save(*args, **kwargs)
def get_master_key(self, session_key):
# Validate the provided session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid session key")
# Decrypt master key using provided session key
master_key = strxor.strxor(session_key, bytes(self.cipher))
return master_key
def get_session_key(self, master_key):
# Recover session key using the master key
session_key = strxor.strxor(master_key, bytes(self.cipher))
# Validate the recovered session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid master key")
return session_key
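    # XOR symmetry sketch: because cipher = session_key XOR master_key,
    # XOR-ing the stored cipher with either key recovers the other; the
    # stored password hash validates whichever key is supplied.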
class SecretRole(ChangeLoggedModel):
"""
A SecretRole represents an arbitrary functional classification of Secrets. For example, a user might define roles
such as "Login Credentials" or "SNMP Communities."
By default, only superusers will have access to decrypt Secrets. To allow other users to decrypt Secrets, grant them
access to the appropriate SecretRoles either individually or by group.
"""
name = models.CharField(
max_length=50,
unique=True
)
slug = models.SlugField(
unique=True
)
users = models.ManyToManyField(
to=User,
related_name='secretroles',
blank=True
)
groups = models.ManyToManyField(
to=Group,
related_name='secretroles',
blank=True
)
csv_headers = ['name', 'slug']
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?role={}".format(reverse('secrets:secret_list'), self.slug)
def to_csv(self):
return (
self.name,
self.slug,
)
def has_member(self, user):
"""
        Check whether the given user belongs to this SecretRole. Note that superusers belong to all roles.
"""
if user.is_superuser:
return True
return user in self.users.all() or user.groups.filter(pk__in=self.groups.all()).exists()
class Secret(ChangeLoggedModel, CustomFieldModel):
"""
A Secret stores an AES256-encrypted copy of sensitive data, such as passwords or secret keys. An irreversible
SHA-256 hash is stored along with the ciphertext for validation upon decryption. Each Secret is assigned to a
Device; Devices may have multiple Secrets associated with them. A name can optionally be defined along with the
ciphertext; this string is stored as plain text in the database.
A Secret can be up to 65,536 bytes (64KB) in length. Each secret string will be padded with random data to a minimum
of 64 bytes during encryption in order to protect short strings from ciphertext analysis.
"""
device = models.ForeignKey(
to='dcim.Device',
on_delete=models.CASCADE,
related_name='secrets'
)
role = models.ForeignKey(
to='secrets.SecretRole',
on_delete=models.PROTECT,
related_name='secrets'
)
name = models.CharField(
max_length=100,
blank=True
)
ciphertext = models.BinaryField(
max_length=65568, # 16B IV + 2B pad length + {62-65550}B padded
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
custom_field_values = GenericRelation(
to='extras.CustomFieldValue',
content_type_field='obj_type',
object_id_field='obj_id'
)
tags = TaggableManager(through=TaggedItem)
plaintext = None
csv_headers = ['device', 'role', 'name', 'plaintext']
class Meta:
ordering = ['device', 'role', 'name']
unique_together = ['device', 'role', 'name']
def __init__(self, *args, **kwargs):
self.plaintext = kwargs.pop('plaintext', None)
super().__init__(*args, **kwargs)
def __str__(self):
if self.role and self.device and self.name:
return '{} for {} ({})'.format(self.role, self.device, self.name)
# Return role and device if no name is set
if self.role and self.device:
return '{} for {}'.format(self.role, self.device)
return 'Secret'
def get_absolute_url(self):
return reverse('secrets:secret', args=[self.pk])
def to_csv(self):
return (
self.device,
self.role,
self.name,
self.plaintext or '',
)
def _pad(self, s):
"""
Prepend the length of the plaintext (2B) and pad with garbage to a multiple of 16B (minimum of 64B).
+--+--------+-------------------------------------------+
|LL|MySecret|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+--+--------+-------------------------------------------+
"""
s = s.encode('utf8')
if len(s) > 65535:
raise ValueError("Maximum plaintext size is 65535 bytes.")
# Minimum ciphertext size is 64 bytes to conceal the length of short secrets.
if len(s) <= 62:
pad_length = 62 - len(s)
elif (len(s) + 2) % 16:
pad_length = 16 - ((len(s) + 2) % 16)
else:
pad_length = 0
# Python 2 compatibility
if sys.version_info[0] < 3:
header = chr(len(s) >> 8) + chr(len(s) % 256)
else:
header = bytes([len(s) >> 8]) + bytes([len(s) % 256])
return header + s + os.urandom(pad_length)
def _unpad(self, s):
"""
Consume the first two bytes of s as a plaintext length indicator and return only that many bytes as the
plaintext.
"""
if isinstance(s[0], str):
plaintext_length = (ord(s[0]) << 8) + ord(s[1])
else:
plaintext_length = (s[0] << 8) + s[1]
return s[2:plaintext_length + 2].decode('utf8')
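    # Padding round-trip sketch: self._unpad(self._pad(s)) == s for any
    # plaintext up to 65535 bytes, since the 2-byte header records the
    # original length and the random tail is discarded on unpadding.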
def encrypt(self, secret_key):
"""
Generate a random initialization vector (IV) for AES. Pad the plaintext to the AES block size (16 bytes) and
encrypt. Prepend the IV for use in decryption. Finally, record the SHA256 hash of the plaintext for validation
upon decryption.
"""
if self.plaintext is None:
raise Exception("Must unlock or set plaintext before locking.")
# Pad and encrypt plaintext
iv = os.urandom(16)
aes = AES.new(secret_key, AES.MODE_CFB, iv)
self.ciphertext = iv + aes.encrypt(self._pad(self.plaintext))
# Generate SHA256 using Django's built-in password hashing mechanism
self.hash = make_password(self.plaintext, hasher=SecretValidationHasher())
self.plaintext = None
def decrypt(self, secret_key):
"""
Consume the first 16 bytes of self.ciphertext as the AES initialization vector (IV). The remainder is decrypted
using the IV and the provided secret key. Padding is then removed to reveal the plaintext. Finally, validate the
decrypted plaintext value against the stored hash.
"""
if self.plaintext is not None:
return
if not self.ciphertext:
raise Exception("Must define ciphertext before unlocking.")
# Decrypt ciphertext and remove padding
iv = bytes(self.ciphertext[0:16])
ciphertext = bytes(self.ciphertext[16:])
aes = AES.new(secret_key, AES.MODE_CFB, iv)
plaintext = self._unpad(aes.decrypt(ciphertext))
# Verify decrypted plaintext against hash
if not self.validate(plaintext):
raise ValueError("Invalid key or ciphertext!")
self.plaintext = plaintext
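    # Usage sketch with a hypothetical 32-byte AES key `key`:
    #   secret.plaintext = 'hunter2'; secret.encrypt(key)  # stores IV + ciphertext
    #   secret.plaintext is None                           # cleared after encrypt
    #   secret.decrypt(key)                                # restores 'hunter2'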
def validate(self, plaintext):
"""
Validate that a given plaintext matches the stored hash.
"""
if not self.hash:
raise Exception("Hash has not been generated for this secret.")
return check_password(plaintext, self.hash, preferred=SecretValidationHasher())
def decryptable_by(self, user):
"""
Check whether the given user has permission to decrypt this Secret.
"""
return self.role.has_member(user)
| {
"content_hash": "c64b1f750cfed6332465e89cc966b8e7",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 120,
"avg_line_length": 34.29661016949152,
"alnum_prop": 0.6146528292562392,
"repo_name": "lampwins/netbox",
"id": "6dcb5abee9f48250d76aab1351505b90902e10fa",
"size": "16188",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/secrets/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815169"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
import os
import datetime
import subprocess
def get_version(version=None):
"""Returns a PEP 386-compliant version number from VERSION."""
if version is None:
from telemundo import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return main + sub
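# Illustrative outputs of get_version() for hypothetical VERSION tuples:
#   (1, 2, 0, 'final', 0) -> '1.2'
#   (1, 2, 1, 'rc', 3)    -> '1.2.1c3'
#   (1, 2, 0, 'alpha', 0) -> '1.2.devYYYYMMDDHHMMSS' (git timestamp suffix)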
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| {
"content_hash": "ab4b51e3423f3f75eb9f8c9a45d68221",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 34.11764705882353,
"alnum_prop": 0.6304597701149425,
"repo_name": "telemundo/telemundo-py",
"id": "23d155253ebd4970aafc5623e4c41e52771042c2",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemundo/utils/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5865"
}
],
"symlink_target": ""
} |
click(Pattern("Flmnduana.png").targetOffset(34,0))
exit(0) | {
"content_hash": "3b89a6c2fe7ca6f41d0e4cc9046afcae",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 29,
"alnum_prop": 0.7586206896551724,
"repo_name": "silverbulleters/vanessa-behavoir",
"id": "2de7769052fa60f3759ffe91b3431672f0abd2c2",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/Sikuli/OpenDialogClick.sikuli/OpenDialogClick.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "161783"
}
],
"symlink_target": ""
} |
"""Support for Homekit covers."""
import logging
from homekit.model.characteristics import CharacteristicsTypes
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
CoverDevice,
)
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from . import KNOWN_DEVICES, HomeKitEntity
STATE_STOPPED = "stopped"
_LOGGER = logging.getLogger(__name__)
CURRENT_GARAGE_STATE_MAP = {
0: STATE_OPEN,
1: STATE_CLOSED,
2: STATE_OPENING,
3: STATE_CLOSING,
4: STATE_STOPPED,
}
TARGET_GARAGE_STATE_MAP = {STATE_OPEN: 0, STATE_CLOSED: 1, STATE_STOPPED: 2}
CURRENT_WINDOW_STATE_MAP = {0: STATE_CLOSING, 1: STATE_OPENING, 2: STATE_STOPPED}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Legacy set up platform."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit covers."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
def async_add_service(aid, service):
info = {"aid": aid, "iid": service["iid"]}
if service["stype"] == "garage-door-opener":
async_add_entities([HomeKitGarageDoorCover(conn, info)], True)
return True
if service["stype"] in ("window-covering", "window"):
async_add_entities([HomeKitWindowCover(conn, info)], True)
return True
return False
conn.add_listener(async_add_service)
class HomeKitGarageDoorCover(HomeKitEntity, CoverDevice):
"""Representation of a HomeKit Garage Door."""
def __init__(self, accessory, discovery_info):
"""Initialise the Cover."""
super().__init__(accessory, discovery_info)
self._state = None
self._obstruction_detected = None
self.lock_state = None
@property
def device_class(self):
"""Define this cover as a garage door."""
return "garage"
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.DOOR_STATE_CURRENT,
CharacteristicsTypes.DOOR_STATE_TARGET,
CharacteristicsTypes.OBSTRUCTION_DETECTED,
]
def _update_door_state_current(self, value):
self._state = CURRENT_GARAGE_STATE_MAP[value]
def _update_obstruction_detected(self, value):
self._obstruction_detected = value
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._state == STATE_CLOSED
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
async def async_open_cover(self, **kwargs):
"""Send open command."""
await self.set_door_state(STATE_OPEN)
async def async_close_cover(self, **kwargs):
"""Send close command."""
await self.set_door_state(STATE_CLOSED)
async def set_door_state(self, state):
"""Send state command."""
characteristics = [
{
"aid": self._aid,
"iid": self._chars["door-state.target"],
"value": TARGET_GARAGE_STATE_MAP[state],
}
]
await self._accessory.put_characteristics(characteristics)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
if self._obstruction_detected is None:
return None
return {"obstruction-detected": self._obstruction_detected}
class HomeKitWindowCover(HomeKitEntity, CoverDevice):
"""Representation of a HomeKit Window or Window Covering."""
def __init__(self, accessory, discovery_info):
"""Initialise the Cover."""
super().__init__(accessory, discovery_info)
self._state = None
self._position = None
self._tilt_position = None
self._obstruction_detected = None
self.lock_state = None
self._features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.POSITION_STATE,
CharacteristicsTypes.POSITION_CURRENT,
CharacteristicsTypes.POSITION_TARGET,
CharacteristicsTypes.POSITION_HOLD,
CharacteristicsTypes.VERTICAL_TILT_CURRENT,
CharacteristicsTypes.VERTICAL_TILT_TARGET,
CharacteristicsTypes.HORIZONTAL_TILT_CURRENT,
CharacteristicsTypes.HORIZONTAL_TILT_TARGET,
CharacteristicsTypes.OBSTRUCTION_DETECTED,
]
def _setup_position_hold(self, char):
self._features |= SUPPORT_STOP
def _setup_vertical_tilt_current(self, char):
self._features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION
)
def _setup_horizontal_tilt_current(self, char):
self._features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION
)
def _update_position_state(self, value):
self._state = CURRENT_WINDOW_STATE_MAP[value]
def _update_position_current(self, value):
self._position = value
def _update_vertical_tilt_current(self, value):
self._tilt_position = value
def _update_horizontal_tilt_current(self, value):
self._tilt_position = value
def _update_obstruction_detected(self, value):
self._obstruction_detected = value
@property
def supported_features(self):
"""Flag supported features."""
return self._features
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._position
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._position == 0
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._state == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._state == STATE_OPENING
async def async_stop_cover(self, **kwargs):
"""Send hold command."""
characteristics = [
{"aid": self._aid, "iid": self._chars["position.hold"], "value": 1}
]
await self._accessory.put_characteristics(characteristics)
async def async_open_cover(self, **kwargs):
"""Send open command."""
await self.async_set_cover_position(position=100)
async def async_close_cover(self, **kwargs):
"""Send close command."""
await self.async_set_cover_position(position=0)
async def async_set_cover_position(self, **kwargs):
"""Send position command."""
position = kwargs[ATTR_POSITION]
characteristics = [
{"aid": self._aid, "iid": self._chars["position.target"], "value": position}
]
await self._accessory.put_characteristics(characteristics)
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt."""
return self._tilt_position
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
tilt_position = kwargs[ATTR_TILT_POSITION]
if "vertical-tilt.target" in self._chars:
characteristics = [
{
"aid": self._aid,
"iid": self._chars["vertical-tilt.target"],
"value": tilt_position,
}
]
await self._accessory.put_characteristics(characteristics)
elif "horizontal-tilt.target" in self._chars:
characteristics = [
{
"aid": self._aid,
"iid": self._chars["horizontal-tilt.target"],
"value": tilt_position,
}
]
await self._accessory.put_characteristics(characteristics)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
state_attributes = {}
if self._obstruction_detected is not None:
state_attributes["obstruction-detected"] = self._obstruction_detected
return state_attributes
| {
"content_hash": "805e8367c69e2ac3c9ebb6b6ca47da18",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 88,
"avg_line_length": 32.05755395683453,
"alnum_prop": 0.6160233393177738,
"repo_name": "leppa/home-assistant",
"id": "7e5591d9505ea5df516a9c594b4af74061d60e41",
"size": "8912",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
'''
Main test harness: process a directory with a specified front-end.
Record the pass/fail counts and print a latex table with the pass rate.
Example use:
    (1) Specify the Python front-ends and the apps to process:
qualitas_test.py 2.7 3.5 gramps astropy # Two versions, two apps
(2) Process all the Qualitas apps with a particular front end:
qualitas_test.py 2.6 2.7 # Two versions, all apps
(3) Process a given apps with all front ends:
qualitas_test.py gramps astropy # All versions, two apps
This is how we get Table 1 for the ESEM 2017 paper.
'''
import os
import sys
import subprocess
import shutil
import re
import linecache
import codecs
import qualitas
# The versions of Python we have front-ends for:
SERIES2 = ['2.0', '2.2', '2.3', '2.4', '2.4.3', '2.5', '2.6', '2.7', '2.7.2']
SERIES3 = ['3.0', '3.1', '3.2', '3.3.0', '3.5.0', '3.6.0']
# The versions we're using for the ESEM paper:
FOR_ESEM = ['2.0', '2.2', '2.4', '2.5', '2.6', '2.7',
'3.0', '3.1', '3.2', '3.3.0', '3.5.0', '3.6.0']
def root_for(suite):
if suite == 'qualitas':
return qualitas.corpus_for_year('2017')
assert False, 'Unknown Python test suite: '+suite
# What are you analysing?
TEST_ROOT = root_for('qualitas')
# Need to fix 'print' to be version-agnostic, even for early 2.x
def safe_print(msg, pfile=sys.stdout, pflush=False, pend='\n'):
pfile.write(msg+pend)
if pflush:
pfile.flush()
class TestHarness:
''' Instantiate with a Python version, run over a dir of Python files.
Records no. of pass/fail, prints nice output for syntax errors.
'''
# This is the directory where the Makefile lives:
FRONTEND_DIR = os.path.join(os.path.dirname(__file__), 'multi-front-end')
# This is the exe that the Makefile builds:
FRONTEND_EXE = os.path.join(FRONTEND_DIR, 'pycomply')
    # I need to record errors for each processed file:
ERROR_LOG = os.path.join(os.getcwd(), 'error.log')
def __init__(self, version, verbose=False):
self.init_counters()
self.verbose = verbose # If true, print syntax error messages
self.ver_front_end = '%s-%s' % (TestHarness.FRONTEND_EXE, version)
self.make_executable(version)
def make_command(self, version):
return 'make PYVER=%s' % version
@staticmethod
def copy_file(srcFolder, srcFile, dstFile):
srcPath = os.path.join(TestHarness.FRONTEND_DIR, srcFolder, srcFile)
assert os.path.isfile(srcPath), 'File %s not found' % srcPath
dstPath = os.path.join(TestHarness.FRONTEND_DIR, dstFile)
shutil.copyfile(srcPath, dstPath)
@staticmethod
def shell_run(cmd, mycwd):
return subprocess.call(cmd, cwd=mycwd, shell=True)
def make_executable(self, version, forceMake=False):
''' Makes the executable front-end if it's not already there '''
if forceMake or not os.path.isfile(self.ver_front_end):
print("Making front-end for version %s..." % version, flush=True)
retcode = TestHarness.shell_run(self.make_command(version), TestHarness.FRONTEND_DIR)
assert retcode == 0, '\tFAILED to make the parser'
def init_counters(self):
self.num_passed = 0
self.num_failed = 0
def set_verbose(self, see_errors=True):
self.verbose = see_errors
def print_context(self, filename, line_no):
''' Print a line from a file, and line(s) before and after it '''
ctxt = 1 # No. of lines before and after
for d in range(line_no-ctxt, line_no+ctxt+1):
            try:
print('%d:%s' % (d, linecache.getline(filename, d)),
file=sys.stderr, flush=True, end='')
except (SyntaxError) as err:
msg = str(err).replace(filename, os.path.basename(filename))
print('%d:%s' % (d, msg))
def check_return_code(self, retcode, filename, filepath):
''' Increment pass/fail counter; print error details if desired '''
if retcode > 0:
if self.verbose:
print('\n*', filepath, 'failed.', file=sys.stderr)
try:
#with open(TestHarness.ERROR_LOG, 'r') as tmp_fh:
with codecs.open(TestHarness.ERROR_LOG, 'r',
encoding='utf-8', errors='ignore') as tmp_fh:
error_msg = tmp_fh.read()
print(error_msg, file=sys.stderr, flush=True, end='')
                    match = re.match(r'^(\d+)', error_msg)
if match:
line_no = int(match.group(1))
self.print_context(filepath, line_no)
except (UnicodeDecodeError) as err:
print('Exception %s' % err, file=sys.stderr)
self.num_failed += 1
else:
# print(testcase, 'passed')
self.num_passed += 1
def test_one_file(self, root, filename):
filepath = os.path.join(root, filename)
# sed hack adds an extra '\n' at end of file:
toExec = 'sed -e \'$a\\\' "%s" | %s > %s 2>&1' \
% (filepath, self.ver_front_end, TestHarness.ERROR_LOG)
this_path = os.path.dirname(os.path.realpath(__file__))
retcode = TestHarness.shell_run(toExec, this_path)
self.check_return_code(retcode, filename, filepath)
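    # Expanded command sketch (hypothetical paths): the sed expression appends
    # a trailing newline before piping the file into the front-end, e.g.
    #   sed -e '$a\' "app/foo.py" | ./multi-front-end/pycomply-2.7 > error.log 2>&1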
def test_directory(self, testpath, reinit_counters=False):
''' Tests all files in this directory and its subdirectories. '''
assert os.path.isdir(testpath), testpath + 'must be a directory'
if reinit_counters:
self.init_counters()
for root, dirs, files in os.walk(testpath):
for filename in files:
if filename.endswith('.py'):
self.test_one_file(root, filename)
def get_total(self):
''' Return the total number of files processed (so far)'''
return (self.num_passed + self.num_failed)
def percent_passed(self):
''' Return the percentage pass rate (or 0 no files processed)'''
if self.get_total() == 0:
return 0
return self.num_passed * 100.0 / self.get_total()
def __str__(self):
return '%d Passed, %d Failed (%5.2f%% passed)' \
            % (self.num_passed, self.num_failed, self.percent_passed())
def print_perc(perc):
''' How do you want your percentages to be printed? '''
#return '%4.1f' % perc # 1 decimal place
return '\\shade{%d}' % round(perc)
def latex_table_row(data, effect=None, want_hline=False):
''' Print a single row of a latex table (to stdout) '''
lstr = lambda s: ('\\text%s{%s}' % (effect,s)) if effect else ('%12s' % s)
row_str = ' & '.join([lstr(d) for d in data]) + '\\\\'
if want_hline:
row_str += '\n\\hline'
return row_str
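# Example (hypothetical row): latex_table_row(['app', '98'], 'bf', True)
# produces the LaTeX source:
#   \textbf{app} & \textbf{98}\\
#   \hline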
def print_latex_table(pyVersions, qualapps, testroot, percs, labels=None):
# First column of table should be the application names:
if not labels: # Can optionally specify labels for rows
labels = qualapps
row_data = [[t] for t in labels]
# Data columns are the percentages for each version:
for i, plist in enumerate(percs):
row_data[i].extend([print_perc(p) for p in plist])
# Last column should be totals for each application:
for i,testdir in enumerate(qualapps):
testpath = os.path.join(testroot,testdir)
row_data[i].append('%5d' % qualitas.count_python_files(testpath))
# Now print the table, row-by-row:
safe_print('\\begin{tabular}{l*{%d}{c}c}' % len(pyVersions))
safe_print(latex_table_row(['Application'] +
[p for p in pyVersions] + ['Files'], 'bf', True))
for row in row_data:
safe_print(latex_table_row(row))
safe_print('\\hline')
safe_print('\\end{tabular}')
def test_all(pyVersions, qualapps,
testroot=qualitas.get_root_dir()):
''' For each given Python version, test each given directory.
Optionally can specify the root (for non-Qualitas),
and a different harness (instead of TestHarness).
'''
# We assemble the data column-by-column (one column per Python version)
percs = [[] for t in qualapps] # one row per application
for runver in pyVersions:
# Print the syntax errors if we're testing 1 version with 1 app:
want_verbose = len(pyVersions)==1 and len(qualapps)==1
# New harness for each version:
harness = TestHarness(runver, want_verbose)
safe_print("Running front-end for v%s on %d apps:"
% (runver, len(qualapps)),
sys.stderr, True, '')
tot_failed = 0
for i,testdir in enumerate(qualapps):
safe_print(" %s," % testdir, sys.stderr, True, '')
harness.test_directory(os.path.join(testroot,testdir), True)
percs[i].append(harness.percent_passed())
tot_failed += harness.num_failed
safe_print(' done (%d cases failed).' % tot_failed, sys.stderr)
return percs
def get_pyvers_appnames(given_args, test_dir=qualitas.get_root_dir()):
''' Use the (command-line) args to specify front-end(s) or app(s) '''
full_suite = qualitas.get_dirnames(test_dir)
qualapps = [ ]
versions = [ ]
for arg in given_args:
if arg in SERIES2+SERIES3:
versions.append(arg)
elif arg in full_suite:
qualapps.append(arg)
else:
safe_print('Unkown argument: "%s"' % arg, sys.stderr)
sys.exit(1)
if versions == []: # None specified, so use "all" the Python front-ends
versions = FOR_ESEM
if qualapps == []: # None specified, so test "all" the applications
qualapps = full_suite
return (versions, qualapps)
# On the command line you can specify the Python versions to use,
# or list the Qualitas apps you want to process (or a mixture of both)
# Default is to use all (ESEM) versions to process all Qualitas apps.
if __name__ == '__main__':
versions, qualapps = get_pyvers_appnames(sys.argv[1:])
percs = test_all(versions, qualapps, TEST_ROOT)
print_latex_table(versions, qualapps, TEST_ROOT, percs)
| {
"content_hash": "1facdfac465094e97e715410e8b29e9b",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 97,
"avg_line_length": 41.46774193548387,
"alnum_prop": 0.5994749124854143,
"repo_name": "MalloyPower/python-compliance",
"id": "11f4d388fd9e972dba0f2a4d68dec039fce60fc3",
"size": "10304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis-code/qualitas_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5903"
},
{
"name": "Lex",
"bytes": "499347"
},
{
"name": "Makefile",
"bytes": "4761"
},
{
"name": "Python",
"bytes": "78573"
},
{
"name": "Shell",
"bytes": "10775"
},
{
"name": "Yacc",
"bytes": "456204"
}
],
"symlink_target": ""
} |
from django.db import migrations
# This migration only made sense earlier in the git history, during
# the player->account transition. Now it will do nothing since players.PlayerDB
# no longer exists.
def forwards(apps, schema_editor):
try:
apps.get_model("players", "PlayerDB")
except LookupError:
return
AccountDB = apps.get_model("accounts", "AccountDB")
Msg = apps.get_model("comms", "Msg")
for msg in Msg.objects.all():
for player in msg.db_sender_players.all():
account = AccountDB.objects.get(id=player.id)
msg.db_sender_accounts.add(account)
for player in msg.db_receivers_players.all():
account = AccountDB.objects.get(id=player.id)
msg.db_receivers_accounts.add(account)
for player in msg.db_hide_from_players.all():
account = AccountDB.objects.get(id=player.id)
msg.db_hide_from_accounts.add(account)
ChannelDB = apps.get_model("comms", "ChannelDB")
for channel in ChannelDB.objects.all():
for player in channel.db_subscriptions.all():
account = AccountDB.objects.get(id=player.id)
            channel.db_account_subscriptions.add(account)
class Migration(migrations.Migration):
dependencies = [("comms", "0013_auto_20170705_1726")]
operations = [migrations.RunPython(forwards)]
| {
"content_hash": "259ef67777c5737820c4f0e9ede8ae1f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 35.05128205128205,
"alnum_prop": 0.6627651792245793,
"repo_name": "jamesbeebop/evennia",
"id": "dbe39353fb3a0c69b3b28d3f7e62f77509c9edd8",
"size": "1442",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/comms/migrations/0014_auto_20170705_1736.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
} |
"""
new_summary.py -- Run the sample analyses for the remapping summary figure.
Created by Joe Monaco on 2010-06-21.
Copyright (c) 2010 Johns Hopkins University. All rights reserved.
"""
from grid_remap.analysis.two_rooms import SampleRemap
from scipy.stats import ks_2samp
from numpy import pi, empty, arange, array
import os, cPickle
DATA_DIR = '/Users/joe/projects/grid_model/remapping/sample_sets/'
def main():
"""
Perform random sampling under various realignment conditions and collate
the statistical results for display.
"""
old_dir = os.getcwd()
os.chdir(DATA_DIR)
# Load data from various sampling conditions
rnd = SampleRemap.load_data('random/analysis.pickle').results
s1 = SampleRemap.load_data('shift/shift_1/analysis.pickle').results
s1f = SampleRemap.load_data('shift/shift_1_freq/analysis.pickle').results
s2 = SampleRemap.load_data('shift/shift_2/analysis.pickle').results
s2f = SampleRemap.load_data('shift/shift_2_freq/analysis.pickle').results
s4 = SampleRemap.load_data('shift/shift_4/analysis.pickle').results
s4f = SampleRemap.load_data('shift/shift_4_freq/analysis.pickle').results
s8 = SampleRemap.load_data('shift/shift_8/analysis.pickle').results
s8f = SampleRemap.load_data('shift/shift_8_freq/analysis.pickle').results
s16 = SampleRemap.load_data('shift/shift_16/analysis.pickle').results
s16f = SampleRemap.load_data('shift/shift_16_freq/analysis.pickle').results
srnd = SampleRemap.load_data('shift/shift_rnd/analysis.pickle').results
e1 = SampleRemap.load_data('ellipticity/ellipticity_1/analysis.pickle').results
e1f = SampleRemap.load_data('ellipticity/ellipticity_1_freq/analysis.pickle').results
e2 = SampleRemap.load_data('ellipticity/ellipticity_2/analysis.pickle').results
e2f = SampleRemap.load_data('ellipticity/ellipticity_2_freq/analysis.pickle').results
e4 = SampleRemap.load_data('ellipticity/ellipticity_4/analysis.pickle').results
e4f = SampleRemap.load_data('ellipticity/ellipticity_4_freq/analysis.pickle').results
e8 = SampleRemap.load_data('ellipticity/ellipticity_8/analysis.pickle').results
e8f = SampleRemap.load_data('ellipticity/ellipticity_8_freq/analysis.pickle').results
e16 = SampleRemap.load_data('ellipticity/ellipticity_16/analysis.pickle').results
e16f = SampleRemap.load_data('ellipticity/ellipticity_16_freq/analysis.pickle').results
ernd = SampleRemap.load_data('ellipticity/ellipticity_rnd/analysis.pickle').results
z1 = SampleRemap.load_data('rescaling/rescaling_1/analysis.pickle').results
z1f = SampleRemap.load_data('rescaling/rescaling_1_freq/analysis.pickle').results
z2 = SampleRemap.load_data('rescaling/rescaling_2/analysis.pickle').results
z2f = SampleRemap.load_data('rescaling/rescaling_2_freq/analysis.pickle').results
z4 = SampleRemap.load_data('rescaling/rescaling_4/analysis.pickle').results
z4f = SampleRemap.load_data('rescaling/rescaling_4_freq/analysis.pickle').results
z8 = SampleRemap.load_data('rescaling/rescaling_8/analysis.pickle').results
z8f = SampleRemap.load_data('rescaling/rescaling_8_freq/analysis.pickle').results
z16 = SampleRemap.load_data('rescaling/rescaling_16/analysis.pickle').results
z16f = SampleRemap.load_data('rescaling/rescaling_16_freq/analysis.pickle').results
zrnd = SampleRemap.load_data('rescaling/rescaling_rnd/analysis.pickle').results
# Data collation
labels = ('rnd', 's1', 's2', 's4', 's8', 's16', 's1f', 's2f', 's4f', 's8f', 's16f', 'srnd',
'e1', 'e2', 'e4', 'e8', 'e16', 'e1f', 'e2f', 'e4f', 'e8f', 'e16f', 'ernd',
'z1', 'z2', 'z4', 'z8', 'z16', 'z1f', 'z2f', 'z4f', 'z8f', 'z16f', 'zrnd')
results = (rnd, s1, s2, s4, s8, s16, s1f, s2f, s4f, s8f, s16f, srnd,
e1, e2, e4, e8, e16, e1f, e2f, e4f, e8f, e16f, ernd,
z1, z2, z4, z8, z16, z1f, z2f, z4f, z8f, z16f, zrnd)
# Create column-wise summary data matrix
N_conds = len(labels)
data = empty((N_conds, 9), 'd')
for i in xrange(N_conds):
data[i,0] = results[i]['remapping']
data[i,1] = results[i]['rate_remapping']
data[i,2] = results[i]['turnover']
data[i,3] = results[i]['remapping_int']
data[i,4] = results[i]['rate_remapping_int']
data[i,5] = results[i]['turnover_int']
data[i,6] = results[i]['remapping_std']
data[i,7] = results[i]['rate_remapping_std']
data[i,8] = results[i]['turnover_std']
# Create K-S p-value matrices across all conditions
ks = empty((3, N_conds, N_conds), 'd')
for c1 in xrange(N_conds):
for c2 in xrange(c1, N_conds):
ks[0,c1,c2] = ks[0,c2,c1] = \
ks_2samp(results[c1]['remapping_samples'], results[c2]['remapping_samples'])[1]
ks[1,c1,c2] = ks[1,c2,c1] = \
ks_2samp(results[c1]['rate_remapping_samples'], results[c2]['rate_remapping_samples'])[1]
ks[2,c1,c2] = ks[2,c2,c1] = \
ks_2samp(results[c1]['turnover_samples'], results[c2]['turnover_samples'])[1]
# Save the data
data_fd = file('summary.pickle', 'w')
data_dict = dict( data=data,
labels=array(labels),
visible=array([True]*N_conds),
color=array(['k']*N_conds),
pvals=ks
)
cPickle.dump(data_dict, data_fd)
data_fd.close()
os.chdir(old_dir)
return data_dict
def summary_plot(summary, data_x=0, data_y=1, annotate=True):
"""Create a scatter ellipse plot of means and confidence intervals for
summary data
Keyword arguments:
data_x/y -- which data to display on the x- and y- axes; 0=remapping,
1=rate_remapping, and 2=turnover.
"""
from pylab import figure, axes, rcParams
from matplotlib.patches import Ellipse
# Plot parameters
fig_size = 6.5, 7.5
anno_x = 150 # pts to right of left axis border
anno_y = -50 # pts above the bottom axis border
anno_dy = 25 # annotate row height
padding = 1.5 # factor of ellipse radius
plot_data_x = data_x
plot_data_y = data_y
ellipse_kw = dict( alpha=0.75,
aa=True,
lw=2,
fill=True,
fc='0.6'
)
# Create the figure and axes
old_fig_size = rcParams['figure.figsize']
rcParams['figure.figsize'] = fig_size
f = figure()
f.set_size_inches(fig_size)
f.suptitle('Remapping Sample Summary Data')
ellipse_kw['axes'] = ax = axes()
ellipse_kw['clip_box'] = ax.bbox
# Draw ellipses and annotations to the axes
box = [1]*4
c = 0
for i,row in enumerate(summary['data']):
if not summary['visible'][i]:
continue
xy = row[plot_data_x], row[plot_data_y]
ellipse_kw['ec'] = summary['color'][i]
ax.add_artist(
Ellipse(xy, 2*row[plot_data_x+3], 2*row[plot_data_y+3], **ellipse_kw))
if annotate:
ax.annotate(
summary['labels'][i], xy,
xytext=(anno_x, anno_y-c*anno_dy),
textcoords='axes points',
arrowprops=dict(width=0.5, frac=0.0, headwidth=0.0, shrink=0.0))
if xy[0] - padding*row[plot_data_x+3] < box[0] or c == 0:
box[0] = xy[0] - padding*row[plot_data_x+3]
if xy[0] + padding*row[plot_data_x+3] > box[2] or c == 0:
box[2] = xy[0] + padding*row[plot_data_x+3]
if xy[1] - padding*row[plot_data_y+3] < box[1] or c == 0:
box[1] = xy[1] - padding*row[plot_data_y+3]
if xy[1] + padding*row[plot_data_y+3] > box[3] or c == 0:
box[3] = xy[1] + padding*row[plot_data_y+3]
c += 1
# Set 1.0 lines and axis extent
ax.hlines(1.0, xmin=box[0], xmax=box[2], linestyle=':', color='k')
ax.vlines(1.0, ymin=box[1], ymax=box[3], linestyle=':', color='k')
ax.set_xlim(box[0], box[2])
ax.set_ylim(box[1], box[3])
# Axis labels
if plot_data_x == 0:
ax.set_xlabel('Place Remapping')
elif plot_data_x == 1:
ax.set_xlabel('Rate Remapping')
elif plot_data_x == 2:
ax.set_xlabel('Turnover')
if plot_data_y == 0:
ax.set_ylabel('Place Remapping')
elif plot_data_y == 1:
ax.set_ylabel('Rate Remapping')
elif plot_data_y == 2:
ax.set_ylabel('Turnover')
rcParams['figure.figsize'] = old_fig_size
return f
def summary_bar_plot(summary, sig_ref=None, sig_thresh='>0.05', ymin=0.5,
show_data=[0,1,2], legend=False):
"""Bar plot of summary statistics with SEM errorbars
Arguments:
summary -- summary results object
ymin -- y-axis minimum
show_data -- which data to show: list of integers, where 0=remapping,
1=rate_remapping, and 2=turnover
sig_ref -- index number of condition that will be reference for determining
statistical significance
sig_thresh -- string indicating threshold relationship for placing a
significance mark (star/asterisk): either '<' or '>' followed by float
threshold value
legend -- whether to display a legend
"""
from pylab import figure, axes, rcParams
from matplotlib import cm
# Plot parameters
fig_size = 9, 6
show_data = array(show_data)
num_data = len(show_data)
bar_w = 1/float(num_data+1)
# Significance parameters
sig_less = True
if sig_thresh[0] == '>':
sig_less = False
sig_value = float(sig_thresh[1:])
if sig_ref is not None:
print '* Significance indicates p%s%.5f.'%(sig_thresh[0], sig_value)
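    # Parsing sketch: sig_thresh='>0.05' marks conditions whose K-S p-value
    # versus sig_ref is >= 0.05, while '<0.05' marks those with p < 0.05.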
# Create the figure and axes
old_fig_size = rcParams['figure.figsize']
rcParams['figure.figsize'] = fig_size
f = figure()
f.set_size_inches(fig_size)
f.suptitle('Remapping Sample Summary Data')
ax = axes()
# Create the bar data
left = []
height = []
yerr = []
xticklabels = []
sig_x = []
sig_y = []
c = 0
for i,row in enumerate(summary['data']):
if not summary['visible'][i]:
continue
if num_data == 1:
left.extend([c-0.5*bar_w])
elif num_data == 2:
left.extend([c-bar_w, c])
elif num_data == 3:
left.extend([c-1.5*bar_w, c-0.5*bar_w, c+0.5*bar_w])
height.extend(list(row[show_data]))
yerr.extend(list(row[3+show_data]/1.96))
xticklabels.append(summary['labels'][i])
if sig_ref is not None and sig_ref <> i:
for n in xrange(num_data):
sig_mark = False
if sig_less:
if summary['pvals'][show_data[n], sig_ref, i] < sig_value:
sig_mark = True
else:
if summary['pvals'][show_data[n], sig_ref, i] >= sig_value:
sig_mark = True
if sig_mark:
sig_x.append(left[n-num_data]+0.5*bar_w)
sig_y.append(height[n-num_data]+yerr[n-num_data]+0.02)
c += 1
# Create the bar chart and legend
bar_cols = cm.gray(([0.25, 0.6, 0.8][:num_data])*c)
bar_h = ax.bar(left, height, width=bar_w, yerr=yerr,
ec='k', color=bar_cols, linewidth=0, ecolor='k', capsize=3, aa=False)
if legend:
labels = ['Remapping', 'Rate Remapping', 'Turnover']
data_labels = [labels[i] for i in show_data]
ax.legend(bar_h[:num_data], data_labels)
if sig_ref is not None:
ax.plot(sig_x, sig_y, 'k*', ms=8)
ax.hlines(1.0, xmin=-0.5, xmax=c-0.5, linestyle=':', color='k')
ax.set_xlim(-0.5, c-0.5)
ax.set_ylim(ymin, 1.1)
ax.set_xticks(arange(c))
ax.set_xticklabels(xticklabels)
rcParams['figure.figsize'] = old_fig_size
return f
if __name__ == '__main__':
main()
| {
"content_hash": "8d722952d7cac53f05093a8ebc685caf",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 105,
"avg_line_length": 41.275862068965516,
"alnum_prop": 0.5995822890559732,
"repo_name": "jdmonaco/grid-remapping-model",
"id": "1e1bfc8ca8d3ec389250a6f4125be631db6a4584",
"size": "12010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "338176"
},
{
"name": "Shell",
"bytes": "7912"
}
],
"symlink_target": ""
} |
class TypeCheckError(TypeError):
"""Error type to indicate all errors regarding runtime typechecking.
"""
pass
class InputTypeError(TypeCheckError):
"""Error type to indicate errors regarding failing typechecks of
function or method parameters.
"""
pass
class ReturnTypeError(TypeCheckError):
"""Error type to indicate errors regarding failing typechecks of
function or method return values.
"""
pass
class TypeWarning(RuntimeWarning):
"""Warning type to indicate errors regarding failing typechecks.
"""
pass
class InputTypeWarning(TypeWarning):
"""Warning type to indicate errors regarding failing typechecks of
function or method parameters.
"""
pass
class ReturnTypeWarning(TypeWarning):
"""Warning type to indicate errors regarding failing typechecks of
function or method return values.
"""
pass
class OverrideError(TypeError):
"""Error type to indicate errors regarding failing checks of
method's override consistency.
"""
pass
class TypeSyntaxError(TypeError):
"""Error type to indicate errors regarding ill-formated typestrings.
"""
pass
class ForwardRefError(TypeError):
"""Error type to indicate errors regarding forward references.
"""
pass
| {
"content_hash": "76a122411c2d703e91648eb28657d1de",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 22.75438596491228,
"alnum_prop": 0.7139552814186585,
"repo_name": "Stewori/pytypes",
"id": "f809b4eb23c3cdefcc782a588983bd89de62d5e9",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytypes/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "675123"
}
],
"symlink_target": ""
} |
from error_messages import *
from extra import *
from fields import FieldsTests
from forms import *
from formsets import *
from input_formats import *
from media import *
from models import *
from regressions import *
from util import *
from validators import TestFieldWithValidators
from widgets import *
from regressiontests.forms.localflavortests import (
ARLocalFlavorTests,
ATLocalFlavorTests,
AULocalFlavorTests,
BELocalFlavorTests,
BRLocalFlavorTests,
CALocalFlavorTests,
CHLocalFlavorTests,
CLLocalFlavorTests,
CNLocalFlavorTests,
CZLocalFlavorTests,
DELocalFlavorTests,
ESLocalFlavorTests,
FILocalFlavorTests,
FRLocalFlavorTests,
GBLocalFlavorTests,
GenericLocalFlavorTests,
HRLocalFlavorTests,
IDLocalFlavorTests,
IELocalFlavorTests,
ILLocalFlavorTests,
ISLocalFlavorTests,
ITLocalFlavorTests,
JPLocalFlavorTests,
KWLocalFlavorTests,
MKLocalFlavorTests,
NLLocalFlavorTests,
PLLocalFlavorTests,
PTLocalFlavorTests,
ROLocalFlavorTests,
RULocalFlavorTests,
SELocalFlavorTests,
SKLocalFlavorTests,
TRLocalFlavorTests,
USLocalFlavorTests,
UYLocalFlavorTests,
ZALocalFlavorTests,
AssertFieldOutputTests,
)
| {
"content_hash": "3a02be658d8f26207cc99a97d0a54ec6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 52,
"avg_line_length": 24.21153846153846,
"alnum_prop": 0.7680698967434472,
"repo_name": "mitsuhiko/django",
"id": "cb5f83cdaca72514364b54677d047c32da13bf72",
"size": "1259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/forms/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85678"
},
{
"name": "Python",
"bytes": "7282847"
},
{
"name": "Shell",
"bytes": "4559"
}
],
"symlink_target": ""
} |
"""Deploys Dataflow job of inbound pub/sub.
Script used by Cloud Build to deploy dataflow jobs via DeploymentTool.
Typical usage example:
python3 ./deploy_dataflow_inbound_pubsub.py -s $_SCOPE_FILE -e $BRANCH_NAME
-d $_DOMAIN -c $$CFG
"""
import argparse
import sys
from deployment_configuration import DeploymentToolConfig
from deployment_dataflow_tool import DeploymentTool
from grizzly.forecolor import ForeColor
def main(args: argparse.Namespace) -> None:
"""Deploys Dataflow job via DeploymentTool.
Args:
    args: Command input arguments. The following arguments are supported:
environment - Environment name, string argument passed in the commandline.
domain - Domain name to be deployed, string argument passed in the
commandline.
env_config_file - Environment file, string argument passed in the
commandline.
      scope_file - Scope file with a list of objects to be deployed, default
        value is 'SCOPE.yaml'.
"""
scope_file = args.scope_file
environment = args.environment.lower()
domain = args.domain.lower()
env_config_file = args.env_config_file
config = DeploymentToolConfig(
scope_file=scope_file,
domain=domain,
environment=environment,
env_config_file=env_config_file)
deploy_tool = DeploymentTool(config=config)
deploy_tool.deploy()
if __name__ == '__main__':
try:
# Construct the argument parser
ap = argparse.ArgumentParser(description=__doc__)
# Add the arguments to the parser
ap.add_argument(
'-s',
'--scope',
dest='scope_file',
default='SCOPE.yaml',
help='Scope file with a list of Dataflow objects to be deployed.')
ap.add_argument(
'-e',
'--environment',
dest='environment',
required=True,
help='Environment name')
ap.add_argument(
'-d',
'--domain',
dest='domain',
required=True,
help='Domain name to be deployed')
ap.add_argument(
'-c',
'--env_config_file',
dest='env_config_file',
required=True,
help='Environment file')
main(ap.parse_args())
except:
print(f'{ForeColor.RED}Unexpected error:{ForeColor.RESET}',
sys.exc_info()[1])
raise
| {
"content_hash": "9d9e5570a297d58c5a5b92bd6bb70781",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 28.123456790123456,
"alnum_prop": 0.6483757682177349,
"repo_name": "google/grizzly",
"id": "acb918fbf4d6a0cea2058a90f51592f1fbefbe60",
"size": "2854",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/deploy_dataflow_inbound_pubsub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1075"
},
{
"name": "Dockerfile",
"bytes": "1294"
},
{
"name": "HCL",
"bytes": "107097"
},
{
"name": "HTML",
"bytes": "1152"
},
{
"name": "JavaScript",
"bytes": "52626"
},
{
"name": "Jinja",
"bytes": "8031"
},
{
"name": "Python",
"bytes": "569193"
},
{
"name": "Shell",
"bytes": "13761"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class CustomersConfig(AppConfig):
name = 'customers'
| {
"content_hash": "f03e90f978e4877f14ef7192f4878e19",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.7244897959183674,
"repo_name": "kaiocesar/django-eshop",
"id": "60d3fe04481a0c23e149f1ec60eff2c98e38fb8e",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customers/apps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5043"
},
{
"name": "Python",
"bytes": "17095"
}
],
"symlink_target": ""
} |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
year = int(sys.argv[1])
def create_HYCOM_file(name):
global nc
    print 'Creating file %s' % name
#create netCDF file
nc = netCDF4.Dataset(name, 'w', format='NETCDF3_64BIT')
nc.Author = sys._getframe().f_code.co_name
nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nc.title = 'ROMS + CoSiNE Pacific (U Maine)'
#create dimensions
Mp, Lp = zeta0.shape
N = 20
theta_s = 5
theta_b = 0
Tcline = 50
nc.createDimension('xi_rho', Lp)
nc.createDimension('eta_rho', Mp)
nc.createDimension('s_rho', N)
nc.createDimension('ocean_time', None)
#create variables
# nc.createVariable('lon_rho', 'f', ('eta_rho', 'xi_rho'))
# nc.variables['lon_rho'].long_name = 'longitude'
# nc.variables['lon_rho'].units = 'degrees_east'
# nc.variables['lon_rho'][:] = lon
#
# nc.createVariable('lat_rho', 'f', ('eta_rho', 'xi_rho'))
# nc.variables['lat_rho'].long_name = 'latitude'
# nc.variables['lat_rho'].units = 'degrees_north'
# nc.variables['lat_rho'][:] = lat
#
# nc.createVariable('s_rho', 'f', ('s_rho'))
# nc.variables['s_rho'].standard_name = 'ocean_s_coordinate'
# nc.variables['s_rho'].formula_terms = 's: s_rho eta: zeta depth: h a: theta_s b: theta_b depth_c: Tcline'
# nc.variables['s_rho'][:] = s_rho
nc.createVariable('theta_s', 'f')
nc.variables['theta_s'][:] = theta_s
nc.createVariable('theta_b', 'f')
nc.variables['theta_b'][:] = theta_b
nc.createVariable('Tcline', 'f')
nc.variables['Tcline'][:] = Tcline
nc.createVariable('ocean_time', 'f', ('ocean_time'))
nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
nc.variables['ocean_time'].calendar = 'LEAP'
nc.variables['ocean_time'].long_name = 'ocean time'
# nc.variables['ocean_time'][:] = time
nc.createVariable(outvarname, 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=spval)
nc.variables[outvarname].long_name = long_name
nc.variables[outvarname].units = units
nc.variables[outvarname].field = field
nc.variables[outvarname].time = 'ocean_time'
nc.variables[outvarname].coordinates = 'ocean_time lon_rho lat_rho'
    print 'Done with header for file %s' % name
# get Pacific data from 1991 to 2008
#year = 1991
retry = True
spval = 1.e30
rec_start = (year-1948)*120
rec_end = rec_start + 120
invarname = 'zeta'
outvarname = 'zeta'
#read grid and variable attributes from the first file
#url='http://viz.clusters.umaine.edu:8080/thredds/dodsC/pacific/1991-2008'
url='http://viz.clusters.umaine.edu:8080/thredds/dodsC/pacific/roms50-avg'
dataset = netCDF4.Dataset(url)
zeta0 = dataset.variables['zeta'][0,170:215,195:265]
#lon = dataset.variables['lon_rho'][170:215,195:265]
#lat = dataset.variables['lat_rho'][170:215,195:265]
#h = dataset.variables['h'][170:215,195:265]
#theta_s = dataset.variables['theta_s'][:]
#theta_b = dataset.variables['theta_b'][:]
#Tcline = dataset.variables['Tcline'][:]
#s_rho = dataset.variables['s_rho'][:]
time = dataset.variables['scrum_time'][:]
#spval = dataset.variables[invarname]._FillValue
units = dataset.variables[invarname].units
field = dataset.variables[invarname].field
long_name = dataset.variables[invarname].long_name
#dataset.close()
retry_day = []
# loop over records, 73 hours apart
outfile = 'data/Pacific_%s_%04d.nc' % (outvarname, year)
create_HYCOM_file(outfile)
day_out = 0
for day in range(rec_start,rec_end):
    print 'Processing file for %s, day %d, year %04d' % (invarname, day_out*3, year)
#get data from server
try:
var = dataset.variables[invarname][day,170:215,195:265]
# spval = var.get_fill_value()
# dataset.close()
        print 'Got %s from server...' % invarname
except:
print 'No file on the server... We skip this day.'
retry_day.append((day,day_out))
continue
#create netCDF file
nc.variables[outvarname][day_out] = var
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day_out*73/24.
nc.variables['ocean_time'][day_out] = jday
day_out += 1
if retry:
if len(retry_day) != 0:
print "Some file have not been downloded... Let's try again"
while len(retry_day) != 0:
        # iterate over a copy: retry_day is mutated inside the loop
        for (day, day_out) in list(retry_day):
            print 'Retry file for %s, day %03d, year %04d' % (invarname, day_out, year)
#get data from server
try:
var = dataset.variables[invarname][day,170:215,195:265]
# spval = var.get_fill_value()
                print 'Got %s from server...' % invarname
except:
print 'No file on the server... We skip this day.'
continue
#create netCDF file
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day_out*73/24.
nc.variables[outvarname][day_out] = var
nc.variables['ocean_time'][day_out] = jday
retry_day.remove((day,day_out))
dataset.close()
nc.close()
| {
"content_hash": "9e71b58266be1f677eb215421e0c871d",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 110,
"avg_line_length": 32.314465408805034,
"alnum_prop": 0.631957960295835,
"repo_name": "kshedstrom/pyroms",
"id": "4a3536c1a7713b3ba151001b542af25fa472979c",
"size": "5138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/NWGOA3/Fetch_Pacific/get_pacific_zeta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48069"
},
{
"name": "FORTRAN",
"bytes": "84335"
},
{
"name": "HTML",
"bytes": "6824662"
},
{
"name": "JavaScript",
"bytes": "31743"
},
{
"name": "Makefile",
"bytes": "879"
},
{
"name": "Python",
"bytes": "615238"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from rest_any_permissions import __version__
import os
def get_package_data(package):
"""
    Return all files under the root package that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
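# A hedged example of the return shape, assuming a hypothetical layout with
# non-package data files under the package directory:
#   get_package_data("rest_any_permissions")
#   -> {"rest_any_permissions": ["data/example.txt", ...]}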
setup(
name="drf-any-permissions",
version=__version__,
url="https://github.com/kevin-brown/drf-any-permissions/",
license="MIT",
description="Permissions don't have to be all or nothing anymore, make "
"integrating different authentication types easier.",
author="Kevin Brown",
author_email="kbrown@rediker.com",
packages=find_packages(exclude=["tests*", ]),
package_data=get_package_data("rest_any_permissions"),
install_requires=[
"Django>=1.3",
"djangorestframework"
]
)
| {
"content_hash": "37806f1b448aad189d8a9a420d326c9c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 76,
"avg_line_length": 32.054054054054056,
"alnum_prop": 0.6382799325463744,
"repo_name": "kevin-brown/drf-any-permissions",
"id": "4d873dde0bd049b9140573b0c28ff9494bab8025",
"size": "1186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9866"
}
],
"symlink_target": ""
} |
__authors__ = ['Joel Wright']
from plugins_base import TestPlugin
class TestPlugin1(TestPlugin):
__name__ = "TestPlugin1"
__version__ = 0.01
def __init__(self):
self.initialised = True
def config(self):
self.configured = True
def start(self):
print("Started TestPlugin1")
def stop(self):
print("Stopped TestPlugin1")
| {
"content_hash": "25c6b93136fc4d8467aaf4cb14fe8b82",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 36,
"avg_line_length": 19.4,
"alnum_prop": 0.5902061855670103,
"repo_name": "joel-wright/DDRPi",
"id": "322284a3c09310f46597eae73b26b9cc1af047ac",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/python/plugins_test/plugins/p1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1996"
},
{
"name": "Python",
"bytes": "132258"
}
],
"symlink_target": ""
} |
from google.appengine.ext import db
from mcfw.properties import unicode_property, long_property
from mcfw.serialization import s_long, ds_long, s_unicode, ds_unicode, get_list_serializer, get_list_deserializer, \
s_bool, ds_bool, CustomProperty
from rogerthat.to import TO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class MobileDetail(object):
account = unicode_property('1')
type_ = long_property('2')
pushId = unicode_property('3') # Apple Push or Google Cloud Messaging id
app_id = unicode_property('4')
def _serialize_mobile_detail(stream, md):
s_unicode(stream, md.account)
s_long(stream, md.type_)
s_unicode(stream, md.pushId)
s_unicode(stream, md.app_id)
def _deserialize_mobile_detail(stream, version):
from rogerthat.models import App
md = MobileDetail()
md.account = ds_unicode(stream)
md.type_ = ds_long(stream)
md.pushId = ds_unicode(stream)
if version < 2:
md.app_id = App.APP_ID_ROGERTHAT
else:
md.app_id = ds_unicode(stream)
return md
_serialize_mobile_detail_list = get_list_serializer(_serialize_mobile_detail)
_deserialize_mobile_detail_list = get_list_deserializer(_deserialize_mobile_detail, True)
class MobileDetails(object):
def __init__(self):
self._table = dict()
def append(self, md):
if not md or not isinstance(md, MobileDetail):
raise ValueError
self._table[md.account] = md
return md
def addNew(self, account, type_, pushId, app_id):
md = MobileDetail()
md.account = account
md.type_ = type_
md.pushId = pushId
md.app_id = app_id
self.append(md)
return md
def remove(self, account):
self._table.pop(account, None)
def __iter__(self):
for val in self._table.values():
yield val
def __getitem__(self, key):
return self._table[key]
def __contains__(self, key):
return key in self._table
def __len__(self):
return len(self._table)
def values(self):
return self._table.values()
CURRENT_MOBILE_DETAILS_VERSION = 2
def _serialize_mobile_details(stream, fds):
s_long(stream, CURRENT_MOBILE_DETAILS_VERSION) # version in case we need to adjust the MobileDetail structure
if fds is None:
s_bool(stream, False)
else:
s_bool(stream, True)
_serialize_mobile_detail_list(stream, fds.values())
def _deserialize_mobile_details(stream):
version = ds_long(stream)
if ds_bool(stream):
mds = MobileDetails()
for md in _deserialize_mobile_detail_list(stream, version):
mds.append(md)
return mds
else:
return MobileDetails()
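# Illustrative round-trip sketch (not part of the original module): the
# serializer pair above works against any file-like object, so an in-memory
# buffer can exercise it. The account and push id values are made up.
def _demo_mobile_details_roundtrip():
    mds = MobileDetails()
    mds.addNew(u'john@example.com', 1, u'push-id-123', u'rogerthat')
    buf = StringIO()
    _serialize_mobile_details(buf, mds)
    restored = _deserialize_mobile_details(StringIO(buf.getvalue()))
    assert u'john@example.com' in restored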
class MobileDetailsProperty(db.UnindexedProperty, CustomProperty):
get_serializer = lambda self: _serialize_mobile_details
get_deserializer = lambda self: _deserialize_mobile_details
# Tell what the user type is.
data_type = MobileDetails
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
stream = StringIO()
_serialize_mobile_details(stream, super(MobileDetailsProperty,
self).get_value_for_datastore(model_instance))
return db.Blob(stream.getvalue())
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return MobileDetails()
return _deserialize_mobile_details(StringIO(value))
def validate(self, value):
if value is not None and not isinstance(value, MobileDetails):
raise ValueError('Property %s must be convertible to a MobileDetails instance (%s)' % (self.name, value))
return super(MobileDetailsProperty, self).validate(value)
def empty(self, value):
return not value
class PublicKeyTO(TO):
algorithm = unicode_property('1', default=None)
name = unicode_property('2', default=None)
index = unicode_property('3', default=None)
public_key = unicode_property('4', default=None) # base64
@classmethod
def from_model(cls, model):
return cls(algorithm=model.algorithm, name=model.name, index=model.index, public_key=model.public_key)
def serialize_public_key(stream, pk):
s_unicode(stream, pk.algorithm)
s_unicode(stream, pk.name)
s_unicode(stream, pk.index)
s_unicode(stream, pk.public_key)
def deserialize_public_key(stream, version):
pk = PublicKeyTO()
pk.algorithm = ds_unicode(stream)
pk.name = ds_unicode(stream)
pk.index = ds_unicode(stream)
pk.public_key = ds_unicode(stream)
return pk
_serialize_public_key_list = get_list_serializer(serialize_public_key)
_deserialize_public_key_list = get_list_deserializer(deserialize_public_key, True)
class PublicKeys(object):
def __init__(self):
self._table = dict()
def append(self, pk):
if not pk or not isinstance(pk, PublicKeyTO):
raise ValueError
self._table[u"%s.%s.%s" % (pk.algorithm, pk.name, pk.index)] = pk
return pk
def addNew(self, algorithm, name, index, public_key):
pk = PublicKeyTO()
pk.algorithm = algorithm
pk.name = name
pk.index = index
pk.public_key = public_key
self.append(pk)
return pk
def remove(self, algorithm, name, index):
self._table.pop(u"%s.%s.%s" % (algorithm, name, index), None)
def __iter__(self):
for val in self._table.values():
yield val
def __getitem__(self, key):
return self._table[key]
def __contains__(self, key):
return key in self._table
def __len__(self):
return len(self._table)
def values(self):
return self._table.values()
def _serialize_public_keys(stream, public_keys):
s_long(stream, 1)
if public_keys is None:
s_bool(stream, False)
else:
s_bool(stream, True)
_serialize_public_key_list(stream, public_keys.values())
def _deserialize_public_keys(stream):
version = ds_long(stream)
if ds_bool(stream):
pks = PublicKeys()
for pk in _deserialize_public_key_list(stream, version):
pks.append(pk)
return pks
else:
return None
class PublicKeysProperty(db.UnindexedProperty, CustomProperty):
get_serializer = lambda self: _serialize_public_keys
get_deserializer = lambda self: _deserialize_public_keys
# Tell what the user type is.
data_type = PublicKeys
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
stream = StringIO()
_serialize_public_keys(stream, super(PublicKeysProperty,
self).get_value_for_datastore(model_instance))
return db.Blob(stream.getvalue())
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return _deserialize_public_keys(StringIO(value))
def validate(self, value):
if value is not None and not isinstance(value, PublicKeys):
raise ValueError('Property %s must be convertible to a PublicKeys instance (%s)' % (self.name, value))
return super(PublicKeysProperty, self).validate(value)
def empty(self, value):
return not value
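# Illustrative sketch (hypothetical model, not part of this module): both
# custom properties above plug into a db.Model like any other property, e.g.:
#
#   class Profile(db.Model):
#       mobiles = MobileDetailsProperty()
#       public_keys = PublicKeysProperty()
#
# Reading an entity then yields MobileDetails / PublicKeys instances that
# support addNew(), remove() and dict-style lookups as defined above.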
| {
"content_hash": "fae93bd59ceca3663776f64fc8850492",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 117,
"avg_line_length": 30.556016597510375,
"alnum_prop": 0.6455730581205866,
"repo_name": "rogerthat-platform/rogerthat-backend",
"id": "045c3f80c6afd4872595dcb51a3fec3e01a6471a",
"size": "7998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/models/properties/profiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "687088"
},
{
"name": "HTML",
"bytes": "948569"
},
{
"name": "Java",
"bytes": "521272"
},
{
"name": "JavaScript",
"bytes": "1830068"
},
{
"name": "Python",
"bytes": "4220314"
}
],
"symlink_target": ""
} |
"""
Fakes For Scheduler tests.
"""
import mox
from nova.compute import instance_types
from nova.compute import vm_states
from nova import db
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
service=dict(host='host1', disabled=False)),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
service=dict(host='host2', disabled=True)),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
service=dict(host='host3', disabled=False)),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
service=dict(host='host4', disabled=False)),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
host='host2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host='host5'),
]
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
"""host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
host4: free_ram_mb=8192 free_disk_gb=8192"""
def __init__(self):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
},
'host2': {
'compute': {'host_memory_free': 2147483648},
},
'host3': {
'compute': {'host_memory_free': 3221225472},
},
'host4': {
'compute': {'host_memory_free': 999999999},
},
}
class FakeHostState(host_manager.HostState):
def __init__(self, host, topic, attribute_dict):
super(FakeHostState, self).__init__(host, topic)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
class FakeInstance(object):
def __init__(self, context=None, params=None, type_name='m1.tiny'):
"""Create a test instance. Returns uuid"""
self.context = context
i = self._create_fake_instance(params, type_name=type_name)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
"""Create a test instance"""
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst.update(params)
return db.instance_create(self.context, inst)
class FakeComputeAPI(object):
def create_db_entry_for_new_instance(self, *args, **kwargs):
pass
def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'compute_node_get_all')
mock.StubOutWithMock(db, 'instance_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
db.instance_get_all(mox.IgnoreArg(),
columns_to_join=['instance_type']).AndReturn(INSTANCES)
| {
"content_hash": "520f3a9e64d9d308a7a086ae88bfa787",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 75,
"avg_line_length": 33.99173553719008,
"alnum_prop": 0.587892049598833,
"repo_name": "NoBodyCam/TftpPxeBootBareMetal",
"id": "84424c1c78812330b2c8afb20a0f5229fb1a2a4b",
"size": "4742",
"binary": false,
"copies": "1",
"ref": "refs/heads/tftp_pxe_boot",
"path": "nova/tests/scheduler/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6568288"
},
{
"name": "Shell",
"bytes": "17010"
}
],
"symlink_target": ""
} |
import zeit.cms.testing
import zeit.content.text.testing
class PythonScriptTest(zeit.cms.testing.FunctionalTestCase):
layer = zeit.content.text.testing.ZCML_LAYER
def create(self, text):
py = zeit.content.text.python.PythonScript()
py.uniqueId = 'http://xml.zeit.de/py'
py.text = text
return py
def test_returns_value(self):
py = self.create('__return(42)')
self.assertEqual(42, py())
def test_return_stops_execution(self):
py = self.create("""\
__return(1)
__return(2)""")
self.assertEqual(1, py())
def test_keyword_args_are_passed_in_as_context(self):
py = self.create('__return(context["foo"])')
self.assertEqual(42, py(foo=42))
def test_syntax_error_raises_on_call(self):
py = self.create('foo(')
with self.assertRaises(SyntaxError):
py()
def test_exceptions_are_propagated(self):
py = self.create('raise RuntimeError()')
with self.assertRaises(RuntimeError):
py()
| {
"content_hash": "734d551bfff0999ebc3cdbc85d216096",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 60,
"avg_line_length": 28.16216216216216,
"alnum_prop": 0.6161228406909789,
"repo_name": "ZeitOnline/zeit.content.text",
"id": "b5911499b360d2ad64a89590d1c047bf68b8be85",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/text/tests/test_python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24215"
}
],
"symlink_target": ""
} |
'''
Production Configurations
- Use djangosecure
'''
from __future__ import absolute_import, unicode_literals
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure",)
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn",)
# AUTHENTICATION
# --------------
AUTH_LDAP_SERVER_URI = env('AUTH_LDAP_SERVER_URI')
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = env('AUTH_LDAP_BIND_AS_AUTHENTICATING_USER', default=True)
AUTH_LDAP_GLOBAL_OPTIONS = {
ldap.OPT_X_TLS_REQUIRE_CERT: ldap.OPT_X_TLS_NEVER,
ldap.OPT_NETWORK_TIMEOUT: 10,
ldap.OPT_DEBUG_LEVEL: 255
}
AUTH_LDAP_BIND_DN = ''
AUTH_LDAP_BIND_PASSWORD = ''
AUTH_LDAP_USER_DN_TEMPLATE = env('AUTH_LDAP_USER_DN_TEMPLATE', default=None)
AUTH_LDAP_USER_SEARCH_DN = env('AUTH_LDAP_USER_SEARCH_DN')
AUTH_LDAP_GROUP_SEARCH_DN = env('AUTH_LDAP_GROUP_SEARCH_DN')
AUTH_LDAP_SUPERUSER_DN = env('AUTH_LDAP_SUPERUSER_DN', default=None)
WRISTBAND_ENV = env('WRISTBAND_ENV', default='prod')
AUTH_LDAP_USER_SEARCH = LDAPSearch(AUTH_LDAP_USER_SEARCH_DN, ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_SEARCH_DN, ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)")
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType()
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 300
AUTH_LDAP_REQUIRE_GROUP = env('AUTH_LDAP_REQUIRE_GROUP', default=None)
# We're not ready for this yet....
# AUTH_LDAP_USER_FLAGS_BY_GROUP = {
# "is_superuser": AUTH_LDAP_SUPERUSER_DN
# }
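# A hedged example of the expected shapes for the LDAP settings above; the
# DNs are illustrative placeholders, not real deployment values:
#   AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=people,dc=example,dc=com'
#   AUTH_LDAP_USER_SEARCH_DN = 'ou=people,dc=example,dc=com'
#   AUTH_LDAP_GROUP_SEARCH_DN = 'ou=groups,dc=example,dc=com'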
| {
"content_hash": "292ca13a0d492009e782c684d64c22f4",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 112,
"avg_line_length": 34.6219512195122,
"alnum_prop": 0.6819302571327932,
"repo_name": "hmrc/wristband",
"id": "438340759b32e2daace07ebc3985062995cb8ab3",
"size": "2863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/production.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69698"
}
],
"symlink_target": ""
} |
import string
import os
from fabric.api import task
from conf import settings
from fabric.context_managers import lcd
from fabric.operations import local as lrun, prompt
from fabric.colors import red, green
from fabric.tasks import Task
@task
def install(horizon_path=settings.HORIZON_ROOT):
"""Download and install the Front-end and its dependencies."""
if os.path.isdir(horizon_path[:-1]):
print 'Already downloaded.'
else:
lrun('git clone https://github.com/ging/horizon.git \
{0}'.format(horizon_path))
with lcd(horizon_path):
dependencies = ' '.join(settings.UBUNTU_DEPENDENCIES['horizon'])
lrun('sudo apt-get install -y {0}'.format(dependencies))
lrun('sudo python tools/install_venv.py')
path = horizon_path + '/openstack_dashboard/local/'
class Template(string.Template):
delimiter = '$$'
template_settings = Template(open(path + 'local_settings.py.example').read())
out_file = open(path + "local_settings.py", "w")
out_file.write(
template_settings.substitute({
'IDM_NAME': settings.IDM_USER_CREDENTIALS['username'],
'IDM_PASS': settings.IDM_USER_CREDENTIALS['password'],
'IDM_PROJECT': settings.IDM_USER_CREDENTIALS['project'],
'KEYSTONE_ADDRESS': settings.KEYSTONE_INTERNAL_ADDRESS,
'KEYSTONE_PUBLIC_PORT':settings.KEYSTONE_PUBLIC_PORT,
}))
out_file.close()
instance.run(horizon_path=horizon_path) # run check task
@task
def update(horizon_path=settings.HORIZON_ROOT):
"""Update the Front-end and its dependencies."""
# returns 1 if everything went OK, 0 otherwise
print 'Updating Horizon...'
with lcd(horizon_path):
lrun('git pull origin')
lrun('sudo python tools/install_venv.py')
print green('Horizon updated.')
if not instance.run(horizon_path=horizon_path):
return 0 # flag for the main task
else:
return 1 # flag for the main task
@task
def dev_server(address=settings.HORIZON_DEV_ADDRESS,
horizon_path=settings.HORIZON_ROOT):
"""Run horizon server for development purposes"""
with lcd(horizon_path):
lrun(('sudo tools/with_venv.sh python manage.py runserver '
'{0}').format(address))
class CheckTask(Task):
"""Run several checks in the Front-end settings file."""
name = "check"
    def run(self, horizon_path=settings.HORIZON_ROOT):
        # returns 1 if everything went OK, 0 otherwise
        print 'Checking Horizon... ',
        settings_ok = self._check_for_new_settings(horizon_path + 'openstack_dashboard/local/')
        roles_ok = self._check_for_roles_ids(horizon_path + 'openstack_dashboard/local/')
        return 1 if (settings_ok and roles_ok) else 0
def _check_for_new_settings(self, settings_path):
"""Checks for new settings in the template which don't exist in the current file"""
# returns 1 if everything went OK, 0 otherwise
with open(settings_path+'local_settings.py', 'r') as old_file,\
open(settings_path+'local_settings.py.example', 'r') as new_file:
old = set(old_file)
new = set(new_file)
new_settings = set()
old_settings = set()
# remove values to have settings' names
for s in new.difference(old):
if '=' in s:
new_settings.add(s[0:s.find('=')])
for s in old.difference(new):
if '=' in s:
old_settings.add(s[0:s.find('=')])
latest_settings = new_settings.difference(old_settings)
if not latest_settings:
print green('Settings OK.'),
return 1 # flag for the main task
else:
print red('Some errors were encountered:')
print red('The following settings couldn\'t be found in your local_settings.py module:')
settings_to_write = list()
for s in latest_settings:
with open(settings_path+'local_settings.py.example', 'r') as template:
block = 0
for line in template.readlines():
if s in line or block > 0:
settings_to_write.append(line)
if '{' in line: block += 1
if '}' in line: block -= 1
print '\t'+red(s)
autofix = prompt(red('Would you like to add defaults for the missing settings? [Y/n]: '),\
default='n', validate='[Y,n]')
if autofix == 'Y':
with open(settings_path+'local_settings.py', 'a') as output:
output.write('\n\n# --- NEW SETTINGS ADDED AUTOMATICALLY ---\n')
for s in settings_to_write:
output.write(s)
print green('The missing settings were added.\nPlease check the local_settings.py module to make any necessary changes.')
else:
print red('Please edit the local_settings.py module manually so that it contains the settings above.')
return 0 # flag for the main task
def _check_for_roles_ids(self, settings_path):
# returns 1 if everything went OK, 0 otherwise
if not hasattr(settings,'INTERNAL_ROLES_IDS'):
print red("INTERNAL_ROLES_IDS attribute could not be found. Please make sure you have completely installed Keystone before running this check.")
return 0
with open(settings_path+'local_settings.py', 'r') as local_settings:
error = False
for line in local_settings.readlines():
if 'FIWARE_PURCHASER_ROLE_ID' in line and\
settings.INTERNAL_ROLES_IDS['purchaser'] not in line:
error = True
elif 'FIWARE_PROVIDER_ROLE_ID' in line and\
settings.INTERNAL_ROLES_IDS['provider'] not in line:
error = True
break
if not error:
print green('Role IDs OK.')
return 1
else:
autofix = prompt(red('Would you like to add the internal roles\' IDs to the local_settings.py module? [Y/n]: '), default='n', validate='[Y,n]')
if autofix == 'Y':
with open(settings_path+'local_settings.py', 'r+') as settings_file:
lines = settings_file.readlines()
settings_file.seek(0)
settings_file.truncate()
for line in lines:
if 'FIWARE_PURCHASER_ROLE_ID' in line:
line = 'FIWARE_PURCHASER_ROLE_ID = \''+settings.INTERNAL_ROLES_IDS['purchaser']+'\'\n'
if 'FIWARE_PROVIDER_ROLE_ID' in line:
line = 'FIWARE_PROVIDER_ROLE_ID = \''+settings.INTERNAL_ROLES_IDS['provider']+'\'\n'
settings_file.write(line)
return 0
instance = CheckTask()
| {
"content_hash": "bb3809828a16e05170c5048fbcba1c7c",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 156,
"avg_line_length": 43.425,
"alnum_prop": 0.5808865860679332,
"repo_name": "Plexical/fiware-idm",
"id": "9ad6c63207016f4a4dc2bb38d8a55b64319fc28e",
"size": "7551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deployment/horizon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38915"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""
auto rule template
~~~~
:author: LoRexxar <LoRexxar@gmail.com>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
from utils.api import *
class CVI_1002():
"""
rule class
"""
def __init__(self):
self.svid = 1002
self.language = "php"
self.author = "LoRexxar/wufeifei"
self.vulnerability = "SSRF"
        self.description = "The parameter of the file_get_contents() function is controllable, which may lead to an SSRF vulnerability"
self.level = 7
# status
self.status = True
        # partial configuration
self.match_mode = "function-param-regex"
self.match = r"file_get_contents"
# for solidity
self.match_name = None
self.black_list = None
# for chrome ext
self.keyword = None
# for regex
self.unmatch = None
self.vul_function = None
def main(self, regex_string):
"""
regex string input
:regex_string: regex match string
:return:
"""
pass
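# Illustrative sketch (engine integration assumed, not shown in this file):
# the scanner presumably instantiates the rule and reads its match
# configuration, roughly:
#
#   rule = CVI_1002()
#   if rule.status and rule.match_mode == "function-param-regex":
#       scan_for(rule.match)  # `scan_for` is hypothetical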
| {
"content_hash": "0f0e638014f154b2c918a9087e71bb88",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 65,
"avg_line_length": 21.384615384615383,
"alnum_prop": 0.5539568345323741,
"repo_name": "LoRexxar/Cobra-W",
"id": "a02322c692a320f8eceb272622507b56657af21b",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rules/php/CVI_1002.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "204"
},
{
"name": "Hack",
"bytes": "82"
},
{
"name": "Java",
"bytes": "45"
},
{
"name": "PHP",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "441482"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from setuptools.command.develop import develop as STDevelopCmd
class DevelopCmd(STDevelopCmd):
def run(self):
# add in requirements for testing only when using the develop command
self.distribution.install_requires.extend([
'mock',
'nose',
])
STDevelopCmd.run(self)
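# Note (illustrative): with the cmdclass mapping passed to setup() below,
# `python setup.py develop` pulls in the extra test-only dependencies above,
# while a plain install does not.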
cdir = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(cdir, 'readme.rst')).read()
CHANGELOG = open(os.path.join(cdir, 'changelog.rst')).read()
VERSION = open(os.path.join(cdir, 'savalidation', 'version.txt')).read().strip()
setup(
name='SAValidation',
version=VERSION,
description="Active Record like validation on SQLAlchemy declarative model objects",
long_description=README + '\n\n' + CHANGELOG,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Database',
],
author='Randy Syring',
author_email='randy.syring@level12.io',
url='http://bitbucket.org/blazelibs/sqlalchemy-validation/',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
cmdclass={'develop': DevelopCmd},
install_requires=[
'SQLAlchemy>=0.7.6',
'python-dateutil',
'FormEncode>=1.2'
],
)
| {
"content_hash": "ab9e62bd36a42ac6f737dd5e4bbf8d33",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 88,
"avg_line_length": 33.59183673469388,
"alnum_prop": 0.6330498177399757,
"repo_name": "marquisthunder/sqlalchemy-validation",
"id": "db49bd34425ce1026e6e7930ed2a606d17d16265",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57463"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def LocalizableMessage(vim, *args, **kwargs):
''''''
obj = vim.client.factory.create('ns0:LocalizableMessage')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
raise IndexError('Expected at least 2 arguments got: %d' % len(args))
required = [ 'key' ]
optional = [ 'arg', 'message', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| {
"content_hash": "9016662973c031a7fb1faf4cf54222d5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 30.03125,
"alnum_prop": 0.5775234131113424,
"repo_name": "xuru/pyvisdk",
"id": "1aed78e6032e046e5feb2a7b09ea30e5a2e581bc",
"size": "962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/localizable_message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
"""
Output mapcache configuration information to `node-gyp`
Configuration options are retrieved from environment variables set using `npm
config set`. This allows for a simple `npm install mapcache` to work.
"""
from optparse import OptionParser
import os
import re
import sys
def warn(msg):
print >> sys.stderr, msg
def die(msg):
warn('Configuration failed: %s' % msg)
sys.exit(1)
class ConfigError(Exception):
pass
class Config(object):
"""Base Class for obtaining mapcache configuration information"""
def __init__(self, build_dir):
self.build_dir = build_dir
def getLibDir(self):
return os.environ.get('npm_config_mapcache_lib_dir', '')
def getIncludeDir(self):
return os.path.join(self.build_dir, 'include')
def getCflags(self):
# add debugging flags and defines
if 'npm_config_mapcache_debug' in os.environ:
return '-DDEBUG -g -ggdb'
return ''
class AutoconfConfig(Config):
"""Class for obtaining mapcache configuration pre mapcache 1.0
Mapcache uses autotools for building and configuration in this version.
"""
def __init__(self, *args, **kwargs):
super(AutoconfConfig, self).__init__(*args, **kwargs)
makefile_inc = os.path.join(self.build_dir, 'Makefile.inc')
if not os.path.exists(makefile_inc):
raise ConfigError('Expected `Makefile.inc` in %s' % self.build_dir)
self.makefile_inc = makefile_inc
def getLibDir(self):
p = re.compile('^prefix *= *(.+)$') # match the prefix
with open(self.makefile_inc, 'r') as f:
for line in f:
match = p.match(line)
if match:
arg = match.groups()[0].strip()
if arg:
return os.path.join(arg, 'lib')
return ''
def getCflags(self):
# add includes from the Makefile
p = re.compile('^[A-Z]+_INC *= *(.+)$') # match an include header
matches = []
with open(self.makefile_inc, 'r') as f:
for line in f:
match = p.match(line)
if match:
arg = match.groups()[0].strip()
if arg:
matches.append(arg)
debug_flags = super(AutoconfConfig, self).getCflags()
if debug_flags:
matches.append(debug_flags)
return ' '.join(matches)
class CmakeConfig(Config):
"""Class for obtaining mapcache configuration for versions >= 1.0
Mapcache uses Cmake for building and configuration in this version.
"""
def __init__(self, *args, **kwargs):
super(CmakeConfig, self).__init__(*args, **kwargs)
cmake_cache = os.path.join(self.build_dir, 'CMakeCache.txt')
if not os.path.exists(cmake_cache):
raise ConfigError('Expected `CMakeCache.txt` in %s' % self.build_dir)
self.cmake_cache = cmake_cache
def getLibDir(self):
p = re.compile('^CMAKE_INSTALL_PREFIX:PATH *= *(.+)$') # match the prefix
with open(self.cmake_cache, 'r') as f:
for line in f:
match = p.match(line)
if match:
arg = match.groups()[0].strip()
if arg:
return os.path.join(arg, 'lib')
return ''
def getIncludeDir(self):
dirs = [os.path.join(self.build_dir, 'include')]
patterns = [ # a list of path patterns associated with paths to append to matches
(re.compile('^\w+_INCLUDE_DIR:PATH *= *(.+)$'), ), # match a library directory
(re.compile('^MapCache_SOURCE_DIR:STATIC *= *(.+)$'), 'include') # match the source directory
]
with open(self.cmake_cache, 'r') as f:
for line in f:
for p in patterns:
match = p[0].match(line)
if match:
arg = match.groups()[0].strip()
if arg:
dirs.append(os.path.join(arg, *p[1:]))
return ' '.join(dirs)
parser = OptionParser()
parser.add_option("--include",
action="store_true", default=False,
help="output the mapcache include path")
parser.add_option("--libraries",
action="store_true", default=False,
help="output the mapcache library link option")
parser.add_option("--ldflags",
action="store_true", default=False,
help="output the mapcache library rpath option")
parser.add_option("--cflags",
action="store_true", default=False,
help="output the mapcache cflag options")
(options, args) = parser.parse_args()
try:
build_dir = os.environ['npm_config_mapcache_build_dir']
except KeyError:
die('`npm config set mapcache:build_dir` has not been called')
# get the config object, trying the new cmake system first and falling back to
# the legacy autoconf build system
try:
try:
config = CmakeConfig(build_dir)
    except ConfigError, e:
        warn("Failed to configure using Cmake: %s" % e)
        warn("Attempting configuration using autotools...")
        try:
            config = AutoconfConfig(build_dir)
        except ConfigError, e2:
            die(e2)
# output the requested options
if options.include:
print config.getIncludeDir()
if options.libraries:
lib_dir = config.getLibDir()
if lib_dir:
print "-L%s" % lib_dir
if options.ldflags:
# write the library path into the resulting binary
lib_dir = config.getLibDir()
if lib_dir:
print "-Wl,-rpath=%s" % lib_dir
if options.cflags:
print config.getCflags()
except ConfigError, e:
die(e)
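# Hedged usage sketch: node-gyp is expected to invoke this script roughly as
# follows (the flag combinations are illustrative):
#   python tools/config.py --include
#   python tools/config.py --cflags --ldflags --libraries
# after `npm config set mapcache:build_dir <path>` has been run, as required
# by the environment check above.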
| {
"content_hash": "4b6b7b0e8bb66ebab3fbd04140545d9b",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 105,
"avg_line_length": 31.49462365591398,
"alnum_prop": 0.5689655172413793,
"repo_name": "geo-data/node-mapcache",
"id": "d4a5a9fa86bcc07a11b60ecb23a439eedef61345",
"size": "7416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/config.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "44309"
},
{
"name": "JavaScript",
"bytes": "47664"
},
{
"name": "Makefile",
"bytes": "3253"
},
{
"name": "Python",
"bytes": "1041"
}
],
"symlink_target": ""
} |
import pandas as pd
import pandas_datareader as pdr
import datetime
import numpy as np  # needed below for np.floor
import matplotlib.pyplot as plt
import pylab
"""
This one looks like a more reasonable tutorial using Yahoo data and pandas.
https://ntguardian.wordpress.com/2016/09/19/introduction-stock-market-data-python-1/
"""
from matplotlib.dates import DateFormatter, WeekdayLocator,\
DayLocator, MONDAY
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
def pandas_candlestick_ohlc(dat, stick = "day", otherseries = None):
"""
:param dat: pandas DataFrame object with datetime64 index, and float columns "Open", "High", "Low", and "Close", likely created via DataReader from "yahoo"
:param stick: A string or number indicating the period of time covered by a single candlestick. Valid string inputs include "day", "week", "month", and "year", ("day" default), and any numeric input indicates the number of trading days included in a period
:param otherseries: An iterable that will be coerced into a list, containing the columns of dat that hold other series to be plotted as lines
This will show a Japanese candlestick plot for stock data stored in dat, also plotting other series if passed.
"""
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
dayFormatter = DateFormatter('%d') # e.g., 12
# Create a new DataFrame which includes OHLC data for each period specified by stick input
transdat = dat.loc[:,["Open", "High", "Low", "Close"]]
if (type(stick) == str):
if stick == "day":
plotdat = transdat
stick = 1 # Used for plotting
elif stick in ["week", "month", "year"]:
if stick == "week":
transdat["week"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[1]) # Identify weeks
elif stick == "month":
transdat["month"] = pd.to_datetime(transdat.index).map(lambda x: x.month) # Identify months
transdat["year"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[0]) # Identify years
grouped = transdat.groupby(list(set(["year",stick]))) # Group by year and other appropriate variable
plotdat = pd.DataFrame({"Open": [], "High": [], "Low": [], "Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0,0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1,3]},
index = [group.index[0]]))
if stick == "week": stick = 5
elif stick == "month": stick = 30
elif stick == "year": stick = 365
elif (type(stick) == int and stick >= 1):
transdat["stick"] = [np.floor(i / stick) for i in range(len(transdat.index))]
grouped = transdat.groupby("stick")
plotdat = pd.DataFrame({"Open": [], "High": [], "Low": [], "Close": []}) # Create empty data frame containing what will be plotted
for name, group in grouped:
plotdat = plotdat.append(pd.DataFrame({"Open": group.iloc[0,0],
"High": max(group.High),
"Low": min(group.Low),
"Close": group.iloc[-1,3]},
index = [group.index[0]]))
else:
raise ValueError('Valid inputs to argument "stick" include the strings "day", "week", "month", "year", or a positive integer')
# Set plot parameters, including the axis object ax used for plotting
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
if plotdat.index[-1] - plotdat.index[0] < pd.Timedelta('730 days'):
weekFormatter = DateFormatter('%b %d') # e.g., Jan 12
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
else:
weekFormatter = DateFormatter('%b %d, %Y')
ax.xaxis.set_major_formatter(weekFormatter)
ax.grid(True)
    # Create the candlestick chart
candlestick_ohlc(ax, list(zip(list(mdates.date2num(plotdat.index.tolist())), plotdat["Open"].tolist(), plotdat["High"].tolist(),
plotdat["Low"].tolist(), plotdat["Close"].tolist())),
colorup = "black", colordown = "red", width = stick * .4)
# Plot other series (such as moving averages) as lines
    if otherseries is not None:
if type(otherseries) != list:
otherseries = [otherseries]
dat.loc[:,otherseries].plot(ax = ax, lw = 1.3, grid = True)
ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
def show_candlestick_for(yahoo_data, start_date, end_date):
# Let's get Apple stock data; Apple's ticker symbol is AAPL
# First argument is the series we want, second is the source ("yahoo" for Yahoo! Finance), third is the start date, fourth is the end date
btc = pdr.get_data_yahoo(yahoo_data, start_date, end_date)
print btc.head()
pandas_candlestick_ohlc(btc)
show_candlestick_for("BTC-USD", datetime.datetime(2017,10,1), datetime.date.today())
| {
"content_hash": "6e4434dab4844322195ae1af421c556b",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 260,
"avg_line_length": 49.58181818181818,
"alnum_prop": 0.605977264393106,
"repo_name": "martinschaef/coinstuff",
"id": "830780d501980612ba16bc4037d85046382f0523",
"size": "5454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "historic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23009"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class GatewayV1Config(AppConfig):
name = 'gateway_v1'
| {
"content_hash": "e87877b306801d7b139a00209ae46552",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 19.285714285714285,
"alnum_prop": 0.7555555555555555,
"repo_name": "vinay-pad/commit_service",
"id": "aff44d2dda1deb2e4c34707e58de7ea91c511b3a",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gateway_v1/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32106"
}
],
"symlink_target": ""
} |
import urllib
import httplib
import json
class Topicsio(object):
'''
Topics.io API Handler
Parameters
----------
auth_api_key : string the Topics.io API Key.
'''
API_URL = 'api.topics.io'
def __init__(self, auth_api_key):
self._auth_api_key = auth_api_key
    def make_api_call(self, verb_http, method_url, page=None, params=None):
        # Avoid a shared mutable default argument; each call gets its own dict.
        if params is None:
            params = {}
        params['page'] = page
        url = self._build_url(method_url, params)
conn = httplib.HTTPConnection(self.API_URL)
conn.request(verb_http, url)
resp = conn.getresponse()
data = resp.read()
data = json.loads(data)
return data
def _build_url(self, method_url, params):
all_params = {'auth_api_key': self._auth_api_key}
params_not_none = dict((k, v) for (k, v) in params.iteritems() if v is not None)
all_params.update(params_not_none)
return "%s?%s" % (method_url, urllib.urlencode( all_params))
def search_topics(self, query, entity=None, page=1):
verb_http = 'GET'
method_url = '/topics/search/v1/'
        return self.make_api_call(verb_http, method_url, page, {'query': query, 'entity': entity})
def subscribe_topic(self, topic_id):
verb_http = 'POST'
method_url = '/topics/%s/subscribe/v1/' % topic_id
        return self.make_api_call(verb_http, method_url)
def unsubscribe_topic(self, topic_id):
verb_http = 'DELETE'
method_url = '/topics/%s/subscribe/v1/' % topic_id
        return self.make_api_call(verb_http, method_url)
def list_topics(self, page=1):
verb_http = 'GET'
method_url = '/topics/v1/'
        return self.make_api_call(verb_http, method_url, page)
def create_topic(self, display_name, entity):
verb_http = 'POST'
method_url = '/topics/v1/'
        return self.make_api_call(verb_http, method_url,
                                  page=None, params={'displayName': display_name, 'entity': entity})
def get_last_news(self, topic_id=None, page=1, before_id=None):
verb_http = 'GET'
if topic_id is None:
method_url = '/topics/news/v1/'
else:
method_url = '/topics/%s/news/v1/' % topic_id
        return self.make_api_call(verb_http, method_url, page, params={'beforeId': before_id})
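# --- Illustrative usage sketch (not part of the original client) ---
# The API key and topic id below are placeholders.
#
# client = Topicsio('YOUR_API_KEY')
# results = client.search_topics('python')
# client.subscribe_topic('some-topic-id')
# news = client.get_last_news()
| {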
"content_hash": "4def7848a901e97a890e675460304491",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 101,
"avg_line_length": 34.29577464788732,
"alnum_prop": 0.5613963039014374,
"repo_name": "drano/python-topicsio",
"id": "740dcd2f8dda45f023315b85fae0e8fa5d077019",
"size": "2435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topicsio/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6310"
}
],
"symlink_target": ""
} |
"""FMTK high-level maintenance procedures
"""
import os, os.path
import sys
from getopt import getopt, GetoptError
from .provision import provide
#import pdb
def findpathwithchild(pathlist, childpath, limit=None):
"""Find a path from pathlist which has childpath underneath it.
For example, '/this/path/lib/has' may have 'src/models' beneath 'lib'.
The return value would be '/this/path/lib'.
The maximum number of parent directories to set is set by 'limit'.
"""
# TODO: may be dead code; was used by quickstart()
#print ("pathlist", pathlist)
childpath = os.path.normpath(childpath)
for climbingpath in pathlist:
here = os.path.normpath(climbingpath + '/.')
for _n in range(limit):
possible = here + '/' + childpath
if os.path.exists(possible):
return here # found it
here = os.path.dirname(here)
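# Worked example (restating the docstring; paths are illustrative):
#   findpathwithchild(('/this/path/lib/has',), 'src/models', limit=3)
# climbs from '/this/path/lib/has' through its parents; if
# '/this/path/lib/src/models' exists on disk, the return value is
# '/this/path/lib'.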
def providedir(dirpath):
"""Make sure that directory 'dirpath' is available.
Create it, if necessary.
"""
if not os.path.exists(dirpath):
os.makedirs(dirpath)
print("Directory '%s' created." % dirpath)
def add_local_dep(confspec):
"Add local project relative paths."
links = confspec['links']
links['include/math'] = "../../../tksrc/src/math"
links['include/models'] = "../../../tksrc/src/models"
links['include/utils'] = "../../../tksrc/src/utils"
so_suffix = confspec['so-suffix']
for module in ('Math', 'Models', 'Utils'):
lib_so = 'libfmtk%s.%s' % (module, so_suffix)
subdir = module.lower()
links['lib/' + lib_so] = "../../../src/%s/%s" % (subdir, lib_so)
def parseargs(argv, spec):
"""Parse optional command line args 'argv' and load into config spec
dictionary 'spec'.
"""
paths = spec['paths']
links = spec['links']
try:
        # long options that take a value need a trailing '=' in getopt
        (opts, args) = getopt(argv[1:], 'r:g:G:s:m:hq',
                              ['buildroot=', 'gtestinclude=', 'gtestlib=',
                               'so-suffix=', 'mathimpl=', 'help', 'quick'])
    except GetoptError as ex:
        print("error: %s" % str(ex), file=sys.stderr)
        sys.exit(1)
for opt, arg in opts:
if opt == '-r' or opt == '--buildroot':
paths['build'] = arg
elif opt == '-g' or opt == '--gtestinclude':
links['include/gtest'] = arg + '/gtest'
elif opt == '-G' or opt == '--gtestlib':
libpath = arg
for lib in ('libgtest.a', 'libgtest_main.a'):
links['lib/' + lib] = libpath + '/' + lib
elif opt == '-s' or opt == '--so-suffix':
spec['so-suffix'] = arg
#elif opt == '-m' or opt == '--mathimpl':
# spec['cart3mpl-'] = arg
elif opt == '-h' or opt == '--help':
spec['help'] = True
elif opt == '-q' or opt == '--quick':
spec['quick'] = True
def query(confspec):
"Interactive query for values."
paths = confspec['paths']
links = confspec['links']
print ("Please enter values for the following settings.")
print ("(Press Enter to accept a default, if available.)")
# Target build path
# default = paths['fmtkroot'] + '/build' # proper default
default = paths['fmtkroot'] # workaround
response = input('Target build path (%s): ' % default)
paths['build'] = (response, default)[response=='']
#print ('path:', paths['build'])
# GTest paths
print ('Google C++ test framework (GTest) paths')
inclpath = input('GTest include (above gtest/gtest.h): ')
links['include/gtest'] = inclpath + '/gtest'
libpath = input('GTest lib (above libgtest.a): ')
for lib in ('libgtest.a', 'libgtest_main.a'):
links['lib/' + lib] = libpath + '/' + lib
# Target OS (shared library suffix, etc.)
print ("Shared library/object suffix, e.g.: dylib, so")
confspec['so-suffix'] = input('shared object suffix: ')
# Target Cart3 math implementation
# not yet
print ("Cart3 math implementation; one of: " "baseline, sse2, msa, altivec, neon")
confspec['cart3-impl'] = input('Cart3 implementation: ')
def quickstart(argv, tkroot):
"""Main quick start routine.
If using "-q", run something like:
fmtk-qs -q -r $(SRCTOP) -g $(GTEST)/include -G $(GTEST)/lib -s dylib
Otherwise, just enter command and add values interactively.
"""
confspec = {}
confspec['paths'] = paths = {}
confspec['links'] = links = {}
print ("Welcome to FMTK quickstart.")
# TODO: may not need findpathwithchild()
#paths['fmtkroot'] = findpathwithchild(
# (os.path.abspath('.'),), 'src/models', 3)
paths['fmtkroot'] = tkroot
print ("root path discovered at", paths['fmtkroot'])
parseargs(argv, confspec)
if ('quick' not in confspec):
query(confspec)
add_local_dep(confspec)
provide(confspec) # do it
return 0
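# Minimal entry-point sketch (illustrative; the real console script that
# invokes quickstart() is assumed to live elsewhere in the toolkit):
#
#   if __name__ == '__main__':
#       sys.exit(quickstart(sys.argv, os.path.abspath('.')))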
# vim: set sw=4 tw=80 :
| {
"content_hash": "b999e693156aca9688e41fe0c86df011",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 86,
"avg_line_length": 33.351351351351354,
"alnum_prop": 0.5790113452188006,
"repo_name": "kwan0xfff/fmtk",
"id": "553f2162c7f843d98b3a831692120442cf00ce2a",
"size": "4983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/lib/quickstart/fmtk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2696"
},
{
"name": "C++",
"bytes": "82281"
},
{
"name": "Makefile",
"bytes": "9196"
},
{
"name": "Python",
"bytes": "15557"
},
{
"name": "Shell",
"bytes": "1232"
}
],
"symlink_target": ""
} |
import collections
import logging
import inspect
import pymel.core as pymel
from classNode import Node
from omtk import constants
from omtk.libs import libAttr
from omtk.libs import libPymel
from omtk.libs import libRigging
log = logging.getLogger('omtk')
class BaseCtrl(Node):
"""
A Ctrl is the layer between the rig and the animator.
    When unbuilt/built, its shapes and animatable attributes are automatically saved/loaded.
    If no shapes are stored, a Ctrl has the ability to resize itself automatically.
"""
def __init__(self, create=False, create_offset=True, *args, **kwargs):
# TODO: Deprecate the usage of create.
self._create_offset = create_offset
# Reserve maya default transform attributes.
self.tx = None
self.ty = None
self.tz = None
self.rx = None
self.ry = None
self.rz = None
self.sx = None
self.sy = None
self.sz = None
# Reserve channels to preserve the transform limits
self.minTransXLimit = None
self.maxTransXLimit = None
self.minTransYLimit = None
self.maxTransYLimit = None
self.minTransZLimit = None
self.maxTransZLimit = None
self.minRotXLimit = None
self.maxRotXLimit = None
self.minRotYLimit = None
self.maxRotYLimit = None
self.minRotZLimit = None
self.maxRotZLimit = None
self.minScaleXLimit = None
self.maxScaleXLimit = None
self.minScaleYLimit = None
self.maxScaleYLimit = None
self.minScaleZLimit = None
self.maxScaleZLimit = None
# Store information concerning how the ctrl should mirror.
# For more information see the omtk.animation.mirrorPose module.
        # The default behavior follows the result we get when mirroring joints using the 'behavior' option.
# TODO: Find a way to automatically guess the correct values.
self.mirror_x = False
self.mirror_y = False
self.mirror_z = False
self.mirror_flip_rot_x = False
self.mirror_flip_rot_y = False
self.mirror_flip_rot_z = False
self.mirror_flip_pos_x = True
self.mirror_flip_pos_y = True
self.mirror_flip_pos_z = True
self.offset = None # An intermediate parent that store the original transform of the ctrl.
self.shapes = None # The list of shape to be used by the ctrl
self.node = None
self.rotateOrder = None # Keep the axis order information on unbuild
self.targets = [] # A list representing all the space switch target for the ctrl
self.targets_indexes = [] # A list representing all the space switch target indexes for the ctrl
        # We need to keep the local index separately because self-referencing breaks Maya's deletion mechanism (*%&?%*&)
self.local_index = constants.SpaceSwitchReservedIndex.local
self._reserved_index = [] # A list representing all the index already reserved for the space switch target
super(BaseCtrl, self).__init__(create=create, *args, **kwargs)
'''
def __createOffset__(self):
"""
Create an intermediate parent used to store the origin offset of the ctrl.
"""
self.offset = pymel.group(self.node, absolute=True, name=(self.node.name() + '_offset')) # faster
return self.offset
'''
def _get_recommended_size(self, refs, geometries, default_size=1.0, multiplier=1.0, **kwargs):
ref = next(iter(refs), None) if isinstance(refs, collections.Iterable) else refs
if ref is not None:
return libRigging.get_recommended_ctrl_size(ref, geometries=geometries, **kwargs) * multiplier
else:
return default_size * multiplier
def __createNode__(self, size=None, normal=(1, 0, 0), multiplier=1.0, refs=None, offset=None, geometries=None,
*args, **kwargs):
"""
Create a simple circle nurbsCurve.
size: The maximum dimension of the controller.
"""
# Hack: Ensure geometries are hashable
if isinstance(geometries, list):
geometries = tuple(geometries)
# Resolve size automatically if refs are provided.
if size is None:
size = self._get_recommended_size(refs, geometries, multiplier=multiplier)
transform, make = pymel.circle()
make.radius.set(size)
make.normal.set(normal)
# Expose the rotateOrder
# transform.rotateOrder.setKeyable(True)
return transform
def exists(self):
if self.node is None:
return False
return self.node.exists() # PyNode
def build(self, name=None, fetch_shapes=True, *args, **kwargs):
"""
Create ctrl setup, also fetch animation and shapes if necessary.
"""
# TODO: Add support for multiple shapes?
if self.can_fetch_shapes():
self.node = pymel.createNode('transform')
self.fetch_shapes()
else:
super(BaseCtrl, self).build(name=None, *args, **kwargs)
        # The name kept from the last unbuild takes priority over the name that could be set in the code
if name:
self.node.rename(name)
if self.rotateOrder is not None:
self.node.rotateOrder.set(self.rotateOrder)
# Create an intermediate parent if necessary
if self._create_offset:
self.offset = self.append_layer('offset')
# Fetch stored animations
# Disabled for now, see method docstring.
# self.fetch_attr_all()
self.fetch_transform_limits()
# Fetch stored shapes
return self.node
def restore_bind_pose(self):
val_by_att_names = {
'translateX': 0,
'translateY': 0,
'translateZ': 0,
'rotateX': 0,
'rotateY': 0,
'rotateZ': 0,
'scaleX': 1,
'scaleY': 1,
'scaleZ': 1
}
for attr_name, val in val_by_att_names.iteritems():
if not self.node.hasAttr(attr_name):
continue
attr = self.node.attr(attr_name)
if attr.isLocked():
continue
attr.set(val)
def can_fetch_shapes(self):
return libPymel.is_valid_PyNode(self.shapes) and self.shapes.getShape()
def hold_shapes(self):
self.shapes = libRigging.hold_ctrl_shapes(self.node)
def fetch_shapes(self):
# libAttr.unlock_rotation(self.shapes)
# libAttr.unlock_scale(self.shapes)
# pymel.makeIdentity(self.shapes, rotate=False, scale=True, apply=True) # Ensure the shape don't have any extra transformation.
libRigging.fetch_ctrl_shapes(self.shapes, self.node)
self.shapes = None
    # All transform-limit attribute names, e.g. 'minTransXLimit' or 'maxScaleZLimit'.
    _TRANSFORM_LIMIT_ATTR_NAMES = tuple(
        '%s%s%sLimit' % (prefix, channel, axis)
        for prefix in ('min', 'max')
        for channel in ('Trans', 'Rot', 'Scale')
        for axis in 'XYZ'
    )

    def hold_transform_limits(self):
        """Store internally any limits set on the controller."""
        for attr_name in self._TRANSFORM_LIMIT_ATTR_NAMES:
            enabled = self.node.attr(attr_name + 'Enable').get()
            setattr(self, attr_name, self.node.attr(attr_name).get() if enabled else None)
    def fetch_transform_limits(self):
        """Restore any limits previously held from the controller."""
        for attr_name in self._TRANSFORM_LIMIT_ATTR_NAMES:
            value = getattr(self, attr_name)
            self.node.attr(attr_name + 'Enable').set(value is not None)
            if value is not None:
                self.node.attr(attr_name).set(value)
def unbuild(self, keep_shapes=True, *args, **kwargs):
"""
        Delete ctrl setup, but store the animation, shapes and rotate order.
"""
if not libPymel.is_valid_PyNode(self.node):
raise Exception("Can't hold ctrl attribute! Some information may be lost... {0}".format(self.node))
else:
self.rotateOrder = self.node.rotateOrder.get()
self.hold_attrs_all()
self.hold_transform_limits()
self.hold_shapes()
super(BaseCtrl, self).unbuild(*args, **kwargs)
# Delete offset node if necessary.
# Note that we delete the offset node AFTER deleting the original node.
if libPymel.is_valid_PyNode(self.offset):
pymel.delete(self.offset)
self.offset = None
def rename(self, _sName, *args, **kwargs):
"""
        Rename the internal network.
"""
if self.node is not None:
self.node.rename(_sName, *args, **kwargs)
if self.offset is not None:
self.offset.rename(_sName + '_offset')
def setParent(self, *args, **kwargs):
"""
Override of pymel.PyNode .setParent method.
Redirect the call to the ctrl top node.
"""
if not isinstance(self.offset, pymel.PyNode):
print "[setParent] {0} don't have an offset attribute, node will be parented instead".format(self)
return self.node.setParent(*args, **kwargs)
return self.offset.setParent(*args, **kwargs)
def setMatrix(self, *args, **kwargs):
"""
Override of pymel.PyNode .setMatrix method.
Redirect the call to the ctrl top node.
"""
        if not isinstance(self.offset, pymel.PyNode):
            print "[setMatrix] {0} doesn't have an offset attribute, node will be used instead".format(self)
            return self.node.setMatrix(*args, **kwargs)
        return self.offset.setMatrix(*args, **kwargs)
def setTranslation(self, *args, **kwargs):
"""
Override of pymel.PyNode .setTranslation method.
Redirect the call to the ctrl top node.
"""
        if not isinstance(self.offset, pymel.PyNode):
            print "[setTranslation] {0} doesn't have an offset attribute, node will be used instead".format(self)
            return self.node.setTranslation(*args, **kwargs)
        return self.offset.setTranslation(*args, **kwargs)
def setRotation(self, *args, **kwargs):
"""
Override of pymel.PyNode .setRotation method.
Redirect the call to the ctrl top node.
"""
        if not isinstance(self.offset, pymel.PyNode):
            print "[setRotation] {0} doesn't have an offset attribute, node will be used instead".format(self)
            return self.node.setRotation(*args, **kwargs)
        return self.offset.setRotation(*args, **kwargs)
def hold_attrs_all(self):
"""
Hold all ctrl keyable attributes.
        Note that if an attribute is locked or non-keyable, we'll only hold its value.
"""
self.tx = libAttr.hold_attrs(self.node.translateX)
self.ty = libAttr.hold_attrs(self.node.translateY)
self.tz = libAttr.hold_attrs(self.node.translateZ)
self.rx = libAttr.hold_attrs(self.node.rotateX)
self.ry = libAttr.hold_attrs(self.node.rotateY)
self.rz = libAttr.hold_attrs(self.node.rotateZ)
self.sx = libAttr.hold_attrs(self.node.scaleX)
self.sy = libAttr.hold_attrs(self.node.scaleY)
self.sz = libAttr.hold_attrs(self.node.scaleZ)
def fetch_attr_all(self):
"""
Fetch all ctrl keyable attributes.
Disabled for now as this can affect how things generate.
The fetch_attr_all should be called LAST after a Module generation.
"""
pass
        # # Note: we're forced to use __dict__ since we don't want self.tx to be interpreted as self.node.tx
# libAttr.fetch_attr(self.__dict__.get('tx', None), self.node.translateX)
# libAttr.fetch_attr(self.__dict__.get('ty', None), self.node.translateY)
# libAttr.fetch_attr(self.__dict__.get('tz', None), self.node.translateZ)
# libAttr.fetch_attr(self.__dict__.get('rx', None), self.node.rotateX)
# libAttr.fetch_attr(self.__dict__.get('ry', None), self.node.rotateY)
# libAttr.fetch_attr(self.__dict__.get('rz', None), self.node.rotateZ)
# libAttr.fetch_attr(self.__dict__.get('sx', None), self.node.scaleX)
# libAttr.fetch_attr(self.__dict__.get('sy', None), self.node.scaleY)
# libAttr.fetch_attr(self.__dict__.get('sz', None), self.node.scaleZ)
#
    # SPACE SWITCH LOGIC
#
def get_bestmatch_index(self, target, reserved_idx=None):
"""
        Return the best-match index for a target, depending on whether the target is already known and on the list
        of reserved indexes (-3 = world, -2 = local, -1 = root).
        :param target: The target we want to get the index for
        :param reserved_idx: A reserved index, set when requesting a specific reserved space target
        :return: The index that the target will use in the space switch system
"""
# Populate a list that represent all index already in use in the system
if not self._reserved_index:
self._reserved_index = [member[1] for member in inspect.getmembers(constants.SpaceSwitchReservedIndex)
if not member[0].startswith("__") and not member[0].endswith("__")]
if self.local_index not in self._reserved_index:
self._reserved_index.append(self.local_index)
# Keep all the indexes that were serialized
if self.targets_indexes:
for index in self.targets_indexes:
if index not in self._reserved_index:
self._reserved_index.append(index)
# First, check if the target already have an index associated
for i, keep_target in enumerate(self.targets):
if keep_target == target:
return self.targets_indexes[i]
        # If no index is found and a reserved type is requested, return the matching reserved index
if reserved_idx:
return reserved_idx
# If no index is found, find the next available one
new_max_idx = max(self._reserved_index) + 1
        # Since reserved indexes are always negative, we know that the first possible index is 0
for i in xrange(0, new_max_idx + 1):
if i not in self._reserved_index:
self._reserved_index.append(i) # Hack the reserved target list to include the new used index
return i
        # Finally, if no free index was found, return the next possible one in the list
return new_max_idx
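    # Illustrative example (hypothetical state): with reserved indexes
    # (-3 world, -2 local, -1 root) and serialized indexes [0, 1], calling
    # get_bestmatch_index() for an unknown, non-reserved target returns 2.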
def create_spaceswitch(self, module, parent, add_local=True, local_label=None, local_target=None, add_world=False,
**kwargs):
"""
        Create the space switch attribute on the controller using a list of targets found from its module hierarchy.
:param module: The module on which we want to process space switch targets
:param parent: The parent used as the default (local) target
        :param add_local: If True, a 'local' target will be used. Local is generally the absence of any constraint and always has the same index.
:param local_label: The name of the 'local' target
:param local_target: The objects to use as the local target. This is only used to cheat (see the FaceEyes module).
        :param add_world: Whether the world will be added as a target
:param kwargs: Additional parameters
:return: None
"""
# TODO: Handle when parent is None?
nomenclature = module.rig.nomenclature
        # Basically we resolve 3 lists:
        # - targets: Contain the space switch targets.
        # - labels: Contain the visible text for each target.
        # - indexes: Contain the stored logical index for each target. Note that some indexes are reserved.
targets, labels, indexes = self.get_spaceswitch_targets(module, parent,
add_world=add_world, add_local=add_local)
if not targets:
module.warning("Can't add space switch on {0}. No targets found!".format(self.node.__melobject__()))
return
if local_label is None:
local_label = 'Local'
# Resolve the niceName of the targets
for i in range(len(targets)):
target = targets[i]
label = labels[i]
if label is None and target is not None:
name = nomenclature(target.name())
name.remove_extra_tokens()
labels[i] = name.resolve()
# Build the enum string from the information we got
enum_string = ""
# Add the local option if needed
if add_local:
            # We cannot self-reference since it would break Maya's deletion mechanism
# targets.append(self)
# indexes.append(default_index)
# labels.append(default_name)
            # In some cases the user might have provided what we should use as the local target.
            # This is used to cheat; for example, the FaceEyes module ctrls are parented to the world,
            # but it makes sense that their 'local' space is still the head.
if local_target:
# If the local_target exist in the list, we'll want to remove it.
if local_target in targets:
index = targets.index(local_target)
targets.pop(index)
labels.pop(index)
indexes.pop(index)
targets.append(local_target)
indexes.append(constants.SpaceSwitchReservedIndex.local)
labels.append(local_label)
else:
enum_string += local_label + "=" + \
str(self.local_index)
        # The enum string will skip indexes if needed
for label, index in zip(labels, indexes):
if enum_string:
enum_string += ":"
enum_string += label + "=" + str(index)
# Update the serialized variable to make sure everything is up to date
for i, target in enumerate(targets):
if target not in self.targets:
self.targets.append(target)
if indexes[i] in self.targets_indexes:
log.warning("Index ({0}) is already used for space switch on ctrl {1}. "
"Strange behavior could happen".format(indexes[i], self.name()))
self.targets_indexes.append(indexes[i])
# Create the parent constraint before adding the local since local target will be set to itself
# to keep a serialized link to the local target
layer_space_switch = self.append_layer('spaceSwitch')
parent_constraint = pymel.parentConstraint(targets, layer_space_switch, maintainOffset=True, **kwargs)
attr_space = libAttr.addAttr(self.node, 'space', at='enum', enumName=enum_string, k=True)
atts_weights = parent_constraint.getWeightAliasList()
for i, att_weight in enumerate(atts_weights):
index_to_match = indexes[i]
att_enabled = libRigging.create_utility_node( # Equal
'condition',
firstTerm=attr_space,
secondTerm=index_to_match,
colorIfTrueR=1,
colorIfFalseR=0
).outColorR
pymel.connectAttr(att_enabled, att_weight)
        # By default, the active space will be local, else root, and finally fall back on the first index found
if add_local:
self.node.space.set(local_label)
elif constants.SpaceSwitchReservedIndex.root in self.targets_indexes:
self.node.space.set(constants.SpaceSwitchReservedIndex.root)
else:
if self.targets_indexes:
self.node.space.set(self.targets_indexes[0])
# Sometimes Maya will be drunk and set a bad 'restRotate'.
        # We'll want to ensure ourselves that there's no rest offset (see Task #70729).
parent_constraint.restTranslateX.set(0)
parent_constraint.restTranslateY.set(0)
parent_constraint.restTranslateZ.set(0)
parent_constraint.restRotateX.set(0)
parent_constraint.restRotateY.set(0)
parent_constraint.restRotateZ.set(0)
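    # The resulting 'space' enum on the ctrl looks like (illustrative labels):
    #   Local=-2:Root=-1:Head=0:Chest=1
    # where one condition node per target drives the matching parentConstraint
    # weight whenever the enum value equals that target's index.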
def get_spaceswitch_targets(self, module, jnt, add_world=True, add_root=True, add_local=True,
root_name='Root', world_name='World', **kwargs):
"""
        Return the list of targets used by the space switch of a controller. It will try to get all module pin
        locations it can find from its jnt parameter.
:param module: The module on which we want to process space switch targets
:param jnt: A list of joint that will be used to find associated modules to find space objects
        :param add_world: Whether the world will be added as a space switch target of the ctrl
        :param add_root: Whether the root will be added as a space switch target of the ctrl
        :param add_local: Whether the local option will be used. Local is the same as the first module target.
:param root_name: The name in the list of targets the root will take
:param world_name: The name in the list of targets the world will take
:param kwargs: Additional parameters
        :return: The target objects, names and indexes of the found space switch targets
"""
targets = []
        targets.extend(self.targets)  # The targets already serialized on the ctrl
        # Initialize the target name list with the same number of items as the targets kept above
        target_names = [None] * len(targets)
        indexes = list(self.targets_indexes)
# Use the grp_rip node as the world target. It will always be the first target in the list
if add_world and libPymel.is_valid_PyNode(module.rig.grp_rig):
if module.rig.grp_rig not in targets:
targets.append(module.rig.grp_rig)
                # World always uses the reserved world index
indexes.append(self.get_bestmatch_index(module.rig.grp_rig, constants.SpaceSwitchReservedIndex.world))
target_names.append(world_name)
else:
idx = targets.index(module.rig.grp_rig)
target_names[idx] = world_name
# Add the master ctrl as a spaceswitch target
if libPymel.is_valid_PyNode(module.rig.grp_anm):
if module.rig.grp_anm not in targets:
targets.append(module.rig.grp_anm)
target_names.append(root_name)
                # The root always uses the reserved root index (local keeps its own reserved index)
indexes.append(self.get_bestmatch_index(module.rig.grp_anm, constants.SpaceSwitchReservedIndex.root))
else:
idx = targets.index(module.rig.grp_anm)
target_names[idx] = root_name
# Resolve modules targets
first_module = True
while jnt:
m = module.rig.get_module_by_input(jnt)
            # We will not add the first module target found if we add the local space.
            # The local space is equivalent to not having any space activated, as if the ctrl followed its parent,
            # which would be the first module found.
if m and ((add_local and not first_module) or not add_local):
target, target_name = m.get_pin_locations(jnt)
if target:
if target not in targets:
targets.append(target)
target_names.append(target_name)
indexes.append(self.get_bestmatch_index(target))
else:
idx = targets.index(target)
target_names[idx] = target_name
else:
first_module = False
jnt = jnt.getParent()
        # Final check to ensure that no target is None. If a None target is found, remove it and leave its
        # index in the space attribute free so it can be fixed manually.
for i, t in reversed(list(enumerate(targets))):
if t is None:
log.warning("Space switch index {0} target is None on {1}, "
"maybe a manual connection will be needed".format(indexes[i], self.name))
targets.pop(i)
target_names.pop(i)
indexes.pop(i)
return targets, target_names, indexes
def get_spaceswitch_enum_targets(self):
"""
        Return a dictionary representing the enum space switch attribute data (space name, index and object).
        :return: A dictionary of the space switch data in the form [index] = (name, target_obj)
"""
space_attr = getattr(self.node, 'space', None)
dict_sw_data = {}
# log.info("Processing {0}".format(self.node))
if space_attr:
enum_items = space_attr.getEnums().items()
enum_items.sort(key=lambda tup: tup[1])
all_enum_connections = [con for con in space_attr.listConnections(d=True, s=False)]
for name, index in enum_items:
target_found = False
for con in all_enum_connections:
if con.secondTerm.get() == index:
target_found = True
out_connections = con.outColorR.listConnections(d=True, s=False)
if out_connections:
const = out_connections[0]
const_target_weight_attr = con.outColorR.listConnections(d=True, s=False, p=True)[0] \
.listConnections(d=True, s=False, p=True)
for target in const.target:
const_target_name = const_target_weight_attr[0].name(fullDagPath=True)
target_name = target.targetWeight.name(fullDagPath=True)
if target_name == const_target_name:
target_obj = target.targetParentMatrix.listConnections(s=True)[0]
dict_sw_data[index] = (name, target_obj)
if not target_found:
dict_sw_data[index] = (name, None)
else:
pass
# log.warning("No space attribute found on {0}".format(self.node))
return dict_sw_data
| {
"content_hash": "0d675a248b1983f719a0cfcbe3e3e8fa",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 146,
"avg_line_length": 47.356923076923074,
"alnum_prop": 0.6235137417971541,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "4b7b2609e4471b6fa814a8148fa89950a1bed3f2",
"size": "30782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk/core/classCtrl.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
"""
WARNING: This code is deprecated and will be removed.
Keystone is the recommended solution for auth management.
Nova authentication management
"""
import os
import shutil
import string # pylint: disable=W0402
import tempfile
import uuid
import zipfile
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import signer
FLAGS = flags.FLAGS
flags.DEFINE_bool('use_deprecated_auth',
False,
'This flag must be set to use old style auth')
flags.DEFINE_list('allowed_roles',
['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
'Allowed roles for project')
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
'Roles that ignore authorization checking completely')
# NOTE(vish): a user with one of these roles will have it for every
# project, even if he or she is not a member of the project
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', '%src',
'Filename of rc in credentials zip, %s will be '
'replaced by name of the region (nova by default)')
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
LOG = logging.getLogger('nova.auth.manager')
if FLAGS.memcached_servers:
import memcache
else:
from nova import fakememcache as memcache
class AuthBase(object):
"""Base class for objects relating to auth
Objects derived from this class should be stupid data objects with
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
@classmethod
def safe_id(cls, obj):
"""Safely get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
        This allows methods to accept objects or ids as parameters.
"""
if isinstance(obj, cls):
return obj.id
else:
return obj
class User(AuthBase):
"""Object representing a user
The following attributes are defined:
:id: A system identifier for the user. A string (for LDAP)
:name: The user name, potentially in some more friendly format
:access: The 'username' for EC2 authentication
    :secret: The 'password' for EC2 authentication
:admin: ???
"""
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
def is_superuser(self):
return AuthManager().is_superuser(self)
def is_admin(self):
return AuthManager().is_admin(self)
def has_role(self, role):
return AuthManager().has_role(self, role)
def add_role(self, role):
return AuthManager().add_role(self, role)
def remove_role(self, role):
return AuthManager().remove_role(self, role)
def is_project_member(self, project):
return AuthManager().is_project_member(self, project)
def is_project_manager(self, project):
return AuthManager().is_project_manager(self, project)
def __repr__(self):
return "User('%s', '%s')" % (self.id, self.name)
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
self.description = description
self.member_ids = member_ids
@property
def project_manager(self):
return AuthManager().get_user(self.project_manager_id)
@property
def vpn_ip(self):
ip, _port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
_ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
return AuthManager().is_project_manager(user, self)
def has_member(self, user):
return AuthManager().is_project_member(user, self)
def add_role(self, user, role):
return AuthManager().add_role(user, role, self)
def remove_role(self, user, role):
return AuthManager().remove_role(user, role, self)
def has_role(self, user, role):
return AuthManager().has_role(user, role, self)
def get_credentials(self, user):
return AuthManager().get_credentials(user, self)
def __repr__(self):
return "Project('%s', '%s')" % (self.id, self.name)
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
Methods accept objects or ids.
AuthManager uses a driver object to make requests to the data backend.
See ldapdriver for reference.
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
_instance = None
mc = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(AuthManager, cls).__new__(cls)
return cls._instance
def __init__(self, driver=None, *args, **kwargs):
"""Inits the driver from parameter or flag
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
self.network_manager = utils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
if AuthManager.mc is None:
AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
"""Authenticates AWS request using access key and signature
If the project is not specified, attempts to authenticate to
a project with the same name as the user. This way, older tools
that have no project knowledge will still work.
@type access: str
@param access: Access key for user in the form "access:project".
@type signature: str
@param signature: Signature of the request.
@type params: list of str
        @param params: Web parameters used for the signature.
@type verb: str
@param verb: Web request verb ('GET' or 'POST').
@type server_string: str
@param server_string: Web request server string.
@type path: str
@param path: Web request path.
@type check_type: str
@param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
S3. Any other value will cause signature not to be
checked.
@type headers: list
@param headers: HTTP headers passed with the request (only needed for
s3 signature checks)
@rtype: tuple (User, Project)
@return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
LOG.debug('user: %r', user)
if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.AccessKeyNotFound(access_key=access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project is None:
pjid = project_id
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
raise exception.ProjectNotFound(project_id=project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
uname = user.name
uid = user.id
pjname = project.name
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
raise exception.ProjectMembershipNotFound(project_id=pjid,
user_id=uid)
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
(addr_str, port_str) = utils.parse_server_string(server_string)
# If the given server_string contains port num, try without it.
if port_str != '':
host_only_signature = signer.Signer(
user.secret.encode()).generate(params, verb,
addr_str, path)
LOG.debug(_('host_only_signature: %s'),
host_only_signature)
if signature == host_only_signature:
return (user, project)
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
return (user, project)
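    # Example (illustrative): for access='AKIA123:myproject' the request is
    # authenticated as the user owning access key 'AKIA123' against project
    # 'myproject'; without the ':project' suffix, the user's name is used as
    # the project id.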
def get_access_key(self, user, project):
"""Get an access key that includes user and project"""
if not isinstance(user, User):
user = self.get_user(user)
return "%s:%s" % (user.access, Project.safe_id(project))
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass authorization
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for superuser.
"""
if not isinstance(user, User):
user = self.get_user(user)
# NOTE(vish): admin flag on user represents superuser
if user.admin:
return True
        for role in FLAGS.superuser_roles:
            if self.has_role(user, role):
                return True
        return False
def is_admin(self, user):
"""Checks for admin status, allowing user to access all projects
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for admin.
"""
if not isinstance(user, User):
user = self.get_user(user)
if self.is_superuser(user):
return True
        for role in FLAGS.global_roles:
            if self.has_role(user, role):
                return True
        return False
def _build_mc_key(self, user, role, project=None):
key_parts = ['rolecache', User.safe_id(user), str(role)]
if project:
key_parts.append(Project.safe_id(project))
return '-'.join(key_parts)
def _clear_mc_key(self, user, role, project=None):
# NOTE(anthony): it would be better to delete the key
self.mc.set(self._build_mc_key(user, role, project), None)
def _has_role(self, user, role, project=None):
mc_key = self._build_mc_key(user, role, project)
rslt = self.mc.get(mc_key)
if rslt is None:
with self.driver() as drv:
rslt = drv.has_role(user, role, project)
self.mc.set(mc_key, rslt)
return rslt
else:
return rslt
def has_role(self, user, role, project=None):
"""Checks existence of role for user
If project is not specified, checks for a global role. If project
is specified, checks for the union of the global role and the
project role.
Role 'projectmanager' only works for projects and simply checks to
see if the user is the project_manager of the specified project. It
is the same as calling is_project_manager(user, project).
@type user: User or uid
@param user: User to check.
@type role: str
@param role: Role to check.
@type project: Project or project_id
@param project: Project in which to look for local role.
@rtype: bool
@return: True if the user has the role.
"""
if role == 'projectmanager':
if not project:
raise exception.Error(_("Must specify project"))
return self.is_project_manager(user, project)
global_role = self._has_role(User.safe_id(user),
role,
None)
if not global_role:
return global_role
if not project or role in FLAGS.global_roles:
return global_role
return self._has_role(User.safe_id(user),
role,
Project.safe_id(project))
def add_role(self, user, role, project=None):
"""Adds role for user
If project is not specified, adds a global role. If project
is specified, adds a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User to which to add role.
@type role: str
@param role: Role to add.
@type project: Project or project_id
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.UserRoleNotFound(role_id=role)
if project is not None and role in FLAGS.global_roles:
raise exception.GlobalRoleNotAllowed(role_id=role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Adding role %(role)s to user %(uid)s"
" in project %(pid)s") % locals())
else:
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
If project is not specified, removes a global role. If project
is specified, removes a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User from which to remove role.
@type role: str
@param role: Role to remove.
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Removing role %(role)s from user %(uid)s"
" on project %(pid)s") % locals())
else:
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
"""Get list of allowed roles"""
if project_roles:
return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
else:
return FLAGS.allowed_roles
def get_user_roles(self, user, project=None):
"""Get user global or per-project roles"""
with self.driver() as drv:
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
def get_active_roles(self, user, project=None):
"""Get all active roles for context"""
if project:
roles = FLAGS.allowed_roles + ['projectmanager']
else:
roles = FLAGS.global_roles
return [role for role in roles if self.has_role(user, role, project)]
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
project_dict = drv.get_project(pid)
if project_dict:
return Project(**project_dict)
def get_projects(self, user=None):
"""Retrieves list of projects, optionally filtered by user"""
with self.driver() as drv:
project_list = drv.get_projects(User.safe_id(user))
if not project_list:
return []
return [Project(**project_dict) for project_dict in project_list]
def create_project(self, name, manager_user, description=None,
member_users=None):
"""Create a project
@type name: str
@param name: Name of the project to create. The name will also be
used as the project id.
@type manager_user: User or uid
@param manager_user: This user will be the project manager.
@type description: str
        @param description: Description of the project. If no description is
                            specified, the name of the project will be used.
        @type member_users: list of User or uid
        @param member_users: Initial project members. The project manager will
                             always be added as a member, even if not specified
                             in this list.
@rtype: Project
@return: The new project.
"""
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
project_dict = drv.create_project(name,
User.safe_id(manager_user),
description,
member_users)
if project_dict:
LOG.audit(_("Created project %(name)s with"
" manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
def modify_project(self, project, manager_user=None, description=None):
"""Modify a project
        @type project: Project or project_id
@param project: The project to modify.
@type manager_user: User or uid
@param manager_user: This user will be the new project manager.
@type description: str
        @param description: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
drv.modify_project(Project.safe_id(project),
manager_user,
description)
def add_to_project(self, user, project):
"""Add user to project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
        with self.driver() as drv:
            return drv.add_to_project(uid, pid)
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) == project.project_manager_id
def is_project_member(self, user, project):
"""Checks to see if user is a member of project"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) in project.member_ids
def remove_from_project(self, user, project):
"""Removes a user from a project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
"""Gets vpn ip and port for project
@type project: Project or project_id
@param project: Project from which to get associated vpn data
        @rtype: tuple of (str, str)
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
networks = db.project_get_networks(context.get_admin_context(),
Project.safe_id(project), False)
if not networks:
return (None, None)
# TODO(tr3buchet): not sure what you guys plan on doing with this
# but it's possible for a project to have multiple sets of vpn data
# for now I'm just returning the first one
network = networks[0]
return (network['vpn_public_address'],
network['vpn_public_port'])
def delete_project(self, project):
"""Deletes a project"""
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
def get_user(self, uid):
"""Retrieves a user by id"""
with self.driver() as drv:
user_dict = drv.get_user(uid)
if user_dict:
return User(**user_dict)
def get_user_from_access_key(self, access_key):
"""Retrieves a user by access key"""
with self.driver() as drv:
user_dict = drv.get_user_from_access_key(access_key)
if user_dict:
return User(**user_dict)
def get_users(self):
"""Retrieves a list of all users"""
with self.driver() as drv:
user_list = drv.get_users()
if not user_list:
return []
return [User(**user_dict) for user_dict in user_list]
def create_user(self, name, access=None, secret=None, admin=False):
"""Creates a user
@type name: str
@param name: Name of the user to create.
@type access: str
@param access: Access Key (defaults to a random uuid)
@type secret: str
@param secret: Secret Key (defaults to a random uuid)
@type admin: bool
        @param admin: Whether to set the admin flag. The admin flag gives
                      superuser status regardless of roles specified for the
                      user.
@rtype: User
@return: The new user.
"""
if access is None:
access = str(uuid.uuid4())
if secret is None:
secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
rvname = rv.name
rvadmin = rv.admin
LOG.audit(_("Created user %(rvname)s"
" (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
drv.delete_user(uid)
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
if access_key:
LOG.audit(_("Access Key change for user %s"), uid)
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %(admin)r"
" for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
private_key, signed_cert = crypto.generate_x509_cert(user.id, pid)
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
if use_dmz and FLAGS.region_list:
regions = {}
for item in FLAGS.region_list:
region, _sep, region_host = item.partition("=")
regions[region] = region_host
else:
regions = {'nova': FLAGS.ec2_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user,
pid,
use_dmz,
host)
zippy.writestr(FLAGS.credential_rc_file % region, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
if vpn_ip:
            with open(FLAGS.vpn_client_template, "r") as configfile:
                s = string.Template(configfile.read())
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=vpn_ip,
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
with open(zf, 'rb') as f:
read_buffer = f.read()
shutil.rmtree(tmpdir)
return read_buffer
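    # The returned zip contains, per the flags above: one rc file per region
    # (credential_rc_file), pk.pem, cert.pem, the CA certificate
    # (FLAGS.ca_file) and, when vpn data exists for the project, nova-vpn.conf.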
def get_environment_rc(self, user, project=None, use_dmz=True):
"""Get environment rc for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user, pid, use_dmz)
@staticmethod
def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
ec2_host = FLAGS.ec2_dmz_host
else:
ec2_host = FLAGS.ec2_host
# NOTE(vish): Always use the dmz since it is used from inside the
# instance
s3_host = FLAGS.s3_dmz
if host:
s3_host = host
ec2_host = host
rc = open(FLAGS.credentials_template).read()
        # NOTE(vish): Deprecated auth uses an access key; when deprecated
        #             auth is off, the user_id is used in its place.
if FLAGS.use_deprecated_auth:
access = user.access
else:
access = user.id
rc = rc % {'access': access,
'project': pid,
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme,
ec2_host,
FLAGS.osapi_port,
FLAGS.osapi_path),
'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
return rc
| {
"content_hash": "6a2f1c7f8eb050d62f854ae8f326521e",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 79,
"avg_line_length": 37.36674816625917,
"alnum_prop": 0.5639926715958908,
"repo_name": "nii-cloud/dodai-compute",
"id": "e0504464ee8df7e0f88b9766c4776edcb408b7d7",
"size": "31343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/auth/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4253758"
},
{
"name": "Shell",
"bytes": "42407"
}
],
"symlink_target": ""
} |
from typing import List
import setuptools
from setuptools import find_packages
def get_requirements() -> List[str]:
with open("requirements.txt") as f:
requirements = f.read().splitlines()
return requirements
setuptools.setup(
name="capitalone-dataprofiler-expectations",
version="0.1.0",
install_requires=get_requirements(),
packages=find_packages(exclude=["assets", "tests"]),
)
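# For local development this package can be installed in editable mode with
# the standard `pip install -e .` from this directory.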
| {
"content_hash": "a4c6159b1a5fbbb5bd938422eac9c40c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 23.22222222222222,
"alnum_prop": 0.7057416267942583,
"repo_name": "great-expectations/great_expectations",
"id": "3fe83cd9d3084d4f12b9efb99ef79a1de6c896fa",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contrib/capitalone_dataprofiler_expectations/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
from amon.apps.servers.models import server_model
from amon.apps.processes.models import process_model
from amon.apps.plugins.models import plugin_model
| {
"content_hash": "25a3894b240a4b75c2ebbed42edf8ed2",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 51,
"alnum_prop": 0.8431372549019608,
"repo_name": "martinrusev/amonone",
"id": "f2cd2690169d97ef2885ff5857f29fa76d605511",
"size": "153",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "amon/apps/system/common_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77950"
},
{
"name": "JavaScript",
"bytes": "28811"
},
{
"name": "Python",
"bytes": "180983"
},
{
"name": "Ruby",
"bytes": "131"
},
{
"name": "Shell",
"bytes": "5652"
}
],
"symlink_target": ""
} |
"""
Cascade decomposition
=====================
This example script shows how to compute and plot the cascade decomposition of
a single radar precipitation field in pysteps.
"""
from matplotlib import cm, pyplot as plt
import numpy as np
import os
from pprint import pprint
from pysteps.cascade.bandpass_filters import filter_gaussian
from pysteps import io, rcparams
from pysteps.cascade.decomposition import decomposition_fft
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read precipitation field
# ------------------------
#
# First thing, the radar composite is imported and transformed in units
# of dB.
# Import the example radar composite
root_path = rcparams.data_sources["fmi"]["root_path"]
filename = os.path.join(
root_path, "20160928", "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz"
)
R, _, metadata = io.import_fmi_pgm(filename, gzipped=True)
# Convert to rain rate
R, metadata = conversion.to_rainrate(R, metadata)
# Nicely print the metadata
pprint(metadata)
# Plot the rainfall field
plot_precip_field(R, geodata=metadata)
plt.show()
# Log-transform the data
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
###############################################################################
# 2D Fourier spectrum
# --------------------
#
# Compute and plot the 2D Fourier power spectrum of the precipitation field.
# Replace non-finite values (NaNs) with the fill value
R[~np.isfinite(R)] = metadata["zerovalue"]
# Compute the Fourier transform of the input field
F = abs(np.fft.fftshift(np.fft.fft2(R)))
# Plot the power spectrum
M, N = F.shape
fig, ax = plt.subplots()
im = ax.imshow(
np.log(F**2), vmin=4, vmax=24, cmap=cm.jet, extent=(-N / 2, N / 2, -M / 2, M / 2)
)
cb = fig.colorbar(im)
ax.set_xlabel("Wavenumber $k_x$")
ax.set_ylabel("Wavenumber $k_y$")
ax.set_title("Log-power spectrum of R")
plt.show()
###############################################################################
# Cascade decomposition
# ---------------------
#
# First, construct a set of Gaussian bandpass filters and plot the corresponding
# 1D filters.
num_cascade_levels = 7
# Construct the Gaussian bandpass filters
filter = filter_gaussian(R.shape, num_cascade_levels)
# Plot the bandpass filter weights
L = max(N, M)
fig, ax = plt.subplots()
for k in range(num_cascade_levels):
ax.semilogx(
np.linspace(0, L / 2, len(filter["weights_1d"][k, :])),
filter["weights_1d"][k, :],
"k-",
base=pow(0.5 * L / 3, 1.0 / (num_cascade_levels - 2)),
)
ax.set_xlim(1, L / 2)
ax.set_ylim(0, 1)
xt = np.hstack([[1.0], filter["central_wavenumbers"][1:]])
ax.set_xticks(xt)
ax.set_xticklabels(["%.2f" % cf for cf in filter["central_wavenumbers"]])
ax.set_xlabel("Radial wavenumber $|\mathbf{k}|$")
ax.set_ylabel("Normalized weight")
ax.set_title("Bandpass filter weights")
plt.show()
###############################################################################
# Finally, apply the 2D Gaussian filters to decompose the radar rainfall field
# into a set of cascade levels of decreasing spatial scale and plot them.
decomp = decomposition_fft(R, filter, compute_stats=True)
# Plot the normalized cascade levels
for i in range(num_cascade_levels):
mu = decomp["means"][i]
sigma = decomp["stds"][i]
decomp["cascade_levels"][i] = (decomp["cascade_levels"][i] - mu) / sigma
fig, ax = plt.subplots(nrows=2, ncols=4)
ax[0, 0].imshow(R, cmap=cm.RdBu_r, vmin=-5, vmax=5)
ax[0, 1].imshow(decomp["cascade_levels"][0], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 2].imshow(decomp["cascade_levels"][1], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 3].imshow(decomp["cascade_levels"][2], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 0].imshow(decomp["cascade_levels"][3], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 1].imshow(decomp["cascade_levels"][4], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 2].imshow(decomp["cascade_levels"][5], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 3].imshow(decomp["cascade_levels"][6], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 0].set_title("Observed")
ax[0, 1].set_title("Level 1")
ax[0, 2].set_title("Level 2")
ax[0, 3].set_title("Level 3")
ax[1, 0].set_title("Level 4")
ax[1, 1].set_title("Level 5")
ax[1, 2].set_title("Level 6")
ax[1, 3].set_title("Level 7")
for i in range(2):
for j in range(4):
ax[i, j].set_xticks([])
ax[i, j].set_yticks([])
plt.tight_layout()
plt.show()
# sphinx_gallery_thumbnail_number = 4
| {
"content_hash": "94dbdda270ece14ec19dc6321e90303e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 86,
"avg_line_length": 32.098591549295776,
"alnum_prop": 0.6290039491004826,
"repo_name": "pySTEPS/pysteps",
"id": "a9f02799309783f3e9483e137b9ec495dccdebb9",
"size": "4576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plot_cascade_decomposition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "35339"
},
{
"name": "Python",
"bytes": "1211447"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-eve-auth',
version='0.0.1',
packages=['eve_auth'],
include_package_data=True,
license='BSD License', # example license
description='EvE Online SSO login and api access.',
long_description=README,
url='https://github.com/bastianh/django-eve-auth/',
author='Bastian Hoyer',
author_email='dafire@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| {
"content_hash": "642507d1cd24fd779696dfe538f2d674",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6234276729559748,
"repo_name": "bastianh/django-eve-auth",
"id": "51ec4a16b644207c603a538ed0ed7856aad3fc78",
"size": "1272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25211"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PersonExternalIdentifier(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, created_date=None, last_modified_date=None, source=None, external_id_type=None, external_id_value=None, external_id_url=None, external_id_relationship=None, visibility=None, path=None, put_code=None, display_index=None):
"""
PersonExternalIdentifier - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created_date': 'CreatedDate',
'last_modified_date': 'LastModifiedDate',
'source': 'Source',
'external_id_type': 'str',
'external_id_value': 'str',
'external_id_url': 'Url',
'external_id_relationship': 'str',
'visibility': 'str',
'path': 'str',
'put_code': 'int',
'display_index': 'int'
}
self.attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'external_id_type': 'external-id-type',
'external_id_value': 'external-id-value',
'external_id_url': 'external-id-url',
'external_id_relationship': 'external-id-relationship',
'visibility': 'visibility',
'path': 'path',
'put_code': 'put-code',
'display_index': 'display-index'
}
self._created_date = created_date
self._last_modified_date = last_modified_date
self._source = source
self._external_id_type = external_id_type
self._external_id_value = external_id_value
self._external_id_url = external_id_url
self._external_id_relationship = external_id_relationship
self._visibility = visibility
self._path = path
self._put_code = put_code
self._display_index = display_index
@property
def created_date(self):
"""
Gets the created_date of this PersonExternalIdentifier.
:return: The created_date of this PersonExternalIdentifier.
:rtype: CreatedDate
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""
Sets the created_date of this PersonExternalIdentifier.
:param created_date: The created_date of this PersonExternalIdentifier.
:type: CreatedDate
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""
Gets the last_modified_date of this PersonExternalIdentifier.
:return: The last_modified_date of this PersonExternalIdentifier.
:rtype: LastModifiedDate
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""
Sets the last_modified_date of this PersonExternalIdentifier.
:param last_modified_date: The last_modified_date of this PersonExternalIdentifier.
:type: LastModifiedDate
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""
Gets the source of this PersonExternalIdentifier.
:return: The source of this PersonExternalIdentifier.
:rtype: Source
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this PersonExternalIdentifier.
:param source: The source of this PersonExternalIdentifier.
:type: Source
"""
self._source = source
@property
def external_id_type(self):
"""
Gets the external_id_type of this PersonExternalIdentifier.
:return: The external_id_type of this PersonExternalIdentifier.
:rtype: str
"""
return self._external_id_type
@external_id_type.setter
def external_id_type(self, external_id_type):
"""
Sets the external_id_type of this PersonExternalIdentifier.
:param external_id_type: The external_id_type of this PersonExternalIdentifier.
:type: str
"""
if external_id_type is None:
raise ValueError("Invalid value for `external_id_type`, must not be `None`")
self._external_id_type = external_id_type
@property
def external_id_value(self):
"""
Gets the external_id_value of this PersonExternalIdentifier.
:return: The external_id_value of this PersonExternalIdentifier.
:rtype: str
"""
return self._external_id_value
@external_id_value.setter
def external_id_value(self, external_id_value):
"""
Sets the external_id_value of this PersonExternalIdentifier.
:param external_id_value: The external_id_value of this PersonExternalIdentifier.
:type: str
"""
if external_id_value is None:
raise ValueError("Invalid value for `external_id_value`, must not be `None`")
self._external_id_value = external_id_value
@property
def external_id_url(self):
"""
Gets the external_id_url of this PersonExternalIdentifier.
:return: The external_id_url of this PersonExternalIdentifier.
:rtype: Url
"""
return self._external_id_url
@external_id_url.setter
def external_id_url(self, external_id_url):
"""
Sets the external_id_url of this PersonExternalIdentifier.
:param external_id_url: The external_id_url of this PersonExternalIdentifier.
:type: Url
"""
self._external_id_url = external_id_url
@property
def external_id_relationship(self):
"""
Gets the external_id_relationship of this PersonExternalIdentifier.
:return: The external_id_relationship of this PersonExternalIdentifier.
:rtype: str
"""
return self._external_id_relationship
@external_id_relationship.setter
def external_id_relationship(self, external_id_relationship):
"""
Sets the external_id_relationship of this PersonExternalIdentifier.
:param external_id_relationship: The external_id_relationship of this PersonExternalIdentifier.
:type: str
"""
allowed_values = ["PART_OF", "SELF"]
if external_id_relationship not in allowed_values:
raise ValueError(
"Invalid value for `external_id_relationship` ({0}), must be one of {1}"
.format(external_id_relationship, allowed_values)
)
self._external_id_relationship = external_id_relationship
@property
def visibility(self):
"""
Gets the visibility of this PersonExternalIdentifier.
:return: The visibility of this PersonExternalIdentifier.
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""
Sets the visibility of this PersonExternalIdentifier.
:param visibility: The visibility of this PersonExternalIdentifier.
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}"
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""
Gets the path of this PersonExternalIdentifier.
:return: The path of this PersonExternalIdentifier.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this PersonExternalIdentifier.
:param path: The path of this PersonExternalIdentifier.
:type: str
"""
self._path = path
@property
def put_code(self):
"""
Gets the put_code of this PersonExternalIdentifier.
:return: The put_code of this PersonExternalIdentifier.
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""
Sets the put_code of this PersonExternalIdentifier.
:param put_code: The put_code of this PersonExternalIdentifier.
:type: int
"""
self._put_code = put_code
@property
def display_index(self):
"""
Gets the display_index of this PersonExternalIdentifier.
:return: The display_index of this PersonExternalIdentifier.
:rtype: int
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""
Sets the display_index of this PersonExternalIdentifier.
:param display_index: The display_index of this PersonExternalIdentifier.
:type: int
"""
self._display_index = display_index
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PersonExternalIdentifier):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
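# Minimal usage sketch (illustrative values, not part of the generated code).
# Note that __init__ assigns the private fields directly, so the validating
# setters (e.g. the allowed_values check on visibility) only run on later
# attribute assignment:
#
#     pid = PersonExternalIdentifier(external_id_type='doi',
#                                    external_id_value='10.1000/example',
#                                    external_id_relationship='SELF')
#     pid.visibility = 'PUBLIC'  # goes through the validating setter
#     print(pid.to_dict())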
| {
"content_hash": "f5899791717ca389388a18e8b006a540",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 243,
"avg_line_length": 30.1869918699187,
"alnum_prop": 0.5885627076039142,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "3d41566184021c56aaa03f901f77046c1c1ea617",
"size": "11156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api/models/person_external_identifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.contrib.operators.sagemaker_base_operator import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
class SageMakerModelOperator(SageMakerBaseOperator):
"""
Create a SageMaker model.
    This operator returns the description of the model created in Amazon SageMaker
:param config: The configuration necessary to create a model.
For details of the configuration parameter see :py:meth:`SageMaker.Client.create_model`
:type config: dict
:param aws_conn_id: The AWS connection ID to use.
:type aws_conn_id: str
"""
@apply_defaults
def __init__(self,
config,
*args, **kwargs):
super().__init__(config=config,
*args, **kwargs)
self.config = config
def expand_role(self):
if 'ExecutionRoleArn' in self.config:
hook = AwsHook(self.aws_conn_id)
self.config['ExecutionRoleArn'] = hook.expand_role(self.config['ExecutionRoleArn'])
def execute(self, context):
self.preprocess_config()
self.log.info('Creating SageMaker Model %s.', self.config['ModelName'])
response = self.hook.create_model(self.config)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise AirflowException('Sagemaker model creation failed: %s' % response)
else:
return {
'Model': self.hook.describe_model(
self.config['ModelName']
)
}
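# Minimal usage sketch (hypothetical task and config values, not from this
# repository); the config keys mirror SageMaker.Client.create_model:
#
#     create_model = SageMakerModelOperator(
#         task_id='sagemaker_create_model',
#         config={
#             'ModelName': 'my-model',
#             'ExecutionRoleArn': 'arn:aws:iam::123456789012:role/sagemaker-role',
#             'PrimaryContainer': {
#                 'Image': '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-image:latest',
#                 'ModelDataUrl': 's3://my-bucket/model.tar.gz',
#             },
#         },
#         aws_conn_id='aws_default',
#         dag=dag,
#     )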
| {
"content_hash": "bf0d7b1fb51870e4b762f92ebb626996",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 95,
"avg_line_length": 33.916666666666664,
"alnum_prop": 0.6351351351351351,
"repo_name": "r39132/airflow",
"id": "df5670b78fc09096dd9cbdba02b8317696dfbcb5",
"size": "2440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/sagemaker_model_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to normalized encoding
names as defined in the normalize_encoding() function below, e.g.
'utf-8' must be implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> codecs.CodecInfo object
The getregentry() API must return a CodecInfo object with encoder, decoder,
incrementalencoder, incrementaldecoder, streamwriter and streamreader
attributes which adhere to the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be normalized encoding
names as defined by normalize_encoding().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
from . import aliases
_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_aliases = aliases.aliases
class CodecRegistryError(LookupError, SystemError):
pass
def normalize_encoding(encoding):
""" Normalize an encoding name.
Normalization works as follows: all non-alphanumeric
characters except the dot used for Python package names are
collapsed and replaced with a single underscore, e.g. ' -;#'
becomes '_'. Leading and trailing underscores are removed.
Note that encoding names should be ASCII only; if they do use
non-ASCII characters, these must be Latin-1 compatible.
"""
if isinstance(encoding, bytes):
encoding = str(encoding, "ascii")
chars = []
punct = False
for c in encoding:
if c.isalnum() or c == '.':
if punct and chars:
chars.append('_')
chars.append(c)
punct = False
else:
punct = True
return ''.join(chars)
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding, _unknown)
if entry is not _unknown:
return entry
# Import the module:
#
# First try to find an alias for the normalized encoding
# name and lookup the module using the aliased name, then try to
# lookup the module using the standard import scheme, i.e. first
# try in the encodings package, then at top-level.
#
norm_encoding = normalize_encoding(encoding)
aliased_encoding = _aliases.get(norm_encoding) or \
_aliases.get(norm_encoding.replace('.', '_'))
if aliased_encoding is not None:
modnames = [aliased_encoding,
norm_encoding]
else:
modnames = [norm_encoding]
for modname in modnames:
if not modname or '.' in modname:
continue
try:
# Import is absolute to prevent the possibly malicious import of a
# module with side-effects that is not in the 'encodings' package.
mod = __import__('encodings.' + modname, fromlist=_import_tail,
level=0)
except ImportError:
pass
else:
break
else:
mod = None
try:
getregentry = mod.getregentry
except AttributeError:
# Not a codec module
mod = None
if mod is None:
# Cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
entry = getregentry()
if not isinstance(entry, codecs.CodecInfo):
if not 4 <= len(entry) <= 7:
raise CodecRegistryError('module "%s" (%s) failed to register'
% (mod.__name__, mod.__file__))
if not callable(entry[0]) or not callable(entry[1]) or \
(entry[2] is not None and not callable(entry[2])) or \
(entry[3] is not None and not callable(entry[3])) or \
(len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
(len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
% (mod.__name__, mod.__file__))
if len(entry)<7 or entry[6] is None:
entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
entry = codecs.CodecInfo(*entry)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if alias not in _aliases:
_aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
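# Usage note (not part of the module): once registered, the search function
# serves codec lookups by name, e.g. codecs.lookup('iso8859-1') normalizes the
# name to 'iso8859_1', follows the alias to 'latin_1' and imports
# encodings.latin_1.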
| {
"content_hash": "3ce4b41ad77ff943a9bbb7928a7d15f6",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 84,
"avg_line_length": 33.33552631578947,
"alnum_prop": 0.6189066508782317,
"repo_name": "ArcherSys/ArcherSys",
"id": "281b4f3edfcf7b039f8b7d94b61f13fae1595520",
"size": "5068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/encodings/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import sys
import pyglet
from pyglet.gl import *
import squirtle
config = pyglet.gl.Config(sample_buffers=1, samples=4)
w = pyglet.window.Window(config=config, resizable=True)
#w = pyglet.window.Window(800, 600)
keys = pyglet.window.key.KeyStateHandler()
w.push_handlers(keys)
glClearColor(1,1,1,1)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glLineWidth(2)
squirtle.setup_gl()
filelist = [f for f in os.listdir('svgs')
if f.endswith('svg') or f.endswith('svgz')]
filename = None
svgObj = None
def nextFile():
global filename, svgObj
if not filename:
next = 0
else:
prevFile = os.path.basename(filename)
next = filelist.index(prevFile)+1
next %= len(filelist)
filename = os.path.join('svgs', filelist[next])
print 'Parsing', filename
svgObj = squirtle.SVG(filename)
svgObj.anchor_x, svgObj.anchor_y = svgObj.width/2, svgObj.height/2
nextFile()
zoom = 1
angle = 0
draw_x = 400
draw_y = 300
def tick(dt):
global zoom, angle, draw_x, draw_y
if keys[pyglet.window.key.W]:
draw_y -= 8
elif keys[pyglet.window.key.S]:
draw_y += 8
elif keys[pyglet.window.key.D]:
draw_x -= 8
elif keys[pyglet.window.key.A]:
draw_x += 8
elif keys[pyglet.window.key.UP]:
zoom *= 1.1
elif keys[pyglet.window.key.DOWN]:
zoom /= 1.1
elif keys[pyglet.window.key.LEFT]:
angle -= 8
elif keys[pyglet.window.key.RIGHT]:
angle += 8
def on_key_press(symbol, modifiers):
if symbol == pyglet.window.key.SPACE:
nextFile()
w.push_handlers(on_key_press)
pyglet.clock.schedule_interval(tick, 1/60.0)
@w.event
def on_draw():
w.clear()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, 800.0, 600, 0)
glMatrixMode(GL_MODELVIEW)
svgObj.draw(draw_x, draw_y, scale=zoom, angle=angle)
pyglet.app.run()
| {
"content_hash": "301ad2da05b4188423b64b289b62fe15",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 70,
"avg_line_length": 23.536585365853657,
"alnum_prop": 0.6435233160621762,
"repo_name": "fathat/squirtle",
"id": "954aa11b6084d313cb99e0a7f1aa72249b7940c0",
"size": "1953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_suite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44935"
}
],
"symlink_target": ""
} |
import unittest
import sys
import os
import argparse
import logging
import numpy as np
import math
import copy
import itertools
from teHmm.track import TrackData, CategoryMap
from teHmm.hmm import MultitrackHmm
from teHmm.cfg import MultitrackCfg
from teHmm.trackIO import getMergedBedIntervals, readBedIntervals
from teHmm.modelIO import loadModel
from teHmm.common import myLog, EPSILON, initBedTool, cleanBedTool
from teHmm.common import addLoggingOptions, setLoggingFromOptions, logger
from teHmm.common import runParallelShellCommands, runShellCommand, getLocalTempPath
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Evaluate a given data set with a trained HMM. Display"
" the log probability of the input data given the model, and "
"optionally output the most likely sequence of hidden states.")
parser.add_argument("tracksInfo", help="Path of Tracks Info file "
"containing paths to genome annotation tracks")
parser.add_argument("inputModel", help="Path of hmm created with"
"teHmmTrain.py")
parser.add_argument("bedRegions", help="Intervals to process")
parser.add_argument("--bed", help="path of file to write viterbi "
"output to (most likely sequence of hidden states)",
default=None)
parser.add_argument("--numThreads", help="Number of threads to use (only"
" applies to CFG parser for the moment)",
type=int, default=1)
parser.add_argument("--slice", help="Make sure that regions are sliced"
" to a maximum length of the given value. Most "
"useful when model is a CFG to keep memory down. "
"When 0, no slicing is done",
type=int, default=0)
parser.add_argument("--segment", help="Use the intervals in bedRegions"
" as segments which each count as a single column"
" for evaluattion. Note the model should have been"
" trained with the --segment option pointing to this"
" same bed file.", action="store_true", default=False)
parser.add_argument("--segLen", help="Effective segment length used for"
" normalizing input segments (specifying 0 means no"
" normalization applied)", type=int, default=0)
parser.add_argument("--maxPost", help="Use maximum posterior decoding instead"
" of Viterbi for evaluation", action="store_true",
default=False)
parser.add_argument("--pd", help="Output BED file for posterior distribution. Must"
" be used in conjunction with --pdStates (View on the "
"browser via bedGraphToBigWig)", default=None)
parser.add_argument("--pdStates", help="comma-separated list of state names to use"
" for computing posterior distribution. For example: "
" --pdStates inside,LTR_left,LTR_right will compute the probability"
", for each observation, that the hidden state is inside OR LTR_left"
" OR LTR_right. Must be used with --pd to specify output "
"file.", default=None)
parser.add_argument("--bic", help="save Bayesian Information Criterion (BIC) score"
" in given file", default=None)
parser.add_argument("--ed", help="Output BED file for emission distribution. Must"
" be used in conjunction with --edStates (View on the "
"browser via bedGraphToBigWig)", default=None)
parser.add_argument("--edStates", help="comma-separated list of state names to use"
" for computing emission distribution. For example: "
" --edStates inside,LTR_left for each obsercation the probability "
" that inside emitted that observaiton plus the probabillity that"
" LTR_left emitted it. If more than one state is selected, this "
" is not a distribution, but a sum of distributions (and values"
" can exceed 1). Mostly for debugging purposes. Note output in LOG",
default=None)
parser.add_argument("--chroms", help="list of chromosomes, or regions, to run in parallel"
" (in BED format). input regions will be intersected with each line"
" in this file, and the result will correspsond to an individual job",
default=None)
parser.add_argument("--proc", help="number of processes (use in conjunction with --chroms)",
type=int, default=1)
addLoggingOptions(parser)
args = parser.parse_args()
setLoggingFromOptions(args)
tempBedToolPath = initBedTool()
if args.slice <= 0:
args.slice = sys.maxint
elif args.segment is True:
raise RuntimeError("--slice and --segment options are not compatible at "
"this time")
if (args.pd is not None) ^ (args.pdStates is not None):
raise RuntimeError("--pd requires --pdStates and vice versa")
if (args.ed is not None) ^ (args.edStates is not None):
raise RuntimeError("--ed requires --edStates and vice versa")
if args.bed is None and (args.pd is not None or args.ed is not None):
raise RuntimeError("Both --ed and --pd only usable in conjunction with"
" --bed")
if args.chroms is not None:
# hack to allow chroms argument to chunk and rerun
parallelDispatch(argv, args)
cleanBedTool(tempBedToolPath)
return 0
# load model created with teHmmTrain.py
logger.info("loading model %s" % args.inputModel)
model = loadModel(args.inputModel)
if isinstance(model, MultitrackCfg):
if args.maxPost is True:
            raise RuntimeError("--post not supported on CFG models")
# apply the effective segment length
if args.segLen > 0:
assert args.segment is True
model.getEmissionModel().effectiveSegmentLength = args.segLen
# read intervals from the bed file
logger.info("loading target intervals from %s" % args.bedRegions)
mergedIntervals = getMergedBedIntervals(args.bedRegions, ncol=4)
if mergedIntervals is None or len(mergedIntervals) < 1:
raise RuntimeError("Could not read any intervals from %s" %
args.bedRegions)
# slice if desired
choppedIntervals = [x for x in slicedIntervals(mergedIntervals, args.slice)]
# read segment intervals
segIntervals = None
if args.segment is True:
logger.info("loading segment intervals from %s" % args.bedRegions)
segIntervals = readBedIntervals(args.bedRegions, sort=True)
# load the input
# read the tracks, while intersecting them with the given interval
trackData = TrackData()
# note we pass in the trackList that was saved as part of the model
# because we do not want to generate a new one.
logger.info("loading tracks %s" % args.tracksInfo)
trackData.loadTrackData(args.tracksInfo, choppedIntervals,
model.getTrackList(),
segmentIntervals=segIntervals)
# do the viterbi algorithm
if isinstance(model, MultitrackHmm):
algname = "viterbi"
if args.maxPost is True:
algname = "posterior decoding"
logger.info("running %s algorithm" % algname)
elif isinstance(model, MultitrackCfg):
logger.info("running CYK algorithm")
vitOutFile = None
if args.bed is not None:
vitOutFile = open(args.bed, "w")
totalScore = 0
tableIndex = 0
totalDatapoints = 0
# Note: in general there's room to save on memory by only computing single
# track table at once (just need to add table by table interface to hmm...)
posteriors = [None] * trackData.getNumTrackTables()
posteriorsFile = None
posteriorsMask = None
if args.pd is not None:
posteriors = model.posteriorDistribution(trackData)
posteriorsFile = open(args.pd, "w")
posteriorsMask = getPosteriorsMask(args.pdStates, model)
assert len(posteriors[0][0]) == len(posteriorsMask)
emProbs = [None] * trackData.getNumTrackTables()
emissionsFile = None
emissionsMask = None
if args.ed is not None:
emProbs = model.emissionDistribution(trackData)
emissionsFile = open(args.ed, "w")
emissionsMask = getPosteriorsMask(args.edStates, model)
assert len(emProbs[0][0]) == len(emissionsMask)
decodeFunction = model.viterbi
if args.maxPost is True:
decodeFunction = model.posteriorDecode
for i, (vitLogProb, vitStates) in enumerate(decodeFunction(trackData,
numThreads=args.numThreads)):
totalScore += vitLogProb
if args.bed is not None or args.pd is not None:
if args.bed is not None:
vitOutFile.write("#Viterbi Score: %f\n" % (vitLogProb))
trackTable = trackData.getTrackTableList()[tableIndex]
tableIndex += 1
statesToBed(trackTable,
vitStates, vitOutFile, posteriors[i], posteriorsMask,
posteriorsFile, emProbs[i], emissionsMask, emissionsFile)
totalDatapoints += len(vitStates) * trackTable.getNumTracks()
print "Viterbi (log) score: %f" % totalScore
if isinstance(model, MultitrackHmm) and model.current_iteration is not None:
print "Number of EM iterations: %d" % model.current_iteration
if args.bed is not None:
vitOutFile.close()
if posteriorsFile is not None:
posteriorsFile.close()
if emissionsFile is not None:
emissionsFile.close()
if args.bic is not None:
bicFile = open(args.bic, "w")
# http://en.wikipedia.org/wiki/Bayesian_information_criterion
lnL = float(totalScore)
try:
k = float(model.getNumFreeParameters())
except:
# numFreeParameters still not done for semi-supervised
# just pass through a 0 instead of crashing for now
k = 0.0
n = float(totalDatapoints)
bic = -2.0 * lnL + k * (np.log(n) + np.log(2 * np.pi))
bicFile.write("%f\n" % bic)
bicFile.write("# = -2.0 * lnL + k * (lnN + ln(2 * np.pi))\n"
"# where lnL=%f k=%d (%d states) N=%d (%d obs * %d tracks) lnN=%f\n" % (
lnL, int(k), model.getEmissionModel().getNumStates(), int(totalDatapoints),
totalDatapoints / model.getEmissionModel().getNumTracks(),
model.getEmissionModel().getNumTracks(), np.log(n)))
bicFile.close()
cleanBedTool(tempBedToolPath)
def statesToBed(trackTable, states, bedFile,
posteriors, posteriorsMask, posteriorsFile,
emProbs, emissionsMask, emissionsFile):
"""write a sequence of states out in bed format. Note: continguous
intervals with same state no longer merged (since mask support added)
"""
chrom = trackTable.getChrom()
start = trackTable.getStart()
end = trackTable.getEnd()
segOffsets = trackTable.getSegmentOffsets()
maskOffsets = trackTable.getMaskRunningOffsets()
if segOffsets is None:
assert len(states) == end - start
segDist = 0
for i in xrange(len(states)):
curStart = start + segDist
intLen = 1
if segOffsets is not None:
intLen = trackTable.getSegmentLength(i)
segDist += intLen
if maskOffsets is not None:
curStart += maskOffsets[curStart - trackTable.getStart()]
curEnd = curStart + intLen
if bedFile is not None:
bedFile.write("%s\t%d\t%d\t%s\n" % (chrom, curStart, curEnd, states[i]))
if posteriors is not None:
posteriorsFile.write("%s\t%d\t%d\t%s\n" % (chrom, curStart, curEnd,
np.sum(posteriors[i-1] * posteriorsMask)))
if emProbs is not None:
emissionsFile.write("%s\t%d\t%d\t%s\n" % (chrom, curStart, curEnd,
np.log(np.sum(np.exp(emProbs[i-1]) * emissionsMask))))
def slicedIntervals(bedIntervals, chunkSize):
"""slice bed intervals by a given length. used as a quick way to get
cfg working via cutting up the input beds (after they get merged)."""
for interval in bedIntervals:
iLen = interval[2] - interval[1]
if iLen <= chunkSize:
yield interval
else:
nCuts = int(math.ceil(float(iLen) / float(chunkSize)))
for sliceNo in xrange(nCuts):
sInt = list(copy.deepcopy(interval))
sInt[1] = sliceNo * chunkSize
if sliceNo < nCuts - 1:
sInt[2] = sInt[1] + chunkSize
assert sInt[2] > sInt[1]
yield tuple(sInt)
def getPosteriorsMask(pdStates, hmm):
""" returns array mask where mask[i] == 1 iff state i is part of our desired
posterior distribution"""
stateMap = hmm.getStateNameMap()
if stateMap is None:
stateMap = CategoryMap(reserved = 0)
for i in xrange(hmm.getEmissionModel().getNumStates()):
stateMap.update(str(i))
mask = np.zeros((len(stateMap)), dtype=np.int8)
for state in pdStates.split(","):
if not stateMap.has(state):
logger.warning("Posterior (or Emission) Distribution state %s"
" not found in model" % state)
else:
stateNumber = stateMap.getMap(state)
mask[stateNumber] = 1
return mask
def parallelDispatch(argv, args):
""" chunk up input with chrom option. recursivlely launch eval. merge
results """
jobList = []
chromIntervals = readBedIntervals(args.chroms, sort=True)
chromFiles = []
regionFiles = []
bedFiles = []
pdFiles = []
bicFiles = []
edFiles = []
for chrom in chromIntervals:
cmdToks = copy.deepcopy(argv)
cmdToks[cmdToks.index("--chrom") + 1] = ""
cmdToks[cmdToks.index("--chrom")] = ""
chromPath = getLocalTempPath("Temp", ".bed")
cpFile = open(chromPath, "w")
cpFile.write("%s\t%d\t%d\t0\t0\t.\n" % (chrom[0], chrom[1], chrom[2]))
cpFile.close()
regionPath = getLocalTempPath("Temp", ".bed")
runShellCommand("intersectBed -a %s -b %s | sortBed > %s" % (args.bedRegions,
chromPath,
regionPath))
if os.path.getsize(regionPath) < 2:
continue
regionFiles.append(regionPath)
chromFiles.append(chromPath)
cmdToks[3] = regionPath
if args.bed is not None:
bedPath = getLocalTempPath("Temp", ".bed")
cmdToks[cmdToks.index("--bed")+1] = bedPath
bedFiles.append(bedPath)
if args.pd is not None:
pdPath = getLocalTempPath("Temp", ".bed")
cmdToks[cmdToks.index("--pd")+1] = pdPath
pdFiles.append(pdPath)
if args.ed is not None:
edPath = getLocalTempPath("Temp", ".bed")
cmdToks[cmdToks.index("--ed")+1] = edPath
edFiles.append(edPath)
if args.bic is not None:
bicPath = getLocalTempPath("Temp", ".bic")
cmdToks[cmdToks.index("--bic")+1] = bicPath
bicFiles.append(bicPath)
cmd = " ".join(cmdToks)
jobList.append(cmd)
runParallelShellCommands(jobList, args.proc)
for i in xrange(len(jobList)):
if i == 0:
ct = ">"
else:
ct = ">>"
if len(bedFiles) > 0:
runShellCommand("cat %s %s %s" % (bedFiles[i], ct, args.bed))
if len(pdFiles) > 0:
runShellCommand("cat %s %s %s" % (pdFiles[i], ct, args.pd))
if len(edFiles) > 0:
runShellCommand("cat %s %s %s" % (edFiles[i], ct, args.ed))
if len(bicFiles) > 0:
runShellCommand("cat %s %s %s" % (bicFiles[i], ct, args.bic))
for i in itertools.chain(chromFiles, regionFiles, bedFiles, pdFiles, edFiles,
bicFiles):
runShellCommand("rm %s" % i)
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "69373f4371b716301793c599d3da22e3",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 97,
"avg_line_length": 44.146596858638745,
"alnum_prop": 0.5965962998102466,
"repo_name": "glennhickey/teHmm",
"id": "12ea04eae5cbc2ad750b891b02f1be0eb043c60d",
"size": "16974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/teHmmEval.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "722928"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
} |
import os
import sys
root_path = os.path.split(os.path.dirname(__file__))[0]
sys.path.insert(0, os.path.join(root_path, 'lib'))
import ssl
import tornado.ioloop
import tornado.httpserver
#import com.web
#import com.config
import com
#from com.utils import make_cookie_secret
def write_pid():
pidfile = '/var/run/onepanel.pid'
pidfp = open(pidfile, 'w')
pidfp.write(str(os.getpid()))
pidfp.close()
def main():
# settings of tornado application
settings = {
'root_path': root_path,
'data_path': os.path.join(root_path, 'data'),
'static_path': os.path.join(root_path, 'static'),
'xsrf_cookies': True,
'cookie_secret': com.utils.make_cookie_secret(),
}
# read configuration from config.ini
cfg = com.config.Config(settings['data_path'] + '/config.ini')
server_ip = cfg.get('server', 'ip')
server_port = cfg.get('server', 'port')
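    # Illustrative shape of the config.ini these reads assume (example values):
    #
    #     [server]
    #     ip = 0.0.0.0
    #     port = 8888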
application = com.set_ui.SetUI(settings)
server = tornado.httpserver.HTTPServer(application)
server.listen(server_port, address=server_ip)
write_pid()
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main() | {
"content_hash": "2b2362abd4d4722cd9c93715b176e732",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 26.613636363636363,
"alnum_prop": 0.6447480785653288,
"repo_name": "dingzg/onepanel",
"id": "5b03947d7ffc39e6b9a51458d22683dec121f70c",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/start_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9692"
},
{
"name": "HTML",
"bytes": "310481"
},
{
"name": "JavaScript",
"bytes": "227540"
},
{
"name": "Python",
"bytes": "1165917"
},
{
"name": "Shell",
"bytes": "4027"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "skidom.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "ed416edc8e111bbd4f8583aeef20ec7f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.23809523809524,
"alnum_prop": 0.6202046035805626,
"repo_name": "racmariano/skidom",
"id": "61df74d3c73824eebfd8f688ea35ea457a5c4211",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4094"
},
{
"name": "HTML",
"bytes": "10934"
},
{
"name": "JavaScript",
"bytes": "6583"
},
{
"name": "Python",
"bytes": "71579"
}
],
"symlink_target": ""
} |
from panoptes.pocs import dome
from panoptes.utils import error
from panoptes.utils import rs232
class AbstractSerialDome(dome.AbstractDome):
"""Abstract base class for controlling a dome via a serial connection.
Takes care of a single thing: configuring the connection to the device.
"""
def __init__(self, *args, **kwargs):
"""Initialize an AbstractSerialDome.
Creates a serial connection to the port indicated in the config.
"""
super().__init__(*args, **kwargs)
# Get config info, e.g. which port (e.g. /dev/ttyUSB123) should we use?
# TODO(jamessynge): Switch to passing configuration of serial port in as a sub-section
# of the dome config in the YAML. That way we don't intermingle serial settings and
# any other settings required.
cfg = self._dome_config
self._port = cfg.get('port')
if not self._port:
msg = 'No port specified in the config for dome: {}'.format(cfg)
self.logger.error(msg)
raise error.DomeNotFound(msg=msg)
baudrate = int(cfg.get('baudrate', 9600))
# Setup our serial connection to the given port.
self.serial = None
try:
self.serial = rs232.SerialData(port=self._port, baudrate=baudrate)
except Exception as err:
raise error.DomeNotFound(err)
def __del__(self):
try:
if self.serial:
self.serial.disconnect()
except AttributeError:
pass
@property
def is_connected(self):
"""True if connected to the hardware or driver."""
if self.serial:
return self.serial.is_connected
return False
def connect(self):
"""Connects to the device via the serial port, if disconnected.
Returns:
bool: Returns True if connected, False otherwise.
"""
if not self.is_connected:
self.logger.debug('Connecting to dome')
try:
self.serial.connect()
self.logger.info('Dome connected: {}'.format(self.is_connected))
except OSError as err:
self.logger.error("OS error: {0}".format(err))
except error.BadSerialConnection as err:
self.logger.warning(
'Could not create serial connection to dome\n{}'.format(err))
else:
self.logger.debug('Already connected to dome')
return self.is_connected
def disconnect(self):
self.logger.debug("Closing serial port for dome")
self.serial.disconnect()
def verify_connected(self):
"""Throw an exception if not connected."""
if not self.is_connected:
raise error.BadSerialConnection(
msg='Not connected to dome at port {}'.format(self._port))
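# Minimal subclassing sketch (hypothetical command protocol, not a real
# driver); concrete domes implement their motion commands on top of
# self.serial:
#
#     class ExampleDome(AbstractSerialDome):
#         def open(self):
#             self.verify_connected()
#             self.serial.write('OPEN\n')  # assumes the device accepts 'OPEN'
#             return True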
| {
"content_hash": "ee3fb1601889f4bfb23dc0d224342a9b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 35.03658536585366,
"alnum_prop": 0.6000696136442742,
"repo_name": "panoptes/POCS",
"id": "b52857b4f7f68a42af72cf09fce1cf137402d3d3",
"size": "2873",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/panoptes/pocs/dome/abstract_serial_dome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5689"
},
{
"name": "JavaScript",
"bytes": "18198"
},
{
"name": "Python",
"bytes": "837393"
},
{
"name": "Shell",
"bytes": "9960"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0105_auto_20210816_1106'),
]
operations = [
migrations.RemoveField(
model_name='section',
name='articles',
),
migrations.AddField(
model_name='section',
name='articles',
field=models.BooleanField(default=False, help_text='Show the latest articles', verbose_name='articles'),
),
]
| {
"content_hash": "2171021cf7ede17c8137fda4575faa54",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 116,
"avg_line_length": 25.5,
"alnum_prop": 0.5764705882352941,
"repo_name": "flavoi/diventi",
"id": "8a36b843ee2baa8baf8b1158c042ada7320a7b7e",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/landing/migrations/0106_auto_20210930_2005.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('statuses', '0002_auto_20141009_0224'),
]
operations = [
migrations.AlterModelOptions(
name='status',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='status',
name='uuid',
field=models.UUIDField(default=uuid.uuid4),
),
]
| {
"content_hash": "ca682528205e5fa853a097644efe8592",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 55,
"avg_line_length": 22.565217391304348,
"alnum_prop": 0.5703275529865125,
"repo_name": "matthewlane/mesa",
"id": "350e45bde68613f8312d60559598f66812fa42b0",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statuses/migrations/0003_auto_20151022_2332.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1276"
},
{
"name": "JavaScript",
"bytes": "12216"
},
{
"name": "Nginx",
"bytes": "509"
},
{
"name": "Python",
"bytes": "6923"
},
{
"name": "Shell",
"bytes": "2296"
}
],
"symlink_target": ""
} |
from ftplib import FTP
f = FTP('ftp.ibiblio.org')
f.login()
f.cwd('/pub/academic/astronomy/')
entries = []
f.dir(entries.append)
print("%d entries:" % len(entries))
for entry in entries:
print(entry)
f.quit()
| {
"content_hash": "8ef002a9c7ae52343ff2eb4b8c7bfd5e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 35,
"avg_line_length": 19.454545454545453,
"alnum_prop": 0.677570093457944,
"repo_name": "jac2130/BayesGame",
"id": "a372552dfc69f0acb83d5c58cde620f77cf11e93",
"size": "275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foundations-of-python-network-programming/python3/17/dir.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "95"
},
{
"name": "C#",
"bytes": "1110"
},
{
"name": "CSS",
"bytes": "2118"
},
{
"name": "HTML",
"bytes": "166635"
},
{
"name": "JavaScript",
"bytes": "751618"
},
{
"name": "PHP",
"bytes": "339"
},
{
"name": "Perl",
"bytes": "3136"
},
{
"name": "Python",
"bytes": "1821680"
},
{
"name": "Shell",
"bytes": "1630"
},
{
"name": "Smarty",
"bytes": "7840"
}
],
"symlink_target": ""
} |
from datetime import datetime
class FpsSync:
def __init__(self, fps=None):
self.fps = fps
if not self.fps:
self.fps = 120.0
self._dtFrame = 1.0/self.fps
self.reset()
def start(self):
self.reset()
def reset(self):
self.startTime = datetime.now()
self.frameCount = 0
self._nextFrameTime = 0
def time(self):
return (datetime.now()-self.startTime).total_seconds()
def timeForNewFrame(self):
return self.time() >= self._nextFrameTime
    def doFrame(self):
        self.frameCount += 1  # track how many frames have been consumed
        self._nextFrameTime += self._dtFrame
def nextFrame(self):
if not self.timeForNewFrame():
return False
self.doFrame()
return True
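# Minimal usage sketch (not part of the original module): tick a loop at the
# configured rate and count how many frames elapse in one second.
if __name__ == '__main__':
    import time
    sync = FpsSync(fps=30.0)
    frames = 0
    while sync.time() < 1.0:
        if sync.nextFrame():
            frames += 1  # per-frame work would go here
        else:
            time.sleep(0.001)  # avoid busy-waiting between frames
    print('frames ticked in one second: %d' % frames)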
| {
"content_hash": "112c653f69f45cbbf039ab5996c6aa4b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 62,
"avg_line_length": 21.457142857142856,
"alnum_prop": 0.5699067909454061,
"repo_name": "markkorput/PyMoCap",
"id": "b4ab6b8e6ef2acf9f1778858f747b302d62f0f07",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymocap/fps_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156397"
}
],
"symlink_target": ""
} |
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts. # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import grafeas
from grafeas.models.api_list_notes_response import ApiListNotesResponse # noqa: E501
from grafeas.rest import ApiException
class TestApiListNotesResponse(unittest.TestCase):
"""ApiListNotesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApiListNotesResponse(self):
"""Test ApiListNotesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = grafeas.models.api_list_notes_response.ApiListNotesResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "89572071fdf363e4c5e9d91198054156",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 93,
"avg_line_length": 23.763157894736842,
"alnum_prop": 0.6954595791805094,
"repo_name": "grafeas/client-python",
"id": "d6d572f31d1175963a0775f6d3bca4edbe01a537",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_api_list_notes_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "558375"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.urls.base import reverse
from lykops.forms import Form_Login
from lykops.views import Base
class Privacy(Base):
'''
    This view stores a user's confidential (vault) data; it is not needed in
    this release, so it is not exposed for now.
'''
def detail(self, request):
'''
        View the user's privacy data.
'''
result = self._is_login(request)
if result[0] :
username = result[1]
else :
return HttpResponseRedirect(reverse('login'))
vault_password = request.session['vault_password']
try :
force = request.GET['force']
except :
force = False
result = self.privacy_api.get(username, vault_password=vault_password, force=force)
if result[0] :
data_dict = result[1]
error_message = ''
            self.logger.info(self.username + ' viewed the confidential data of user ' + username + ': query succeeded')
else :
data_dict = {}
            error_message = self.username + ' failed to view the confidential data of user ' + username + ', reason: ' + result[1]
self.logger.error(error_message)
return render_to_response('privacy_detail.html', {'data_dict':data_dict, 'login_user':username, 'error_message':error_message, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
def edit(self, request):
'''
        Edit the user's privacy data.
'''
result = self._is_login(request)
if result[0] :
username = result[1]
else :
return HttpResponseRedirect(reverse('login'))
vault_password = request.session['vault_password']
http_referer = 'detail'
result = self.privacy_api.get(username, vault_password=vault_password, force=True)
if result[0] :
data_dict = result[1]
error_message = ''
else :
data_dict = {}
error_message = result[1]
if not data_dict or data_dict == {} :
ranges = range(0, 10)
else :
ranges = range(0, 5)
if request.method == 'GET' :
form = Form_Login()
if error_message :
                error_message = self.username + ' failed to edit the confidential data of user ' + username + ': an error occurred during lookup, reason: ' + result[1]
self.logger.error(error_message)
else :
                self.logger.info(self.username + ' editing the confidential data of user ' + username + ': query succeeded')
return render(request, 'privacy_edit.html', {'data_dict': data_dict, 'login_user':username, 'error_message':error_message, 'form':form, 'new_list':list(ranges), 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else:
form = Form_Login(request.POST)
if form.is_valid():
update_dict = {}
for key , vaule in data_dict.items() :
keys = request.POST.get('key:' + key)
vaules = request.POST.get('vaule:' + key + ':' + vaule)
if not (keys == '' or not keys) :
new_key = keys
else :
new_key = key
if not (vaules == '' or not vaules) :
new_vaule = vaules
else :
new_vaule = vaule
if new_key in update_dict :
                        error_message = self.username + ' failed to edit the confidential data of user ' + username + ', reason: duplicate key ' + new_key
self.logger.error(error_message)
return render(request, 'result.html', {'error_message' : error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
update_dict[new_key] = new_vaule
for i in ranges :
keys = request.POST.get('key:' + str(i))
if not (keys == '' or not keys) :
if keys in update_dict :
                            error_message = self.username + ' failed to edit the confidential data of user ' + username + ', reason: duplicate key ' + new_key
self.logger.error(error_message)
return render(request, 'result.html', {'error_message' : error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
vaules = request.POST.get('vaule:' + str(i))
if keys == vaules :
                            error_message = self.username + ' failed to edit the confidential data of user ' + username + ', reason: the key and the value must not be the same'
self.logger.error(error_message)
return render(request, 'result.html', {'error_message' : error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
update_dict[keys] = vaules
result = self.privacy_api.save(username, update_dict, vault_password)
if not result[0] :
                    error_message = self.username + ' failed to edit the confidential data of user ' + username + ': an error occurred while saving the submission, reason: ' + result[1]
self.logger.error(error_message)
return render(request, 'result.html', {'error_message' : error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
else :
                    self.logger.info(self.username + ' edited the confidential data of user ' + username + ': submitted and saved successfully')
return HttpResponseRedirect(reverse('privacy_detail'))
| {
"content_hash": "eab6ebba89184bc87b2448b6a7a79764",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 243,
"avg_line_length": 44.39393939393939,
"alnum_prop": 0.5063139931740614,
"repo_name": "lykops/lykops",
"id": "370f114f1e4319dbaa89bceffb641e17554f2c7b",
"size": "6304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lykops/sysadmin/privacy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1446589"
}
],
"symlink_target": ""
} |
from random import randint
from os import listdir, chdir, makedirs
from os.path import isdir, join, dirname, abspath
from argh.decorators import arg
from jinja2 import Template
import yaml
from colorama import Fore
from elements import all
help = "Bootstrap a service with all you need"
@arg('name', default=all[randint(0, len(all) - 1)], help=help)
def new(name):
working_dir = dirname(abspath(__file__)) + '/..'
print working_dir
print Fore.BLUE + "\nCreating service folder..."
makedirs(name)
print Fore.CYAN + "Done!\n"
with open('%s/layouts/default.yml' % working_dir) as f:
layout = yaml.load(Template(f.read()).render(app_name=name))
print Fore.RED + "Creating layout..."
create_service(1, layout, '%s/templates' % working_dir, name, name)
print Fore.BLUE + "\nYou're service layout is ready" + Fore.RESET
return ""
def create_service(deep, layout, templates_dir, current_dir, app_name):
for item in layout:
if isinstance(item, dict):
path = "%s/%s" % (current_dir, item.keys()[0])
makedirs(path)
print Fore.BLUE + " "*deep + "|- %s/" % item.keys()[0]
create_service(deep+1, item[item.keys()[0]], templates_dir, path, app_name)
elif item.endswith("/"):
path = "%s/%s" % (current_dir, item)
makedirs(path)
print Fore.BLUE + " "*deep + "|- %s/" % item
else:
with open("%s/%s" % (templates_dir, item)) as f:
content = f.read()
template = Template(content)
content = template.render(app_name=app_name)
with open("%s/%s" % (current_dir, item), 'w') as f:
f.write(content)
print Fore.CYAN + " "*deep + "|- %s" % item
| {
"content_hash": "e4236d202915135446874aedbd1b61af",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 34.583333333333336,
"alnum_prop": 0.6295180722891566,
"repo_name": "PressLabs/lithium",
"id": "695288691f291651efe42ddc2c1a30b3b35fc836",
"size": "1660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lithium/manage/commands/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22987"
}
],
"symlink_target": ""
} |
GEOJSON_GEOMETRY_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "http://json-schema.org/geojson/geometry.json#",
"title": "geometry",
"description": "One geometry as defined by GeoJSON",
"type": "object",
"required": ["type", "coordinates"],
"oneOf": [
{
"title": "Point",
"properties": {
"type": {"enum": ["Point"]},
"coordinates": {"$ref": "#/definitions/position"}
}
},
{
"title": "MultiPoint",
"properties": {
"type": {"enum": ["MultiPoint"]},
"coordinates": {"$ref": "#/definitions/positionArray"}
}
},
{
"title": "LineString",
"properties": {
"type": {"enum": ["LineString"]},
"coordinates": {"$ref": "#/definitions/lineString"}
}
},
{
"title": "MultiLineString",
"properties": {
"type": {"enum": ["MultiLineString"]},
"coordinates": {
"type": "array",
"items": {"$ref": "#/definitions/lineString"}
}
}
},
{
"title": "Polygon",
"properties": {
"type": {"enum": ["Polygon"]},
"coordinates": {"$ref": "#/definitions/polygon"}
}
},
{
"title": "MultiPolygon",
"properties": {
"type": {"enum": ["MultiPolygon"]},
"coordinates": {
"type": "array",
"items": {"$ref": "#/definitions/polygon"}
}
}
}
],
"definitions": {
"position": {
"description": "A single position",
"type": "array",
"minItems": 2,
"items": [{"type": "number"}, {"type": "number"}],
"additionalItems": False
},
"positionArray": {
"description": "An array of positions",
"type": "array",
"items": {"$ref": "#/definitions/position"}
},
"lineString": {
"description": "An array of two or more positions",
"allOf": [
{"$ref": "#/definitions/positionArray"},
{"minItems": 2}
]
},
"linearRing": {
"description": "An array of four positions where the first equals the last",
"allOf": [
{"$ref": "#/definitions/positionArray"},
{"minItems": 4}
]
},
"polygon": {
"description": "An array of linear rings",
"type": "array",
"items": {"$ref": "#/definitions/linearRing"}
}
}
}
GEOJSON_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "http://json-schema.org/geojson/geojson.json#",
"title": "Geo JSON object",
"description": "Schema for a Geo JSON object",
"type": "object",
"required": ["type"],
"oneOf": [
{"$ref": "http://json-schema.org/geojson/geometry.json#"},
{"$ref": "#/definitions/feature"},
],
"definitions": {
"feature": {
"title": "Feature",
"description": "A Geo JSON feature object",
"required": ["geometry", "properties"],
"properties": {
"type": {"enum": ["Feature"]},
"geometry": {
"oneOf": [
{"type": "null"},
{"$ref": "http://json-schema.org/geojson/geometry.json#"}
]
},
"properties": {"type": ["object", "null"]},
"id": {"FIXME": "may be there, type not known (string? number?)"}
}
},
}
}
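# Minimal validation sketch (not part of the original module), assuming the
# `jsonschema` package and its RefResolver API are available; the resolver maps
# the remote geometry $ref onto the in-module GEOJSON_GEOMETRY_SCHEMA so no
# network access is needed.
if __name__ == "__main__":
    from jsonschema import RefResolver, validate

    resolver = RefResolver(
        base_uri="http://json-schema.org/geojson/geojson.json",
        referrer=GEOJSON_SCHEMA,
        store={"http://json-schema.org/geojson/geometry.json": GEOJSON_GEOMETRY_SCHEMA},
    )
    feature = {
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [19.43, -99.13]},
        "properties": {"name": "example"},
    }
    validate(feature, GEOJSON_SCHEMA, resolver=resolver)
    print("feature is valid GeoJSON")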
| {
"content_hash": "2ef008b26d647f68cf272efa0fb455b1",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 88,
"avg_line_length": 32.114754098360656,
"alnum_prop": 0.40811638591117916,
"repo_name": "6aika/issue-reporting",
"id": "1571ac2886b0457b657e2df20a308a4f15173b57",
"size": "4060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issues_geometry/schemata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "307"
},
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "8542"
},
{
"name": "JavaScript",
"bytes": "24775"
},
{
"name": "Python",
"bytes": "153728"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('venues', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('date', models.DateField(default=datetime.date.today)),
('venue', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='venues.Venue')),
],
),
]
| {
"content_hash": "4b47228a518bcd7f92d2d1afd09966f3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 132,
"avg_line_length": 29.576923076923077,
"alnum_prop": 0.6007802340702211,
"repo_name": "FlowFX/reggae-cdmx",
"id": "c3a8895992728383ac2f78c7e196bf2987c2a3e9",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/events/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1006"
},
{
"name": "HTML",
"bytes": "7710"
},
{
"name": "Python",
"bytes": "43589"
}
],
"symlink_target": ""
} |
import os
import shutil
import struct
import tempfile
import time
import zlib
# multiprocessing.Lock is the portable factory; multiprocessing.synchronize.Lock
# cannot be instantiated directly on Python 3.
from multiprocessing import Lock
from subprocess import PIPE, Popen
from PIL import Image
from utils import *
pathlock = Lock() # type: Lock
class TextureConvert(object):
def __init__(self):
self.args = get_args()
self.tool = "PVRTexToolCLI"
pass
def execute(self):
print("BuildTool excute >")
pass
def get_texture_format(self, option, exists_alpha=True):
args = get_args()
if args.convert_tool == "PVRTexToolCLI":
if option == "ETC1":
return "ETC1"
elif option == "ETC2":
if exists_alpha:
return "ETC2_RGBA"
else:
return "ETC2_RGB"
elif args.convert_tool == "etctool":
if option == "ETC1":
return "ETC1"
elif option == "ETC2":
if exists_alpha:
return "RGBA8"
else:
return "RGB8"
elif args.convert_tool == "TexturePacker":
if option == "PVRTC4":
if exists_alpha:
return "PVRTCI_4BPP_RGBA"
else:
return "PVRTCI_4BPP_RGB"
if option == "ETC1":
return "ETC1_RGB"
elif option == "ETC2":
if exists_alpha:
return "RGBA8"
else:
return "RGB8"
pass
return None
def get_texture_quality(self, option):
args = get_args()
if args.convert_tool == "PVRTexToolCLI":
# 'pvrtcfastest' | 'pvrtcfast' | 'pvrtcnormal' | 'pvrtchigh' | 'pvrtcbest' | 'etcfast' | 'etcslow' | 'etcfastperceptual' | 'etcslowperceptual'
# | 'astcveryfast' | 'astcfast' | 'astcmedium' | 'astcthorough' | 'astcexhaustive']
if option == "ETC1":
return "etcfast"
elif option == "PVRTC":
return "pvrtcbest"
elif args.convert_tool == "etctool":
pass
elif args.convert_tool == "TexturePacker":
pass
return None
def pvr_compress_ccz(self, temp_file, destfile):
pvr = open(temp_file, 'rb')
pvrData = pvr.read()
pvrccz = open(destfile, "wb")
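        # CCZ container header (big-endian), matching the pack() call below:
        # 4-byte magic b"CCZ!", 16-bit compression type (0 = zlib), 16-bit
        # format version, 32-bit reserved word, 32-bit uncompressed length;
        # the zlib-compressed payload follows immediately after.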
pvrccz.write(struct.pack(">4sHHII",b"CCZ!",0,1,0,len(pvrData)))
pvrccz.write(zlib.compress(pvrData))
pvr.close()
pvrccz.close()
return True
def _convert_to_texture(self, input_file, output_file, exists_alpha):
args = get_args()
if args.convert_tool == "PVRTexToolCLI":
command = "%s -i %s -o %s" % (args.convert_tool, input_file, output_file)
texture_format = self.get_texture_format(args.image_option, exists_alpha)
if texture_format is not None:
command = command + " -f %s" % (texture_format)
texture_quality = self.get_texture_quality(args.image_option)
if texture_quality is not None:
command = command + " -q %s" % (texture_quality)
elif args.convert_tool == "etctool":
command = "%s %s -output %s -j 4" % ("etctool", input_file, output_file)
texture_format = self.get_texture_format(args.image_option, exists_alpha)
if texture_format is not None:
command = command + " -format %s" % (texture_format)
elif args.convert_tool == "TexturePacker":
command = "TexturePacker \"%s\" --sheet \"%s\" " % (input_file, output_file)
if args.log:
log("convert_to_texture command > %s" % command)
def convert_to_texture(self, input_path, _suffix="", _zlib=True):
log("convert_to_texture image path > %s"%(input_path))
start_time = time.time()
args = get_args()
dirname, _ = os.path.split(os.path.relpath(input_path, args.work_path))
output_dir = os.path.join(args.output, dirname)
tempdir = None
if args.tempdir:
tempdir = os.path.join(args.tempdir, dirname)
_, filename = os.path.split(input_path)
pre, ext = os.path.splitext(filename)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
exists_alpha = ext != ".jpg"
dst_rgb_pvr_file = None
dst_a_pvr_file = None
if _zlib:
dst_rgb_pvr_file = os.path.join(output_dir, pre + _suffix + ".pvr.ccz")
else:
dst_rgb_pvr_file = os.path.join(output_dir, pre + _suffix + ".pvr")
if args.image_option == "ETC1":
if _zlib:
dst_a_pvr_file = os.path.join(output_dir, pre + _suffix + ".pvr.ccz@alpha")
else:
dst_a_pvr_file = os.path.join(output_dir, pre + _suffix + ".pvr@alpha")
if os.path.exists(dst_rgb_pvr_file):
# print("input_path > ", input_path)
# print("input_path time > ", timestamp_to_time(get_file_modifytime(input_path)))
# print("dst_rgb_pvr_file > ", dst_rgb_pvr_file)
# print("dst_rgb_pvr_file time > ", timestamp_to_time(get_file_modifytime(dst_rgb_pvr_file)))
if get_file_modifytime(input_path) < get_file_modifytime(dst_rgb_pvr_file):
if args.image_option == "ETC1":
if dst_a_pvr_file is not None:
return {"file": dst_rgb_pvr_file, "alpha": dst_a_pvr_file, "ext": ext}
return {"file": dst_rgb_pvr_file, "ext": ext}
pass
newtempdir = None
try:
if tempdir is not None:
newtempdir = tempdir
if not os.path.isdir(newtempdir):
                    # Re-check under the lock so that only one worker creates
                    # the directory; "with" guarantees the lock is released
                    # even if os.makedirs() raises.
                    with pathlock:
                        if not os.path.isdir(newtempdir):
                            os.makedirs(newtempdir)
else:
newtempdir = tempfile.mkdtemp()
tmp_rgb_file = None
tmp_a_file = None
if args.image_option == "ETC1":
if exists_alpha:
tmp_rgb_file = os.path.join(newtempdir, pre.replace(" ", "_") + _suffix + "_rgb.png")
tmp_a_file = os.path.join(newtempdir, pre.replace(" ", "_") + _suffix + "_a.png")
with open(input_path, 'rb') as f:
im = Image.open(f)
im = im.convert('RGBA')
alphadata = im.tobytes("raw", "A")
im.convert('RGB').save(tmp_rgb_file)
Image.frombytes("L", im.size, alphadata).save(tmp_a_file)
del im
pass
pass
else:
tmp_rgb_file = input_path
else:
tmp_rgb_file = input_path
tmp_rgb_pvr_file = os.path.join(newtempdir, pre.replace(" ", "_") + _suffix + ".pvr")
command = "%s -i %s -o %s" % ("PVRTexToolCLI", tmp_rgb_file, tmp_rgb_pvr_file)
texture_format = self.get_texture_format(args.image_option, exists_alpha)
if texture_format is not None:
command = command + " -f %s" % (texture_format)
texture_quality = self.get_texture_quality(args.image_option)
if texture_quality is not None:
command = command + " -q %s" % (texture_quality)
# command = "%s %s -output %s -j 4" % ("etctool", tmp_rgb_file, tmp_rgb_pvr_file)
# texture_format = self.get_etc_texture_format(args.image_option, exists_alpha)
# if texture_format is not None:
# command = command + " -format %s" % (texture_format)
if args.log:
log("convert_to_texture command > %s" % command)
p = Popen(command, stdout=PIPE, shell=True, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
log("Non zero exit code:%s executing: %s" % (p.returncode, command))
log(err)
return
tmp_a_pvr_file = None
if args.image_option == "ETC1" and tmp_a_file is not None:
tmp_a_pvr_file = os.path.join(newtempdir, pre.replace(" ", "_") + _suffix + "_a.pvr")
command = "%s -i %s -o %s" % ("PVRTexToolCLI", tmp_a_file, tmp_a_pvr_file)
texture_format = self.get_texture_format(args.image_option, exists_alpha)
if texture_format is not None:
command = command + " -f %s" % (texture_format)
texture_quality = self.get_texture_quality(args.image_option)
if texture_quality is not None:
command = command + " -q %s" % (texture_quality)
# command = "%s %s -output %s -j 4" % ("etctool", tmp_a_file, tmp_a_pvr_file)
# texture_format = self.get_etc_texture_format(args.image_option, exists_alpha)
# if texture_format is not None:
# command = command + " -format %s" % (texture_format)
if args.log:
log("convert_to_texture command alpha > %s" % command)
p = Popen(command, stdout=PIPE, shell=True, stderr=PIPE)
out, err = p.communicate()
if p.returncode != 0:
log("Non zero exit code:%s executing: %s" % (p.returncode, command))
log(err)
return
if _zlib:
if self.pvr_compress_ccz(tmp_rgb_pvr_file, dst_rgb_pvr_file):
pass
if args.image_option == "ETC1" and dst_a_pvr_file is not None:
if self.pvr_compress_ccz(tmp_a_pvr_file, dst_a_pvr_file):
pass
pass
else:
os.rename(tmp_rgb_pvr_file, dst_rgb_pvr_file)
except Exception as e:
raise
finally:
if tempdir is None:
shutil.rmtree(newtempdir)
        elapsed_seconds = round((time.time() - start_time), 2)
        log("convert %s elapsed time %.2fs" % (input_path, elapsed_seconds))
if args.image_option == "ETC1":
if dst_a_pvr_file is not None:
return {"file": dst_rgb_pvr_file, "alpha": dst_a_pvr_file, "ext": ext}
pass
return {"file": dst_rgb_pvr_file, "ext": ext}
| {
"content_hash": "05fdf0d21ed0cf9c0b8a6b5ef7863299",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 154,
"avg_line_length": 42.864754098360656,
"alnum_prop": 0.5050196003442011,
"repo_name": "guobin8205/texturetool",
"id": "db22950e9039b681a52631da800dc13dd69243e8",
"size": "10513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/textureconvert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92723"
}
],
"symlink_target": ""
} |
import unittest
from spyne.client.zeromq import ZeroMQClient
from spyne.test.interop._test_soap_client_base import SpyneClientTestBase
from spyne.test.interop.server.soap11.soap_http_basic import soap11_application
class TestSpyneZmqClient(SpyneClientTestBase, unittest.TestCase):
def setUp(self):
SpyneClientTestBase.setUp(self, 'zeromq')
self.client = ZeroMQClient('tcp://localhost:55555', soap11_application)
self.ns = "spyne.test.interop.server._service"
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "b2da10fa6cb90085db211c044ea7afd1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 31.764705882352942,
"alnum_prop": 0.7425925925925926,
"repo_name": "deevarvar/myLab",
"id": "6bb69e7bff68705495541937aaec564d10582ad1",
"size": "1336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baidu_code/soap_mockserver/spyne/test/interop/test_soap_client_zeromq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "850"
},
{
"name": "C",
"bytes": "856044"
},
{
"name": "C++",
"bytes": "2988"
},
{
"name": "CSS",
"bytes": "6488"
},
{
"name": "DIGITAL Command Language",
"bytes": "282400"
},
{
"name": "HTML",
"bytes": "119253"
},
{
"name": "JavaScript",
"bytes": "445705"
},
{
"name": "Makefile",
"bytes": "20119"
},
{
"name": "Objective-C",
"bytes": "108"
},
{
"name": "PHP",
"bytes": "2502"
},
{
"name": "Python",
"bytes": "2305843"
},
{
"name": "Roff",
"bytes": "106"
},
{
"name": "Ruby",
"bytes": "478"
},
{
"name": "Shell",
"bytes": "68858"
}
],
"symlink_target": ""
} |
import unittest
from programy.processors.pre.cleanup import CleanUpPreProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config import BrainConfiguration, BotConfiguration
class PreCleanUpTests(unittest.TestCase):
def setUp(self):
self.bot = Bot(Brain(BrainConfiguration()), config=BotConfiguration())
def test_pre_cleanup(self):
processor = CleanUpPreProcessor()
result = processor.process(self.bot, "testid", "Hello")
self.assertIsNotNone(result)
self.assertEqual("HELLO", result)
| {
"content_hash": "facde1d1f1ece4fb8dcbe1ad9608b49e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 31.72222222222222,
"alnum_prop": 0.7425569176882661,
"repo_name": "Thielak/program-y",
"id": "0063cba1f09209497305810ee6d1bda91c427ec6",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/rc",
"path": "src/test/processors/pre/test_cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "704509"
},
{
"name": "Shell",
"bytes": "1930"
}
],
"symlink_target": ""
} |
"""
Copyright [1999-2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
"""
import logging
import collections
'''
Data structure backing the Risk_Allele_Orientation class
'''
Risk_Allele_Orientation_Prototype = collections.namedtuple(
'Risk_Allele_Orientation_Prototype',
[
# The accession of the SNP
'rs_id',
# The base in the allele reported by GWAS
'base_at_snp_in_gwas',
# The base in the reference
'base_at_snp_in_reference',
# The outcome of: "base_at_snp_in_gwas == base_at_snp_in_reference"
'risk_allele_present_in_reference',
# The urls from which the data was obtained.
'gwas_url',
'ensembl_url',
]
)
class Risk_Allele_Orientation(Risk_Allele_Orientation_Prototype):
'''
Risk_Allele_Orientation is used to store the orientation of a risk
allele from gwas.
The interesting bit is in the field "risk_allele_present_in_reference",
the other fields are used for generating meaningful messages when
exceptions are thrown.
'''
def __str__(self):
msg = ""
msg += " rs_id = " + self.rs_id + "\n"
msg += " base_at_snp_in_gwas = " + str(self.base_at_snp_in_gwas) + "\n"
msg += " base_at_snp_in_reference = " + str(self.base_at_snp_in_reference) + "\n"
msg += " \n"
msg += " Result: risk_allele_present_in_reference = " + str(self.risk_allele_present_in_reference) + "\n"
msg += " \n"
msg += " See:\n"
msg += " gwas_url = " + self.gwas_url + "\n"
msg += " ensembl_url = " + self.ensembl_url + "\n"
return msg
class _exception_with_risk_allele_orientations(Exception):
def __init__(self, risk_allele_orientations, *args):
self.risk_allele_orientations = risk_allele_orientations
def description(self):
return "Overwrite this with a generic description of the exception."
def __str__(self):
risk_allele_orientations = self.risk_allele_orientations
msg = self.description() + "\n"
msg += "The gwas association has %i risk alleles:\n" % len(risk_allele_orientations)
for risk_allele_orientation in risk_allele_orientations:
msg += str(risk_allele_orientation) + "\n"
return msg
class some_alleles_present_in_reference_others_not_exception(_exception_with_risk_allele_orientations):
def description(self):
return "Some alleles are present in the reference, others aren't."
class none_of_the_risk_alleles_is_a_substitution_exception(_exception_with_risk_allele_orientations):
def description(self):
return "None of the risk alleles is a substitution."
class variant_mapping_is_ambiguous_exception(Exception):
def description(self):
return "The variant was mapped to more than one location in the reference. (This never seems to happen.)"
class no_dbsnp_accession_for_snp_exception(Exception):
pass
class base_in_allele_missing_exception(Exception):
pass
class gwas_data_integrity_exception(Exception):
pass
class ensembl_data_integrity_exception(Exception):
pass
class cant_determine_base_at_snp_in_reference_exception(Exception):
pass
def gwas_risk_alleles_present_in_reference(riskAlleles):
risk_allele_orientations = compute_risk_allele_orientations(riskAlleles)
risk_allele_orientation_consensus = compute_risk_allele_orientation_consensus(risk_allele_orientations)
if risk_allele_orientation_consensus == Risk_Allele_Orientation_Consensus.no_gwas_risk_allele_present_in_reference:
return False
if risk_allele_orientation_consensus == Risk_Allele_Orientation_Consensus.all_gwas_risk_alleles_present_in_reference:
return True
# This should never happen.
raise Exception
class Risk_Allele_Orientation_Consensus:
'''
This class is used like an enum to report the outcome of analysing the
orientation of the gwas risk alleles belonging to the same association.
It handles the two expected cases that are straightforward to report:
- all_gwas_risk_alleles_present_in_reference and
- no_gwas_risk_allele_present_in_reference
Exceptions to these are handled by raising exceptions.
There are only two possibilities, so it might be tempting to replace
them with a boolean, however, this has not been done, because the two
outcomes are not the opposite of each other.
The opposite of "all_gwas_risk_alleles_present_in_reference" is not
that "no_gwas_risk_allele_present_in_reference". The opposite would
be: that "at least one risk allele is not present in the reference, but
others may be". This case, however means that the there may be no
consensus on the orientation and this has to be handled differently.
'''
all_gwas_risk_alleles_present_in_reference, \
no_gwas_risk_allele_present_in_reference \
= range(2)
def compute_risk_allele_orientation_consensus(risk_allele_orientations):
'''
    Figures out whether the risk alleles reported by gwas are present in
the reference.
For any gwas association there may be multiple risk alleles, so this
also deals with special cases arising from that.
Return type: A value from the "enum" Risk_Allele_Orientation_Consensus
Returns
- all_gwas_risk_alleles_present_in_reference, if all risk alleles
from gwas are present in the reference,
- no_gwas_risk_allele_present_in_reference, if none of the risk
alleles are present in the reference.
Exceptions:
none_of_the_risk_alleles_is_a_substitution_exception:
if a risk allele in gwas doesn't have a base assigned to it, this risk
allele is skipped. In that case gwas puts a question mark where the
base should be. If none of the gwas associations have a base assigned,
a "none_of_the_risk_alleles_is_a_substitution_exception" is raised.
some_alleles_present_in_reference_others_not_exception:
It is possible to have multiple risk alleles in an association. Some
may be present in the reference, others not. This is an inconsistent
case. If it arises, a "some_alleles_present_in_reference_others_not_exception" is
raised.
no_dbsnp_accession_for_snp_exception:
If snp reported by GWAS doesn't have a dbSNP accession.
(e.g.: http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/riskAlleles/16576159/snp)
cant_determine_base_at_snp_in_reference_exception:
Other possible exceptions:
- base_in_allele_missing_exception,
- gwas_data_integrity_exception (Not necessarily gwas's fault, just more weird stuff),
- variant_mapping_is_ambiguous_exception and
- ensembl_data_integrity_exception.
'''
# Flags for keeping track, if any of these outcomes it true for all risk
# alleles of the gwas association.
#
all_gwas_risk_alleles_present_in_reference = True
no_gwas_risk_allele_present_in_reference = True
none_of_the_risk_alleles_is_a_substitution = True
for risk_allele_orientation in risk_allele_orientations:
no_base_in_gwas = risk_allele_orientation.base_at_snp_in_gwas == '?'
no_base_in_ensembl = risk_allele_orientation.base_at_snp_in_reference is None
no_risk_allele_is_a_substitution = no_base_in_gwas and no_base_in_ensembl
none_of_the_risk_alleles_is_a_substitution = none_of_the_risk_alleles_is_a_substitution and no_risk_allele_is_a_substitution
all_gwas_risk_alleles_present_in_reference = all_gwas_risk_alleles_present_in_reference and risk_allele_orientation.risk_allele_present_in_reference
no_gwas_risk_allele_present_in_reference = no_gwas_risk_allele_present_in_reference and not(risk_allele_orientation.risk_allele_present_in_reference)
if none_of_the_risk_alleles_is_a_substitution:
raise none_of_the_risk_alleles_is_a_substitution_exception(risk_allele_orientations)
if all_gwas_risk_alleles_present_in_reference:
return Risk_Allele_Orientation_Consensus.all_gwas_risk_alleles_present_in_reference
if no_gwas_risk_allele_present_in_reference:
return Risk_Allele_Orientation_Consensus.no_gwas_risk_allele_present_in_reference
raise some_alleles_present_in_reference_others_not_exception(risk_allele_orientations)
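# --- Minimal usage sketch (illustrative; not part of the original module). ---
# A single risk allele whose GWAS base matches the reference base yields the
# "all present" consensus. The field values below are made up for illustration.
def _risk_allele_consensus_example():
    orientation = Risk_Allele_Orientation(
        rs_id = 'rs56116432',
        base_at_snp_in_gwas = 'C',
        base_at_snp_in_reference = 'C',
        risk_allele_present_in_reference = True,
        gwas_url = 'http://example.org/gwas',
        ensembl_url = 'http://example.org/ensembl',
    )
    consensus = compute_risk_allele_orientation_consensus([orientation])
    assert consensus == Risk_Allele_Orientation_Consensus.all_gwas_risk_alleles_present_in_reference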
def compute_risk_allele_orientations(riskAlleles):
'''
    For the array of risk alleles returned by gwas, this checks whether
the risk allele is present in the reference or not.
It returns an array of Risk_Allele_Orientation objects.
'''
rs_id = assert_risk_alleles_are_from_same_snp(riskAlleles)
from postgap.REST import Variation400error
import requests
try:
base_at_snp_in_reference, ensembl_source_url = fetch_base_at_snp_in_reference(rs_id)
    except requests.exceptions.HTTPError as e:
looks_like_dbSNP_accession = rs_id.startswith("rs")
if not(looks_like_dbSNP_accession):
# This can happen, e.g. here:
#
# http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/riskAlleles/16576159/snp
#
# (Might have been unpublished by the time you read this)
#
raise no_dbsnp_accession_for_snp_exception("The snp " + rs_id + " has no dbSNP accession.")
import json
# It seems to manifest as a "Bad Request" like this:
#
# Got an HTTPError 400 Client Error: Bad Request for url:
# http://grch37.rest.ensembl.org/variation/homo_sapiens/rs24449894?content-type=application/json for snprs24449894
#
raise cant_determine_base_at_snp_in_reference_exception("Got an HTTPError " + str(e) + " for snp " + rs_id + " and risk alleles:\n\n" + json.dumps(riskAlleles))
except Variation400error as e:
import json
logging.debug("Skipping variant, got: " + str(e.response));
raise cant_determine_base_at_snp_in_reference_exception("Unmapped snp: " + rs_id + " with risk alleles:\n\n" + json.dumps(riskAlleles))
risk_allele_orientations = []
for riskAllele in riskAlleles:
riskAlleleName = riskAllele["riskAlleleName"]
(rs_id, base_at_snp_in_gwas) = riskAlleleName.split("-")
current_gwas_risk_allele_present_in_reference = base_at_snp_in_gwas == base_at_snp_in_reference
risk_allele_orientation = Risk_Allele_Orientation(
rs_id = rs_id,
gwas_url = "No longer reported",
ensembl_url = ensembl_source_url,
risk_allele_present_in_reference = current_gwas_risk_allele_present_in_reference,
base_at_snp_in_gwas = base_at_snp_in_gwas,
base_at_snp_in_reference = base_at_snp_in_reference,
)
risk_allele_orientations.append(risk_allele_orientation)
return risk_allele_orientations
def assert_risk_alleles_are_from_same_snp(riskAlleles):
'''
This is an integrity check.
All risk alleles from a gwas association should be from the same snp.
'''
seen_rs_id = ""
for riskAllele in riskAlleles:
riskAlleleName = riskAllele["riskAlleleName"]
try:
(raw_rs_id, nucleotide_in_risk_allele) = riskAlleleName.split("-")
        except ValueError as e:
import json
if "-" not in riskAlleleName:
# Happens here:
# http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/singleNucleotidePolymorphisms/6167/riskAlleles
#
# One of the risk alleles is:
#
# {
# "riskAlleleName": "rs4420638",
# ...
# }
#
# Including all riskAlleles in the error messages, because
# the context might be useful.
#
raise base_in_allele_missing_exception("One of the risk alleles in " + json.dumps(riskAlleles) + " has no base given!")
raise gwas_data_integrity_exception("Got a ValueError " + str(e) + " for risk allele " + riskAlleleName + ":\n\n" + json.dumps(riskAlleles))
# E.g.: On
# http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/singleNucleotidePolymorphisms/6167/riskAlleles
#
# The alleles are from the snp rs4420638. But one of them is: "
# rs4420638 -?", so the rs_id will appear as "rs4420638 " with an extra space a the end.
#
# The extra whitespace can trip up this assertion.
#
rs_id = raw_rs_id.strip()
if seen_rs_id == "":
seen_rs_id = rs_id
if seen_rs_id != rs_id:
import json
raise gwas_data_integrity_exception("More than one snp reported for the same gwas association! One snp is: " + seen_rs_id + " another is: " + rs_id + " \n\n" + json.dumps(riskAlleles))
return seen_rs_id
def fetch_base_at_snp_in_reference(rs_id):
'''
Fetches the base for this snp in the reference genome.
If there is more than one, this will raise a
variant_mapping_is_ambiguous_exception.
A snp can have more than one base, if it was mapped more than once.
'''
bases_at_snp_in_reference, source_url = fetch_bases_at_snp_in_reference(rs_id)
if len(bases_at_snp_in_reference) > 1:
raise variant_mapping_is_ambiguous_exception()
# It can happen that all of them were insertions.
if len(bases_at_snp_in_reference) == 0:
return None, source_url
return bases_at_snp_in_reference[0], source_url
def fetch_bases_at_snp_in_reference(rs_id):
'''
Fetches the bases for this snp in the reference genome.
    If the snp was mapped to more than one location and the mappings have
    different nucleotides, it returns all the nucleotides that occurred,
    each once, in an array.
If the variant is an insertion, it is skipped.
'''
mappings, source_url = fetch_variant_mappings(rs_id)
bases_seen_in_reference = dict()
for mapping in mappings:
allele_string = mapping["allele_string"];
observed_bases = allele_string.split("/")
# The first component is the reference allele according to:
#
# https://genomes-ebi.slack.com/archives/C0JUVJV6W/p1498561584548384
#
base_in_reference = observed_bases[0]
# Not using this.
bases_observed_in_other_populations = observed_bases[1:]
if base_in_reference == "-":
logging.debug("Skipping variant, because it is not a substitution in the reference, but an insertion: " + base_in_reference);
continue
bases_seen_in_reference[base_in_reference] = 1
return bases_seen_in_reference.keys(), source_url
def fetch_variant_mappings(rs_id):
'''
Queries the rest server to find all mappings of a given SNP.
Returns the hash returned by the rest server.
'''
variant_mappings_rest_call_url = "http://grch37.rest.ensembl.org/variation/homo_sapiens/%s?content-type=application/json" % rs_id
import postgap.REST
    response = postgap.REST.get(variant_mappings_rest_call_url, ext="")
'''
{
"source": "Variants (including SNPs and indels) imported from dbSNP",
"mappings": [
{
"location": "9:136131429-136131429",
"assembly_name": "GRCh37",
"end": 136131429,
"seq_region_name": "9",
"strand": 1,
"coord_system": "chromosome",
"allele_string": "C/T",
"start": 136131429
}
],
"name": "rs56116432",
"MAF": 0.00259585,
"ambiguity": "Y",
"var_class": "SNP",
"synonyms": [ ],
"evidence": [
"Frequency",
"1000Genomes",
"ESP",
"ExAC"
],
"ancestral_allele": "C",
"minor_allele": "T",
"most_severe_consequence": "non_coding_transcript_exon_variant"
}
'''
reported_name = hash["name"]
synonyms = hash["synonyms"]
response_matches_query = rs_id == reported_name or rs_id in synonyms
if not(response_matches_query):
raise ensembl_data_integrity_exception("For snp " + rs_id + " the url " + variant_mappings_rest_call_url + " reports a different name("+hash["name"]+")!")
mappings = hash["mappings"]
return mappings, variant_mappings_rest_call_url
| {
"content_hash": "48756e5197095706e24add0dbbb78618",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 187,
"avg_line_length": 33.45894736842105,
"alnum_prop": 0.7107531617693325,
"repo_name": "Ensembl/cttv024",
"id": "f6925ba94a0788c9ee5ec17c56dc2164b2a1f454",
"size": "15916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/postgap/GWAS_Lead_Snp_Orientation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4609"
},
{
"name": "Python",
"bytes": "83035"
},
{
"name": "Shell",
"bytes": "1978"
}
],
"symlink_target": ""
} |
import json
import unittest
from netengine.backends.ssh import AirOS
from ..settings import settings
__all__ = ['TestSSHAirOS']
class TestSSHAirOS(unittest.TestCase):
def setUp(self):
self.host = settings['airos-ssh']['host']
self.username = settings['airos-ssh']['username']
self.password = settings['airos-ssh']['password']
self.port = settings['airos-ssh'].get('port', 22)
self.device = AirOS(self.host, self.username, self.password, self.port)
self.device.connect()
def test_to_dict(self):
self.assertTrue(isinstance(self.device.to_dict(), dict))
def test_to_json(self):
json_string = self.device.to_json()
self.assertTrue(isinstance(json_string, basestring))
dictionary = json.loads(json_string)
self.device.disconnect()
def test_properties(self):
device = self.device
device._ubntbox
device._systemcfg
device.os
device.name
device.model
device.RAM_total
device.ethernet_standard
device.ethernet_duplex
device.wireless_channel_width
device.wireless_mode
device.wireless_channel
device.wireless_output_power
device.wireless_dbm
device.wireless_noise
device.olsr
device.disconnect()
def test_run(self):
self.device.run('ls -l')
self.device.disconnect()
def test_temp_methods(self):
device = self.device
self.assertTrue(type(device.get_ipv6_of_interface('eth0')) in [str, type(None)])
self.assertTrue(type(device.get_ipv6_of_interface('wrong')) is type(None))
device.disconnect()
def test_uptime(self):
self.assertIs(type(self.device.uptime), int)
| {
"content_hash": "dd189ba35ae51fdd36bf65ec38de9774",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 88,
"avg_line_length": 28.206349206349206,
"alnum_prop": 0.63083849184018,
"repo_name": "ninuxorg/netengine",
"id": "bf17d1cb2f51db1b512ea9b7818d1ffa54ec9ef4",
"size": "1777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ssh/airos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6783"
},
{
"name": "Python",
"bytes": "901096"
}
],
"symlink_target": ""
} |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class PaymentMethodType(models.Model):
name = models.CharField(max_length=255, unique=True)
sysname = models.CharField(max_length=96, blank=True)
def __unicode__(self):
return self.name
class PaymentMethod(models.Model):
name = models.CharField(max_length=255, unique=True)
sysname = models.CharField(max_length=96, blank=True)
payment_method_type = models.ForeignKey(PaymentMethodType, null=True, blank=True)
def __unicode__(self):
return self.name
class Transaction(models.Model):
remote_id = models.CharField(max_length=192, blank=True)
invoice = models.CharField(max_length=48, unique=True)
reference = models.CharField(max_length=48, unique=True)
price = models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True)
description = models.CharField(max_length=765, blank=True)
return_url = models.CharField(max_length=765, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
class TransactionState(models.Model):
payment_method = models.ForeignKey(PaymentMethod, null=True, blank=True)
transaction = models.ForeignKey(Transaction, null=True, blank=True)
valid_from = models.DateTimeField(null=True, blank=True)
valid_to = models.DateTimeField(null=True, blank=True)
response_message = models.TextField(null=True, blank=True)
    # machtiging (Dutch for direct-debit mandate)
first_name = models.CharField(max_length=255, blank=True)
last_name = models.CharField(max_length=255, blank=True)
email = models.CharField(max_length=255, blank=True)
account_name = models.CharField(max_length=255, blank=True)
account_number = models.CharField(max_length=32, blank=True)
    # iDEAL (Dutch online payment method)
idealurl = models.CharField(max_length=765, blank=True, null=True)
transactionkey = models.CharField(max_length=128, blank=True, null=True)
responsestatusdescription = models.CharField(max_length=128, blank=True, null=True)
idealtransactionid = models.CharField(max_length=128, blank=True, null=True)
responsestatus = models.CharField(max_length=16, blank=True, null=True)
additionalmessage = models.CharField(max_length=128, blank=True, null=True)
# state = models.ForeignKey(State, null=True, blank=True)
| {
"content_hash": "b1cf594a6acd6eadbd647564d27a2b19",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 40.23880597014925,
"alnum_prop": 0.7318249258160238,
"repo_name": "quarkness/django-buckaroo",
"id": "3df4802cb17bfba19d30bc2be26dae869fc2695d",
"size": "2696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-buckaroo/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "32991"
}
],
"symlink_target": ""
} |
from django.db import models
import datetime
from django.utils import timezone
class Book(models.Model):
title = models.CharField(max_length=200)
pub_year = models.IntegerField('year published', default=2000)
def was_published_recently(self):
return self.pub_year >= timezone.now().year - 1
def __str__(self):
return "%s %s" % (self.title, self.pub_year)
class Review(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField(default="")
review_date = models.DateTimeField('review date')
def __str__(self):
return "%s %s" % (self.title, self.review_date)
class Author(models.Model):
name = models.CharField(max_length=200)
books = models.ManyToManyField(Book)
def __str__(self):
return self.name
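# --- Minimal usage sketch (illustrative; not part of the original file). ---
# A book whose pub_year is the current year counts as recently published.
# Run inside a configured Django project, e.g. via "python manage.py shell":
#
#     book = Book(title="Example", pub_year=timezone.now().year)
#     assert book.was_published_recently()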
| {
"content_hash": "f6ebfca93b9073508171e99273841fc9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 27.40625,
"alnum_prop": 0.6590649942987458,
"repo_name": "louridas/djbr",
"id": "44e2de9169a2916458e441d873906aab84caee2d",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djbr/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "HTML",
"bytes": "9997"
},
{
"name": "Python",
"bytes": "13642"
}
],
"symlink_target": ""
} |
"""Demonstrates controlling the timing of interactive authentication using InteractiveBrowserCredential.
DeviceCodeCredential supports the same API.
"""
import os
import sys
from azure.identity import AuthenticationRequiredError, InteractiveBrowserCredential
from azure.keyvault.secrets import SecretClient
# This sample uses Key Vault only for demonstration. Any client accepting azure-identity credentials will work the same.
VAULT_URL = os.environ.get("VAULT_URL")
if not VAULT_URL:
print("This sample expects environment variable 'VAULT_URL' to be set with the URL of a Key Vault.")
sys.exit(1)
# If it's important for your application to prompt for authentication only at certain times,
# create the credential with disable_automatic_authentication=True. This configures the credential to raise
# when interactive authentication is required, instead of immediately beginning that authentication.
credential = InteractiveBrowserCredential(disable_automatic_authentication=True)
client = SecretClient(VAULT_URL, credential)
try:
secret_names = [s.name for s in client.list_properties_of_secrets()]
except AuthenticationRequiredError as ex:
# Interactive authentication is necessary to authorize the client's request. The exception carries the
# requested authentication scopes as well as any additional claims the service requires. If you pass
# both to 'authenticate', it will cache an access token for the necessary scopes.
credential.authenticate(scopes=ex.scopes, claims=ex.claims)
# the client operation should now succeed
secret_names = [s.name for s in client.list_properties_of_secrets()]
| {
"content_hash": "d726009408026bd76214ec58072104bc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 120,
"avg_line_length": 49.54545454545455,
"alnum_prop": 0.8012232415902141,
"repo_name": "Azure/azure-sdk-for-python",
"id": "584be63de237347fa6c8f9cdf52a4e377196845d",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/identity/azure-identity/samples/control_interactive_prompts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from mangopaysdk.entities.entitybase import EntityBase
class BankAccount(EntityBase):
"""Bank Account entity."""
def __init__(self, id = None):
self.UserId = None
# Type of bank account
self.Type = 'IBAN'
self.OwnerName = None
self.OwnerAddress = None
# must be valid ^[a-zA-Z]{2}\d{2}\s*(\w{4}\s*){2,7}\w{1,4}
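        # e.g. "FR76 3000 6000 0112 3456 7890 189" (hypothetical, pattern-valid)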
self.IBAN = None
# example BREXPLPWKRA
self.BIC = None
        super(BankAccount, self).__init__(id)
def GetReadOnlyProperties(self):
properties = super(BankAccount, self).GetReadOnlyProperties()
        properties.append('UserId')
        return properties
| {
"content_hash": "cfbb0aa37c2aa9a7839a346a6dd5e42b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 31.09090909090909,
"alnum_prop": 0.5935672514619883,
"repo_name": "ebewe/mangopay2-python-sdk",
"id": "49fa4065c244540bbb043f94191b28f433f560ec",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mangopaysdk/entities/bankaccount.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import boto3
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings
from nose.tools import assert_raises
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
@mock_logs
def test_log_group_create():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response['logGroups']) == 1
response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_exceptions():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'dummp-stream'
conn.create_log_group(logGroupName=log_group_name)
with assert_raises(ClientError):
conn.create_log_group(logGroupName=log_group_name)
    # describe_log_groups is not implemented yet
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
with assert_raises(ClientError):
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[
{
'timestamp': 0,
'message': 'line'
},
],
)
with assert_raises(ClientError):
conn.put_log_events(
logGroupName=log_group_name,
logStreamName="invalid-stream",
logEvents=[
{
'timestamp': 0,
'message': 'line'
},
],
)
@mock_logs
def test_put_logs():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'stream'
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
messages = [
{'timestamp': 0, 'message': 'hello'},
{'timestamp': 0, 'message': 'world'}
]
putRes = conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=messages
)
res = conn.get_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
events = res['events']
nextSequenceToken = putRes['nextSequenceToken']
    assert isinstance(nextSequenceToken, six.string_types)
    assert len(nextSequenceToken) == 56
events.should.have.length_of(2)
@mock_logs
def test_filter_logs_interleaved():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
log_stream_name = 'stream'
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
messages = [
{'timestamp': 0, 'message': 'hello'},
{'timestamp': 0, 'message': 'world'}
]
conn.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=messages
)
res = conn.filter_log_events(
logGroupName=log_group_name,
logStreamNames=[log_stream_name],
interleaved=True,
)
events = res['events']
events.should.have.length_of(2)
| {
"content_hash": "802ee6242570f62335356f8f0f4a1cc9",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 74,
"avg_line_length": 27.733870967741936,
"alnum_prop": 0.6115149752835126,
"repo_name": "botify-labs/moto",
"id": "05bd3c8238443a9b7c95358b2dba68f6a564cd24",
"size": "3439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logs/test_logs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "4578457"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
import os.path
import sys
import vim
class Citation(object):
@staticmethod
def connect():
"""
Returns source from builder,
printing any errors from python to the vim console.
wrapping everything in "try: except:" is bad practise generally,
but in this case ensures all errors can actually be reported
"""
try:
set_script_path()
from citation_vim.builder import Builder
from citation_vim.context_loader import ContextLoader
return Builder(ContextLoader().context).build_source()
except:
print_exception()
def set_script_path():
script_path = os.path.join(vim.eval('s:script_path'), '../../../python')
sys.path.insert(0, script_path)
def print_exception():
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print("Citation.vim error:\n" + "".join(line for line in lines))
| {
"content_hash": "5a3d734ff514cc00568a355ff64b4994",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 30.147058823529413,
"alnum_prop": 0.6370731707317073,
"repo_name": "rafaqz/citation.vim",
"id": "4868efe97373caa295aa1d43430c6d3f978eedf3",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/citation_vim/citation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40078"
},
{
"name": "Vim script",
"bytes": "13895"
}
],
"symlink_target": ""
} |
"""
Verbose and colourful traceback formatting.
**ColorTB**
I've always found it a bit hard to visually parse tracebacks in Python. The
ColorTB class is a solution to that problem. It colors the different parts of a
traceback in a manner similar to what you would expect from a syntax-highlighting
text editor.
Installation instructions for ColorTB::
import sys,ultratb
sys.excepthook = ultratb.ColorTB()
**VerboseTB**
I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
of useful info when a traceback occurs. Ping originally had it spit out HTML
and intended it for CGI programmers, but why should they have all the fun? I
altered it to spit out colored text to the terminal. It's a bit overwhelming,
but kind of neat, and maybe useful for long-running programs that you believe
are bug-free. If a crash *does* occur in that type of program you want details.
Give it a shot--you'll love it or you'll hate it.
.. note::
The Verbose mode prints the variables currently visible where the exception
happened (shortening their strings if too long). This can potentially be
very slow, if you happen to have a huge data structure whose string
representation is complex to compute. Your computer may appear to freeze for
a while with cpu usage at 100%. If this occurs, you can cancel the traceback
with Ctrl-C (maybe hitting it more than once).
If you encounter this kind of situation often, you may want to use the
Verbose_novars mode instead of the regular Verbose, which avoids formatting
variables (but otherwise includes the information and context given by
Verbose).
.. note::
    The verbose mode prints all variables in the stack, which means it can
    potentially leak sensitive information like access keys or unencrypted
    passwords.
Installation instructions for VerboseTB::
import sys,ultratb
sys.excepthook = ultratb.VerboseTB()
Note: Much of the code in this module was lifted verbatim from the standard
library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
Color schemes
-------------
The colors are defined in the class TBTools through the use of the
ColorSchemeTable class. Currently the following exist:
- NoColor: allows all of this module to be used in any terminal (the color
escapes are just dummy blank strings).
- Linux: is meant to look good in a terminal like the Linux console (black
or very dark background).
- LightBG: similar to Linux but swaps dark/light colors to be more readable
in light background terminals.
- Neutral: a neutral color scheme that should be readable on both light and
dark background
You can implement other color schemes easily, the syntax is fairly
self-explanatory. Please send back new schemes you develop to the author for
possible inclusion in future releases.
Inheritance diagram:
.. inheritance-diagram:: IPython.core.ultratb
:parts: 3
"""
#*****************************************************************************
# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import inspect
import linecache
import pydoc
import sys
import time
import traceback
from types import TracebackType
from typing import Tuple, List, Any, Optional
import stack_data
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.styles import get_style_by_name
# IPython's own modules
from IPython import get_ipython
from IPython.core import debugger
from IPython.core.display_trap import DisplayTrap
from IPython.core.excolors import exception_colors
from IPython.utils import path as util_path
from IPython.utils import py3compat
from IPython.utils.terminal import get_terminal_size
import IPython.utils.colorable as colorable
# Globals
# amount of space to put line numbers before verbose tracebacks
INDENT_SIZE = 8
# Default color scheme. This is used, for example, by the traceback
# formatter. When running in an actual IPython instance, the user's rc.colors
# value is used, but having a module global makes this functionality available
# to users of ultratb who are NOT running inside ipython.
DEFAULT_SCHEME = 'NoColor'
# ---------------------------------------------------------------------------
# Code begins
# Helper function -- largely belongs to VerboseTB, but we need the same
# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
# can be recognized properly by ipython.el's py-traceback-line-re
# (SyntaxErrors have to be treated specially because they have no traceback)
def _format_traceback_lines(lines, Colors, has_colors: bool, lvals):
"""
    Format traceback lines with a pointing arrow and leading line numbers.
Parameters
----------
lines : list[Line]
Colors
ColorScheme used.
lvals : str
Values of local variables, already colored, to inject just after the error line.
"""
numbers_width = INDENT_SIZE - 1
res = []
for stack_line in lines:
if stack_line is stack_data.LINE_GAP:
res.append('%s (...)%s\n' % (Colors.linenoEm, Colors.Normal))
continue
line = stack_line.render(pygmented=has_colors).rstrip('\n') + '\n'
lineno = stack_line.lineno
if stack_line.is_current:
# This is the line with the error
pad = numbers_width - len(str(lineno))
num = '%s%s' % (debugger.make_arrow(pad), str(lineno))
start_color = Colors.linenoEm
else:
num = '%*s' % (numbers_width, lineno)
start_color = Colors.lineno
line = '%s%s%s %s' % (start_color, num, Colors.Normal, line)
res.append(line)
if lvals and stack_line.is_current:
res.append(lvals + '\n')
return res
def _format_filename(file, ColorFilename, ColorNormal, *, lineno=None):
"""
Format filename lines with `In [n]` if it's the nth code cell or `File *.py` if it's a module.
Parameters
----------
file : str
ColorFilename
ColorScheme's filename coloring to be used.
ColorNormal
ColorScheme's normal coloring to be used.
"""
ipinst = get_ipython()
if ipinst is not None and file in ipinst.compile._filename_map:
file = "[%s]" % ipinst.compile._filename_map[file]
if lineno is None:
tpl_link = f"Cell {ColorFilename}In {{file}}{ColorNormal}"
else:
tpl_link = f"Cell {ColorFilename}In {{file}}, line {{lineno}}{ColorNormal}"
else:
file = util_path.compress_user(
py3compat.cast_unicode(file, util_path.fs_encoding)
)
if lineno is None:
tpl_link = f"File {ColorFilename}{{file}}{ColorNormal}"
else:
tpl_link = f"File {ColorFilename}{{file}}:{{lineno}}{ColorNormal}"
return tpl_link.format(file=file, lineno=lineno)
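# For instance (an illustrative call, assuming no running IPython instance has
# the file in its compile cache and color strings are empty):
#   _format_filename("/home/user/proj/foo.py", "", "", lineno=12)
# returns "File ~/proj/foo.py:12", since compress_user shortens the home prefix.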
#---------------------------------------------------------------------------
# Module classes
class TBTools(colorable.Colorable):
"""Basic tools used by all traceback printer classes."""
# Number of frames to skip when reporting tracebacks
tb_offset = 0
def __init__(
self,
color_scheme="NoColor",
call_pdb=False,
ostream=None,
parent=None,
config=None,
*,
debugger_cls=None,
):
# Whether to call the interactive pdb debugger after printing
# tracebacks or not
super(TBTools, self).__init__(parent=parent, config=config)
self.call_pdb = call_pdb
# Output stream to write to. Note that we store the original value in
# a private attribute and then make the public ostream a property, so
# that we can delay accessing sys.stdout until runtime. The way
# things are written now, the sys.stdout object is dynamically managed
# so a reference to it should NEVER be stored statically. This
# property approach confines this detail to a single location, and all
# subclasses can simply access self.ostream for writing.
self._ostream = ostream
# Create color table
self.color_scheme_table = exception_colors()
self.set_colors(color_scheme)
self.old_scheme = color_scheme # save initial value for toggles
self.debugger_cls = debugger_cls or debugger.Pdb
if call_pdb:
self.pdb = self.debugger_cls()
else:
self.pdb = None
def _get_ostream(self):
"""Output stream that exceptions are written to.
Valid values are:
- None: the default, which means that IPython will dynamically resolve
to sys.stdout. This ensures compatibility with most tools, including
Windows (where plain stdout doesn't recognize ANSI escapes).
- Any object with 'write' and 'flush' attributes.
"""
return sys.stdout if self._ostream is None else self._ostream
def _set_ostream(self, val):
assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
self._ostream = val
ostream = property(_get_ostream, _set_ostream)
@staticmethod
def _get_chained_exception(exception_value):
cause = getattr(exception_value, "__cause__", None)
if cause:
return cause
if getattr(exception_value, "__suppress_context__", False):
return None
return getattr(exception_value, "__context__", None)
def get_parts_of_chained_exception(
self, evalue
) -> Optional[Tuple[type, BaseException, TracebackType]]:
chained_evalue = self._get_chained_exception(evalue)
if chained_evalue:
return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
return None
def prepare_chained_exception_message(self, cause) -> List[Any]:
direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
if cause:
message = [[direct_cause]]
else:
message = [[exception_during_handling]]
return message
@property
def has_colors(self) -> bool:
return self.color_scheme_table.active_scheme_name.lower() != "nocolor"
def set_colors(self, *args, **kw):
"""Shorthand access to the color table scheme selector method."""
# Set own color table
self.color_scheme_table.set_active_scheme(*args, **kw)
# for convenience, set Colors to the active scheme
self.Colors = self.color_scheme_table.active_colors
# Also set colors of debugger
if hasattr(self, 'pdb') and self.pdb is not None:
self.pdb.set_colors(*args, **kw)
def color_toggle(self):
"""Toggle between the currently active color scheme and NoColor."""
if self.color_scheme_table.active_scheme_name == 'NoColor':
self.color_scheme_table.set_active_scheme(self.old_scheme)
self.Colors = self.color_scheme_table.active_colors
else:
self.old_scheme = self.color_scheme_table.active_scheme_name
self.color_scheme_table.set_active_scheme('NoColor')
self.Colors = self.color_scheme_table.active_colors
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return '\n'.join(stb)
def text(self, etype, value, tb, tb_offset: Optional[int] = None, context=5):
"""Return formatted traceback.
Subclasses may override this if they add extra arguments.
"""
tb_list = self.structured_traceback(etype, value, tb,
tb_offset, context)
return self.stb2text(tb_list)
def structured_traceback(
self, etype, evalue, tb, tb_offset: Optional[int] = None, context=5, mode=None
):
"""Return a list of traceback frames.
Must be implemented by each class.
"""
raise NotImplementedError()
#---------------------------------------------------------------------------
class ListTB(TBTools):
"""Print traceback information from a traceback list, with optional color.
Calling requires 3 arguments: (etype, evalue, elist)
as would be obtained by::
etype, evalue, tb = sys.exc_info()
if tb:
elist = traceback.extract_tb(tb)
else:
elist = None
It can thus be used by programs which need to process the traceback before
printing (such as console replacements based on the code module from the
standard library).
Because they are meant to be called without a full traceback (only a
list), instances of this class can't call the interactive pdb debugger."""
def __call__(self, etype, value, elist):
self.ostream.flush()
self.ostream.write(self.text(etype, value, elist))
self.ostream.write('\n')
def _extract_tb(self, tb):
if tb:
return traceback.extract_tb(tb)
else:
return None
def structured_traceback(
self,
etype: type,
evalue: BaseException,
etb: Optional[TracebackType] = None,
tb_offset: Optional[int] = None,
context=5,
):
"""Return a color formatted string with the traceback info.
Parameters
----------
etype : exception type
Type of the exception raised.
evalue : object
Data stored in the exception
etb : list | TracebackType | None
If list: List of frames, see class docstring for details.
If Traceback: Traceback of the exception.
tb_offset : int, optional
Number of frames in the traceback to skip. If not given, the
instance evalue is used (set in constructor).
context : int, optional
Number of lines of context information to print.
Returns
-------
String with formatted exception.
"""
# This is a workaround to get chained_exc_ids in recursive calls
# etb should not be a tuple if structured_traceback is not recursive
if isinstance(etb, tuple):
etb, chained_exc_ids = etb
else:
chained_exc_ids = set()
if isinstance(etb, list):
elist = etb
elif etb is not None:
elist = self._extract_tb(etb)
else:
elist = []
tb_offset = self.tb_offset if tb_offset is None else tb_offset
assert isinstance(tb_offset, int)
Colors = self.Colors
out_list = []
if elist:
if tb_offset and len(elist) > tb_offset:
elist = elist[tb_offset:]
out_list.append('Traceback %s(most recent call last)%s:' %
(Colors.normalEm, Colors.Normal) + '\n')
out_list.extend(self._format_list(elist))
# The exception info should be a single entry in the list.
lines = ''.join(self._format_exception_only(etype, evalue))
out_list.append(lines)
exception = self.get_parts_of_chained_exception(evalue)
if exception and not id(exception[1]) in chained_exc_ids:
chained_exception_message = self.prepare_chained_exception_message(
evalue.__cause__)[0]
etype, evalue, etb = exception
# Trace exception to avoid infinite 'cause' loop
chained_exc_ids.add(id(exception[1]))
chained_exceptions_tb_offset = 0
out_list = (
self.structured_traceback(
etype, evalue, (etb, chained_exc_ids),
chained_exceptions_tb_offset, context)
+ chained_exception_message
+ out_list)
return out_list
def _format_list(self, extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
Lifted almost verbatim from traceback.py
"""
Colors = self.Colors
list = []
for ind, (filename, lineno, name, line) in enumerate(extracted_list):
normalCol, nameCol, fileCol, lineCol = (
# Emphasize the last entry
(Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)
if ind == len(extracted_list) - 1
else (Colors.Normal, Colors.name, Colors.filename, "")
)
fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)
item = f"{normalCol} {fns}"
if name != "<module>":
item += f" in {nameCol}{name}{normalCol}\n"
else:
item += "\n"
if line:
item += f"{lineCol} {line.strip()}{normalCol}\n"
list.append(item)
return list
def _format_exception_only(self, etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.exc_info()[:2]. The return value is a list of strings, each ending
in a newline. Normally, the list contains a single string; however,
for SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax error
occurred. The message indicating which exception occurred is always the
last string in the list.
Also lifted nearly verbatim from traceback.py
"""
have_filedata = False
Colors = self.Colors
list = []
stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
if value is None:
# Not sure if this can still happen in Python 2.6 and above
list.append(stype + '\n')
else:
if issubclass(etype, SyntaxError):
have_filedata = True
if not value.filename: value.filename = "<string>"
if value.lineno:
lineno = value.lineno
textline = linecache.getline(value.filename, value.lineno)
else:
lineno = "unknown"
textline = ""
list.append(
"%s %s%s\n"
% (
Colors.normalEm,
_format_filename(
value.filename,
Colors.filenameEm,
Colors.normalEm,
lineno=(None if lineno == "unknown" else lineno),
),
Colors.Normal,
)
)
if textline == "":
textline = py3compat.cast_unicode(value.text, "utf-8")
if textline is not None:
i = 0
while i < len(textline) and textline[i].isspace():
i += 1
list.append('%s %s%s\n' % (Colors.line,
textline.strip(),
Colors.Normal))
if value.offset is not None:
s = ' '
for c in textline[i:value.offset - 1]:
if c.isspace():
s += c
else:
s += ' '
list.append('%s%s^%s\n' % (Colors.caret, s,
Colors.Normal))
try:
s = value.msg
except Exception:
s = self._some_str(value)
if s:
list.append('%s%s:%s %s\n' % (stype, Colors.excName,
Colors.Normal, s))
else:
list.append('%s\n' % stype)
# sync with user hooks
if have_filedata:
ipinst = get_ipython()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
return list
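# Illustrative output shape (an assumption, not guaranteed formatting): for
# a SyntaxError, the returned list holds a file/line header, the offending
# source line, a caret line aligned to value.offset, and finally a string
# like "SyntaxError: invalid syntax".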
def get_exception_only(self, etype, value):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
"""
return ListTB.structured_traceback(self, etype, value)
def show_exception_only(self, etype, evalue):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
evalue : exception value
"""
# This method needs to use __call__ from *this* class, not the one from
# a subclass whose signature or behavior may be different
ostream = self.ostream
ostream.flush()
ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
ostream.flush()
def _some_str(self, value):
# Lifted from traceback.py
try:
return py3compat.cast_unicode(str(value))
except:
return u'<unprintable %s object>' % type(value).__name__
#----------------------------------------------------------------------------
class VerboseTB(TBTools):
"""A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
of HTML. Requires inspect and pydoc. Crazy, man.
Modified version which optionally strips the topmost entries from the
traceback, to be used with alternate interpreters (because their own code
would appear in the traceback)."""
_tb_highlight = "bg:ansiyellow"
def __init__(
self,
color_scheme: str = "Linux",
call_pdb: bool = False,
ostream=None,
tb_offset: int = 0,
long_header: bool = False,
include_vars: bool = True,
check_cache=None,
debugger_cls=None,
parent=None,
config=None,
):
"""Specify traceback offset, headers and color scheme.
Define how many frames to drop from the tracebacks. Calling it with
tb_offset=1 allows use of this handler in interpreters which will have
their own code at the top of the traceback (VerboseTB will first
remove that frame before printing the traceback info)."""
TBTools.__init__(
self,
color_scheme=color_scheme,
call_pdb=call_pdb,
ostream=ostream,
parent=parent,
config=config,
debugger_cls=debugger_cls,
)
self.tb_offset = tb_offset
self.long_header = long_header
self.include_vars = include_vars
# By default we use linecache.checkcache, but the user can provide a
# different check_cache implementation. This was formerly used by the
# IPython kernel for interactive code, but is no longer necessary.
if check_cache is None:
check_cache = linecache.checkcache
self.check_cache = check_cache
self.skip_hidden = True
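# Minimal usage sketch (illustrative): an embedding interpreter whose own
# frame tops the stack could drop it with tb_offset=1:
#
#   handler = VerboseTB(color_scheme="Linux", tb_offset=1)
#   try:
#       run_user_code()          # hypothetical entry point
#   except Exception:
#       handler(*sys.exc_info())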
def format_record(self, frame_info):
"""Format a single stack frame"""
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
if isinstance(frame_info, stack_data.RepeatedFrames):
return ' %s[... skipping similar frames: %s]%s\n' % (
Colors.excName, frame_info.description, ColorsNormal)
indent = " " * INDENT_SIZE
em_normal = "%s\n%s%s" % (Colors.valEm, indent, ColorsNormal)
tpl_call = f"in {Colors.vName}{{file}}{Colors.valEm}{{scope}}{ColorsNormal}"
tpl_call_fail = "in %s%%s%s(***failed resolving arguments***)%s" % (
Colors.vName,
Colors.valEm,
ColorsNormal,
)
tpl_name_val = "%%s %s= %%s%s" % (Colors.valEm, ColorsNormal)
link = _format_filename(
frame_info.filename,
Colors.filenameEm,
ColorsNormal,
lineno=frame_info.lineno,
)
args, varargs, varkw, locals_ = inspect.getargvalues(frame_info.frame)
func = frame_info.executing.code_qualname()
if func == "<module>":
call = ""
else:
# Decide whether to include variable details or not
var_repr = eqrepr if self.include_vars else nullrepr
try:
scope = inspect.formatargvalues(
args, varargs, varkw, locals_, formatvalue=var_repr
)
call = tpl_call.format(file=func, scope=scope)
except KeyError:
# This happens in situations like errors inside generator
# expressions, where local variables are listed in the
# line, but can't be extracted from the frame. I'm not
# 100% sure this isn't actually a bug in inspect itself,
# but since there's no info for us to compute with, the
# best we can do is report the failure and move on. Here
# we must *not* call any traceback construction again,
# because that would mess up use of %debug later on. So we
# simply report the failure and move on. The only
# limitation will be that this frame won't have locals
# listed in the call signature. Quite subtle problem...
# I can't think of a good way to validate this in a unit
# test, but running a script consisting of:
# dict( (k,v.strip()) for (k,v) in range(10) )
# will illustrate the error, if this exception catch is
# disabled.
call = tpl_call_fail % func
lvals = ''
lvals_list = []
if self.include_vars:
try:
# we likely want to fix stackdata at some point, but
# still need a workaround.
fibp = frame_info.variables_in_executing_piece
for var in fibp:
lvals_list.append(tpl_name_val % (var.name, repr(var.value)))
except Exception:
lvals_list.append(
"Exception trying to inspect frame. No more locals available."
)
if lvals_list:
lvals = '%s%s' % (indent, em_normal.join(lvals_list))
result = f'{link}{", " if call else ""}{call}\n'
result += ''.join(_format_traceback_lines(frame_info.lines, Colors, self.has_colors, lvals))
return result
def prepare_header(self, etype, long_version=False):
colors = self.Colors # just a shorthand + quicker name lookup
colorsnormal = colors.Normal # used a lot
exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
width = min(75, get_terminal_size()[0])
if long_version:
# Header with the exception type, python version, and date
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal,
exc, ' ' * (width - len(str(etype)) - len(pyver)),
pyver, date.rjust(width) )
head += "\nA problem occurred executing Python code. Here is the sequence of function" \
"\ncalls leading up to the error, with the most recent (innermost) call last."
else:
# Simplified header
head = '%s%s' % (exc, 'Traceback (most recent call last)'. \
rjust(width - len(str(etype))) )
return head
def format_exception(self, etype, evalue):
colors = self.Colors # just a shorthand + quicker name lookup
colorsnormal = colors.Normal # used a lot
# Get (safely) a string form of the exception info
try:
etype_str, evalue_str = map(str, (etype, evalue))
except:
# User exception is improperly defined.
etype, evalue = str, sys.exc_info()[:2]
etype_str, evalue_str = map(str, (etype, evalue))
# ... and format it
return ['%s%s%s: %s' % (colors.excName, etype_str,
colorsnormal, py3compat.cast_unicode(evalue_str))]
def format_exception_as_a_whole(
self,
etype: type,
evalue: BaseException,
etb: Optional[TracebackType],
number_of_lines_of_context,
tb_offset: Optional[int],
):
"""Formats the header, traceback and exception message for a single exception.
This may be called multiple times by Python 3 exception chaining
(PEP 3134).
"""
# some locals
orig_etype = etype
try:
etype = etype.__name__
except AttributeError:
pass
tb_offset = self.tb_offset if tb_offset is None else tb_offset
assert isinstance(tb_offset, int)
head = self.prepare_header(etype, self.long_header)
records = (
self.get_records(etb, number_of_lines_of_context, tb_offset) if etb else []
)
frames = []
skipped = 0
lastrecord = len(records) - 1
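# Frames that set "__tracebackhide__" in their locals (as pytest helpers
# do) are collapsed into a single "[... skipping hidden N frame]" marker
# below, except when the hidden frame is the last record.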
for i, r in enumerate(records):
if not isinstance(r, stack_data.RepeatedFrames) and self.skip_hidden:
if r.frame.f_locals.get("__tracebackhide__", 0) and i != lastrecord:
skipped += 1
continue
if skipped:
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
frames.append(
" %s[... skipping hidden %s frame]%s\n"
% (Colors.excName, skipped, ColorsNormal)
)
skipped = 0
frames.append(self.format_record(r))
if skipped:
Colors = self.Colors # just a shorthand + quicker name lookup
ColorsNormal = Colors.Normal # used a lot
frames.append(
" %s[... skipping hidden %s frame]%s\n"
% (Colors.excName, skipped, ColorsNormal)
)
formatted_exception = self.format_exception(etype, evalue)
if records:
frame_info = records[-1]
ipinst = get_ipython()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(frame_info.filename, frame_info.lineno, 0)
return [[head] + frames + [''.join(formatted_exception[0])]]
def get_records(
self, etb: TracebackType, number_of_lines_of_context: int, tb_offset: int
):
assert etb is not None
context = number_of_lines_of_context - 1
after = context // 2
before = context - after
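# Worked example: number_of_lines_of_context=5 gives context=4, so
# after=2 and before=2, i.e. two source lines are shown on each side of
# the executing line.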
if self.has_colors:
style = get_style_by_name("default")
style = stack_data.style_with_executing_node(style, self._tb_highlight)
formatter = Terminal256Formatter(style=style)
else:
formatter = None
options = stack_data.Options(
before=before,
after=after,
pygments_formatter=formatter,
)
return list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
def structured_traceback(
self,
etype: type,
evalue: Optional[BaseException],
etb: Optional[TracebackType],
tb_offset: Optional[int] = None,
number_of_lines_of_context: int = 5,
):
"""Return a nice text document describing the traceback."""
formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
tb_offset)
colors = self.Colors # just a shorthand + quicker name lookup
colorsnormal = colors.Normal # used a lot
head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
structured_traceback_parts = [head]
chained_exceptions_tb_offset = 0
lines_of_context = 3
formatted_exceptions = formatted_exception
exception = self.get_parts_of_chained_exception(evalue)
if exception:
assert evalue is not None
formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
etype, evalue, etb = exception
else:
evalue = None
chained_exc_ids = set()
while evalue:
formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
chained_exceptions_tb_offset)
exception = self.get_parts_of_chained_exception(evalue)
if exception and not id(exception[1]) in chained_exc_ids:
chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
etype, evalue, etb = exception
else:
evalue = None
# we want to see exceptions in a reversed order:
# the first exception should be on top
for formatted_exception in reversed(formatted_exceptions):
structured_traceback_parts += formatted_exception
return structured_traceback_parts
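# Illustrative ordering: for `raise B() from A()`, the parts are built as
# [B, cause-message, A] and then reversed to [A, cause-message, B], so the
# root cause prints first and the outermost exception prints last.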
def debugger(self, force: bool = False):
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = self.debugger_cls()
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
with display_trap:
self.pdb.reset()
# Find the right frame so we don't pop up inside ipython itself
if hasattr(self, 'tb') and self.tb is not None:
etb = self.tb
else:
etb = self.tb = sys.last_traceback
while self.tb is not None and self.tb.tb_next is not None:
assert self.tb.tb_next is not None
self.tb = self.tb.tb_next
if etb and etb.tb_next:
etb = etb.tb_next
self.pdb.botframe = etb.tb_frame
self.pdb.interaction(None, etb)
if hasattr(self, 'tb'):
del self.tb
def handler(self, info=None):
(etype, evalue, etb) = info or sys.exc_info()
self.tb = etb
ostream = self.ostream
ostream.flush()
ostream.write(self.text(etype, evalue, etb))
ostream.write('\n')
ostream.flush()
# Changed so an instance can just be called as VerboseTB_inst() and print
# out the right info on its own.
def __call__(self, etype=None, evalue=None, etb=None):
"""This hook can replace sys.excepthook (for Python 2.1 or higher)."""
if etb is None:
self.handler()
else:
self.handler((etype, evalue, etb))
try:
self.debugger()
except KeyboardInterrupt:
print("\nKeyboardInterrupt")
#----------------------------------------------------------------------------
class FormattedTB(VerboseTB, ListTB):
"""Subclass ListTB but allow calling with a traceback.
It can thus be used as a sys.excepthook for Python > 2.1.
Also adds 'Context' and 'Verbose' modes, not available in ListTB.
Allows a tb_offset to be specified. This is useful for situations where
one needs to remove a number of topmost frames from the traceback (such as
occurs with python programs that themselves execute other python code,
like Python shells). """
mode: str
def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
ostream=None,
tb_offset=0, long_header=False, include_vars=False,
check_cache=None, debugger_cls=None,
parent=None, config=None):
# NEVER change the order of this list. Put new modes at the end:
self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal']
self.verbose_modes = self.valid_modes[1:3]
VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream, tb_offset=tb_offset,
long_header=long_header, include_vars=include_vars,
check_cache=check_cache, debugger_cls=debugger_cls,
parent=parent, config=config)
# Different types of tracebacks are joined with different separators to
# form a single string. They are taken from this dict
self._join_chars = dict(Plain='', Context='\n', Verbose='\n',
Minimal='')
# set_mode also sets the tb_join_char attribute
self.set_mode(mode)
def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
tb_offset = self.tb_offset if tb_offset is None else tb_offset
mode = self.mode
if mode in self.verbose_modes:
# Verbose modes need a full traceback
return VerboseTB.structured_traceback(
self, etype, value, tb, tb_offset, number_of_lines_of_context
)
elif mode == 'Minimal':
return ListTB.get_exception_only(self, etype, value)
else:
# We must check the source cache because otherwise we can print
# out-of-date source code.
self.check_cache()
# Now we can extract and format the exception
return ListTB.structured_traceback(
self, etype, value, tb, tb_offset, number_of_lines_of_context
)
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return self.tb_join_char.join(stb)
def set_mode(self, mode: Optional[str] = None):
"""Switch to the desired mode.
If mode is not specified, cycles through the available modes."""
if not mode:
new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
len(self.valid_modes)
self.mode = self.valid_modes[new_idx]
elif mode not in self.valid_modes:
raise ValueError(
"Unrecognized mode in FormattedTB: <" + mode + ">\n"
"Valid modes: " + str(self.valid_modes)
)
else:
assert isinstance(mode, str)
self.mode = mode
# include variable details only in 'Verbose' mode
self.include_vars = (self.mode == self.valid_modes[2])
# Set the join character for generating text tracebacks
self.tb_join_char = self._join_chars[self.mode]
# some convenient shortcuts
def plain(self):
self.set_mode(self.valid_modes[0])
def context(self):
self.set_mode(self.valid_modes[1])
def verbose(self):
self.set_mode(self.valid_modes[2])
def minimal(self):
self.set_mode(self.valid_modes[3])
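# Usage sketch (illustrative): with ftb = FormattedTB(mode='Plain'),
# calling ftb.set_mode() with no argument cycles
# Plain -> Context -> Verbose -> Minimal -> Plain, while the shortcuts
# above (ftb.context(), ftb.verbose(), ...) jump straight to one mode.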
#----------------------------------------------------------------------------
class AutoFormattedTB(FormattedTB):
"""A traceback printer which can be called on the fly.
It will find out about exceptions by itself.
A brief example::
AutoTB = AutoFormattedTB(mode='Verbose', color_scheme='Linux')
try:
...
except:
AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
"""
def __call__(self, etype=None, evalue=None, etb=None,
out=None, tb_offset=None):
"""Print out a formatted exception traceback.
Optional arguments:
- out: an open file-like object to direct output to.
- tb_offset: the number of frames to skip over in the stack, on a
per-call basis (this overrides temporarily the instance's tb_offset
given at initialization time."""
if out is None:
out = self.ostream
out.flush()
out.write(self.text(etype, evalue, etb, tb_offset))
out.write('\n')
out.flush()
# FIXME: we should remove the auto pdb behavior from here and leave
# that to the clients.
try:
self.debugger()
except KeyboardInterrupt:
print("\nKeyboardInterrupt")
def structured_traceback(self, etype=None, value=None, tb=None,
tb_offset=None, number_of_lines_of_context=5):
etype: type
value: BaseException
# tb: TracebackType or tuple of tb types?
if etype is None:
etype, value, tb = sys.exc_info()
if isinstance(tb, tuple):
# tb is a tuple if this is a chained exception.
self.tb = tb[0]
else:
self.tb = tb
return FormattedTB.structured_traceback(
self, etype, value, tb, tb_offset, number_of_lines_of_context)
#---------------------------------------------------------------------------
# A simple class to preserve Nathan's original functionality.
class ColorTB(FormattedTB):
"""Shorthand to initialize a FormattedTB in Linux colors mode."""
def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
FormattedTB.__init__(self, color_scheme=color_scheme,
call_pdb=call_pdb, **kwargs)
class SyntaxTB(ListTB):
"""Extension which holds some state: the last exception value"""
def __init__(self, color_scheme='NoColor', parent=None, config=None):
ListTB.__init__(self, color_scheme, parent=parent, config=config)
self.last_syntax_error = None
def __call__(self, etype, value, elist):
self.last_syntax_error = value
ListTB.__call__(self, etype, value, elist)
def structured_traceback(self, etype, value, elist, tb_offset=None,
context=5):
# If the source file has been edited, the line in the syntax error can
# be wrong (retrieved from an outdated cache). This replaces it with
# the current value.
if isinstance(value, SyntaxError) \
and isinstance(value.filename, str) \
and isinstance(value.lineno, int):
linecache.checkcache(value.filename)
newtext = linecache.getline(value.filename, value.lineno)
if newtext:
value.text = newtext
self.last_syntax_error = value
return super(SyntaxTB, self).structured_traceback(etype, value, elist,
tb_offset=tb_offset, context=context)
def clear_err_state(self):
"""Return the current error state and clear it"""
e = self.last_syntax_error
self.last_syntax_error = None
return e
def stb2text(self, stb):
"""Convert a structured traceback (a list) to a string."""
return ''.join(stb)
# some internal-use functions
def text_repr(value):
"""Hopefully pretty robust repr equivalent."""
# this is pretty horrible but should always return *something*
try:
return pydoc.text.repr(value)
except KeyboardInterrupt:
raise
except:
try:
return repr(value)
except KeyboardInterrupt:
raise
except:
try:
# all still in an except block so we catch
# getattr raising
name = getattr(value, '__name__', None)
if name:
# ick, recursion
return text_repr(name)
klass = getattr(value, '__class__', None)
if klass:
return '%s instance' % text_repr(klass)
except KeyboardInterrupt:
raise
except:
return 'UNRECOVERABLE REPR FAILURE'
def eqrepr(value, repr=text_repr):
return '=%s' % repr(value)
def nullrepr(value, repr=text_repr):
return ''
| {
"content_hash": "989d0f7676959b56dec43917fb3b3932",
"timestamp": "",
"source": "github",
"line_count": 1200,
"max_line_length": 110,
"avg_line_length": 38.266666666666666,
"alnum_prop": 0.5799651567944251,
"repo_name": "ipython/ipython",
"id": "e83e2b4a0c11d1b1e0cee6b9a20f5f14189f7c3e",
"size": "45944",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "IPython/core/ultratb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "Jupyter Notebook",
"bytes": "958133"
},
{
"name": "Makefile",
"bytes": "4675"
},
{
"name": "Python",
"bytes": "2318171"
},
{
"name": "Shell",
"bytes": "12155"
}
],
"symlink_target": ""
} |
from dispatch import Dispatcher
from response import APIResponse
__all__ = (
'XmlRpcDispatcher',
'XmlRpcResponse',
)
class XmlRpcResponse(APIResponse):
def format(self, data):
if isinstance(data, APIError):
# convert to fault xmlrpc message
pass
else:
# convert standard
pass
class XmlRpcDispatcher(Dispatcher):
"""
Accepts the regular XML-RPC method call via POST. Fails on GET, or if
called with a (sub-)url. Uses a dot "." for separating namespaces.
POST /
<?xml version="1.0"?>
<methodCall>
<methodName>comments.add</methodName>
<params>
<param><value><string>great post!</string></value></param>
</params>
</methodCall>
==> api.comments.add("great post!")
Note that keyword arguments are not supported.
"""
# TODO: support automatic introspection, multicall
default_response_class = XmlRpcResponse
def parse_url(self, request):
pass
| {
"content_hash": "7a6f4ca647ca9508f387cf4cd4506fb6",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 26.175,
"alnum_prop": 0.603629417382999,
"repo_name": "miracle2k/genericapi",
"id": "7f72d8043842852f3864b8abf5fb11f3aa59ef04",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genericapi/xmlrpc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "60305"
}
],
"symlink_target": ""
} |
"""This package implements a very simple Qt GUI over the PythonCalc
package to provide an example of how a package may add GUI elements
including independent windows and menu items, as well as dependency
requirements.
If you're interested in developing new modules for VisTrails, you
should also consult the documentation in the User's Guide and in
core/modules/vistrails_module.py.
"""
from __future__ import division
identifier = 'org.vistrails.vistrails.pythoncalcqt'
name = 'PythonCalcQt'
version = '0.0.2'
old_identifiers = ['edu.utah.sci.vistrails.pythoncalcqt']
def package_dependencies():
return ['org.vistrails.vistrails.pythoncalc']
| {
"content_hash": "2c62f254157d39577c53b61389fdf6b6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 34.05263157894737,
"alnum_prop": 0.7836166924265843,
"repo_name": "hjanime/VisTrails",
"id": "3b8b0336de3fb6ac81cdd9836a97050419407807",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/pythonCalcQt/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "octopus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "42f86c8a49fa9714334242b341f0f0ec",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.7105263157894737,
"repo_name": "gregorianzhang/octopus",
"id": "f059f4215c9dcf20bdf9373257a223937be1a24a",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10595"
},
{
"name": "Python",
"bytes": "13662"
}
],
"symlink_target": ""
} |
import random as r
import math
class Roller:
'''Base class which handles die rolls and stat-to-mod calculations'''
def do_roll(self, roll_string, modifier=0):
'''
Takes a roll string (e.g. '2d4') and modifier, rolls, then
returns the result as an integer.
'''
num_dice, die_type = roll_string.split('d')
rolls = [r.randint(1, int(die_type)) for i in range(int(num_dice))]
total = sum(rolls) + modifier
return total
def stat_to_mod(self, value=10):
'''Converts a stat to its respective modifier (e.g. 14 to +2)'''
return math.floor((value - 10) / 2)
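# Illustrative usage (not part of the original module):
if __name__ == '__main__':
    roller = Roller()
    print(roller.do_roll('2d6', modifier=3))  # sum of two d6 rolls, plus 3
    print(roller.stat_to_mod(14))             # 14 maps to a +2 modifier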
| {
"content_hash": "f34857a697abe8d1250b9d901d6e596d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 33.89473684210526,
"alnum_prop": 0.5978260869565217,
"repo_name": "etkirsch/dndarmy",
"id": "a58488e7118b58462cef4da0d43bb5629827eda7",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dndarmy/Roller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14302"
}
],
"symlink_target": ""
} |
"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.
Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.
The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.
For example, a CtypesType
representing an array of four integers could be created using:
>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
str(ctype) would evaluate to "c_int * 4".
"""
import warnings
__docformat__ = "restructuredtext"
ctypes_type_map = {
# (typename, signed, longs)
("void", True, 0): "None",
("int", True, 0): "c_int",
("int", False, 0): "c_uint",
("int", True, 1): "c_long",
("int", False, 1): "c_ulong",
("char", True, 0): "c_char",
("char", False, 0): "c_ubyte",
("short", True, 0): "c_short",
("short", False, 0): "c_ushort",
("float", True, 0): "c_float",
("double", True, 0): "c_double",
("double", True, 1): "c_longdouble",
("int8_t", True, 0): "c_int8",
("int16_t", True, 0): "c_int16",
("int32_t", True, 0): "c_int32",
("int64_t", True, 0): "c_int64",
("uint8_t", True, 0): "c_uint8",
("uint16_t", True, 0): "c_uint16",
("uint32_t", True, 0): "c_uint32",
("uint64_t", True, 0): "c_uint64",
("_Bool", True, 0): "c_bool",
}
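# For example, the C declaration "unsigned long" parses to the key
# ("int", False, 1) and therefore maps to the ctypes name "c_ulong".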
ctypes_type_map_python_builtin = {
("int", True, 2): "c_longlong",
("int", False, 2): "c_ulonglong",
("size_t", True, 0): "c_size_t",
("apr_int64_t", True, 0): "c_int64",
("off64_t", True, 0): "c_int64",
("apr_uint64_t", True, 0): "c_uint64",
("wchar_t", True, 0): "c_wchar",
("ptrdiff_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("ssize_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("va_list", True, 0): "c_void_p",
}
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
def visit_struct(self, struct):
pass
def visit_enum(self, enum):
pass
def visit_typedef(self, name):
pass
def visit_error(self, error, cls):
pass
def visit_identifier(self, identifier):
# This one comes from inside ExpressionNodes. There may be
# ExpressionNode objects in array count expressions.
pass
def visit_type_and_collect_info(ctype):
class Visitor(CtypesTypeVisitor):
def visit_struct(self, struct):
structs.append(struct)
def visit_enum(self, enum):
enums.append(enum)
def visit_typedef(self, typedef):
typedefs.append(typedef)
def visit_error(self, error, cls):
errors.append((error, cls))
def visit_identifier(self, identifier):
identifiers.append(identifier)
structs = []
enums = []
typedefs = []
errors = []
identifiers = []
v = Visitor()
ctype.visit(v)
return structs, enums, typedefs, errors, identifiers
# Remove one level of indirection from a function pointer; needed for typedefs
# and function parameters.
def remove_function_pointer(t):
if type(t) == CtypesPointer and type(t.destination) == CtypesFunction:
return t.destination
elif type(t) == CtypesPointer:
t.destination = remove_function_pointer(t.destination)
return t
else:
return t
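# Illustrative effect (assumed C input): a typedef such as
#   typedef int (*callback)(void);
# parses as a CtypesPointer wrapping a CtypesFunction; stripping one level
# of indirection lets the typedef render directly as a CFUNCTYPE(...)
# expression instead of a POINTER(CFUNCTYPE(...)).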
class CtypesType(object):
def __init__(self):
super(CtypesType, self).__init__()
self.errors = []
def __repr__(self):
return '<Ctype (%s) "%s">' % (type(self).__name__, self.py_string())
def error(self, message, cls=None):
self.errors.append((message, cls))
def visit(self, visitor):
for error, cls in self.errors:
visitor.visit_error(error, cls)
class CtypesSimple(CtypesType):
"""Represents a builtin type, like "char" or "int"."""
def __init__(self, name, signed, longs):
super(CtypesSimple, self).__init__()
self.name = name
self.signed = signed
self.longs = longs
def py_string(self, ignore_can_be_ctype=None):
return ctypes_type_map[(self.name, self.signed, self.longs)]
class CtypesSpecial(CtypesType):
def __init__(self, name):
super(CtypesSpecial, self).__init__()
self.name = name
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesTypedef(CtypesType):
"""Represents a type defined by a typedef."""
def __init__(self, name):
super(CtypesTypedef, self).__init__()
self.name = name
def visit(self, visitor):
if not self.errors:
visitor.visit_typedef(self.name)
super(CtypesTypedef, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesBitfield(CtypesType):
def __init__(self, base, bitfield):
super(CtypesBitfield, self).__init__()
self.base = base
self.bitfield = bitfield
def visit(self, visitor):
self.base.visit(visitor)
super(CtypesBitfield, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.base.py_string()
class CtypesPointer(CtypesType):
def __init__(self, destination, qualifiers):
super(CtypesPointer, self).__init__()
self.destination = destination
self.qualifiers = qualifiers
def visit(self, visitor):
if self.destination:
self.destination.visit(visitor)
super(CtypesPointer, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "POINTER(%s)" % self.destination.py_string()
class CtypesArray(CtypesType):
def __init__(self, base, count):
super(CtypesArray, self).__init__()
self.base = base
self.count = count
def visit(self, visitor):
self.base.visit(visitor)
if self.count:
self.count.visit(visitor)
super(CtypesArray, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
if self.count is None:
return "POINTER(%s)" % self.base.py_string()
if type(self.base) == CtypesArray:
return "(%s) * int(%s)" % (self.base.py_string(), self.count.py_string(False))
else:
return "%s * int(%s)" % (self.base.py_string(), self.count.py_string(False))
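# For example, the declaration "int buf[4]" renders as "c_int * int(4)",
# which evaluates to a ctypes array type in the generated wrapper.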
class CtypesNoErrorCheck(object):
def py_string(self, ignore_can_be_ctype=None):
return "None"
def __bool__(self):
return False
__nonzero__ = __bool__
class CtypesPointerCast(object):
def __init__(self, target):
self.target = target
def py_string(self, ignore_can_be_ctype=None):
return "lambda v,*a : cast(v, {})".format(self.target.py_string())
class CtypesFunction(CtypesType):
def __init__(self, restype, parameters, variadic, attrib=dict()):
super(CtypesFunction, self).__init__()
self.restype = restype
self.errcheck = CtypesNoErrorCheck()
# Don't allow POINTER(None) (c_void_p) as a restype... causes errors
# when ctypes automagically returns it as an int.
# Instead, convert to POINTER(c_void). c_void is not a real ctypes type,
# so the wrapper is free to define it as any arbitrary type.
if (
type(self.restype) == CtypesPointer
and type(self.restype.destination) == CtypesSimple
and self.restype.destination.name == "void"
):
# we will provide a means of converting this to a c_void_p
self.restype = CtypesPointer(CtypesSpecial("c_ubyte"), ())
self.errcheck = CtypesPointerCast(CtypesSpecial("c_void_p"))
# Return "String" instead of "POINTER(c_char)"
if self.restype.py_string() == "POINTER(c_char)":
if "const" in self.restype.qualifiers:
self.restype = CtypesSpecial("c_char_p")
else:
self.restype = CtypesSpecial("String")
self.argtypes = [remove_function_pointer(p) for p in parameters]
self.variadic = variadic
self.attrib = attrib
def visit(self, visitor):
self.restype.visit(visitor)
for a in self.argtypes:
a.visit(visitor)
super(CtypesFunction, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "CFUNCTYPE(UNCHECKED(%s), %s)" % (
self.restype.py_string(),
", ".join([a.py_string() for a in self.argtypes]),
)
last_tagnum = 0
def anonymous_struct_tagnum():
global last_tagnum
last_tagnum += 1
return last_tagnum
def fmt_anonymous_struct_tag(num):
return "anon_%d" % num
def anonymous_struct_tag():
return fmt_anonymous_struct_tag(anonymous_struct_tagnum())
class CtypesStruct(CtypesType):
def __init__(self, tag, attrib, variety, members, src=None):
super(CtypesStruct, self).__init__()
self.tag = tag
self.attrib = attrib
self.variety = variety # "struct" or "union"
self.members = members
if type(self.tag) == int or not self.tag:
if type(self.tag) == int:
self.tag = fmt_anonymous_struct_tag(self.tag)
else:
self.tag = anonymous_struct_tag()
self.anonymous = True
else:
self.anonymous = False
if self.members == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def get_required_types(self):
types = super(CtypesStruct, self).get_required_types()
types.add((self.variety, self.tag))
return types
def visit(self, visitor):
visitor.visit_struct(self)
if not self.opaque:
for name, ctype in self.members:
ctype.visit(visitor)
super(CtypesStruct, self).visit(visitor)
def get_subtypes(self):
if self.opaque:
return set()
else:
return set([m[1] for m in self.members])
def py_string(self, ignore_can_be_ctype=None):
return "%s_%s" % (self.variety, self.tag)
last_tagnum = 0
def anonymous_enum_tag():
global last_tagnum
last_tagnum += 1
return "anon_%d" % last_tagnum
class CtypesEnum(CtypesType):
def __init__(self, tag, enumerators, src=None):
super(CtypesEnum, self).__init__()
self.tag = tag
self.enumerators = enumerators
if not self.tag:
self.tag = anonymous_enum_tag()
self.anonymous = True
else:
self.anonymous = False
if self.enumerators == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def visit(self, visitor):
visitor.visit_enum(self)
super(CtypesEnum, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "enum_%s" % self.tag
| {
"content_hash": "784576a3c1e64ffe9c08f20cf7ce9ebb",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 90,
"avg_line_length": 28.93455497382199,
"alnum_prop": 0.5951325432009409,
"repo_name": "davidjamesca/ctypesgen",
"id": "8df3bbd39d675c9bb72ae4bdeb384c6ae38a6759",
"size": "11076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctypesgen/ctypedescs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "565629"
}
],
"symlink_target": ""
} |
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
""" Get the default timezone
Based on the user's location setting (location.timezone.code), or
the system default value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
# Obtain from the user's configured settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
""" Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return to_utc(datetime.utcnow())
def now_local(tz=None):
""" Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
""" Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
""" Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
def to_system(dt):
""" Convert a datetime to the system's local timezone
Args:
dt (datetime): A datetime (if no timezone, assumed to be UTC)
Returns:
(datetime): time converted to the operation system's timezone
"""
tz = tzlocal()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
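# Illustrative round trip (assumes the configured zone is America/Chicago):
#
#   local = now_local()   # tz-aware datetime in America/Chicago
#   utc = to_utc(local)   # the same instant, expressed in UTC
#   back = to_local(utc)  # converted back to America/Chicago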
| {
"content_hash": "9dd88c998c36802a619fb7ce0d618997",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 26.074468085106382,
"alnum_prop": 0.6295389636882905,
"repo_name": "Dark5ide/mycroft-core",
"id": "cb1a4af114d2d4cd608f797faa377b44dd4771a2",
"size": "3033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycroft/util/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1846776"
},
{
"name": "QML",
"bytes": "9903"
},
{
"name": "Shell",
"bytes": "80311"
}
],
"symlink_target": ""
} |
import os
import sys
import traceback
import operator
# Add the pulsar path
thispath = os.path.dirname(os.path.realpath(__file__))
psrpath = os.path.join(os.path.dirname(thispath), "modules")
sys.path.insert(0, psrpath)
import pulsar as psr
from pulsar.output import *
from pulsar.testing import *
from pulsar.math import *
from helper.SetOperations import test_set_operations
def SetComp(set1, set2):
if len(set1) != len(set2):
return False
for i in range(0, len(set1)):
if set1[i] != set2[i]:
return False
return True
def SetComp2(set1, set2):
return set1 == set2
def PrintSet(s):
string = "{} : ".format(len(s))
for it in s:
string += str(it)
print(string)
def SetElement(s, i, e):
s[i] = e
def FillUniverse(values):
u = StringSetUniverse()
for v in values:
u.insert(v)
return u
def Run():
try:
Alphabet="abcdefghijklmnopqrstuvwxyz"
BadAlphabet = "ABCDE"
tester = Tester("Testing Universe and MathSet")
tester.print_header()
################################
# Do basic testing of set stuff
################################
u0 = StringSetUniverse()
u1 = FillUniverse(Alphabet[:7])
u2 = FillUniverse(Alphabet)
u3 = FillUniverse(reversed(Alphabet))
u9 = FillUniverse(Alphabet[7:])
test_set_operations(tester, StringSetUniverse, Alphabet, BadAlphabet,
False, u0, u1, u2, u3, u9)
###############################
# Serialization
###############################
tester.test("Universe serialization - u0", True, TestSerialization_StringSetUniverse, u0)
tester.test("Universe serialization - u1", True, TestSerialization_StringSetUniverse, u1)
tester.test("Universe serialization - u2", True, TestSerialization_StringSetUniverse, u2)
tester.test("Universe serialization - u3", True, TestSerialization_StringSetUniverse, u3)
tester.test("Universe serialization - u9", True, TestSerialization_StringSetUniverse, u9)
###############################
# Universe-specific stuff
###############################
for idx in range(0, len(u1)):
tester.test_value("Element {} by idx - getitem".format(idx), u1[idx], Alphabet[idx])
tester.test_value("Element {} by idx - At".format(idx), u1.at(idx), Alphabet[idx])
# Try inserting bad values
tester.test("inserting 10", False, u1.insert, 10)
tester.test("inserting None", False, u1.insert, None)
# Modification of an element in a universe -- shouldn't be possible
tester.test("Modification of an element in universe", False, SetElement, u1, 0, "Z")
tester.print_results()
except Exception as e:
print_global_output("Caught exception in main handler. Contact the developers\n")
traceback.print_exc()
print_global_error("\n")
print_global_error(str(e))
print_global_error("\n")
psr.initialize(sys.argv, color = True, debug = True)
Run()
psr.finalize()
| {
"content_hash": "126e8be1db7abaa0f4d496b8b89ec8f5",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 97,
"avg_line_length": 29.02777777777778,
"alnum_prop": 0.594896331738437,
"repo_name": "pulsar-chem/Pulsar-Core",
"id": "0c4bc17a370504cd250bb386630da61eba3977ee",
"size": "3159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/old/Old2/Universe.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7784"
},
{
"name": "C++",
"bytes": "1100500"
},
{
"name": "CMake",
"bytes": "24227"
},
{
"name": "Python",
"bytes": "739363"
},
{
"name": "Shell",
"bytes": "2851"
}
],
"symlink_target": ""
} |
from .base import * # noqa
from .module import * # noqa
from .pod import * # noqa
from .queue import * # noqa
from .queue_entry import * # noqa
from .research import * # noqa
from .resource import * # noqa
from .user import * # noqa
| {
"content_hash": "d75c5e044c39f54cf209511e6098e8a7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 33,
"avg_line_length": 29.125,
"alnum_prop": 0.6866952789699571,
"repo_name": "Nukesor/spacesurvival",
"id": "407c5535820ecb97ef04cf23cb461d7f8ecb9419",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1318"
},
{
"name": "Elm",
"bytes": "58816"
},
{
"name": "HTML",
"bytes": "434"
},
{
"name": "JavaScript",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "70795"
},
{
"name": "Shell",
"bytes": "548"
}
],
"symlink_target": ""
} |
from . import main
from flask import render_template, request, jsonify
@main.app_errorhandler(404)
def page_not_found(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'not found'})
response.status_code = 404
return response
return render_template('errors/404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'internal server error'})
response.status_code = 500
return response
return render_template('errors/500.html'), 500
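# Illustrative behavior (hypothetical clients): a request sent with
# "Accept: application/json" receives {"error": "not found"} with status
# 404, while a browser request (Accept: text/html) gets the rendered
# errors/404.html template instead.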
| {
"content_hash": "7940d171cb93ba64e4e5b3e5e02775f2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 33,
"alnum_prop": 0.6790633608815427,
"repo_name": "seagullbird/BLEXT",
"id": "1ede3cd91055281dca9613b476dd6832ec97e900",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134122"
},
{
"name": "HTML",
"bytes": "24556"
},
{
"name": "JavaScript",
"bytes": "29756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "103003"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from riskgame.models import *
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = EmailUser
fields = ('email',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = EmailUser
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class EmailUserAdmin(UserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'is_admin', 'is_active', 'date_joined', 'last_login')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Permissions', {'fields': ('is_admin',)}),
('Important dates', {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
admin.site.register(EmailUser, EmailUserAdmin)
# class ValidEmailDomainAdmin(admin.ModelAdmin):
# list_display = ('datecreated', 'name')
# admin.site.register(ValidEmailDomain, ValidEmailDomainAdmin)
class TeamPlayerAdmin(admin.ModelAdmin):
list_display = ('role', 'team', 'player', 'gather_pile', 'risk_pile', 'episode_events', 'active_events')
admin.site.register(TeamPlayer, TeamPlayerAdmin)
class TeamAdmin(admin.ModelAdmin):
list_display = ('name', 'victory_points', 'rank_points', 'resources_collected', 'action_points', 'frontline_action_points', 'goal_zero_markers', 'goal_zero_streak', 'active_events', 'get_rank')
admin.site.register(Team, TeamAdmin)
class PlayerAdmin(admin.ModelAdmin):
list_display = ('user', 'name', 'email')
admin.site.register(Player, PlayerAdmin)
# class TeamJoinRequestAdmin(admin.ModelAdmin):
# list_display = ('team', 'player')
# admin.site.register(TeamJoinRequest, TeamJoinRequestAdmin)
class GameAdmin(admin.ModelAdmin):
list_display = ('datecreated', 'start', 'end', 'started', 'over', 'active')
admin.site.register(Game, GameAdmin)
class EpisodeAdmin(admin.ModelAdmin):
list_display = ('datecreated', 'first_day', 'number')
admin.site.register(Episode, EpisodeAdmin)
class EpisodeDayAdmin(admin.ModelAdmin):
list_display = ('datecreated', 'episode', 'number', 'current', 'end', 'next', 'secondsleft')
admin.site.register(EpisodeDay, EpisodeDayAdmin)
class NotificationAdmin(admin.ModelAdmin):
list_display = ('datecreated', 'team', 'player', 'identifier', 'target')
admin.site.register(Notification, NotificationAdmin)
| {
"content_hash": "c9694d4e207ce244134ad9c37f826a53",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 197,
"avg_line_length": 34.357723577235774,
"alnum_prop": 0.6826786559394227,
"repo_name": "whatsthehubbub/rippleeffect",
"id": "91d77356bd1f663dae0f7006c1b2509379bb51b6",
"size": "4226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riskgame/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92091"
},
{
"name": "CoffeeScript",
"bytes": "4964"
},
{
"name": "HTML",
"bytes": "125888"
},
{
"name": "JavaScript",
"bytes": "5464"
},
{
"name": "Python",
"bytes": "385802"
},
{
"name": "Ruby",
"bytes": "79"
},
{
"name": "Shell",
"bytes": "412"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../naginator_publisher'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'jenkins_jobs.sphinx.yaml',
'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Naginator Plugin for jenkins-job-builder'
copyright = u'2014, Thomas Van Doren'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NaginatorPluginforjenkins-job-builderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NaginatorPluginforjenkins-job-builder.tex', u'Naginator Plugin for jenkins-job-builder Documentation',
u'Thomas Van Doren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'naginatorpluginforjenkins-job-builder', u'Naginator Plugin for jenkins-job-builder Documentation',
[u'Thomas Van Doren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NaginatorPluginforjenkins-job-builder', u'Naginator Plugin for jenkins-job-builder Documentation',
u'Thomas Van Doren', 'NaginatorPluginforjenkins-job-builder', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
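# A hedged note (not part of the original file): with Sphinx installed, the
# HTML docs are typically built from this directory with, for example:
#
#   sphinx-build -b html . _build/html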
| {
"content_hash": "7e445d9de224280e0089ffa4797ff6ea",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 115,
"avg_line_length": 32.78313253012048,
"alnum_prop": 0.7112581158887664,
"repo_name": "thomasvandoren/jenkins-job-builder-naginator",
"id": "6d5ffc643062f3b70a31b33c703e33fa8d5a9473",
"size": "8616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18459"
},
{
"name": "Shell",
"bytes": "6524"
}
],
"symlink_target": ""
} |
from kivy_ios.toolchain import PythonRecipe, shprint
from os.path import join
import sh
import os


class ClickRecipe(PythonRecipe):
    version = "7.1.2"
    url = "https://github.com/mitsuhiko/click/archive/{version}.zip"
    depends = ["python"]

    def install(self):
        # Build for the first (and only) filtered architecture.
        arch = list(self.filtered_archs)[0]
        build_dir = self.get_build_dir(arch.arch)
        os.chdir(build_dir)
        # Run `setup.py install` with the host Python, installing into the
        # distribution root alongside the other bundled Python packages.
        hostpython = sh.Command(self.ctx.hostpython)
        build_env = arch.get_env()
        dest_dir = join(self.ctx.dist_dir, "root", "python3")
        build_env['PYTHONPATH'] = self.ctx.site_packages_dir
        shprint(hostpython, "setup.py", "install", "--prefix", dest_dir, _env=build_env)


recipe = ClickRecipe()
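
# A hedged usage note: recipes like this are normally built through the
# kivy-ios toolchain CLI rather than run directly, e.g. (exact invocation
# may vary by kivy-ios version):
#
#   python toolchain.py build click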
| {
"content_hash": "06be850f087810977b5f898fe7ffba5a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 31.304347826086957,
"alnum_prop": 0.65,
"repo_name": "kivy/kivy-ios",
"id": "c2d2d59f4e154388d9bab0a1a107e45dd4d43809",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy_ios/recipes/click/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "537"
},
{
"name": "Cython",
"bytes": "7138"
},
{
"name": "Objective-C",
"bytes": "28133"
},
{
"name": "Python",
"bytes": "125074"
},
{
"name": "Shell",
"bytes": "672"
},
{
"name": "kvlang",
"bytes": "1377"
}
],
"symlink_target": ""
} |
"""MOMA Composer Robot Hand.
Rationale and difference to `robot_base.RobotHand`:
MoMa communicates to hardware (sim and real) though the Sensor and Actuator
interfaces. It does not intend users (for example scripted policies) to
perform these actions through the set_grasp function. While sim hardware can
and should be reset (aka initialized) differently to real hardware, it's
expected that normal behavioural policies, learnt or not, use the Sensor and
Actuator interfaces.
In this way the same control mechanisms, e.g. collision avoidance, cartesian
to joint mapping can be used without special cases.
"""
import abc
from typing import Sequence
from typing import Union

from dm_control import composer
from dm_control.entities.manipulators import base as robot_base
from dm_robotics.moma.models import types


class RobotHand(abc.ABC, composer.Entity):
  """MOMA composer robot hand base class."""

  @abc.abstractmethod
  def _build(self):
    """Entity initialization method to be overridden by subclasses."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def joints(self) -> Sequence[types.MjcfElement]:
    """List of joint elements belonging to the hand."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def actuators(self) -> Sequence[types.MjcfElement]:
    """List of actuator elements belonging to the hand."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def mjcf_model(self) -> types.MjcfElement:
    """Returns the `mjcf.RootElement` object corresponding to the robot hand."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def name(self) -> str:
    """Name of the robot hand."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def tool_center_point(self) -> types.MjcfElement:
    """Tool center point site of the hand."""
    raise NotImplementedError
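

# --- Not part of the original module: a minimal sketch of a concrete hand ---
# The single slide joint, box geom, and site below are illustrative
# assumptions; a real hand would load a complete MJCF model instead.
class _SketchHand(RobotHand):
  """Hypothetical one-joint hand used only to illustrate the interface."""

  def _build(self, name: str = 'sketch_hand'):
    from dm_control import mjcf  # Local import; only this sketch needs it.
    self._mjcf_root = mjcf.RootElement(model=name)
    palm = self._mjcf_root.worldbody.add('body', name='palm')
    palm.add('geom', type='box', size=[0.02, 0.02, 0.02])
    finger = palm.add('joint', name='finger', type='slide', axis=[0, 0, 1])
    self._actuator = self._mjcf_root.actuator.add(
        'position', name='finger_act', joint=finger)
    self._tcp = palm.add('site', name='tcp', pos=[0, 0, 0.04])
    self._joints = [finger]

  @property
  def joints(self) -> Sequence[types.MjcfElement]:
    return self._joints

  @property
  def actuators(self) -> Sequence[types.MjcfElement]:
    return [self._actuator]

  @property
  def mjcf_model(self) -> types.MjcfElement:
    return self._mjcf_root

  @property
  def name(self) -> str:
    return self._mjcf_root.model

  @property
  def tool_center_point(self) -> types.MjcfElement:
    return self._tcp
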
# The interfaces of moma's RobotHand and dm_control's RobotHand intersect.
# In particular:
# * tool_center_point
# * actuators
# * dm_control.composer.Entity as a common base class.
#
# Some code is intended to be compatible with either type, and can use this
# Gripper type to express that intent.
AnyRobotHand = Union[RobotHand, robot_base.RobotHand]
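

# A hedged usage sketch (hypothetical helper, not part of this module): code
# that accepts either hand type can rely on the shared interface noted above.
def _num_hand_actuators(hand: AnyRobotHand) -> int:
  # `actuators` is common to both RobotHand variants.
  return len(hand.actuators)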
| {
"content_hash": "18c3612ffe85cae7f1872cce60d45fd7",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 31.069444444444443,
"alnum_prop": 0.753687974966473,
"repo_name": "deepmind/dm_robotics",
"id": "627b688ecfde131f76e0a47bdcbd0ec5da079dcf",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/moma/models/end_effectors/robot_hands/robot_hand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "479450"
},
{
"name": "CMake",
"bytes": "34173"
},
{
"name": "Jupyter Notebook",
"bytes": "106284"
},
{
"name": "Python",
"bytes": "1413203"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
} |
import os
import attest

from os.path import join, isfile

from acrylamid import core, log, utils, helpers
from acrylamid.compat import iteritems
from acrylamid.commands import compile
from acrylamid.defaults import conf

# suppress warnings
log.init('acrylamid', 40)

options = type('Options', (object, ), {
    'ignore': False, 'force': False, 'dryrun': False, 'parser': 'compile'})

def entry(**kw):
L = [('title', 'Hänsel and Gretel!'),
('date', '12.02.2012 15:46')]
res = ['---']
for k, v in L:
if k not in kw:
res.append('%s: %s' % (k, v))
for k, v in iteritems(kw):
res.append('%s: %s' % (k, v))
res.append('---')
res.append('')
res.append('# Test')
res.append('')
res.append('This is supercalifragilisticexpialidocious.')
return '\n'.join(res)
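
# For reference (derived directly from the code above), entry() with no
# keyword overrides renders the following document:
#
#   ---
#   title: Hänsel and Gretel!
#   date: 12.02.2012 15:46
#   ---
#
#   # Test
#
#   This is supercalifragilisticexpialidocious.
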

class SingleEntry(attest.TestBase):

    @classmethod
def __context__(self):
with attest.tempdir() as path:
self.path = path
os.chdir(self.path)
os.mkdir('content/')
os.mkdir('layouts/')
with open('layouts/main.html', 'w') as fp:
fp.write('{{ env.entrylist[0].content }}\n')
self.conf = core.Configuration(conf)
self.env = core.Environment({'options': options, 'globals': utils.Struct()})
self.conf['filters'] = ['HTML']
self.conf['views'] = {'/:year/:slug/': {'view': 'entry'}}
yield

    @attest.test
    def exists_at_permalink(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry())
compile(self.conf, self.env)
assert isfile(join('output/', '2012', 'haensel-and-gretel', 'index.html'))

    @attest.test
def renders_custom_permalink(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry(permalink='/about/me.asp'))
compile(self.conf, self.env)
assert isfile(join('output/', 'about', 'me.asp'))

    @attest.test
def appends_index(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry(permalink='/about/me/'))
compile(self.conf, self.env)
assert isfile(join('output/', 'about', 'me', 'index.html'))

    @attest.test
def plaintext(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry(permalink='/'))
compile(self.conf, self.env)
expected = '# Test\n\nThis is supercalifragilisticexpialidocious.'
assert open('output/index.html').read() == expected

    @attest.test
def markdown(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry(permalink='/', filter='[Markdown]'))
compile(self.conf, self.env)
expected = '<h1>Test</h1>\n<p>This is supercalifragilisticexpialidocious.</p>'
assert open('output/index.html').read() == expected

    @attest.test
def fullchain(self):
with open('content/bla.txt', 'w') as fp:
fp.write(entry(permalink='/', filter='[Markdown, h1, hyphenate]', lang='en'))
compile(self.conf, self.env)
expected = ('<h2>Test</h2>\n<p>This is su­per­cal­ifrag­'
'ilis­tic­ex­pi­ali­do­cious.</p>')
assert open('output/index.html').read() == expected


class MultipleEntries(attest.TestBase):

    @classmethod
def __context__(self):
with attest.tempdir() as path:
self.path = path
os.chdir(self.path)
os.mkdir('content/')
os.mkdir('layouts/')
with open('layouts/main.html', 'w') as fp:
fp.write('{{ env.entrylist[0].content }}\n')
with open('layouts/atom.xml', 'w') as fp:
fp.write("{% for entry in env.entrylist %}\n{{ entry.content ~ '\n' }}\n{% endfor %}")
self.conf = core.Configuration(conf)
self.env = core.Environment({'options': options, 'globals': utils.Struct()})
self.conf['filters'] = ['Markdown', 'h1']
self.conf['views'] = {'/:year/:slug/': {'view': 'entry'},
'/atom.xml': {'view': 'Atom', 'filters': ['h2', 'summarize+2']}}
yield

    @attest.test
def markdown(self):
with open('content/foo.txt', 'w') as fp:
fp.write(entry(title='Foo'))
with open('content/bar.txt', 'w') as fp:
fp.write(entry(title='Bar'))
compile(self.conf, self.env)
expected = '<h2>Test</h2>\n<p>This is supercalifragilisticexpialidocious.</p>'
assert open('output/2012/foo/index.html').read() == expected
assert open('output/2012/bar/index.html').read() == expected
| {
"content_hash": "8fcb4e44110f8f15efe868ba43a247aa",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 102,
"avg_line_length": 31.185430463576157,
"alnum_prop": 0.5582926311318751,
"repo_name": "markvl/acrylamid",
"id": "322aa6fce92242bb77f8a12d4adc2e8e8ee661ad",
"size": "4735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specs/content.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2887"
},
{
"name": "Perl",
"bytes": "12164"
},
{
"name": "Python",
"bytes": "339946"
}
],
"symlink_target": ""
} |
from configure import Configuration
from scriptparser import ScriptParser

from PySide import QtGui, QtCore

from view import mainwindow


## Controller class for the Main window.
class ControlMainWindow(QtGui.QMainWindow):

    ## The constructor.
    #
    # @param self The object pointer.
    # @param parent The parent window of this window; defaults to None.
    def __init__(self, parent=None):
super(ControlMainWindow, self).__init__(parent)
self._ui = mainwindow.Ui_MainWindow()
self._ui.setupUi(self)
Configuration("config.ini")
self._scriptparser = ScriptParser("")
self._ui.action_Quit.triggered.connect(self.quitprogram)
self._ui.actionOpen_script.triggered.connect(self.openfile)
self._scriptparser.fnf.connect(self.displayerror)

    ## Appends an error message to the text edit.
    #
    # @param self The object pointer.
    # @param string The error message to display.
    def displayerror(self, string):
text = self._ui.textEdit.toPlainText().strip()
text = text + "\n" + string
self._ui.textEdit.setText(text)

    ## Quits the application.
    #
    # @param self The object pointer.
    def quitprogram(self):
QtCore.QCoreApplication.instance().quit()

    ## Prompts for a script file and hands it to the parser.
    #
    # @param self The object pointer.
    def openfile(self):
fileName = QtGui.QFileDialog.getOpenFileName(self,
("Open script file"),
"",
("Script file [.csh, .sh, .py] (*.csh *.py *.sh)"))
if fileName[0] != "":
self._ui.textEdit.clear()
self._scriptparser.setrootfile(fileName[0])
            self._scriptparser.parseFile()
| {
"content_hash": "6f93e7f625be8d808ba3f6e427e70632",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 104,
"avg_line_length": 35.74418604651163,
"alnum_prop": 0.5875081327260898,
"repo_name": "oddurk/scriptviewer",
"id": "4fa8af9a7e51a99c7fbd84899a03bc62cdcd7fc4",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "control/ControlMainWindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21645"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |