| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
pramasoul/pyboard-fun | tone.py | Python | mit | 1,739 | 0.008626 |
import math
from pyb import DAC, micros, elapsed_micros
def tone1(freq):
t0 = micros()
dac = DAC(1)
w
|
hile True:
theta = 2*math.pi*float(elapsed_micros(t0))*freq/1e6
fv = math.sin(theta)
v = int(126.0 * fv) + 127
#print("Theta %f, sin %f, scaled %d" % (theta, fv, v))
#delay(100)
dac.write(v)
def tone2(freq):
t0 = micros()
dac = DAC(1)
omega = 2 * math.pi * freq / 1e6
while True:
|
theta = omega*float(elapsed_micros(t0))
fv = math.sin(theta)
v = int(126.0 * fv) + 127
#print("Theta %f, sin %f, scaled %d" % (theta, fv, v))
#delay(100)
dac.write(v)
def tone3(freq, l_buf=256):
dac = DAC(1)
dtheta = 2 * math.pi / l_buf
scale = lambda fv: int(126.0 * fv) + 127
buf = bytearray(scale(math.sin(dtheta*t)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone4(freq, l_buf=256):
dac = DAC(1)
dtheta = 2 * math.pi / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(math.sin(dtheta*t)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone5(freq, wavefun=lambda x: math.sin(2.0*math.pi*x), l_buf=256):
dac = DAC(1)
dt = 1.0 / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(wavefun(t*dt)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
def tone6(freq, wavefun=lambda x: math.sin(2.0*math.pi*x), l_buf=256, dacnum=1):
dac = DAC(dacnum)
dt = 1.0 / l_buf
scale = lambda fv: int(123 * fv) + 127
buf = bytearray(scale(wavefun(t*dt)) for t in range(l_buf))
dac.write_timed(buf, freq * l_buf, mode=DAC.CIRCULAR)
|
Kulmerov/Cinnamon | files/usr/share/cinnamon/cinnamon-settings-users/cinnamon-settings-users.py | Python | gpl-2.0 | 37,177 | 0.00382 |
#!/usr/bin/env python2
import sys, os
import pwd, grp
from gi.repository import Gtk, GObject, Gio, GdkPixbuf, AccountsService
import gettext
import shutil
import PIL
from PIL import Image
from random import randint
import re
import subprocess
gettext.install("cinnamon", "/usr/share/locale")
(INDEX_USER_OBJECT, INDEX_USER_PICTURE, INDEX_USER_DESCRIPTION) = range(3)
(INDEX_GID, INDEX_GROUPNAME) = range(2)
class GroupDialog (Gtk.Dialog):
def __init__ (self, label, value):
super(GroupDialog, self).__init
|
__()
try:
self.set_modal(True)
self.set_skip_taskbar_hint(True)
self.set_skip_p
|
ager_hint(True)
self.set_title("")
table = DimmedTable()
table.add_labels([label])
self.entry = Gtk.Entry()
self.entry.set_text(value)
self.entry.connect("changed", self._on_entry_changed)
table.add_controls([self.entry])
self.set_border_width(6)
box = self.get_content_area()
box.add(table)
self.show_all()
self.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK, )
self.set_response_sensitive(Gtk.ResponseType.OK, False)
except Exception, detail:
print detail
def _on_entry_changed(self, entry):
name = entry.get_text()
if " " in name or name.lower() != name:
entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_DIALOG_WARNING)
entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("The group name cannot contain upper-case or space characters"))
self.set_response_sensitive(Gtk.ResponseType.OK, False)
else:
entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
self.set_response_sensitive(Gtk.ResponseType.OK, True)
if entry.get_text() == "":
self.set_response_sensitive(Gtk.ResponseType.OK, False)
class DimmedTable (Gtk.Table):
def __init__ (self):
super(DimmedTable, self).__init__()
self.set_border_width(6)
self.set_row_spacings(8)
self.set_col_spacings(15)
def add_labels(self, texts):
row = 0
for text in texts:
if text != None:
label = Gtk.Label(text)
label.set_alignment(1, 0.5)
label.get_style_context().add_class("dim-label")
self.attach(label, 0, 1, row, row+1, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
row = row + 1
def add_controls(self, controls):
row = 0
for control in controls:
self.attach(control, 1, 2, row, row+1)
row = row + 1
class EditableEntry (Gtk.Notebook):
__gsignals__ = {
'changed': (GObject.SIGNAL_RUN_FIRST, None,
(str,))
}
PAGE_BUTTON = 0
PAGE_ENTRY = 1
def __init__ (self):
super(EditableEntry, self).__init__()
self.label = Gtk.Label()
self.entry = Gtk.Entry()
self.button = Gtk.Button()
self.button.set_alignment(0.0, 0.5)
self.button.set_relief(Gtk.ReliefStyle.NONE)
self.append_page(self.button, None);
self.append_page(self.entry, None);
self.set_current_page(0)
self.set_show_tabs(False)
self.set_show_border(False)
self.editable = False
self.show_all()
self.button.connect("released", self._on_button_clicked)
self.button.connect("activate", self._on_button_clicked)
self.entry.connect("activate", self._on_entry_validated)
self.entry.connect("changed", self._on_entry_changed)
def set_text(self, text):
self.button.set_label(text)
self.entry.set_text(text)
def _on_button_clicked(self, button):
self.set_editable(True)
def _on_entry_validated(self, entry):
self.set_editable(False)
self.emit("changed", entry.get_text())
def _on_entry_changed(self, entry):
self.button.set_label(entry.get_text())
def set_editable(self, editable):
if (editable):
self.set_current_page(EditableEntry.PAGE_ENTRY)
else:
self.set_current_page(EditableEntry.PAGE_BUTTON)
self.editable = editable
def set_tooltip_text(self, tooltip):
self.button.set_tooltip_text(tooltip)
def get_editable(self):
return self.editable
def get_text(self):
return self.entry.get_text()
class PasswordDialog(Gtk.Dialog):
def __init__ (self, user, password_mask, group_mask):
super(PasswordDialog, self).__init__()
self.user = user
self.password_mask = password_mask
self.group_mask = group_mask
self.set_modal(True)
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
self.set_title(_("Change Password"))
table = DimmedTable()
table.add_labels([_("New password"), None, _("Confirm password")])
self.new_password = Gtk.Entry()
self.new_password.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "reload")
self.new_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Generate a password"))
self.new_password.connect("icon-release", self._on_new_password_icon_released)
self.new_password.connect("changed", self._on_passwords_changed)
table.attach(self.new_password, 1, 3, 0, 1)
self.strengh_indicator = Gtk.ProgressBar()
self.strengh_indicator.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
self.strengh_indicator.set_fraction(0.0)
table.attach(self.strengh_indicator, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
self.strengh_indicator.set_size_request(-1, 1)
self.strengh_label = Gtk.Label()
self.strengh_label.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
self.strengh_label.set_alignment(1, 0.5)
table.attach(self.strengh_label, 2, 3, 1, 2)
self.confirm_password = Gtk.Entry()
self.confirm_password.connect("changed", self._on_passwords_changed)
table.attach(self.confirm_password, 1, 3, 2, 3)
self.show_password = Gtk.CheckButton(_("Show password"))
self.show_password.connect('toggled', self._on_show_password_toggled)
table.attach(self.show_password, 1, 3, 3, 4)
self.set_border_width(6)
box = self.get_content_area()
box.add(table)
self.show_all()
self.infobar = Gtk.InfoBar()
self.infobar.set_message_type(Gtk.MessageType.ERROR)
label = Gtk.Label(_("An error occured. Your password was not changed."))
content = self.infobar.get_content_area()
content.add(label)
table.attach(self.infobar, 0, 3, 4, 5)
self.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, _("Change"), Gtk.ResponseType.OK, )
self.set_passwords_visibility()
self.set_response_sensitive(Gtk.ResponseType.OK, False)
self.infobar.hide()
self.connect("response", self._on_response)
def _on_response(self, dialog, response_id):
if response_id == Gtk.ResponseType.OK:
self.change_password()
else:
self.destroy()
def change_password(self):
newpass = self.new_password.get_text()
self.user.set_password(newpass, "")
mask = self.group_mask.get_text()
if "nopasswdlogin" in mask:
subprocess.call(["gpasswd", "-d", self.user.get_user_name(), "nopasswdlogin"])
mask = mask.split(", ")
mask.remove("nopasswdlogin")
mask = ", ".join(mask)
self.group_mask.set_text(mask)
self.password_mask.set_text(u'\u2022\u2022\u2022\u2022\u2022\u2022')
self.destroy()
def set_passwords_visibility(self):
visible = self.show_password.get_active()
self.new_password.set_visibility(visible)
self.confirm_password.set_visibility(visible)
def _on_n
|
googleapis/python-documentai | samples/snippets/process_document_splitter_sample.py | Python | apache-2.0 | 3,497 | 0.001716 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [START documentai_process_splitter_document]
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# location = 'YOUR_PROJECT_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor in Cloud Console
# file_path = '/path/to/local/pdf'
def process_document_splitter_sample(
project_id: str, location: str, processor_id: str, file_path: str
):
from google.cloud import documentai_v1beta3 as documentai
# You must set the api_endpoint if you use a location other than 'us', e.g.:
opts = {}
if location == "eu":
opts = {"api_endpoint": "eu-documentai.googleapis.com"}
client = documentai.DocumentProcessorServiceClient(client_options=opts)
# The full resource name of the processor, e.g.:
# projects/project-id/locations/location/processor/processor-id
# You must create new processors in the Cloud Console first
name = f"projects/{project_id}/locations/{location}/processors/{processor_id}"
with open(file_path, "rb") as image:
image_content = image.read()
# Read the file into memory
document = {"content": image_content, "mime_type": "application/pdf"}
# Configure the process request
request = {"name": name, "raw_document": document}
# Recognizes text entities in the PDF document
result = client.process_document(request=request)
print("Document processing complete.\n")
# Read the splitter outpu
|
t from the document splitter processor:
# https://cloud.google.com/document-ai/docs/processors-list#processor_doc-splitter
# This processor only provides text for the document and information on how
# to split the document on logical boundaries. To identify and ext
|
ract text,
# form elements, and entities please see other processors like the OCR, form,
# and specialized processors.
document = result.document
print(f"Found {len(document.entities)} subdocuments:")
for entity in document.entities:
conf_percent = "{:.1%}".format(entity.confidence)
pages_range = page_refs_to_string(entity.page_anchor.page_refs)
# Print subdocument type information, if available
try:
doctype = entity.type
print(
f'{conf_percent} confident that {pages_range} a "{doctype}" subdocument.'
)
except AttributeError:
print(f"{conf_percent} confident that {pages_range} a subdocument.")
def page_refs_to_string(page_refs: dict) -> str:
""" Converts a page ref to a string describing the page or page range."""
if len(page_refs) == 1:
num = str(int(page_refs[0].page) + 1)
return f"page {num} is"
else:
start = str(int(page_refs[0].page) + 1)
end = str(int(page_refs[1].page) + 1)
return f"pages {start} to {end} are"
# [END documentai_process_splitter_document]
|
openstack/glance | glance/version.py | Python | apache-2.0 | 731 | 0 |
# Copyright 2
|
012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is dis
|
tributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('glance')
version_string = version_info.version_string
|
cswaroop/airflow | airflow/contrib/hooks/__init__.py | Python | apache-2.0 | 264 | 0 |
'''
Imports the hooks dynamically while keeping the package API clean,
abstracting the underlying modules
'''
from airflow.utils import import_module_attrs as _import_mod
|
ule_attrs
_hooks = {
'ftp_hook':
|
['FTPHook'],
}
_import_module_attrs(globals(), _hooks)
|
stharrold/demo | demo/app_template/template.py | Python | mit | 1,935 | 0.001034 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Application template.
"""
# Import standard packages.
import inspect
import logging
# Import installed packages.
import matplotlib.pyplot as plt
import seaborn as sns
# Import local packages.
from .. import utils
# Define module exports:
__all__ = ['prepend_this']
# Define state settings and globals.
# Note: For non-root-level loggers, use `getLogger(__name__)`
# http://stackoverflow.com/questions/17336680/python-logging-with-multiple-modules-does-not-work
logger = logging.getLogger(__name__)
# Set the matplotlib backend to the Anti-Grain Geometry C++ library.
# Note: Use plt.switch_backend since matplotlib.use('agg') before importing pyplot fails.
plt.switch_backend('agg')
# Set matplotlib styles with seaborn
sns.set()
def prepend_this(app_arg:str):
r"""Prepend the application argument with 'Prepended '
Args:
app_arg (str): `str` to prepend.
Returns:
app_ret (str): Prepended `str`.
Raises:
ValueError: Raised if not `isinstance(app_arg, str)`
"""
# Check arguments.
if not isinstance(app_arg, str):
raise ValueError(
("`app_arg` must be type `str`. " +
"Required: type(app_arg) == str. " +
"Given: type(app_arg) == {typ}").format(
typ=type(app_arg)))
# Define 'here' for logger and log
|
arguments passed.
here = inspect.stack()[0].function
frame = inspect.currentframe()
(args, *_, values) = inspect.getargvalues(frame)
logger.info(here+": Argument values: {args_values}".format(
args_values=[(arg, values[arg]) for arg in sorted(args)]))
# Log the code version from util.__version__.
logger.info(here+": Version = {version}".format(version=utils.__version__))
# Prepend the argument and return.
app_ret = 'Prepended '+app_arg
return app_ret
| |
PatrickCmd/django_local_library | catalog/forms.py | Python | apache-2.0 | 776 | 0.016753 |
from django import f
|
orms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
import datetime # for checking renewal date range
clas
|
s RenewBookForm(forms.Form):
renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3). ")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
# check date is not in past
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
# check date is in range librarian allowed to change(+4 weeks)
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
# Returning the cleaned data
return data
|
iocanto/bug-python-libraries | ButtonEvent.py | Python | gpl-3.0 | 2,246 | 0.026269 |
'''
*******************************************************************************
* ButtonEvent.py is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ButtonEvent.py is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ButtonEvent.py. If not, see <http://www.gnu.org/licenses/>.
********************************************************************************
Created on Jan 5, 2010
@author: iocanto
'''
BUTTON_SELECT = 257
BUTTON_HOTKEY_1 = 258;
BUTTON_HOTKEY_2 = 259;
BUTTON_HOTKEY_3 = 260;
BUTTON_HOTKEY_4 = 261;
BUTTON_RIGHT = 262;
BUTTON_LEFT = 263;
BUTTON_UP = 264;
BUTTON_DOWN = 265;
KEY_UP = 0
KEY_DOWN = 1
class ButtonEvent():
# Constructor
def __init__(self, button = BUTTON_HOTKEY_1, action = KEY_UP ):
self.__button = button
self.__action = action
def __str__ (self):
return "ButtonEvent [__button %i]" % self.__button
def getAction(self):
return self.__action
def getButton(self):
return self.__button
def getButtonName(self):
return {
|
257 : "BUTTON_SELECT" ,
258 : "BUTTON_HOTKEY_1",
259 : "BUTTON_HOTKEY_2",
260 : "BUTTON_HOTKEY_3",
261 : "BUTTON_HOTKEY_4",
262 : "BUTTON_RIGHT" ,
263 : "BUTTON_LEFT" ,
264 : "BUTTON_UP" ,
|
265 : "BUTTON_DOWN" ,
}[self.__button]
def setAction(self, action):
self.__action = action
def setButton(self, button):
self.__button = button
|
derrickyoo/python-jumpstart | apps/09_real_estate_data_miner/concept_dicts.py | Python | mit | 933 | 0.005359 |
lookup = {}
lookup = dict()
lookup = {'age': 42, 'loc': 'Italy'}
lookup = dict(age=42, loc='Italy')
print(lookup)
print(lookup['loc'])
lookup['cat'] = 'cat'
if 'cat' in lookup:
print(lookup['cat'])
class Wizard:
# This actually creates a key value dictionary
def __init__(self, name, level):
self.level = level
self.name = name
# There is an implicit dictionary that stores this data
gandolf = Wizard('Gladolf', 42)
print(gandolf.__dict__)
# The takeaway is that all objects are built around the concept of dictionary data
|
structures
# Here is another example
import collections
User = collections.namedtuple('User', 'id, name, email')
users = [
User(1, 'user1', 'user1@test.com'),
User(2, 'user2', 'user2@test.com'),
User(3,
|
'user3', 'user3@test.com'),
]
lookup = dict()
for u in users:
lookup[u.email] = u
print(lookup['user2@test.com'])
|
nirenzang/Serpent-Pyethereum-Tutorial | pyethereum/ethereum/slogging.py | Python | gpl-3.0 | 10,541 | 0.001613 |
import logging
import json
import textwrap
from json.encoder import JSONEncoder
from logging import StreamHandler, Formatter, FileHandler
from ethereum.utils import bcolors, is_numeric
DEFAULT_LOGLEVEL = 'INFO'
JSON_FORMAT = '%(message)s'
PRINT_FORMAT = '%(levelname)s:%(name)s\t%(message)s'
FILE_PREFIX = '%(asctime)s'
TRACE = 5
known_loggers = set()
log_listeners = []
def _inject_into_logger(name, code, namespace=None):
# This is a hack to fool the logging module into reporting correct source files.
# It determines the actual source of a logging call by inspecting the stack frame's
# source file. So we use this `eval(compile())` construct to "inject" our additional
# methods into the logging module.
if namespace is None:
namespace = {}
eval(
compile(
code,
logging._srcfile,
'exec'
),
namespace
)
setattr(logging.Logger, name, namespace[name])
# Add `trace()` level to Logger
_inject_into_logger(
'trace',
textwrap.dedent(
"""\
def trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
"""
),
{'TRACE': TRACE}
)
logging.TRACE = TRACE
logging.addLevelName(TRACE, "TRACE")
# Add `DEV()` shortcut to loggers
_inject_into_logger(
'DEV',
textwrap.dedent(
"""\
def DEV(self, msg, *args, **kwargs):
'''Shortcut to output highlighted log text'''
kwargs['highlight'] = True
self.critical(msg, *args, **kwargs)
"""
)
)
class LogRecorder(object):
"""
temporarily records all logs, w/o level filtering
use only once!
"""
max_capacity = 1000 * 1000 # check we are not forgotten or abused
def __init__(self, disable_other_handlers=False, log_config=None):
self._records = []
log_listeners.append(self._add_log_record)
self._saved_config = None
if log_config:
self._saved_config = get_configuration()
configure(log_config)
self._saved_handlers = []
if disable
|
_other_handlers:
self._saved_handlers = rootLogger.handlers[:]
rootLogger.handlers = []
def pop_records(self):
# onl
|
y returns records on the first call
r = self._records[:]
self._records = []
try:
log_listeners.remove(self._add_log_record)
except ValueError:
pass
if self._saved_config:
configure(**self._saved_config)
self._saved_config = None
if self._saved_handlers:
rootLogger.handlers = self._saved_handlers[:]
self._saved_handlers = []
return r
def _add_log_record(self, msg):
self._records.append(msg)
assert len(self._records) < self.max_capacity
def get_configuration():
"""
get a configuration (snapshot) that can be used to call configure
snapshot = get_configuration()
configure(**snapshot)
"""
root = getLogger()
name_levels = [('', logging.getLevelName(root.level))]
name_levels.extend(
(name, logging.getLevelName(logger.level))
for name, logger
in root.manager.loggerDict.items()
if hasattr(logger, 'level')
)
config_string = ','.join('%s:%s' % x for x in name_levels)
return dict(config_string=config_string, log_json=SLogger.manager.log_json)
def get_logger_names():
return sorted(known_loggers, key=lambda x: '' if not x else x)
class BoundLogger(object):
def __init__(self, logger, context):
self.logger = logger
self.context = context
def bind(self, **kwargs):
return BoundLogger(self, kwargs)
def _proxy(self, method_name, *args, **kwargs):
context = self.context.copy()
context.update(kwargs)
return getattr(self.logger, method_name)(*args, **context)
trace = lambda self, *args, **kwargs: self._proxy('trace', *args, **kwargs)
debug = lambda self, *args, **kwargs: self._proxy('debug', *args, **kwargs)
info = lambda self, *args, **kwargs: self._proxy('info', *args, **kwargs)
warn = warning = lambda self, *args, **kwargs: self._proxy('warning', *args, **kwargs)
error = lambda self, *args, **kwargs: self._proxy('error', *args, **kwargs)
exception = lambda self, *args, **kwargs: self._proxy('exception', *args, **kwargs)
fatal = critical = lambda self, *args, **kwargs: self._proxy('critical', *args, **kwargs)
class _LogJSONEncoder(JSONEncoder):
def default(self, o):
return repr(o)
class SLogger(logging.Logger):
def __init__(self, name, level=DEFAULT_LOGLEVEL):
self.warn = self.warning
super(SLogger, self).__init__(name, level=level)
@property
def log_json(self):
return SLogger.manager.log_json
def is_active(self, level_name='trace'):
return self.isEnabledFor(logging._checkLevel(level_name.upper()))
def format_message(self, msg, kwargs, highlight, level):
if getattr(self, 'log_json', False):
message = dict()
message['event'] = '{}.{}'.format(self.name, msg.lower().replace(' ', '_'))
message['level'] = logging.getLevelName(level)
try:
message.update(kwargs)
try:
msg = json.dumps(message, cls=_LogJSONEncoder)
except TypeError:
# Invalid value. With our custom encoder this can only happen with non-string
# dict keys (see: https://bugs.python.org/issue18820).
message = _stringify_dict_keys(message)
msg = json.dumps(message, cls=_LogJSONEncoder)
except UnicodeDecodeError:
message.update({
k: v if is_numeric(v) or isinstance(v, (float, complex)) else repr(v)
for k, v in kwargs.items()
})
msg = json.dumps(message, cls=_LogJSONEncoder)
else:
msg = "{}{} {}{}".format(
bcolors.WARNING if highlight else "",
msg,
" ".join("{}={!s}".format(k, v) for k, v in kwargs.items()),
bcolors.ENDC if highlight else ""
)
return msg
def bind(self, **kwargs):
return BoundLogger(self, kwargs)
def _log(self, level, msg, args, **kwargs):
exc_info = kwargs.pop('exc_info', None)
extra = kwargs.pop('extra', {})
highlight = kwargs.pop('highlight', False)
extra['kwargs'] = kwargs
extra['original_msg'] = msg
msg = self.format_message(msg, kwargs, highlight, level)
super(SLogger, self)._log(level, msg, args, exc_info, extra)
class RootLogger(SLogger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
super(RootLogger, self).__init__("root", level)
def handle(self, record):
if log_listeners:
rec_dict = getattr(record, 'kwargs', {}).copy()
rec_dict['event'] = getattr(record, 'original_msg', "")
for listener in log_listeners:
listener(rec_dict)
super(RootLogger, self).handle(record)
class SManager(logging.Manager):
def __init__(self, rootnode):
self.loggerClass = SLogger
self.log_json = False
super(SManager, self).__init__(rootnode)
def getLogger(self, name):
logging.setLoggerClass(SLogger)
return super(SManager, self).getLogger(name)
rootLogger = RootLogger(DEFAULT_LOGLEVEL)
SLogger.root = rootLogger
SLogger.manager = SManager(SLogger.root)
def _stringify_dict_keys(input_):
if isinstance(input_, dict):
res = {}
for k, v in input_.items():
v = _stringify_dict_keys(v)
if not isinstance(k, (int, long, bool, None.__class__)):
k =
|
my-zhang/nand2tetris | ch10-frontend/jcompiler/cli.py | Python | mit | 709 | 0.026798 |
import re
import os
import sys
from jcompiler.token import tokenize
from jcompiler.parse import Parser
import jcompiler.xmlutil as xmlutil
def remove_comments(s):
return re.sub(r'(\s*//.*)|(\s*/\*(.|\n)*?\*/\s*)', '', s)
if __name__ == '__ma
|
in__':
if len(sys.argv) < 2:
print 'a input file is needed'
sys.exit(1)
fname = sys.argv[1]
if not os.path.isfile(fname):
print 'not a valid file path: %s' % fname
sys.exit(1)
with open(fname, 'r') as f:
source = remove_comments(f.read())
parser = Parser(tokenize(source))
tree = parser.parse_
|
tree()
# print tree
print xmlutil.dump_parse_tree(tree)
|
rootio/rootio_web | alembic/versions/4d0be367f095_station_timezone.py | Python | agpl-3.0 | 644 | 0.01087 |
"""add timezone to each station
Revision ID: 4d0be367f09
|
5
Revises: 6722b0ef4e1
Create Date: 2014-03-19 16:43:00.326820
"""
# revision identifiers, used by Alembic.
revision = '4d0be367f09
|
5'
down_revision = '6722b0ef4e1'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('radio_station', sa.Column('timezone', sa.String(length=32), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('radio_station', 'timezone')
### end Alembic commands ###
|
ipanova/pulp_ostree | common/setup.py | Python | gpl-2.0 | 321 | 0 |
from setuptools import setup, find_packages
setup(
name='pulp_ostree_common',
version='1.0.0a2',
packages=find_packages(),
url='http://www.pulpproject.or
|
g',
license='GPLv2+',
author='Pulp Team',
author_email='pulp-list@redhat.com',
description='common code for
|
pulp\'s ostree support',
)
|
9and3r/RPi-InfoScreen-Kivy | screens/mythtv/screen.py | Python | gpl-3.0 | 6,297 | 0.000476 |
import os
import sys
import datetime as dt
import json
from itertools import groupby
from kivy.properties import (StringProperty,
DictProperty,
ListProperty,
BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import Screen
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from core.bglabel import BGLabel
from MythTV import MythBE
EPOCH = dt.datetime(1970, 1, 1)
class MythRecording(BoxLayout):
"""Widget class for displaying information about upcoming recordings."""
rec = DictProperty({})
bg = ListProperty([0.1, 0.15, 0.15, 1])
def __init__(self, **kwargs):
super(MythRecording, self).__init__(**kwargs)
self.rec = kwargs["rec"]
class MythRecordingHeader(BGLabel):
"""Widget class for grouping recordings by day."""
rec_date = StringProperty("")
def __init__(self, **kwargs):
super(MythRecordingHeader, self).__init__(**kwargs)
self.bgcolour = [0.1, 0.1, 0.4, 1]
self.rec_date = kwargs["rec_date"]
class MythTVScreen(Screen):
"""Main screen class for MythTV schedule.
Screen attempts to connect to MythTV backend and retrieve list of
upcoming recordings and display this.
Data is cached so that information can still be viewed even if backend
is offline (e.g. for power saving purposes).
"""
backendonline = BooleanProperty(False)
isrecording = BooleanProperty(False)
def __init__(self, **kwargs):
super(MythTVScreen, self).__init__(**kwargs)
# Get the path for the folder
scr = sys.modules[self.__class__.__module__].__file__
# Create variable to retain path to our cache file
self.screendir = os.path.dirname(scr)
self.cacheFile = os.path.join(self.screendir, "cache", "cache.json")
# Some other useful variable
self.runnin
|
g = False
self.rec_timer = None
self.status_timer = None
self.be = None
self.recs = None
def on_enter(self):
# We only update when we enter the screen. No need for regular updates.
self.getRecordings()
self.drawScreen()
self.checkRecordingStatus()
|
def on_leave(self):
pass
def cacheRecs(self, recs):
"""Method to save local copy of recordings. Backend may not be online
all the time so a cache enables us to display recordings even if we
can't poll the server for an update.
"""
with open(self.cacheFile, 'w') as outfile:
json.dump(recs, outfile)
def loadCache(self):
"""Retrieves cached recorings and returns as a python list object."""
try:
raw = open(self.cacheFile, 'r')
recs = json.load(raw)
except:
recs = []
return recs
def recs_to_dict(self, uprecs):
"""Converts the MythTV upcoming recording iterator into a list of
dict objects.
"""
raw_recs = []
recs = []
# Turn the response into a dict object and add to our list of recordings
for r in uprecs:
rec = {}
st = r.starttime
et = r.endtime
rec["title"] = r.title
rec["subtitle"] = r.subtitle if r.subtitle else ""
day = dt.datetime(st.year, st.month, st.day)
rec["day"] = (day - EPOCH).total_seconds()
rec["time"] = "{} - {}".format(st.strftime("%H:%M"),
et.strftime("%H:%M"))
rec["timestamp"] = (st - EPOCH).total_seconds()
rec["desc"] = r.description
raw_recs.append(rec)
# Group the recordings by day (so we can print a header)
for k, g in groupby(raw_recs, lambda x: x["day"]):
recs.append((k, list(g)))
return recs
def getRecordings(self):
"""Attempts to connect to MythTV backend and retrieve recordings."""
try:
# If we can connect then get recordings and save a local cache.
self.be = MythBE()
uprecs = self.be.getUpcomingRecordings()
self.recs = self.recs_to_dict(uprecs)
self.cacheRecs(self.recs)
self.backendonline = True
except:
# Can't connect so we need to set variables accordingly and try
# to load data from the cache.
self.be = None
self.recs = self.loadCache()
self.backendonline = False
def checkRecordingStatus(self):
"""Checks whether the backend is currently recording."""
try:
recbe = MythBE()
for recorder in recbe.getRecorderList():
if recbe.isRecording(recorder):
self.isrecording = True
break
except:
# If we can't connect to it then it can't be recording.
self.isrecording = False
def drawScreen(self):
"""Main method for rendering screen.
If there is recording data (live or cached) then is laid out in a
scroll view.
If not, the user is notified that the backend is unreachable.
"""
sv = self.ids.myth_scroll
sv.clear_widgets()
if self.recs:
# Create a child widget to hold the recordings.
self.sl = GridLayout(cols=1, size_hint=(1, None), spacing=2)
self.sl.bind(minimum_height=self.sl.setter('height'))
# Loop over the list of recordings.
for rec in self.recs:
# These are grouped by day so we need a header
day = dt.timedelta(0, rec[0]) + EPOCH
mrh = MythRecordingHeader(rec_date=day.strftime("%A %d %B"))
self.sl.add_widget(mrh)
# Then we loop over the recordings scheduled for that day
for r in rec[1]:
# and add them to the display.
mr = MythRecording(rec=r)
self.sl.add_widget(mr)
sv.add_widget(self.sl)
else:
lb = Label(text="Backend is unreachable and there is no cached"
" information")
sv.add_widget(lb)
|
sevas/sublime_cmake_snippets | compiler_completions.py | Python | mit | 1,676 | 0.001193 |
import sublime_plugin
from cmakehelpers.compilerflags import clang, gcc
from cmakehelpers.compilerflags import find_completions
COMPLETION_DATABASES = dict(
clang=dict(loader=clang, database=None),
gcc=dict(loader=gcc, database=None))
def log_message
|
(s):
print("CMakeSnippets: {0}".format(s))
def load_completion_data
|
bases():
global COMPLETION_DATABASES
for compiler_name, database_info in COMPLETION_DATABASES.iteritems():
loader = database_info['loader']
completion_database = loader.load_compiler_options_database()
log_message("Loading {0} options database: {1} entries".format(compiler_name, len(completion_database)))
database_info['database'] = completion_database
load_completion_databases()
class CompilerFlagAutocomplete(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if not view.match_selector(locations[0], "source.cmake"):
return []
cursor = locations[0]
# print('*** prefix: ' + str(prefix))
# print('*** cursor pos: ' + str(cursor))
line_region = view.line(cursor)
line_start, line_end = line_region.a, line_region.b
cursor_offset = cursor - line_start
current_line = view.substr(line_region)[:]
# print '*** line befor cursor: ', [current_line]
all_completions = list()
for compiler_name, database_info in COMPLETION_DATABASES.iteritems():
compiler_options_db = database_info['database']
all_completions.extend(find_completions(compiler_name, compiler_options_db, current_line, cursor_offset))
return all_completions
|
mvaled/sentry | src/sentry/models/projectownership.py | Python | bsd-3-clause | 5,206 | 0.000768 |
from __future__ import absolute_import
import operator
from django.db import models
from django.db.models import Q
from djan
|
go.utils import timezone
from sentry.db.models import Model, sane_repr
from sentry.db.models.fields import FlexibleForeignKey, JSONField
from sentry.ownership.grammar import load_schema
from functools import reduce
class ProjectOwnership(Model):
__core__ = True
project = FlexibleForeignKey("sentry.Project", uniq
|
ue=True)
raw = models.TextField(null=True)
schema = JSONField(null=True)
fallthrough = models.BooleanField(default=True)
auto_assignment = models.BooleanField(default=False)
date_created = models.DateTimeField(default=timezone.now)
last_updated = models.DateTimeField(default=timezone.now)
is_active = models.BooleanField(default=True)
# An object to indicate ownership is implicitly everyone
Everyone = object()
class Meta:
app_label = "sentry"
db_table = "sentry_projectownership"
__repr__ = sane_repr("project_id", "is_active")
@classmethod
def get_owners(cls, project_id, data):
"""
For a given project_id, and event data blob.
If Everyone is returned, this means we implicitly are
falling through our rules and everyone is responsible.
If an empty list is returned, this means there are explicitly
no owners.
"""
try:
ownership = cls.objects.get(project_id=project_id)
except cls.DoesNotExist:
ownership = cls(project_id=project_id)
rules = cls._matching_ownership_rules(ownership, project_id, data)
if not rules:
return cls.Everyone if ownership.fallthrough else [], None
owners = {o for rule in rules for o in rule.owners}
return filter(None, resolve_actors(owners, project_id).values()), rules
@classmethod
def get_autoassign_owner(cls, project_id, data):
"""
Get the auto-assign owner for a project if there are any.
Will return None if there are no owners, or a list of owners.
"""
try:
ownership = cls.objects.get(project_id=project_id)
except cls.DoesNotExist:
return None
if not ownership.auto_assignment:
return None
rules = cls._matching_ownership_rules(ownership, project_id, data)
if not rules:
return None
score = 0
owners = None
# Automatic assignment prefers the owner with the longest
# matching pattern as the match is more specific.
for rule in rules:
candidate = len(rule.matcher.pattern)
if candidate > score:
score = candidate
owners = rule.owners
actors = filter(None, resolve_actors(owners, project_id).values())
# Can happen if the ownership rule references a user/team that no longer
# is assigned to the project or has been removed from the org.
if not actors:
return None
return actors[0].resolve()
@classmethod
def _matching_ownership_rules(cls, ownership, project_id, data):
rules = []
if ownership.schema is not None:
for rule in load_schema(ownership.schema):
if rule.test(data):
rules.append(rule)
return rules
def resolve_actors(owners, project_id):
""" Convert a list of Owner objects into a dictionary
of {Owner: Actor} pairs. Actors not identified are returned
as None. """
from sentry.api.fields.actor import Actor
from sentry.models import User, Team
if not owners:
return {}
users, teams = [], []
owners_lookup = {}
for owner in owners:
# teams aren't technically case insensitive, but teams also
# aren't allowed to have non-lowercase in slugs, so
# this kinda works itself out correctly since they won't match
owners_lookup[(owner.type, owner.identifier.lower())] = owner
if owner.type == "user":
users.append(owner)
elif owner.type == "team":
teams.append(owner)
actors = {}
if users:
actors.update(
{
("user", email.lower()): Actor(u_id, User)
for u_id, email in User.objects.filter(
reduce(operator.or_, [Q(emails__email__iexact=o.identifier) for o in users]),
# We don't require verified emails
# emails__is_verified=True,
is_active=True,
sentry_orgmember_set__organizationmemberteam__team__projectteam__project_id=project_id,
)
.distinct()
.values_list("id", "emails__email")
}
)
if teams:
actors.update(
{
("team", slug): Actor(t_id, Team)
for t_id, slug in Team.objects.filter(
slug__in=[o.identifier for o in teams], projectteam__project_id=project_id
).values_list("id", "slug")
}
)
return {o: actors.get((o.type, o.identifier.lower())) for o in owners}
|
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/PIL/ImageEnhance.py | Python | mit | 2,760 | 0 |
#
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
def enhance(self, factor):
"""
Return
|
s an enhanced image.
:param factor: A floating point value controlling the enhancement.
|
Factor 1.0 always returns a copy of the original image,
lower factors mean less color (brightness, contrast,
etc), and higher values more. There are no restrictions
on this value.
:rtype: :py:class:`~PIL.Image.Image`
"""
return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
"""Adjust image color balance.
This class can be used to adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
"""Adjust image contrast.
This class can be used to control the contrast of an image, similar
to the contrast control on a TV set. An enhancement factor of 0.0
gives a solid grey image. A factor of 1.0 gives the original image.
"""
def __init__(self, image):
self.image = image
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
class Brightness(_Enhance):
"""Adjust image brightness.
This class can be used to control the brightness of an image. An
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
original image.
"""
def __init__(self, image):
self.image = image
self.degenerate = Image.new(image.mode, image.size, 0)
class Sharpness(_Enhance):
"""Adjust image sharpness.
This class can be used to adjust the sharpness of an image. An
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
original image, and a factor of 2.0 gives a sharpened image.
"""
def __init__(self, image):
self.image = image
self.degenerate = image.filter(ImageFilter.SMOOTH)
|
clara-labs/spherecluster | examples/document_clustering.py | Python | mit | 8,298 | 0.00229 |
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
import numpy as np
from tabulate import tabulate
import logging
from sklearn.cluster import KMeans
from spherecluster import SphericalKMeans
from spherecluster import VonMisesFisherMixture
# modified from
# http://scikit-learn.org/stable/auto_examples/text/document_clustering.html
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Optional params
use_LSA = False
n_components = 500
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
vectorizer = TfidfVectorizer(stop_words='english', use_idf=True)
X = vectorizer.fit_transform(dataset.data)
print("n_samples: %d, n_features: %d" % X.shape)
print()
# table for results display
table = []
###############################################################################
# LSA for dimensionality reduction (and finding dense vectors)
if use_LSA:
print("Performing dimensionality reduction using LSA")
svd = TruncatedSVD(n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# K-Means clustering
km = KMeans(n_clusters=true_k, init='k-means++', n_init=20)
print("Clustering with %s" % km)
km.fit(X)
print()
table.append([
'k-means',
metrics.homogeneity_score(labels, km.labels_),
metrics.completeness_score(labels, km.labels_),
metrics.v_measure_score(labels, km.labels_),
metrics.adjusted_rand_score(labels, km.labels_),
metrics.adjusted_mutual_info_score(labels, km.labels_),
metrics.silhouette_score(X, km.labels_, metric='cosine')])
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Adjusted Mututal Information: %.3f"
% metrics.adjusted_mutual_info_score(labels, km.labels_))
print("Silhouette Coefficient (euclidean): %0.3f"
% metrics.silhouette_score(X, km.labels_, metric='euclidean'))
print("Silhouette Coefficient (cosine): %0.3f"
% metrics.silhouette_score(X, km.labels_, metric='cosine'))
print()
###############################################################################
# Spherical K-Means clustering
skm = SphericalKMeans(n_clusters=true_k, init='k-means++', n_init=20)
print("Clustering with %s" % skm)
skm.fit(X)
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, skm.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, skm.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, skm.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, skm.labels_))
print("Adjusted Mututal Information: %.3f"
% metrics.adjusted_mutual_info_score(labels, skm.labels_))
print("Silhouette Coefficient (euclidean): %0.3f"
% metrics.silhouette_score(X, skm.labels_, metric='euclidean'))
print("Silhouette Coefficient (cosine): %0.3f"
% metrics.silhouette_score(X, skm.labels_, metric='cosine'))
print()
table.append([
'spherical k-means',
metrics.homogeneity_score(labels, skm.labels_),
metrics.completeness_score(labels, skm.labels_),
metrics.v_measure_score(labels, skm.labels_),
metrics.adjusted_rand_score(labels, skm.labels_),
metrics.adjusted_mutual_info_score(labels, skm.labels_),
metrics.silhouette_score(X, skm.labels_, metric='cosine')])
##################################################################
|
#############
# Mixture of von Mises Fisher clustering (soft)
vmf_soft = VonMisesFisherMixture(n_clusters=true_k, posterior_type='soft',
init='random-class', n_init=20, force_weights=np.ones((true_k,))/true_k)
print("Clustering with %s" % vmf_soft)
vmf_soft.fit(X)
print()
print('weights: {}'.format(vmf_soft.weights_))
print('concentrations: {}'.format(vmf_soft.concentrations_))
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, vmf_soft.labels_))
print("Completeness: %0.3f" % m
|
etrics.completeness_score(labels, vmf_soft.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, vmf_soft.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, vmf_soft.labels_))
print("Adjusted Mututal Information: %.3f"
% metrics.adjusted_mutual_info_score(labels, vmf_soft.labels_))
print("Silhouette Coefficient (euclidean): %0.3f"
% metrics.silhouette_score(X, vmf_soft.labels_, metric='euclidean'))
print("Silhouette Coefficient (cosine): %0.3f"
% metrics.silhouette_score(X, vmf_soft.labels_, metric='cosine'))
print()
table.append([
'movMF-soft',
metrics.homogeneity_score(labels, vmf_soft.labels_),
metrics.completeness_score(labels, vmf_soft.labels_),
metrics.v_measure_score(labels, vmf_soft.labels_),
metrics.adjusted_rand_score(labels, vmf_soft.labels_),
metrics.adjusted_mutual_info_score(labels, vmf_soft.labels_),
metrics.silhouette_score(X, vmf_soft.labels_, metric='cosine')])
###############################################################################
# Mixture of von Mises Fisher clustering (hard)
vmf_hard = VonMisesFisherMixture(n_clusters=true_k, posterior_type='hard',
init='spherical-k-means', n_init=20, force_weights=np.ones((true_k,))/true_k)
print("Clustering with %s" % vmf_hard)
vmf_hard.fit(X)
print()
print('weights: {}'.format(vmf_hard.weights_))
print('concentrations: {}'.format(vmf_hard.concentrations_))
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, vmf_hard.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, vmf_hard.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, vmf_hard.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, vmf_hard.labels_))
print("Adjusted Mututal Information: %.3f"
% metrics.adjusted_mutual_info_score(labels, vmf_hard.labels_))
print("Silhouette Coefficient (euclidean): %0.3f"
% metrics.silhouette_score(X, vmf_hard.labels_, metric='euclidean'))
print("Silhouette Coefficient (cosine): %0.3f"
% metrics.silhouette_score(X, vmf_hard.labels_, metric='cosine'))
print()
table.append([
'movMF-hard',
metrics.homogeneity_score(labels, vmf_hard.labels_),
metrics.completeness_score(labels, vmf_hard.labels_),
metrics.v_measure_score(labels, vmf_hard.labels_),
metrics.adjusted_rand_score(labels, vmf_hard.labels_),
metrics.adjusted_mutual_info_score(labels, vmf_hard.labels_),
metrics.silhouette_score(X, vmf_hard.labels_, metric='cosine')])
###############################################################################
# Print all results in table
headers = [
'Homogeneity',
'Completeness',
'V-Me
|
jpfairbanks/streaming | server.py | Python | bsd-3-clause | 286 | 0.01049 |
import msgpackrpc
import time
class SumServer(object):
def sum(self, x, y
|
):
return x + y
def sleepy_sum(self, x, y):
time.sleep(1)
return x + y
server = msgpackrpc.Server(SumServer())
server.listen(msgpackrpc.Add
|
ress("localhost", 18800))
server.start()
|
Bradfield/algorithms-and-data-structures | book/deques/palindromes_test.py | Python | cc0-1.0 | 328 | 0 |
import unittest
|
from palindromes import is_palindrome
cases = (
('lsdkjfskf', False),
('radar', True),
('racecar', True),
)
class TestCorrectness(unittest.TestCase):
def test_identifies_palindromes(self):
for word, expectation in cases:
self.assertEqual(is_palindrome(word), expectati
|
on)
|
melvin0008/pythoncodestrial | first.py | Python | apache-2.0 | 1,107 | 0.01897 |
import ply.lex as lex
import re
tokens = (
'LANGLE', # <
'LANGLESLASH', # </
'RANGLE', # >
'EQUAL', # =
'STRING', # "hello"
'WORD') # Welcome!
state = (
("htmlcomment", "exclusive"),
)
t_ignore = ' '
def t_htmlcomment(token):
r'<!--'
token.lexer.begin('htmlcomment')
def t_htmlcomment_end(token):
r'-->'
token.lexer.lineno += token.value.count('\n')
token.lexer.begin('INITIAL')
#def t_htmlcomment_error(token):
# token.lexer.skip(1)
def t_newline(token):
r'\n'
token.lexer.lineno += 1
pass
def t_LANGLESLASH(token):
r'</'
return token
de
|
f t_LANGLE(token):
r'<'
return token
def t_RANGLE(token):
r'>'
return token
def t_EQUAL(token):
r'='
return token
def t_STRING(token):
r'"[^"]*"'
token.value = token.value[1:-1] # dropping off the double quotes
return token
def t_WORD(token):
|
r'[^ <>\n]+'
return token
webpage = "This is <!-- <b>my --> woag</b> webpage"
htmllexer = lex.lex()
htmllexer.input(webpage)
while True:
tok = htmllexer.token()
if not tok: break
print(tok)
|
owlabs/incubator-airflow | tests/contrib/operators/test_gcp_bigtable_operator.py | Python | apache-2.0 | 31,128 | 0.001542 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from typing import List, Dict
import google.api_core.exceptions
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.table import ClusterState
from parameterized import parameterized
from airflow import AirflowException
from airflow.contrib.operators.gcp_bigtable_operator import \
BigtableInstanceDeleteOperator, \
BigtableTableDeleteOperator, \
BigtableTableCreateOperator, \
BigtableTableWaitForReplicationSensor, \
BigtableClusterUpdateOperator, \
BigtableInstanceCreateOperator
from tests.compat import mock
PROJECT_ID = 'test_project_id'
INSTANCE_ID = 'test-instance-id'
CLUSTER_ID = 'test-cluster-id'
CLUSTER_ZONE = 'us-central1-f'
GCP_CONN_ID = 'test-gcp-conn-id'
NODES = 5
TABLE_ID = 'test-table-id'
INITIAL_SPLIT_KEYS = [] # type: List
EMPTY_COLUMN_FAMILIES = {} # type: Dict
class BigtableInstanceCreateTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, CLUSTER_ZONE),
('main_cluster_id', PROJECT_ID, INSTANCE_ID, '', CLUSTER_ZONE),
('main_cluster_zone', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
main_cluster_id,
main_cluster_zone, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableInstanceCreateOperator(
project_id=project_id,
instance_id=instance_id,
main_cluster_id=main_cluster_id,
main_cluster_zone=main_cluster_zone,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_create_instance_that_exists_empty_project_id(self, mock_hook):
mock_hook.return_value.get_instance.return_value = mock.Mock(Instance)
op = BigtableInstanceCreateOperator(
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_different_error_reraised(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
op = BigtableInstanceCreateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
mock_hook.return_value.create_instance.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError('error'))
with self.assertRaises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_instance.assert_called_once_with(
cluster_n
|
odes=None,
cluster_storage_type=None,
instance_display_name=None,
instance_id=INSTANCE_ID,
instance_labels=None,
instance_type=None,
main_cluster_id=CLUSTER_ID,
main_cluster_zone=CLUSTER_ZONE,
|
project_id=PROJECT_ID,
replica_cluster_id=None,
replica_cluster_zone=None,
timeout=None
)
class BigtableClusterUpdateTest(unittest.TestCase):
@parameterized.expand([
('instance_id', PROJECT_ID, '', CLUSTER_ID, NODES),
('cluster_id', PROJECT_ID, INSTANCE_ID, '', NODES),
('nodes', PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ''),
], testcase_func_name=lambda f, n, p: 'test_empty_attribute.empty_' + p.args[0])
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_empty_attribute(self, missing_attribute, project_id, instance_id,
cluster_id, nodes, mock_hook):
with self.assertRaises(AirflowException) as e:
BigtableClusterUpdateOperator(
project_id=project_id,
instance_id=instance_id,
cluster_id=cluster_id,
nodes=nodes,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
err = e.exception
self.assertEqual(str(err), 'Empty parameter: {}'.format(missing_attribute))
mock_hook.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists(self, mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
INSTANCE_ID))
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_cluster.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_bigtable_operator.BigtableHook')
def test_updating_cluster_but_instance_does_not_exists_empty_project_id(self,
mock_hook):
mock_hook.return_value.get_instance.return_value = None
with self.assertRaises(AirflowException) as e:
op = BigtableClusterUpdateOperator(
instance_id=INSTANCE_ID,
cluster_id=CLUSTER_ID,
nodes=NODES,
task_id="id",
gcp_conn_id=GCP_CONN_ID
)
op.execute(None)
err = e.exception
self.assertEqual(str(err), "Dependency: instance '{}' does not exist.".format(
            INSTANCE_ID))
|
coddingtonbear/django-mailbox
|
django_mailbox/south_migrations/0009_remove_references_table.py
|
Python
|
mit
| 2,520
| 0.007143
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field references on 'Message'
db.delete_table('django_mailbox_message_references')
def backwards(self, orm):
# Adding M2M table for field references on 'Message'
db.create_table('django_mailbox_message_references', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_message', models.ForeignKey(orm['django_mailbox.message'], null=False)),
('to_message', models.ForeignKey(orm['django_mailbox.message'], null=False))
))
db.create_unique('django_mailbox_message_references', ['from_message_id', 'to_message_id'])
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'from_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replies'", 'null': 'True', 'to': "orm['django_mailbox.Message']"}),
            'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_header': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['django_mailbox']
|
flaviogrossi/sockjs-cyclone
|
sockjs/cyclone/transports/jsonp.py
|
Python
|
mit
| 3,786
| 0.003698
|
import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
self.set_status(404)
return
        #data = self.request.body.decode('utf-8')
        data = self.request.body
        ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
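# Illustrative note (added; not part of the original handler): in the
# 'application/x-www-form-urlencoded' branch above a client would POST a body
# such as d=%5B%22hello%22%5D; unquote_plus turns the part after "d=" into
# ["hello"], which proto.json_decode parses into the message list handed to
# session.messagesReceived().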
|
darioizzo/piranha
|
tools/benchmark.py
|
Python
|
gpl-3.0
| 911
| 0.023052
|
import sys
import numpy as np
from scipy import stats
import subprocess as sp
import datetime
import socket
import os
exec_name = sys.argv[1]
max_t = int(sys.argv[2])
ntries = 5
tot_timings = []
for t_idx in range(1,max_t + 1):
cur_timings = []
for _ in range(ntries):
# Run the process.
        p = sp.Popen([exec_name,str(t_idx)],stdout=sp.PIPE,stderr=sp.STDOUT)
# Wait for it to finish and get stdout.
out = p.communicate()[0]
# Parse the stderr in order to find the time.
out = out.split(bytes('\n','ascii'))[1].split()[0][0:-1]
cur_timings.append(float(out))
tot_timings.append(cur_timings)
tot_timings = np.array(tot_timings)
retval = np.array([np.mean(tot_timings,axis=1),stats.sem(tot_timings,axis=1)])
fmt='{fname}_%Y%m%d%H%M%S'
filename = datetime.datetime.now().strftime(fmt).format(fname=socket.gethostname() + '_' + os.path.basename(exec_name)) + '.txt'
np.savetxt(filename,retval)
|
hanlind/nova
|
nova/tests/functional/api/client.py
|
Python
|
apache-2.0
| 15,004
| 0.000267
|
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
import six
from six.moves.urllib import parse
LOG = logging.getLogger(__name__)
class APIResponse(object):
"""Decoded API Response
This provides a decoded version of the Requests response which
include a json decoded body, far more convenient for testing that
returned structures are correct, or using parts of returned
structures in tests.
This class is a simple wrapper around dictionaries for API
responses in tests. It includes extra attributes so that they can
be inspected in addition to the attributes.
All json responses from Nova APIs are dictionary compatible, or
blank, so other possible base classes are not needed.
"""
status = 200
"""The HTTP status code as an int"""
content = ""
"""The Raw HTTP response body as a string"""
body = {}
"""The decoded json body as a dictionary"""
headers = {}
"""Response headers as a dictionary"""
def __init__(self, response):
"""Construct an API response from a Requests response
:param response: a ``requests`` library response
"""
super(APIResponse, self).__init__()
self.status = response.status_code
self.content = response.content
if self.content:
self.body = jsonutils.loads(self.content)
self.headers = response.headers
def __str__(self):
# because __str__ falls back to __repr__ we can still use repr
# on self but add in the other attributes.
return "<Response body:%r, status_code:%s>" % (self.body, self.status)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
_status = response.status_code
_body = response.content
message = ('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s' %
{'message': message, '_status': _status,
'_body': _body})
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
    def __init__(self, response=None, message=None):
if not message:
message = "Authentication error"
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Authorization error"
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = "Item not found"
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri,
project_id=None):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
if project_id is None:
self.project_id = "6f70656e737461636b20342065766572"
else:
self.project_id = project_id
self.microversion = None
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
response = requests.request(method, url, data=body, headers=_headers)
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status_code
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
self.auth_result = response.headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None,
strip_version=False, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
if strip_version:
# NOTE(vish): cut out version number and tenant_id
base_uri = '/'.join(base_uri.split('/', 3)[:-1])
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
if ('X-OpenStack-Nova-API-Version' in headers or
'OpenStack-API-Version' in headers):
raise Exception('Microversion should be set via '
'microversion attribute in API client.')
elif self.microversion:
headers['X-OpenStack-Nova-API-Version'] = self.microversion
headers['OpenStack-API-Version'] = 'compute %s' % self.microversion
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message="Unexpected status code",
response=response)
return response
def _decode_json(self, response):
resp = APIResponse(status=response.status_code)
if response.content:
resp.body = jsonutils.loads(response.content)
return resp
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
            headers = kwargs.setdefault('headers', {})
|
peterstace/project-euler
|
OLD_PY_CODE/project_euler_old_old/134/134.py
|
Python
|
unlicense
| 858
| 0.006993
|
from number_theory import int_pow, prime_sieve, prime, mod_exp
from itertools import count
from math import ceil, sqrt
def find_n(p1, p2):
"""
Finds n such that for consecutive primes p1 and p2 (p2 > p1), n is
divisible by p2 and the last digits of n are formed by p1.
"""
len_p1 = len(str(p1))
n = int_pow(10, len_p1)
totient_n = int_pow(2, len_p1 - 1) * 4 * int_pow(5, len_p1 - 1)
#now solve p_2.x == p_1 (mod n) i.e. x == p_2^(-1).p_1 (mod n)
x = mod_exp(p2, totient_n - 1, n)
x *= p1
x %= n
return x * p2
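# Worked example (added; not part of the original script): for the consecutive
# primes p1=19, p2=23 we need n == 19 (mod 100) and n == 0 (mod 23). The
# inverse of 23 modulo 100 is 87 (23*87 = 2001), so x = 87*19 % 100 = 53 and
# find_n(19, 23) returns 23*53 = 1219.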
#get primes up to 1000000 plus the next one
primes = prime_sieve(1000000)
p = primes[-1] + 2
while not prime(p):
p += 2
primes += [p]
primes = primes[2:]
summation = 0
for p_i in range(len(primes) - 1):
n = find_n(primes[p_i], primes[p_i + 1])
summation += n
print(summation)
| |
haxwithaxe/qutebrowser
|
tests/unit/browser/test_webelem.py
|
Python
|
gpl-3.0
| 30,783
| 0
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the webelement utils."""
from unittest import mock
import collections.abc
import operator
import itertools
import binascii
import os.path
import hypothesis
import hypothesis.strategies
from PyQt5.QtCore import PYQT_VERSION, QRect, QPoint
from PyQt5.QtWebKit import QWebElement
import pytest
from qutebrowser.browser import webelem
def get_webelem(geometry=None, frame=None, null=False, style=None,
display='', attributes=None, tagname=None, classes=None):
"""Factory for WebElementWrapper objects based on a mock.
Args:
geometry: The geometry of the QWebElement as QRect.
frame: The QWebFrame the element is in.
null: Whether the element is null or not.
style: A dict with the styleAttributes of the element.
attributes: Boolean HTML attributes to be added.
tagname: The tag name.
classes: HTML classes to be added.
"""
elem = mock.Mock()
elem.isNull.return_value = null
elem.geometry.return_value = geometry
elem.webFrame.return_value = frame
elem.tagName.return_value = tagname
elem.toOuterXml.return_value = '<fakeelem/>'
elem.toPlainText.return_value = 'text'
attribute_dict = {}
if attributes is None:
pass
elif not isinstance(attributes, collections.abc.Mapping):
attribute_dict.update({e: None for e in attributes})
else:
attribute_dict.update(attributes)
elem.hasAttribute.side_effect = lambda k: k in attribute_dict
elem.attribute.side_effect = lambda k: attribute_dict.get(k, '')
elem.setAttribute.side_effect = (lambda k, v:
operator.setitem(attribute_dict, k, v))
elem.removeAttribute.side_effect = attribute_dict.pop
elem.attributeNames.return_value = list(attribute_dict)
if classes is not None:
elem.classes.return_value = classes.split(' ')
else:
elem.classes.return_value = []
style_dict = {'visibility': '', 'display': ''}
if style is not None:
style_dict.update(style)
def _style_property(name, strategy):
"""Helper function to act as styleProperty method
|
."""
if strategy != QWebElement.ComputedStyle:
            raise ValueError("styleProperty called with strategy != "
                             "ComputedStyle ({})!".format(strategy))
return style_dict[name]
elem.styleProperty.side_effect = _style_property
wrapped = webelem.WebElementWrapper(elem)
return wrapped
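# Illustrative call (added; not part of the original tests -- the argument
# values are chosen only for the example):
#
#   elem = get_webelem(geometry=QRect(0, 0, 30, 30), tagname='a',
#                      attributes={'href': 'spam'})
#   assert elem['href'] == 'spam'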
class SelectionAndFilterTests:
"""Generator for tests for TestSelectionsAndFilters."""
# A mapping of a HTML element to a list of groups where the selectors
# (after filtering) should match.
#
# Based on this, test cases are generated to make sure it matches those
# groups and not the others.
TESTS = [
('<foo />', []),
('<foo bar="baz"/>', []),
('<foo href="baz"/>', [webelem.Group.url]),
('<foo src="baz"/>', [webelem.Group.url]),
('<a />', [webelem.Group.all]),
('<a href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<a href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<area />', [webelem.Group.all]),
('<area href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<area href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<link />', [webelem.Group.all]),
('<link href="foo" />', [webelem.Group.all, webelem.Group.links,
webelem.Group.prevnext, webelem.Group.url]),
('<link href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
('<textarea />', [webelem.Group.all]),
('<select />', [webelem.Group.all]),
('<input />', [webelem.Group.all]),
('<input type="hidden" />', []),
('<button />', [webelem.Group.all]),
('<button href="foo" />', [webelem.Group.all, webelem.Group.prevnext,
webelem.Group.url]),
('<button href="javascript://foo" />', [webelem.Group.all,
webelem.Group.url]),
# We can't easily test <frame>/<iframe> as they vanish when setting
# them via QWebFrame::setHtml...
('<p onclick="foo" foo="bar"/>', [webelem.Group.all]),
('<p onmousedown="foo" foo="bar"/>', [webelem.Group.all]),
('<p role="option" foo="bar"/>', [webelem.Group.all]),
('<p role="button" foo="bar"/>', [webelem.Group.all]),
('<p role="button" href="bar"/>', [webelem.Group.all,
webelem.Group.prevnext,
webelem.Group.url]),
]
GROUPS = [e for e in webelem.Group if e != webelem.Group.focus]
COMBINATIONS = list(itertools.product(TESTS, GROUPS))
def __init__(self):
self.tests = list(self._generate_tests())
def _generate_tests(self):
for (val, matching_groups), group in self.COMBINATIONS:
if group in matching_groups:
yield group, val, True
else:
yield group, val, False
class TestSelectorsAndFilters:
TESTS = SelectionAndFilterTests().tests
def test_test_generator(self):
assert self.TESTS
@pytest.mark.parametrize('group, val, matching', TESTS)
def test_selectors(self, webframe, group, val, matching):
webframe.setHtml('<html><body>{}</body></html>'.format(val))
# Make sure setting HTML succeeded and there's a new element
assert len(webframe.findAllElements('*')) == 3
elems = webframe.findAllElements(webelem.SELECTORS[group])
elems = [webelem.WebElementWrapper(e) for e in elems]
filterfunc = webelem.FILTERS.get(group, lambda e: True)
elems = [e for e in elems if filterfunc(e)]
assert bool(elems) == matching
class TestWebElementWrapper:
"""Generic tests for WebElementWrapper.
Note: For some methods, there's a dedicated test class with more involved
tests.
"""
@pytest.fixture
def elem(self):
return get_webelem()
def test_nullelem(self):
"""Test __init__ with a null element."""
with pytest.raises(webelem.IsNullError):
get_webelem(null=True)
def test_double_wrap(self, elem):
"""Test wrapping a WebElementWrapper."""
with pytest.raises(TypeError) as excinfo:
webelem.WebElementWrapper(elem)
assert str(excinfo.value) == "Trying to wrap a wrapper!"
@pytest.mark.parametrize('code', [
str,
lambda e: e[None],
lambda e: operator.setitem(e, None, None),
lambda e: operator.delitem(e, None),
lambda e: None in e,
len,
lambda e: e.is_visible(None),
lambda e: e.rect_on_view(),
lambda e: e.is_writable(),
lambda e: e.is_content_editable(),
lambda e: e.is_editable(),
lambda e: e.is_text_input(),
lambda e: e.debug_text(),
list, # __iter__
])
|
mmottahedi/neuralnilm_prototype
|
scripts/e362.py
|
Python
|
mit
| 5,901
| 0.009659
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 10
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1024,
# random_window=64,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.8,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
include_power=True,
# clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
1000: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=100000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
wevote/WeVoteServer
|
position/views_admin.py
|
Python
|
mit
| 50,531
| 0.004552
|
# position/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import generate_position_sorting_dates_for_election, positions_import_from_master_server, \
refresh_cached_position_info_for_election, \
refresh_positions_with_candidate_details_for_election, \
refresh_positions_with_contest_office_details_for_election, \
refresh_positions_with_contest_measure_details_for_election
from .models import ANY_STANCE, PositionEntered, PositionForFriends, PositionListManager, PERCENT_RATING
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateListManager, CandidateManager
from config.base import get_environment_variable
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.db.models import Q
from election.controllers import retrieve_election_id_list_by_year_list
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from measure.controllers import push_contest_measure_data_to_other_table_caches
from office.controllers import push_contest_office_data_to_other_table_caches
from office.models import ContestOfficeManager
from organization.models import OrganizationManager
from politician.models import PoliticianManager
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, convert_integer_to_string_with_comma_for_thousands_separator, \
positive_value_exists, STATE_CODE_MAP
from django.http import HttpResponse
import json
UNKNOWN = 'U'
POSITIONS_SYNC_URL = get_environment_variable("POSITIONS_SYNC_URL") # positionsSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
def positions_sync_out_view(request): # positionsSyncOut
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
json_data = {
'success': False,
'status': 'POSITION_LIST_CANNOT_BE_RETURNED-ELECTION_ID_REQUIRED'
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
stance_we_are_looking_for = ANY_STANCE
try:
# Only return public positions
position_list_query = PositionEntered.objects.order_by('date_entered')
# As of Aug 2018 we are no longer using PERCENT_RATING
position_list_query = position_list_query.exclude(stance__iexact=PERCENT_RATING)
position_list_query = position_list_query.filter(google_civic_election_id=google_civic_election_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY" it means we want to not filter down the list
position_list_query = position_list_query.filter(stance__iexact=stance_we_are_looking_for)
# convert datetime to str for date_entered and date_last_changed columns
position_list_query = position_list_query.extra(
select={'date_entered': "to_char(date_entered, 'YYYY-MM-DD HH24:MI:SS')"})
position_list_query = position_list_query.extra(
select={'date_last_changed': "to_char(date_last_changed, 'YYYY-MM-DD HH24:MI:SS')"})
position_list_dict = position_list_query.values(
'we_vote_id', 'ballot_item_display_name', 'ballot_item_image_url_https',
'ballot_item_twitter_handle', 'speaker_display_name',
            'speaker_image_url_https', 'speaker_twitter_handle', 'date_entered',
'date_last_changed', 'organization_we_vote_id', 'voter_we_vote_id',
'public_figure_we_vote_id', 'google_civic_election_id', 'state_code',
'vote_smart_rating_id', 'vote_smart_time_span', 'vote_smart_rating',
'vote_smart_rating_name', 'contest_office_we_vote_id', 'race_office_level',
            'candidate_campaign_we_vote_id', 'google_civic_candidate_name',
'politician_we_vote_id', 'contest_measure_we_vote_id', 'speaker_type', 'stance',
'position_ultimate_election_date', 'position_year',
'statement_text', 'statement_html', 'twitter_followers_count', 'more_info_url', 'from_scraper',
'organization_certified', 'volunteer_certified', 'voter_entering_position',
'tweet_source_id', 'twitter_user_entered_position', 'is_private_citizen')
if position_list_dict:
position_list_json = list(position_list_dict)
return HttpResponse(json.dumps(position_list_json), content_type='application/json')
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
json_data = {
'success': False,
'status': 'POSITION_LIST_MISSING'
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def positions_import_from_master_server_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
if WE_VOTE_SERVER_ROOT_URL in POSITIONS_SYNC_URL:
messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
"this is the Master We Vote Server.")
return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.INFO, 'Google civic election id is required for Positions import.')
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
results = positions_import_from_master_server(request, google_civic_election_id)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Positions import completed. '
'Saved: {saved}, Updated: {updated}, '
'Duplicates skipped: '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
def update_position_list_with_speaker_type(position_list):
organization_manager = OrganizationManager()
organization_dict = {}
for one_position in position_list:
position_change = False
speaker_type = UNKNOWN
twitter_followers_count = 0
if one_position.organization_we_vote_id in organization_dict:
organization = organization_dict[one_position.organizati
|
tbeadle/django
|
tests/migrations/test_auto_now_add/0001_initial.py
|
Python
|
bsd-3-clause
| 474
| 0.00211
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
],
),
]
|
memex-explorer/memex-explorer
|
source/memex/rest.py
|
Python
|
bsd-2-clause
| 8,218
| 0.003407
|
import shutil
import json
from rest_framework import routers, serializers, viewsets, parsers, filters
from rest_framework.views import APIView
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from django.core.validators import URLValidator
from base.models import Project, SeedsList
from apps.crawl_space.models import Crawl, CrawlModel
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, NotFoundError
class DataWakeIndexUnavailable(APIException):
status_code = 404
default_detail = "The server failed to find the DataWake index in elasticsearch."
class SlugModelSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
class Meta:
model = Project
class CrawlSerializer(SlugModelSerializer):
# Expose these fields, but only as read only.
id = serializers.ReadOnlyField()
seeds_list = serializers.FileField(read_only=True, use_url=False)
status = serializers.CharField(read_only=True)
config = serializers.CharField(read_only=True)
index_name = serializers.CharField(read_only=True)
url = serializers.CharField(read_only=True)
pages_crawled = serializers.IntegerField(read_only=True)
harvest_rate = serializers.FloatField(read_only=True)
location = serializers.CharField(read_only=True)
def validate_crawler(self, value):
if value == "ache" and not self.initial_data.get("crawl_model"):
raise serializers.ValidationError("Ache crawls require a Crawl Model.")
return value
class Meta:
model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
model = serializers.FileField(use_url=False)
features = serializers.FileField(use_url=False)
url = serializers.CharField(read_only=True)
def validate_model(self, value):
if value.name != "pageclassifier.model":
raise serializers.ValidationError("File must be named pageclassifier.model")
return value
def validate_features(self, value):
if value.name != "pageclassifier.features":
raise serializers.ValidationError("File must be named pageclassifier.features")
return value
class Meta:
model = CrawlModel
class SeedsListSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
file_string = serializers.CharField(read_only=True)
def validate_seeds(self, value):
try:
seeds = json.loads(value)
except ValueError:
raise serializers.ValidationError("Seeds must be a JSON encoded string.")
if type(seeds) != list:
raise serializers.ValidationError("Seeds must be an array of URLs.")
validator = URLValidator()
errors = []
for index, x in enumerate(seeds):
try:
validator(x)
except ValidationError:
# Add index to make it easier for CodeMirror to select the right
# line.
errors.append({index: x})
if errors:
errors.insert(0, "The seeds list contains invalid urls.")
errors.append({"list": "\n".join(seeds)})
raise serializers.ValidationError(errors)
return value
class Meta:
model = SeedsList
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'slug', 'name',)
class CrawlViewSet(viewsets.ModelViewSet):
queryset = Crawl.objects.all()
serializer_class = CrawlSerializer
filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
'crawl_model', 'crawler', 'seeds_object')
class CrawlModelViewSet(viewsets.ModelViewSet):
queryset = CrawlModel.objects.all()
serializer_class = CrawlModelSerializer
filter_fields = ('id', 'slug', 'name', 'project',)
def destroy(self, request, pk=None):
model = CrawlModel.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(crawl_model=pk)
if crawls:
message = "The Crawl Model is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
shutil.rmtree(model.get_model_path())
return super(CrawlModelViewSet, self).destroy(request)
class SeedsListViewSet(viewsets.ModelViewSet):
queryset = SeedsList.objects.all()
serializer_class = SeedsListSerializer
filter_fields = ('id', 'name', 'seeds', 'slug',)
def create(self, request):
# If a seeds file or a textseeds exists, then use those. Otherwise, look
# for a string in request.data["seeds"]
seeds_list = request.FILES.get("seeds", False)
textseeds = request.data.get("textseeds", False)
if seeds_list:
request.data["
|
seeds"] = json.dumps(map(str.strip, seeds_list.readlines()))
elif textseeds:
if type(textseeds) == unicode:
request.data["seeds"] = json.dumps(map(unicode.strip,
|
textseeds.split("\n")))
# Get rid of carriage return character.
elif type(textseeds) == str:
request.data["seeds"] = json.dumps(map(str.strip, textseeds.split("\n")))
return super(SeedsListViewSet, self).create(request)
def destroy(self, request, pk=None):
seeds = SeedsList.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(seeds_object=pk)
if crawls:
message = "The Seeds List is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
return super(SeedsListViewSet, self).destroy(request)
class DataWakeView(APIView):
index = "datawake"
es = Elasticsearch()
def create_trails(self, trail_ids):
trails = []
for x in trail_ids:
url_search = self.es.search(index=self.index, q="trail_id:%d" % x,
fields="url", size=1000)["hits"]["hits"]
new_trail = {"trail_id": x, "urls": [], "domain_name":url_search[0]["_type"]}
for y in url_search:
new_trail["urls"].append(y["fields"]["url"][0])
new_trail.update({"urls_string": "\n".join(new_trail["urls"])})
trails.append(new_trail)
return trails
def get(self, request, format=None):
# TODO: catch all exception. At the very least, deal with 404 not found and
# connection refused exceptions.
# Temporarily remove exceptions for debugging.
try:
trail_ids = [x["key"] for x in self.es.search(index=self.index, body={
"aggs" : {
"trail_id" : {
"terms" : { "field" : "trail_id" }
}
}
})["aggregations"]["trail_id"]["buckets"]]
response = self.create_trails(trail_ids)
except ConnectionError as e:
raise OSError("Failed to connect to local elasticsearch instance.")
except NotFoundError:
raise DataWakeIndexUnavailable
return Response(response)
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
router.register(r"seeds_list", SeedsListViewSet)
|
PolyJIT/buildbot
|
polyjit/buildbot/slaves.py
|
Python
|
mit
| 2,304
| 0.003038
|
from buildbot.plugins import worker
infosun = {
"polyjit-ci": {
"host": "polyjit-ci",
"password": None,
"properties": {
"uchroot_image_path": "/data/polyjit/xenial-image/",
"uchroot_binary": "/data/polyjit/erlent/build/uchroot",
"can_build_llvm_debug": False
},
},
"debussy": {
"host": "debussy",
"password": None,
"properties": {
"llvm_prefix": "/scratch/pjtest/llvm-03-11-2017_5.0",
"llvm_libs": "/scratch/pjtest/llvm-03-11-2017_5.0/lib",
"cc": "/scratch/pjtest/llvm-03-11-2017_5.0/bin/clang",
"cxx": "/scratch/pjtest/llvm-03-11-2017_5.0/bin/clang++",
"uchroot_image_path": "/local/hdd/buildbot-polyjit/disco-image/",
"uchroot_binary": "/scratch/pjtest/erlent/build/uchroot",
"testinputs": "/scratch/pjtest/pprof-test-data",
"cmake_prefix": "/scratch/pjtest/opt/cmake",
"has_munged": True,
"can_build_llvm_debug": True
}
},
"ligeti": {
"host": "ligeti",
"password": None,
"properties": {
"llvm_prefix": "/scratch/pjtest/llvm-03-11-2017_5.0",
"llvm_libs": "/scratch/pjtest/llvm-03-11-2017_5.0/lib",
"cc": "/scratch/pjtest/llvm-03-11-2017_5.0/bin/clang",
"cxx": "/scratch/pjtest/llvm-03-11-2017_5.0/bin/clang++",
"uchroot_image_path": "/local/hdd/buildbot-polyjit/disco-image/",
"uchroot_binary": "/scratch/pjtest/erlent/build/uchroot",
"testinputs": "/scratch/pjtest/pprof-test-data",
"cmake_prefix": "/scratch/pjtest/opt/cmake",
"has_munged": True,
"can_build_llvm_debug": True
}
}
}
def get_hostlist(slave_dict, predicate = None):
if not predicate:
predicate = lambda x : True
hosts = []
for k in slave_dict:
if predicate(slave_dict[k]):
hosts.append(slave_dict[k]["host"])
return hosts
def configure(c):
for k in infosun:
slave = infosun[k]
props = {}
if "properties" in slave:
props = slave["properties"]
c['workers'].append(worker.Worker(slave["host"], slave[
"password"], properties = props))
|
glenux/contrib-mypaint
|
gui/viewmanip.py
|
Python
|
gpl-2.0
| 3,703
| 0.00135
|
# This file is part of MyPaint.
# Copyright (C) 2014 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
"""Modes for manipulating the view"""
## Imports
import gui.mode
import math
from gettext import gettext as _
## Class defs
class PanViewMode (gui.mode.OneshotDragMode):
"""A oneshot mode for translating the viewport by dragging."""
ACTION_NAME = 'PanViewMode'
pointer_behavior = gui.mode.Behavior.CHANGE_VIEW
scroll_behavior = gui.mode.Behavior.NONE # XXX grabs ptr, so no CHANGE_VIEW
supports_button_switching = False
@classmethod
def get_name(cls):
return _(u"Scroll View")
def get_usage(self):
return _(u"Drag the canvas view")
@property
def inactive_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
@property
def active_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
def drag_update_cb(self, tdw, event, dx, dy):
tdw.scroll(-dx, -dy)
self.doc.notify_view_changed()
super(PanViewMode, self).drag_update_cb(tdw, event, dx, dy)
class ZoomViewMode (gui.mode.OneshotDragMode):
"""A oneshot mode for zooming the viewport by dragging."""
ACTION_NAME = 'ZoomViewMode'
pointer_behavior = gui.mode.Behavior.CHANGE_VIEW
scroll_behavior = gui.mode.Behavior.NONE # XXX grabs ptr, so no CHANGE_VIEW
supports_button_switching = False
@classmethod
def get_name(cls):
return _(u"Zoom View")
def get_usage(self):
return _(u"Zoom the canvas view")
@property
def active_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
@property
def inactive_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
def drag_update_cb(self, tdw, event, dx, dy):
tdw.scroll(-dx, -dy)
tdw.zoom(math.exp(dy/100.0), center=(event.x, event.y))
# TODO: Let modifiers constrain the zoom amount to
# the defined steps.
self.doc.notify_view_changed()
        super(ZoomViewMode, self).drag_update_cb(tdw, event, dx, dy)
class RotateViewMode (gui.mode.OneshotDragMode):
"""A oneshot mode for rotat
|
ing the viewport by dragging."""
ACTION_NAME = 'RotateViewMode'
pointer_behavior = gui.mode.Behavior.CHANGE_VIEW
scroll_behavior = gui.mode.Behavior.NONE # XXX grabs ptr, so no CHANGE_VIEW
supports_button_switching = False
@classmethod
def get_name(cls):
return _(u"Rotate View")
def get_usage(cls):
return _(u"Rotate the canvas view")
@property
def active_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
@property
def inactive_cursor(self):
return self.doc.app.cursors.get_action_cursor(
self.ACTION_NAME)
def drag_update_cb(self, tdw, event, dx, dy):
# calculate angular velocity from the rotation center
x, y = event.x, event.y
cx, cy = tdw.get_center()
x, y = x-cx, y-cy
phi2 = math.atan2(y, x)
x, y = x-dx, y-dy
phi1 = math.atan2(y, x)
tdw.rotate(phi2-phi1, center=(cx, cy))
self.doc.notify_view_changed()
# TODO: Allow modifiers to constrain the transformation angle
# to 22.5 degree steps.
super(RotateViewMode, self).drag_update_cb(tdw, event, dx, dy)
|
univ-of-utah-marriott-library-apple/management_tools
|
management_tools/__init__.py
|
Python
|
mit
| 376
| 0.00266
|
import app_info
import loggers
import plist_editor
__version__ = '1.9.1'
__all__ = ['app_info', 'fs_analysis', 'loggers', 'plist_editor', 'slack']
# This provides the ability to get the version from the command line.
# Do something like:
# $ python -m management_tools.__init__
if __name__ == "__main__":
print("Management Tools, version: {}".format(__version__))
| |
henryneu/Python
|
sample/dict.py
|
Python
|
apache-2.0
| 415
| 0.007229
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
d = {'Michael':95, 'Henry':96, 'Emily':97}
d['Lucy'] = 94
d['Lucy'] = 91
key = (1, 2, 3)
d[key] = 98
print(d['Michael'])
d.pop('Michael')
print(d)
print('Tom' in d)
print(d.get('Tom'))
print(d.get('Tom', -1))
s1 = set([1, 2, 2, 3, 3])
s2 = set([2, 3, 4])
s3 = set((1, 2))
s1.add(4)
s1.add(4)
s1.remove(4)
print(s1)
print(s2)
print(s1 & s2)
print(s1 | s2)
print(s3)
|
xala3pa/my-way-to-algorithms
|
graphs/hash/python/median.py
|
Python
|
mit
| 489
| 0.05317
|
import heapq
import sys
filename = "Median.txt"
lst = [int(l) for l in open(filename)]
H_low = []
H_high = []
sum = 0
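# Explanatory note (added): H_low is a max-heap (values stored negated) with
# the smaller half of the numbers seen so far, H_high a min-heap with the
# larger half; after rebalancing, len(H_low) is always len(H_high) or
# len(H_high) + 1, so -H_low[0] is the (lower) running median added to sum.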
for num in lst:
if len(H_low) > 0:
if num > -H_low[0]:
heapq.heappush(H_high, num)
else:
heapq.heappush(H_low, -num)
else:
heapq.heappush(H_low, -num)
if len(H_low) > len(H_high) + 1:
heapq.heappush(H_high, -(heapq.heappop(H_low)))
elif len(H_high) > len(H_low):
heapq.heappush(H_low, -(heapq.heappop(H_high)))
sum += -H_low[0]
print sum % 10000
|
barmalei/scalpel
|
lib/gravity/common/db.py
|
Python
|
lgpl-3.0
| 10,270
| 0.013048
|
#!/usr/bin/env python
#
# Copyright 2010 Andrei <vish@gravitysoft.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from __future__ import with_statement
import sys, threading, datetime
#
# Data base module to centralize working with data base connections,
# support connection pools and have a possibility to track connection
# leakage.
#
def create_db(id, parameters):
return Database(id, parameters)
def remove_db(id):
pass
def get_db(id):
return Database.getDatabase(id)
def get_dbconnection(id):
return Database(id).getConnection()
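# Illustrative usage (added; not part of the original module -- the database
# id and connection parameters below are assumed and depend on the DB-API
# driver bound to DBMODULE):
#
#   create_db('main', {'db.poolsize': 2, 'database': 'scalpel.db'})
#   conn = get_dbconnection('main')
#   cur = conn.cursor()
#   cur.execute('SELECT 1')
#   cur.close()
#   conn.close()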
class Database(object):
DS_LOCK = threading.RLock()
SOURCES = {}
class CursorWrapper(object):
def __init__(self, cursor, owner):
assert cursor and owner
self._cursor = cursor
self._owner = owner
@property
def description(self):
return self._cursor.description
@property
def rowcount(self):
return self._cursor.rowcount
def close(self): self._owner._closeMe(self)
def callproc(self, procname, parameters=None):
return self._cursor.callproc(procname, parameters)
def execute(self, operation, parameters=None):
return self._cursor.execute(operation, parameters)
def executemany(self, operation, seq_of_parameters):
return self._cursor.executemany(operation, seq_of_parameters)
def fetchone(self):
return self._cursor.fetchone()
def fetchmany(self, size=None):
s = self.arraysize
if size : s = size
return self._cursor.fetchmany(s)
def fetchall(self):
return self._cursor.fetchall()
def nextset(self):
return self._cursor.nextset()
@property
def arraysize(self):
return self._cursor.arraysize
def setinputsizes(self, sizes):
return self._cursor.setinputsizes(sizes)
def setoutputsize(self, size, column=None):
return self._cursor.setoutputsize(size, column)
def _orig_cursor(self):
return self._cursor
class ConnectionWrapper(object):
        def __init__(self, connection, owner):
assert connection and owner
self._connection = connection
self._creation_time = datetime.datetime.now()
self._owner = owner
self._cursors = []
def creationTime(self): return self._creation_time
def close(self):
with Database.DS_LOCK:
self._owner._closeMe(self)
def commit(self):
return self._connection.commit()
def rollback(self):
return self._connection.rollback()
def cursor(self):
with Database.DS_LOCK:
c = Database.CursorWrapper(self._connection.cursor(), self)
self._cursors.append(c)
return c
def _closeMe(self, cur):
with Database.DS_LOCK:
try: i = self._cursors.index(cur)
except ValueError: i = -1
if i >= 0:
self._cursors.pop(i)
cur._orig_cursor().close()
def cleanup(self):
with Database.DS_LOCK:
for cur in self._cursors:
try: cur._orig_cursor().close()
except: pass
self._cursors = []
def __str__(self):
return "'%s' connection wrapper, created: " % self._owner._id + str(self._creation_time)
def __enter__(self): pass
def __exit__(self, type, value, traceback):
self.close()
def _orig_conn(self):
return self._connection
@classmethod
def createDatabase(cls, id, parameters):
assert id and parameters
with cls.DS_LOCK:
if id in cls.SOURCES:
raise BaseException("Data base '%s' already exists." % id)
return Database(id, parameters)
@classmethod
def getDatabase(cls, id):
assert id
with cls.DS_LOCK: return cls.SOURCES[id]
@classmethod
def hasDatabase(cls, id):
assert id
with cls.DS_LOCK: return id in cls.SOURCES
def init(self, id, parameters):
global DBMODULE
self._poolSize = 1
key = 'db.poolsize'
if key in parameters:
self._poolSize = int(parameters[key])
del parameters[key]
self._poolLatency = self._poolSize / 5
key = 'db.poollatency'
if key in parameters:
self._poolLatency = int(parameters[key])
del parameters[key]
assert self._poolLatency >= 0 and self._poolSize >= 0
if self._poolLatency > self._poolSize:
raise BaseException("DB '%s' pool latency cannot be less than max pool size." % id)
self._parameters = parameters
self._id = id
self._pool = []
self._module = DBMODULE
self._firstfree = 0
def __new__(cls, id, parameters = None):
assert id and len(id.strip()) > 0
id = id.strip()
with cls.DS_LOCK:
if id in cls.SOURCES:
ds = cls.SOURCES[id]
if parameters and parameters != ds._parameters:
raise BaseException("Data base '%s' have been defined with another db parameters.")
return ds
else:
if parameters == None:
raise BaseException("DB parameters have not been specified for '%s' data base." % id)
ds = object.__new__(cls)
ds.init(id, parameters)
ds.ping()
cls.SOURCES[id] = ds
return ds
def ping(self):
con = None
try: con = self._module.connect(**self._parameters)
finally:
if con : con.close()
def getConnection(self):
with self.DS_LOCK:
# connection pool should not be used
if self._poolSize == 0:
return Database.ConnectionWrapper(self._module.connect(**self._parameters), owner = self)
else:
# found free connection in pool
if self._firstfree < len(self._pool):
self._firstfree += 1
return self._pool[self._firstfree - 1]
else:
# pool is not full
if self._poolSize > len(self._pool):
c = Database.ConnectionWrapper(self._module.connect(**self._parameters), owner = self)
self._pool.append(c)
self._firstfree = len(self._pool)
return c
else:
# pool is full
raise BaseException("'%s' connection pool is full (%d connections opened)." % (self._id, len(self._pool)))
def cleanup(self):
with self.DS_LOCK:
for c in self._pool:
|
salilab/saliweb
|
test/backend/run-all-tests.py
|
Python
|
lgpl-2.1
| 3,023
| 0
|
from __future__ import print_function
import unittest
import sys
import os
import re
import tempfile
import shutil
import glob
import warnings
warnings.simplefilter("default")
# Only use coverage if it's new enough and is requested
try:
import coverage
if not hasattr(coverage.coverage, 'combine'):
coverage = None
except ImportError:
coverage = None
if 'SALIWEB_COVERAGE' not in os.environ:
coverage = None
class RunAllTests(unittest.TestProgram):
"""Custom main program that also displays a final cov
|
erage report"""
|
def __init__(self, *args, **keys):
if coverage:
# Start coverage testing now before we import any modules
self.topdir = 'python'
self.mods = (glob.glob("%s/saliweb/*.py" % self.topdir)
+ glob.glob("%s/saliweb/backend/*.py" % self.topdir))
self.cov = coverage.coverage(branch=True, include=self.mods,
data_file='.coverage.backend')
self.cov.start()
self.make_site_customize()
# Run the tests
unittest.TestProgram.__init__(self, *args, **keys)
def make_site_customize(self):
"""Get coverage information on Python subprocesses"""
self.tmpdir = tempfile.mkdtemp()
with open(os.path.join(self.tmpdir, 'sitecustomize.py'), 'w') as fh:
fh.write("""
import coverage
import atexit
_cov = coverage.coverage(branch=True, data_suffix=True, auto_data=True,
data_file='%s')
_cov.start()
def _coverage_cleanup(c):
c.stop()
atexit.register(_coverage_cleanup, _cov)
""" % os.path.abspath('.coverage.backend'))
os.environ['PYTHONPATH'] = self.tmpdir + ':' + os.environ['PYTHONPATH']
def runTests(self):
self.testRunner = unittest.TextTestRunner(verbosity=self.verbosity)
result = self.testRunner.run(self.test)
if coverage:
shutil.rmtree(self.tmpdir)
self.cov.stop()
self.cov.combine()
print("\nPython coverage report\n", file=sys.stderr)
if hasattr(coverage.files, 'RELATIVE_DIR'):
coverage.files.RELATIVE_DIR = self.topdir + '/'
else:
self.cov.file_locator.relative_dir = self.topdir + '/'
self.cov.report(self.mods, file=sys.stderr)
self.cov.save()
sys.exit(not result.wasSuccessful())
def regressionTest():
try:
os.unlink('state_file')
except OSError:
pass
path = os.path.abspath(os.path.dirname(sys.argv[0]))
files = os.listdir(path)
test = re.compile(r"^test_.*\.py$", re.IGNORECASE)
files = filter(test.search, files)
modnames = [os.path.splitext(f)[0] for f in files]
modobjs = [__import__(m) for m in modnames]
tests = [unittest.defaultTestLoader.loadTestsFromModule(o)
for o in modobjs]
return unittest.TestSuite(tests)
if __name__ == "__main__":
RunAllTests(defaultTest="regressionTest")
|
jmgilman/Neolib
|
neolib/inventory/ShopWizardResult.py
|
Python
|
mit
| 3,097
| 0.009041
|
""":mod:`ShopWizardResult` -- Provides an interface for shop wizard results
.. module:: ShopWizardResult
:synopsis: Provides an interface for shop wizard results
.. moduleauthor:: Joshua Gilman <joshuagilman@gmail.com>
"""
from neolib.exceptions import parseException
from neolib.inventory.Inventory import Inventory
from neolib.shop.UserShopFront import UserShopFront
from neolib.item.Item import Item
import logging
class ShopWizardResult(Inventory):
"""Represents a shop wizard search result
Sub-classes the Inventory class to provide an interface for the results
from a Shop Wizard search. Automatically populates itself with the results
upon initialization.
Attributes
usr (User) - The user associated with the results
Initialization
ShopWizardResult(pg, usr)
Loads results from a shop wizard search
Parameters
pg (Page) - The page containing the results
usr (User) - The user to load the SDB for
Raises
parseException
Example
>>> res = ShopWizard.search(usr, "Mau Codestone")
>>> for item in res:
... print item.price
3,000
3,001
...
"""
usr = None
def __init__(self, pg, usr):
self.usr = usr
        try:
            items = pg.find("td", "contentModuleHeaderAlt").parent.parent.find_all("tr")
items.pop(0)
self.items = []
for item in items:
tmpItem = Item(item.find_all("td")[1].text)
tmpItem.owner = item.td.a.text
tmpItem.location = item.td.a['href']
tmpItem.stock = item.find_all("td")[2].text
tmpItem.price = item.find_all("td")[3].text.replace(" NP", "").replace(",", "")
tmpItem.id = tmpItem.location.split("buy_obj_info_id=")[1].split("&")[0]
self.items.append(tmpItem)
except Exception:
logging.getLogger("neolib.shop").exception("Unable to parse shop wizard results.", {'pg': pg})
raise parseException
def shop(self, index):
""" Return's the user shop the indexed item is in
Parameters:
index (int) -- The item index
Returns
UserShopFront - User shop item is in
"""
        item = self.items[index]
        return UserShopFront(self.usr, item.owner, item.id, str(item.price))
def buy(self, index):
""" Attempts to buy indexed item, returns result
Parameters:
index (int) -- The item index
Returns
bool - True if item was bought, false otherwise
"""
item = self.items[index]
us = UserShopFront(self.usr, item.owner, item.id, str(item.price))
us.load()
if not item.name in us.inventory:
return False
if not us.inventory[item.name].buy():
return False
return True
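    # A usage sketch (assuming "usr" is a logged-in User and ShopWizard.search()
    # produced this result object, as in the class docstring above):
    #
    #   res = ShopWizard.search(usr, "Mau Codestone")
    #   cheapest = min(range(len(res.items)), key=lambda i: int(res.items[i].price))
    #   if res.buy(cheapest):
    #       print "Bought from " + res.items[cheapest].owner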
|
dssg/babies-public
|
babysaver/evaluation.py
|
Python
|
mit
| 5,646
| 0.007793
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import markdown
from sklearn import metrics
from sklearn.externals import joblib
import re
def plot_precision_recall_n(y_true, y_prob, model_name=None):
# thanks rayid
from sklearn.metrics import precision_recall_curve
y_score = y_prob
precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)
precision_curve = precision_curve[:-1]
recall_curve = recall_curve[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in pr_thresholds:
num_above_thresh = len(y_score[y_score>=value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, precision_curve, 'b')
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
if model_name is not None:
name = model_name
plt.title(name)
#plt.savefig(name)
plt.show()
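# A minimal usage sketch for the plot above, with illustrative toy labels and
# predicted probabilities (any fitted classifier's probability output works):
#
#   y_true = np.array([0, 1, 1, 0, 1])
#   y_prob = np.array([0.2, 0.9, 0.6, 0.3, 0.8])
#   plot_precision_recall_n(y_true, y_prob, model_name='toy example')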
def current_strat(outcome, conn):
sql_outcome = '"' + outcome + '_OTC"'
qts707g2 = range(35, 52)
col_names = ['707G_'+str(float(i))+'_Q' for i in qts707g2]
qts707g2 = ['"'+i+'"' for i in col_names]
query = 'SELECT "UNI_PART_ID_I", ' + ','.join(qts707g2) + ',' + \
sql_outcome + ',"BBO_F" FROM core_birth_info_rc WHERE \
"707G_LT_D" >= \'2014-07-01\' AND ' + sql_outcome + ' IS NOT NULL'
df707g2 = pd.read_sql(query, conn)
points = [1,1,1,1,1,1,1,1,2,1,2,1,1,1,1,1,1]
scores = np.dot(df707g2[col_names], points)
df707g2['elig'] = [1 if i >= 2 else 0 for i in scores]
results_dct = {}
results_dct['precision'] = metrics.precision_score(df707g2[outcome+'_OTC'],
df707g2['elig'])
results_dct['recall'] = metrics.recall_score(df707g2[outcome+'_OTC'],
df707g2['elig'])
results_dct['prior'] = df707g2[outcome+'_OTC'].mean()
results_dct['bbo_crosstab'] = pd.crosstab(df707g2['BBO_F'],
df707g2['elig'], margins=True)
results_dct['graph'] = plot_precision_recall_n(df707g2[outcome+'_OTC'],
scores,
'Precision, Recall vs % Eligible')
return results_dct
def dict_to_dataframe(eval_dct, pkl_dct):
df = pd.DataFrame(columns=eval_dct[eval_dct.keys()[0]].columns.values)
for key in eval_dct.keys():
eval_dct[key].index = [key]
df = df.append(eval_dct[key])
pkl_df = pd.DataFrame({'index': pkl_dct.keys(),
'pickle_file': pkl_dct.values()}).set_index('index')
return df.join(pkl_df)
def markdown_to_html(md_file, out_file_name=None):
input_file = open(md_file, 'r')
text = input_file.read()
html_file = markdown.markdown(text)
if out_file_name is None:
out_file_name = md_file.split('.')[0]+'.html'
out_file = open(out_file_name, 'w')
out_file.write(html_file)
input_file.close()
out_file.close()
return 'Your converted HTML file is saved as ' + out_file_name
def weight_mapper(data_dct, eval_df, sort_list, mapping, assmnt):
if type(assmnt) is not str:
assmnt = str(assmnt)
eval_df.sort(sort_list, inplace=True, ascending=False)
model = joblib.load(eval_df['pickle_file'][0])
if 'weights' in model.__dict__:
wts = model.weights
else:
wts = model.coef_[0]
mapping = pd.read_csv(mapping)
config = pd.read_csv(data_dct['config_file'])
questions = [q for q in data_dct['features']
if bool(re.search(r'(?i)_Q$', q))]
mapping.loc[:,'QUESTION_N'] = [assmnt+'_'+str(float(i))+'_Q'
for i in mapping['QUESTION_N']]
mapping_sub = mapping[[True if i in questions else False for i in mapping['QUESTION_N']]]
mapping_sub.loc[:,'weights'] = wts
return mapping_sub
def weight_html(df):
df.columns = ['QID', 'Question', 'Model Score', 'Expanded Weights', 'Simple Weights']
df = df.set_index('QID')
df.index.name = None
return df.to_html()
def metrics_getter(data_dct, eval_df, sort_list, mapping, k, scores,
rnd=True, scale=False):
eval_df.sort(sort_list, inplace=True, ascending=False)
reg_ex = r'precision at.*mean|test_percent at.*mean'
metric_cols = eval_df.columns.str.contains(reg_ex)
metric_df = pd.DataFrame(eval_df.iloc[0, metric_cols])
prec_index = metric_df.index.str.contains(r'precision')
    test_index = metric_df.index.str.contains(r'test_percent')
prec = metric_df.iloc[prec_index,:].reset_index()
test = metric_df.iloc[test_index,:].reset_index()
mdf = pd.DataFrame({'Precision': prec.iloc[:,1],
'Predicted % Eligible': test.iloc[:,1]
})
mdf.index = ['Top '+str(int(each_k*100))+'%' for each_k in k]
    mdf = mdf.astype(float)
if rnd:
fmat = lambda x: str(np.round(x,1))+'%'
mdf = (mdf*100).applymap(fmat)
scores = sorted(scores)[::-1]
mes = 'Minimum Eligibility Score'
if scale:
mdf[mes] = [np.round(scores[int(each_k*len(scores))]*100,2)
for each_k in k]
else: mdf[mes] = [scores[int(each_k*len(scores))] for each_k in k]
return mdf
|
elyezer/robottelo
|
tests/foreman/api/test_organization.py
|
Python
|
gpl-3.0
| 17,745
| 0
|
"""Unit tests for the ``organizations`` paths.
Each ``APITestCase`` subclass tests a single URL. A full list of URLs to be
tested can be found here:
http://theforeman.org/api/apidoc/v2/organizations.html
:Requirement: Organization
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_alphanumeric, gen_string
from nailgun import client, entities
from random import randint
from requests.exceptions import HTTPError
from robottelo.config import settings
from robottelo.datafactory import filtered_datapoint, invalid_values_list
from robottelo.decorators import skip_if_bug_open, tier1, tier2
from robottelo.helpers import get_nailgun_config
from robottelo.test import APITestCase
from six.moves import http_client
@filtered_datapoint
def valid_org_data_list():
"""List of valid data for input testing.
    Note: The maximum allowed length of an org name is 242. This is intended
    behavior (255 is the standard across other entities).
"""
return [
gen_string('alphanumeric', randint(1, 242)),
gen_string('alpha', randint(1, 242)),
gen_string('cjk', randint(1, 85)),
gen_string('latin1', randint(1, 242)),
gen_string('numeric', randint(1, 242)),
gen_string('utf8', randint(1, 85)),
gen_string('html', randint(1, 85)),
]
class OrganizationTestCase(APITestCase):
"""Tests for the ``organizations`` path."""
@tier1
def test_positive_create_text_plain(self):
"""Create an organization using a 'text/plain' content-type.
:id: 6f67a3f0-0c1d-498c-9a35-28207b0faec2
:expectedresults: HTTP 415 is returned.
:CaseImportance: Critical
"""
organization = entities.Organization()
organization.create_missing()
response = client.post(
organization.path(),
organization.create_payload(),
auth=settings.server.get_credentials(),
headers={'content-type': 'text/plain'},
verify=False,
)
self.assertEqual(
http_client.UNSUPPORTED_MEDIA_TYPE, response.status_code)
@tier1
def test_positive_create_with_auto_label(self):
"""Create an organization and provide a name.
:id: c9f69ee5-c6dd-4821-bb05-0d93ffa22460
:expectedresults: The organization has the provided attributes and an
auto-generated label.
:CaseImportance: Critical
"""
org = entities.Organization().create()
self.assertTrue(hasattr(org, 'label'))
self.assertIsInstance(org.label, type(u''))
@tier1
def test_positive_create_with_custom_label(self):
"""Create an org and provide a name and identical label.
:id: f0deab6a-b09b-4110-8575-d4bea945a545
        :expectedresults: The organization has the provided attributes.
:CaseImportance: Critical
"""
# A label has a more restrictive allowable charset than a name, so we
# use it for populating both name and label.
org = entities.Organization()
name_label = org.get_fields()['label'].gen_value()
org.name = org.label = name_label
org = org.create()
self.assertEqual(name_label, org.name)
self.assertEqual(name_label, org.label)
@tier1
    def test_positive_create_with_name_and_label(self):
"""Create an organization and provide a name and label.
:id: 2bdd9aa8-a36a-4009-ac29-5c3d6416a2b7
:expectedresults: The organization has the provided attributes.
:CaseImportance: Critical
"""
org = entities.Organization()
org.name = name = org.get_fields()['name'].gen_value()
org.label = label = org.get_fields()['label'].gen_value()
org = org.create()
self.assertEqual(name, org.name)
self.assertEqual(label, org.label)
@tier1
def test_positive_create_with_name_and_description(self):
"""Create an organization and provide a name and description.
:id: afeea84b-61ca-40bf-bb16-476432919115
:expectedresults: The organization has the provided attributes and an
auto-generated label.
:CaseImportance: Critical
"""
for name in valid_org_data_list():
with self.subTest(name):
org = entities.Organization(
name=name,
description=name,
).create()
self.assertEqual(org.name, name)
self.assertEqual(org.description, name)
# Was a label auto-generated?
self.assertTrue(hasattr(org, 'label'))
self.assertIsInstance(org.label, type(u''))
self.assertGreater(len(org.label), 0)
@tier1
def test_positive_create_with_name_label_description(self):
"""Create an org and provide a name, label and description.
:id: f7d92392-751e-45de-91da-5ed2a47afc3f
:expectedresults: The organization has the provided name, label and
description.
:CaseImportance: Critical
"""
org = entities.Organization()
org.name = name = org.get_fields()['name'].gen_value()
org.label = label = org.get_fields()['label'].gen_value()
org.description = desc = org.get_fields()['description'].gen_value()
org = org.create()
self.assertEqual(org.name, name)
self.assertEqual(org.label, label)
self.assertEqual(org.description, desc)
@tier1
def test_negative_create_with_invalid_name(self):
"""Create an org with an incorrect name.
:id: 9c6a4b45-a98a-4d76-9865-92d992fa1a22
:expectedresults: The organization cannot be created.
:CaseImportance: Critical
"""
for name in invalid_values_list():
with self.subTest(name):
with self.assertRaises(HTTPError):
entities.Organization(name=name).create()
@tier1
def test_negative_create_with_same_name(self):
"""Create two organizations with identical names.
:id: a0f5333c-cc83-403c-9bf7-08fb372909dc
:expectedresults: The second organization cannot be created.
:CaseImportance: Critical
"""
name = entities.Organization().create().name
with self.assertRaises(HTTPError):
entities.Organization(name=name).create()
@tier1
def test_positive_search(self):
"""Create an organization, then search for it by name.
:id: f6f1d839-21f2-4676-8683-9f899cbdec4c
:expectedresults: Searching returns at least one result.
:CaseImportance: Critical
"""
org = entities.Organization().create()
orgs = entities.Organization().search(
query={u'search': u'name="{0}"'.format(org.name)}
)
self.assertEqual(len(orgs), 1)
self.assertEqual(orgs[0].id, org.id)
self.assertEqual(orgs[0].name, org.name)
class OrganizationUpdateTestCase(APITestCase):
"""Tests for the ``organizations`` path."""
@classmethod
def setUpClass(cls): # noqa
"""Create an organization."""
super(OrganizationUpdateTestCase, cls).setUpClass()
cls.organization = entities.Organization().create()
@tier1
def test_positive_update_name(self):
"""Update an organization's name with valid values.
:id: 68f2ba13-2538-407c-9f33-2447fca28cd5
:expectedresults: The organization's name is updated.
:CaseImportance: Critical
"""
for name in valid_org_data_list():
with self.subTest(name):
setattr(self.organization, 'name', name)
self.organization = self.organization.update(['name'])
self.assertEqual(self.organization.name, name)
@tier1
def test_positive_update_description(self):
"""Update an organization's description with valid values.
:id: bd223197-1021-467e-8714-c1a767ae89af
:expectedresults: The organization's
|
teamfx/openjfx-8u-dev-rt
|
modules/web/src/main/native/Source/JavaScriptCore/Scripts/builtins/builtins_generate_internals_wrapper_implementation.py
|
Python
|
gpl-2.0
| 7,074
| 0.003534
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
from string import Template
from builtins_generator import BuiltinsGenerator, WK_lcfirst, WK_ucfirst
from builtins_templates import BuiltinsGeneratorTemplates as Templates
log = logging.getLogger('global')
class BuiltinsInternalsWrapperImplementationGenerator(BuiltinsGenerator):
def __init__(self, model):
BuiltinsGenerator.__init__(self, model)
self.internals = filter(lambda object: 'internal' in object.annotations, model.objects)
def output_filename(self):
return "%sJSBuiltinInternals.cpp" % self.model().framework.setting('namespace')
def generate_output(self):
args = {
            'namespace': self.model().framework.setting('namespace'),
}
sections = []
sections.append(self.generate_license())
sections.append(Template(Templates.DoNotEditWarning).substitute(args))
sections.append(self.generate_primary_header_includes())
sections.append(self.generate_secondary_header_includes())
        sections.append(Template(Templates.NamespaceTop).substitute(args))
sections.append(self.generate_section_for_object())
sections.append(Template(Templates.NamespaceBottom).substitute(args))
return "\n\n".join(sections)
def generate_secondary_header_includes(self):
header_includes = [
(["WebCore"],
("WebCore", "JSDOMGlobalObject.h"),
),
(["WebCore"],
("WebCore", "WebCoreJSClientData.h"),
),
(["WebCore"],
("JavaScriptCore", "heap/HeapInlines.h"),
),
(["WebCore"],
("JavaScriptCore", "heap/SlotVisitorInlines.h"),
),
(["WebCore"],
("JavaScriptCore", "runtime/JSCJSValueInlines.h"),
),
(["WebCore"],
("JavaScriptCore", "runtime/StructureInlines.h"),
),
]
return '\n'.join(self.generate_includes_from_entries(header_includes))
def generate_section_for_object(self):
lines = []
lines.append(self.generate_constructor())
lines.append(self.generate_visit_method())
lines.append(self.generate_initialize_method())
return '\n'.join(lines)
def accessor_name(self, object):
return WK_lcfirst(object.object_name)
def member_name(self, object):
return "m_" + self.accessor_name(object)
def member_type(self, object):
return WK_ucfirst(object.object_name) + "BuiltinFunctions"
def generate_constructor(self):
guards = set([object.annotations.get('conditional') for object in self.internals if 'conditional' in object.annotations])
lines = ["JSBuiltinInternalFunctions::JSBuiltinInternalFunctions(JSC::VM& vm)",
" : m_vm(vm)"]
for object in self.internals:
initializer = " , %s(m_vm)" % self.member_name(object)
lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), initializer))
lines.append("{")
lines.append(" UNUSED_PARAM(vm);")
lines.append("}\n")
return '\n'.join(lines)
def property_macro(self, object):
lines = []
lines.append("#define DECLARE_GLOBAL_STATIC(name) \\")
lines.append(" JSDOMGlobalObject::GlobalPropertyInfo( \\")
lines.append(" clientData.builtinFunctions().%sBuiltins().name##PrivateName(), %s().m_##name##Function.get() , JSC::PropertyAttribute::DontDelete | JSC::PropertyAttribute::ReadOnly)," % (self.accessor_name(object), self.accessor_name(object)))
lines.append(" WEBCORE_FOREACH_%s_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)" % object.object_name.upper())
lines.append("#undef DECLARE_GLOBAL_STATIC")
return '\n'.join(lines)
def generate_visit_method(self):
lines = ["void JSBuiltinInternalFunctions::visit(JSC::SlotVisitor& visitor)",
"{"]
for object in self.internals:
visit = " %s.visit(visitor);" % self.member_name(object)
lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), visit))
lines.append(" UNUSED_PARAM(visitor);")
lines.append("}\n")
return '\n'.join(lines)
def _generate_initialize_static_globals(self):
lines = [" JSVMClientData& clientData = *static_cast<JSVMClientData*>(m_vm.clientData);",
" JSDOMGlobalObject::GlobalPropertyInfo staticGlobals[] = {"]
for object in self.internals:
lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), self.property_macro(object)))
lines.append(" };")
lines.append(" globalObject.addStaticGlobals(staticGlobals, WTF_ARRAY_LENGTH(staticGlobals));")
lines.append(" UNUSED_PARAM(clientData);")
return '\n'.join(lines)
def generate_initialize_method(self):
lines = ["void JSBuiltinInternalFunctions::initialize(JSDOMGlobalObject& globalObject)",
"{",
" UNUSED_PARAM(globalObject);"]
for object in self.internals:
init = " %s.init(globalObject);" % self.member_name(object)
lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), init))
lines.append("")
guards = set([object.annotations.get('conditional') for object in self.internals if 'conditional' in object.annotations])
lines.append(BuiltinsGenerator.wrap_with_guard(" || ".join(guards), self._generate_initialize_static_globals()))
lines.append("}")
return '\n'.join(lines)
|
rafafigueroa/cws
|
build/quad/cmake/quad-genmsg-context.py
|
Python
|
apache-2.0
| 928
| 0.002155
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = ""
services_str = ""
pkg_name = "quad"
dependencies_str = "std_msgs;geometry_msgs;kobuki_msgs;hector_uav_msgs;nav_msgs;sensor_msgs;gazebo_msgs;tf"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "std_msgs;/opt/ros/hydro/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/hydro/share/geometry_msgs/cmake/../msg;kobuki_msgs;/opt/ros/hydro/share/kobuki_msgs/cmake/../msg;hector_uav_msgs;/opt/ros/hydro/share/hector_uav_msgs/cmake/../msg;nav_msgs;/opt/ros/hydro/share/nav_msgs/cmake/../msg;sensor_msgs;/opt/ros/hydro/share/sensor_msgs/cmake/../msg;gazebo_msgs;/opt/ros/hydro/share/gazebo_msgs/cmake/../msg;tf;/opt/ros/hydro/share/tf/cmake/../msg;actionlib_msgs;/opt/ros/hydro/share/actionlib_msgs/cmake/../msg;trajectory_msgs;/opt/ros/hydro/share/trajectory_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
|
carrdelling/project_euler
|
problem6.py
|
Python
|
gpl-2.0
| 996
| 0.002008
|
#!/usr/bin/env python
################################################################################
#
# Project Euler - Problem 6
#
# The sum of the squares of the first ten natural numbers is,
#
# 1^2 + 2^2 + ... + 10^2 = 385
# The square of the sum of the first ten natural numbers is,
#
# (1 + 2 + ... + 10)^2 = 55^2 = 3025
# Hence the difference between the sum of the squares of the first ten natural
# numbers and the square of the sum is 3025 - 385 = 2640
#
# Find the difference between the sum of the squares of the first one hundred
# natural numbers and the square of the sum.
#
# Joaquin Derrac - carrdelling@gmail.com
#
################################################################################
if __name__ == "__main__":
sum_one_hundred = sum([x for x in range(1, 101)])
sum_one_hundred_squared = sum_one_hundred * sum_one_hundred
sum_squared = sum([x ** 2 for x in range(1, 101)])
solution = sum_one_hundred_squared - sum_squared
print(solution)
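# The same difference can also be sketched with the closed-form identities
# sum(1..n) = n(n+1)/2 and sum(1^2..n^2) = n(n+1)(2n+1)/6, e.g.:
#
#   n = 100
#   assert (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6 == 25164150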
|
eayunstack/neutron
|
neutron/tests/unit/extensions/test_subnet_service_types.py
|
Python
|
apache-2.0
| 14,519
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import subnet_service_type_db_models
from neutron.extensions import subnet_service_types
from neutron.tests.unit.db import test_db_base_plugin_v2
class SubnetServiceTypesExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
extension = subnet_service_types.Subnet_service_types()
return extension.get_extended_resources(version)
class SubnetServiceTypesExtensionTestPlugin(
db_base_plugin_v2.NeutronDbPluginV2,
subnet_service_type_db_models.SubnetServiceTypeMixin):
"""Test plugin to mixin the subnet service_types extension.
"""
supported_extension_aliases = ["subnet-service-types"]
class SubnetServiceTypesExtensionTestCase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
"""Test API extension subnet_service_types attributes.
"""
CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
IP_VERSION = 4
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
'SubnetServiceTypesExtensionTestPlugin')
ext_mgr = SubnetServiceTypesExtensionManager()
super(SubnetServiceTypesExtensionTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _create_service_subnet(self, service_types=None, cidr=None,
network=None, enable_dhcp=False):
if not network:
with self.network() as network:
pass
network = network['network']
if not cidr:
cidr = self.CIDRS[0]
args = {'net_id': network['id'],
'tenant_id': network['tenant_id'],
'cidr': cidr,
'ip_version': self.IP_VERSION,
'enable_dhcp': enable_dhcp}
if service_types:
args['service_types'] = service_types
return self._create_subnet(self.fmt, **args)
def _test_create_subnet(self, service_types, expect_fail=False):
res = self._create_service_subnet(service_types)
if expect_fail:
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
else:
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_create_subnet_blank_type(self):
self._test_create_subnet([])
def test_create_subnet_bar_type(self):
self._test_create_subnet(['network:bar'])
def test_create_subnet_foo_type(self):
self._test_create_subnet(['compute:foo'])
def test_create_subnet_bar_and_foo_type(self):
self._test_create_subnet(['network:bar', 'compute:foo'])
def test_create_subnet_invalid_type(self):
self._test_create_subnet(['foo'], expect_fail=True)
self._test_create_subnet([1], expect_fail=True)
def test_create_subnet_no_type(self):
res = self._create_service_subnet()
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertFalse(subnet['service_types'])
def _test_update_subnet(self, subnet, service_types, fail_code=None):
data = {'subnet': {'service_types': service_types}}
req = self.new_update_request('subnets', data, subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
if fail_code is not None:
self.assertEqual(fail_code,
res['NeutronError']['type'])
else:
subnet = res['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_update_subnet_zero_to_one(self):
service_types = ['network:foo']
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with a single service type
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_two(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with two service types
service_types.append('compute:bar')
self._test_update_subnet(subnet, service_types)
def test_update_subnet_two_to_one(self):
service_types = ['network:foo', 'compute:bar']
# Create a subnet with two service types
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with one service type
service_types = ['network:foo']
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_zero(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with zero service types
service_types = []
self._test_update_subnet(subnet, service_types)
def test_update_subnet_invalid_type(self):
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with invalid service type(s)
self._test_update_subnet(subnet, ['foo'],
fail_code='InvalidSubnetServiceType')
self._test_update_subnet(subnet, [2],
fail_code='InvalidInputSubnetServiceType')
def _assert_port_res(self, port, service_type, subnet, fallback,
error='IpAddressGenerationFailureNoMatchingSubnet'):
res = self.deserialize('json', port)
if fallback:
port = res['port']
self.assertEqual(1, len(port['fixed_ips']))
self.assertEqual(service_type, port['device_owner'])
self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
else:
self.assertEqual(error, res['NeutronError']['type'])
def test_create_port_with_matching_service_type(self):
with self.network() as network:
pass
matching_type = 'network:foo'
non_matching_type = 'network:bar'
# Create a subnet with no service types
self._create_service_subnet(network=network)
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[2],
network=network)
# Create a subnet with a service type to match the port device owner
res = self._create_service_subnet([matching_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with device owner matching the correct service subnet
network = network['network']
por
|
beheh/fireplace
|
fireplace/cards/tgt/warlock.py
|
Python
|
agpl-3.0
| 950
| 0.024211
|
from ..utils import *
##
# Minions
class AT_019:
"Dreadsteed"
    deathrattle = Summon(CONTROLLER, "AT_019")
class AT_021:
"Tiny Knight of Evil"
events = Discard(FRIENDLY).on(Buff(SELF, "AT_021e"))
AT_021e = buff(+1, +1)
class AT_023:
"Void Crusher"
inspire = Destroy(RANDOM_ENEMY_MINION | RANDOM_FRIENDLY_MINION)
class AT_026:
"Wrathguard"
events = Damage(SELF).on(Hit(FRIENDLY_HERO, Damage.AMOUNT))
class AT_027:
"Wilfred Fizzlebang"
events = Draw(CONTROLLER, None, FRIENDLY_HERO_POWER).on(Buff(Draw.CARD, "AT_027e"))
class AT_027e:
cost = SET(0)
##
# Spells
class AT_022:
"Fist of Jaraxxus"
play = Hit(RANDOM_ENEMY_CHARACTER, 4)
class Hand:
events = Discard(SELF).on(Hit(RANDOM_ENEMY_CHARACTER, 4))
class AT_024:
"Demonfuse"
play = Buff(TARGET, "AT_024e"), GainMana(OPPONENT, 1)
AT_024e = buff(+3, +3)
class AT_025:
"Dark Bargain"
play = Destroy(RANDOM(ENEMY_MINIONS) * 2), Discard(RANDOM(FRIENDLY_HAND) * 2)
|
junmin-zhu/chromium-rivertrail
|
build/android/adb_install_apk.py
|
Python
|
bsd-3-clause
| 1,365
| 0.011722
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import sys
from pylib import android_commands
from pylib import test_options_parser
from pylib import constants
def InstallApk(args):
options, device = args
apk_path = os.path.join(os.environ['CHROME_SRC'],
'out', options.build_type,
'apks', options.apk)
  result = android_commands.AndroidCommands(device=device).ManagedInstall(
apk_path, False, options.apk_package)
print '----- Installed on %s -----' % device
print result
def main(argv):
parser = optparse.OptionParser()
  test_options_parser.AddBuildTypeOption(parser)
test_options_parser.AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
if len(args) > 1:
raise Exception('Error: Unknown argument:', args[1:])
devices = android_commands.GetAttachedDevices()
if not devices:
raise Exception('Error: no connected devices')
pool = multiprocessing.Pool(len(devices))
  # Send a tuple (options, device) per call to InstallApk.
pool.map(InstallApk, zip([options] * len(devices), devices))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
materialsproject/MPContribs
|
mpcontribs-api/mpcontribs/api/config.py
|
Python
|
mit
| 5,326
| 0.003567
|
# -*- coding: utf-8 -*-
"""configuration module for MPContribs Flask API"""
import os
import datetime
import json
import gzip
formulae_path = os.path.join(
os.path.dirname(__file__), "contributions", "formulae.json.gz"
)
with gzip.open(formulae_path) as f:
FORMULAE = json.load(f)
VERSION = datetime.datetime.today().strftime("%Y.%m.%d")
JSON_SORT_KEYS = False
JSON_ADD_STATUS = False
SECRET_KEY = "super-secret" # TODO in local prod config
USTS_MAX_AGE = 2.628e6 # 1 month
MAIL_DEFAULT_SENDER = os.environ.get("MAIL_DEFAULT_SENDER")
MAIL_TOPIC = os.environ.get("AWS_SNS_TOPIC_ARN")
MPCONTRIBS_DB = os.environ.get("MPCONTRIBS_DB_NAME", "mpcontribs")
MPCONTRIBS_MONGO_HOST = os.environ.get("MPCONTRIBS_MONGO_HOST")
MONGODB_SETTINGS = {
# Changed in version 3.9: retryWrites now defaults to True.
"host": f"mongodb+srv://{MPCONTRIBS_MONGO_HOST}/{MPCONTRIBS_DB}",
"connect": False,
"db": MPCONTRIBS_DB,
"compressors": ["snappy", "zstd", "zlib"],
}
REDIS_ADDRESS = os.environ.get("REDIS_ADDRESS", "redis")
REDIS_URL = RQ_REDIS_URL = RQ_DASHBOARD_REDIS_URL = f"redis://{REDIS_ADDRESS}"
DOC_DIR = os.path.join(os.path.dirname(__file__), f"swagger-{MPCONTRIBS_DB}")
SWAGGER = {
"swagger_ui_bundle_js": "//unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js",
"swagger_ui_standalone_preset_js": "//unpkg.com/swagger-ui-dist@3/swagger-ui-standalone-preset.js",
"jquery_js": "//unpkg.com/jquery@2.2.4/dist/jquery.min.js",
"swagger_ui_css": "//unpkg.com/swagger-ui-dist@3/swagger-ui.css",
"uiversion": 3,
"hide_top_bar": True,
"doc_expansion": "none",
"doc_dir": DOC_DIR,
"specs": [
{
"endpoint": "apispec",
"rou
|
te": "/apispec.json",
"rule_filter": lambda rule: True, # all in
"model_filter": lambda tag: True, # all in
}
],
"specs_route": "/",
}
TEMPLATE = {
"swagger": "2.0",
"info": {
"title": "MPContribs API",
"description": "Operations to contribute, update and retrieve materials data on Materials Project",
"termsOfService": "https://materialsproject.org/terms",
"version": VERSION,
"contact": {
"name": "MPContribs",
"email": "contribs@materialsproject.org",
"url": "https://mpcontribs.org",
},
"license": {
"name": "Creative Commons Attribution 4.0 International License",
"url": "https://creativecommons.org/licenses/by/4.0/",
},
},
"tags": [
{
"name": "projects",
"description": f'contain provenance information about contributed datasets. \
Deleting projects will also delete all contributions including tables, structures, attachments, notebooks \
and cards for the project. Only users who have been added to a project can update its contents. While \
unpublished, only users on the project can retrieve its data or view it on the \
Portal. Making a project public does not automatically publish all \
its contributions, tables, attachments, and structures. These are separately set to public individually or in bulk.'
"",
},
{
"name": "contributions",
"description": f'contain simple hierarchical data which will show up as cards on the MP details \
page for MP material(s). Tables (rows and columns), structures, and attachments can be added to a \
contribution. Each contribution uses `mp-id` or composition as identifier to associate its data with the \
according entries on MP. Only admins or users on the project can create, update or delete contributions, and \
while unpublished, retrieve its data or view it on the Portal. \
Contribution components (tables, structures, and attachments) are deleted along with a contribution.',
},
{
"name": "structures",
"description": 'are \
<a href="https://pymatgen.org/_modules/pymatgen/core/structure.html#Structure">pymatgen structures</a> which \
can be added to a contribution.',
},
{
"name": "tables",
"description": 'are simple spreadsheet-type tables with columns and rows saved as Pandas \
<a href="https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html#dataframe">DataFrames</a> \
which can be added to a contribution.',
},
{
"name": "attachments",
"description": 'are files saved as objects in AWS S3 and not accessible for querying (only retrieval) \
which can be added to a contribution.',
},
{
"name": "notebooks",
"description": f'are Jupyter \
<a href="https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#notebook-documents">notebook</a> \
documents generated and saved when a contribution is saved. They form the basis for Contribution \
Details Pages on the Portal.',
},
],
"securityDefinitions": {
"ApiKeyAuth": {
"description": "MP API key to authorize requests",
"name": "X-API-KEY",
"in": "header",
"type": "apiKey",
}
},
"security": [{"ApiKeyAuth": []}],
}
|
cancerregulome/gidget
|
commands/feature_matrix_construction/main/filterPWPV.py
|
Python
|
mit
| 4,776
| 0.002094
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# these are system modules
import math
import numpy
import random
import sys
import urllib
# these are my local modules
import miscIO
import path
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getLineInfo(aLine):
lInfo = {}
aTokens = aLine.split('\t')
if (len(aTokens) < 1 or len(aTokens) > 3):
if (1):
print aLine
print aLine.strip()
print aTokens
print len(aTokens)
sys.exit(-1)
return (lInfo)
lInfo['TARG'] = {}
lInfo['FEAT'] = {}
bTarg = aTokens[0].split(',')
bFeat = aTokens[1].split(',')
if (len(aTokens) == 3):
bFeat += aTokens[2].split(',')
if (0):
print bTarg
print bFeat
sys.exit(-1)
for ii in range(len(bTarg)):
cTmp = bTarg[ii].split('=')
try:
zVal = float(cTmp[1].strip())
lInfo['TARG'][cTmp[0].strip()] = zVal
except:
try:
lInfo['TARG'][cTmp[0].strip()] = cTmp[1].strip().upper()
except:
return ({})
for ii in range(len(bFeat)):
cTmp = bFeat[ii].split('=')
try:
zVal = float(cTmp[1].strip())
lInfo['FEAT'][cTmp[0].strip()] = zVal
except:
try:
lInfo['FEAT'][cTmp[0].strip()] = cTmp[1].strip().upper()
except:
return ({})
if (0):
print lInfo
sys.exit(-1)
return (lInfo)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getCoordinates(aName):
tokenList = aName.split(':')
chrName = tokenList[3]
startPos = -1
endPos = -1
try:
        startPos = int(tokenList[4])
endPos = int(tokenList[5])
except:
doNothing = 1
return (chrName, startPos, endPos)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def filterPWPV(pwpvOutFilename):
print " "
print " reading PWPV outputs from <%s> " % pwpvOutFilename
fh = file(pwpvOutFilename, 'r')
out0 = pwpvOutFilename + ".unmapd"
    out1 = pwpvOutFilename + ".mapped"
fh0 = file(out0, 'w')
fh1 = file(out1, 'w')
n0 = 0
n1 = 0
# why didn't I put GNAB in this list ???
# --> adding it to the list on 06sep12
typeList = ["CNVR", "GEXP", "GNAB", "METH", "MIRN", "RPPA"]
# --> taking it back out on 20sep12 ;-)
typeList = ["CNVR", "GEXP", "METH", "MIRN", "RPPA"]
typeCounts = {}
for aLine in fh:
# by default, we assume we will keep this line from the file
keepLine = 1
aLine = aLine.strip()
tokenList = aLine.split('\t')
# expected list of tokens for a PWPV pair :
## ['C:SAMP:miRNA_k5:::::', 'C:SAMP:miRNA_k7:::::', '0.398', '694', '-300.0', '1.7', '-300.0', '0', '0.0', '0', '0.0\n']
if (len(tokenList) < 3):
continue
aType = tokenList[0][2:6]
bType = tokenList[1][2:6]
if (aType <= bType):
aKey = (aType, bType)
else:
aKey = (bType, aType)
if (aType in typeList):
aTokens = tokenList[0].split(':')
if (aTokens[3] == ""):
keepLine = 0
if (keepLine):
if (bType in typeList):
bTokens = tokenList[1].split(':')
if (bTokens[3] == ""):
keepLine = 0
if (keepLine):
fh1.write("%s\n" % aLine)
n1 += 1
else:
fh0.write("%s\n" % aLine)
n0 += 1
if (aKey not in typeCounts.keys()):
typeCounts[aKey] = 0
typeCounts[aKey] += 1
fh.close()
fh0.close()
fh1.close()
print " "
print " n1 = %9d n0 = %9d " % (n1, n0)
if ( (n1+n0) > 0 ):
f1 = float(n1) / float(n1 + n0)
f0 = float(n0) / float(n1 + n0)
print " f1 = %9.6f f0 = %9.6f " % (f1, f0)
print " "
print typeCounts
print " "
print " "
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (len(sys.argv) != 2):
print ' Usage : %s <pwpv results file> ' % sys.argv[0]
print " ERROR -- bad command line arguments "
sys.exit(-1)
pwpvOutFilename = sys.argv[1]
filterPWPV(pwpvOutFilename)
sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
delMar43/wcmodtoolsources
|
WC1_clone/room_engine/win_init.py
|
Python
|
mit
| 424
| 0.023585
|
import os, pygame
#create window of correct size (320x200, with some multiple)
x = 320
y = 200
size_mult = 4
bright_mult = 4
pygame.init()
os.environ['SDL_VIDEO_WINDOW_POS'] = str(0) + "," + str(40) #put window in consistent location
os.environ['SDL_VIDEO_WINDOW_POS'] = str(0) + "," + str(40) #put window in consistent location
screen = pygame.display.set_mode((x*size_mult, y*size_mult))
screen2 = pygame.Surface((x,y))
|
choderalab/openpathsampling
|
openpathsampling/tests/test_range_logic.py
|
Python
|
lgpl-2.1
| 4,127
| 0.000969
|
from builtins import object
from nose.tools import assert_equal, assert_not_equal, raises
from nose.plugins.skip import Skip, SkipTest
from openpathsampling.range_logic import *
class TestRangeLogic(object):
def test_range_and(self):
assert_equal(range_and(1, 3, 2, 4), [(2, 3)])
assert_equal(range_and(2, 4, 1, 3), [(2, 3)])
assert_equal(range_and(1, 2, 3, 4), None)
assert_equal(range_and(3, 4, 1, 2), None)
assert_equal(range_and(1, 4, 2, 3), [(2, 3)])
assert_equal(range_and(2, 3, 1, 4), [(2, 3)])
assert_equal(range_and(1, 2, 1, 2), 1)
def test_range_or(self):
assert_equal(range_or(1, 3, 2, 4), [(1, 4)])
assert_equal(range_or(2, 4, 1, 3), [(1, 4)])
assert_equal(range_or(1, 2, 3, 4), [(1, 2), (3, 4)])
assert_equal(range_or(3, 4, 1, 2), [(3, 4), (1, 2)])
assert_equal(range_or(1, 4, 2, 3), [(1, 4)])
assert_equal(range_or(2, 3, 1, 4), [(1, 4)])
assert_equal(range_or(1, 2, 1, 2), 1)
def test_range_sub(self):
assert_equal(range_sub(1, 3, 2, 4), [(1, 2)])
assert_equal(range_sub(2, 4, 1, 3), [(3, 4)])
assert_equal(range_sub(1, 2, 3, 4), 1)
assert_equal(range_sub(3, 4, 1, 2), 1)
assert_equal(range_sub(1, 4, 2, 3), [(1, 2), (3, 4)])
assert_equal(range_sub(2, 3, 1, 4), None)
assert_equal(range_sub(1, 2, 1, 2), None)
assert_equal(range_sub(0.1, 0.4, 0.1, 0.3), [(0.3, 0.4)])
class TestPeriodicRangeLogic(object):
def test_periodic_order(self):
# orders without wrapping
assert_equal(periodic_ordering(1, 2, 3, 4), [0, 1, 2, 3])
assert_equal(periodic_ordering(1, 3, 2, 4), [0, 2, 1, 3])
assert_equal(periodic_ordering(4, 3, 2, 1), [0, 3, 2, 1])
assert_equal(periodic_ordering(1, 2, 1, 2), [0, 2, 1, 3])
assert_equal(periodic_ordering(2, 4, 1, 3), [1, 3, 0, 2])
assert_equal(periodic_ordering(1, 2, 4, 3), [1, 2, 0, 3])
def test_periodic_and(self):
assert_equal(periodic_range_and(0.1, 0.3, 0.2, 0.4), [(0.2, 0.3)])
assert_equal(periodic_range_and(0.2, 0.4, 0.1, 0.3), [(0.2, 0.3)])
assert_equal(periodic_range_and(1, 2, 3, 4), None)
assert_equal(periodic_range_and(3, 4, 1, 2), None)
assert_equal(periodic_range_and(1, 4, 2, 3), [(2, 3)])
assert_equal(periodic_range_and(2, 3, 1, 4), [(2, 3)])
assert_equal(periodic_range_and(1, 2, 1, 2), 1)
assert_equal(periodic_range_and(1, 2, 2, 1), None)
assert_equal(periodic_range_and(2, 1, 1, 4), [(2, 4)])
assert_equal(periodic_range_and(0.1, 0.4, 0.3, 0.2),
[(0.1, 0.2), (0.3, 0.4)])
def test_periodic_or(self):
assert_equal(periodic_range_or(0.1, 0.3, 0.2, 0.4), [(0.1, 0.4)])
        assert_equal(periodic_range_or(0.2, 0.4, 0.1, 0.3), [(0.1, 0.4)])
assert_equal(periodic_range_or(1, 2, 3, 4), [(1, 2), (3, 4)])
        assert_equal(periodic_range_or(3, 4, 1, 2), [(3, 4), (1, 2)])
assert_equal(periodic_range_or(1, 4, 2, 3), [(1, 4)])
assert_equal(periodic_range_or(2, 3, 1, 4), [(1, 4)])
assert_equal(periodic_range_or(1, 2, 1, 2), 1)
assert_equal(periodic_range_or(1, 2, 2, 1), -1)
assert_equal(periodic_range_or(0.1, 0.4, 0.3, 0.2), -1)
assert_equal(periodic_range_or(2, 1, 1, 4), -1)
def test_periodic_sub(self):
assert_equal(periodic_range_sub(0.1, 0.3, 0.2, 0.4), [(0.1, 0.2)])
assert_equal(periodic_range_sub(0.2, 0.4, 0.1, 0.3), [(0.3, 0.4)])
assert_equal(periodic_range_sub(1, 2, 3, 4), 1)
assert_equal(periodic_range_sub(3, 4, 1, 2), 1)
assert_equal(periodic_range_sub(1, 4, 2, 3), [(1, 2), (3, 4)])
assert_equal(periodic_range_sub(2, 3, 1, 4), None)
assert_equal(periodic_range_sub(1, 2, 1, 2), None)
assert_equal(periodic_range_sub(1, 2, 2, 1), 1)
assert_equal(periodic_range_sub(2, 1, 1, 4), [(4, 1)])
assert_equal(periodic_range_sub(0.1, 0.4, 0.3, 0.2), [(0.2, 0.3)])
assert_equal(periodic_range_sub(0.1, 0.4, 0.1, 0.3), [(0.3, 0.4)])
|
timeyyy/PyUpdater
|
pyupdater/hooks/hook-cryptography.py
|
Python
|
bsd-2-clause
| 1,600
| 0
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""
Hook for cryptography module from the Python Cryptography Authority.
"""
import os.path
import glob
from PyInstaller.hooks.hookutils import (collect_submodules as cs,
get_module_file_attribute)
from PyInstaller.hooks.hookutils import PY_EXTENSION_SUFFIXES
# add the OpenSSL FFI binding modules as hidden imports
hiddenimports = cs('cryptography.hazmat.bindings.openssl') # pragma: no cover
def hook(mod):
"""
Include the cffi extensions as binaries in a subfolder named like the
package. The cffi verifier expects to find them inside the package
directory for the main module. We cannot use hiddenimports because that
would add the modules outside the package.
"""
    crypto_dir = os.path.dirname(get_module_file_attribute('cryptography'))
for ext in PY_EXTENSION_SUFFIXES:
ffimods = glob.glob(os.path.join(crypto_dir,
'*_cffi_*%s*' % ext))
for f in ffimods:
name = os.path.join('cryptography', os.path.basename(f))
# TODO fix this hook to use attribute 'binaries'.
            mod.pyinstaller_binaries.append((name, f, 'BINARY'))
return mod
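# For illustration (hypothetical filename): an extension found at
# .../site-packages/cryptography/_openssl_cffi_abc123.so would be appended as
# ('cryptography/_openssl_cffi_abc123.so', '<full path to that .so>', 'BINARY').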
|
zhmcclient/python-zhmcclient
|
tests/unit/zhmcclient/test_activation_profile.py
|
Python
|
apache-2.0
| 11,882
| 0
|
# Copyright 2016-2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _activation_profile module.
"""
from __future__ import absolute_import, print_function
import copy
import re
import pytest
from zhmcclient import Client, ActivationProfile
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestActivationProfile(object):
"""
All tests for the ActivationProfile and ActivationProfileManager classes.
"""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked CPC in classic mode,
and add two faked activation profiles of each type.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_cpc = self.session.hmc.cpcs.add({
'object-id': 'fake-cpc1-oid',
# object-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': 'fake-cpc1-name',
'description': 'CPC #1 (classic mode)',
'status': 'active',
'dpm-enabled': False,
'is-ensemble-member': False,
'iml-mode': 'lpar',
})
self.cpc = self.client.cpcs.find(name='fake-cpc1-name')
self.faked_reset_ap_1 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_1',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
'description': 'RAP #1',
})
        self.faked_reset_ap_2 = self.faked_cpc.reset_activation_profiles.add({
# element-uri is set up automatically
'name': 'rap_2',
'parent': self.faked_cpc.uri,
'class': 'reset-activation-profile',
            'description': 'RAP #2',
})
self.faked_image_ap_1 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_1',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #1',
})
self.faked_image_ap_2 = self.faked_cpc.image_activation_profiles.add({
# element-uri is set up automatically
'name': 'iap_2',
'parent': self.faked_cpc.uri,
'class': 'image-activation-profile',
'description': 'IAP #2',
})
self.faked_load_ap_1 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_1',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #1',
})
self.faked_load_ap_2 = self.faked_cpc.load_activation_profiles.add({
# element-uri is set up automatically
'name': 'lap_2',
'parent': self.faked_cpc.uri,
'class': 'load-activation-profile',
'description': 'LAP #2',
})
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
def test_profilemanager_initial_attrs(self, profile_type):
"""Test initial attributes of ActivationProfileManager."""
mgr_attr = profile_type + '_activation_profiles'
profile_mgr = getattr(self.cpc, mgr_attr)
# Verify all public properties of the manager object
assert profile_mgr.resource_class == ActivationProfile
assert profile_mgr.session == self.session
assert profile_mgr.parent == self.cpc
assert profile_mgr.cpc == self.cpc
assert profile_mgr.profile_type == profile_type
# TODO: Test for ActivationProfileManager.__repr__()
@pytest.mark.parametrize(
"profile_type", ['reset', 'image', 'load']
)
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(),
['name', 'element-uri']),
(dict(full_properties=False),
['name', 'element-uri']),
(dict(full_properties=True),
None),
]
)
def test_profilemanager_list_full_properties(
self, full_properties_kwargs, prop_names, profile_type):
"""Test ActivationProfileManager.list() with full_properties."""
mgr_attr = profile_type + '_activation_profiles'
faked_profile_mgr = getattr(self.faked_cpc, mgr_attr)
exp_faked_profiles = faked_profile_mgr.list()
profile_mgr = getattr(self.cpc, mgr_attr)
# Execute the code to be tested
profiles = profile_mgr.list(**full_properties_kwargs)
assert_resources(profiles, exp_faked_profiles, prop_names)
@pytest.mark.parametrize(
"profile_type, filter_args, exp_names", [
('reset',
{'name': 'rap_2'},
['rap_2']),
('reset',
{'name': '.*rap_1'},
['rap_1']),
('reset',
{'name': 'rap_1.*'},
['rap_1']),
('reset',
{'name': 'rap_.'},
['rap_1', 'rap_2']),
('reset',
{'name': '.ap_1'},
['rap_1']),
('reset',
{'name': '.+'},
['rap_1', 'rap_2']),
('reset',
{'name': 'rap_1.+'},
[]),
('reset',
{'name': '.+rap_1'},
[]),
('image',
{'name': 'iap_1'},
['iap_1']),
('image',
{'name': '.*iap_1'},
['iap_1']),
('image',
{'name': 'iap_1.*'},
['iap_1']),
('image',
{'name': 'iap_.'},
['iap_1', 'iap_2']),
('image',
{'name': '.ap_1'},
['iap_1']),
('image',
{'name': '.+'},
['iap_1', 'iap_2']),
('image',
{'name': 'iap_1.+'},
[]),
('image',
{'name': '.+iap_1'},
[]),
('load',
{'name': 'lap_2'},
['lap_2']),
('load',
{'name': '.*lap_1'},
['lap_1']),
('load',
{'name': 'lap_1.*'},
['lap_1']),
('load',
{'name': 'lap_.'},
['lap_1', 'lap_2']),
('load',
{'name': '.ap_1'},
['lap_1']),
('load',
{'name': '.+'},
['lap_1', 'lap_2']),
('load',
{'name': 'lap_1.+'},
[]),
('load',
{'name': '.+lap_1'},
[]),
('reset',
{'class': 'reset-activation-profile'},
['rap_1', 'rap_2']),
('image',
{'class': 'image-activation-profile'},
['iap_1', 'iap_2']),
('load',
{'class': 'load-activation-profile'},
['lap_1', 'lap_2']),
('reset',
{'class': 'reset-activation-profile',
'description': 'RAP #2'},
['rap_2']),
('image',
{'class': 'image-activation-profile',
'description': 'IAP #1'},
['iap_1']),
('load',
{'class': 'load-activation-profile',
'description': 'LAP #2'},
['lap_2']),
('reset',
{'description':
|
wberrier/meson
|
mesonbuild/dependencies/misc.py
|
Python
|
apache-2.0
| 14,951
| 0.001271
|
# Copyright 2013-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import glob
import os
import stat
import sysconfig
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import DependencyException, DependencyMethods
from .base import ExternalDependency, ExtraFrameworkDependency, PkgConfigDependency
class BoostDependency(ExternalDependency):
# Some boost libraries have different names for
# their sources and libraries. This dict maps
# between the two.
name2lib = {'test': 'unit_test_framework'}
def __init__(self, environment, kwargs):
super().__init__('boost', environment, 'cpp', kwargs)
self.libdir = ''
try:
self.boost_root = os.environ['BOOST_ROOT']
if not os.path.isabs(self.boost_root):
raise DependencyException('BOOST_ROOT must be an absolute path.')
except KeyError:
self.boost_root = None
if self.boost_root is None:
if self.want_cross:
if 'BOOST_INCLUDEDIR' in os.environ:
self.incdir = os.environ['BOOST_INCLUDEDIR']
else:
raise DependencyException('BOOST_ROOT or BOOST_INCLUDEDIR is needed while cross-compiling')
if mesonlib.is_windows():
self.boost_root = self.detect_win_root()
self.incdir = self.boost_root
else:
if 'BOOST_INCLUDEDIR' in os.environ:
self.incdir = os.environ['BOOST_INCLUDEDIR']
else:
self.incdir = '/usr/include'
else:
self.incdir = os.path.join(self.boost_root, 'include')
self.boost_inc_subdir = os.path.join(self.incdir, 'boost')
mlog.debug('Boost library root dir is', self.boost_root)
self.src_modules = {}
self.lib_modules = {}
self.lib_modules_mt = {}
self.detect_version()
self.requested_modules = self.get_requested(kwargs)
module_str = ', '.join(self.requested_modules)
if self.is_found:
self.detect_src_modules()
self.detect_lib_modules()
self.validate_requested()
if self.boost_root is not None:
info = self.version + ', ' + self.boost_root
else:
info = self.version
mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'), info)
else:
mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))
def detect_win_root(self):
globtext = 'c:\\local\\boost_*'
files = glob.glob(globtext)
if len(files) > 0:
return files[0]
return 'C:\\'
def get_compile_args(self):
args = []
if self.boost_root is not None:
if mesonlib.is_windows():
include_dir = self.boost_root
else:
include_dir = os.path.join(self.boost_root, 'include')
else:
include_dir = self.incdir
# Use "-isystem" when including boost h
|
eaders instead of "-I"
# to avoid compiler warnings/failures when "-Werror" is used
# Careful not to use "-isystem" on default include dirs as it
# breaks some of the headers for certain gcc versions
# For example, doing g++ -isystem /usr/include on a simple
# "int main()" source results in the error:
# "/usr/include/c++/6.3.1/cstdlib:75:25: fatal error: stdlib.h: No such file or directory"
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
# and http://stackoverflow.com/questions/37218953/isystem-on-a-system-include-directory-causes-errors
# for more details
# TODO: The correct solution would probably be to ask the
# compiler for it's default include paths (ie: "gcc -xc++ -E
# -v -") and avoid including those with -isystem
# For now, use -isystem for all includes except for some
# typical defaults (which don't need to be included at all
# since they are in the default include paths). These typical
# defaults include the usual directories at the root of the
# filesystem, but also any path that ends with those directory
# names in order to handle cases like cross-compiling where we
# might have a different sysroot.
if not include_dir.endswith(('/usr/include', '/usr/local/include')):
args.append("".join(self.compiler.get_include_args(include_dir, True)))
return args
def get_requested(self, kwargs):
candidates = kwargs.get('modules', [])
if not isinstance(candidates, list):
candidates = [candidates]
for c in candidates:
if not isinstance(c, str):
raise DependencyException('Boost module argument is not a string.')
return candidates
def validate_requested(self):
for m in self.requested_modules:
if m not in self.src_modules:
msg = 'Requested Boost module {!r} not found'
raise DependencyException(msg.format(m))
def detect_version(self):
try:
ifile = open(os.path.join(self.boost_inc_subdir, 'version.hpp'))
except FileNotFoundError:
return
with ifile:
for line in ifile:
if line.startswith("#define") and 'BOOST_LIB_VERSION' in line:
ver = line.split()[-1]
ver = ver[1:-1]
self.version = ver.replace('_', '.')
self.is_found = True
return
def detect_src_modules(self):
for entry in os.listdir(self.boost_inc_subdir):
entry = os.path.join(self.boost_inc_subdir, entry)
if stat.S_ISDIR(os.stat(entry).st_mode):
self.src_modules[os.path.split(entry)[-1]] = True
def detect_lib_modules(self):
if mesonlib.is_windows():
return self.detect_lib_modules_win()
return self.detect_lib_modules_nix()
def detect_lib_modules_win(self):
arch = detect_cpu_family(self.env.coredata.compilers)
# Guess the libdir
if arch == 'x86':
gl = 'lib32*'
elif arch == 'x86_64':
gl = 'lib64*'
else:
# Does anyone do Boost cross-compiling to other archs on Windows?
gl = None
# See if the libdir is valid
if gl:
libdir = glob.glob(os.path.join(self.boost_root, gl))
else:
libdir = []
# Can't find libdir, bail
if not libdir:
return
libdir = libdir[0]
self.libdir = libdir
globber = 'libboost_*-gd-*.lib' if self.static else 'boost_*-gd-*.lib' # FIXME
for entry in glob.glob(os.path.join(libdir, globber)):
(_, fname) = os.path.split(entry)
base = fname.split('_', 1)[1]
modname = base.split('-', 1)[0]
self.lib_modules_mt[modname] = fname
def detect_lib_modules_nix(self):
if self.static:
libsuffix = 'a'
elif mesonlib.is_osx() and not self.want_cross:
libsuffix = 'dylib'
else:
libsuffix = 'so'
globber = 'libboost_*.{}'.format(libsuffix)
if 'BOOST_LIBRARYDIR' in os.environ:
libdirs = [os.environ['BOOST_LIBRARYDIR']]
elif self.boost_root is None:
libdirs = mesonlib.get_library_dirs()
else:
|
matrumz/RPi_Custom_Files
|
Printing/hplip-3.15.2/ui4/printdialog_base.py
|
Python
|
gpl-2.0
| 5,718
| 0.002973
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui4/printdialog_base.ui'
#
# Created: Mon May 4 14:30:35 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(700, 500)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName("gridlayout")
self.StackedWidget = QtGui.QStackedWidget(Dialog)
self.StackedWidget.setObjectName("StackedWidget")
self.page = QtGui.QWidget()
self.page.setObjectName("page")
self.gridlayout1 = QtGui.QGridLayout(self.page)
self.gridlayout1.setObjectName("gridlayout1")
self.label_2 = QtGui.QLabel(self.page)
font = QtGui.QFont()
font.setPointSize(16)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridlayout1.addWidget(self.label_2, 0, 0, 1, 1)
self.line = QtGui.QFrame(self.page)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridlayout1.addWidget(self.line, 1, 0, 1, 1)
self.Files = FileTable(self.page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Files.sizePolicy().hasHeightForWidth())
self.Files.setSizePolicy(sizePolicy)
self.Files.setObjectName("Files")
self.gridlayout1.addWidget(self.Files, 2, 0, 1, 1)
self.StackedWidget.addWidget(self.page)
self.page_2 = QtGui.QWidget()
self.page_2.setObjectName("page_2")
self.gridlayout2 = QtGui.QGridLayout(self.page_2)
self.gridlayout2.setObjectName("gridlayout2")
self.label_3 = QtGui.QLabel(self.page_2)
font = QtGui.QFont()
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridlayout2.addWidget(self.label_3, 0, 0, 1, 1)
self.line_2 = QtGui.QFrame(self.page_2)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridlayout2.addWidget(self.line_2, 1, 0, 1, 1)
self.PrinterName = PrinterNameComboBox(self.page_2)
self.PrinterName.setObjectName("PrinterName")
self.gridlayout2.addWidget(self.PrinterName, 2, 0, 1, 1)
self.OptionsToolBox = PrintSettingsToolbox(self.page_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.OptionsToolBox.sizePolicy().hasHeightForWidth())
self.OptionsToolBox.setSizePolicy(sizePolicy)
self.OptionsToolBox.setObjectName("OptionsToolBox")
self.gridlayout2.addWidget(self.OptionsToolBox, 3, 0, 1, 1)
self.StackedWidget.addWidget(self.page_2)
self.gridlayout.addWidget(self.StackedWidget, 0, 0, 1, 5)
self.line_3 = QtGui.QFrame(Dialog)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridlayout.addWidget(self.line_3, 1, 0, 1, 5)
self.StepText = QtGui.QLabel(Dialog)
self.StepText.setObjectName("StepText")
self.gridlayout.addWidget(self.StepText, 2, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(251, 28, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem, 2, 1, 1, 1)
self.BackButton = QtGui.QPushButton(Dialog)
self.BackButton.setObjectName("BackButton")
self.gridlayout.addWidget(self.BackButton, 2, 2, 1, 1)
self.NextButton = QtGui.QPushButton(Dialog)
self.NextButton.setObjectName("NextButton")
self.gridlayout.addWidget(self.NextButton, 2, 3, 1, 1)
self.CancelButton = QtGui.QPushButton(Dialog)
self.CancelButton.setObjectName("CancelButton")
self.gridlayout.addWidget(self.CancelButton, 2, 4, 1, 1)
self.retranslateUi(Dialog)
self.StackedWidget.setCurrentIndex(1)
self.OptionsToolBox.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "HP Device Manager - Print", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Select Files to Print", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "Select Printer and Options", None, QtGui.QApplication.UnicodeUTF8))
self.StepText.setText(QtGui.QApplication.translate("Dialog", "Step %1 of %2", None, QtGui.QApplication.UnicodeUTF8))
self.BackButton.setText(QtGui.QApplication.translate("Dialog", "< Back", None, QtGui.QApplication.UnicodeUTF8))
self.NextButton.setText(QtGui.QApplication.translate("Dialog", "Next >", None, QtGui.QApplication.UnicodeUTF8))
self.CancelButton.setText(QtGui.QApplication.translate("Dialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
from .printsettingstoolbox import PrintSettingsToolbox
from .printernamecombobox import PrinterNameComboBox
from .filetable import FileTable
|
lbel/Maastricht-Masterclass-2015
|
scripts/MassFit.py
|
Python
|
mit
| 2,260
| 0.031858
|
def MassFit(particle) :
if raw_input("Do %s mass fit? [y/N] " % (particle)) not in ["y", "Y"]:
return
print "************************************"
print "* Doing mass fit *"
print "************************************"
f = TFile.Open("workspace.root")
w = f.Get("w")
assert(isinstance(w, RooWorkspace))
data = w.data("BsDsPi_data")
if (particle == "Bs"):
varName = "lab0_MM"
meanRange = [5366., 5360., 5372.]
if (particle == "Ds"):
varName = "lab2_MM"
meanRange = [1970., 1965., 1975.]
mass = w.var(varName)
mean = RooRealVar("mean", "mass (MeV)", meanRange[0], meanRange[1], meanRange[2]) ;
width = RooRealVar("width", "width (MeV)", 15., 5., 50.) ;
const = RooRealVar("const", "bg const", -0.005, -0.1, 0.1);
sigModel = RooGaussian( "sigModel", "signal PDF", mass, mean, width) ;
bkgModel = RooExponential("bkgModel", "bkgrnd PDF", mass, const) ;
Nsig = RooRealVar("Nsig", "signal Yield", 10000., 0., 10000000.);
Nbkg = RooRealVar("Nbkg", "bkgrnd Yield", 10000., 0., 10000000.);
model = RooAddPdf("model", "full PDF", RooArgList(sigModel, bkgModel), RooArgList(Nsig, Nbkg));
model.fitTo(data)
cMass = TCanvas("cMass_"+particle, "cMass"+particle)
frame = mass.frame()
frame.SetStats(False)
frame.SetTitle("Fit to the %s mass" % (particle))
data.plotOn(frame, RooFit.DataError(RooAbsData.SumW2))
model.plotOn(frame, RooFit.LineColor(4 ) ) #9
model.plotOn(frame, RooFit.LineColor(8 ), RooFit.LineStyle(2), RooFit.Components("sigModel"), RooFit.Name("sig") )
    model.plotOn(frame, RooFit.LineColor(46), RooFit.LineStyle(2), RooFit.Components("bkgModel"), RooFit.Name("bkg") )
frame.Draw()
leg = TLegend(0.64, 0.77, 0.89, 0.89)
leg.AddEntry(frame.findObject("sig"), "Signal ("+particle+")", "l")
leg.AddEntry(frame.findObject("bkg"), "Background", "l")
leg.Draw("same")
cMass.Update()
cMass.SaveAs("plots/MassFit"+particle+".pdf")
print " > Showing mass fit for %s" % (particle)
print " > Signal events: %d +- %d" % (Nsig.getVal(), Nsig.getError())
print " > Background events: %d +- %d" % (Nbkg.getVal(), Nbkg.getError())
raw_input("Press enter to continue.")
f.Close()
|
aspera1631/hs_logreader
|
logreader.py
|
Python
|
mit
| 4,183
| 0.005738
|
__author__ = 'bdeutsch'
import re
import numpy as np
import pandas as pd
# List cards drawn by me and played by opponent
def get_cards(filename):
# Open the file
with open(filename) as f:
mycards = []
oppcards = []
for line in f:
# Generate my revealed card list
m = re.search('name=(.+)id.+to FRIENDLY HAND', line)
if m:
mycards.append(m.group(1))
n = re.search('name=(.+)id.+to OPPOSING PLAY(?! \(Hero)', line)
if n:
oppcards.append(n.group(1))
for item in mycards:
print item
print '\n'
for item in oppcards:
print item
# make a list of card IDs and names
def get_ids():
# Create an empty list of IDs
idlist = []
with open('test_game') as f:
# For each line
for line in f:
# Find the entity ids
m = re.search('[\[ ]id=(\d+) ', line)
# if one is found
if m:
# Check that we haven't found it yet, convert to an integer
id = int(m.group(1))
# Add it to the list
if id not in idlist:
idlist.append(id)
# Sort the ids
idlist.sort()
# Convert to dataframe
d = pd.DataFrame(index=idlist)
# Rename the index
d.index.name = "Entity ID"
# Create an empty column for names
d["Name"] = np.nan
#print d
return d
# make a list of card names only if followed by id
def get_names():
with open('test_game') as f:
for line in f:
# Find the entity ids
m = re.search('[\[ ]name=([\w ]+?) id=', line)
if m:
print m.group(1)
def get_ids_names(df):
with open('test_game') as f:
namedict = {}
for line in f:
# Find combinations of entities and names
m = re.search('[\[ ]name=([\w ]+?) id=(\d+)', line)
if m:
                ent_id = int(m.group(2))
name = m.group(1)
df.ix[ent_id, 'Name'] = name
                #print m.group(2), m.group(1)
return df
idlist = []
with open('test_game') as f:
# For each line
for line in f:
# Find the entity ids
m = re.search('[\[ ]id=(\d+) ', line)
# if one is found
if m:
# Check that we haven't found it yet, convert to an integer
id = int(m.group(1))
# Add it to the list
if id not in idlist:
idlist.append(id)
# Sort the ids
idlist.sort()
# Convert to dataframe
df = pd.DataFrame(index=idlist)
# Rename the index
df.index.name = "Entity ID"
# Create an empty column for names
df["Name"] = np.nan
df["CardId"] = np.nan
df["Player"] = np.nan
with open('test_game') as f:
updates = []
for line in f:
# Find lists of the innermost nested brackets
m = re.findall(r"\[([^\[]+?)]", line)
# If it's not just the command designation bracket ("zone", e.g.)
if len(m)>1:
# for each set of bracket contents
for item in m[1:]:
# add to the list of updates
updates.append(item)
for item in updates:
# find the id
m = re.search("id=(\d+)", item)
if m:
# Assign ID variable
id = int(m.group(1))
# find name and assign
n = re.search("name=(.+?) \w+?=", item)
if n:
name = n.group(1)
df.ix[id, "Name"] = name
# find cardId and assign
n = re.search("cardId=(\w.+?) ", item)
if n:
cardId = n.group(1)
df.ix[id, "CardId"] = cardId
# find player
n = re.search("player=(\d)", item)
if n:
player = n.group(1)
df.ix[id, "Player"] = player
# update the dataframe for each update
# get rid of the "zone" and "power" markers.
# collect the entries into a list
# Put card IDs into a DataFrame
#df = get_ids_names(get_ids())
pd.set_option('display.max_rows', 200)
print df
# get_cards('test_game')
|
biocore/pyqi
|
pyqi/core/interfaces/html/input_handler.py
|
Python
|
bsd-3-clause
| 1,074
| 0.005587
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
__credits__ = ["Evan Bolyen"]
from pyqi.core.exception import IncompetentDeveloperError
def load_file_lines(option_value):
"""Retur
|
n a list of strings, one per line in the file.
Each line will have leading and trailing whitespace stripped from it.
"""
if not hasattr(option_value, 'read'):
raise IncompetentDeveloperError("Input type must be a file object.")
    return [line.strip() for line in option_value]
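# A small illustrative sketch (hypothetical file contents, not from the original
# module): for a file object whose lines are ' a \n' and 'b\n', load_file_lines()
# returns ['a', 'b'], each line stripped of leading and trailing whitespace.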
def load_file_contents(option_value):
"""Return the contents of a file as a single string."""
if not hasattr(option_value, 'read'):
raise IncompetentDeveloperError("Input type must be a file object.")
return option_value.read()
|
SymbiFlow/prjuray
|
fuzzers/004-tileinfo/cleanup_site_pins.py
|
Python
|
isc
| 4,765
| 0.000839
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
""" Tool to cleanup site pins JSON dumps.
This tool has two behaviors. The first is to rename site names from global
coordinates to site-local coordinates. The second is to remove the tile prefix
from node names.
For example CLBLM_L_X8Y149 contains two sites named SLICE_X10Y149 and
SLICE_X11Y149. SLICE_X10Y149 becomes X0Y0 and SLICE_X11Y149 becomes X1Y0.
"""
from __future__ import print_function
import json
import json5
import re
import sys
import copy
# All site names appear to follow the pattern <type>_X<abs coord>Y<abs coord>.
# Generally speaking, only the tile-relative coordinates are required to
# assemble arch defs, so we re-origin the coordinates to be relative to the tile
# (e.g. start at X0Y0) and discard the prefix from the name.
SITE_COORDINATE_PATTERN = re.compile('^(.+)_X([0-9]+)Y([0-9]+)$')
def find_origin_coordinate(sites):
""" Find the coordinates of each site within the tile, and then subtract the
smallest coordinate to re-origin them all to be relative to the tile.
"""
if len(sites) == 0:
return 0, 0
def inner_():
for site in sites:
coordinate = SITE_COORDINATE_PATTERN.match(site['name'])
assert coordinate is not None, site
x_coord = int(coordinate.group(2))
y_coord = int(coordinate.group(3))
yield x_coord, y_coord
x_coords, y_coords = zip(*inner_())
min_x_coord = min(x_coords)
min_y_coord = min(y_coords)
return min_x_coord, min_y_coord
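# A minimal usage sketch of the helper above (the site names come from the module
# docstring; the dicts themselves are illustrative, not real tool output):
#   find_origin_coordinate([{'name': 'SLICE_X10Y149'}, {'name': 'SLICE_X11Y149'}])
# returns (10, 149), so after subtraction the two sites become X0Y0 and X1Y0.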
def create_site_pin_to_wire_maps(tile_name, nodes):
""" Create a map from site_pin names to nodes.
Create a mapping from site pins to tile local wires. For each node that is
attached to a site pin, there should only be 1 tile local wire.
"""
# Remove tile prefix (e.g. CLBLM_L_X8Y149/) from node names.
# Routing resources will not have the prefix.
tile_prefix = tile_name + '/'
site_pin_to_wires = {}
for node in nodes:
if len(node['site_pins']) == 0:
continue
wire_names = [
wire for wire in node['wires'] if wire.startswith(tile_prefix)
]
assert len(wire_names) == 1, (node, tile_prefix)
for site_pin in node["site_pins"]:
assert site_pin not in site_pin_to_wires
site_pin_to_wires[site_pin] = wire_names[0]
return site_pin_to_wires
def main():
site_pins = json5.load(sys.stdin)
output_site_pins = {}
output_site_pins["tile_type"] = site_pins["tile_type"]
output_site_pins["sites"] = copy.deepcopy(site_pins["sites"])
site_pin_to_wires = create_site_pin_to_wire_maps(site_pins['tile_name'],
site_pins['nodes'])
min_x_coord, min_y_coord = find_origin_coordinate(site_pins['sites'])
for site in output_site_pins['sites']:
orig_site_name = site['name']
coordinate = SITE_COORDINATE_PATTERN.match(orig_site_name)
x_coord = int(coordinate.group(2))
y_coord = int(coordinate.group(3))
site['name'] = 'X{}Y{}'.format(x_coord - min_x_coord,
y_coord - min_y_coord)
site['prefix'] = coordinate.group(1)
site['x_coord'] = x_coord - min_x_coord
site['y_coord'] = y_coord - min_y_coord
for site_pin in site['site_pins']:
assert site_pin['name'].startswith(orig_site_name + '/')
if site_pin['name'] in site_pin_to_wires:
site_pin['wire'] = site_pin_to_wires[site_pin['name']]
else:
print(
('***WARNING***: Site pin {} for tile type {} is not connected, '
                     'make sure all instances of this tile type have this site_pin '
'disconnected.').format(site_pin['name'],
site_pins['tile_type']),
file=sys.stderr)
site_pin['name'] = site_pin['name'][len(orig_site_name) + 1:]
    print(json.dumps(output_site_pins, indent=2, sort_keys=True))
if __name__ == "__main__":
main()
|
googleapis/python-translate
|
samples/generated_samples/translate_generated_translate_v3_translation_service_create_glossary_async.py
|
Python
|
apache-2.0
| 1,703
| 0.001762
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateGlossary
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-translate
# [START translate_generated_translate_v3_TranslationService_CreateGlossary_async]
from google.cloud import translate_v3
async def sample_create_glossary():
# Create a client
client = translate_v3.TranslationServiceAsyncClient()
# Initialize request argument(s)
glossary = translate_v3.Glossary()
glossary.name = "name_value"
request = translate_v3.CreateGlossaryRequest(
parent="parent_value",
glossary=glossary,
)
# Make the request
operation = client.create_glossary(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END translate_generated_translate_v3_TranslationService_CreateGlossary_async]
|
PyFilesystem/pyfilesystem
|
fs/commands/fsrm.py
|
Python
|
bsd-3-clause
| 1,775
| 0.003944
|
#!/usr/bin/env python
from fs.errors import ResourceNotFoundError
from fs.opener import opener
from fs.commands.runner import Command
import sys
class FSrm(Command):
usage = """fsrm [OPTION]... [PATH]
Remove a file or directory at PATH"""
def get_optparse(self):
optparse = super(FSrm, self).get_optparse()
optparse.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='ignore non-existent files, never prompt')
optparse.add_option('-i', '--interactive', dest='interactive', action='store_true', default=False,
help='prompt before removing')
optparse.add_option('-r', '--recursive', dest='recursive', action='store_true', default=False,
help='remove directories and their contents recursively')
return optparse
def do_run(self, options, args):
interactive = options.interactive
verbose = options.verbose
for fs, path, is_dir in self.get_resources(args):
if interactive:
if is_dir:
msg = "remove directory '%s'?" % path
else:
msg = "remove file '%s'?" % path
if not self.ask(msg) in 'yY':
continue
try:
if is_dir:
fs.removedir(path, force=options.recursive)
else:
fs.remove(path)
except ResourceNotFoundError:
if not options.force:
raise
else:
if verbose:
self.output("removed '%s'\n" % path)
def run():
    return FSrm().run()
if __name__ == "__main__":
sys.exit(run())
|
edx/edx-platform
|
openedx/core/djangoapps/user_api/accounts/tests/test_views.py
|
Python
|
agpl-3.0
| 53,614
| 0.003117
|
"""
Test cases to cover Accounts-related behaviors of the User API application
"""
import datetime
import hashlib
import json
from copy import deepcopy
from unittest import mock
import ddt
import pytz
from django.conf import settings
from django.test.testcases import TransactionTestCase
from django.test.utils import override_settings
from django.urls import reverse
from edx_name_affirmation.api import create_verified_name
from edx_name_affirmation.statuses import VerifiedNameStatus
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from common.djangoapps.student.models import PendingEmailChange, UserProfile
from common.djangoapps.student.tests.factories import TEST_PASSWORD, RegistrationFactory, UserFactory
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_for_user
from openedx.core.djangoapps.user_api.accounts import ACCOUNT_VISIBILITY_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from .. import ALL_USERS_VISIBILITY, CUSTOM_VISIBILITY, PRIVATE_VISIBILITY
TEST_PROFILE_IMAGE_UPLOADED_AT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=pytz.UTC)
# this is used in one test to check the behavior of profile image url
# generation with a relative url in the config.
TEST_PROFILE_IMAGE_BACKEND = deepcopy(settings.PROFILE_IMAGE_BACKEND)
TEST_PROFILE_IMAGE_BACKEND['options']['base_url'] = '/profile-images/'
TEST_BIO_VALUE = "Tired mother of twins"
TEST_LANGUAGE_PROFICIENCY_CODE = "hi"
class UserAPITestCase(APITestCase):
"""
The base class for all tests of the User API
"""
VERIFIED_NAME = "Verified User"
def setUp(self):
super().setUp()
self.anonymous_client = APIClient()
self.different_user = UserFactory.create(password=TEST_PASSWORD)
self.different_client = APIClient()
self.staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
self.staff_client = APIClient()
self.user = UserFactory.create(password=TEST_PASSWORD) # will be assigned to self.client by default
def login_client(self, api_client, user):
"""Helper method for getting the client and user and logging in. Returns client. """
client = getattr(self, api_client)
user = getattr(self, user)
client.login(username=user.username, password=TEST_PASSWORD)
return client
def send_post(self, client, json_data, content_type='application/json', expected_status=201):
"""
Helper method for sending a post to the server, defaulting to application/json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.post(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def send_patch(self, client, json_data, content_type="application/merge-patch+json", expected_status=200):
"""
Helper method for sending a patch to the server, defaulting to application/merge-patch+json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.patch(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def post_search_api(self, client, json_data, content_type='application/json', expected_status=200):
"""
        Helper method for sending a post to the server, defaulting to application/json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.post(self.search_api_url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def send_get(self, client, query_parameters=None, expected_status=200):
"""
Helper method for sending a GET to the server. Verifies the expected status and returns the response.
"""
url = self.url + '?' + query_parameters if query_parameters else self.url # pylint: disable=no-member
response = client.get(url)
assert expected_status == response.status_code
return response
# pylint: disable=no-member
def send_put(self, client, json_data, content_type="application/json", expected_status=204):
"""
Helper method for sending a PUT to the server. Verifies the expected status and returns the response.
"""
response = client.put(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
# pylint: disable=no-member
def send_delete(self, client, expected_status=204):
"""
Helper method for sending a DELETE to the server. Verifies the expected status and returns the response.
"""
response = client.delete(self.url)
assert expected_status == response.status_code
return response
def create_mock_profile(self, user):
"""
Helper method that creates a mock profile for the specified user
:return:
"""
legacy_profile = UserProfile.objects.get(id=user.id)
legacy_profile.country = "US"
legacy_profile.state = "MA"
legacy_profile.level_of_education = "m"
legacy_profile.year_of_birth = 2000
legacy_profile.goals = "world peace"
legacy_profile.mailing_address = "Park Ave"
legacy_profile.gender = "f"
legacy_profile.bio = TEST_BIO_VALUE
legacy_profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOADED_AT
legacy_profile.language_proficiencies.create(code=TEST_LANGUAGE_PROFICIENCY_CODE)
legacy_profile.phone_number = "+18005555555"
legacy_profile.save()
def create_mock_verified_name(self, user):
"""
Helper method to create an approved VerifiedName entry in name affirmation.
"""
legacy_profile = UserProfile.objects.get(id=user.id)
        create_verified_name(user, self.VERIFIED_NAME, legacy_profile.name, status=VerifiedNameStatus.APPROVED)
    def create_user_registration(self, user):
"""
Helper method that creates a registration object for the specified user
"""
RegistrationFactory(user=user)
def _verify_profile_image_data(self, data, has_profile_image):
"""
Verify the profile image data in a GET response for self.user
corresponds to whether the user has or hasn't set a profile
image.
"""
template = '{root}/{filename}_{{size}}.{extension}'
if has_profile_image:
url_root = 'http://example-storage.com/profile-images'
filename = hashlib.md5(('secret' + self.user.username).encode('utf-8')).hexdigest()
file_extension = 'jpg'
template += '?v={}'.format(TEST_PROFILE_IMAGE_UPLOADED_AT.strftime("%s"))
else:
url_root = 'http://testserver/static'
filename = 'default'
file_extension = 'png'
template = template.format(root=url_root, filename=filename, extension=file_extension)
assert data['profile_image'] == {'has_image': has_profile_image,
'image_url_full': template.format(size=50),
'image_url_small': template.format(size=10)}
@ddt.ddt
@skip_unless_lms
class TestOwnUsernameAPI(CacheIsolationTestCase, UserAPITestCase):
"""
Unit tests for the Accounts API.
"""
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.url = reverse("own_username_api")
def _verify_get_own_username(self, queries, expected_status=200):
"""
Internal helper to perform the actual assertion
"""
with se
|
caiges/populous
|
populous/thumbnail/defaults.py
|
Python
|
bsd-3-clause
| 324
| 0
|
DEBUG = False
BASEDIR = ''
SUBDIR = ''
PREFIX = ''
QUALITY = 85
CONVERT = '/usr/bin/convert'
WVPS = '/usr/bin/wvPS'
PROCESSORS = (
'populous.thumbnail.processors.colorspace',
'populous.thumbnail.processors.autocrop',
'populous.thumbnail.processors.scale_and_crop',
    'populous.thumbnail.processors.filters',
)
|
amidoimidazol/bio_info
|
Rosalind.info Problems/Mortal Fibonacci Rabbits.py
|
Python
|
mit
| 1,178
| 0.005111
|
'''
Author: Peter Chip (furamail001@gmail.com)
Date: 2015 03 25
Given: Positive integers n≤100 and m≤20.
Return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live for m months.
Theory: The standard Fibonacci series: 1 1 2 3 5 8 13
fn = fn-1 + fn-2
In general it is fn = fn-1 + k*(fn-2), where k is the number of new pairs each mature pair produces per month. In the standard Fibonacci sequence k is 1.
The number of pairs in the first six months
m = 3 : 1 1 2 2 3 4
m = 4 : 1 1 2 3 4 6
m = 5 : 1 1 2 3 5 7
After the first death the number of rabbits:
fn = fn-2 + fn-3 ... fn-m
'''
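# A worked check of the recurrence above (illustrative only, not part of the
# original solution): with m = 3, the "after the first death" rule
# f(n) = f(n-2) + f(n-3) gives
#   f(3) = 1 + 1 = 2,   f(4) = 2 + 1 = 3,   f(5) = 2 + 2 = 4
# which reproduces the m = 3 row (1 1 2 2 3 4) quoted in the docstring.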
def new_value(i):
value = 0
change = 2
# Repeat it m - 1 times
for y in range(1, m):
# Add the fn-2 + fn-3 .. fn-m
value += L[(i-change)]
change += 1
return value
# number of months
n = 94
# months a rabbit lives
m = 20
L = [1, 1, 2]
i = 3
while i < n:
# If the first death occurs
if i >= m:
L.append(new_value(i))
i += 1
# If before the first death
else:
L.append(L[i-1]+ (L[i-2]))
i += 1
print(L[len(L)-1])
|
SuperElastix/elastix
|
Testing/elx_get_checksum_list.py
|
Python
|
apache-2.0
| 2,114
| 0.028382
|
import sys
import os
import os.path
import glob
from optparse import OptionParser
#-------------------------------------------------------------------------------
# the main function
# cd bin_VS2010
# ctest -C Release
# cd Testing
# python ../../elastix/Testing/elx_get_checksum_list.py -l elastix_run*
# cd ..
# cmake .
# ctest -C Release -R COMPARE_CHECKSUM
# svn commit -m "ENH: updating baselines after recent change X"
def main():
# usage, parse parameters
usage = "usage: %prog [options] arg"
parser = OptionParser( usage )
# option to debug and verbose
parser.add_option( "-v", "--verbose",
action="store_true", dest="verbose" )
# options to control files
parser.add_option( "-l", "--list", type="string", dest="directoryList", help="list of elastix output directories" )
(options, args) = parser.parse_args()
# Check if option -l is given
    if options.directoryList == None :
parser.error( "The option directory list (-l) should be given" )
# Use glob, this works not only on Linux
dirList = glob.glob( options.directoryList );
# Add everything not processed
dirList.extend( args );
print( "directory checksum" )
for directory in dirList:
# Equivalent to: fileName = options.directory + "/" + "elastix.log"
fileName = os.path.join( directory, "elastix.log" );
# Read elastix.log and find last line with checksum
try:
f = open( fileName )
except IOError as e:
print( directory + " No elastix.log found" )
continue
checksumFound = False;
for line in f:
if "Registration result checksum:" in line:
checksumline = line;
checksumFound = True;
# Extract checksum
if checksumFound:
checksum = checksumline.split(': ')[1].rstrip( "\n" );
# Print result
print( directory + " " + checksum );
else:
print( directory + " -" );
f.close();
return 0
#-------------------------------------------------------------------------------
if __name__ == '__main__':
sys.exit(main())
|
alexanderfefelov/nav
|
python/nav/eventengine/plugins/modulestate.py
|
Python
|
gpl-2.0
| 2,481
| 0
|
#
# Copyright (C) 2012, 2014 UNINETT
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
""""moduleState event plugin"""
import datetime
from nav.eventengine.alerts import AlertGenerator
from nav.eventengine.plugins import delayedstate
from nav.models.manage import Module
class ModuleStateHandler(delayedstate.DelayedStateHandler):
"""Accepts moduleState events"""
HAS_WARNING_ALERT = True
WARNING_WAIT_TIME = 'moduleDown.warning'
ALERT_WAIT_TIME = 'moduleDown.alert'
handled_types = ('moduleState',)
__waiting_for_resolve = {}
_target = None
def get_target(self):
if not self._target:
self._target = Module.objects.get(id=self.event.subid)
assert self._target.netbox_id == self.event.netbox.id
return self._target
def _get_up_alert(self):
alert = self._get_alert()
alert.alert_type = "moduleUp"
return alert
def _set_internal_state_down(self):
module = self.get_target()
module.up = module.UP_DOWN
module.down_since = datetime.datetime.now()
module.save()
def _set_internal_state_up(self):
module = self.get_target()
module.up = module.UP_UP
module.down_since = None
module.save()
def _get_down_alert(self):
alert = self._get_alert()
alert.alert_type = "moduleDown"
return alert
def _get_alert(self):
alert = AlertGenerator(self.event)
target = self.get_target()
if target:
alert['module'] = target
        return alert
def _post_down_warning(self):
"""Posts the actual warning alert"""
alert = self._get_alert()
alert.alert_type = "moduleDownWarning"
alert.state = self.event.STATE_STATELESS
self._logger.info("%s: Posting %s alert",
self.get_target(), alert.alert_type)
alert.post()
|
amolenaar/gaphor
|
gaphor/RAAML/fta/conditionalevent.py
|
Python
|
lgpl-2.1
| 1,442
| 0.000693
|
"""Conditional Event item definition."""
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, IconBox, Text
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.RAAML import raaml
from gaphor.RAAML.fta.basicevent import draw_basic_event
from gaphor.UML.modelfactory import stereotypes_str
@represents(raaml.ConditionalEvent)
class ConditionalEventItem(ElementPresentation, Classified):
def __init__(self, diagram, id=None):
super().__init__(diagram, id, width=70, height=35)
self.watch("subject[NamedElement].name").watch(
"subject[NamedElement].namespace.name"
)
def update_shapes(self, event=None):
self.shape = IconBox(
Box(
draw=draw_basic_event,
),
Text(
text=lambda: stereotypes_str(self.subject, ["ConditionalEvent"]),
),
Text(
text=lambda: self.subject.name or "",
width=lambda: self.width - 4,
style={
"font-weight": FontWeight.BOLD,
"font-style": FontStyle.NORMAL,
},
),
Text(
text=lambda: from_package_str(self),
style={"font-size": "x-small"},
),
)
|
andreadean5/python-hpOneView
|
hpOneView/resources/servers/logical_enclosures.py
|
Python
|
mit
| 9,117
| 0.002962
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__title__ = 'logical-enclosures'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class LogicalEnclosures(object):
URI = '/rest/logical-enclosures'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Returns a list of logical enclosures matching the specified filter. A maximum of 40 logical enclosures are
returned to the caller. Additional calls can be made to retrieve any other logical enclosures matching the
filter. Valid filter parameters include attributes of a Logical Enclosure resource.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all the items.
The actual number of items in the response may differ from the requested
count if the sum of start and count exceed the total number of items.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
list: A list of logical enclosures.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
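    # A minimal usage sketch of get_all above (the connection object con is assumed
    # to exist already; the values are illustrative, not part of the library):
    #   logical_enclosures = LogicalEnclosures(con)
    #   first_forty = logical_enclosures.get_all(start=0, count=40)
    #   by_name = logical_enclosures.get_all(sort='name:ascending')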
def get_by(self, field, value):
"""
Get all logical enclosures that match the filter
The search is case insensitive
Args:
field: field name to filter
value: value to filter
Returns:
list: A list of logical enclosures.
"""
return self._client.get_by(field, value)
def get_by_name(self, name):
"""
        Retrieve a resource by its name
Args:
name: resource name
Returns: dict
"""
return self._client.get_by_name(name=name)
def get(self, id_or_uri):
"""
Returns the logical enclosure with the specified ID, if it exists.
Args:
id: ID or URI of logical enclosure
Returns: (dict) logical enclosure
"""
return self._client.get(id_or_uri)
def update(self, resource, timeout=-1):
"""
Updates the given logical enclosure that is passed in. The fields that can be updated on the logical enclosure
itself include its name and configuration script. When the script is updated on the logical enclosure, the
configuration script runs on all enclosures in the logical enclosure.
Args:
resource (dict): Object to update
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns: (dict) Updated logical enclosure
"""
return self._client.update(resource, timeout=timeout)
def patch(self, id_or_uri, operation, path, value, timeout=-1):
"""
Updates the given logical enclosure's attributes that are passed in. The PATCH operation is a partial update of
the resource. The support operation in this context is the firmware update.
Args:
id_or_uri: Could be either the resource id or the resource uri
operation: Patch operation
path: Path
value: Value
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns: (dict) Updated logical enclosure
"""
return self._client.patch(id_or_uri, operation, path, value, timeout=timeout)
def update_configuration(self, id_or_uri, timeout=-1):
"""
Reapplies the appliance's configuration on enclosures for the logical enclosure by ID or uri. This includes
running the same configure steps that were performed as part of the enclosure add. A task is returned to the
caller which can be used to track the progress of the operation.
Args:
id_or_uri: Could be either the resource id or the resource uri
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns: (dict) logical enclosure
"""
uri = self._client.build_uri(id_or_uri) + "/configuration"
return self._client.update_with_zero_body(uri, timeout=timeout)
def get_script(self, id_or_uri):
"""
Gets the configuration script of the logical enclosure by id or uri
Args:
id_or_uri: Could be either the resource id or the resource uri
Return: configuration script
"""
uri = self._client.build_uri(id_or_uri) + "/script"
return self._client.get(uri)
def update_script(self, id_or_uri, information, timeout=-1):
"""
Updates the configuration script of the logical enclosure and on all enclosures in the logical enclosure with
the specified ID.
Args:
id_or_uri: Could be either the resource id or the resource uri
information: updated script
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Return: configuration script
"""
        uri = self._client.build_uri(id_or_uri) + "/script"
return self._client.update(information, uri=uri, timeout=timeout)
def generate_support_dump(self, information, id_or_uri, timeout=-1):
"""
Generates a support dump for the logical enclosure with the specified ID. A logical enclosure support dump
includes content for logical interconnects associated with that logical enclosure. By default, it also contains
        appliance support dump content.
Args:
id_or_uri: Could be either the resource id or the resource uri
information (dict): information to generate support dump
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
|
systers/vms
|
vms/pom/locators/eventSearchPageLocators.py
|
Python
|
gpl-2.0
| 481
| 0.002079
|
class EventSearchPageLocators(object):
NAME_FIELD = ".form-control[name='name']"
START_DATE_FIELD = ".form-control[name='start_date']"
END_DATE_FIELD = ".form-control[name='end_date']"
CITY_FIELD = ".form-control[name='city']"
STATE_FIELD = ".form-control[name='state']"
COUNTRY_FIELD = ".form-control[name='country']"
JOB_FIELD = ".form-control[name='job']"
RESULT_BODY = '//table//tbody'
    HELP_BLOCK = 'help-block'
SUBMIT_PATH = 'submit'
|
calum-chamberlain/EQcorrscan
|
eqcorrscan/utils/pre_processing.py
|
Python
|
gpl-3.0
| 39,205
| 0
|
"""
Utilities module whose functions are designed to do the basic processing of
the data using obspy modules (which also rely on scipy and numpy).
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
import numpy as np
import logging
import datetime as dt
from collections import Counter
from multiprocessing import Pool, cpu_count
from obspy import Stream, Trace, UTCDateTime
from obspy.core.trace import Stats
from obspy.signal.filter import bandpass, lowpass, highpass
Logger = logging.getLogger(__name__)
def _check_daylong(tr):
"""
Check the data quality of the daylong file.
Check to see that the day isn't just zeros, with large steps, if it is
then the resampling will hate it.
:type tr: obspy.core.trace.Trace
:param tr: Trace to check if the data are daylong.
:return quality (simply good or bad)
:rtype: bool
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import _check_daylong
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/' +
... '2013-09-01-0410-35.DFDPC_024_00')
>>> _check_daylong(st[0])
True
"""
if len(np.nonzero(tr.data)[0]) < 0.5 * len(tr.data):
qual = False
else:
qual = True
return qual
def shortproc(st, lowcut, highcut, filt_order, samp_rate, parallel=False,
num_cores=False, starttime=None, endtime=None,
seisan_chan_names=False, fill_gaps=True, ignore_length=False,
ignore_bad_data=False, fft_threads=1):
"""
Basic function to bandpass and downsample.
Works in place on data. This is employed to ensure all parts of the
data are processed in the same way.
:type st: obspy.core.stream.Stream
:param st: Stream to process
:type lowcut: float
:param lowcut: Low cut for bandpass in Hz
:type highcut: float
:param highcut: High cut for bandpass in Hz
:type filt_order: int
:param filt_order: Number of corners for bandpass filter
:type samp_rate: float
:param samp_rate: Sampling rate desired in Hz
:type parallel: bool
:param parallel:
Set to True to process traces in parallel, for small numbers of traces
this is often slower than serial processing, defaults to False
:type num_cores: int
:param num_cores:
Control the number of cores for parallel processing, if set to False
then this will use all the cores available.
:type starttime: obspy.core.utcdatetime.UTCDateTime
:param starttime:
Desired data start time, will trim to this before processing
:type endtime: obspy.core.utcdatetime.UTCDateTime
:param endtime:
Desired data end time, will trim to this before processing
:type seisan_chan_names: bool
:param seisan_chan_names:
Whether channels are named like seisan channels (which are two letters
        rather than SEED convention of three) - defaults to False.
:type fill_gaps: bool
:param fill_gaps: Whether to pad any gaps found with zeros or not.
:type ignore_length: bool
:param ignore_length:
Whether to allow data that are less than 80% of the requested length.
Defaults to False which will error if short data are found.
:type ignore_bad_data: bool
:param ignore_bad_data:
If False (default), errors will be raised if data are excessively
gappy or are mostly zeros. If True then no error will be raised, but
an empty trace will be returned.
:type fft_threads: int
:param fft_threads:
Number of threads to use for pyFFTW FFT in resampling. Note that it
is not recommended to use fft_threads > 1 and num_cores > 1.
:return: Processed stream
:rtype: :class:`obspy.core.stream.Stream`
.. note::
If your data contain gaps you should *NOT* fill those gaps before
using the pre-process functions. The pre-process functions will fill
the gaps internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a merged
stream without the `fill_value` argument (e.g.: `st = st.merge()`).
.. warning::
If you intend to use this for processing templates you should consider
how resampling will impact your cross-correlations. Minor differences
in resampling between day-long files (which you are likely to use for
continuous detection) and shorter files will reduce your
cross-correlations!
.. rubric:: Example, bandpass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=9, filt_order=3, samp_rate=20,
... parallel=True, num_cores=2)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, low-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=None, highcut=9, filt_order=3,
... samp_rate=20)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
.. rubric:: Example, high-pass
>>> from obspy import read
>>> from eqcorrscan.utils.pre_processing import shortproc
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/2013-09-01-0410-35.DFDPC_024_00')
>>> st = shortproc(st=st, lowcut=2, highcut=None, filt_order=3,
... samp_rate=20)
>>> print(st[0])
AF.LABE..SHZ | 2013-09-01T04:10:35.700000Z - 2013-09-01T04:12:05.650000Z \
| 20.0 Hz, 1800 samples
"""
if isinstance(st, Trace):
tracein = True
st = Stream(st)
else:
tracein = False
# Add sanity check for filter
if highcut and highcut >= 0.5 * samp_rate:
raise IOError('Highcut must be lower than the nyquist')
length = None
clip = False
if starttime is not None and endtime is not None:
for tr in st:
tr.trim(starttime, endtime)
if len(tr.data) == ((endtime - starttime) *
tr.stats.sampling_rate) + 1:
tr.data = tr.data[1:len(tr.data)]
length = endtime - starttime
clip = True
elif starttime:
for tr in st:
tr.trim(starttime=starttime)
elif endtime:
for tr in st:
tr.trim(endtime=endtime)
for tr in st:
if len(tr.data) == 0:
st.remove(tr)
Logger.warning('No data for {0} after trim'.format(tr.id))
if parallel:
if not num_cores:
num_cores = cpu_count()
if num_cores > len(st):
num_cores = len(st)
pool = Pool(processes=num_cores)
        results = [pool.apply_async(process, (tr,), {
'lowcut': lowcut, 'highcut': highcut, 'filt_order': filt_order,
'samp_rate': samp_rate, 'starttime': starttime,
'clip': clip, 'seisan_chan_names': seisan_chan_names,
'fill_gaps': fill_gaps, 'length': length,
'ignore_length': ignore_length, 'fft_threads': fft_threads,
'
|
kelle/astropy
|
astropy/convolution/tests/test_convolve_nddata.py
|
Python
|
bsd-3-clause
| 1,827
| 0.002737
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import numpy as np
from ..convolve import convolve, convolve_fft
from ..kernels import Gaussian2DKernel
from ...nddata import NDData
def test_basic_nddata():
arr = np.zeros((11, 11))
arr[5, 5] = 1
ndd = NDData(arr)
test_kernel = Gaussian2DKernel(1)
result = convolve(ndd, test_kernel)
x, y = np.mgrid[:11, :11]
expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2))
np.testing.assert_allclose(result, expected, atol=1e-6)
resultf = convolve_fft(ndd, test_kernel)
np.testing.assert_allclose(resultf, expected, atol=1e-6)
@pytest.mark.parametrize('convfunc',
[lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True),
lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)])
def test_masked_nddata(convfunc):
arr = np.zeros((11, 11))
arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2
arr[5, 5] = 1.5
ndd_base = NDData(arr)
mask = arr < 0 # this is all False
mask[5, 5] = True
ndd_mask = NDData(arr, mask=mask)
arrnan = arr.copy()
arrnan[5, 5] = np.nan
ndd_nan = NDData(arrnan)
    test_kernel = Gaussian2DKernel(1)
result_base = convfunc(ndd_base, test_kernel)
result_nan = convfunc(ndd_nan, test_kernel)
    result_mask = convfunc(ndd_mask, test_kernel)
assert np.allclose(result_nan, result_mask)
assert not np.allclose(result_base, result_mask)
assert not np.allclose(result_base, result_nan)
# check to make sure the mask run doesn't talk back to the initial array
assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
|
ME-ICA/me-ica
|
meica.libs/nibabel/nicom/tests/test_dicomwrappers.py
|
Python
|
lgpl-2.1
| 6,163
| 0.004543
|
""" Testing DICOM wrappers
"""
from os.path import join as pjoin, dirname
import gzip
import numpy as np
try:
import dicom
except ImportError:
have_dicom = False
else:
have_dicom = True
dicom_test = np.testing.dec.skipif(not have_dicom,
'could not import pydicom')
from .. import dicomwrappers as didw
from .. import dicomreaders as didr
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
IO_DATA_PATH = pjoin(dirname(__file__), 'data')
DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz')
if have_dicom:
DATA = dicom.read_file(gzip.open(DATA_FILE))
else:
DATA = None
DATA_FILE_B0 = pjoin(IO_DATA_PATH, 'siemens_dwi_0.dcm.gz')
DATA_FILE_SLC_NORM = pjoin(IO_DATA_PATH, 'csa_slice_norm.dcm')
# This affine from our converted image was shown to match our image
# spatially with an image from SPM DICOM conversion. We checked the
# matching with SPM check reg. We have flipped the first and second
# rows to allow for rows, cols tranpose in current return compared to
# original case.
EXPECTED_AFFINE = np.array(
[[ -1.796875, 0, 0, 115],
[0, -1.79684984, -0.01570896, 135.028779],
[0, -0.00940843750, 2.99995887, -78.710481],
[0, 0, 0, 1]])[:,[1,0,2,3]]
# from Guys and Matthew's SPM code, undoing SPM's Y flip, and swapping
# first two values in vector, to account for data rows, cols difference.
EXPECTED_PARAMS = [992.05050247, (0.00507649,
0.99997450,
-0.005023611)]
@dicom_test
def test_wrappers():
# test direct wrapper calls
# first with empty data
for maker, kwargs in ((didw.Wrapper,{}),
(didw.SiemensWrapper, {}),
(didw.MosaicWrapper, {'n_mosaic':10})):
dw = maker(**kwargs)
assert_equal(dw.get('InstanceNumber'), None)
assert_equal(dw.get('AcquisitionNumber'), None)
assert_raises(KeyError, dw.__getitem__, 'not an item')
assert_raises(didw.WrapperError, dw.get_data)
assert_raises(didw.WrapperError, dw.get_affine)
for klass in (didw.Wrapper, didw.SiemensWrapper):
dw = klass()
assert_false(dw.is_mosaic)
for maker in (didw.wrapper_from_data,
didw.Wrapper,
didw.SiemensWrapper,
didw.MosaicWrapper
):
dw = maker(DATA)
assert_equal(dw.get('InstanceNumber'), 2)
assert_equal(dw.get('AcquisitionNumber'), 2)
assert_raises(KeyError, dw.__getitem__, 'not an item')
for maker in (didw.MosaicWrapper, didw.wrapper_from_data):
assert_true(dw.is_mosaic)
@dicom_test
def test_wrapper_from_data():
# test wrapper from data, wrapper from file
for dw in (didw.wrapper_from_data(DATA),
didw.wrapper_from_file(DATA_FILE)):
assert_equal(dw.get('InstanceNumber'), 2)
assert_equal(dw.get('AcquisitionNumber'), 2)
        assert_raises(KeyError, dw.__getitem__, 'not an item')
        assert_true(dw.is_mosaic)
        assert_array_almost_equal(
np.dot(didr.DPCS_TO_TAL, dw.get_affine()),
EXPECTED_AFFINE)
@dicom_test
def test_dwi_params():
dw = didw.wrapper_from_data(DATA)
b_matrix = dw.b_matrix
assert_equal(b_matrix.shape, (3,3))
q = dw.q_vector
b = np.sqrt(np.sum(q * q)) # vector norm
g = q / b
assert_array_almost_equal(b, EXPECTED_PARAMS[0])
assert_array_almost_equal(g, EXPECTED_PARAMS[1])
@dicom_test
def test_vol_matching():
# make the Siemens wrapper, check it compares True against itself
dw_siemens = didw.wrapper_from_data(DATA)
assert_true(dw_siemens.is_mosaic)
assert_true(dw_siemens.is_csa)
assert_true(dw_siemens.is_same_series(dw_siemens))
# make plain wrapper, compare against itself
dw_plain = didw.Wrapper(DATA)
assert_false(dw_plain.is_mosaic)
assert_false(dw_plain.is_csa)
assert_true(dw_plain.is_same_series(dw_plain))
# specific vs plain wrapper compares False, because the Siemens
# wrapper has more non-empty information
assert_false(dw_plain.is_same_series(dw_siemens))
# and this should be symmetric
assert_false(dw_siemens.is_same_series(dw_plain))
# we can even make an empty wrapper. This compares True against
# itself but False against the others
dw_empty = didw.Wrapper()
assert_true(dw_empty.is_same_series(dw_empty))
assert_false(dw_empty.is_same_series(dw_plain))
assert_false(dw_plain.is_same_series(dw_empty))
# Just to check the interface, make a pretend signature-providing
# object.
class C(object):
series_signature = {}
assert_true(dw_empty.is_same_series(C()))
@dicom_test
def test_slice_indicator():
dw_0 = didw.wrapper_from_file(DATA_FILE_B0)
dw_1000 = didw.wrapper_from_data(DATA)
z = dw_0.slice_indicator
assert_false(z is None)
assert_equal(z, dw_1000.slice_indicator)
dw_empty = didw.Wrapper()
assert_true(dw_empty.slice_indicator is None)
@dicom_test
def test_orthogonal():
#Test that the slice normal is sufficiently orthogonal
dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
R = dw.rotation_matrix
assert np.allclose(np.eye(3),
np.dot(R, R.T),
atol=1e-6)
@dicom_test
def test_use_csa_sign():
#Test that we get the same slice normal, even after swapping the iop
#directions
dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
iop = dw.image_orient_patient
dw.image_orient_patient = np.c_[iop[:,1], iop[:,0]]
dw2 = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
assert np.allclose(dw.slice_normal, dw2.slice_normal)
@dicom_test
def test_assert_parallel():
#Test that we get an AssertionError if the cross product and the CSA
#slice normal are not parallel
dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
dw.image_orient_patient = np.c_[[1., 0., 0.], [0., 1., 0.]]
assert_raises(AssertionError, dw.__getattribute__, 'slice_normal')
|
storyhe/playWithBot
|
plugins/rpgbot.py
|
Python
|
mit
| 3,282
| 0.014009
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""!가입; 봇 게임센터에 가입합니다.\n!내정보; 내 등록된 정보를 봅니다."""
import re
import json
from botlib import BotLib
from rpg import RPG
from util.util import enum
CmdType = enum(
Register = 1,
MyInfo = 2,
WeaponInfo = 3,
AddWeapon = 4,
UpgradeWeapon = 5,
)
# Get the command type from the input text
def input_to_CmdType(text):
if u"!가입" == text: return CmdType.Register
if u"!내정보" == text: return CmdType.MyInfo
if re.findall(u"^!무기정보 ", text): return CmdType.WeaponInfo
if re.findall(u"^!무기추가 ",
|
text): return CmdType.AddWeapon
if re.findall(u"^!무기강화 ", text): return CmdType.UpgradeWeapon
return None
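# Illustrative calls (values taken from the mappings above): input_to_CmdType(u"!가입")
# returns CmdType.Register, input_to_CmdType(u"!내정보") returns CmdType.MyInfo, and
# any unrecognised text returns None.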
# Extract the command argument from the input text.
# For now this assumes every command takes exactly one argument.
# A missing argument raises ValueError, which is handled by the try-except in on_message().
def get_argument(cmdType, text):
match = None
if cmdType == CmdType.WeaponInfo :
match = re.findall(ur"^!무기정보 (.*)", text)
if not match: raise ValueError
if cmdType == CmdType.AddWeapon :
match = re.findall(ur"^!무기추가 (.*)", text)
if not match: raise ValueError
if cmdType == CmdType.UpgradeWeapon :
match = re.findall(ur"^!무기강화 (.*)", text)
if not match: raise ValueError
return match[0]
# Run the main command
def run_command(cmdType, text, msgobj, serverobj):
userId = msgobj['user']
userobj = BotLib.get_user_json_obj(userId, serverobj)
channel = msgobj['channel']
Rpg = RPG(BotLib)
result = ''
if cmdType == CmdType.Register :
result = Rpg.add_user(userobj)
if cmdType == CmdType.MyInfo :
result = Rpg.get_user_info(userobj)
if cmdType == CmdType.WeaponInfo :
weaponname = get_argument(cmdType, text)
result = Rpg.get_weapon_info(userobj, weaponname)
if cmdType == CmdType.AddWeapon :
weaponname = get_argument(cmdType, text)
result = Rpg.add_weapon(userobj, weaponname)
if cmdType == CmdType.UpgradeWeapon :
weaponname = get_argument(cmdType, text)
result = Rpg.upgrade_weapon(userobj, weaponname)
BotLib.say(channel, result)
################################################################################
# slask plugin entry point
# Returning a string makes the bot say it
def on_message(msg, server):
text = msg.get("text", "")
cmdType = input_to_CmdType(text)
if not cmdType:
return
try:
run_command(cmdType, text, msg, server['client'].server)
except ValueError:
if cmdType == CmdType.WeaponInfo :
return u"사용방법: !무기정보 <무기명> \n안내: 무기명에는 찾을 무기를 입력해주십시요."
if cmdType == CmdType.AddWeapon :
return u"사용방법: !무기추가 <무기명> \n안내: 무기명에는 추가할 무기를 입력해주십시요."
if cmdType == CmdType.UpgradeWeapon :
return u"사용방법: !무기강화 <무기명> \n안내: 무기명에는 강화할 무기를 입력해주십시요."
|
lach76/scancode-toolkit
|
src/commoncode/functional.py
|
Python
|
apache-2.0
| 5,818
| 0.001203
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import functools
from itertools import izip
from types import ListType, TupleType, GeneratorType
def flatten(seq):
"""
Flatten recursively a sequence and all its sub-sequences that can be tuples,
lists or generators (generators will be consumed): all are converted to a
flat list of elements.
For example::
>>> flatten([7, (6, [5, [4, ['a'], 3]], 3), 2, 1])
[7, 6, 5, 4, 'a', 3, 3, 2, 1]
>>> def gen():
... for i in range(2):
... yield range(5)
...
>>> flatten(gen())
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
Originally derived from http://www.andreasen.org/misc/util.py
2002-2005 by Erwin S. Andreasen -- http://www.andreasen.org/misc.shtml
This file is in the Public Domain
Version: Id: util.py,v 1.22 2005/12/16 00:08:21 erwin Exp erwin
"""
r = []
for x in seq:
if isinstance(x, (ListType, TupleType)):
r.extend(flatten(x))
elif isinstance(x, GeneratorType):
r.extend(flatten(list(x)))
else:
r.append(x)
return r
def pair_chunks(iterable):
"""
    Return an iterable of chunks of element pairs from the iterable. The iterable
|
    must contain an even number of elements or it will be truncated.
For example::
>>> list(pair_chunks([1, 2, 3, 4, 5, 6]))
[(1, 2), (3, 4), (5, 6)]
>>> list(pair_chunks([1, 2,
|
3, 4, 5, 6, 7]))
[(1, 2), (3, 4), (5, 6)]
"""
return izip(*[iter(iterable)] * 2)
def memoize(fun):
"""
Decorate fun function and cache return values. Arguments must be
hashable. kwargs are not handled. Used to speed up some often executed
functions.
Usage example::
>>> @memoize
... def expensive(*args, **kwargs):
... print('Calling expensive with', args, kwargs)
... return 'value expensive to compute' + repr(args)
>>> expensive(1, 2)
Calling expensive with (1, 2) {}
'value expensive to compute(1, 2)'
>>> expensive(1, 2)
'value expensive to compute(1, 2)'
>>> expensive(1, 2, a=0)
Calling expensive with (1, 2) {'a': 0}
'value expensive to compute(1, 2)'
>>> expensive(1, 2, a=0)
Calling expensive with (1, 2) {'a': 0}
'value expensive to compute(1, 2)'
>>> expensive(1, 2)
'value expensive to compute(1, 2)'
>>> expensive(1, 2, 5)
Calling expensive with (1, 2, 5) {}
'value expensive to compute(1, 2, 5)'
    The expensive function's return value is cached for each distinct args
    tuple and computed only once in its life. Calls with kwargs are not cached.
"""
memos = {}
@functools.wraps(fun)
def memoized(*args, **kwargs):
# calls with kwargs are not handled and not cached
if kwargs:
return fun(*args, **kwargs)
# convert any list arg to a tuple
args = tuple(tuple(arg) if isinstance(arg, ListType) else arg
for arg in args)
try:
return memos[args]
except KeyError:
memos[args] = fun(*args)
return memos[args]
return functools.update_wrapper(memoized, fun)
def memoize_to_attribute(attr_name, _test=False):
"""
Decorate a method and cache return values in attr_name of the parent object.
Used to speed up some often called methods that cache their values in
instance variables.
Usage example::
>>> class Obj(object):
... def __init__(self):
... self._expensive = None
... @property
... @memoize_to_attribute('_expensive')
... def expensive(self):
... print('Calling expensive')
... return 'value expensive to compute'
>>> o=Obj()
>>> o.expensive
Calling expensive
'value expensive to compute'
>>> o.expensive
'value expensive to compute'
>>> o.expensive
'value expensive to compute'
The Obj().expensive property value will be cached to attr_name
self._expensive and computed only once in the life of the Obj instance.
"""
def memoized_to_attr(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
if getattr(self, attr_name) is None:
res = meth(self, *args, **kwargs)
setattr(self, attr_name, res)
else:
res = getattr(self, attr_name)
return res
return wrapper
return memoized_to_attr
|
sshrdp/mclab
|
lib/antlr-3.0.1/runtime/Python/tests/testbase.py
|
Python
|
apache-2.0
| 9,623
| 0.004053
|
import unittest
import imp
import os
import errno
import sys
import glob
import re
from distutils.errors import *
def unlink(path):
try:
os.unlink(path)
except OSError, exc:
if exc.errno != errno.ENOENT:
raise
class BrokenTest(unittest.TestCase.failureException):
def __repr__(self):
name, reason = self.args
return '%s: %s: %s works now' % (
(self.__class__.__name__, name, reason))
def broken(reason, *exceptions):
    '''Marks a test case that currently fails (or errors) but should succeed.
If the test fails with an exception, list the exception type in args'''
def wrapper(test_method):
def replacement(*args, **kwargs):
try:
test_method(*args, **kwargs)
except exceptions or unittest.TestCase.failureException:
pass
else:
raise BrokenTest(test_method.__name__, reason)
replacement.__doc__ = test_method.__doc__
replacement.__name__ = 'XXX_' + test_method.__name__
replacement.todo = reason
return replacement
return wrapper
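# Editor's note: a minimal usage sketch of the ``broken`` decorator above.  The
# test class and reason below are hypothetical, not part of this suite; the
# decorator swallows the listed exception (or a plain test failure when no
# exceptions are given) and raises BrokenTest if the test unexpectedly passes.
#
#     class ExampleTest(unittest.TestCase):
#         @broken("tree rewriting not implemented yet", NotImplementedError)
#         def testRewrite(self):
#             raise NotImplementedError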
dependencyCache = {}
compileErrorCache = {}
# setup java CLASSPATH
if 'CLASSPATH' not in os.environ:
cp = []
baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
libDir = os.path.join(baseDir, 'lib')
jar = os.path.join(libDir, 'stringtemplate-3.0.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
jar = os.path.join(libDir, 'antlr-2.7.7.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
jar = os.path.join(libDir, 'junit-4.2.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))
classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'
else:
classpath = ''
class ANTLRTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.baseName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
self.lexerModule = None
self.parserModule = None
def _invokeantlr(self, dir, file, options):
fp = os.popen('cd %s; java %s org.antlr.Tool %s %s 2>&1'
% (dir, classpath, options, file)
)
output = ''
failed = False
for line in fp:
out
|
put += line
if line.startswith('error('):
failed = True
rc = fp.close()
if rc is not None:
failed = True
if failed:
raise RuntimeError(
"Failed to compile grammar '%s':\n\n" % file
+ output
)
def compileGrammar(self, grammarName=None, options=''):
if grammarName is None:
grammarName = self.baseName +
|
'.g'
# don't try to rebuild grammar, if it already failed
if grammarName in compileErrorCache:
return
try:
testDir = os.path.dirname(os.path.abspath(__file__))
# get dependencies from antlr
if grammarName in dependencyCache:
dependencies = dependencyCache[grammarName]
else:
dependencies = []
cmd = ('cd %s; java %s org.antlr.Tool -depend %s 2>&1'
% (testDir, classpath, grammarName)
)
output = ""
failed = False
fp = os.popen(cmd)
for line in fp:
output += line
if line.startswith('error('):
failed = True
elif ':' in line:
a, b = line.strip().split(':', 1)
dependencies.append(
(os.path.join(testDir, a.strip()),
[os.path.join(testDir, b.strip())])
)
rc = fp.close()
if rc is not None:
failed = True
if failed:
raise RuntimeError(
"antlr -depend failed with code %s on grammar '%s':\n\n"
% (rc, grammarName)
+ cmd
+ "\n"
+ output
)
# add dependencies to my .stg files
templateDir = os.path.abspath(os.path.join(testDir, '..', '..', '..', 'src', 'org', 'antlr', 'codegen', 'templates', 'Python'))
templates = glob.glob(os.path.join(templateDir, '*.stg'))
for dst, src in dependencies:
src.extend(templates)
dependencyCache[grammarName] = dependencies
rebuild = False
for dest, sources in dependencies:
if not os.path.isfile(dest):
rebuild = True
break
for source in sources:
if os.path.getmtime(source) > os.path.getmtime(dest):
rebuild = True
break
if rebuild:
self._invokeantlr(testDir, grammarName, options)
except:
# mark grammar as broken
compileErrorCache[grammarName] = True
raise
def lexerClass(self, base):
"""Optionally build a subclass of generated lexer class"""
return base
def parserClass(self, base):
"""Optionally build a subclass of generated parser class"""
return base
def walkerClass(self, base):
"""Optionally build a subclass of generated walker class"""
return base
def __load_module(self, name):
modFile, modPathname, modDescription \
= imp.find_module(name, [os.path.dirname(__file__)])
return imp.load_module(
name, modFile, modPathname, modDescription
)
def getLexer(self, *args, **kwargs):
"""Build lexer instance. Arguments are passed to lexer.__init__()."""
self.lexerModule = self.__load_module(self.baseName + 'Lexer')
cls = getattr(self.lexerModule, self.baseName + 'Lexer')
cls = self.lexerClass(cls)
lexer = cls(*args, **kwargs)
return lexer
def getParser(self, *args, **kwargs):
"""Build parser instance. Arguments are passed to parser.__init__()."""
self.parserModule = self.__load_module(self.baseName + 'Parser')
cls = getattr(self.parserModule, self.baseName + 'Parser')
cls = self.parserClass(cls)
parser = cls(*args, **kwargs)
return parser
def getWalker(self, *args, **kwargs):
"""Build walker instance. Arguments are passed to walker.__init__()."""
self.walkerModule = self.__load_module(self.baseName + 'Walker')
cls = getattr(self.walkerModule, self.baseName + 'Walker')
cls = self.walkerClass(cls)
walker = cls(*args, **kwargs)
return walker
def compileInlineGrammar(self, grammar, options=''):
testDir = os.path.dirname(os.path.abspath(__file__))
# get type and name from first grammar line
m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar)
assert m is not None
grammarType = m.group(2)
if grammarType is None:
grammarType = 'combined'
grammarName = m.group(3)
assert grammarType in ('lexer', 'parser', 'tree', 'combi
|
hohe/scikit-rf
|
skrf/network.py
|
Python
|
bsd-3-clause
| 141,218
| 0.00973
|
'''
.. module:: skrf.network
========================================
network (:mod:`skrf.network`)
========================================
Provides an n-port network class and associated functions.
Most of the functionality in this module is provided as methods and
properties of the :class:`Network` Class.
Network Class
===============
.. autosummary::
:toctree: generated/
Network
Network Representations
============================
.. autosummary::
:toctree: generated/
Network.s
Network.z
Network.y
Network.a
Network.t
Connecting Networks
===============================
.. autosummary::
:toctree: generated/
connect
innerconnect
cascade
cascade_list
de_embed
flip
Interpolation and Concatenation Along Frequency Axis
=====================================================
.. autosummary::
:toctree: generated/
stitch
overlap
Network.resample
Network.interpolate
Network.interpolate_self
Network.interpolate_from_f
Combining Networks
===================================
.. autosummary::
:toctree: generated/
n_oneports_2_nport
four_oneports_2_twoport
three_twoports_2_threeport
n_twoports_2_nport
IO
====
.. autosummary::
skrf.io.general.read
skrf.io.general.write
skrf.io.general.ntwk_2_spreadsheet
Network.write
Network.write_touchstone
Network.read
Network.write_spreadsheet
Noise
============
.. autosummary::
:toctree: generated/
Network.add_noise_polar
Network.add_noise_polar_flatband
Network.multiply_noise
Supporting Functions
======================
.. autosummary::
:toctree: generated/
inv
connect_s
innerconnect_s
s2z
s2y
s2t
s2a
z2s
z2y
z2t
z2a
y2s
y2z
y2t
t2s
t2z
t2y
fix_z0_shape
renormalize_s
passivity
reciprocity
Misc Functions
=====================
.. autosummary::
:toctree: generated/
average
two_port_reflect
chopinhalf
Network.nudge
Network.renormalize
'''
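# Editor's note: a minimal usage sketch of this module, assuming two touchstone
# files exist (the file names are placeholders, not shipped data):
#
#     import skrf as rf
#     ntwk_a = rf.Network('a.s2p')       # load a two-port from a touchstone file
#     ntwk_b = rf.Network('b.s2p')
#     cascade = ntwk_a ** ntwk_b         # ** cascades two 2-ports
#     diff = ntwk_a - ntwk_b             # element-wise difference of s-matrices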
from six.moves import xrange
import os
import warnings
try:
import cPickle as pickle
from cPickle import UnpicklingError
except ImportError:
import pickle as pickle
from pickle import UnpicklingError
from copy import deepcopy as copy
import re
from numbers import Number
from itertools import product
import numpy as npy
from numpy.linalg import inv as npy_inv
import pylab as plb
from scipy import stats,signal # for Network.add_noise_*, and Network.windowed
from scipy.interpolate import interp1d # for Network.interpolate()
from numpy import fft
import unittest # for unittest.skip
from . import mathFunctions as mf
from . frequency import Frequency
from . plotting import *#smith, plot_rectangular, plot_smith, plot_complex_polar
from . tlineFunctions import zl_2_Gamma0
from . util import get_fid, get_extn, find_nearest_index,slice_domain
## later imports. delayed to solve circular dependencies
#from io.general import read, write
#from io import touchstone
#from io.general import network_2_spreadsheet
from .constants import ZERO
class Network(object):
'''
    An n-port electrical network [#]_.
For instructions on how to create Network see :func:`__init__`.
    An n-port network may be defined by three quantities,
* network parameter matrix (s, z, or y-matrix)
* port characteristic impedance matrix
* frequency information
The :class:`Network` class stores these data structures internally
in the form of complex :class:`numpy.ndarray`'s. These arrays are not
interfaced directly but instead through the use of the properties:
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s` scattering parameter matrix
:attr:`z0` characteristic impedance matrix
:attr:`f` frequency vector
===================== =============================================
Although these docs focus on s-parameters, other equivalent network
representations such as :attr:`z` and :attr:`y` are
available. Scalar projections of the complex network parameters
are accessible through properties as well. These also return
:class:`numpy.ndarray`'s.
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s_re` real part of the s-matrix
:attr:`s_im` imaginary part of the s-matrix
:attr:`s_mag` magnitude of the s-matrix
:attr:`s_db` magnitude in log scale of the s-matrix
:attr:`s_deg` phase of the s-matrix in degrees
:attr:`s_gd` group delay derived from the s-matrix
===================== =============================================
    The following operations act on the network's s-matrix.
===================== =============================================
Operator Function
===================== =============================================
\+ element-wise addition of the s-matrix
\- element-wise difference of the s-matrix
\* element-wise multiplication of the s-matrix
\/ element-wise division of the s-matrix
\*\* cascading (only for 2-ports)
\// de-embedding (for 2-ports, see :attr:`inv`)
===================== =============================================
Different components of the :class:`Network` can be visualized
through various plotting methods. These methods can be used to plot
individual elements of the s-matrix or all at once. For more info
about plotting see the :doc:`../../tutorials/plotting` tutorial.
========================= =============================================
Method Meaning
========================= =============================================
:func:`plot_s_smith` plot complex s-parameters on smith chart
:func:`plot_s_re` plot real part of s-parameters vs frequency
:func:`plot_s_im` plot imaginary part of s-parameters vs frequency
:func:`plot_s_mag` plot magnitude of s-parameters vs frequency
:func:`plot_s_db` plot magnitude (in dB) of s-parameters vs frequency
:func:`plot_s_deg` plot phase of s-parameters (in degrees) vs frequency
:func:`plot_s_deg_unwrap` plot phase of s-parameters (in unwrapped degrees) vs frequency
:func:`plot_s_gd` plot group delay of s-parameters (in s) vs frequency
========================= =============================================
:class:`Network` objects can be created from a touchstone or pickle
file (see :func:`__init__`), by a
:class:`~skrf.media.media.Media` object, or manually by assigning the
network properties directly. :class:`Network` objects
can be saved to disk in the form of touchstone files with the
:func:`write_touchstone` method.
An exhaustive list of :class:`Network` Methods and Properties
(At
|
tributes) are given below
References
------------
.. [#] http://en.wikipedia.org/wiki/Two-port_network
'''
global PRIMARY_PROPERTIES
PRIMARY_PROPERTIES = [ 's','z','y','a']
global COMPONENT_FUNC_DICT
COMPONENT_FUNC_DICT
|
= {
're' : npy.real,
'im' : npy.imag,
'mag' : npy.abs,
'db' : mf.complex_2_db,
'db10' : mf.complex_2_db10,
'rad' : npy.angle,
'deg' : lambda x: npy.angle(x, deg=True),
'arcl' : lambda x: npy.angle(x) * npy.abs(x),
'rad_unwrap' : lambda x: mf.unwrap_rad(npy.angle(x)),
'deg_unwrap' : lambda x: mf.radian_2_degree(mf.unwrap_rad(\
npy.angle(x))),
'arcl_unwrap' : lambda x: mf.unwrap_rad(npy.angle(x)) *\
npy.abs(x),
'gd' :
|
Azure/azure-sdk-for-python
|
sdk/devtestlabs/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/operations/_provider_operations_operations.py
|
Python
|
mit
| 4,782
| 0.004391
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProviderOperationsOperations(object):
"""ProviderOperationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ProviderOperationResult"]
"""Result of the request to list REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderOperationResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.devtestlabs.models.ProviderOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ProviderOperationResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request
|
, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_
|
next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.DevTestLab/operations'} # type: ignore
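# Editor's note: an illustrative sketch of how this operation group is normally
# reached through the generated service client; the client and credential names
# below are assumptions for illustration only, not part of this module.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.devtestlabs import DevTestLabsClient
#
#     client = DevTestLabsClient(DefaultAzureCredential(), "<subscription-id>")
#     for operation in client.provider_operations.list():
#         print(operation)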
|
imh/gnss-analysis
|
gnss_analysis/agg_run.py
|
Python
|
lgpl-3.0
| 2,348
| 0.013203
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Ian Horn <ian@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from gnss_analysis.runner import run as
|
single_run
import pandas as pd
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser(description='RTK Filter SITL t
|
ests.')
parser.add_argument('infile', help='Specify the HDF5 file to use for input.')
parser.add_argument('outfile', help='Specify the HDF5 file to output into.')
parser.add_argument('baselineX', help='The baseline north component.')
parser.add_argument('baselineY', help='The baseline east component.')
parser.add_argument('baselineZ', help='The baseline down component.')
parser.add_argument('--NED', action='store_true')
parser.add_argument('-k', '--key',
default='table', nargs=1,
help='The key for the output table to insert into.')
parser.add_argument('-r', '--row',
default=None, nargs=1,
                        help='The row label to insert results under (defaults to the input file name).')
args = parser.parse_args()
hdf5_filename_in = args.infile
hdf5_filename_out = args.outfile
baselineX = args.baselineX
baselineY = args.baselineY
baselineZ = args.baselineZ
baseline = np.array(map(float, [baselineX, baselineY, baselineZ]))
out_key = args.key
row = args.row
if row is None:
row = hdf5_filename_in
reports = single_run(hdf5_filename_in, baseline, baseline_is_NED=args.NED)
out_store = pd.HDFStore(hdf5_filename_out)
if ('/' + out_key) in out_store.keys():
out_df = out_store[out_key]
else:
out_df = pd.DataFrame()
new_cols = [col for col in reports.keys() if col not in out_df.columns]
for new_col in new_cols:
out_df[new_col] = pd.Series(np.nan * np.empty_like(out_df.index),
index=out_df.index)
out_df.loc[row] = pd.Series(reports)
out_store[out_key] = out_df
out_store.close()
if __name__ == "__main__":
main()
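# Editor's note: an example invocation, with placeholder file names and baseline
# components (north, east, down when --NED is given):
#
#     python agg_run.py run.hdf5 results.hdf5 10.0 3.5 0.2 --NED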
|
hrayr-artunyan/shuup
|
shuup/utils/filer.py
|
Python
|
agpl-3.0
| 5,631
| 0.003374
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
import hashlib
import six
from django.core.files.base import ContentFile
from django.forms.models import modelform_factory
from filer.models import File, Folder, Image
def filer_folder_from_path(path):
"""
Split `path` by slashes and create a hierarchy of Filer Folder objects accordingly.
Blank path components are ignored, so "/////foo//////bar///" is the same as "foo/bar".
The empty string (and `None`) are handled as "no folder", i.e. root folder.
:param path: Pathname or None
:type path: str|None
:return: Folder
:rtype: filer.models.Folder
"""
if path is None:
return None
folder = None
for component in six.text_type(path).split("/"):
if component:
folder = Folder.objects.get_or_create(name=component, parent=folder)[0]
return folder
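# Editor's note: a short sketch of the behaviour documented above (requires a
# configured Django project with django-filer; the path is a placeholder):
#
#     folder = filer_folder_from_path("products//2016/images/")
#     # returns the Folder "images" under products -> 2016, creating any
#     # missing levels and ignoring the blank components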
def _filer_file_from_upload(model, request, path, upload_data, sha1=None):
"""
Create some sort of Filer file (either File or Image, really) from the given upload data (ContentFile or UploadFile)
:param model: Model class
:param request: Request, to figure out the owner for this file
:type request: django.http.request.HttpRequest|None
:param path: Pathname string (see `filer_folder_from_path`) or a Filer Folder.
:type path: basestring|filer.models.Folder
:param upload_data: Upload data
:type upload_data: django.core.files.base.File
:param sha1: SHA1 checksum. If given and a matching `model` with the SHA1 is found, it is returned instead.
:type sha1: basestring
:return: Filer file
"""
if sha1:
upload = model.objects.filter(sha1=sha1).first()
if upload:
return upload
file_form_cls = modelform_factory(
model=model, fields=('original_filename', 'owner', 'file'))
upload_form = file_form_cls(
data={
'original_filename': upload_data.name,
'owner': (request.user.pk if (request and not request.user.is_anonymous()) else None)
},
files={
'file': upload_data
}
)
upload = upload_form.save(commit=False)
upload.is_public = True
if isinstance(path, Folder):
upload.folder = path
else:
upload.folder = filer_folder_from_path(path)
upload.save()
return upload
def filer_file_from_upload(request, path, upload_data, sha1=None):
"""
Create a filer.models.filemodels.File from an upload (UploadedFile or such).
If the `sha1` parameter is passed and a file with said SHA1 is found, it will be returned instead.
:param request: Request, to figure out the owner for this file
:type request: django.http.request.HttpRequest|None
:param path: Pathname string (see `filer_folder_from_path`) or a Filer Folder.
:type path: basestring|filer.models.Folder
:param upload_data: Upload data
:type upload_data: django.core.files.base.File
:param sha1: SHA1 checksum. If given and a matching `model` with the SHA1 is found, it is returned instead.
:type sha1: basestring
:rtype: filer.models.filemodels.File
"""
return _filer_file_from_upload(model=File, request=request, path=path, upload_data=upload_data, sha1=sha1)
def filer_image_from_upload(request, path, upload_data, sha1=None):
"""
Create a Filer Image from an upload (UploadedFile or such).
If the `sha1` parameter is passed and an Image with said SHA1 is found, it will be returned instead.
:param request: Request, to figure out the owner for this file
:type request: django.http.request.HttpRequest|None
:param path: Pathname string (see `filer_folder_from_path`) or a Filer Folder.
:type path: basestring|filer.models.Folder
:param upload_data: Upload data
:type upload_data: django.core.files.base.File
:param sha1: SHA-1 checksum of the data, if available, to do deduplication
:type sha1: basestring
:rtype: filer.models.imagemodels.Image
"""
return _filer_file_from_upload(model=Image, request=request, path=path, upload_data=upload_data, sha1=sha1)
def filer_image_from_data(request, path, file_name, file_data, sha1=None):
"""
Create a Filer Image from the given data string.
    If the `sha1` parameter is passed and True (the value True, not a truthy value), the SHA-1 of the data string
is calculated and passed to the underlying creation function.
If the `sha1` parameter is truthy (generally the SHA-1 hex string), it's passed directly to the creation function.
:param request: Request, to figure out the owner for this file
:type request: django.http.request.HttpRequest|None
:param pa
|
th: Pathname string (see `filer_folder_from_path`) or a Filer Folder.
:type path: basestring|filer.models.Folder
:param file_name: File name
:t
|
ype file_name: basestring
:param file_data: Upload data
:type file_data: bytes
:param sha1: SHA-1 checksum of the data, if available, to do deduplication.
May also be `True` to calculate the SHA-1 first.
:type sha1: basestring|bool
:rtype: filer.models.imagemodels.Image
"""
if sha1 is True:
sha1 = hashlib.sha1(file_data).hexdigest()
upload_data = ContentFile(file_data, file_name)
return _filer_file_from_upload(model=Image, request=request, path=path, upload_data=upload_data, sha1=sha1)
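# Editor's note: a sketch of the deduplication behaviour described above; the
# file name and contents are placeholders.
#
#     with open("logo.png", "rb") as fh:
#         png_bytes = fh.read()
#     image = filer_image_from_data(
#         request=None, path="Logos", file_name="logo.png",
#         file_data=png_bytes, sha1=True)   # sha1=True hashes the data first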
|
laats/dpdq
|
src/qp/frontend.py
|
Python
|
gpl-3.0
| 3,696
| 0.005952
|
# -*-Python-*-
################################################################################
#
# File: frontend.py
# RCS: $Header: $
# Description: frontend:
# responsibility:
# init backend
# init processors
# handle two query types:
# 1) metadata
# response: metadata from backend and processors
# 2) informational
# response: proccess(proc(query))(backend(info(query)))
# Author: Staal Vinterbo
# Created: Wed May 8 16:28:56 2013
# Modified: Sun Jun 23 14:31:31 2013 (Staal Vinterbo) staal@mats
# Language: Python
# Package: N/A
# Status: Experimental
#
# (c) Copyright 2013, Staal Vinterbo, all rights res
|
erved.
#
# frontend.py is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the Li
|
cense, or
# (at your option) any later version.
#
# frontend.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with frontend.py; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from backend import init_backend, query_backend
def init_frontend(database, processors, reinit=False):
if len(processors) == 0:
raise Exception('Failed to initialize frontend: no processors given.')
try:
if reinit:
backend = reinit_backend(backend)
else:
backend = init_backend(database)
except Exception as e:
raise Exception('Could not initialize backend: ' + str(e))
pdict = {}
for (k,v) in processors.items():
pdict[k] = v['meta']
meta = dict(backend['meta'])
meta['processors'] = pdict
return {'backend' : backend, 'processors' : processors, 'meta' : meta}
def handle_query(frontend, eps, query):
if eps <= 0:
raise Exception('Privacy risk must be positive.')
try:
(ddesc, proc) = query
(pname, parms) = proc
(dname, sel, pro) = ddesc
except Exception as e:
raise Exception('Malformed data query.')
# check if data set exists and if processor is allowed
if dname not in frontend['backend']['meta']['datasets'].keys():
raise Exception('Requested data set not available.')
if pname not in frontend['backend']['meta']['datasets'][dname]['processors']:
raise Exception('Requested information not appropriate for data set.')
try:
proc = frontend['processors'][pname]
except Exception as e:
raise Exception('Could not find query type: ' + str(e))
try:
if proc.has_key('query_edit'):
parms += [('orig_query', {'predicate' :sel, 'attributes' : pro})]
(sel, pro) = proc['query_edit'](sel, pro)
ddesc = (dname, sel, pro)
except Exception as e:
raise Exception('Query edit failed: ' + str(e))
try:
res = query_backend(frontend['backend'], ddesc)
except Exception as e:
raise Exception('Data query failed: ' + str(e))
try:
pres = proc['f'](eps, parms, res)
except Exception as e:
raise Exception('Information processing failed: ' + str(e))
return pres
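# Editor's note: the expected query shape for handle_query(), with hypothetical
# dataset, predicate and processor names:
#
#     query = (("census", "age > 30", ["age", "income"]),   # (dname, sel, pro)
#              ("histogram", [("bins", 10)]))               # (pname, parms)
#     result = handle_query(frontend, 0.1, query)           # eps = 0.1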
|
Wonfee/pymobiledevice
|
util/ccl_bplist.py
|
Python
|
gpl-3.0
| 15,606
| 0.005318
|
"""
Copyright (c) 2012, CCL Forensics
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CCL Forensics nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CCL FORENSICS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTA
|
L, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import struct
import datetime
__version__ = "0.11"
__description__ = "Converts Apple binary PList files into a native Python data structure"
__contact__ = "Alex Caithness"
class BplistError(Exception):
pass
class BplistUID:
def __init__(self, value):
self.value = value
def __repr__(self):
return "UID: {0}".format(self.value)
def __str__(self):
return self.__repr__()
def __decode_multibyte_int(b, signed=True):
if len(b) == 1:
fmt = ">B" # Always unsigned?
elif len(b) == 2:
fmt = ">h"
elif len(b) == 3:
if signed:
return ((b[0] << 16) | struct.unpack(">H", b[1:])[0]) - ((b[0] >> 7) * 2 * 0x800000)
else:
return (b[0] << 16) | struct.unpack(">H", b[1:])[0]
elif len(b) == 4:
fmt = ">i"
elif len(b) == 8:
fmt = ">q"
else:
raise BplistError("Cannot decode multibyte int of length {0}".format(len(b)))
if signed and len(b) > 1:
return struct.unpack(fmt.lower(), b)[0]
else:
return struct.unpack(fmt.upper(), b)[0]
def __decode_float(b, signed=True):
if len(b) == 4:
fmt = ">f"
elif len(b) == 8:
fmt = ">d"
else:
raise BplistError("Cannot decode float of length {0}".format(len(b)))
if signed:
return struct.unpack(fmt.lower(), b)[0]
else:
return struct.unpack(fmt.upper(), b)[0]
def __decode_object(f, offset, collection_offset_size, offset_table):
# Move to offset and read type
#print("Decoding object at offset {0}".format(offset))
f.seek(offset)
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
type_byte = ord(f.read(1)[0])
else:
type_byte = f.read(1)[0]
#print("Type byte: {0}".format(hex(type_byte)))
if type_byte == 0x00: # Null 0000 0000
return None
elif type_byte == 0x08: # False 0000 1000
return False
elif type_byte == 0x09: # True 0000 1001
return True
elif type_byte == 0x0F: # Fill 0000 1111
raise BplistError("Fill type not currently supported at offset {0}".format(f.tell())) # Not sure what to return really...
elif type_byte & 0xF0 == 0x10: # Int 0001 xxxx
int_length = 2 ** (type_byte & 0x0F)
int_bytes = f.read(int_length)
return __decode_multibyte_int(int_bytes)
elif type_byte & 0xF0 == 0x20: # Float 0010 nnnn
float_length = 2 ** (type_byte & 0x0F)
float_bytes = f.read(float_length)
return __decode_float(float_bytes)
elif type_byte & 0xFF == 0x33: # Date 0011 0011
date_bytes = f.read(8)
date_value = __decode_float(date_bytes)
return datetime.datetime(2001,1,1) + datetime.timedelta(seconds = date_value)
elif type_byte & 0xF0 == 0x40: # Data 0100 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
data_length = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Data field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
data_length = __decode_multibyte_int(int_bytes, False)
return f.read(data_length)
elif type_byte & 0xF0 == 0x50: # ASCII 0101 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
ascii_length = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long ASCII field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
ascii_length = __decode_multibyte_int(int_bytes, False)
return f.read(ascii_length).decode("ascii")
elif type_byte & 0xF0 == 0x60: # UTF-16 0110 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
utf16_length = (type_byte & 0x0F) * 2 # Length is characters - 16bit width
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long UTF-16 field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
utf16_length = __decode_multibyte_int(int_bytes, False) * 2
return f.read(utf16_length).decode("utf_16_be")
elif type_byte & 0xF0 == 0x80: # UID 1000 nnnn
uid_length = (type_byte & 0x0F) + 1
uid_bytes = f.read(uid_length)
return BplistUID(__decode_multibyte_int(uid_bytes, signed=False))
elif type_byte & 0xF0 == 0xA0: # Array 1010 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
array_count = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Array field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
array_count = __decode_multibyte_int(int_bytes, signed=False)
array_refs = []
for i in range(array_count):
array_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in array_refs]
    elif type_byte & 0xF0 == 0xC0: # Set 1100 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
set_count = type_byte & 0x0F
else:
# A li
|
plotly/python-api
|
packages/python/plotly/plotly/validators/histogram/stream/_maxpoints.py
|
Python
|
mit
| 550
| 0
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="maxpoints", parent_name="histogram.stream", **kwargs
):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "c
|
alc"),
max=k
|
wargs.pop("max", 10000),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
|
TeamLovely/Saliere
|
saliere/templatizer.py
|
Python
|
mit
| 6,214
| 0.001609
|
import os
import shutil
import jinja2
from saliere.core import UsageError
class Templatizer:
"""Template manager.
Handles all the template related operations.
"""
def __init__(self, template_path_list=None, template_type=None):
"""Initializer.
:param template_path_list: the list of paths where the templates are possibly located
"""
# Use default template paths if none were specified.
self.template_path_list = template_path_list if template_path_list else ['data/templates',
'../data/templates',
'/usr/local/share/saliere/templates']
# Set the type if specified.
self.template_type = template_type
@staticmethod
def create_folder(folder, on_failure=None):
"""Creates a folder and the parent directories if needed.
:param folder: name/path of the folder to create
:param on_failure: function to execute in case of failure
"""
try:
os.makedirs(folder)
except OSError:
if on_failure:
on_failure()
def copy(self, project_name, output_dir, template_vars=None):
"""Creates the skeleton based on the chosen template.
        :param project_name: the name of the project
        :param output_dir: the path of the output directory
        :param template_vars: optional mapping of variables to render into the template files
"""
# Locate the template path.
template_path = self.locate_template()
if not template_path:
raise UsageError("A project type is required.")
# Ensure the template path ends with a "/".
template_folder_parent = os.path.abspath(template_path) + "/"
# Prepare the output directory.
output_folder_root = os.path.abspath(output_dir)
# List of the files in the template folder.
for root, subfolders, files in os.walk(template_path):
# Prepare the jinja environment.
template_loader = jinja2.FileSystemLoader(root)
jinja_env = jinja2.Environment(loader=template_loader)
# Recreate the folders with the formula name
template_folder_base = root.replace(template_folder_parent, "")
formula_folder_name = template_fold
|
er_base.replace("template", project_name)
formula_folder_path = os.path.join(output_folder_root, formula_folder_name)
Templatizer.create_folder(formula_folder_path)
# List the files.
for file in files:
dst = os.path.join(formula_folder_path, file)
#
|
 If there are no variables to replace, simply copy the file.
if not template_vars:
src = os.path.join(root, file)
shutil.copyfile(src, dst)
continue
# Otherwise jinjanize it.
jinjanized_content = Jinjanizer.jinjanize(jinja_env, file, template_vars)
# Create the file with the rendered content.
with open(dst, mode='w', encoding='utf-8') as jinjanized_file:
jinjanized_file.write(jinjanized_content)
def list_templates(self):
"""Returns a list of available templates ordered alphabetically.
:return: a list of available templates ordered alphabetically
"""
# Ensure we have a list of paths.
if not self.template_path_list:
return None
# Initialize an empty set of available templates.
available_templates = set()
# Go through the list of valid paths.
for path in self.template_path_list:
base_path = os.path.abspath(path)
try:
subdirs = os.listdir(base_path)
available_templates.update(subdirs)
except FileNotFoundError:
pass
# Return a list of available templates ordered alphabetically.
return sorted(available_templates)
def locate_template(self, template_type=None):
"""Returns the path of a template.
Given a template type the function will attempt to retrieve its full path. If instead of a template type, a
        full path is given, the function will validate the full path. If the full path cannot be determined, the
function returns None.
:param template_type: the type of the template or its full path
:return: the path of the template or None if it does not exist
"""
# Ensure we have a template type.
if not template_type:
template_type = self.template_type
if not template_type:
return None
# If the template type is a valid custom path, return it.
if os.path.exists(template_type):
return template_type
# Ensure we have a list of paths.
if not self.template_path_list:
return None
# Go through the list of valid paths.
for path in self.template_path_list:
base_path = os.path.abspath(path)
template_path = os.path.join(base_path, template_type)
is_valid = os.path.exists(template_path)
if is_valid:
break
# Return the full path of the given template or None if it cannot be found.
return os.path.abspath(template_path) if is_valid else None
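# Editor's note: a minimal usage sketch of Templatizer; the template type,
# output directory and variables are placeholders.
#
#     templatizer = Templatizer(template_type="salt")
#     print(templatizer.list_templates())
#     templatizer.copy("myformula", "/tmp/out", template_vars={"name": "myformula"})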
class Jinjanizer:
"""Handle the jinjanization of the templates.
"""
@staticmethod
def jinjanize(jinja_env, template_file, template_vars=None):
"""Renders a Jinja2 template.
:param jinja_env: the jinja environment
:param template_file: the full path of the template file to render
        :param template_vars: the mapping of variables used to render the template
:return: a string representing the rendered template
"""
if not template_vars:
template_vars = {}
# Load the template
template = jinja_env.get_template(template_file)
# Render the template and return the result
return template.render(template_vars)
|
jsatt/django-db-email-backend
|
test_app/storage.py
|
Python
|
mit
| 1,353
| 0.001478
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import BytesIO
from django.core.files.storage import Storage
class TestStorage(Storage):
def __init__(self, *args, **kwargs):
self.reset()
def _open(self, name, mode='rb'):
if not self.exists(name):
if 'w' in mode:
self.save(name, '')
|
return self._file_system[name]
else:
raise IOError("[Errno 2] No such file or directory: '{}'".format(name))
return self._file_system[name]
def _save(self, name, content):
f = BytesIO()
file_content = content.read()
if isinstance(file_content, bytes):
f.write(file_content)
else:
f.write(file_content.encode('utf8'))
f.seek(0)
if self
|
.exists(name):
name = self.get_available_name(name)
self._file_system[name] = f
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
if self.exists(name):
            del self._file_system[name]
else:
raise OSError("[Errno 2] No such file or directory: '{}'".format(name))
def exists(self, name):
return name in self._file_system
def reset(self):
self._file_system = {}
|
wsricardo/mcestudos
|
treinamento-webScraping/Abraji/p08.py
|
Python
|
gpl-3.0
| 342
| 0.01194
|
import urllib.request
pagina = urllib.request.urlopen(
'http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
início = onde + 2
fim = início + 4
preço = texto[início:fim]
if float(preço) < 4.74:
print ('Co
|
mprar pois est
|
á barato:', preço)
else:
print ('Esperar')
|
itucsProject2/Proje1
|
restaurant/models.py
|
Python
|
unlicense
| 639
| 0.00626
|
from django.db import models
from _datetime import date
class Restaurant(models.Model):
name = models.CharField(max_length=200)
|
transportation = models.BooleanField(default=False)
weatherSensetion = models.BooleanField(default=False)
status = models.BooleanField(default=True)
totalDay = models.IntegerField(default=0)
counter = models.IntegerField(default=0)
def __str__(self):
return self.name
def deleteRest(self, deleteId):
self.objects.filter(id=deleteId).delete()
def updateStatus(self, newStatus, updateId):
self.objects.get
|
(id=updateId).update(status = newStatus)
|
jithinbp/pslab-desktop-apps
|
psl_res/GUI/B_ELECTRONICS/B_Opamps/L_Summing.py
|
Python
|
gpl-3.0
| 4,802
| 0.05935
|
#!/usr/bin/python
"""
::
This experiment is used to study summing junctions built with op-amps
"""
from __future__ import print_function
from PSL_Apps.utilitiesClass import utilitiesClass
from PSL_Apps.templates import ui_template_graph_nofft as template_graph_nofft
from PyQt4 import QtGui,QtCore
import sys,time
params = {
'image' : 'clipping.png',
'name':"Summing Junction",
'hint':'''
Study summing junctions using op-amps
'''
}
class AppWindow(QtGui.QMainWindow, template_graph_nofft.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
from PSL.analyticsClass import analyticsClass
self.math = analyticsClass()
self.prescalerValue=0
self.plot=self.add2DPlot(self.plot_area,enableMenu=False)
self.enableCrossHairs(self.plot,[])
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot.setLabel('left','Voltage -->', units='V',**labelStyle)
self.plot.setLabel('bottom','Time -->', units='S',**labelStyle)
self.plot.setYRange(-8.5,8.5)
self.I.set_gain('CH1',1)
self.I.set_gain('CH2',1)
self.I.set_pv2(0);self.I.set_pv3(0)
self.plot.setLimits(yMax=8,yMin=-8,xMin=0,xMax=4e-3)
self.I.configure_trigger(0,'CH1',0,prescaler = self.prescalerValue)
self.tg=2
self.max_samples=2000
self.samples = self.max_samples
self.timer = QtCore.QTimer()
self.legend = self.plot.addLegend(offset=(-10,30))
self.curve1 = self.addCurve(self.plot,'INPUT 1(CH2)')
self.curve2 = self.addCurve(self.plot,'INPUT 2(CH3)')
self.curve3 = self.addCurve(self.plot,'OUTPUT (CH1)')
self.WidgetLayout.setAlignment(QtCore.Qt.AlignLeft)
#Utility widgets
#Widgets related to power supplies PV1,PVS2,PV3,PCS
self.supplySection = self.supplyWidget(self.I); self.WidgetLayout.addWidget(self.supplySection)
#Widgets related to Analog Waveform generators
self.sineSection = self.sineWidget(self.I); self.WidgetLayout.addWidget(self.sineSection)
#Control widgets
a1={'TITLE':'TIMEBASE','MIN':0,'MAX':9,'FUNC':self.set_timebase,'UNITS':'S','TOOLTIP':'Set Timebase of the oscilloscope'}
self.ControlsLayout.addWidget(self.dialIcon(**a1))
self.ControlsLayout.addWidget(self.gainIconCombined(FUNC=self.I.set_gain,LINK=self.gainChanged))
self.running=True
self.timer.singleShot(100,self.run)
def gainChanged(self,g):
self.autoRange()
def set_timebase(self,g):
timebases = [1.5,2,4,8,16,32,128,256,512,1024]
self.prescalerValue=[0,0,0,0,1,1,2,2,3,3,3][g]
samplescaling=[1,1,1,1,1,0.5,0.4,0.3,0.2,0.2,0.1]
self.tg=timebases[g]
self.samples = int(self.max_samples*samplescaling[g])
return self.autoRange()
def autoRange(self):
xlen = self.tg*self.samples*1e-6
self.plot.autoRange();
chan = self.I.analogInputSources['CH1']
R = [chan.calPoly10(0),chan.calPoly10(1023)]
R[0]=R[0]*.9;R[1]=R[1]*.9
self.plot.setLimits(yMax=max(R),yMin=min(R),xMin=0,xMax=xlen)
self.plot.setYRange(min(R),max(R))
self.plot.setXRange(0,xlen)
return self.samples*self.tg*1e-6
def run(self):
if not self.running: return
try:
self.I.configure_trigger(0,'CH1',0,prescaler = self.presca
|
lerValue)
self.I.capture_traces(3,self.samples,self.tg)
if self.running:self.timer.singleShot(self.samples*self.I.timebase*1e-3+10,self.plotData)
except Exception as e:
print (e)
def plotData(self):
if not self.running: return
try:
n=0
while(not self.I.oscilloscope_progre
|
ss()[0]):
time.sleep(0.1)
n+=1
if n>10:
self.timer.singleShot(100,self.run)
return
self.I.__fetch_channel__(1)
self.I.__fetch_channel__(2)
self.I.__fetch_channel__(3)
self.curve1.setData(self.I.achans[1].get_xaxis()*1e-6,self.I.achans[1].get_yaxis(),connect='finite')
self.curve2.setData(self.I.achans[2].get_xaxis()*1e-6,self.I.achans[2].get_yaxis(),connect='finite')
self.curve3.setData(self.I.achans[0].get_xaxis()*1e-6,self.I.achans[0].get_yaxis(),connect='finite')
self.displayCrossHairData(self.plot,False,self.samples,self.I.timebase,[self.I.achans[1].get_yaxis(),self.I.achans[2].get_yaxis(),self.I.achans[0].get_yaxis()],[(0,255,0),(255,0,0),(255,255,0)])
if self.running:self.timer.singleShot(100,self.run)
except Exception as e:
print (e)
def saveData(self):
self.saveDataWindow([self.curve1,self.curve2,self.curve3],self.plot)
def closeEvent(self, event):
self.running=False
self.timer.stop()
self.finished=True
def __del__(self):
self.timer.stop()
print('bye')
if __name__ == "__main__":
from PSL import sciencelab
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=sciencelab.connect())
myapp.show()
sys.exit(app.exec_())
|
leofnch/kc
|
tests/testnet/roles/Bob.py
|
Python
|
gpl-3.0
| 526
| 0
|
"""
Bob is an honest user. Bob creates transactions and smart contracts, like Alice.
The sync thread must be started separately, and the wallet must already be created.
"""
from hodl import block
import logging as log
def main(wallet, keys=None):
log.info("Bob's main started")
log.debug("Bob's money: " + str(w
|
allet.bch.money(keys['Bob'][1])))
# start blockchain checking thread
# create transaction
# create smart contract
# messages to smart contract
# decentralized internet request
pass # t
|
odo
|
snibug/gyp_example
|
build/package_application.py
|
Python
|
apache-2.0
| 3,909
| 0.008186
|
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
"""Package application for the given platform and build configs.
Depending on platform, this will create a package suitable for distribution.
If the buildbot is running the script, it will be uploaded to the
buildbot staging area.
Usage varies depending on the platform and the user.
If the buildbot is running the script, no parameters should be required
other than the platform and the path to the staging area.
However, users who want to build their own packages can specify options on the
command line.
"""
import argparse
import importlib
import logging
import os
import platform
|
import sys
import textwrap
import package_utils
def _ParseCommandLine(args):
"""Parse command line and return options."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(__doc__))
if platform.system() == 'Linux':
valid_platforms = ('Android', 'Linux')
else:
valid_platforms = ('PS3', 'PS4', 'WiiU', 'XB1')
packagers = {}
# Import each packager module for the given platform, and
# store the platform-spe
|
cific Packager class in a dict.
for plat in valid_platforms:
packagers[plat] = importlib.import_module(
'%s.packager' % plat.lower()).Packager
valid_configs = (
'Debug',
'Devel',
'QA',
'Gold',
)
subparsers = parser.add_subparsers(dest='platform', help='Platform name')
# We allow each platform to add its own command line arguments,
# as well as the common ones. Add the common args to each sub-parser
# to avoid confusing ordering requirements.
# So the user invokes this like $ package_application.py PLATFORM <args>
for plat, packager in packagers.iteritems():
sub_parser = subparsers.add_parser(plat)
packager.AddCommandLineArguments(sub_parser)
sub_parser.add_argument(
'-c', '--config',
dest='config_list',
required=not package_utils.IsBuildbot(),
choices=valid_configs, action='append',
        help='Build config. May be specified multiple times. '
             'For automated builds, the set of configs will be specified in '
             'the packager script.')
# The buildbot tells us the path to the staging directory, since it's
# based on the branch, time of day and buildnumber.
sub_parser.add_argument('-s', '--staging',
required=package_utils.IsBuildbot(),
help='Path to staging area on buildmaster. '
'(For use by buildbot.)')
sub_parser.add_argument('-u', '--user',
help='Override user for testing staging.')
sub_parser.add_argument('-v', '--verbose',
required=False, action='store_true')
sub_parser.set_defaults(packager=packager)
return parser.parse_args(args)
def main(args):
options = _ParseCommandLine(args)
if options.verbose:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
logging_format = '%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging_level,
format=logging_format,
datefmt='%m-%d %H:%M')
packager = options.packager(options)
try:
deployment_files = packager.PackageApplication()
except RuntimeError as e:
logging.error(e)
return 1
logging.debug('Paths for deployment: %s', deployment_files)
rc = 0
if package_utils.IsBuildbot() or options.user:
build_info = os.path.join(packager.GetOutDir(), 'build_info.txt')
if os.path.exists(build_info):
deployment_files.append(build_info)
else:
logging.error('%s not found.', build_info)
rc |= package_utils.DeployToStaging(
deployment_files, options.staging, options.user)
return rc
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Panos512/invenio
|
modules/miscutil/lib/upgrades/invenio_2012_11_04_circulation_and_linkback_updates.py
|
Python
|
gpl-2.0
| 4,682
| 0.006621
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Co
|
pyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOU
|
T ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.dbquery import run_sql
from invenio.textutils import wait_for_user
depends_on = ['invenio_release_1_1_0']
def info():
return "WebLinkback and BibCirculation updates"
def do_upgrade():
## Since Invenio Upgrader was committed to maint-1.1 and merged to
## master in 8d7ed84, some of the tables that were different in
## maint-1.1 and master at the time needed upgrade recipe. This
    ## commit fixes the situation in a gentle manner (by checking column
    ## existence etc.), since some sites may have upgraded the DB schema at
    ## various times.
## Firstly, BibCirculation tables:
# crcBORROWER
create_statement = run_sql('SHOW CREATE TABLE crcBORROWER')[0][1]
if '`ccid` int(15)' not in create_statement:
run_sql("ALTER TABLE crcBORROWER ADD COLUMN ccid int(15) " \
"unsigned NULL default NULL AFTER id")
if 'KEY `ccid`' not in create_statement:
run_sql("ALTER TABLE crcBORROWER ADD UNIQUE KEY ccid (ccid)")
if 'KEY `name`' not in create_statement:
run_sql("ALTER TABLE crcBORROWER ADD KEY name (name)")
if 'KEY `email`' not in create_statement:
run_sql("ALTER TABLE crcBORROWER ADD KEY email (email)")
# crcILLREQUEST
create_statement = run_sql('SHOW CREATE TABLE crcILLREQUEST')[0][1]
if '`budget_code` varchar(60)' not in create_statement:
run_sql("ALTER TABLE crcILLREQUEST ADD COLUMN budget_code varchar(60) " \
"NOT NULL default '' AFTER cost")
# crcITEM.expected_arrival_date
create_statement = run_sql('SHOW CREATE TABLE crcITEM')[0][1]
if '`expected_arrival_date` varchar(60)' not in create_statement:
run_sql("ALTER TABLE crcITEM ADD COLUMN expected_arrival_date varchar(60) " \
"NOT NULL default '' AFTER status")
## Secondly, WebLinkback tables:
run_sql("""
CREATE TABLE IF NOT EXISTS lnkENTRY (
id int(15) NOT NULL auto_increment,
origin_url varchar(100) NOT NULL, -- url of the originating resource
id_bibrec mediumint(8) unsigned NOT NULL, -- bibrecord
additional_properties longblob,
type varchar(30) NOT NULL,
status varchar(30) NOT NULL default 'PENDING',
insert_time datetime default '0000-00-00 00:00:00',
PRIMARY KEY (id),
INDEX (id_bibrec),
INDEX (type),
INDEX (status),
INDEX (insert_time)
) ENGINE=MyISAM;
""")
run_sql("""
CREATE TABLE IF NOT EXISTS lnkENTRYURLTITLE (
id int(15) unsigned NOT NULL auto_increment,
url varchar(100) NOT NULL,
title varchar(100) NOT NULL,
manual_set boolean NOT NULL default 0,
broken_count int(5) default 0,
broken boolean NOT NULL default 0,
PRIMARY KEY (id),
UNIQUE (url),
INDEX (title)
) ENGINE=MyISAM;
""")
run_sql("""
CREATE TABLE IF NOT EXISTS lnkENTRYLOG (
id_lnkENTRY int(15) unsigned NOT NULL,
id_lnkLOG int(15) unsigned NOT NULL,
FOREIGN KEY (id_lnkENTRY) REFERENCES lnkENTRY(id),
FOREIGN KEY (id_lnkLOG) REFERENCES lnkLOG(id)
) ENGINE=MyISAM;
""")
run_sql("""
CREATE TABLE IF NOT EXISTS lnkLOG (
id int(15) unsigned NOT NULL auto_increment,
id_user int(15) unsigned,
action varchar(30) NOT NULL,
log_time datetime default '0000-00-00 00:00:00',
PRIMARY KEY (id),
INDEX (id_user),
INDEX (action),
INDEX (log_time)
) ENGINE=MyISAM;
""")
run_sql("""
CREATE TABLE IF NOT EXISTS lnkADMINURL (
id int(15) unsigned NOT NULL auto_increment,
url varchar(100) NOT NULL,
list varchar(30) NOT NULL,
PRIMARY KEY (id),
UNIQUE (url),
INDEX (list)
) ENGINE=MyISAM;
""")
run_sql("""
CREATE TABLE IF NOT EXISTS lnkADMINURLLOG (
id_lnkADMINURL int(15) unsigned NOT NULL,
id_lnkLOG int(15) unsigned NOT NULL,
FOREIGN KEY (id_lnkADMINURL) REFERENCES lnkADMINURL(id),
FOREIGN KEY (id_lnkLOG) REFERENCES lnkLOG(id)
) ENGINE=MyISAM;
""")
def estimate():
return 10
def pre_upgrade():
pass
def post_upgrade():
pass
|
SMSSecure/SMSSecure
|
scripts/emoji-extractor/remove-emoji-margins.py
|
Python
|
gpl-3.0
| 738
| 0.001355
|
#!/usr/bin/env python3
import argparse
from pathlib import Path
from PIL imp
|
ort Image
parser = argparse.ArgumentParser(
|
prog='emoji-extractor',
description="""Resize extracted emojis to 128x128.""")
parser.add_argument(
'-e', '--emojis',
help='folder where emojis are stored',
default='output/',
required=False)
args = parser.parse_args()
path = Path(args.emojis)
for image_path in path.iterdir():
try:
print('Cropping {}...'.format(image_path.name))
image = Image.open(image_path)
width, height = image.size
box = (4, 0, width - 4, height)
crop = image.crop(box)
crop.save(image_path)
except:
print('Cannot crop {}...'.format(image_path.name))
|
kawamon/hue
|
desktop/core/ext-py/celery-4.2.1/celery/apps/multi.py
|
Python
|
apache-2.0
| 15,740
| 0
|
"""Start/stop/manage workers."""
from __future__ import absolute_import, unicode_literals
import errno
import os
import shlex
import signal
import sys
from collections import OrderedDict, defaultdict
from functools import partial
from subprocess import Popen
from time import sleep
from kombu.utils.encoding import from_utf8
from kombu.utils.objects import cached_property
from celery.five import UserList, items
from celery.platforms import IS_WINDOWS, Pidfile, signal_name
from celery.utils.nodenames import (gethostname, host_format, node_format,
nodesplit)
from celery.utils.saferepr import saferepr
__all__ = ('Cluster', 'Node')
CELERY_EXE = 'celery'
def celery_exe(*args):
return ' '.join((CELERY_EXE,) + args)
def build_nodename(name, prefix, suffix):
hostname = suffix
if '@' in name:
nodename = host_format(name)
shortname, hostname = nodesplit(nodename)
name = shortname
else:
shortname = '%s%s' % (prefix, name)
nodename = host_format(
'{0}@{1}'.format(shortname, hostname),
)
return name, nodename, hostname
def build_expander(nodename, shortname, hostname):
return partial(
node_format,
name=nodename,
N=shortname,
d=hostname,
h=nodename,
i='%i',
I='%I',
)
def format_opt(opt, value):
if not value:
return opt
if opt.startswith('--'):
return '{0}={1}'.format(opt, value)
return '{0} {1}'.format(opt, value)
def _kwargs_to_command_line(kwargs):
return {
('--{0}'.format(k.replace('_', '-'))
if len(k) > 1 else '-{0}'.format(k)): '{0}'.format(v)
for k, v in items(kwargs)
}
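# Illustrative examples (added for clarity; not part of the original module):
#   format_opt('--pidfile', '%n.pid')  -> '--pidfile=%n.pid'
#   format_opt('-n', 'worker1')        -> '-n worker1'
#   _kwargs_to_command_line({'loglevel': 'DEBUG', 'n': 'worker1'})
#       -> {'--loglevel': 'DEBUG', '-n': 'worker1'}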
class NamespacedOptionParser(object):
def __init__(self, args):
self.args = args
self.options = OrderedDict()
self.values = []
self.passthrough = ''
self.namespaces = defaultdict(lambda: OrderedDict())
def parse(self):
rargs = list(self.args)
pos = 0
while pos < len(rargs):
arg = rargs[pos]
if arg == '--':
self.passthrough = ' '.join(rargs[pos:])
break
elif arg[0] == '-':
if arg[1] == '-':
self.process_long_opt(arg[2:])
else:
value = None
if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
value = rargs[pos + 1]
pos += 1
self.process_short_opt(arg[1:], value)
else:
self.values.append(arg)
pos += 1
def process_long_opt(self, arg, value=None):
if '=' in arg:
arg, value = arg.split('=', 1)
self.add_option(arg, value, short=False)
def process_short_opt(self, arg, value=None):
self.add_option(arg, value, short=True)
def optmerge(self, ns, defaults=None):
if defaults is None:
defaults = self.options
return OrderedDict(defaults, **self.namespaces[ns])
def add_option(self, name, value, short=False, ns=None):
prefix = short and '-' or '--'
dest = self.options
if ':' in name:
name, ns = name.split(':')
dest = self.namespaces[ns]
dest[prefix + name] = value
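# Illustrative example (added for clarity; not part of the original module):
#   p = NamespacedOptionParser(['-A', 'proj', '--loglevel:worker1=DEBUG', 'w1', 'w2'])
#   p.parse()
#   p.options     -> {'-A': 'proj'}
#   p.values      -> ['w1', 'w2']
#   p.namespaces  -> {'worker1': {'--loglevel': 'DEBUG'}}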
class Node(object):
"""Represents a node in a cluster."""
def __init__(self, name,
cmd=None, append=None, options=None, extra_args=None):
self.name = name
self.cmd = cmd or '-m {0}'.format(celery_exe('worker', '--detach'))
self.append = append
self.extra_args = extra_args or ''
self.options = self._annotate_with_default_opts(
options or OrderedDict())
self.expander = self._prepare_expander()
self.argv = self._prepare_argv()
self._pid = None
def _annotate_with_default_opts(self, options):
options['-n'] = self.name
self._setdefaultopt(options, ['--pidfile', '-p'], '%n.pid')
self._setdefaultopt(options, ['--logfile', '-f'], '%n%I.log')
self._setdefaultopt(options, ['--executable'], sys.executable)
return options
def _setdefaultopt(self, d, alt, value):
for opt in alt[1:]:
try:
return d[opt]
except KeyError:
pass
return d.setdefault(alt[0], value)
def _prepare_expander(self):
shortname, hostname = self.name.split('@', 1)
return build_expander(
self.name, shortname, hostname)
def _prepare_argv(self):
argv = tuple(
[self.expander(self.cmd)] +
[format_opt(opt, self.expander(value))
for opt, value in items(self.options)] +
[self.extra_args]
)
if self.append:
argv += (self.expander(self.append),)
return argv
def alive(self):
return self.send(0)
def send(self, sig, on_error=None):
pid = self.pid
if pid:
try:
os.kill(pid, sig)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
maybe_call(on_error, self)
return False
return True
maybe_call(on_error, self)
def start(self, env=None, **kwargs):
return self._waitexec(
self.argv, path=self.executable, env=env, **kwargs)
def _waitexec(self, argv, path=sys.executable, env=None,
on_spawn=None, on_signalled=None, on_failure=None):
argstr = self.prepare_argv(argv, path)
maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)
pipe = Popen(argstr, env=env)
return self.handle_process_exit(
pipe.wait(),
on_signalled=on_signalled,
on_failure=on_failure,
)
def handle_process_exit(self, retcode, on_signalled=None, on_failure=None):
if retcode < 0:
maybe_call(on_signalled, self, -retcode)
return -retcode
elif retcode > 0:
maybe_call(on_failure, self, retcode)
return retcode
def prepare_argv(self, argv, path):
args = ' '.join([path] + list(argv))
return shlex.split(from_utf8(args), posix=not IS_WINDOWS)
def getopt(self, *alt):
for opt in alt:
try:
return self.options[opt]
except KeyError:
pass
raise KeyError(alt[0])
def __repr__(self):
return '<{name}: {0.name}>'.format(self, name=type(self).__name__)
@cached_property
def pidfile(self):
return self.expander(self.getopt('--pidfile', '-p'))
@cached_property
def logfile(self):
return self.expander(self.getopt('--logfile', '-f'))
@property
def pid(self):
if self._pid is not None:
return self._pid
try:
return Pidfile(self.pidfile).read_pid()
except ValueError:
pass
@pid.setter
def pid(self, value):
self._pid = value
@cached_property
def executable(self):
return self.options['--executable']
@cached_property
def argv_with_executable(self):
return (self.executable,) + self.argv
@classmethod
def from_kwargs(cls, name, **kwargs):
return cls(name, options=_kwargs_to_command_line(kwargs))
def maybe_call(fun, *args, **kwargs):
if fun is not None:
fun(*args, **kw
|
args)
class MultiParser(object):
Node = Node
def __init__(self, cmd='celery worker',
append='', prefix='', suffix='',
|
range_prefix='celery'):
self.cmd = cmd
self.append = append
self.prefix = prefix
self.suffix = suffix
self.range_prefix = range_prefix
def parse(self, p):
names = p.values
options = dict(p.options)
ranges = len(names) == 1
prefix = self.prefix
cmd = options.pop('--cmd', self.cmd)
append = options.pop('--append', self.append)
ho
|
tboyce1/home-assistant
|
homeassistant/components/tile/__init__.py
|
Python
|
apache-2.0
| 3,733
| 0.000536
|
"""The Tile component."""
import asyncio
from datetime import timedelta
from pytile import async_login
from pytile.errors import SessionExpiredError, TileError
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_COORDINATOR, DOMAIN, LOGGER
PLATFORMS = ["device_tracker"]
DEVICE_TYPES = ["PHONE", "TILE"]
DEFAULT_ATTRIBUTION = "Data provided by Tile"
DEFAULT_ICON = "mdi:view-grid"
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=2)
CONF_SHOW_INACTIVE = "show_inactive"
async def async_setup(hass, config):
"""Set up the Tile component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up Tile as config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
client = await async_login(
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
session=websession,
)
async def async_update_data():
"""Get new data from the API."""
try:
return await client.tiles.all()
except SessionExpiredError:
LOGGER.info("Tile session expired; creating a new one")
await client.async_init()
except TileError as err:
raise UpdateFailed(f"Error while retrieving data: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=config_entry.title,
update_interval=DEFAULT_UPDAT
|
E_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_refresh()
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a Tile config entry."""
unload_ok = all(
await asyncio
|
.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(config_entry.entry_id)
return unload_ok
class TileEntity(CoordinatorEntity):
"""Define a generic Tile entity."""
def __init__(self, coordinator):
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._name = None
self._unique_id = None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return DEFAULT_ICON
@property
def name(self):
"""Return the name."""
return self._name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._unique_id
@callback
def _handle_coordinator_update(self):
"""Respond to a DataUpdateCoordinator update."""
self._update_from_latest_data()
self.async_write_ha_state()
@callback
def _update_from_latest_data(self):
"""Update the entity from the latest data."""
raise NotImplementedError
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._update_from_latest_data()
|
jonfoster/pyxb-upstream-mirror
|
examples/ndfd/forecast.py
|
Python
|
apache-2.0
| 2,426
| 0.005359
|
from __future__ import print_function
import xml.dom.minidom
import DWML
import datetime
import pyxb.binding.datatypes as xsd
import urllib2
import time
import collections
import sys
# Get the next seven days forecast for two locations
zip = [ 85711, 55108 ]
if 1 < len(sys.argv):
zip = sys.argv[1:]
begin = xsd.dateTime.today()
end = xsd.dateTime(begin + datetime.timedelta(7))
# Create the REST URI for this query
uri = 'http://www.weather.gov/forecasts/xml/sample_products/browser_interface/ndfdXMLclient.php?zipCodeList=%s&product=time-series&begin=%s&end=%s&maxt=maxt&mint=mint' % ("+".join([ str(_zc) for _zc in zip ]), begin.xsdLiteral(), end.xsdLiteral())
print(uri)
# Retrieve the data
xmld = urllib2.urlopen(uri).read()
open('forecast.xml', 'wb').write(xmld)
#print xmld
# Convert it to DWML object
r = DWML.CreateFromDocument(xmld)
product = r.head.product
print('%s %s' % (product.title, product.category))
source = r.head.source
print(", ".join(source.production_center.content()))
data = r.data
if isinstance(data, collections.MutableSequence):
data = data.pop(0)
print(data)
for i in range(len(data.location)):
loc = data.location[i]
print('%s [%s %s]' % (loc.location_key, loc.point.latitude, loc.point.longitude))
for p in data.parameters:
if p.applicable_location != loc.location_key:
continue
mint = maxt = None
for t in p.temperature:
if 'maximum' == t.type:
maxt = t
elif 'minimum' == t.type:
mint = t
print('%s (%s): %s' % (t.name[0], t.units, " ".join([ str(_v) for _v in t.content() ])))
# Sometimes the service doesn't provide the same number of
# data points for min and max
mint_time_layout = maxt_time_layout = No
|
ne
for tl in data.time_layout:
if tl.layout_key == mint.time_layout:
mint_time_layout = tl
if tl.layout_key == maxt.time_layout:
maxt_time_layout = tl
for ti in range(min(len(mint_time_l
|
ayout.start_valid_time), len(maxt_time_layout.start_valid_time))):
start = mint_time_layout.start_valid_time[ti].value()
end = mint_time_layout.end_valid_time[ti]
print('%s: min %s, max %s' % (time.strftime('%A, %B %d %Y', start.timetuple()),
mint.value_[ti].value(), maxt.value_[ti].value()))
|
kfcpaladin/sze-the-game
|
renpy/editor.py
|
Python
|
mit
| 5,015
| 0.002792
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import renpy
import traceback
import subprocess
class Editor(object):
"""
This class is intended to be subclassed by editor subclasses. It provides a
number of editor related operations, which are called by Ren'Py (including
the Ren'Py Launcher).
Editor operations are grouped into transactions. An editor transaction
starts with a call to the begin() method. Ren'Py will then call some number
of command methods, each causing an operation to occur in the editor. Ren'Py
will call end() at the end of the transaction.
    Although not required, it's reasonable that an implementation of this class
will batch the files together and send them to the editor at once. It's also
reasonable that an implementation will send the operations one at a time (and
    do little-to-nothing in begin() and end()).
Each operation takes a path to operate on. If the editor has a buffer
corresponding to that path, that buffer is used. Otherwise, the editor
is implicitly opened.
We reserve the right to add new keyword arguments to methods of this class,
so please ensure that subclasses accept and ignore unknown keyword
arguments.
"""
de
|
f begin(self, new_window=False, **kwargs):
"""
Begins an editor transaction.
`new_window`
If True, a new editor window will be created and presented to the
|
            user. Otherwise, an existing editor window will be used.
"""
def end(self, **kwargs):
"""
Ends an editor transaction.
"""
def open(self, filename, line=None, **kwargs): # @ReservedAssignment
"""
        Ensures `filename` is open in the editor. This may be called multiple
times per transaction.
`line`
If not None, this should be a line number to open in the
editor.
The first open call in a transaction is somewhat special - that file
should be given focus in a tabbed editor environment.
"""
class SystemEditor(Editor):
def open(self, filename, line=None, **kwargs): # @ReservedAssignment
filename = renpy.exports.fsencode(filename)
try:
if renpy.windows:
os.startfile(filename) # @UndefinedVariable
elif renpy.macintosh:
subprocess.call([ "open", filename ]) # @UndefinedVariable
elif renpy.linux:
subprocess.call([ "xdg-open", filename ]) # @UndefinedVariable
except:
traceback.print_exc()
# The editor that Ren'Py is using. It should be a subclass of the Editor
# class.
editor = None
def init():
"""
Creates the editor object, based on the contents of the RENPY_EDIT_PY
file.
"""
global editor
editor = SystemEditor()
path = os.environ.get("RENPY_EDIT_PY", None)
if path is None:
return
with open(path, "r") as f:
source = f.read()
code = compile(source, path, "exec")
scope = { "__file__" : path }
exec code in scope, scope
if "Editor" in scope:
editor = scope["Editor"]()
return
raise Exception("{0} did not define an Editor class.".format(path))
def launch_editor(filenames, line=1, transient=False):
"""
Causes the editor to be launched.
"""
# On mobile devices, we will never be able to launch the editor.
if renpy.mobile:
return True
if editor is None:
init()
if editor is None:
return False
filenames = [ renpy.parser.unelide_filename(i) for i in filenames ]
try:
editor.begin(new_window=transient)
for i in filenames:
editor.open(i, line)
line = None # The line number only applies to the first filename.
editor.end()
return True
except:
traceback.print_exc()
return False
|
vmagamedov/grpclib
|
tests/dummy_pb2.py
|
Python
|
bsd-3-clause
| 5,139
| 0.003308
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dummy.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='dummy.proto',
package='dummy',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0b\x64ummy.proto\x12\x05\x64ummy\"\x1d\n\x0c\x44ummyRequest\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nDummyReply\x12\r\n\x05value\x18\x01 \x01(\t2\xfa\x01\n\x0c\x44ummyService\x12\x36\n\nUnaryUnary\x12\x13.dummy.DummyRequest\x1a\x11.dummy.DummyReply\"\x00\x12\x39\n\x0bUnaryStream\x12\x13.dummy.DummyRequest\x1a\x11.dummy.DummyReply\"\x00\x30\x01\x12\x39\n\x0bStreamUnary\x12\x13.dummy.DummyRequest\x1a\x11.dummy.DummyReply\"\x00(\x01\x12<\n\x0cStreamStream\x12\x13.dummy.DummyRequest\x1a\x11.dummy.DummyReply\"\x00(\x01\x30\x01\x62\x06proto3'
)
_DUMMYREQUEST = _descriptor.Descriptor(
name='DummyRequest',
full_name='dummy.DummyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='dummy.DummyRequest.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=51,
)
_DUMMYREPLY = _descriptor.Descriptor(
name='DummyReply',
full_name='dummy.DummyReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='dummy.DummyReply.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=80,
)
DESCRIPTOR.message_types_by_name['DummyRequest'] = _DUMMYREQUEST
DESCRIPTOR.message_types_by_name['DummyReply'] = _DUMMYREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DummyRequest = _reflection.GeneratedProtocolMessageType('DummyRequest', (_message.Message,), {
'DESCRIPTOR' : _DUMMYREQUEST,
'__module__' : 'dummy_pb2'
# @@protoc_insertion_point(class_scope:dummy.DummyRequest)
})
_sym_db.RegisterMessage(DummyRequest)
DummyReply = _reflection.GeneratedProtocolMessageType('DummyReply', (_message.Message,), {
'DESCRIPTOR' : _DUMMYREPLY,
'__module__' : 'dummy_pb2'
# @@protoc_insertion_point(class_scope:dummy.DummyReply)
})
_sym_db.RegisterMessage(DummyReply)
_
|
DUMMYSERVICE = _descriptor.ServiceDescriptor(
name='DummyService',
full_name='dummy.DummyService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
seri
|
alized_start=83,
serialized_end=333,
methods=[
_descriptor.MethodDescriptor(
name='UnaryUnary',
full_name='dummy.DummyService.UnaryUnary',
index=0,
containing_service=None,
input_type=_DUMMYREQUEST,
output_type=_DUMMYREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UnaryStream',
full_name='dummy.DummyService.UnaryStream',
index=1,
containing_service=None,
input_type=_DUMMYREQUEST,
output_type=_DUMMYREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='StreamUnary',
full_name='dummy.DummyService.StreamUnary',
index=2,
containing_service=None,
input_type=_DUMMYREQUEST,
output_type=_DUMMYREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='StreamStream',
full_name='dummy.DummyService.StreamStream',
index=3,
containing_service=None,
input_type=_DUMMYREQUEST,
output_type=_DUMMYREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_DUMMYSERVICE)
DESCRIPTOR.services_by_name['DummyService'] = _DUMMYSERVICE
# @@protoc_insertion_point(module_scope)
|
CHrycyna/LandscapeTracker
|
app/controllers/__init__.py
|
Python
|
mit
| 49
| 0.020408
|
__all__ = ["user_controller", "pl
|
ant_cont
|
roller"]
|
jasonehines/mycroft-core
|
mycroft/skills/container.py
|
Python
|
gpl-3.0
| 3,372
| 0
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
from os.path import dirname, exists, isdir
from mycroft.configuration import ConfigurationManager
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.skills.core import create_skill_descriptor, load_skill
from mycroft.skills.intent import Intent
from mycroft.util.log import getLogger
__author__ = 'seanfitz'
LOG = getLogger("SkillContainer")
class SkillContainer(object):
def __init__(self, args):
params = self.__build_params(args)
if params.config:
ConfigurationManager.load_local([params.config])
if exists(params.lib) and isdir(params.lib):
sys.path.append(params.lib)
sys.path.append(params.dir)
self.dir = params.dir
self.enable_intent = params.enable_intent
self.__init_client(params)
@staticmethod
def __build_params(args):
parser = argparse.ArgumentParser()
parser.add_argument("--config", default="./mycroft.conf")
parser.add_argument("dir", nargs='?', default=dirname(__file__))
parser.add_argument("--lib", default="./lib")
parser.add_argument("--host", default=None)
parser.add_argument("--port", default=None)
parser.add_argument("--use-ssl", action='store_true', default=False)
parser.add_argument("--enable-intent", action='store_true',
default=False)
return parser.parse_args(args)
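    # Illustrative invocation (not part of the original file; the skill path,
    # host and port are placeholders for whatever your setup uses):
    #   python -m mycroft.skills.container /path/to/my_skill \
    #       --enable-intent --host localhost --port 8181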
def __init_client(self, params):
config = ConfigurationManager.get().get("websocket")
if not params.host:
params.host = config.get('host')
if not params.port:
params.port = config.get('port')
self.ws = WebsocketClient(host=params.host,
port=params.port,
ssl=params.use_ssl)
def load_skill(self):
if self.enable_intent:
Intent(self.ws)
skill_descriptor = create_skill_descriptor(self.dir)
self.skill = load_skill(skill_descriptor, self.ws)
def run(self):
try:
self.ws.on('message', LOG.debug)
self.ws.on('open', self.load_skill)
self.ws.on('error', LOG.error)
self.ws.run_forever()
except Exception as e:
LOG.error("Error: {0}".format(e))
self.st
|
op()
def stop(self):
if self.skill:
self.skill.shutdown()
def main():
container = SkillContainer(sys.argv[1:])
try:
container.run()
except KeyboardInterrupt:
|
container.stop()
finally:
sys.exit()
if __name__ == "__main__":
main()
|
JoseTomasTocino/image-metadata-viewer
|
main.py
|
Python
|
lgpl-3.0
| 3,693
| 0.002709
|
#!/usr/bin/env python
# coding: utf-8
import datetime
import subprocess
import logging
import json
import os
import sys
from io import BytesIO
import requests
from bottle import route, run, request
from bottle import jinja2_view as view, jinja2_template as template
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
EXIFTOOL_PATH = 'exiftool/exiftool'
@route('/favicon.ico')
def get_favicon():
return ''
@route('/ads.txt')
def get_ads():
return 'google.com, pub-0745898310693904, DIRECT, f08c47fec0942fa0'
@route('/')
@view('index')
def fetch_data():
image_location = request.GET.get('img')
template_data = {
'state': 0,
'image_location': image_location,
'metadata': {}
}
# If no image location was specified, just return the initial page with no data
if not image_location:
logging.info("No image location specified")
return template_data
template_data['state'] = 1
logging.info("Fetching image at {}...".format(image_location))
response = requests.get(image_location)
if response.status_code != 200:
logging.error("Problem fetching image :(")
template_data['invalid_image'] = "Invalid image"
return template_data
logging.info("Image fetched properly")
f = BytesIO(response.content)
logging.info("Running exiftool process...")
process = subprocess.Popen([EXIFTOOL_PATH, '-g0', '-j', '-c', '%+.6f', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output, output_err = process.communicate(f.read())
# TODO: check for errors running process
logging.info("Decoding JSON from output...")
metadata = json.loads(output)[0]
# Filter metadata components that are not dictionaries
metadata = {k: v for k, v in metadata.item
|
s() if isinstance(v, dict)}
if 'ExifTool' in metadata:
del metadata['ExifTool']
# Try to build a summary of information
basic_info = {}
try:
basic_info['Dimensions'] = u"{} × {} {}".format(
metadata['File']['ImageWidth'],
metadata['File']['ImageHeight'],
metadata['File']['FileType']
)
except:
pass
if 'EXIF' in metadata:
if 'Artist' in metadata['EXIF']
|
:
basic_info['Artist'] = metadata['EXIF']['Artist']
if 'Copyright' in metadata['EXIF']:
basic_info['Copyright'] = metadata['EXIF']['Copyright']
if 'Model' in metadata['EXIF']:
basic_info['Camera'] = metadata['EXIF']['Model']
if 'LensModel' in metadata['EXIF']:
basic_info['LensModel'] = metadata['EXIF']['LensModel']
if {'ExposureMode', 'ExposureTime', 'FNumber', 'ISO'} <= set(metadata['EXIF'].keys()):
m = metadata['EXIF']
basic_info['Exposure'] = '{}, {}, {}, ISO {}'.format(
m['ExposureMode'], m['ExposureTime'], m['FNumber'], m['ISO']
)
if 'Composite' in metadata:
if 'GPSLongitude' in metadata['Composite'] and 'GPSLatitude' in metadata['Composite']:
template_data['has_location'] = True
if 'LensID' in metadata['Composite']:
basic_info['Lens'] = metadata['Composite']['LensID']
metadata['Basic'] = basic_info
template_data['metadata'] = metadata
# Get a sorted list of metadata keys
template_data['metadata_sorted_keys'] = sorted(metadata.keys())
# Try to get the referer
referer = request.GET.get('page', request.headers.get('Referer', '/'))
return template_data
run(host='0.0.0.0', port=os.environ.get('PORT', 5000))
|