text stringlengths 4 1.02M | meta dict |
|---|---|
"""This module exports the Lua plugin class."""
from SublimeLinter.lint import Linter, util
class GLua(Linter):
    """Provides an interface to gluac -p."""

    # Only lint buffers whose SublimeLinter syntax is 'lua'.
    syntax = 'lua'
    # Command line run per lint pass. '*' is where SublimeLinter inserts
    # extra arguments and '-' feeds the buffer via stdin (SublimeLinter
    # cmd conventions); '-p' presumably makes gluac parse-only — confirm
    # against gluac usage.
    cmd = 'gluac -p * -'
    # Captures the 1-based line number, the message text, and (optionally)
    # the offending token reported as: near '<token>'.
    regex = r'^.+?:.+?:(?P<line>\d+): (?P<message>.+?(?:near (?P<near>\'.+\')|$))'
    # gluac writes its diagnostics to stdout, not stderr.
    error_stream = util.STREAM_STDOUT
| {
"content_hash": "0a8940852ead6dc7ace89dc843644fa0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.5718654434250765,
"repo_name": "glua/SublimeLinter-contrib-glua",
"id": "87e4c0439ef216f56a61261b7f0dc45bc96ed179",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "545"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import contextlib
from _Framework.Util import product
from _Framework.EncoderElement import EncoderElement
from _Framework.SubjectSlot import SubjectEvent
from _Framework.Signal import Signal
from _Framework.NotifyingControlElement import NotifyingControlElement
from _Framework.Util import in_range
from _Framework.Debug import debug_print
from _Framework.Disconnectable import Disconnectable
from _Framework.ButtonMatrixElement import ButtonMatrixElement
class InputSignal(Signal):
    """
    Special signal type that makes sure that interaction with input
    works properly. Special input control elements that define
    value-dependent properties should use this kind of signal.
    """

    def __init__(self, sender=None, *a, **k):
        super(InputSignal, self).__init__(sender=sender, *a, **k)
        # The sender is the input control whose listener count we maintain.
        self._input_control = sender

    @contextlib.contextmanager
    def _listeners_update(self):
        """Track how the wrapped operation changes the listener count."""
        count_before = self.count
        yield
        delta = self.count - count_before
        self._input_control._input_signal_listener_count += delta
        listener_count = self._input_control._input_signal_listener_count
        # Rebuild-request on first-listener-added / last-listener-removed is
        # intentionally disabled:
        # if delta > 0 and listener_count == delta or delta < 0 and listener_count == 0:
        #     self._input_control._request_rebuild()

    def connect(self, *a, **k):
        with self._listeners_update():
            super(InputSignal, self).connect(*a, **k)

    def disconnect(self, *a, **k):
        with self._listeners_update():
            super(InputSignal, self).disconnect(*a, **k)

    def disconnect_all(self, *a, **k):
        with self._listeners_update():
            super(InputSignal, self).disconnect_all(*a, **k)
class EncoderMatrixElement(NotifyingControlElement):
    ' Class representing a 2-dimensional set of buttons '

    def __init__(self, script, *a, **k):
        super(EncoderMatrixElement, self).__init__(*a, **k)
        self._script = script
        self._dials = []            # list of row tuples
        self._dial_coordinates = {}  # dial -> (column, row)
        self._max_row_width = 0

    def disconnect(self):
        NotifyingControlElement.disconnect(self)
        self._dials = None
        self._dial_coordinates = None

    def add_row(self, dials):
        """Append a tuple of EncoderElements as the next row of the matrix."""
        assert (dials != None)
        assert isinstance(dials, tuple)
        row_number = len(self._dials)
        for column, dial in enumerate(dials):
            assert (dial != None)
            assert isinstance(dial, EncoderElement)
            assert (dial not in self._dial_coordinates)
            dial.add_value_listener(self._dial_value, identify_sender=True)
            self._dial_coordinates[dial] = (column, row_number)
        self._max_row_width = max(self._max_row_width, len(dials))
        self._dials.append(dials)

    def width(self):
        return self._max_row_width

    def height(self):
        return len(self._dials)

    def send_value(self, column, row, value, force=False):
        assert (value in range(128))
        assert (column in range(self.width()))
        assert (row in range(self.height()))
        dial_row = self._dials[row]
        # Rows may be ragged; silently ignore columns past this row's end.
        if column < len(dial_row):
            dial_row[column].send_value(value, force)

    def get_dial(self, column, row):
        """Return the dial at (column, row), or None for a short row."""
        assert (column in range(self.width()))
        assert (row in range(self.height()))
        dial_row = self._dials[row]
        return dial_row[column] if column < len(dial_row) else None

    def reset(self):
        # Send 0 (with force=True) to every dial in the matrix.
        for dial_row in self._dials:
            for dial in dial_row:
                dial.send_value(0, True)

    def _dial_value(self, value, sender):
        """Listener: forward a dial's value plus its matrix coordinates."""
        assert isinstance(value, int)
        assert (sender in self._dial_coordinates)
        assert isinstance(self._dial_coordinates[sender], tuple)
        column, row = self._dial_coordinates[sender]
        self.notify_value(value, column, row)
class NewEncoderMatrixElement(ButtonMatrixElement):
    ' Class representing a 2-dimensional set of buttons '

    def __init__(self, script, *a, **k):
        super(NewEncoderMatrixElement, self).__init__(*a, **k)
        self._script = script
        self._dials = []
        self._dial_coordinates = {}
        self._max_row_width = 0

    def disconnect(self):
        super(NewEncoderMatrixElement, self).disconnect()
        self._dials = None
        self._dial_coordinates = None

    def get_dial(self, *a, **k):
        """Alias for get_button.

        Fixed: the original called ``self.get_button(self, *a, **k)``, which
        passed ``self`` twice (shifting every positional argument by one)
        and dropped the return value, so get_dial always returned None.
        """
        return self.get_button(*a, **k)

    def _dial_value(self, value, sender):
        """Listener: forward a dial's value plus its matrix coordinates."""
        assert isinstance(value, int)
        assert (sender in self._dial_coordinates.keys())
        assert isinstance(self._dial_coordinates[sender], tuple)
        coordinates = tuple(self._dial_coordinates[sender])
        self.notify_value(value, coordinates[0], coordinates[1])

    def xiterbuttons(self):
        # Yield (button, (x, y)) for every coordinate in the matrix.
        for i, j in product(xrange(self.width()), xrange(self.height())):
            button = self.get_button(i, j)
            yield (button, (i, j))
| {
"content_hash": "23df4212d1f591d215e8e9b597e4a650",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 96,
"avg_line_length": 28.598765432098766,
"alnum_prop": 0.7034319015756529,
"repo_name": "LividInstruments/LiveRemoteScripts",
"id": "c43d9ceead53d184b470b6422370967d46d74764",
"size": "4676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_Mono_Framework/EncoderMatrixElement.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2230528"
}
],
"symlink_target": ""
} |
"""Example DAG demonstrating the usage of the PythonOperator."""
import time
from pprint import pprint
from airflow import DAG
from airflow.operators.python import PythonOperator, PythonVirtualenvOperator
from airflow.utils.dates import days_ago
# Default arguments applied to every task in the DAG.
args = {
    'owner': 'airflow',
}

with DAG(
    dag_id='example_python_operator',
    default_args=args,
    schedule_interval=None,  # no schedule: manually / externally triggered
    start_date=days_ago(2),
    tags=['example'],
) as dag:
    # [START howto_operator_python]
    def print_context(ds, **kwargs):
        """Print the Airflow context and ds variable from the context."""
        pprint(kwargs)
        print(ds)
        return 'Whatever you return gets printed in the logs'

    run_this = PythonOperator(
        task_id='print_the_context',
        python_callable=print_context,
    )
    # [END howto_operator_python]

    # [START howto_operator_python_kwargs]
    def my_sleeping_function(random_base):
        """This is a function that will run within the DAG execution"""
        time.sleep(random_base)

    # Generate 5 sleeping tasks, sleeping from 0.0 to 0.4 seconds respectively
    for i in range(5):
        task = PythonOperator(
            task_id='sleep_for_' + str(i),
            python_callable=my_sleeping_function,
            op_kwargs={'random_base': float(i) / 10},
        )

        # Each sleep task runs downstream of the context-printing task.
        run_this >> task
    # [END howto_operator_python_kwargs]

    # [START howto_operator_python_venv]
    def callable_virtualenv():
        """
        Example function that will be performed in a virtual environment.

        Importing at the module level ensures that it will not attempt to import the
        library before it is installed.
        """
        from time import sleep

        from colorama import Back, Fore, Style

        print(Fore.RED + 'some red text')
        print(Back.GREEN + 'and with a green background')
        print(Style.DIM + 'and in dim text')
        print(Style.RESET_ALL)
        for _ in range(10):
            print(Style.DIM + 'Please wait...', flush=True)
            sleep(10)
        print('Finished')

    virtualenv_task = PythonVirtualenvOperator(
        task_id="virtualenv_python",
        python_callable=callable_virtualenv,
        requirements=["colorama==0.4.0"],  # installed into the throwaway venv
        system_site_packages=False,
    )
    # [END howto_operator_python_venv]
| {
"content_hash": "dde8f341ecffc54f889f5ca5a14766fb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 84,
"avg_line_length": 30.31168831168831,
"alnum_prop": 0.6323907455012854,
"repo_name": "sekikn/incubator-airflow",
"id": "a9db34254ebcc5227ccd03f09d0eb31cf8998aa1",
"size": "3122",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/example_dags/example_python_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""Starter script for Nova API.
Starts both the EC2 and OpenStack APIs in separate greenthreads.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from nova import config
from nova import objects
from nova import service
from nova import utils
CONF = cfg.CONF
# Register the service options this module reads below; both lists are
# declared in nova.service.
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
    """Parse config and launch one WSGI service per enabled API.

    Each service listens with SSL iff its name appears in
    CONF.enabled_ssl_apis; the process launcher then blocks until all
    workers exit.
    """
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()
    for api in CONF.enabled_apis:
        kwargs = {'use_ssl': api in CONF.enabled_ssl_apis}
        if api == 'ec2':
            # EC2 requests can carry very long signed URLs.
            kwargs['max_url_len'] = 16384
        server = service.WSGIService(api, **kwargs)
        launcher.launch_service(server, workers=server.workers or 1)
    launcher.wait()
| {
"content_hash": "3df90c2a95796e808895a44a55f5dc03",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 26.594594594594593,
"alnum_prop": 0.6626016260162602,
"repo_name": "apporc/nova",
"id": "75e11be8e6daded9ec6de171e8f1d2972394ce03",
"size": "1715",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/cmd/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16560867"
},
{
"name": "Shell",
"bytes": "24210"
},
{
"name": "Smarty",
"bytes": "335237"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 3cea1b2cfa
Revises: 42b10177639f
Create Date: 2013-05-05 17:07:07.392602
"""
# revision identifiers, used by Alembic.
revision = '3cea1b2cfa'
down_revision = '42b10177639f'

from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable string column ``logo`` to the ``party`` table."""
    logo_column = sa.Column('logo', sa.String(length=1024), nullable=True)
    op.add_column('party', logo_column)
def downgrade():
    """Drop the ``party.logo`` column added by upgrade()."""
    op.drop_column('party', 'logo')
| {
"content_hash": "889be7791911b0985f362b0c860c1f1c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 84,
"avg_line_length": 19.523809523809526,
"alnum_prop": 0.7195121951219512,
"repo_name": "teampopong/pokr.kr",
"id": "385b9ee54853f9273bfb31ce0eb615d24f2eedc3",
"size": "410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alembic/versions/3cea1b2cfa_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "216253"
},
{
"name": "HTML",
"bytes": "146548"
},
{
"name": "JavaScript",
"bytes": "143812"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "191129"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Discover admin.py modules in all installed apps so their ModelAdmins
# are registered before URL resolution.
admin.autodiscover()

urlpatterns = patterns('',
    # Django admin, mounted under the weixin prefix.
    url(r'^weixin/admin/', include(admin.site.urls)),
    # Everything else under /weixin/ is delegated to the weisite app.
    url(r'^weixin/', include('weisite.urls')),
)
| {
"content_hash": "b9c0aea444d3e5d6107d73c7db94441b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 26.444444444444443,
"alnum_prop": 0.7100840336134454,
"repo_name": "cnwarden/wei123",
"id": "549ac9fcf09f1af9ec98478d0e1854ddea634912",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wei123/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3008"
},
{
"name": "HTML",
"bytes": "10277"
},
{
"name": "Python",
"bytes": "26678"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
"""
=============
A-V sync test
=============
This example tests synchronization between the screen and the auditory/visual
playback. If a given machine (experimenta or development) is configured
correctly:
1. The inter-flip interval should be ``1 / refresh_rate``, i.e., ~16 ms for
a 60 Hz display.
2. The red rectangle should correspond to a typical credit card size
(~3 3/8" x 2 1/8").
If you test using an oscilloscope, which is required for actual subject
presentation:
1. There should be no jitter between the trigger and auditory or visual
display when hooking the auditory output and photodiode to an oscilloscope.
2. The auditory and visual onset should be aligned (no fixed delay between
them) when viewed with an oscilloscope.
A fixed trigger-to-AV delay can in principle be adjusted afterward via analysis
changes, and can be assessed using this script and an oscilloscope.
.. warning::
Fullscreen must be used to guarantee flip accuracy! Also, ensure that if
you are using a projector, your computer screen resolution (and
``"SCREEN_SIZE_PIX"``) are configured to use the native resolution of the
projector, as resolution conversion can lead to visual display jitter.
"""
# Author: Dan McCloy <drmccloy@uw.edu>
#
# License: BSD (3-clause)
import numpy as np
from expyfun import ExperimentController, building_doc
from expyfun.visual import Circle, Rectangle
import expyfun.analyze as ea
print(__doc__)

n_channels = 2
click_idx = [0]  # channel index/indices that carry the click
with ExperimentController('SyncTest', full_screen=True, noise_db=-np.inf,
                          participant='s', session='0', output_dir=None,
                          suppress_resamp=True, check_rms=None,
                          n_channels=n_channels, version='dev') as ec:
    click = np.r_[0.1, np.zeros(99)]  # RMS = 0.01
    data = np.zeros((n_channels, len(click)))
    data[click_idx] = click
    ec.load_buffer(data)
    pressed = None
    screenshot = None
    # Make a circle so that the photodiode can be centered on the screen
    circle = Circle(ec, 1, units='deg', fill_color='k', line_color='w')
    # Make a rectangle that is the standard credit card size
    rect = Rectangle(ec, [0, 0, 8.56, 5.398], 'cm', None, '#AA3377')
    while pressed != '8':  # enable a clean quit if required
        # Flip white then black, timing the two flips to report the
        # inter-flip interval (IFI).
        ec.set_background_color('white')
        t1 = ec.start_stimulus(start_of_trial=False)  # skip checks
        ec.set_background_color('black')
        t2 = ec.flip()
        diff = round(1000 * (t2 - t1), 2)  # IFI in milliseconds
        ec.screen_text('IFI (ms): {}'.format(diff), wrap=True)
        circle.draw()
        rect.draw()
        # Capture one screenshot on the first pass for plotting afterwards.
        screenshot = ec.screenshot() if screenshot is None else screenshot
        ec.flip()
        ec.stamp_triggers([2, 4, 8])
        ec.refocus()
        # When building docs, don't block on a keypress; act as if '8' quit.
        pressed = ec.wait_one_press(0.5)[0] if not building_doc else '8'
    ec.stop()
ea.plot_screen(screenshot)
| {
"content_hash": "b810b656f8ad1e8f8ddd5bf73d627689",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 37.506493506493506,
"alnum_prop": 0.6644736842105263,
"repo_name": "drammock/expyfun",
"id": "bbf181f35bc1cee7a653bf535b8dd7ca7874d927",
"size": "2888",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/sync/sync_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1433"
},
{
"name": "PowerShell",
"bytes": "895"
},
{
"name": "Python",
"bytes": "586829"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Intangible template for this draft schematic object."""
    obj = Intangible()
    obj.template = "object/draft_schematic/item/shared_item_clothing_station.iff"
    obj.attribute_template_id = -1
    obj.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
"content_hash": "2444b5a3e49ac79009268642ee3ccd2b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 23.846153846153847,
"alnum_prop": 0.6967741935483871,
"repo_name": "anhstudios/swganh",
"id": "cb5c8d8585e30432d3722f334ef9a535d052707f",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/item/shared_item_clothing_station.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import unittest
import gym
import numpy as np
import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
from ray.rllib.algorithms.dt.dt_torch_model import DTTorchModel
# Soft framework imports (RLlib convention); tf names appear unused in the
# visible tests here — only the torch path is exercised.
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
def _assert_outputs_equal(outputs):
for i in range(1, len(outputs)):
for key in outputs[0].keys():
assert np.allclose(
outputs[0][key], outputs[i][key]
), "outputs are different but they shouldn't be."
def _assert_outputs_not_equal(outputs):
for i in range(1, len(outputs)):
for key in outputs[0].keys():
assert not np.allclose(
outputs[0][key], outputs[i][key]
), "some outputs are the same but they shouldn't be."
def _generate_input_dict(B, T, obs_space, action_space):
    """Generate input_dict that has completely fake values."""
    # Deterministic observations: consecutive floats shaped (B, T, obs_dim).
    obs_dim = obs_space.shape[0]
    obs = np.arange(B * T * obs_dim, dtype=np.float32).reshape((B, T, obs_dim))

    # Actions: consecutive floats for Box spaces, cycling ints for Discrete.
    if isinstance(action_space, gym.spaces.Box):
        act_dim = action_space.shape[0]
        act = np.arange(B * T * act_dim, dtype=np.float32).reshape(
            (B, T, act_dim)
        )
    else:
        act = np.mod(
            np.arange(B * T, dtype=np.int32).reshape((B, T)), action_space.n
        )

    # Returns-to-go carry one extra timestep.
    rtg = np.arange(B * (T + 1), dtype=np.float32).reshape((B, T + 1, 1))
    # Per-row timesteps 0..T-1, and a fully-unmasked attention mask.
    timesteps = np.stack(
        [np.arange(T, dtype=np.int32) for _ in range(B)], axis=0
    )
    mask = np.ones((B, T), dtype=np.float32)

    input_dict = SampleBatch(
        {
            SampleBatch.OBS: obs,
            SampleBatch.ACTIONS: act,
            SampleBatch.RETURNS_TO_GO: rtg,
            SampleBatch.T: timesteps,
            SampleBatch.ATTENTION_MASKS: mask,
        }
    )
    return convert_to_torch_tensor(input_dict)
class TestDTModel(unittest.TestCase):
    """Unit tests for DTTorchModel: init/dropout behavior, prediction and
    target shapes, and causal attention masking."""

    @classmethod
    def setUpClass(cls):
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_torch_model_init(self):
        """Test models are initialized properly"""
        model_config = {
            "embed_dim": 32,
            "num_layers": 2,
            "max_seq_len": 4,
            "max_ep_len": 10,
            "num_heads": 2,
            "embed_pdrop": 0.1,
            "resid_pdrop": 0.1,
            "attn_pdrop": 0.1,
            "use_obs_output": False,
            "use_return_output": False,
        }

        num_outputs = 2
        observation_space = gym.spaces.Box(-1.0, 1.0, shape=(num_outputs,))

        action_dim = 5
        action_spaces = [
            gym.spaces.Box(-1.0, 1.0, shape=(action_dim,)),
            gym.spaces.Discrete(action_dim),
        ]

        B, T = 3, 4

        for action_space in action_spaces:
            # Generate input dict.
            input_dict = _generate_input_dict(B, T, observation_space, action_space)

            # Do random initialization a few times and make sure outputs are different
            outputs = []
            for _ in range(10):
                model = DTTorchModel(
                    observation_space,
                    action_space,
                    num_outputs,
                    model_config,
                    "model",
                )
                # so dropout is not in effect
                model.eval()
                model_out, _ = model(input_dict)
                output = model.get_prediction(model_out, input_dict)
                outputs.append(convert_to_numpy(output))
            _assert_outputs_not_equal(outputs)

            # Initialize once and make sure dropout is working
            model = DTTorchModel(
                observation_space,
                action_space,
                num_outputs,
                model_config,
                "model",
            )

            # Dropout should make outputs different in training mode
            model.train()
            outputs = []
            for _ in range(10):
                model_out, _ = model(input_dict)
                output = model.get_prediction(model_out, input_dict)
                outputs.append(convert_to_numpy(output))
            _assert_outputs_not_equal(outputs)

            # Dropout should make outputs the same in eval mode
            model.eval()
            outputs = []
            for _ in range(10):
                model_out, _ = model(input_dict)
                output = model.get_prediction(model_out, input_dict)
                outputs.append(convert_to_numpy(output))
            _assert_outputs_equal(outputs)

    def test_torch_model_prediction_target(self):
        """Test the get_prediction and get_targets function."""
        model_config = {
            "embed_dim": 16,
            "num_layers": 3,
            "max_seq_len": 3,
            "max_ep_len": 9,
            "num_heads": 1,
            "embed_pdrop": 0.2,
            "resid_pdrop": 0.2,
            "attn_pdrop": 0.2,
            "use_obs_output": True,
            "use_return_output": True,
        }

        num_outputs = 5
        observation_space = gym.spaces.Box(-1.0, 1.0, shape=(num_outputs,))

        action_dim = 2
        action_spaces = [
            gym.spaces.Box(-1.0, 1.0, shape=(action_dim,)),
            gym.spaces.Discrete(action_dim),
        ]

        B, T = 2, 3

        for action_space in action_spaces:
            # Generate input dict.
            input_dict = _generate_input_dict(B, T, observation_space, action_space)

            # Make model and forward pass.
            model = DTTorchModel(
                observation_space,
                action_space,
                num_outputs,
                model_config,
                "model",
            )
            model_out, _ = model(input_dict)
            preds = model.get_prediction(model_out, input_dict)
            target = model.get_targets(model_out, input_dict)

            preds = convert_to_numpy(preds)
            target = convert_to_numpy(target)

            # Test the content and shape of output and target
            if isinstance(action_space, gym.spaces.Box):
                # test preds shape
                self.assertEqual(preds[SampleBatch.ACTIONS].shape, (B, T, action_dim))
                # test target shape and content
                self.assertEqual(target[SampleBatch.ACTIONS].shape, (B, T, action_dim))
                assert np.allclose(
                    target[SampleBatch.ACTIONS],
                    input_dict[SampleBatch.ACTIONS],
                )
            else:
                # test preds shape
                self.assertEqual(preds[SampleBatch.ACTIONS].shape, (B, T, action_dim))
                # test target shape and content
                self.assertEqual(target[SampleBatch.ACTIONS].shape, (B, T))
                assert np.allclose(
                    target[SampleBatch.ACTIONS],
                    input_dict[SampleBatch.ACTIONS],
                )

            # test preds shape
            self.assertEqual(preds[SampleBatch.OBS].shape, (B, T, num_outputs))
            # test target shape and content
            self.assertEqual(target[SampleBatch.OBS].shape, (B, T, num_outputs))
            assert np.allclose(
                target[SampleBatch.OBS],
                input_dict[SampleBatch.OBS],
            )

            # test preds shape
            self.assertEqual(preds[SampleBatch.RETURNS_TO_GO].shape, (B, T, 1))
            # test target shape and content: targets are the next-step RTG.
            self.assertEqual(target[SampleBatch.RETURNS_TO_GO].shape, (B, T, 1))
            assert np.allclose(
                target[SampleBatch.RETURNS_TO_GO],
                input_dict[SampleBatch.RETURNS_TO_GO][:, 1:, :],
            )

    def test_causal_masking(self):
        """Test that the transformer model' causal masking works."""
        model_config = {
            "embed_dim": 16,
            "num_layers": 2,
            "max_seq_len": 4,
            "max_ep_len": 10,
            "num_heads": 2,
            "embed_pdrop": 0,
            "resid_pdrop": 0,
            "attn_pdrop": 0,
            "use_obs_output": True,
            "use_return_output": True,
        }

        observation_space = gym.spaces.Box(-1.0, 1.0, shape=(4,))
        action_space = gym.spaces.Box(-1.0, 1.0, shape=(2,))
        B = 2
        T = model_config["max_seq_len"]

        # Generate input dict.
        input_dict = _generate_input_dict(B, T, observation_space, action_space)

        # make model and forward with attention
        model = DTTorchModel(
            observation_space,
            action_space,
            4,
            model_config,
            "model",
        )
        model_out, _ = model(input_dict)
        preds = model.get_prediction(model_out, input_dict, return_attentions=True)
        preds = convert_to_numpy(preds)

        # test properties of attentions
        attentions = preds["attentions"]
        self.assertEqual(
            len(attentions),
            model_config["num_layers"],
            "there should as many attention tensors as layers.",
        )

        # used to select the causal padded element of each attention tensor
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24
        # (AttributeError at runtime); the builtin bool is the documented
        # replacement and is behaviorally identical here.
        select_mask = np.triu(np.ones((3 * T, 3 * T), dtype=bool), k=1)
        select_mask = np.tile(select_mask, (B, model_config["num_heads"], 1, 1))

        for attention in attentions:
            # check shape
            self.assertEqual(
                attention.shape, (B, model_config["num_heads"], T * 3, T * 3)
            )
            # check the upper triangular masking
            assert np.allclose(
                attention[select_mask], 0.0
            ), "masked elements should be zero."
            # check that the non-masked elements have non 0 scores
            # Note: it is very unlikely that randomly initialized weights will make
            # one of the scores be 0, as these scores are probabilities.
            assert not np.any(
                np.isclose(attention[np.logical_not(select_mask)], 0.0)
            ), "non masked elements should be nonzero."
if __name__ == "__main__":
    import pytest
    import sys

    # Allow running this test file directly; exit with pytest's status code.
    sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "4726a4380d4210ca3e57a75dc2c8b44c",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 87,
"avg_line_length": 34.69435215946844,
"alnum_prop": 0.5338504261227617,
"repo_name": "ray-project/ray",
"id": "a08eee880bab3c21d06da6c8b756443cb8289c24",
"size": "10443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/algorithms/dt/tests/test_dt_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
import sys
import io
import versioneer
# Configure versioneer before get_version()/get_cmdclass() are called.
versioneer.VCS = 'git'
versioneer.versionfile_source = 'teensyio/_version.py'
versioneer.versionfile_build = 'teensyio/_version.py'
versioneer.tag_prefix = ''  # tags are like 1.2.0
versioneer.parentdir_prefix = 'teensyio-'  # dirname like 'myproject-1.2.0'

try:
    from setuptools import setup, find_packages
except ImportError:
    print("Please install or upgrade setuptools or pip")
    sys.exit(1)

# Read the long-description fragments. Fixed: the originals called
# io.open(...).read() without closing, leaking the file handles; use
# context managers so they are closed deterministically.
with io.open('README.rst', mode='r', encoding='utf-8') as f:
    readme = f.read()

doclink = """
Documentation
-------------
The full documentation is at http://teensyio.rtfd.org."""

with io.open('HISTORY.rst', mode='r', encoding='utf-8') as f:
    history = f.read().replace('.. :changelog:', '')

# Use cmdclass.update to add additional commands as necessary. See
# https://docs.python.org/2/distutils/extending.html#integrating-new-commands
cmdclass = versioneer.get_cmdclass()

setup(
    name='teensyio',
    version=versioneer.get_version(),
    description='A clean interface for controlling data acquisition using the Teensy 3.1.',
    long_description=readme + '\n\n' + doclink + '\n\n' + history,
    license='MIT',
    author='Ryan Dwyer',
    author_email='ryanpdwyer@gmail.com',
    url='https://github.com/ryanpdwyer/teensyio',
    zip_safe=False,
    include_package_data=True,
    # This lets setuptools include_package_data work with git
    setup_requires=["setuptools_git >= 0.3"],
    packages=find_packages(),
    # Add requirements here. If the requirement is difficult to install,
    # add to docs/conf.py MAGIC_MOCK, and .travis.yml 'conda install ...'
    install_requires=['numpy', 'scipy', 'pySerial', 'matplotlib', 'pandas'],
    tests_require=['nose'],
    test_suite='nose.collector',
    cmdclass=cmdclass,
    keywords='teensyio',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
)
| {
"content_hash": "17ad30a559ac734cd07b56260e70ae9d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 91,
"avg_line_length": 34.266666666666666,
"alnum_prop": 0.6697470817120622,
"repo_name": "ryanpdwyer/teensyio",
"id": "ec9e1213ecc5e494e4cedfb14ef2ccebb553ffc9",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2293"
},
{
"name": "Python",
"bytes": "60536"
}
],
"symlink_target": ""
} |
from django.conf import settings
#See http://docs.cksource.com/ckeditor_api/symbols/CKEDITOR.config.html for all settings
CKEDITOR_SETTINGS = getattr(settings, 'CKEDITOR_SETTINGS', {
'language': '{{ language }}',
'toolbar': 'CMS',
'skin': 'moono',
# 'stylesSet': [
# {'name': 'Custom Style', 'element': 'h3', 'styles': {'color': 'Blue'}}
# ],
'toolbarCanCollapse': False,
})
TEXT_SAVE_IMAGE_FUNCTION = getattr(settings, 'TEXT_SAVE_IMAGE_FUNCTION', 'djangocms_text_ckeditor.picture_save.create_picture_plugin')
| {
"content_hash": "4cd4b4cb67f9d6ab07a25b3ba236ae70",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 134,
"avg_line_length": 36.13333333333333,
"alnum_prop": 0.6660516605166051,
"repo_name": "timgraham/djangocms-text-ckeditor",
"id": "07c2d160ed0e0754a577fcc9275caf00cfd1af47",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_text_ckeditor/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13885"
},
{
"name": "JavaScript",
"bytes": "20853"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "101217"
}
],
"symlink_target": ""
} |
import json
from tempest.common.rest_client import RestClient
class HypervisorV3ClientJSON(RestClient):

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(HypervisorV3ClientJSON, self).__init__(config, username,
                                                     password, auth_url,
                                                     tenant_name)
        self.service = self.config.compute.catalog_v3_type

    def _get_hypervisor_resource(self, url, body_key):
        # Shared GET helper: fetch ``url``, decode the JSON body, and
        # return (resp, body[body_key]).
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body[body_key]

    def get_hypervisor_list(self):
        """List hypervisors information."""
        return self._get_hypervisor_resource('os-hypervisors', 'hypervisors')

    def get_hypervisor_list_details(self):
        """Show detailed hypervisors information."""
        return self._get_hypervisor_resource('os-hypervisors/detail',
                                             'hypervisors')

    def get_hypervisor_show_details(self, hyper_id):
        """Display the details of the specified hypervisor."""
        return self._get_hypervisor_resource('os-hypervisors/%s' % hyper_id,
                                             'hypervisor')

    def get_hypervisor_servers(self, hyper_name):
        """List instances belonging to the specified hypervisor."""
        return self._get_hypervisor_resource(
            'os-hypervisors/%s/servers' % hyper_name, 'hypervisor')

    def get_hypervisor_stats(self):
        """Get hypervisor statistics over all compute nodes."""
        return self._get_hypervisor_resource('os-hypervisors/statistics',
                                             'hypervisor_statistics')

    def get_hypervisor_uptime(self, hyper_id):
        """Display the uptime of the specified hypervisor."""
        return self._get_hypervisor_resource(
            'os-hypervisors/%s/uptime' % hyper_id, 'hypervisor')

    def search_hypervisor(self, hyper_name):
        """Search specified hypervisor."""
        return self._get_hypervisor_resource(
            'os-hypervisors/search?query=%s' % hyper_name, 'hypervisors')
| {
"content_hash": "02cc0d211b43776e865e5a8660143388",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 39.44444444444444,
"alnum_prop": 0.612206572769953,
"repo_name": "armando-migliaccio/tempest",
"id": "fa1255a5e00573e3f7c8e7922ef5ff3e28dc5410",
"size": "2807",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/services/compute/v3/json/hypervisor_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1966096"
},
{
"name": "Shell",
"bytes": "5228"
}
],
"symlink_target": ""
} |
import sys
import time
import nagios_common
import switchvox_common
import logging
# Default to WARNING so plugin stdout stays clean; debug lines below only
# appear when the level is lowered.
logging.basicConfig(level=logging.WARNING)
class SwitchvoxNagiosInterface(object):
    """
    Basic, standard, foundation of switchvox-nagios plugin object.

    Subclasses implement generate_req() (and typically parse_options() and
    check_condition()) to define a concrete check.
    """

    # Set it as a class variable. Should be more than fine.
    @classmethod
    def _get_options_parser(cls):
        # Build the common option parser, using the subclass docstring as
        # the --help description.
        parser = nagios_common.get_options_parser()
        parser.set_description(cls.__doc__)
        return parser

    def _parser_cleanup(self, parser):
        # No CLI arguments at all: print usage and exit with status 3
        # (Nagios UNKNOWN).
        if not sys.argv[1:]:
            parser.print_help()
            sys.exit(3)

    @classmethod
    def get_errors(cls, response):
        # Fixed: the first parameter is named ``cls`` but the method was
        # not declared a classmethod, so it actually received the instance.
        # Instance calls (self.get_errors(...)) keep working unchanged.
        return switchvox_common.get_errors(response=response)

    def _get_response(self):
        # Issue the request using the credentials/hostname populated by
        # parse_options().
        requester = switchvox_common.switchvox_request
        response = requester(username=self.username,
                             password=self.password,
                             json_req=self.generate_req(),
                             hostname=self.hostname)
        return response

    def generate_req(self):
        """
        Generates request object based on parse options results.
        """
        raise NotImplementedError

    def run_nagios_check(self):
        """Parse options, time the request, and report via nagios_return."""
        # Populates necessary variables to instance.
        parser = self.parse_options()
        self._parser_cleanup(parser)
        # time.time() is wall-clock; adequate for a coarse request timer.
        start = time.time()
        response = self._get_response()
        logging.debug(response)
        end = time.time()
        self._request_timer = end - start
        nagios_common.nagios_return(**self.check_condition(response))
class outboundCall(SwitchvoxNagiosInterface):
    """
    Abstract class that defines majority of stuff to make outbound call.
    Only thing missing is specific check conditions. ie what qualifies as success or failure.
    """

    def __init__(self):
        pass

    def parse_options(self):
        """Extend the generic parser with outbound-call options and
        mirror every parsed value onto the instance."""
        parser = self._get_options_parser()
        # (dest, help text, extra kwargs) for each integer option.
        int_options = [
            ("dial_first", "number to dial first", {}),
            ("dial_second", "number to dial second", {}),
            ("timeout", "call timeout to send to switchvox", {"default": 20}),
            ("dial_as_account_id", "account id to use for dial", {}),
        ]
        for dest, help_text, extra in int_options:
            parser.add_option("", "--" + dest, dest=dest,
                              type="int", help=help_text, **extra)
        options, _args = parser.parse_args()
        # Copy every known option value onto self so other methods
        # (generate_req, _get_response) can read them as attributes.
        for name in parser.defaults:
            setattr(self, name, getattr(options, name))
        return parser

    def generate_req(self):
        """Build the switchvox.call request from the parsed options."""
        parameters = {
            "ignore_user_call_rules": "1",
            "ignore_user_api_settings": "1",
            "timeout": self.timeout,
            "dial_as_account_id": self.dial_as_account_id,
            "dial_first": self.dial_first,
            "dial_second": self.dial_second,
        }
        return switchvox_common.request_form(method="switchvox.call",
                                             parameters=parameters)

    def check_condition(self, response):
        """Decide success/failure for the call; concrete subclasses
        must implement this."""
        raise NotImplementedError
| {
"content_hash": "d564ff736e73da6fac641dd2c5ceceac",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 94,
"avg_line_length": 32.18260869565217,
"alnum_prop": 0.5711969737908673,
"repo_name": "psywhale/pyswitchvox",
"id": "ddde6ebf72bdfea28f1f9223231ed96916275048",
"size": "3701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sv_nag_abstract.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25141"
}
],
"symlink_target": ""
} |
import django_filters
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from api.pagination import LargeResultsSetPagination
from api.permissions import IsUser
from api.serializers import NotificationSerializer
from api.models import Notification
class NotificationFilter(django_filters.FilterSet):
    """FilterSet exposing exact-match query parameters for Notification."""
    class Meta:
        model = Notification
        fields = ['id', 'type', 'created', 'title', 'description', 'user', 'xplevel', 'badge',]
class NotificationViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Notification objects.

    Uses token authentication, the project-defined IsUser permission,
    large-page pagination, and NotificationFilter for query filtering.
    """
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
    pagination_class = LargeResultsSetPagination
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsUser,)
    filter_class = NotificationFilter
"content_hash": "1776e4c798003220dac320fd06ae69ed",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 95,
"avg_line_length": 37.68,
"alnum_prop": 0.7887473460721869,
"repo_name": "Oinweb/py-fly",
"id": "2baa430e5898483ee927bc9dde5317a8928b12b5",
"size": "942",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "api/views/notification.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "187803"
},
{
"name": "HTML",
"bytes": "331298"
},
{
"name": "JavaScript",
"bytes": "136579"
},
{
"name": "Python",
"bytes": "214171"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(Form):
    """Sign-in form: email + password, with an optional persistent session."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class RegistrationForm(Form):
    """Sign-up form with uniqueness checks on email and username."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                            Email()])
    username = StringField('Username', validators=[
        Required(), Length(3, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[
        Required(), EqualTo('password2', message='Passwords must match.')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')

    # WTForms automatically invokes validate_<fieldname> methods as extra
    # inline validators for the matching field.
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ChangePasswordForm(Form):
    """Change password for a logged-in user; requires the old password."""
    old_password = PasswordField('Old password', validators=[Required()])
    password = PasswordField('New password', validators=[
        Required(), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm new password', validators=[Required()])
    submit = SubmitField('Update Password')
class PasswordResetRequestForm(Form):
    """Request a password-reset email for the given address."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    submit = SubmitField('Reset Password')
class PasswordResetForm(Form):
    """Set a new password after following a reset link."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    password = PasswordField('New Password', validators=[
        Required(), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Reset Password')

    # Inline validator (WTForms validate_<fieldname> convention): the
    # address must belong to a known account.
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
class ChangeEmailForm(Form):
    """Change the account email; re-authenticates with the password."""
    email = StringField('New Email', validators=[Required(), Length(1, 64),
                                                 Email()])
    password = PasswordField('Password', validators=[Required()])
    submit = SubmitField('Update Email Address')

    # Inline validator: reject addresses already attached to an account.
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
| {
"content_hash": "1425e57c0abea828cac95517f68a9ed9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 43.59722222222222,
"alnum_prop": 0.6259955399808856,
"repo_name": "oztalha/dd-css",
"id": "68e51b4aaf6f946e5c2a2df5a392be03d4e58cd9",
"size": "3139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36984"
},
{
"name": "HTML",
"bytes": "34364"
},
{
"name": "JavaScript",
"bytes": "1413"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "64575"
}
],
"symlink_target": ""
} |
from codecs import open
import os
import re
from setuptools import setup
# Pull __version__ out of the package without importing it.
with open(os.path.join('tr', '__init__.py'), 'r', encoding='utf8') as f:
    version = re.compile(
        r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1)

# Read the long description up front inside context managers so the file
# handles are closed (the original passed open() results straight into
# setup(), leaking both handles).
with open('README.rst', encoding='utf8') as f:
    readme = f.read()
with open('CHANGES.rst', encoding='utf8') as f:
    changes = f.read()

setup(
    name='python-tr',
    packages=['tr'],
    version=version,
    license='MIT License',
    platforms=['POSIX', 'Windows', 'Unix', 'MacOS'],
    description='A Pure-Python implementation of the tr algorithm',
    author='Yukino Ikegami',
    author_email='yknikgm@gmail.com',
    url='https://github.com/ikegami-yukino/python-tr',
    keywords=['tr', 'transliterate', 'translate'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS',
        'Operating System :: Microsoft',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Text Processing :: General'
    ],
    long_description='%s\n\n%s' % (readme, changes)
)
| {
"content_hash": "08a7a6e3a8fb622a54cbd32ea98dbcc1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 37.3421052631579,
"alnum_prop": 0.5835095137420718,
"repo_name": "ikegami-yukino/python-tr",
"id": "9259e514b735f0f0c011159f4ed8a9703ef3db51",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6565"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Site(models.Model):
    # A website, identified only by its URL (test fixture model).
    url = models.CharField(max_length=100)
class User(models.Model):
    # Minimal stand-in user model for the test suite.
    username = models.CharField(max_length=100)
class AccessKey(models.Model):
    # Opaque key that can optionally be attached to a Profile.
    key = models.CharField(max_length=100)
class Profile(models.Model):
    """Per-user profile linking a User to Sites and an optional AccessKey."""
    sites = models.ManyToManyField(Site)
    # Explicit on_delete matches Django's pre-2.0 implicit CASCADE default
    # (so behaviour is unchanged) and is required from Django 2.0 onward;
    # also consistent with Tag/CustomPK below, which already pass it.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    access_key = models.ForeignKey(AccessKey, null=True,
                                   on_delete=models.CASCADE)
class Avatar(models.Model):
    """An image attached to a Profile (reverse accessor: profile.avatars)."""
    image = models.CharField(max_length=100)
    # Explicit on_delete matches the pre-Django-2.0 implicit CASCADE
    # default (behaviour unchanged) and is required from Django 2.0.
    profile = models.ForeignKey(Profile, related_name='avatars',
                                on_delete=models.CASCADE)
class Tag(models.Model):
    """Slug tag attachable to any model through a generic foreign key."""
    tag = models.SlugField()
    # content_type + object_id together form the generic relation target.
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE
    )
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
class TaggedItem(models.Model):
    # Example model exposing the reverse side of the generic Tag relation.
    tags = GenericRelation(Tag)
class Team(models.Model):
    # Named group of users (plain many-to-many, no through model).
    name = models.CharField(max_length=100)
    members = models.ManyToManyField(User)
class CustomPK(models.Model):
    # Model with a non-integer (slug) primary key, for serializer tests.
    slug = models.SlugField(
        primary_key=True,
    )
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='custompks',
    )
| {
"content_hash": "c4331853d5bc1354f84d03cd390fde18",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 81,
"avg_line_length": 23.803571428571427,
"alnum_prop": 0.7096774193548387,
"repo_name": "dynamomobile/drf-writable-nested",
"id": "ae8e28068ab55ad46fa2d234979d610cdb727ee5",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37199"
}
],
"symlink_target": ""
} |
import calvin.runtime.south.calvinsys.ui.uicalvinsys as ui
from calvin.runtime.south.calvinsys import base_calvinsys_object
class StandardOut(base_calvinsys_object.BaseCalvinsysObject):
    """
    StandardOut - Virtual console device.

    Forwards every written value (optionally prefixed) to the Calvin UI
    via uicalvinsys rather than to a real terminal.
    """

    # JSON schema for init(): optional visual definition and prefix string.
    init_schema = {
        "type": "object",
        "properties": {
            "ui_def": {
                "description": "Visual appearance",
                "type": "object",
            },
            "prefix": {
                "description": "String to prefix all data",
                "type": "string"
            }
        },
        "description": "Send all incoming data to terminal (usually not very useful)"
    }

    can_write_schema = {
        "description": "Always true",
        "type": "boolean"
    }

    # Any JSON-serializable value may be written.
    write_schema = {
        "description": "Write data to standard out",
        "type": ["boolean", "integer", "number", "string", "array", "object", "null"]
    }

    def init(self, ui_def=None, prefix=None, **kwargs):
        self._prefix = prefix
        # Register this actor as a UI actuator so update_ui() has a target.
        ui.register_actuator(self.actor, ui_def)

    def can_write(self):
        # Writing never blocks.
        return True

    def write(self, data=None):
        # Note: falsy data (0, False, "", None, empty containers) falls
        # through to the prefix-only branch, so such values are not shown.
        msg = ""
        if data and self._prefix:
            msg = "{}: {}".format(self._prefix, data)
        elif data:
            msg = "{}".format(data)
        elif self._prefix:
            msg = "{}".format(self._prefix)
        ui.update_ui(self.actor, msg)

    def close(self):
        # Nothing to release; registration cleanup is handled elsewhere.
        pass
| {
"content_hash": "2af9f23f24dc54689626c289f148edb1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 85,
"avg_line_length": 27.50943396226415,
"alnum_prop": 0.5294924554183813,
"repo_name": "EricssonResearch/calvin-base",
"id": "e7f1d5987a0661d710d05a79d918ae5021398c7d",
"size": "2063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvinextras/calvinsys/ui/StandardOut.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the EmailInvitation table."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='EmailInvitation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_dt', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('email', models.EmailField(max_length=254, db_index=True)),
                ('updated_dt', models.DateTimeField(auto_now=True, db_index=True)),
                # token: random UUID embedded in the invitation link.
                ('token', models.UUIDField(default=uuid.uuid4, unique=True, editable=False, db_index=True)),
                ('status', models.CharField(default=b'new', max_length=10, choices=[(b'new', b'new'), (b'accepted', b'accepted'), (b'rejected', b'rejected')])),
                ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {
"content_hash": "b1ea99e08025faf718595d2b27b8f43c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 160,
"avg_line_length": 41.407407407407405,
"alnum_prop": 0.6135957066189625,
"repo_name": "Socialsquare/RunningCause",
"id": "8ef628ba127f30395145bf427a274726dbb7e21f",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invitations/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32981"
},
{
"name": "HTML",
"bytes": "97326"
},
{
"name": "JavaScript",
"bytes": "50418"
},
{
"name": "Python",
"bytes": "132614"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for densitymapbox.colorbar.yanchor."""

    def __init__(
        self, plotly_name="yanchor", parent_name="densitymapbox.colorbar", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["top", "middle", "bottom"])
        super(YanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
| {
"content_hash": "f2410aadb19362d27aca11d73a3e59cb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.6059405940594059,
"repo_name": "plotly/plotly.py",
"id": "1e3e73b8133a60efe1245f2754f0dbf8e0a3cd6d",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/colorbar/_yanchor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import django_filters
from rest_framework import filters
from bulbs.super_features.utils import get_superfeature_model
SUPERFEATURE_MODEL = get_superfeature_model()
def filter_status(queryset, value):
    """Filter a superfeature queryset by its computed status.

    ``status`` is a property rather than a model field (see
    http://stackoverflow.com/a/1205416), so a truthy value forces a
    Python-level scan and returns a plain list; a falsy value returns
    the queryset untouched.
    """
    if not value:
        return queryset
    wanted = value.lower()
    return [feature for feature in queryset
            if feature.status.lower() == wanted]
class SuperFeatureFilter(filters.FilterSet):
    """FilterSet adding a ?status= query parameter backed by filter_status."""
    status = django_filters.CharFilter(action=filter_status)

    class Meta:
        model = SUPERFEATURE_MODEL
        fields = ['status']
| {
"content_hash": "2b277b1c5bbe14bb01785c8d6e9fbfb1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 27.96,
"alnum_prop": 0.6824034334763949,
"repo_name": "theonion/django-bulbs",
"id": "044f0235357b044ed1732e545a8151c7868c1e8e",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulbs/super_features/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36651"
},
{
"name": "HTML",
"bytes": "73968"
},
{
"name": "JavaScript",
"bytes": "57288"
},
{
"name": "Python",
"bytes": "1055540"
},
{
"name": "Ruby",
"bytes": "397"
},
{
"name": "Shell",
"bytes": "1629"
}
],
"symlink_target": ""
} |
import IECore
## The Preset class serves as a base class for the implementation of
## 'presets'. In a nutshell, they are callable classes that manipulate
## the parameters of a Parameterised object. \see BasicPreset for an
## implementation that provides parameter value loading an saving, with
## support for Class and ClassVector parameters.
##
## Presets themselves are Parametersied objects, to allow them to have
## their own parameters to control how they might be applied.
class Preset( IECore.Parameterised ) :
    """Base class for presets: callable Parameterised objects that apply
    stored values to another Parameterised object's parameters."""

    def __init__( self, description="" ) :
        IECore.Parameterised.__init__( self, description )

    ## \return Presets may return a dictionary of arbitrary metadata
    ## to describe their contents/function. The default implementation
    ## simply sets "title" to the class name.
    ## NOTE(review): the value actually stored is the class object
    ## (self.__class__), not its name string.
    def metadata( self ) :
        return { "title" : self.__class__ }

    ## \return True if the Preset can be applied to the given rootParameter
    ## on the given parameterised object, otherwise False.
    ## Subclasses must implement this.
    def applicableTo( self, parameterised, rootParameter ) :
        raise NotImplementedError

    ## Applies the preset to the specified parameterised and
    ## root parameter. Subclasses must implement this.
    def __call__( self, parameterised, rootParameter ) :
        raise NotImplementedError
# Register Preset with Cortex's RunTimeTyped system so instances carry
# proper type ids like other IECore classes.
IECore.registerRunTimeTyped( Preset )
| {
"content_hash": "d4d6143bedd7dafc1a0463dcbd182829",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 73,
"avg_line_length": 33.578947368421055,
"alnum_prop": 0.7507836990595611,
"repo_name": "appleseedhq/cortex",
"id": "fe8caa9541492c17e8bb30ed4218355e15872780",
"size": "3059",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/IECore/Preset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
"""
Example of scripting DFM and DWAQ
This script runs a simple DFM domain (square cartesian grid, with
oscillating flow boundary in one corner.), and then a DWAQ dye
release simulation.
It then sets up a tracer run with a spatially-variable initial
condition and runs it for the duration of the hydro.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from stompy.grid import unstructured_grid
import stompy.plot.cmap as scmap
import stompy.model.hydro_model as hm
import stompy.model.delft.waq_scenario as dwaq
import stompy.model.delft.dflow_model as dfm
# DEV
import six
six.moves.reload_module(hm)
six.moves.reload_module(dfm)
six.moves.reload_module(dwaq)
##
# put machine specific paths and settings in local_config.py
import local_config
local_config.install()
##
# DFM
def base_model(force=True,num_procs=1,run_dir='dfm_run'):
    """
    Create or load a simple DFM hydro run for testing purposes.
    If the run already exists and ran to completion, simply load the existing model.
    force: if True, force a re-run even if the run exists and completed.
    num_procs: if >1, attempt MPI run.
    run_dir: path to location of the run.
    Returns the (possibly freshly run) DFlowModel instance.
    """
    if (not force) and dfm.DFlowModel.run_completed(run_dir):
        model=dfm.DFlowModel.load(run_dir)
        return model
    # construct a very basic hydro run
    g=unstructured_grid.UnstructuredGrid(max_sides=4)
    # rectilinear grid
    L=500
    ret=g.add_rectilinear([0,0],[L,L],50,50)
    # sloping N-S from -10 at y=0 to -5 at y=500
    # node_z_bed will be used by the DFlowModel script to set bathymetry.
    # This is a positive-up quantity
    g.add_node_field('node_z_bed',-10 + g.nodes['x'][:,1] * 5.0/L)
    model=dfm.DFlowModel()
    # Load defaults from a template:
    model.load_template('dflow-template.mdu')
    model.set_grid(g)
    # 3D, 5 layers. Defaults to sigma, evenly spaced
    model.mdu['geometry','Kmx']=5
    model.num_procs=num_procs
    # pristine means clean existing files that might get in the way
    model.set_run_dir(run_dir, mode='pristine')
    model.run_start=np.datetime64("2018-01-01 00:00")
    model.run_stop =np.datetime64("2018-01-03 00:00")
    dt=np.timedelta64(300,'s')
    # Time axis padded 20 steps beyond the run period so boundary
    # condition interpolation never falls off the ends.
    t=np.arange(model.run_start-20*dt,
                model.run_stop +20*dt,
                dt)
    # Add a periodic flow boundary condition. 4h period
    periodic_Q=10*np.sin((t-t[0])/np.timedelta64(1,'s') * 2*np.pi/(4*3600.))
    Q=xr.DataArray(periodic_Q,dims=['time'],coords={'time':t})
    # enters the domain over 100m along one edge
    # the name is important here -- it can be used when setting up the
    # DWAQ run (where it will become 'inflow_flow')
    inflow=hm.FlowBC(name='inflow',
                     geom=np.array([ [0,0],[0,100]]),
                     flow=Q)
    # just a little salt, to get some baroclinicity but nothing crazy
    inflow_salt=hm.ScalarBC(parent=inflow,scalar='salinity',value=2.0)
    model.add_bcs([inflow,inflow_salt])
    # Also add a steady source BC with a temperature signature
    point_src=hm.SourceSinkBC(name='pnt_source',
                              geom=np.array([300,300] ),
                              flow=10)
    point_src_temp=hm.ScalarBC(parent=point_src,scalar='temperature',value=10.0)
    model.add_bcs([point_src,point_src_temp])
    model.projection='EPSG:26910' # some steps want a projection, though in this case it doesn't really matter.
    model.mdu['geometry','WaterLevIni']=0.0
    # turn on DWAQ output at half-hour steps
    model.mdu['output','WaqInterval']=1800
    # and map output at the same interval
    model.mdu['output','MapInterval']=1800
    # Write out the model setup
    model.write()
    # Some preprocessing (this is necessary even if it's not an MPI run)
    model.partition()
    # Do it
    output=model.run_model()
    # Check to see that it actually ran.
    if not model.is_completed():
        print(output.decode())
        raise Exception('Model run failed')
    return model
# Run/load small hydro run:
model=base_model(force=True)

##

# 'model' represents the whole DFM model.
# dwaq.Hydro, and subclasses like dwaq.HydroFiles, represent
# the hydro information used by DWAQ.
# model.hyd_output() is the path of the .hyd file DFM wrote
# (enabled by the WaqInterval setting above).
base_hydro=dwaq.HydroFiles(model.hyd_output())
##
# Design a tracer release
# this is a simple gaussian blob.
def release_conc_fn(X, center=(250.0, 250.0), width=50.0):
    """
    Evaluate a Gaussian dye-release blob at points X.

    X: array of coordinates with shape (..., 2) or (..., 3); any z
       coordinate is ignored.
    center: (x, y) location of the blob's peak. Defaults match the
       original hard-coded release location.
    width: e-folding length scale of the blob.

    Returns concentrations normalized so the maximum value is 1.
    """
    X = np.asarray(X)[..., :2]  # drop z coordinate if it's there
    X0 = np.asarray(center, dtype=float)
    c = np.exp(-((X - X0) ** 2).sum(axis=-1) / width**2)
    return c / c.max()  # make max value 1
# Get the grid that DWAQ will use:
grid=base_hydro.grid()
# and evaluate the gaussian at the centers of its cells.
C=release_conc_fn(grid.cells_center())

# Could plot that like this, as a sanity check:
# fig=plt.figure(1)
# fig.clf()
# ax=fig.add_subplot(1,1,1)
# grid.plot_cells(values=C,cmap='jet',ax=ax)
# ax.axis('equal')

##
# Set up the DWAQ run, pointing it to the hydro instance:
wm=dwaq.WaqModel(overwrite=True,
                 base_path='dwaq_run',
                 hydro=base_hydro)
# Model will default to running for the full period of the hydro.
# Adjust start time to get a short spinup...
wm.start_time += np.timedelta64(2*3600,'s')
# Create the dye tracer with initial condition.
# Note that C was calculated as a 2D tracer above, but here
# it is used as a 3D per-segment tracer. For 2D you could get
# away with that, but safer to have the Hydro instance convert
# 2D (element) to 3D (segment)
C_3d=base_hydro.extrude_element_to_segment(C)
# boundary condition will default to 0.0
wm.substances['dye1']=dwaq.Substance(initial=C_3d)
# uniform tracer:
wm.substances['unity']=dwaq.Substance(initial=1.0)
# and a tracer set on the boundary flows. Initial defaults to 0.0
wm.substances['boundary_dye']=dwaq.Substance()
wm.add_bc(['inflow'],'boundary_dye',1.0)
wm.map_output += ('salinity','temp')

# Write hydro and input files, run the two delwaq stages, then convert
# the binary map output to netCDF for plotting below.
wm.cmd_write_hydro()
wm.cmd_write_inp()
wm.cmd_delwaq1()
wm.cmd_delwaq2()
wm.write_binary_map_nc()
##
# Open the map output
ds=xr.open_dataset(os.path.join(wm.base_path, 'dwaq_map.nc'))
# Extract the grid from the output
grid_ds=unstructured_grid.UnstructuredGrid.from_ugrid(ds)

##
# Plot that up:
tracers=['dye1','unity','boundary_dye','salinity','temp']
fig=plt.figure(1)
fig.clf()
fig.set_size_inches([10,4],forward=True)
fig,axs=plt.subplots(1,len(tracers),num=1)
cmap=scmap.load_gradient('turbo.cpt')
for ax,scal in zip(axs,tracers):
    ax.text(0.05,0.95,scal,transform=ax.transAxes,va='top')
# Drop the last time step -- the DFM tracers are not valid
# then (not sure why)
for ti in range(len(ds.time)-1):
    for ax,scal in zip(axs,tracers):
        # NOTE(review): assigning ax.collections=[] clears the previous
        # frame's polygons, but the attribute is read-only in newer
        # matplotlib -- works with the matplotlib this example targets.
        ax.collections=[]
        clim=dict(salinity=[0,2],temp=[5,12]).get(scal,[0,1])
        ccoll=grid_ds.plot_cells(values=ds[scal].isel(time=ti,layer=0),ax=ax,cmap=cmap,
                                 clim=clim)
        ax.axis('equal')
        # plt.colorbar(ccoll,ax=ax,orientation='horizontal')
    plt.draw()
    plt.pause(0.025)
ds.close() # keeping this open can interfere with deleting or overwriting the netcdf file.
| {
"content_hash": "53ae2859da01352c3b3bad8ed4cd0745",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 111,
"avg_line_length": 31.87443946188341,
"alnum_prop": 0.6720596510973551,
"repo_name": "rustychris/stompy",
"id": "d2247ee2ecf44746b4b22d6166f36dcb5de28d29",
"size": "7108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfm_dwaq/basic_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "89942"
},
{
"name": "Makefile",
"bytes": "62"
},
{
"name": "Python",
"bytes": "4305617"
},
{
"name": "Shell",
"bytes": "241"
}
],
"symlink_target": ""
} |
import errno
import click
import yaml
from sigopt.validate import ValidationError
class ValidatedData:
    """Container pairing a YAML file's name with its validated contents."""
    def __init__(self, filename, validated_data):
        # Path the data came from (useful for later error messages).
        self.filename = filename
        # The validator's output, not the raw parsed YAML.
        self.data = validated_data
def load_yaml(filename, validator, ignore_no_file):
    """Load, parse and validate a YAML file.

    Returns a ValidatedData wrapping the validated contents, or None when
    filename is None (or when the file is absent and ignore_no_file is
    set). Any failure is surfaced as click.BadParameter naming the
    offending file.
    """
    if filename is None:
        return None
    try:
        with open(filename) as yaml_fp:
            data = yaml.safe_load(yaml_fp)
    except OSError as ose:
        if ose.errno == errno.ENOENT and ignore_no_file:
            return None
        # Include the actual filename so the CLI error points at the file
        # (the messages previously said "(unknown)").
        raise click.BadParameter(f'Could not open {filename}: {ose}') from ose
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as pe:
        raise click.BadParameter(f'Could not parse {filename}: {pe}') from pe
    try:
        validated_data = validator(data)
    except ValidationError as ve:
        raise click.BadParameter(f'Bad format in {filename}: {ve}') from ve
    return ValidatedData(filename, validated_data)
def load_yaml_callback(validator, ignore_no_file=False):
    """Build a click option callback that loads and validates a YAML file."""
    def callback(ctx, p, value):
        return load_yaml(value, validator, ignore_no_file=ignore_no_file)
    return callback
| {
"content_hash": "c2d2fc13783781fb907b5a6218af3a90",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 89,
"avg_line_length": 29.942857142857143,
"alnum_prop": 0.7213740458015268,
"repo_name": "sigopt/sigopt-python",
"id": "1e4a81de0bac4dd9837f9606a885f45624dd2f1c",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sigopt/cli/arguments/load_yaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2411"
},
{
"name": "Makefile",
"bytes": "545"
},
{
"name": "Python",
"bytes": "542280"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
from sys import stdin
dummy = int(stdin.readline())
word = str(stdin.readline())
dictLength = int(stdin.readline())
# Build the list of words
words = [];
for i in range(1, dictLength + 1):
dummy = int(stdin.readline())
words.append(str(stdin.readline()))
# Is the word in our list?
if word in words:
print 0
else:
print 1 | {
"content_hash": "1f3d1fbaf19b057c2f3f5e486deeecb3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 36,
"avg_line_length": 19.294117647058822,
"alnum_prop": 0.6920731707317073,
"repo_name": "AntoineAugusti/katas",
"id": "a3d7b1e4ee65da811decb555b9468986595ad977",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prologin/2014/5_neologisme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "2728"
},
{
"name": "Java",
"bytes": "5700"
},
{
"name": "Python",
"bytes": "78940"
}
],
"symlink_target": ""
} |
import pytest
from dj_anonymizer import fields
def test_function():
    """fields.function wraps a callable (plus args/kwargs) in a generator
    that yields the call result."""
    def adder(value=0, *extra, **named):
        total = value + sum(extra)
        if 'addition' in named:
            total += named['addition']
        return total

    assert next(fields.function(adder)) == 0
    assert next(fields.function(adder, 1)) == 1
    assert next(fields.function(adder, 1, 2)) == 3
    assert next(fields.function(adder, 1, 2, addition=3)) == 6
@pytest.mark.parametrize(
    '''field_value, seq_start, seq_step, seq_callback, seq_slugify,
    expected_1, expected_2''', [
        ("username_{seq}", 0, 1, None,
         True, "username_0", "username_1"),
        ("username_{seq}", 5, 10, None,
         True, "username_5", "username_15"),
        ("username_{seq}", 5, 10, lambda: "val",
         True, "username_val", "username_val"),
        ("username_{seq}", 5, 10, lambda: "va l",
         True, "username_va-l", "username_va-l"),
        ("username_{seq}", 5, 10, lambda: "va l",
         False, "username_va l", "username_va l"),
    ]
)
def test_string(field_value, seq_start, seq_step, seq_callback, seq_slugify,
                expected_1, expected_2):
    """fields.string fills {seq} from either a counter (seq_start/seq_step)
    or a seq_callback, optionally slugifying the substituted value."""
    username_field = fields.string(
        field_value=field_value,
        seq_start=seq_start,
        seq_step=seq_step,
        seq_callback=seq_callback,
        seq_slugify=seq_slugify
    )
    # Two consecutive draws exercise the counter advancing (or the
    # callback being re-invoked).
    assert next(username_field) == expected_1
    assert next(username_field) == expected_2
@pytest.mark.parametrize(
    '''password, salt, hasher, expected_1, expected_2''', [
        ('password', '111', 'md5',
         'md5$111$d7fe5ea5ff97cc7c2c79e2df5eb7cf93',
         'md5$111$d7fe5ea5ff97cc7c2c79e2df5eb7cf93'),
        ('password', None, 'unsalted_md5',
         '5f4dcc3b5aa765d61d8327deb882cf99',
         '5f4dcc3b5aa765d61d8327deb882cf99'),
    ]
)
def test_password(password, salt, hasher, expected_1, expected_2):
    """fields.password yields the same Django-style hash string on every
    draw (deterministic for a given password/salt/hasher)."""
    password_field = fields.password(password, salt=salt, hasher=hasher)
    assert next(password_field) == expected_1
    assert next(password_field) == expected_2
| {
"content_hash": "b70039362c51f16c974682b47d30a319",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 33.37313432835821,
"alnum_prop": 0.6149373881932021,
"repo_name": "knowledge-point/dj_anonymizer",
"id": "bdd8c2aafb62e6359332da09c77aa92b87394c85",
"size": "2236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_anonym_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "775"
},
{
"name": "Python",
"bytes": "20751"
}
],
"symlink_target": ""
} |
def tensor_shape_string(tensor):
    """
    Creates a string of the shape of the tensor with format (dim[0], dim[1], ..., dim[n])
    :param tensor: input tensor
    :return: String of shape
    """
    dims = tensor.get_shape().as_list()
    # Unknown dimensions come back as None and render as the string 'None'.
    return '({:s})'.format(', '.join(str(dim) for dim in dims))
def tensor_num_params(tensor):
    """
    Returns the number of params in the tensor, can only be done if the size is finite, no dimension of shape None.
    E.g.
        - tensor with shape (50, 30) returns 1500
        - tensor with shape (None, 30) raises ValueError
        - tensor with an empty (scalar) shape returns 1

    :param tensor: input tensor (anything exposing get_shape().as_list())
    :return: number of params in tensor
    :raises ValueError: if any dimension is unknown (None)
    """
    shape = tensor.get_shape().as_list()
    # Validate up front instead of failing mid-multiplication; ValueError is a
    # subclass of Exception, so existing `except Exception` callers still work.
    if any(dim is None for dim in shape):
        raise ValueError("Can only calculate number of params when size is fixed, e.g. no tensor with shape [None, 10]")
    num_params = 1
    for dim in shape:
        num_params *= dim
    return num_params
| {
"content_hash": "962431b45eb64296e08288b43c6aae32",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 123,
"avg_line_length": 30.5,
"alnum_prop": 0.5947176684881603,
"repo_name": "BartKeulen/drl",
"id": "8d8bd195ab50ad0170ff5f4b840696090f399b55",
"size": "1098",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "drl/utilities/tfutilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5278"
},
{
"name": "HTML",
"bytes": "5292"
},
{
"name": "JavaScript",
"bytes": "11444"
},
{
"name": "Jupyter Notebook",
"bytes": "2448320"
},
{
"name": "Python",
"bytes": "162908"
}
],
"symlink_target": ""
} |
"""
Django settings for djangostructlog project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5$r=b*fz$)&i+1&0qsb$cast1_dpd8!6cwtn!=1u4um1%yr7ld'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'rest_framework.authtoken',
'rest_framework',
'structlog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangostructlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangostructlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'djangostructlog',
'USER': 'structlog',
'PASSWORD': 'structlog',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
| {
"content_hash": "a6caa3255053c35860a3f52d30958ca6",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 91,
"avg_line_length": 26.845588235294116,
"alnum_prop": 0.6852917009038619,
"repo_name": "carlohamalainen/django-struct-log",
"id": "e211ec82d887945f1851543900f7503f4033adfa",
"size": "3651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangostructlog/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8605"
}
],
"symlink_target": ""
} |
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snecdor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewnorm -- Skew normal
t -- Student's T
trapz -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
random_correlation -- random correlation matrices
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
mvsdist
sem
zmap
zscore
iqr
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
f_value
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
ss
square_of_sums
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
chisqprob
betai
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more statistics-related functions, install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
__all__ = [s for s in dir() if not s.startswith("_")]  # Export only public (non-underscore-prefixed) names.
# Expose `scipy.stats.test()` for running this subpackage's test suite.
from numpy.testing import Tester
test = Tester().test
| {
"content_hash": "8e2444753d3cc8b638e420536c496616",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 94,
"avg_line_length": 25.573816155988858,
"alnum_prop": 0.5710706894673783,
"repo_name": "kalvdans/scipy",
"id": "d8b9ce5fa103afada2bd6c27050860297221394c",
"size": "9181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/stats/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4127403"
},
{
"name": "C++",
"bytes": "503114"
},
{
"name": "Fortran",
"bytes": "5574493"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11190581"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import unittest
import ray
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.result import TRAINING_ITERATION
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, Checkpoint
from ray.tune.resources import Resources
from ray.cluster_utils import Cluster
class RayTrialExecutorTest(unittest.TestCase):
    """Exercises the RayTrialExecutor trial lifecycle (start/stop, save/
    restore, pause/resume, reset) against a local Ray instance."""

    def setUp(self):
        self.trial_executor = RayTrialExecutor(queue_trials=False)
        ray.init(num_cpus=2, ignore_reinit_error=True)
        _register_all()  # Needed for flaky tests
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    def testStartStop(self):
        """Tests that a started trial shows up as running and can be stopped."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        running = self.trial_executor.get_running_trials()
        self.assertEqual(1, len(running))
        self.trial_executor.stop_trial(trial)
    def testAsyncSave(self):
        """Tests that saved checkpoint value not immediately set."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        trial.last_result = self.trial_executor.fetch_result(trial)
        checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
        # save() is asynchronous: the trial records what it is saving to,
        # but the checkpoint value stays unset until the result is processed.
        self.assertEqual(checkpoint, trial.saving_to)
        self.assertEqual(trial.checkpoint.value, None)
        self.process_trial_save(trial)
        self.assertEqual(checkpoint, trial.checkpoint)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testSaveRestore(self):
        """Tests a full persistent save followed by a restore."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        trial.last_result = self.trial_executor.fetch_result(trial)
        self.trial_executor.save(trial, Checkpoint.PERSISTENT)
        self.process_trial_save(trial)
        self.trial_executor.restore(trial)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testPauseResume(self):
        """Tests that pausing works for trials in flight."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testSavePauseResumeErrorRestore(self):
        """Tests that pause checkpoint does not replace restore checkpoint."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        trial.last_result = self.trial_executor.fetch_result(trial)
        # Save
        checkpoint = self.trial_executor.save(trial, Checkpoint.PERSISTENT)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.assertEqual(checkpoint.storage, Checkpoint.PERSISTENT)
        # Process save result (simulates trial runner)
        self.process_trial_save(trial)
        # Train
        self.trial_executor.continue_training(trial)
        trial.last_result = self.trial_executor.fetch_result(trial)
        # Pause
        self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.assertEqual(trial.checkpoint.storage, Checkpoint.MEMORY)
        # Resume
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        # Error
        trial.set_status(Trial.ERROR)
        # Restore
        self.trial_executor.restore(trial)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testStartFailure(self):
        """Tests that starting an unstartable trainable marks the trial ERROR."""
        # "asdf" is registered with a None trainable, so start must fail.
        _global_registry.register(TRAINABLE_CLASS, "asdf", None)
        trial = Trial("asdf", resources=Resources(1, 0))
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.ERROR, trial.status)
    def testPauseResume2(self):
        """Tests that pausing works for trials being processed."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.fetch_result(trial)
        checkpoint = self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.trial_executor.start_trial(trial, checkpoint)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testPauseUnpause(self):
        """Tests that unpausing works for trials being processed."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        trial.last_result = self.trial_executor.fetch_result(trial)
        self.assertEqual(trial.last_result.get(TRAINING_ITERATION), 1)
        self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.trial_executor.unpause_trial(trial)
        self.assertEqual(Trial.PENDING, trial.status)
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        # Training continues from iteration 1, so the next result is 2.
        trial.last_result = self.trial_executor.fetch_result(trial)
        self.assertEqual(trial.last_result.get(TRAINING_ITERATION), 2)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testNoResetTrial(self):
        """Tests that reset handles NotImplemented properly."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        exists = self.trial_executor.reset_trial(trial, {}, "modified_mock")
        self.assertEqual(exists, False)
        self.assertEqual(Trial.RUNNING, trial.status)
    def testResetTrial(self):
        """Tests that reset works as expected."""
        # B implements reset_config, so the executor can reuse the actor.
        class B(Trainable):
            def step(self):
                return dict(timesteps_this_iter=1, done=True)
            def reset_config(self, config):
                self.config = config
                return True
        trials = self.generate_trials({
            "run": B,
            "config": {
                "foo": 0
            },
        }, "grid_search")
        trial = trials[0]
        self.trial_executor.start_trial(trial)
        exists = self.trial_executor.reset_trial(trial, {"hi": 1},
                                                 "modified_mock")
        self.assertEqual(exists, True)
        self.assertEqual(trial.config.get("hi"), 1)
        self.assertEqual(trial.experiment_tag, "modified_mock")
        self.assertEqual(Trial.RUNNING, trial.status)
    @staticmethod
    def generate_trials(spec, name):
        """Expands `spec` under experiment `name` via BasicVariantGenerator
        and returns the resulting list of trials."""
        suggester = BasicVariantGenerator()
        suggester.add_configurations({name: spec})
        trials = []
        while not suggester.is_finished():
            trial = suggester.next_trial()
            if trial:
                trials.append(trial)
            else:
                break
        return trials
    def process_trial_save(self, trial):
        """Simulates trial runner save."""
        checkpoint = trial.saving_to
        checkpoint_value = self.trial_executor.fetch_result(trial)
        checkpoint.value = checkpoint_value
        trial.on_checkpoint(checkpoint)
class RayExecutorQueueTest(unittest.TestCase):
    """Exercises queue_trials=True behavior of RayTrialExecutor on a
    simulated multi-node cluster (has_resources may report True for trials
    that would be queued until resources appear)."""

    def setUp(self):
        # Single-CPU head node; more nodes are added by individual tests.
        self.cluster = Cluster(
            initialize_head=True,
            connect=True,
            head_node_args={
                "num_cpus": 1,
                "_system_config": {
                    "num_heartbeats_timeout": 10
                }
            })
        self.trial_executor = RayTrialExecutor(
            queue_trials=True, refresh_period=0)
        # Pytest doesn't play nicely with imports
        _register_all()
    def tearDown(self):
        ray.shutdown()
        self.cluster.shutdown()
        _register_all()  # re-register the evicted objects
    def testQueueTrial(self):
        """Tests that a trial whose resources are unavailable (GPU on a
        CPU-only head) is still reported as startable for queueing."""
        def create_trial(cpu, gpu=0):
            return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
        cpu_only = create_trial(1, 0)
        self.assertTrue(self.trial_executor.has_resources(cpu_only.resources))
        self.trial_executor.start_trial(cpu_only)
        gpu_only = create_trial(0, 1)
        self.assertTrue(self.trial_executor.has_resources(gpu_only.resources))
    def testHeadBlocking(self):
        """Tests that a queued GPU trial blocks CPU-only trials until a
        node with matching resources joins the cluster."""
        def create_trial(cpu, gpu=0):
            return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
        gpu_trial = create_trial(1, 1)
        self.assertTrue(self.trial_executor.has_resources(gpu_trial.resources))
        self.trial_executor.start_trial(gpu_trial)
        # TODO(rliaw): This behavior is probably undesirable, but right now
        # trials with different resource requirements is not often used.
        cpu_only_trial = create_trial(1, 0)
        self.assertFalse(
            self.trial_executor.has_resources(cpu_only_trial.resources))
        self.cluster.add_node(num_cpus=1, num_gpus=1)
        self.cluster.wait_for_nodes()
        self.assertTrue(
            self.trial_executor.has_resources(cpu_only_trial.resources))
        self.trial_executor.start_trial(cpu_only_trial)
        cpu_only_trial2 = create_trial(1, 0)
        self.assertTrue(
            self.trial_executor.has_resources(cpu_only_trial2.resources))
        self.trial_executor.start_trial(cpu_only_trial2)
        cpu_only_trial3 = create_trial(1, 0)
        self.assertFalse(
            self.trial_executor.has_resources(cpu_only_trial3.resources))
class LocalModeExecutorTest(RayTrialExecutorTest):
    """Re-runs the entire RayTrialExecutorTest suite with Ray started in
    local mode (only setUp/tearDown differ)."""

    def setUp(self):
        ray.init(local_mode=True)
        self.trial_executor = RayTrialExecutor(queue_trials=False)
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "5a3926a6cb87416acb23956706c24e03",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 38.850187265917604,
"alnum_prop": 0.6467752819820688,
"repo_name": "richardliaw/ray",
"id": "c2afe6a5960895570b867e8fbde0e774faa97248",
"size": "10389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/ray/tune/tests/test_ray_trial_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
# Parse one stdin line of whitespace-separated integers into a list.
Array = list(map(int, input().split()))
print(Array) | {
"content_hash": "ed871b7b6978fa268d89a29ee3c27079",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.660377358490566,
"repo_name": "vinitraj10/Java",
"id": "d61321fe6963c1fc7cd899299515c489556de7dd",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/comprehensions/listComprehension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "15318"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from sys import stderr
from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryManager
from azure.cli.core._profile import Profile
class VstsContinuousDeliveryProvider(object):
    """Sets up continuous delivery (CD) for Azure Web Apps through
    Visual Studio Team Services (VSTS)."""

    def __init__(self):
        # Last progress line written to stderr; remembered so the next
        # update can erase it in place.
        self._progress_last_message = ''

    def setup_continuous_delivery(self,
                                  resource_group_name, name, repo_url, branch, git_token,
                                  slot, cd_app_type, cd_account, cd_create_account, location):
        """
        This method sets up CD for an Azure Web App thru Team Services.

        :param resource_group_name: resource group containing the web app
        :param name: web app name
        :param repo_url: URL of the source repository
        :param branch: branch to build from
        :param git_token: token for accessing the git repository
        :param slot: deployment slot (or None for production)
        :param cd_app_type: application type for the CD pipeline
        :param cd_account: VSTS account to use
        :param cd_create_account: whether to create the VSTS account
        :param location: Azure region for created resources
        :return: status object returned by ContinuousDeliveryManager
        """
        # Gather information about the Azure connection
        profile = Profile()
        subscription = profile.get_subscription()
        user = profile.get_current_account_user()
        cred, _, _ = profile.get_login_credentials(subscription_id=None)
        cd_manager = ContinuousDeliveryManager(self._update_progress)
        # Generate an Azure token with the VSTS resource app id
        auth_token = profile.get_access_token_for_resource(user, None, cd_manager.get_vsts_app_id())
        cd_manager.set_repository_info(repo_url, branch, git_token)
        cd_manager.set_azure_web_info(resource_group_name, name, cred, subscription['id'],
                                      subscription['name'], subscription['tenantId'], location)
        vsts_cd_status = cd_manager.setup_continuous_delivery(slot, cd_app_type, cd_account,
                                                              cd_create_account, auth_token)
        return vsts_cd_status

    def remove_continuous_delivery(self):  # pylint: disable=no-self-use
        """
        To be Implemented
        """
        # TODO: this would be called by appservice web source-control delete
        pass

    def _update_progress(self, current, total, status):
        """Render a single-line progress indicator on stderr.

        :param current: units of work completed so far
        :param total: total units of work (falsy -> nothing is printed)
        :param status: short status text appended after the percentage
        """
        if total:
            percent_done = current * 100 / total
            message = '{: >3.0f}% complete: {}'.format(percent_done, status)
            # Erase the previous message
            # (backspace to beginning, space over the text and backspace again).
            # Renamed from ambiguous single-letter `l` (PEP 8 / E741).
            prev_len = len(self._progress_last_message)
            print('\b' * prev_len + ' ' * prev_len + '\b' * prev_len, end='', file=stderr)
            print(message, end='', file=stderr)
            self._progress_last_message = message
            stderr.flush()
            if current == total:
                print('', file=stderr)
| {
"content_hash": "a62d9c5d5ba724bd845c71f4db71db25",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 100,
"avg_line_length": 44.36363636363637,
"alnum_prop": 0.5967213114754099,
"repo_name": "QingChenmsft/azure-cli",
"id": "89456c7386304fad5c9d5cde6ce1d010dff78d95",
"size": "2786",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-appservice/azure/cli/command_modules/appservice/vsts_cd_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5372365"
},
{
"name": "Shell",
"bytes": "25445"
}
],
"symlink_target": ""
} |
from django.utils.safestring import mark_safe
from django_tables2.columns import Column, BooleanColumn, EmailColumn
import django_tables2 as tables
from orgsema.models import Radnik
class ListaRadnika(tables.Table):
    """django-tables2 table listing Radnik (employee) records.

    Column headers are Serbian display labels; email and username are pulled
    from the related auth user via render_* hooks. The previous no-op
    __init__ override (which only delegated to super()) has been removed.
    """
    id = Column(verbose_name=mark_safe('ID'))
    puno_ime = Column(verbose_name=mark_safe('Ime i prezime'))
    username = Column(verbose_name=mark_safe('Nalog'))
    email = EmailColumn(verbose_name=mark_safe('Email'))
    orgjed = Column(verbose_name=mark_safe('Org. jedinica'))
    uloga = Column(verbose_name=mark_safe('Uloga'))

    def render_email(self, record):
        # Email lives on the related user object, not on Radnik itself.
        return record.user.email

    def render_orgjed(self, record):
        # Show the organizational unit's display name instead of its repr.
        return record.orgjed.naziv

    def render_username(self, record):
        return record.user.username

    class Meta:
        model = Radnik
        attrs = {'class': 'table table-striped table-bordered table-hover'}
        fields = ('id', 'puno_ime', 'username', 'email', 'orgjed', 'uloga')
| {
"content_hash": "a7afc796329a70f938826e52925c6795",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 35.5,
"alnum_prop": 0.6751173708920187,
"repo_name": "mbranko/kartonpmv",
"id": "c8ca2b2619b9e0c0dfffe10ac095f1844bc13324",
"size": "1112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orgsema/tables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1381558"
},
{
"name": "HTML",
"bytes": "74639"
},
{
"name": "JavaScript",
"bytes": "2790801"
},
{
"name": "Python",
"bytes": "66200"
}
],
"symlink_target": ""
} |
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
import time
# Module-level NSX Manager session, created at import time with debug
# logging enabled; credentials come from tests.config (star import above).
client_session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
#TODO: add tests for all vds operations | {
"content_hash": "d44136952d8b42c5f62556da0413e912",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 92,
"avg_line_length": 24.1,
"alnum_prop": 0.7759336099585062,
"repo_name": "vmware/nsxramlclient",
"id": "43867a999b01cd7d7aedf5fb68f033ed472c6556",
"size": "1362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/vds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181224"
}
],
"symlink_target": ""
} |
"""File paths for the Reddit Classification pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
TRANSFORM_FN_DIR = 'transform_fn'
RAW_METADATA_DIR = 'raw_metadata'
TRANSFORMED_METADATA_DIR = 'transformed_metadata'
TRANSFORMED_TRAIN_DATA_FILE_PREFIX = 'features_train'
TRANSFORMED_EVAL_DATA_FILE_PREFIX = 'features_eval'
TRANSFORMED_PREDICT_DATA_FILE_PREFIX = 'features_predict'
TRAIN_RESULTS_FILE = 'train_results'
DEPLOY_SAVED_MODEL_DIR = 'saved_model'
MODEL_DIR = 'model_dir'
MODEL_EVALUATIONS_FILE = 'model_evaluations'
BATCH_PREDICTION_RESULTS_FILE = 'batch_prediction_results'
| {
"content_hash": "9a02d353c7d4b1eb7dd11b1c9ed3f123",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 34.526315789473685,
"alnum_prop": 0.7698170731707317,
"repo_name": "GoogleCloudPlatform/cloudml-samples",
"id": "5d823a4ecfe9e85b78e00f3b4fadacc8dfc20284",
"size": "1253",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/standard/legacy/reddit_tft/path_constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7846"
},
{
"name": "Jupyter Notebook",
"bytes": "1081052"
},
{
"name": "OpenEdge ABL",
"bytes": "1846"
},
{
"name": "Python",
"bytes": "1174159"
},
{
"name": "Shell",
"bytes": "50370"
}
],
"symlink_target": ""
} |
import argparse
import subprocess
import os
import os.path as op
import json
import re
from glob import glob
# slurm-related paths. change these if your slurm is set up differently or you
# use a different job submission system. see docs
# https://sylabs.io/guides/3.7/user-guide/appendix.html#singularity-s-environment-variables
# for full description of each of these environmental variables
os.environ['SINGULARITY_BINDPATH'] = os.environ.get('SINGULARITY_BINDPATH', '') + ',/opt/slurm,/usr/lib64/libmunge.so.2.0.0,/usr/lib64/libmunge.so.2,/var/run/munge,/etc/passwd'
os.environ['SINGULARITYENV_PREPEND_PATH'] = os.environ.get('SINGULARITYENV_PREPEND_PATH', '') + ':/opt/slurm/bin'
os.environ['SINGULARITY_CONTAINLIBS'] = os.environ.get('SINGULARITY_CONTAINLIBS', '') + ',' + ','.join(glob('/opt/slurm/lib64/libpmi*'))
def check_singularity_envvars():
"""Make sure SINGULARITY_BINDPATH, SINGULARITY_PREPEND_PATH, and SINGULARITY_CONTAINLIBS only contain existing paths
"""
for env in ['SINGULARITY_BINDPATH', 'SINGULARITYENV_PREPEND_PATH', 'SINGULARITY_CONTAINLIBS']:
paths = os.environ[env]
joiner = ',' if env != "SINGULARITYENV_PREPEND_PATH" else ':'
paths = [p for p in paths.split(joiner) if op.exists(p)]
os.environ[env] = joiner.join(paths)
def check_bind_paths(volumes):
"""Check that paths we want to bind exist, return only those that do."""
return [vol for vol in volumes if op.exists(vol.split(':')[0])]
def main(image, args=None, software='singularity', sudo=False):
    """Run sfp singularity container!

    Parameters
    ----------
    image : str
        If running with singularity, the path to the .sif file containing the
        singularity image. If running with docker, name of the docker image.
    args : list, optional
        command to pass to the container. If None or empty (default), we open
        up an interactive session.
    software : {'singularity', 'docker'}, optional
        Whether to run image with singularity or docker
    sudo : bool, optional
        If True, we run docker with `sudo`. If software=='singularity', we
        ignore this.

    Raises
    ------
    ValueError
        If *software* is neither 'singularity' nor 'docker'.
    """
    # avoid a mutable default argument; None behaves like the old [] default
    if args is None:
        args = []
    check_singularity_envvars()
    with open(op.join(op.dirname(op.realpath(__file__)), 'config.json')) as f:
        config = json.load(f)
    volumes = [
        f'{op.dirname(op.realpath(__file__))}:/home/sfp_user/spatial-frequency-preferences',
        f'{config["MATLAB_PATH"]}:/home/sfp_user/matlab',
        f'{config["FREESURFER_HOME"]}:/home/sfp_user/freesurfer',
        f'{config["FSLDIR"]}:/home/sfp_user/fsl',
        f'{config["DATA_DIR"]}:{config["DATA_DIR"]}',
        f'{config["WORKING_DIR"]}:{config["WORKING_DIR"]}'
    ]
    # only keep bind specs whose host path exists
    volumes = check_bind_paths(volumes)
    # join puts --bind between each of the volumes, we also need it in the
    # beginning
    volumes = '--bind ' + " --bind ".join(volumes)
    # if the user is passing a snakemake command, need to pass
    # --configfile /home/sfp_user/sfp_config.json, since we modify the config
    # file when we source singularity_env.sh
    if args and 'snakemake' == args[0]:
        args = ['snakemake', '--configfile', '/home/sfp_user/sfp_config.json',
                '-d', '/home/sfp_user/spatial-frequency-preferences',
                '-s', '/home/sfp_user/spatial-frequency-preferences/Snakefile', *args[1:]]
    # in this case they passed a string so args[0] contains snakemake and then
    # a bunch of other stuff
    elif args and args[0].startswith('snakemake'):
        args = ['snakemake', '--configfile', '/home/sfp_user/sfp_config.json',
                '-d', '/home/sfp_user/spatial-frequency-preferences',
                '-s', '/home/sfp_user/spatial-frequency-preferences/Snakefile', args[0].replace('snakemake ', ''), *args[1:]]
        # if the user specifies --profile slurm, replace it with the
        # appropriate path. We know it will be in the last one of args and
        # nested below the above elif because if they specified --profile then
        # the whole thing had to be wrapped in quotes, which would lead to this
        # case.
        if '--profile slurm' in args[-1]:
            args[-1] = args[-1].replace('--profile slurm',
                                        '--profile /home/sfp_user/.config/snakemake/slurm')
        # then need to make sure to mount this
        elif '--profile' in args[-1]:
            # \S+ (instead of the previous non-greedy match up to a space)
            # also matches a profile path at the very end of the string
            profile_path = re.findall(r'--profile (\S+)', args[-1])[0]
            profile_name = op.split(profile_path)[-1]
            # BUG FIX: `volumes` is a string at this point (joined above), so
            # the old `volumes.append(...)` raised AttributeError; extend the
            # bind string instead.
            volumes += f' --bind {profile_path}:/home/sfp_user/.config/snakemake/{profile_name}'
            args[-1] = args[-1].replace(f'--profile {profile_path}',
                                        f'--profile /home/sfp_user/.config/snakemake/{profile_name}')
    # open up an interactive session if the user hasn't specified an argument,
    # otherwise pass the argument to bash. regardless, make sure we source the
    # env.sh file
    if not args:
        args = ['/bin/bash', '--init-file', '/home/sfp_user/singularity_env.sh']
    else:
        args = ['/bin/bash', '-c',
                # this needs to be done with single quotes on the inside so
                # that's what bash sees, otherwise we run into
                # https://stackoverflow.com/questions/45577411/export-variable-within-bin-bash-c;
                # double-quoted commands get evaluated in the *current* shell,
                # not by /bin/bash -c
                f"'source /home/sfp_user/singularity_env.sh; {' '.join(args)}'"]
    # set these environmental variables, which we use for the jobs submitted to
    # the cluster so they know where to find the container and this script
    env_str = f"--env SFP_PATH={op.dirname(op.realpath(__file__))} --env SINGULARITY_CONTAINER_PATH={image}"
    # the -e flag makes sure we don't pass through any environment variables
    # from the calling shell, while --writable-tmpfs enables us to write to the
    # container's filesystem (necessary because singularity_env.sh makes a
    # temporary config.json file)
    if software == 'singularity':
        exec_str = f'singularity exec -e {env_str} --writable-tmpfs {volumes} {image} {" ".join(args)}'
    elif software == 'docker':
        volumes = volumes.replace('--bind', '--volume')
        exec_str = f'docker run {volumes} -it {image} {" ".join(args)}'
        if sudo:
            exec_str = 'sudo ' + exec_str
    else:
        # previously fell through to a NameError on exec_str; fail clearly
        raise ValueError(f"software must be 'singularity' or 'docker', not {software!r}")
    print(exec_str)
    # we use shell=True because we want to carefully control the quotes used
    subprocess.call(exec_str, shell=True)
# Command-line entry point: build the argument parser and hand the parsed
# arguments straight to main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=("Run billbrod/sfp container. This is a wrapper, which binds the appropriate"
                     " paths and sources singularity_env.sh, setting up some environmental variables.")
    )
    parser.add_argument('image',
                        help=('If running with singularity, the path to the '
                              '.sif file containing the singularity image. '
                              'If running with docker, name of the docker image.'))
    parser.add_argument('--software', default='singularity', choices=['singularity', 'docker'],
                        help="Whether to run this with singularity or docker")
    parser.add_argument('--sudo', '-s', action='store_true',
                        help="Whether to run docker with sudo or not. Ignored if software==singularity")
    parser.add_argument("args", nargs='*',
                        help=("Command to pass to the container. If empty, we open up an interactive session."
                              " If it contains flags, surround with SINGLE QUOTES (not double)."))
    # argparse yields a Namespace; convert it to a dict so it can be splatted
    # directly into main()'s keyword arguments.
    args = vars(parser.parse_args())
    main(**args)
| {
"content_hash": "6e3a0dbf6768311ed5171976ecc579e8",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 176,
"avg_line_length": 54.09090909090909,
"alnum_prop": 0.6294764059469942,
"repo_name": "billbrod/spatial-frequency-preferences",
"id": "dd5ddb4821ccd340ee589d3cdbd24189b5edd844",
"size": "7759",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "run_singularity.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "676680"
},
{
"name": "MATLAB",
"bytes": "4718"
},
{
"name": "Python",
"bytes": "940028"
},
{
"name": "Shell",
"bytes": "946"
}
],
"symlink_target": ""
} |
"""
This module contains all classes and functions used for handling macros.
"""
from pyccel.utilities.stage import PyccelStage
from .basic import PyccelAstNode
from .datatypes import NativeInteger, NativeGeneric
from .internals import PyccelSymbol
from .variable import Variable
# Global compilation-stage tracker; MacroShape compares it against
# "syntactic" to decide whether the argument's rank is known yet.
pyccel_stage = PyccelStage()

# Public API of this module.
__all__ = (
    'Macro',
    'MacroCount',
    'MacroShape',
    'MacroType',
    'construct_macro'
)
#==============================================================================
class Macro(PyccelAstNode):
    """Base class for pyccel macros.

    A macro wraps a single argument (the symbol or variable it is applied
    to); concrete subclasses set ``_name`` to the macro's name.
    """
    __slots__ = ('_argument',)
    _name = '__UNDEFINED__'
    _attribute_nodes = ()

    def __init__(self, argument):
        if not isinstance(argument, (PyccelSymbol, Variable)):
            # fix: error message previously misspelled the class name as
            # "Pyccelsymbol"
            raise TypeError("Argument must be a PyccelSymbol or a Variable not {}".format(type(argument)))
        self._argument = argument
        super().__init__()

    @property
    def argument(self):
        """The symbol or variable this macro is applied to."""
        return self._argument

    @property
    def name(self):
        """Name of the macro."""
        return self._name
#==============================================================================
class MacroShape(Macro):
    """Macro representing the shape of an array argument.

    When *index* is given the macro denotes a single extent (a scalar
    result); otherwise it denotes the whole shape.
    """
    __slots__ = ('_index','_rank','_shape')
    _name = 'shape'
    _dtype = NativeInteger()
    _precision = -1
    _order = None

    def __init__(self, argument, index=None):
        if index is not None:
            # a single dimension was requested, so the result is a scalar
            self._rank = 0
            self._shape = ()
        elif pyccel_stage == "syntactic":
            # before semantic analysis the argument's rank is unknown
            self._rank = 1
            self._shape = ()
        else:
            self._rank = int(argument.rank > 1)
            self._shape = (argument.rank,)
        self._index = index
        super().__init__(argument)

    @property
    def index(self):
        """Requested dimension index, or None for the full shape."""
        return self._index

    def __str__(self):
        if self.index is None:
            return f'MacroShape({self.argument!s})'
        return f'MacroShape({self.argument!s}, {self.index!s})'
#==============================================================================
class MacroType(Macro):
    """Macro representing the data type of its argument."""
    __slots__ = ()
    _name = 'dtype'
    _dtype = NativeGeneric()
    _precision = 0
    _rank = 0
    _shape = ()
    _order = None

    def __str__(self):
        return f'MacroType({self.argument!s})'
#==============================================================================
class MacroCount(Macro):
    """Macro representing the count (number of elements) of its argument."""
    __slots__ = ()
    _name = 'count'
    _dtype = NativeInteger()
    _precision = -1
    _rank = 0
    _shape = ()
    _order = None

    def __str__(self):
        return f'MacroCount({self.argument!s})'
def construct_macro(name, argument, parameter=None):
    """Build the macro called *name* applied to *argument*.

    Supported names are 'shape', 'dtype' and 'count'; any other name
    yields None.

    Parameters
    ----------
    name : str
        Name of the macro to build.
    argument :
        Object the macro is applied to.
    parameter : optional
        Extra parameter (the dimension index for 'shape').
    """
    # TODO add available macros: shape, len, dtype
    if not isinstance(name, str):
        raise TypeError('name must be of type str')
    builders = {
        'shape': lambda: MacroShape(argument, index=parameter),
        'dtype': lambda: MacroType(argument),
        'count': lambda: MacroCount(argument),
    }
    builder = builders.get(name)
    return builder() if builder is not None else None
| {
"content_hash": "7c5c077c8afa370b0dc6ff675b30648f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 106,
"avg_line_length": 26.408333333333335,
"alnum_prop": 0.48437993057746925,
"repo_name": "ratnania/pyccel",
"id": "d23e62a308a75b885821ca5b140b54f9a833488a",
"size": "3558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyccel/ast/macros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "111665"
},
{
"name": "Python",
"bytes": "863199"
},
{
"name": "Shell",
"bytes": "712"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
import stevedore
from neutron.common import exceptions as exc
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_api as api
# Module-level logger for the ml2 manager classes.
LOG = log.getLogger(__name__)
class TypeManager(stevedore.named.NamedExtensionManager):
    """Manage network segment types using drivers."""

    def __init__(self):
        # Mapping from network_type name to the loaded DriverManager.
        self.drivers = {}
        LOG.info(_("Configured type driver names: %s"),
                 cfg.CONF.ml2.type_drivers)
        super(TypeManager, self).__init__('neutron.ml2.type_drivers',
                                          cfg.CONF.ml2.type_drivers,
                                          invoke_on_load=True)
        LOG.info(_("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)

    def _register_types(self):
        """Index each loaded extension under the network type it provides."""
        for extension in self:
            type_name = extension.obj.get_type()
            if type_name not in self.drivers:
                self.drivers[type_name] = extension
            else:
                # First registration wins; later drivers for the same type
                # are ignored with an error.
                LOG.error(_("Type driver '%(new_driver)s' ignored because type"
                            " driver '%(old_driver)s' is already registered"
                            " for type '%(type)s'"),
                          {'new_driver': extension.name,
                           'old_driver': self.drivers[type_name].name,
                           'type': type_name})
        LOG.info(_("Registered types: %s"), self.drivers.keys())

    def _check_tenant_network_types(self, types):
        """Record the configured tenant network types, aborting on unknowns."""
        self.tenant_network_types = []
        for network_type in types:
            if network_type not in self.drivers:
                msg = _("No type driver for tenant network_type: %s. "
                        "Service terminated!") % network_type
                LOG.error(msg)
                raise SystemExit(msg)
            self.tenant_network_types.append(network_type)
        LOG.info(_("Tenant network_types: %s"), self.tenant_network_types)

    def initialize(self):
        """Initialize every registered type driver."""
        for type_name, extension in self.drivers.iteritems():
            LOG.info(_("Initializing driver for type '%s'"), type_name)
            extension.obj.initialize()

    def validate_provider_segment(self, segment):
        """Delegate provider segment validation to the matching type driver."""
        network_type = segment[api.NETWORK_TYPE]
        driver = self.drivers.get(network_type)
        if not driver:
            msg = _("network_type value '%s' not supported") % network_type
            raise exc.InvalidInput(error_message=msg)
        driver.obj.validate_provider_segment(segment)

    def reserve_provider_segment(self, session, segment):
        """Reserve a (previously validated) provider segment."""
        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
        driver.obj.reserve_provider_segment(session, segment)

    def allocate_tenant_segment(self, session):
        """Try each tenant network type in order until one yields a segment."""
        for network_type in self.tenant_network_types:
            segment = self.drivers.get(network_type).obj.allocate_tenant_segment(session)
            if segment:
                return segment
        raise exc.NoNetworkAvailable()

    def release_segment(self, session, segment):
        """Release a segment via its type driver, if one is still loaded."""
        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
        # ML2 may have been reconfigured since the segment was created,
        # so a driver may no longer exist for this network_type.
        # REVISIT: network_type-specific db entries may become orphaned
        # if a network is deleted and the driver isn't available to release
        # the segment. This may be fixed with explicit foreign-key references
        # or consistency checks on driver initialization.
        if not driver:
            LOG.error(_("Failed to release segment '%s' because "
                        "network type is not supported."), segment)
            return
        driver.obj.release_segment(session, segment)
class MechanismManager(stevedore.named.NamedExtensionManager):
    """Manage networking mechanisms using drivers."""

    def __init__(self):
        # Registered mechanism drivers, keyed by name.
        self.mech_drivers = {}
        # Ordered list of mechanism drivers, defining
        # the order in which the drivers are called.
        self.ordered_mech_drivers = []
        LOG.info(_("Configured mechanism driver names: %s"),
                 cfg.CONF.ml2.mechanism_drivers)
        super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers',
                                               cfg.CONF.ml2.mechanism_drivers,
                                               invoke_on_load=True,
                                               name_order=True)
        LOG.info(_("Loaded mechanism driver names: %s"), self.names())
        self._register_mechanisms()

    def _register_mechanisms(self):
        """Register all mechanism drivers.

        This method should only be called once in the MechanismManager
        constructor.
        """
        for ext in self:
            self.mech_drivers[ext.name] = ext
            self.ordered_mech_drivers.append(ext)
        LOG.info(_("Registered mechanism drivers: %s"),
                 [driver.name for driver in self.ordered_mech_drivers])

    def initialize(self):
        """Initialize all drivers and compute aggregate bulk support."""
        # For ML2 to support bulk operations, each driver must support them.
        self.native_bulk_support = True
        for driver in self.ordered_mech_drivers:
            LOG.info(_("Initializing mechanism driver '%s'"), driver.name)
            driver.obj.initialize()
            self.native_bulk_support &= getattr(driver.obj,
                                                'native_bulk_support', True)

    def _call_on_drivers(self, method_name, context,
                         continue_on_failure=False):
        """Helper method for calling a method across all mechanism drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param continue_on_failure: whether or not to continue to call
        all mechanism drivers once one has raised an exception
        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver call fails.
        """
        error = False
        for driver in self.ordered_mech_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception:
                LOG.exception(
                    _("Mechanism driver '%(name)s' failed in %(method)s"),
                    {'name': driver.name, 'method': method_name}
                )
                error = True
                if not continue_on_failure:
                    break
        if error:
            raise ml2_exc.MechanismDriverError(
                method=method_name
            )

    def create_network_precommit(self, context):
        """Notify all mechanism drivers during network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_network_precommit", context)

    def create_network_postcommit(self, context):
        """Notify all mechanism drivers after network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the network will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_network_postcommit", context)

    def update_network_precommit(self, context):
        """Notify all mechanism drivers during network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_network_precommit", context)

    def update_network_postcommit(self, context):
        """Notify all mechanism drivers after network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where an error is returned to the user. The
        user is expected to take the appropriate action, whether by
        retrying the call or deleting the network. There is no
        guarantee that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_network_postcommit", context)

    def delete_network_precommit(self, context):
        """Notify all mechanism drivers during network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_network_precommit", context)

    def delete_network_postcommit(self, context):
        """Notify all mechanism drivers after network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        network resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        network.
        """
        self._call_on_drivers("delete_network_postcommit", context,
                              continue_on_failure=True)

    def create_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_precommit", context)

    def create_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the subnet will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_postcommit", context)

    def update_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_subnet_precommit", context)

    def update_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where an error is returned to the user. The
        user is expected to take the appropriate action, whether by
        retrying the call or deleting the subnet. There is no
        guarantee that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_subnet_postcommit", context)

    def delete_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_subnet_precommit", context)

    def delete_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        subnet resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        subnet.
        """
        self._call_on_drivers("delete_subnet_postcommit", context,
                              continue_on_failure=True)

    def create_port_precommit(self, context):
        """Notify all mechanism drivers during port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_port_precommit", context)

    def create_port_postcommit(self, context):
        """Notify all mechanism drivers of port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_postcommit call fails.

        Called after the database transaction. Errors raised by
        mechanism drivers are left to propagate to the caller, where
        the port will be deleted, triggering any required
        cleanup. There is no guarantee that all mechanism drivers are
        called in this case.
        """
        self._call_on_drivers("create_port_postcommit", context)

    def update_port_precommit(self, context):
        """Notify all mechanism drivers during port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_port_precommit", context)

    def update_port_postcommit(self, context):
        """Notify all mechanism drivers after port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where an error is returned to the user. The
        user is expected to take the appropriate action, whether by
        retrying the call or deleting the port. There is no
        guarantee that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_port_postcommit", context)

    def delete_port_precommit(self, context):
        """Notify all mechanism drivers during port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_port_precommit", context)

    def delete_port_postcommit(self, context):
        """Notify all mechanism drivers after port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        port resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        port.
        """
        self._call_on_drivers("delete_port_postcommit", context,
                              continue_on_failure=True)

    def bind_port(self, context):
        """Attempt to bind a port using registered mechanism drivers.

        :param context: PortContext instance describing the port

        Called inside transaction context on session, prior to
        create_port_precommit or update_port_precommit, to
        attempt to establish a port binding. The first driver that
        sets binding.segment wins; remaining drivers are not called.
        """
        binding = context._binding
        LOG.debug(_("Attempting to bind port %(port)s on host %(host)s "
                    "for vnic_type %(vnic_type)s with profile %(profile)s"),
                  {'port': context._port['id'],
                   'host': binding.host,
                   'vnic_type': binding.vnic_type,
                   'profile': binding.profile})
        for driver in self.ordered_mech_drivers:
            try:
                driver.obj.bind_port(context)
                if binding.segment:
                    binding.driver = driver.name
                    # BUG FIX: the "profile" literal previously ended without
                    # ", ", so adjacent-string concatenation ran it straight
                    # into "driver:" in the emitted log message.
                    LOG.debug(_("Bound port: %(port)s, host: %(host)s, "
                                "vnic_type: %(vnic_type)s, "
                                "profile: %(profile)s, "
                                "driver: %(driver)s, vif_type: %(vif_type)s, "
                                "vif_details: %(vif_details)s, "
                                "segment: %(segment)s"),
                              {'port': context._port['id'],
                               'host': binding.host,
                               'vnic_type': binding.vnic_type,
                               'profile': binding.profile,
                               'driver': binding.driver,
                               'vif_type': binding.vif_type,
                               'vif_details': binding.vif_details,
                               'segment': binding.segment})
                    return
            except Exception:
                LOG.exception(_("Mechanism driver %s failed in "
                                "bind_port"),
                              driver.name)
        # No driver produced a binding: record the failure on the port.
        binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
        LOG.warning(_("Failed to bind port %(port)s on host %(host)s"),
                    {'port': context._port['id'],
                     'host': binding.host})
| {
"content_hash": "12b7e9c16318e9d3ea7d92d05b09b1e6",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 79,
"avg_line_length": 45.57051282051282,
"alnum_prop": 0.6352042012472453,
"repo_name": "sajuptpm/neutron-ipam",
"id": "83066f3d850d845def6247bdc57863e9b9553ce8",
"size": "21967",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/plugins/ml2/managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import math, sys
from Quartz import *
import Quartz
def doSimpleRect(context):
    """Paint a single rectangle filled with opaque red."""
    # Opaque red fill (R, G, B, alpha).
    CGContextSetRGBFillColor(context, 1.0, 0.0, 0.0, 1.0)
    # Rectangle with origin (20, 20), 130 wide by 100 tall.
    redRect = CGRectMake(20.0, 20.0, 130.0, 100.0)
    CGContextFillRect(context, redRect)
def doStrokedRect(context):
    """Outline a single rectangle in a light opaque blue."""
    # Light opaque blue stroke color.
    CGContextSetRGBStrokeColor(context, 0.482, 0.62, 0.871, 1.0)
    frame = CGRectMake(20.0, 20.0, 130.0, 100.0)
    # Stroke the outline with a 3-unit-wide line.
    CGContextStrokeRectWithWidth(context, frame, 3.0)
def doStrokedAndFilledRect(context):
    """Demonstrate fill vs. stroke ordering on four copies of a rectangle.

    Draws the same rectangle at four positions: filled only, stroked only,
    filled then stroked, and stroked then filled.
    """
    rect = CGRectMake(20.0, 220.0, 130.0, 100.0)
    # Fill color: light opaque blue; stroke color: opaque green.
    CGContextSetRGBFillColor(context, 0.482, 0.62, 0.871, 1.0)
    CGContextSetRGBStrokeColor(context, 0.404, 0.808, 0.239, 1.0)
    # Rectangle 1: fill only.
    CGContextFillRect(context, rect)
    # Rectangle 2: shift right 200 units, stroke only (10-unit line).
    rect.origin.x += 200.0
    CGContextStrokeRectWithWidth(context, rect, 10.0)
    # Rectangle 3: shift left 200 and down 200 units, fill then stroke.
    rect.origin.x -= 200.0
    rect.origin.y -= 200.0
    CGContextFillRect(context, rect)
    CGContextStrokeRectWithWidth(context, rect, 10.0)
    # Rectangle 4: shift right 200 units, stroke then fill.
    rect.origin.x += 200.0
    CGContextStrokeRectWithWidth(context, rect, 10.0)
    CGContextFillRect(context, rect)
def createRectPath(context, rect):
    """Build a closed four-segment path tracing *rect* into the context."""
    left = rect.origin.x
    bottom = rect.origin.y
    right = left + rect.size.width
    top = bottom + rect.size.height
    CGContextBeginPath(context)
    CGContextMoveToPoint(context, left, bottom)
    CGContextAddLineToPoint(context, right, bottom)   # segment 1: bottom edge
    CGContextAddLineToPoint(context, right, top)      # segment 2: right edge
    CGContextAddLineToPoint(context, left, top)       # segment 3: top edge
    # Closing the path supplies segment 4 (the left edge).
    CGContextClosePath(context)
def doPathRects(context):
    """Draw four rectangles via explicit paths.

    Shows filling, stroking, combined fill+stroke, and stroke-then-fill
    as two separate paths (drawing a path clears it from the context).
    """
    rect = CGRectMake(20.0, 20.0, 130.0, 100.0)
    # Rectangle 1: filled with a light opaque blue.
    createRectPath(context, rect)
    CGContextSetRGBFillColor(context, 0.482, 0.62, 0.871, 1.0)
    CGContextDrawPath(context, kCGPathFill)       # drawing clears the path
    # Rectangle 2: stroked in opaque green, 200 units to the right.
    CGContextTranslateCTM(context, 200.0, 0.0)
    CGContextSetRGBStrokeColor(context, 0.404, 0.808, 0.239, 1.0)
    createRectPath(context, rect)
    CGContextSetLineWidth(context, 10.0)
    CGContextDrawPath(context, kCGPathStroke)
    # Rectangle 3: filled and stroked in a single operation, translated
    # 200 units left and 200 units down (line width is still 10).
    CGContextTranslateCTM(context, -200.0, -200.0)
    createRectPath(context, rect)
    CGContextDrawPath(context, kCGPathFillStroke)
    # Rectangle 4: 200 units to the right; stroke first, then rebuild the
    # path (drawing cleared it) and fill.
    CGContextTranslateCTM(context, 200.0, 0.0)
    createRectPath(context, rect)
    CGContextDrawPath(context, kCGPathStroke)
    createRectPath(context, rect)
    CGContextDrawPath(context, kCGPathFill)
def doAlphaRects(context):
    """Draw a fan of six rotated red rectangles with decreasing tint/alpha."""
    rect = CGRectMake(0.0, 0.0, 130.0, 100.0)
    numRects = 6
    angleStep = 2 * math.pi / numRects
    tintStep = 1.0 / numRects
    # Move the origin so the rotated fan is drawn away from the corner.
    CGContextTranslateCTM(context, 2 * rect.size.width, 2 * rect.size.height)
    tint = 1.0
    for _ in range(numRects):
        # The tint doubles as the alpha component, so successive rects are
        # both darker-red-free and more transparent.
        CGContextSetRGBFillColor(context, tint, 0.0, 0.0, tint)
        CGContextFillRect(context, rect)
        # Rotations accumulate in the CTM, producing the fan.
        CGContextRotateCTM(context, angleStep)
        tint -= tintStep
def drawStrokedLine(context, start, end):
    """Stroke a single straight segment from *start* to *end* using the
    context's current stroke color, line width, and dash settings."""
    CGContextBeginPath(context)
    CGContextMoveToPoint(context, start.x, start.y)
    CGContextAddLineToPoint(context, end.x, end.y)
    CGContextDrawPath(context, kCGPathStroke)
def doDashedLines(context):
    """Stroke six horizontal lines, each 50 units below the previous one,
    cycling through several dash patterns and finishing with a solid line."""
    lengths = ( 12.0, 6.0, 5.0, 6.0, 5.0, 6.0 )
    start = CGPoint(20.0, 270.0)
    end = CGPoint(300.0, 270.0)
    # ***** Line 1: solid line, 5 units wide *****
    CGContextSetLineWidth(context, 5.0)
    drawStrokedLine(context, start, end)
    # Lines 2-6 as (dash phase, dash pattern, number of pattern entries).
    # A pattern of None with 0 entries resets the dash to a solid stroke.
    dash_configs = (
        (0.0, lengths, 2),                      # long dashes
        (0.0, lengths, 4),                      # long-short pattern
        (0.0, lengths, 6),                      # long-short-short pattern
        (lengths[0] + lengths[1], lengths, 6),  # short-short-long (phase shifted)
        (0, None, 0),                           # back to a solid line
    )
    for phase, pattern, entry_count in dash_configs:
        CGContextTranslateCTM(context, 0.0, -50.0)
        CGContextSetLineDash(context, phase, pattern, entry_count)
        drawStrokedLine(context, start, end)
def doClippedCircle(context):
    """Draw a filled circle next to a stroked square, then draw the same
    circle again with the context clipped to the square's rectangle, to
    show the effect of clipping."""
    circleCenter = CGPoint(150.0, 150.0)
    circleRadius = 100.0
    startingAngle = 0.0
    endingAngle = 2*math.pi
    ourRect = CGRectMake(65.0, 65.0, 170.0, 170.0)
    # ***** Filled Circle *****
    CGContextSetRGBFillColor(context, 0.663, 0., 0.031, 1.0)
    CGContextBeginPath(context)
    # Construct the circle path counterclockwise.
    CGContextAddArc(context, circleCenter.x,
                        circleCenter.y, circleRadius,
                        startingAngle, endingAngle, 0)
    CGContextDrawPath(context, kCGPathFill)
    # ***** Stroked Square *****
    CGContextStrokeRect(context, ourRect)
    # Translate so that the next drawing doesn't overlap what
    # has already been drawn.
    CGContextTranslateCTM(context, ourRect.size.width + circleRadius + 5.0, 0)
    # Create a rectangular path and clip to that path. Subsequent drawing
    # is limited to the intersection with this rect.
    CGContextBeginPath(context)
    CGContextAddRect(context, ourRect)
    CGContextClip(context)
    # ***** Clipped Circle *****
    CGContextBeginPath(context)
    # Construct the circle path counterclockwise.
    CGContextAddArc (context, circleCenter.x,
                        circleCenter.y, circleRadius,
                        startingAngle, endingAngle, 0)
    CGContextDrawPath(context, kCGPathFill)
def doPDFDocument(context, url):
    """Render page 1 of the PDF at *url* three times: at half scale, then
    stretched 1.5x vertically, then mirrored about the x axis."""
    document = CGPDFDocumentCreateWithURL(url)
    if document is None:
        print >>sys.stderr, "Can't create PDF document for URL!"
        return
    CGContextScaleCTM(context, .5, .5)
    # The media box is the bounding box of the PDF document.
    box = CGPDFDocumentGetMediaBox(document, 1)  # page 1
    # Anchor the destination rect at the Quartz origin.
    box.origin.x = box.origin.y = 0.
    # First copy: page 1 at the current (half) scale.
    CGContextDrawPDFDocument(context, box, document, 1)
    CGContextTranslateCTM(context, box.size.width*1.2, 0)
    # Second copy: stretch the y axis to 1.5 times the x scale.
    CGContextScaleCTM(context, 1, 1.5)
    CGContextDrawPDFDocument(context, box, document, 1)
    CGContextTranslateCTM(context, box.size.width*1.2, box.size.height)
    # Third copy: flip the y coordinate axis about the x axis.
    CGContextScaleCTM(context, 1, -1)
    CGContextDrawPDFDocument(context, box, document, 1)
| {
"content_hash": "ddfa8137b09db282a563d3b4995844c4",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 83,
"avg_line_length": 39.21238938053097,
"alnum_prop": 0.6566237869555405,
"repo_name": "albertz/music-player",
"id": "8758e215be44ba3d7957b8725df9d1fd0fa7194a",
"size": "8862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Quartz/Examples/Programming with Quartz/BasicDrawing/DrawingBasics.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
} |
""" A unittest script for the IHMPSession module. """
import unittest
from cutlass import iHMPSession
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class IHMPSessionTest(unittest.TestCase):
    """ A unit test class for the IHMPSession module. """

    # Credentials used for every session created by these tests.
    username = "test"
    password = "test"

    util = None

    @classmethod
    def setUpClass(cls):
        """ Setup for the unittest. """
        cls.util = CutlassTestUtil()

    def _make_session(self):
        """ Return a fresh iHMPSession built with the test credentials. """
        return iHMPSession(IHMPSessionTest.username, IHMPSessionTest.password)

    def _check_factory(self, method_name, class_name):
        """
        Invoke the named create_XXX() factory method on a new session and
        verify it returns a non-None instance of the named cutlass class.

        :param str method_name: name of the session factory method to call
        :param str class_name: name of the expected cutlass class
        """
        import cutlass
        session = self._make_session()
        instance = getattr(session, method_name)()
        self.assertIsNotNone(instance)
        self.assertIsInstance(instance, getattr(cutlass, class_name))

    def testCreateSession(self):
        """ Test the constructor for creating sessions. """
        success = False
        session = None

        try:
            session = self._make_session()
            success = True
        except Exception:
            pass

        self.assertTrue(success)
        self.assertIsNotNone(session)

    def testUsername(self):
        """ Test the username property. """
        session = self._make_session()

        self.util.stringTypeTest(self, session, "username")
        self.util.stringPropertyTest(self, session, "username")

    def testPassword(self):
        """ Test the password property. """
        session = self._make_session()

        self.util.stringTypeTest(self, session, "password")
        self.util.stringPropertyTest(self, session, "password")

    def testPort(self):
        """ Test the port property. """
        session = self._make_session()

        self.util.intTypeTest(self, session, "port")
        self.util.intPropertyTest(self, session, "port")

    def testSSL(self):
        """ Test the ssl property. """
        session = self._make_session()

        self.util.boolTypeTest(self, session, "ssl")
        self.util.boolPropertyTest(self, session, "ssl")

    def testCreate16SDnaPrep(self):
        """ Test the create_16s_dna_prep() method. """
        self._check_factory("create_16s_dna_prep", "SixteenSDnaPrep")

    def testCreate16SRawSeqSet(self):
        """ Test the create_16s_raw_seq_set() method. """
        self._check_factory("create_16s_raw_seq_set", "SixteenSRawSeqSet")

    def testCreate16STrimmedSeqSet(self):
        """ Test the create_16s_trimmed_seq_set() method. """
        self._check_factory("create_16s_trimmed_seq_set", "SixteenSTrimmedSeqSet")

    def testCreateAbundanceMatrix(self):
        """ Test the create_abundance_matrix() method. """
        self._check_factory("create_abundance_matrix", "AbundanceMatrix")

    def testCreateAnnotation(self):
        """ Test the create_annotation() method. """
        self._check_factory("create_annotation", "Annotation")

    def testCreateClusteredSeqSet(self):
        """ Test the create_clustered_seq_set() method. """
        self._check_factory("create_clustered_seq_set", "ClusteredSeqSet")

    def testCreateCytokine(self):
        """ Test the create_cytokine() method. """
        self._check_factory("create_cytokine", "Cytokine")

    def testCreateHostAssayPrep(self):
        """ Test the create_host_assay_prep() method. """
        self._check_factory("create_host_assay_prep", "HostAssayPrep")

    def testCreateHostSeqPrep(self):
        """ Test the create_host_seq_prep() method. """
        self._check_factory("create_host_seq_prep", "HostSeqPrep")

    def testCreateHostTranscriptomicsRawSeqSet(self):
        """ Test the create_host_transcriptomics_raw_seq_set() method. """
        self._check_factory("create_host_transcriptomics_raw_seq_set",
                            "HostTranscriptomicsRawSeqSet")

    def testCreateHostWgsRawSeqSet(self):
        """ Test the create_host_wgs_raw_seq_set() method. """
        self._check_factory("create_host_wgs_raw_seq_set", "HostWgsRawSeqSet")

    def testCreateMetabolome(self):
        """ Test the create_metabolome() method. """
        self._check_factory("create_metabolome", "Metabolome")

    def testCreateMicrobTranscriptomicsRawSeqSet(self):
        """ Test the create_microb_transcriptomics_raw_seq_set() method. """
        self._check_factory("create_microb_transcriptomics_raw_seq_set",
                            "MicrobTranscriptomicsRawSeqSet")

    def testCreateMicrobiomeAssayPrep(self):
        """ Test the create_microbiome_assay_prep() method. """
        self._check_factory("create_microbiome_assay_prep", "MicrobiomeAssayPrep")

    def testCreateProject(self):
        """ Test the create_project() method. """
        self._check_factory("create_project", "Project")

    def testCreateProteome(self):
        """ Test the create_proteome() method. """
        self._check_factory("create_proteome", "Proteome")

    def testCreateSample(self):
        """ Test the create_sample() method. """
        self._check_factory("create_sample", "Sample")

    def testCreateSerology(self):
        """ Test the create_serology() method. """
        self._check_factory("create_serology", "Serology")

    def testCreateSubject(self):
        """ Test the create_subject() method. """
        self._check_factory("create_subject", "Subject")

    def testCreateSubjectAttribute(self):
        """ Test the create_subject_attr() method. """
        self._check_factory("create_subject_attr", "SubjectAttribute")

    def testCreateStudy(self):
        """ Test the create_study() method. """
        self._check_factory("create_study", "Study")

    def testCreateVisit(self):
        """ Test the create_visit() method. """
        self._check_factory("create_visit", "Visit")

    def testCreateVisitAttribute(self):
        """ Test the create_visit_attr() method. """
        self._check_factory("create_visit_attr", "VisitAttribute")

    def testWgsAssembledSeqSet(self):
        """ Test the create_wgs_assembled_seq_set() method. """
        self._check_factory("create_wgs_assembled_seq_set", "WgsAssembledSeqSet")

    def testWgsDnaPrep(self):
        """ Test the create_wgs_dna_prep() method. """
        self._check_factory("create_wgs_dna_prep", "WgsDnaPrep")

    def testWgsRawSeqSet(self):
        """ Test the create_wgs_raw_seq_set() method. """
        self._check_factory("create_wgs_raw_seq_set", "WgsRawSeqSet")

    def testCreateObjectMethods(self):
        """
        Test the generic create_object() dispatcher with every supported
        node type name.
        """
        session = self._make_session()

        node_types = [
            "16s_dna_prep", "16s_raw_seq_set", "16s_trimmed_seq_set",
            "annotation", "abundance_matrix", "clustered_seq_set",
            "cytokine", "host_assay_prep", "host_epigenetics_raw_seq_set",
            "host_seq_prep", "host_transcriptomics_raw_seq_set",
            "host_wgs_raw_seq_set", "lipidome", "metabolome",
            "microbiome_assay_prep", "microb_transcriptomics_raw_seq_set",
            "project", "proteome", "sample", "sample_attr", "serology",
            "study", "subject", "subject_attr", "viral_seq_set", "visit",
            "visit_attr", "wgs_assembled_seq_set", "wgs_raw_seq_set",
            "wgs_dna_prep"
        ]

        for node_type in node_types:
            instance = session.create_object(node_type)
            self.assertIsNotNone(instance)

if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c196d8005e58f5d52d48220f98c351d3",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 85,
"avg_line_length": 38.76993865030675,
"alnum_prop": 0.6727589208006962,
"repo_name": "ihmpdcc/cutlass",
"id": "735ba78560432d4ab5fee75802f9dd75d15a4be3",
"size": "12662",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ihmp_session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1322642"
}
],
"symlink_target": ""
} |
from django.db import transaction
import mailchimp
from framework import sentry
from framework.celery_tasks import app
from framework.celery_tasks.handlers import queued_task
from framework.auth.signals import user_confirmed
from osf.models import OSFUser as User
from website import settings
def get_mailchimp_api():
    """Return a Mailchimp client, raising if no API key is configured."""
    api_key = settings.MAILCHIMP_API_KEY
    if api_key:
        return mailchimp.Mailchimp(api_key)
    raise mailchimp.InvalidApiKeyError(
        'An API key is required to connect to Mailchimp.'
    )
def get_list_id_from_name(list_name):
    """Look up the Mailchimp id of the mailing list with the given name."""
    api = get_mailchimp_api()
    matches = api.lists.list(filters={'list_name': list_name})
    return matches['data'][0]['id']
def get_list_name_from_id(list_id):
    """Look up the name of the Mailchimp mailing list with the given id."""
    api = get_mailchimp_api()
    matches = api.lists.list(filters={'list_id': list_id})
    return matches['data'][0]['name']
@queued_task
@app.task
@transaction.atomic
def subscribe_mailchimp(list_name, user_id):
    """Subscribe a user to a mailchimp mailing list given its name.

    Runs as a queued celery task inside a database transaction and records
    the outcome (True/False) in ``user.mailchimp_mailing_lists``.

    :param str list_name: mailchimp mailing list name
    :param str user_id: id of the user to subscribe
    """
    user = User.load(user_id)
    m = get_mailchimp_api()
    list_id = get_list_id_from_name(list_name=list_name)

    if user.mailchimp_mailing_lists is None:
        user.mailchimp_mailing_lists = {}

    try:
        m.lists.subscribe(
            id=list_id,
            email={'email': user.username},
            merge_vars={
                'fname': user.given_name,
                'lname': user.family_name,
            },
            # No confirmation email; update the entry if already subscribed.
            double_optin=False,
            update_existing=True,
        )
    except mailchimp.ValidationError as error:
        # Report the failure and mark the list as not subscribed.
        sentry.log_exception()
        sentry.log_message(error.message)
        user.mailchimp_mailing_lists[list_name] = False
    else:
        user.mailchimp_mailing_lists[list_name] = True
    finally:
        # Persist the recorded subscription state in either case.
        user.save()
def unsubscribe_mailchimp(list_name, user_id, username=None, send_goodbye=True):
    """Unsubscribe a user from a mailchimp mailing list given its name.

    :param str list_name: mailchimp mailing list name
    :param str user_id: current user's id
    :param str username: current user's email (required for merged users)
    :param bool send_goodbye: whether Mailchimp sends a goodbye email
    :raises: ListNotSubscribed if user not already subscribed
    """
    user = User.load(user_id)
    m = get_mailchimp_api()
    list_id = get_list_id_from_name(list_name=list_name)
    m.lists.unsubscribe(id=list_id, email={'email': username or user.username}, send_goodbye=send_goodbye)

    # Update mailing_list user field
    if user.mailchimp_mailing_lists is None:
        user.mailchimp_mailing_lists = {}
        user.save()

    # NOTE(review): the save above followed by another save below looks
    # deliberate (persist the freshly created dict, then persist the flag)
    # -- confirm before collapsing into a single save.
    user.mailchimp_mailing_lists[list_name] = False
    user.save()
@queued_task
@app.task
@transaction.atomic
def unsubscribe_mailchimp_async(list_name, user_id, username=None, send_goodbye=True):
    """ Same args as unsubscribe_mailchimp, used to have the task be run asynchronously
    (queued as a celery task inside a database transaction).
    """
    unsubscribe_mailchimp(list_name=list_name, user_id=user_id, username=username, send_goodbye=send_goodbye)
@user_confirmed.connect
def subscribe_on_confirm(user):
    """Signal handler: subscribe a newly confirmed user to the general
    OSF mailing list, if email subscriptions are enabled in settings."""
    # Subscribe user to general OSF mailing list upon account confirmation
    if settings.ENABLE_EMAIL_SUBSCRIPTIONS:
        subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)
| {
"content_hash": "85c47bb86b6887158643f7d740a50368",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 109,
"avg_line_length": 32.04040404040404,
"alnum_prop": 0.6815889029003783,
"repo_name": "Nesiehr/osf.io",
"id": "8ab8fa36e4239b2e2bc46d34ce3a1e1d357ae6cb",
"size": "3197",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "website/mailchimp_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "215077"
},
{
"name": "JavaScript",
"bytes": "1699002"
},
{
"name": "Mako",
"bytes": "650031"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7928034"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2007-2008, Dj Gilcrease
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from base import Job, cronScheduler
def autodiscover():
    """
    Auto-discover INSTALLED_APPS cron.py modules and fail silently when
    not present. This forces an import on them to register any cron jobs they
    may want. Once all apps have been scanned, start the cron scheduler.
    """
    import imp
    from django.conf import settings

    for app in settings.INSTALLED_APPS:
        # For each app, we need to look for an cron.py inside that app's
        # package. We can't use os.path here -- recall that modules may be
        # imported different ways (think zip files) -- so we need to get
        # the app's __path__ and look for cron.py on that path.

        # Step 1: find out the app's __path__ Import errors here will (and
        # should) bubble up, but a missing __path__ (which is legal, but weird)
        # fails silently -- apps that do weird things with __path__ might
        # need to roll their own cron registration.
        try:
            app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
        except AttributeError:
            continue

        # Step 2: use imp.find_module to find the app's cron.py. For some
        # reason imp.find_module raises ImportError if the app can't be found
        # but doesn't actually try to import the module. So skip this app if
        # its cron.py doesn't exist
        try:
            imp.find_module('cron', app_path)
        except ImportError:
            continue

        # Step 3: import the app's cron file. If this has errors we want them
        # to bubble up.
        __import__("%s.cron" % app)
    # Step 4: once we find all the cron jobs, start the cronScheduler
    cronScheduler.execute()
"content_hash": "8492cf50017e7eff784fdfbe0cc8b3ba",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 40.73015873015873,
"alnum_prop": 0.7381137957911146,
"repo_name": "TheSundar/django-cron",
"id": "b104dda0688d38cd9f6271fd1777160610d3df7d",
"size": "2566",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "django_cron/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9454"
}
],
"symlink_target": ""
} |
import itertools
from ..lang import TypedFunc
from ..lang import Typeclass
from ..lang import is_builtin
from ..lang import build_instance
from ..lang import List
from ..lang import L
from ..lang import H
from ..lang import sig
from ..lang import t
from ..lang import instance
class Functor(Typeclass):
    """
    The Functor class is used for types that can be mapped over. Instances of
    Functor should satisfy the following laws:

    fmap(id) == id
    fmap(f * g) == fmap(f * (fmap g))

    Attributes:
        fmap, __rmul__

    Minimal complete definition:
        fmap
    """
    @classmethod
    def make_instance(typeclass, cls, fmap):
        # Attach the typed signature to the supplied fmap via the DSL:
        # fmap :: Functor f => (a -> b) -> f a -> f b
        fmap = fmap ** \
            (H[(Functor, "f")]/ (H/ "a" >> "b") >> t("f", "a") >> t("f", "b"))
        if not is_builtin(cls):
            # Non-builtin instances also get the `f * x` infix sugar.
            cls.__rmul__ = lambda x, f: fmap(f, x)
        build_instance(Functor, cls, {"fmap":fmap})
        return
@sig(H[(Functor, "f")]/ (H/ "a" >> "b") >> t("f", "a") >> t("f", "b"))
def fmap(f, x):
    """fmap :: Functor f => (a -> b) -> f a -> f b

    Apply *f* inside the functor *x*, dispatching to the Functor instance
    registered for x's type.
    """
    return Functor[x].fmap(f, x)
# List is a Functor: fmap maps the function lazily over the list
# (itertools.imap is the Python 2 lazy map).
instance(Functor, List).where(
    fmap = lambda fn, lst: L[itertools.imap(fn, iter(lst))]
)

# Typed functions are Functors: fmap is function composition.
instance(Functor, TypedFunc).where(
    fmap = TypedFunc.__mul__
)
| {
"content_hash": "176b0873d5572b5738c2b41498dd9e07",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 24.408163265306122,
"alnum_prop": 0.5852842809364549,
"repo_name": "shaunstanislaus/hask",
"id": "372265754c19220f6d006a14405988372e6caf26",
"size": "1196",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "hask/Data/Functor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "277301"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import datetime
import logging
import numbers
import pygst
pygst.require('0.10')
import gst # noqa
from mopidy import compat, httpclient
from mopidy.models import Album, Artist, Track
logger = logging.getLogger(__name__)
def calculate_duration(num_samples, sample_rate):
    """Determine duration of samples using GStreamer helper for precise
    math (scales num_samples by gst.SECOND / sample_rate in integer
    arithmetic, avoiding float rounding)."""
    return gst.util_uint64_scale(num_samples, gst.SECOND, sample_rate)
def create_buffer(data, capabilites=None, timestamp=None, duration=None):
    """Create a new GStreamer buffer based on provided data.

    Mainly intended to keep gst imports out of non-audio modules.

    :param data: raw data to wrap in the buffer.
    :param capabilites: optional caps, either a caps object or a caps string
        to be parsed. (Parameter name kept as-is for API compatibility.)
    :param timestamp: optional buffer timestamp in GStreamer clock time.
    :param duration: optional buffer duration in GStreamer clock time.
    """
    buffer_ = gst.Buffer(data)
    if capabilites:
        if isinstance(capabilites, compat.string_types):
            capabilites = gst.caps_from_string(capabilites)
        buffer_.set_caps(capabilites)
    # Compare against None explicitly: a timestamp or duration of 0 is a
    # valid GStreamer clock time and must not be silently dropped.
    if timestamp is not None:
        buffer_.timestamp = timestamp
    if duration is not None:
        buffer_.duration = duration
    return buffer_
def millisecond_to_clocktime(value):
    """Convert a millisecond time to internal GStreamer time
    (nanosecond-based clock time)."""
    return value * gst.MSECOND
def clocktime_to_millisecond(value):
    """Convert an internal GStreamer time to millisecond time
    (integer floor division; sub-millisecond precision is discarded)."""
    return value // gst.MSECOND
def supported_uri_schemes(uri_schemes):
    """Determine which URIs we can actually support from provided whitelist.

    :param uri_schemes: list/set of URIs to check support for.
    :type uri_schemes: list or set or URI schemes as strings.
    :rtype: set of URI schemes we can support via this GStreamer install.
    """
    registry = gst.registry_get_default()
    factories = registry.get_feature_list(gst.TYPE_ELEMENT_FACTORY)
    # Collect every whitelisted scheme that at least one element factory
    # registered with GStreamer can handle.
    return {scheme
            for factory in factories
            for scheme in factory.get_uri_protocols()
            if scheme in uri_schemes}
def _artists(tags, artist_name, artist_id=None):
    """Build a list of Artist models from the named tag, attaching a
    musicbrainz id only when there is exactly one artist name."""
    names = tags.get(artist_name)
    # No names at all: don't set the artist field.
    if not names:
        return None
    # Exactly one name with an available id: pair them up.
    if len(names) == 1 and artist_id in tags:
        return [Artist(name=names[0], musicbrainz_id=tags[artist_id][0])]
    # Several names (or no id): plain artists without ids.
    return [Artist(name=name) for name in names]
# TODO: split based on "stream" and "track" based conversion? i.e. handle data
# from radios in it's own helper instead?
def convert_tags_to_track(tags):
    """Convert our normalized tags to a track.

    :param tags: dictionary of tag keys with a list of values
    :type tags: :class:`dict`
    :rtype: :class:`mopidy.models.Track`
    """
    album_kwargs = {}
    track_kwargs = {}

    track_kwargs['composers'] = _artists(tags, gst.TAG_COMPOSER)
    track_kwargs['performers'] = _artists(tags, gst.TAG_PERFORMER)
    track_kwargs['artists'] = _artists(
        tags, gst.TAG_ARTIST, 'musicbrainz-artistid')
    album_kwargs['artists'] = _artists(
        tags, gst.TAG_ALBUM_ARTIST, 'musicbrainz-albumartistid')

    # Multi-valued string tags are joined into a single '; '-separated string.
    track_kwargs['genre'] = '; '.join(tags.get(gst.TAG_GENRE, []))
    track_kwargs['name'] = '; '.join(tags.get(gst.TAG_TITLE, []))
    # Fall back to ORGANIZATION when no TITLE is present.
    if not track_kwargs['name']:
        track_kwargs['name'] = '; '.join(tags.get(gst.TAG_ORGANIZATION, []))

    # Comment falls back through comment -> location -> copyright.
    track_kwargs['comment'] = '; '.join(tags.get('comment', []))
    if not track_kwargs['comment']:
        track_kwargs['comment'] = '; '.join(tags.get(gst.TAG_LOCATION, []))
    if not track_kwargs['comment']:
        track_kwargs['comment'] = '; '.join(tags.get(gst.TAG_COPYRIGHT, []))

    # Single-valued numeric/id tags: take the first value if present.
    track_kwargs['track_no'] = tags.get(gst.TAG_TRACK_NUMBER, [None])[0]
    track_kwargs['disc_no'] = tags.get(gst.TAG_ALBUM_VOLUME_NUMBER, [None])[0]
    track_kwargs['bitrate'] = tags.get(gst.TAG_BITRATE, [None])[0]
    track_kwargs['musicbrainz_id'] = tags.get('musicbrainz-trackid', [None])[0]

    album_kwargs['name'] = tags.get(gst.TAG_ALBUM, [None])[0]
    album_kwargs['num_tracks'] = tags.get(gst.TAG_TRACK_COUNT, [None])[0]
    album_kwargs['num_discs'] = tags.get(gst.TAG_ALBUM_VOLUME_COUNT, [None])[0]
    album_kwargs['musicbrainz_id'] = tags.get('musicbrainz-albumid', [None])[0]

    if tags.get(gst.TAG_DATE) and tags.get(gst.TAG_DATE)[0]:
        track_kwargs['date'] = tags[gst.TAG_DATE][0].isoformat()

    # Clear out any empty values we found
    track_kwargs = {k: v for k, v in track_kwargs.items() if v}
    album_kwargs = {k: v for k, v in album_kwargs.items() if v}

    # Only bother with album if we have a name to show.
    if album_kwargs.get('name'):
        track_kwargs['album'] = Album(**album_kwargs)

    return Track(**track_kwargs)
def setup_proxy(element, config):
    """Configure a GStreamer element with proxy settings.

    No-op when the element has no ``proxy`` property or when no proxy
    hostname is configured.

    :param element: element to setup proxy in.
    :type element: :class:`gst.GstElement`
    :param config: proxy settings to use.
    :type config: :class:`dict`
    """
    if not hasattr(element.props, 'proxy') or not config.get('hostname'):
        return

    # Credentials are passed separately, so the URI itself is built
    # without auth information.
    element.set_property('proxy', httpclient.format_proxy(config, auth=False))
    element.set_property('proxy-id', config.get('username'))
    element.set_property('proxy-pw', config.get('password'))
def convert_taglist(taglist):
    """Convert a :class:`gst.Taglist` to plain Python types.

    Knows how to convert:

    - Dates
    - Buffers
    - Numbers
    - Strings
    - Booleans

    Unknown types will be ignored and debug logged. Tag keys are all strings
    defined as part GStreamer under GstTagList_.

    .. _GstTagList: http://gstreamer.freedesktop.org/data/doc/gstreamer/\
0.10.36/gstreamer/html/gstreamer-GstTagList.html

    :param taglist: A GStreamer taglist to be converted.
    :type taglist: :class:`gst.Taglist`
    :rtype: dictionary of tag keys with a list of values.
    """
    result = {}

    # Taglists are not really dicts, hence the lack of .items() and
    # explicit use of .keys()
    for key in taglist.keys():
        result.setdefault(key, [])

        # A tag may carry one value or a list of values; normalize to a list.
        values = taglist[key]
        if not isinstance(values, list):
            values = [values]

        for value in values:
            if isinstance(value, gst.Date):
                try:
                    date = datetime.date(value.year, value.month, value.day)
                    result[key].append(date)
                except ValueError:
                    # gst.Date fields may hold out-of-range components.
                    logger.debug('Ignoring invalid date: %r = %r', key, value)
            elif isinstance(value, gst.Buffer):
                result[key].append(bytes(value))
            elif isinstance(value, (basestring, bool, numbers.Number)):
                result[key].append(value)
            else:
                logger.debug('Ignoring unknown data: %r = %r', key, value)

    return result
| {
"content_hash": "fa2e4b9a56605914e4eaf38b3370faee",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 79,
"avg_line_length": 34.90816326530612,
"alnum_prop": 0.6476176556562409,
"repo_name": "rawdlite/mopidy",
"id": "3b9ea30fa0ae204ad2e97bf91eeda7f7261171c3",
"size": "6842",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "mopidy/audio/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "Groff",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "JavaScript",
"bytes": "82060"
},
{
"name": "Python",
"bytes": "1108001"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
import numpy as np
import hyperchamber as hc
import inspect
import nashpy as nash
import hypergan as hg
import hyperchamber as hc
import sys
import gc
import os
import random
from hypergan.trainers.base_trainer import BaseTrainer
TINY = 1e-12
class CurriculumTrainer(BaseTrainer):
    """Trainer that delegates stepping to an inner trainer and, at
    configured step counts, tears down the current GAN and rebuilds a new
    one from the next configuration in the curriculum.

    ``config.curriculum`` is read as a sequence of
    ``(transition_step, config_name)`` pairs.
    """
    def create(self):
        # Curriculum schedule and position, plus the trainer that does the
        # actual optimization steps.
        self.curriculum = self.config.curriculum
        self.curriculum_index = 0
        self._delegate = self.gan.create_component(self.config.delegate)
    def variables(self):
        """Expose the delegate trainer's variables."""
        return self._delegate.variables()
    def required(self):
        """No required config keys beyond what the delegate validates."""
        return []
    def step(self, feed_dict):
        """Run one delegate step; on reaching the current transition step,
        save, advance the curriculum, and rebuild the GAN from the next
        config (or exit when the curriculum is exhausted and not cycling).
        """
        gan = self.gan
        sess = gan.session
        config = self.config
        self._delegate.step(feed_dict)
        transition_step = self.curriculum[self.curriculum_index][0]
        self.current_step += 1
        if (self.current_step-1) == transition_step:
            # Checkpoint before switching configurations.
            gan.save("saves/curriculum")
            self.curriculum_index+=1
            if self.config.cycle:
                # Wrap around instead of terminating.
                self.curriculum_index = self.curriculum_index % len(self.curriculum)
            if self.curriculum_index == len(self.curriculum):
                # Curriculum exhausted (non-cycling): save and shut down.
                # NOTE(review): `tf` is not imported in this module's visible
                # imports -- this line looks like it would raise NameError;
                # confirm tensorflow is imported elsewhere.
                print("End of curriculum")
                gan.save("saves/curriculum")
                gan.session.close()
                tf.reset_default_graph()
                sys.exit()
            print("Loading index", self.curriculum_index, self.curriculum, self.curriculum[self.curriculum_index])
            # Tear down the old session/graph before building the new GAN.
            gan.session.close()
            tf.reset_default_graph()
            config_name = self.curriculum[self.curriculum_index][1]
            newconfig_file = hg.Configuration.find(config_name+'.json')
            if newconfig_file is None:
                print("Could not find file ", config_name+".json")
                # NOTE(review): raising a plain string is a TypeError in
                # Python 3; presumably an Exception subclass was intended.
                raise("missing file")
            print("=> Loading config file", newconfig_file)
            newconfig = hc.Selector().load(newconfig_file)
            if 'inherit' in newconfig:
                # Merge the parent config under the new config's overrides.
                base_filename = hg.Configuration.find(newconfig['inherit']+'.json')
                base_config = hc.Selector().load(base_filename)
                newconfig = hc.Config({**base_config, **newconfig})
            inputs = hg.inputs.image_loader.ImageLoader(newconfig.runtime['batch_size'])
            inputs.create(gan.args.directory,
                          channels=newconfig.runtime['channels'],
                          format=gan.args.format,
                          crop=gan.args.crop,
                          width=newconfig.runtime['width'],
                          height=newconfig.runtime['height'],
                          resize=gan.args.resize)
            # Build the replacement GAN and carry curriculum state over so
            # its trainer resumes from the same schedule position.
            newgan = gan.config['class'](config=newconfig, inputs=inputs)
            newgan.args = gan.args
            newgan.cli = self.gan.cli
            newgan.name=config_name
            newgan.trainer.curriculum= self.curriculum
            newgan.trainer.curriculum_index= self.curriculum_index
            newgan.trainer.config.cycle = self.config.cycle
            newgan.cli.sampler = None
            gan.cli.sampler = None
            # Mark the old GAN for destruction and hand over the new one.
            gan.destroy=True
            gan.newgan=newgan
            gan=None
            gc.collect()
            newgan.load("saves/curriculum")
| {
"content_hash": "33b1eee6e71a90d24d22ce04b80ba43f",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 114,
"avg_line_length": 34.65217391304348,
"alnum_prop": 0.5912797992471769,
"repo_name": "255BITS/HyperGAN",
"id": "185997a5ee71af79c131924cae779ce53096079c",
"size": "3188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hypergan/trainers/needs_pytorch/curriculum_trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204346"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
MRX (XML for MRS) serialization and deserialization.
"""
import io
import re
from pathlib import Path
import xml.etree.ElementTree as etree
from delphin.mrs import MRS, EP, HCons, ICons, CONSTANT_ROLE
from delphin import predicate
from delphin.lnk import Lnk
from delphin import variable
from delphin.sembase import role_priority, property_priority
# Metadata describing this codec: it (de)serializes the MRS representation.
CODEC_INFO = {
    'representation': 'mrs',
}

# Document wrappers used when serializing a corpus of MRSs as one document.
HEADER = '<mrs-list>'
JOINER = ''
FOOTER = '</mrs-list>'
##############################################################################
##############################################################################
# Pickle-API methods
def load(source):
    """
    Deserialize MRX from a file (handle or filename)

    Args:
        source (str, file): input filename or file object
    Returns:
        a list of MRS objects
    """
    if not hasattr(source, 'read'):
        path = Path(source).expanduser()
        with path.open() as fh:
            return list(_decode(fh))
    return list(_decode(source))
def loads(s):
    """
    Deserialize MRX string representations

    Args:
        s (str): an MRX string
    Returns:
        a list of MRS objects
    """
    return list(_decode(io.StringIO(s)))
def dump(ms, destination, properties=True, lnk=True,
         indent=False, encoding='utf-8'):
    """
    Serialize MRS objects to MRX and write to a file

    Args:
        ms: an iterator of MRS objects to serialize
        destination: filename or file object where data will be written
        properties: if `False`, suppress morphosemantic properties
        lnk: if `False`, suppress surface alignments and strings
        indent (bool, int): if `True` or an integer value, add
            newlines and indentation
        encoding (str): if *destination* is a filename, write to the
            file with the given encoding; otherwise it is ignored
    """
    text = dumps(ms, properties=properties, lnk=lnk, indent=indent)
    if not hasattr(destination, 'write'):
        path = Path(destination).expanduser()
        with path.open('w', encoding=encoding) as fh:
            print(text, file=fh)
    else:
        print(text, file=destination)
def dumps(ms, properties=True, lnk=True, indent=False):
    """
    Serialize MRS objects to an MRX representation

    Args:
        ms: an iterator of MRS objects to serialize
        properties: if `False`, suppress variable properties
        lnk: if `False`, suppress surface alignments and strings
        indent (bool, int): if `True` or an integer value, add
            newlines and indentation
    Returns:
        an MRX string representation of a corpus of MRS objects
    """
    return _tostring(_encode(ms, properties, lnk), indent, 1)
def decode(s):
    """
    Deserialize an MRS object from an MRX string.
    """
    return _decode_mrs(etree.fromstring(s))
def encode(m, properties=True, lnk=True, indent=False):
    """
    Serialize a MRS object to an MRX string.

    Args:
        m: an MRS object
        properties (bool): if `False`, suppress variable properties
        lnk: if `False`, suppress surface alignments and strings
        indent (bool, int): if `True` or an integer value, add
            newlines and indentation
    Returns:
        an MRX-serialization of the MRS object
    """
    return _tostring(_encode_mrs(m, properties, lnk), indent, 0)
##############################################################################
##############################################################################
# Decoding
def _decode(fh):
    # <!ELEMENT mrs-list (mrs)*>
    # if memory becomes a big problem, consider catching start events,
    # get the root element (later start events can be ignored), and
    # root.clear() after decoding each mrs
    for _event, elem in etree.iterparse(fh, events=('end',)):
        if elem.tag != 'mrs':
            continue
        yield _decode_mrs(elem)
        elem.clear()
def _decode_mrs(elem):
    """Build an MRS object from an <mrs> element."""
    # <!ELEMENT mrs (label, var, (ep|hcons)*)>
    # <!ATTLIST mrs
    #   cfrom   CDATA #IMPLIED
    #   cto     CDATA #IMPLIED
    #   surface CDATA #IMPLIED
    #   ident   CDATA #IMPLIED >
    elem = elem.find('.')  # in case elem is ElementTree rather than Element
    # accumulates variable -> {property: value}; filled in by _decode_var
    variables = {}
    top = elem.find('label')
    if top is not None:
        top = _decode_label(top)
    index = elem.find('var')
    if index is not None:
        index = _decode_var(index, variables=variables)
    rels = [_decode_ep(ep, variables) for ep in elem.iter('ep')]
    hcons = [_decode_hcons(hc, variables) for hc in elem.iter('hcons')]
    icons = [_decode_icons(ic, variables) for ic in elem.iter('icons')]
    return MRS(top,
               index,
               rels,
               hcons,
               icons=icons,
               variables=variables,
               lnk=_decode_lnk(elem.get('cfrom'), elem.get('cto')),
               surface=elem.get('surface'),
               identifier=elem.get('ident'))
def _decode_label(elem):
# <!ELEMENT label (extrapair*)>
# <!ATTLIST label
# vid CDATA #REQUIRED >
vid = elem.get('vid')
# ignoring extrapairs
return 'h' + vid
def _decode_var(elem, variables):
    """Return the variable name for a <var>, recording its properties."""
    # <!ELEMENT var (extrapair*)>
    # <!ATTLIST var
    #   vid  CDATA #REQUIRED
    #   sort (x|e|h|u|l|i) #IMPLIED >
    varname = elem.get('sort').lower() + elem.get('vid')
    props = variables.setdefault(varname, {})
    props.update(_decode_extrapairs(elem.iter('extrapair')))
    return varname
def _decode_extrapairs(elems):
# <!ELEMENT extrapair (path,value)>
# <!ELEMENT path (#PCDATA)>
# <!ELEMENT value (#PCDATA)>
return [(e.find('path').text.upper(), e.find('value').text.lower())
for e in elems]
def _decode_ep(elem, variables=None):
    """Build an EP from an <ep> element."""
    # <!ELEMENT ep ((pred|spred|realpred), label, fvpair*)>
    # <!ATTLIST ep
    #   cfrom   CDATA #IMPLIED
    #   cto     CDATA #IMPLIED
    #   surface CDATA #IMPLIED
    #   base    CDATA #IMPLIED >
    pred = _decode_pred(elem.find('./'))  # first child: pred|spred|realpred
    label = _decode_label(elem.find('label'))
    return EP(pred,
              label,
              args=_decode_args(elem, variables=variables),
              lnk=_decode_lnk(elem.get('cfrom'), elem.get('cto')),
              surface=elem.get('surface'),
              base=elem.get('base'))
def _decode_pred(elem):
# <!ELEMENT pred (#PCDATA)>
# <!ELEMENT spred (#PCDATA)>
# <!ELEMENT realpred EMPTY>
# <!ATTLIST realpred
# lemma CDATA #REQUIRED
# pos (v|n|j|r|p|q|c|x|u|a|s) #REQUIRED
# sense CDATA #IMPLIED >
if elem.tag in ('pred', 'spred'):
return elem.text
elif elem.tag == 'realpred':
return predicate.create(elem.get('lemma'),
elem.get('pos'),
elem.get('sense'))
def _decode_args(elem, variables=None):
# <!ELEMENT fvpair (rargname, (var|constant))>
# This code assumes that only cargs have constant values, and all
# other args (including IVs) have var values.
args = {}
for e in elem.findall('fvpair'):
rargname = e.find('rargname').text.upper()
if e.find('constant') is not None:
argval = e.find('constant').text
elif e.find('var') is not None:
argval = _decode_var(e.find('var'), variables=variables)
args[rargname] = argval
return args
def _decode_hcons(elem, variables):
    """Build an HCons from an <hcons> element."""
    # <!ELEMENT hcons (hi, lo)>
    # <!ATTLIST hcons
    #   hreln (qeq|lheq|outscopes) #REQUIRED >
    # <!ELEMENT hi (var)>
    # <!ELEMENT lo (label|var)>
    hi = _decode_var(elem.find('hi/var'), variables)
    lo_elem = elem.find('lo/')
    if lo_elem.tag == 'var':
        lo = _decode_var(lo_elem, variables)
    else:
        lo = _decode_label(lo_elem)
    return HCons(hi, elem.get('hreln'), lo)
# this isn't part of the spec; just putting here in case it's added later
def _decode_icons(elem, variables):
    """Build an ICons from an <icons> element."""
    # <!ELEMENT icons (left, right)>
    # <!ATTLIST icons
    #   ireln #REQUIRED >
    # <!ELEMENT left (var)>
    # <!ELEMENT right (var)>
    left = _decode_var(elem.find('left/var'), variables)
    right = _decode_var(elem.find('right/var'), variables)
    return ICons(left, elem.get('ireln'), right)
def _decode_lnk(cfrom, cto):
if cfrom is cto is None:
return None
elif None in (cfrom, cto):
raise ValueError('Both cfrom and cto, or neither, must be specified.')
else:
return Lnk.charspan(cfrom, cto)
##############################################################################
##############################################################################
# Encoding
def _encode(ms, properties, lnk):
    """Encode a corpus of MRSs as a single <mrs-list> element."""
    root = etree.Element('mrs-list')
    root.extend(_encode_mrs(m, properties, lnk) for m in ms)
    return root
def _encode_mrs(m, properties, lnk):
    """Encode one MRS object as an <mrs> element."""
    # copy the property map: _encode_variable deletes entries as each
    # variable is first serialized, so properties appear only once
    if properties:
        varprops = dict(m.variables)
    else:
        varprops = {}
    attributes = {}
    if lnk:
        attributes['cfrom'] = str(m.cfrom)
        attributes['cto'] = str(m.cto)
    if m.surface is not None:
        attributes['surface'] = m.surface
    if m.identifier is not None:
        attributes['ident'] = m.identifier
    e = etree.Element('mrs', attrib=attributes)
    # child order: label, var, then ep/hcons/icons lists
    if m.top is not None:
        e.append(_encode_label(m.top))
    if m.index is not None:
        e.append(_encode_variable(m.index, varprops))
    for ep in m.rels:
        e.append(_encode_ep(ep, varprops, lnk))
    for hc in m.hcons:
        e.append(_encode_hcon(hc, varprops))
    for ic in m.icons:
        e.append(_encode_icon(ic, varprops))
    return e
def _encode_label(label):
    """Encode a handle like 'h3' as <label vid="3"/>."""
    vid = variable.split(label)[1]
    return etree.Element('label', vid=vid)
def _encode_variable(v, varprops):
    """Encode variable *v* as a <var>, attaching and consuming its properties."""
    srt, vid = variable.split(v)
    e = etree.Element('var', vid=vid, sort=srt)
    props = varprops.get(v)
    if props:
        for key in sorted(props, key=property_priority):
            e.append(_encode_extrapair(key, props[key]))
        # drop the entry so repeated mentions don't repeat the properties
        del varprops[v]
    return e
def _encode_extrapair(key, value):
extrapair = etree.Element('extrapair')
path = etree.Element('path')
path.text = key
val = etree.Element('value')
val.text = value
extrapair.extend([path, val])
return extrapair
def _encode_ep(ep, varprops, lnk):
    """Encode one EP as an <ep> element with its predicate, label, and args."""
    attributes = {}
    if lnk:
        attributes['cfrom'] = str(ep.cfrom)
        attributes['cto'] = str(ep.cto)
    if ep.surface:
        attributes['surface'] = ep.surface
    if ep.base:
        attributes['base'] = ep.base
    e = etree.Element('ep', attrib=attributes)
    e.append(_encode_pred(ep.predicate))
    e.append(_encode_label(ep.label))
    for role in sorted(ep.args, key=role_priority):
        value = ep.args[role]
        if role == CONSTANT_ROLE:
            encoded = _encode_constant(value)
        else:
            encoded = _encode_variable(value, varprops)
        e.append(_encode_arg(role, encoded))
    return e
def _encode_pred(pred):
    """Encode a predicate as <realpred>, <pred>, or <spred>."""
    if predicate.is_surface(pred):
        lemma, pos, sense = predicate.split(pred)
        attrib = {'lemma': lemma, 'pos': pos}
        if sense is not None:
            attrib['sense'] = sense
        return etree.Element('realpred', attrib=attrib)
    # abstract predicates use <pred>; anything else falls back to <spred>
    tag = 'pred' if predicate.is_abstract(pred) else 'spred'
    e = etree.Element(tag)
    e.text = pred
    return e
def _encode_arg(key, value):
fvpair = etree.Element('fvpair')
rargname = etree.Element('rargname')
rargname.text = key
fvpair.append(rargname)
fvpair.append(value)
return fvpair
def _encode_constant(value):
const = etree.Element('constant')
const.text = value
return const
def _encode_hcon(hcon, varprops):
    """Encode a handle constraint as <hcons hreln="..."><hi/><lo/></hcons>."""
    e = etree.Element('hcons', hreln=hcon.relation)
    hi = etree.SubElement(e, 'hi')
    hi.append(_encode_variable(hcon.hi, varprops))
    lo = etree.SubElement(e, 'lo')
    lo.append(_encode_label(hcon.lo))
    return e
def _encode_icon(icon, varprops):
    """Encode an individual constraint as <icons ireln="..."><left/><right/></icons>."""
    e = etree.Element('icons', ireln=icon.relation)
    left = etree.SubElement(e, 'left')
    left.append(_encode_variable(icon.left, varprops))
    right = etree.SubElement(e, 'right')
    right.append(_encode_variable(icon.right, varprops))
    return e
def _tostring(e, indent, offset):
string = etree.tostring(e, encoding='unicode')
if indent is not None and indent is not False:
if indent is True:
indent = 0
def indentmatch(m):
return '\n' + (' ' * indent * (m.lastindex + offset)) + m.group()
string = re.sub(
r'(</mrs-list>)'
r'|(<mrs[^-]|</mrs>)'
r'|(<ep[>\s]|<fvpair>|<extrapair>|<hcons\s|<icons\s>)',
indentmatch,
string)
return string.strip()
| {
"content_hash": "15ca4f66884c79d274f6a6cd53bbd393",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 78,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.5671755145439356,
"repo_name": "goodmami/pydelphin",
"id": "46caffb8ecbdf40f1c23581ce467030aa8ea0abf",
"size": "13192",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1.6.0",
"path": "delphin/codecs/mrx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "375282"
}
],
"symlink_target": ""
} |
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import UGen
class Unpack1FFT(UGen):
    """
    Unpacks a single value from an FFT chain.

    NOTE(review): this module lives under ``dev/etc/pending_ugens``; the
    generated doctest examples referenced undefined names (``binindex``,
    ``bufsize``, ``chain``) and have been removed.
    """

    ### CLASS VARIABLES ###

    # Input names mapped to their default values, in input order.
    # BUGFIX: the original called collections.OrderedDict('chain',
    # 'bufsize', 'binindex', 'whichmeasure'), which raises TypeError at
    # class-creation time -- OrderedDict takes an iterable of pairs.
    _ordered_input_names = collections.OrderedDict([
        ('chain', None),
        ('bufsize', None),
        ('binindex', None),
        ('whichmeasure', 0),
    ])

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        binindex=None,
        bufsize=None,
        chain=None,
        whichmeasure=0,
    ):
        """Initialize the UGen with its inputs (see _ordered_input_names)."""
        UGen.__init__(
            self,
            calculation_rate=calculation_rate,
            binindex=binindex,
            bufsize=bufsize,
            chain=chain,
            whichmeasure=whichmeasure,
        )

    ### PUBLIC METHODS ###

    @classmethod
    def new(
        cls,
        binindex=None,
        bufsize=None,
        chain=None,
        whichmeasure=0,
    ):
        """
        Constructs a Unpack1FFT.

        Returns ugen graph.
        """
        import supriya.synthdefs
        calculation_rate = None
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            binindex=binindex,
            bufsize=bufsize,
            chain=chain,
            whichmeasure=whichmeasure,
        )
        return ugen

    ### PUBLIC PROPERTIES ###

    @property
    def binindex(self):
        """
        Gets `binindex` input of Unpack1FFT.

        Returns ugen input.
        """
        # BUGFIX: OrderedDict has no .index(); index the ordered keys.
        index = tuple(self._ordered_input_names).index('binindex')
        return self._inputs[index]

    @property
    def bufsize(self):
        """
        Gets `bufsize` input of Unpack1FFT.

        Returns ugen input.
        """
        index = tuple(self._ordered_input_names).index('bufsize')
        return self._inputs[index]

    @property
    def chain(self):
        """
        Gets `chain` input of Unpack1FFT.

        Returns ugen input.
        """
        index = tuple(self._ordered_input_names).index('chain')
        return self._inputs[index]

    @property
    def whichmeasure(self):
        """
        Gets `whichmeasure` input of Unpack1FFT.

        Returns ugen input.
        """
        index = tuple(self._ordered_input_names).index('whichmeasure')
        return self._inputs[index]
| {
"content_hash": "78ce4244b8f242ce96c854e5f5b4dfee",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 63,
"avg_line_length": 23.36470588235294,
"alnum_prop": 0.46651560926485397,
"repo_name": "josiah-wolf-oberholtzer/supriya",
"id": "f6caa6a70901a6a066c21d04087d57df6f1f9c4a",
"size": "3972",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dev/etc/pending_ugens/Unpack1FFT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "26333"
},
{
"name": "Makefile",
"bytes": "1792"
},
{
"name": "Python",
"bytes": "2331463"
},
{
"name": "SuperCollider",
"bytes": "318"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
bootstrap client session
"""
import frappe
import frappe.defaults
import frappe.desk.desk_page
from frappe.utils import get_gravatar
from frappe.desk.form.load import get_meta_bundle
def get_bootinfo():
    """build and return boot info

    Collects everything the desk client needs at session start: user info,
    system defaults, desktop modules, home page, translations, timezone data,
    and print settings, plus any app-defined boot_session hooks.
    """
    frappe.set_user_lang(frappe.session.user)
    bootinfo = frappe._dict()
    hooks = frappe.get_hooks()
    doclist = []

    # user
    get_user(bootinfo)

    # system info
    bootinfo['sysdefaults'] = frappe.defaults.get_defaults()
    bootinfo['server_date'] = frappe.utils.nowdate()

    if frappe.session['user'] != 'Guest':
        bootinfo['user_info'] = get_fullnames()
        bootinfo['sid'] = frappe.session['sid'];

    # home page
    # collect desktop module definitions from every installed app; apps
    # without a config.desktop.get_data are simply skipped
    bootinfo.modules = {}
    for app in frappe.get_installed_apps():
        try:
            bootinfo.modules.update(frappe.get_attr(app + ".config.desktop.get_data")() or {})
        except ImportError:
            pass
        except AttributeError:
            pass

    bootinfo.module_app = frappe.local.module_app
    bootinfo.hidden_modules = frappe.db.get_global("hidden_modules")
    bootinfo.doctype_icons = dict(frappe.db.sql("""select name, icon from
        tabDocType where ifnull(icon,'')!=''"""))
    bootinfo.single_types = frappe.db.sql_list("""select name from tabDocType where ifnull(issingle,0)=1""")
    add_home_page(bootinfo, doclist)
    bootinfo.page_info = get_allowed_pages()

    load_translations(bootinfo)
    add_timezone_info(bootinfo)
    load_conf_settings(bootinfo)
    load_print(bootinfo, doclist)
    doclist.extend(get_meta_bundle("Page"))

    # ipinfo
    if frappe.session['data'].get('ipinfo'):
        bootinfo['ipinfo'] = frappe.session['data']['ipinfo']

    # add docs
    bootinfo['docs'] = doclist

    # let installed apps amend bootinfo via their boot_session hooks
    for method in hooks.boot_session or []:
        frappe.get_attr(method)(bootinfo)

    # NOTE: Python 2 only -- `unicode` is undefined on Python 3
    if bootinfo.lang:
        bootinfo.lang = unicode(bootinfo.lang)

    bootinfo.error_report_email = frappe.get_hooks("error_report_email")
    bootinfo.default_background_image = "/assets/frappe/images/ui/into-the-dawn.jpg"

    return bootinfo
def load_conf_settings(bootinfo):
    """Copy whitelisted site-config keys into bootinfo."""
    from frappe import conf
    for key in ('developer_mode',):
        if key in conf:
            bootinfo[key] = conf.get(key)
def get_allowed_pages():
    """Return {page_name: {"modified", "title"}} for pages the current
    user's roles may access, plus pages with no role restriction."""
    roles = frappe.get_roles()
    page_info = {}

    # pages explicitly granted to one of the user's roles
    role_pages = frappe.db.sql("""select distinct
        tabPage.name, tabPage.modified, tabPage.title
        from `tabPage Role`, `tabPage`
        where `tabPage Role`.role in (%s)
        and `tabPage Role`.parent = `tabPage`.name""" % ', '.join(['%s']*len(roles)),
        roles, as_dict=True)
    for p in role_pages:
        page_info[p.name] = {"modified": p.modified, "title": p.title}

    # pages where role is not set are also allowed
    unrestricted = frappe.db.sql("""select name, modified, title
        from `tabPage` where
        (select count(*) from `tabPage Role`
        where `tabPage Role`.parent=tabPage.name) = 0""", as_dict=1)
    for p in unrestricted:
        page_info[p.name] = {"modified": p.modified, "title": p.title}

    return page_info
def load_translations(bootinfo):
    """Attach translated boot messages (non-English only) and the language."""
    if frappe.local.lang != 'en':
        bootinfo["__messages"] = frappe.get_lang_dict("boot")
    bootinfo["lang"] = frappe.lang
def get_fullnames():
    """Return a map of user name -> record (fullname, image, gender, email)
    for enabled, non-website users."""
    rows = frappe.db.sql("""select name,
        concat(ifnull(first_name, ''),
        if(ifnull(last_name, '')!='', ' ', ''), ifnull(last_name, '')) as fullname,
        user_image as image, gender, email
        from tabUser where ifnull(enabled, 0)=1 and user_type!="Website User" """, as_dict=1)
    fullnames = {}
    for row in rows:
        if not row.image:
            # fall back to a gravatar when no user image is set
            row.image = get_gravatar()
        fullnames[row.name] = row
    return fullnames
def get_user(bootinfo):
    """get user info"""
    # attach the current session user's record via frappe.user.load_user()
    bootinfo.user = frappe.user.load_user()
def add_home_page(bootinfo, docs):
    """Resolve the user's desktop home page and append it to the boot docs."""
    if frappe.session.user == "Guest":
        return
    default_page = frappe.db.get_default("desktop:home_page")
    try:
        page = frappe.desk.desk_page.get(default_page)
    except (frappe.DoesNotExistError, frappe.PermissionError):
        # fall back to the standard desktop page; discard the queued message
        frappe.message_log.pop()
        page = frappe.desk.desk_page.get('desktop')
    bootinfo['home_page'] = page.name
    docs.append(page)
def add_timezone_info(bootinfo):
    """Attach momentjs timezone data when the user's zone differs from the system's."""
    user_tz = bootinfo.user.get("time_zone")
    system_tz = bootinfo.sysdefaults.get("time_zone")
    if not user_tz or user_tz == system_tz:
        return
    import frappe.utils.momentjs
    bootinfo.timezone_info = {"zones": {}, "rules": {}, "links": {}}
    frappe.utils.momentjs.update(user_tz, bootinfo.timezone_info)
    frappe.utils.momentjs.update(system_tz, bootinfo.timezone_info)
def load_print(bootinfo, doclist):
    """Append Print Settings to the boot docs and resolve the print CSS."""
    settings = frappe.db.get_singles_dict("Print Settings")
    settings.doctype = ":Print Settings"
    doclist.append(settings)
    load_print_css(bootinfo, settings)
def load_print_css(bootinfo, print_settings):
    # resolve CSS for the configured print style, defaulting to "Modern"
    bootinfo.print_css = frappe.get_attr("frappe.templates.pages.print.get_print_style")(print_settings.print_style or "Modern")
| {
"content_hash": "179c25b0ef9c31f4a6d7587e7e70d31c",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 125,
"avg_line_length": 29.793548387096774,
"alnum_prop": 0.7033347769597228,
"repo_name": "indictranstech/internal-frappe",
"id": "1acf97b254e8079564ab6854bdc3201a373929a5",
"size": "4719",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/boot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "193821"
},
{
"name": "HTML",
"bytes": "126524"
},
{
"name": "JavaScript",
"bytes": "1571461"
},
{
"name": "Python",
"bytes": "1051480"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
# Documentation-generated example: volume-weighted PDF of an aerosol
# distribution with its modes overlaid.
# NOTE(review): assumes `opcsim`, `d` (the distribution), and `sns`
# (seaborn) are provided by the surrounding example scaffolding.
ax = opcsim.plots.pdfplot(d, weight='volume', with_modes=True)
ax.set_title("Volume Weighted Urban Aerosol Distribution", fontsize=16)
ax.legend(loc='best')
sns.despine()
| {
"content_hash": "51f9f6004468746e691082fccff28dab",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 71,
"avg_line_length": 42.75,
"alnum_prop": 0.7543859649122807,
"repo_name": "dhhagan/opcsim",
"id": "5a9a3a1cc28b51e526d0c015d1ce0f28e2296d2c",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/_build/html/generated/opcsim-plots-pdfplot-3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "148"
},
{
"name": "Jupyter Notebook",
"bytes": "698"
},
{
"name": "Python",
"bytes": "195835"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import worldbit_hash
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
	"""Truncate x to an unsigned 32-bit value."""
	# drop the Python-2-only 'L' literal suffix; 0xffffffff is the same
	# value on Python 2 and also valid on Python 3
	return x & 0xffffffff
def bytereverse(x):
	"""Reverse the byte order of a 32-bit integer."""
	return uint32((x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24))
def bufreverse(in_buf):
	"""Reverse the bytes inside each 32-bit word of in_buf."""
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	# b''.join is identical on Python 2 (str) and correct on Python 3 (bytes)
	return b''.join(out_words)
def wordreverse(in_buf):
	"""Reverse the order of the 4-byte words in in_buf."""
	out_words = [in_buf[i:i+4] for i in range(0, len(in_buf), 4)]
	out_words.reverse()
	# b''.join is identical on Python 2 (str) and correct on Python 3 (bytes)
	return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
	"""Proof-of-work hash of an 80-byte block header.

	Worldbit uses its own PoW hash (via the worldbit_hash extension
	module) instead of Bitcoin's double-SHA256; the dead commented-out
	SHA256 implementation has been removed.
	"""
	return worldbit_hash.getPoWHash(blk_hdr)
def calc_hash_str(blk_hdr):
	"""Byte-reversed hex string of the header's PoW hash (Python 2 only:
	uses str.encode('hex'))."""
	raw = calc_hdr_hash(blk_hdr)
	raw = wordreverse(bufreverse(raw))
	return raw.encode('hex')
def get_blk_dt(blk_hdr):
	"""Return (month-start datetime, raw nTime) from a block header."""
	# nTime is the little-endian uint32 at offset 68 of the 80-byte header
	(nTime,) = struct.unpack("<I", blk_hdr[68:68+4])
	dt = datetime.datetime.fromtimestamp(nTime)
	return (datetime.datetime(dt.year, dt.month, 1), nTime)
def get_block_hashes(settings):
	"""Read the ordered list of block hashes from settings['hashlist'].

	One hash per line; file order defines block height.
	"""
	blkindex = []
	# BUGFIX: the original never closed the file; use a context manager
	with open(settings['hashlist'], "r") as f:
		for line in f:
			blkindex.append(line.rstrip())
	print("Read " + str(len(blkindex)) + " hashes")
	return blkindex
def mkblockmap(blkindex):
	"""Invert the ordered hash list into a dict of hash -> height."""
	return {blkhash: height for height, blkhash in enumerate(blkindex)}
# Block header and extent on disk:
# (fn, offset) locate the raw block body in input file blk<fn>.dat;
# inhdr is the 8-byte magic+length prefix, blkhdr the 80-byte block
# header, and size the body length (excluding the header).
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
	"""Copies blocks from the input blk*.dat files into linearized output.

	Blocks are written in hashlist (height) order.  Out-of-order blocks
	found in the input are buffered in memory up to
	settings['out_of_order_cache_sz'] bytes, or re-read later from disk
	via their recorded BlockExtent.
	"""

	def __init__(self, settings, blkindex, blkmap):
		self.settings = settings
		self.blkindex = blkindex
		self.blkmap = blkmap

		self.inFn = 0
		self.inF = None
		self.outFn = 0
		self.outsz = 0
		self.outF = None
		self.outFname = None
		self.blkCountIn = 0
		self.blkCountOut = 0

		self.lastDate = datetime.datetime(2000, 1, 1)
		self.highTS = 1408893517 - 315360000
		self.timestampSplit = False
		self.fileOutput = True
		self.setFileTime = False
		self.maxOutSz = settings['max_out_sz']
		if 'output' in settings:
			self.fileOutput = False
		if settings['file_timestamp'] != 0:
			self.setFileTime = True
		if settings['split_timestamp'] != 0:
			self.timestampSplit = True
		# Extents and cache for out-of-order blocks
		self.blockExtents = {}
		self.outOfOrderData = {}
		self.outOfOrderSize = 0  # running total size for items in outOfOrderData

	def writeBlock(self, inhdr, blk_hdr, rawblock):
		"""Append one block to the output, rotating files on size/month limits."""
		blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
		if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
			self.outF.close()
			if self.setFileTime:
				# BUGFIX: was os.utime(outFname, ..., highTS) with bare
				# (undefined) local names instead of instance attributes
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0

		(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if self.timestampSplit and (blkDate > self.lastDate):
			# BUGFIX: this branch referenced bare names (hash_str, lastDate,
			# outF, setFileTime, outFname) that do not exist in this scope
			# and raised NameError whenever timestamp splitting was enabled.
			print("New month " + blkDate.strftime("%Y-%m"))
			self.lastDate = blkDate
			if self.outF:
				self.outF.close()
				if self.setFileTime:
					os.utime(self.outFname, (int(time.time()), self.highTS))
				self.outF = None
				self.outFname = None
				self.outFn = self.outFn + 1
				self.outsz = 0

		if not self.outF:
			if self.fileOutput:
				outFname = self.settings['output_file']
			else:
				outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
			print("Output file " + outFname)
			self.outF = open(outFname, "wb")
			# BUGFIX: remember the name so os.utime() can touch it on close
			self.outFname = outFname

		self.outF.write(inhdr)
		self.outF.write(blk_hdr)
		self.outF.write(rawblock)
		self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

		self.blkCountOut = self.blkCountOut + 1
		if blkTS > self.highTS:
			self.highTS = blkTS

		if (self.blkCountOut % 1000) == 0:
			print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
					(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

	def inFileName(self, fn):
		"""Path of input file number `fn` (blkNNNNN.dat) under settings['input']."""
		return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

	def fetchBlock(self, extent):
		'''Fetch block contents from disk given extents'''
		with open(self.inFileName(extent.fn), "rb") as f:
			f.seek(extent.offset)
			return f.read(extent.size)

	def copyOneBlock(self):
		'''Find the next block to be written in the input, and copy it to the output.'''
		extent = self.blockExtents.pop(self.blkCountOut)
		if self.blkCountOut in self.outOfOrderData:
			# If the data is cached, use it from memory and remove from the cache
			rawblock = self.outOfOrderData.pop(self.blkCountOut)
			self.outOfOrderSize -= len(rawblock)
		else:  # Otherwise look up data on disk
			rawblock = self.fetchBlock(extent)

		self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

	def run(self):
		"""Scan input files and emit all blocks in height order."""
		while self.blkCountOut < len(self.blkindex):
			if not self.inF:
				fname = self.inFileName(self.inFn)
				print("Input file " + fname)
				try:
					self.inF = open(fname, "rb")
				except IOError:
					print("Premature end of block data")
					return

			inhdr = self.inF.read(8)
			if (not inhdr or (inhdr[0] == "\0")):
				self.inF.close()
				self.inF = None
				self.inFn = self.inFn + 1
				continue

			inMagic = inhdr[:4]
			if (inMagic != self.settings['netmagic']):
				print("Invalid magic: " + inMagic.encode('hex'))
				return
			inLenLE = inhdr[4:]
			su = struct.unpack("<I", inLenLE)
			inLen = su[0] - 80  # length without header
			blk_hdr = self.inF.read(80)
			inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

			hash_str = calc_hash_str(blk_hdr)
			# BUGFIX: the membership test used the module-global `blkmap`;
			# use the instance's own map so the class works standalone
			if hash_str not in self.blkmap:
				print("Skipping unknown block " + hash_str)
				self.inF.seek(inLen, os.SEEK_CUR)
				continue

			blkHeight = self.blkmap[hash_str]
			self.blkCountIn += 1

			if self.blkCountOut == blkHeight:
				# If in-order block, just copy
				rawblock = self.inF.read(inLen)
				self.writeBlock(inhdr, blk_hdr, rawblock)

				# See if we can catch up to prior out-of-order blocks
				while self.blkCountOut in self.blockExtents:
					self.copyOneBlock()

			else:  # If out-of-order, skip over block data for now
				self.blockExtents[blkHeight] = inExtent
				if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
					# If there is space in the cache, read the data
					# Reading the data in file sequence instead of seeking and fetching it later is preferred,
					# but we don't want to fill up memory
					self.outOfOrderData[blkHeight] = self.inF.read(inLen)
					self.outOfOrderSize += inLen
				else:  # If no space in cache, seek forward
					self.inF.seek(inLen, os.SEEK_CUR)

		print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print("Usage: linearize-data.py CONFIG-FILE")
		sys.exit(1)

	# parse the simple key=value config file into `settings`
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# fill in defaults (worldbit network magic and genesis hash)
	if 'netmagic' not in settings:
		settings['netmagic'] = 'cee2caff'
	if 'genesis' not in settings:
		settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
	if 'input' not in settings:
		settings['input'] = 'input'
	if 'hashlist' not in settings:
		settings['hashlist'] = 'hashlist.txt'
	if 'file_timestamp' not in settings:
		settings['file_timestamp'] = 0
	if 'split_timestamp' not in settings:
		settings['split_timestamp'] = 0
	if 'max_out_sz' not in settings:
		settings['max_out_sz'] = 1000L * 1000 * 1000
	if 'out_of_order_cache_sz' not in settings:
		settings['out_of_order_cache_sz'] = 100 * 1000 * 1000

	# normalize types (Python 2 only: long() and str.decode('hex'))
	settings['max_out_sz'] = long(settings['max_out_sz'])
	settings['split_timestamp'] = int(settings['split_timestamp'])
	settings['file_timestamp'] = int(settings['file_timestamp'])
	settings['netmagic'] = settings['netmagic'].decode('hex')
	settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])

	if 'output_file' not in settings and 'output' not in settings:
		print("Missing output file / directory")
		sys.exit(1)

	blkindex = get_block_hashes(settings)
	blkmap = mkblockmap(blkindex)

	# the hashlist must include the genesis block to be usable
	if not settings['genesis'] in blkmap:
		print("Genesis block not found in hashlist")
	else:
		BlockDataCopier(settings, blkindex, blkmap).run()
| {
"content_hash": "8bc268f445698ec902322eaa14c0e06d",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 108,
"avg_line_length": 29.2027027027027,
"alnum_prop": 0.6730680240629339,
"repo_name": "worldbit/worldbit",
"id": "6c9709b02f4cedc024012e210474a7f722544952",
"size": "8931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/linearize/linearize-data.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1315071"
},
{
"name": "C++",
"bytes": "5307484"
},
{
"name": "CSS",
"bytes": "124359"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "147950"
},
{
"name": "Makefile",
"bytes": "97518"
},
{
"name": "Objective-C",
"bytes": "4965"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "706785"
},
{
"name": "QMake",
"bytes": "2059"
},
{
"name": "Roff",
"bytes": "3844"
},
{
"name": "Shell",
"bytes": "35823"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets
from polls.models import Questionary, Question, Choice, Answer
from polls.serializers import (QuestionarySerializer, QuestionSerializer,
ChoiceSerializer, AnswerSerializer)
class QuestionaryViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Questionary records."""

    serializer_class = QuestionarySerializer
    queryset = Questionary.objects.all()
class QuestionViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Question records."""

    serializer_class = QuestionSerializer
    queryset = Question.objects.all()
class ChoiceViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Choice records."""

    serializer_class = ChoiceSerializer
    queryset = Choice.objects.all()
class AnswerViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Answer records."""

    serializer_class = AnswerSerializer
    queryset = Answer.objects.all()
| {
"content_hash": "db2a574357c9eebb22a1a95dee0fe2ce",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 30,
"alnum_prop": 0.764,
"repo_name": "ewokcillo/django_workshop",
"id": "4d9eabf1ace47bcff3b2b1814a1e3c8537d04160",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling/polls/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9326"
}
],
"symlink_target": ""
} |
"""
Interpolation
################################
Hidden wrapper function that makes it convenient to choose
an interpolation scheme available in scipy.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import (interp1d, interp2d, griddata,
pchip_interpolate, krogh_interpolate,
barycentric_interpolate, Akima1DInterpolator,
CloughTocher2DInterpolator, RectBivariateSpline,
RegularGridInterpolator)
from scipy.signal import savgol_filter
from scipy.optimize import curve_fit
def _interpolate(df, x, y, z, method, kind, yfirst, dim, minimum):
    """Resample the surface z(x, y) stored in *df* onto a regular grid.

    Args:
        df: DataFrame that must contain the columns named by *x*, *y*, *z*.
        x, y, z: Column names of the two coordinates and the surface values.
        method: Interpolation scheme key (one of the ``convenience`` map
            keys below, e.g. 'interp1d', 'interp2d', 'griddata', 'bivariate').
        kind: Spline kind forwarded to interp1d/interp2d ('linear', 'cubic', ...).
        yfirst: For the twice-applied 1D schemes, interpolate along y before x.
        dim: Number of points per axis of the output grid.
        minimum: If True, also track the minimum of each constant-y slice.

    Returns:
        dict with keys 'x', 'y', 'z' (and 'min') holding the new grid arrays.
        The value grid follows the convention ``z[j, i] == f(newx[i], newy[j])``
        (matching the interp2d branch).

    Raises:
        Exception: if the named columns are missing, the method is unknown,
            or the data are effectively 1D and the method is not
            'interp1d'/'akima'.
    """
    # Check that columns are in df
    if len(set([x, y, z]) & set(df.columns)) != 3:
        raise Exception('{!r}, {!r} and {!r} must be in df.columns'.format(x, y, z))
    # A constant x (or y) column means the data are really one-dimensional.
    oned = None
    if len(df[x].unique()) == 1:
        oned, dud = y, x
    elif len(df[y].unique()) == 1:
        oned, dud = x, y
    # Map the method to the function or class in scipy
    convenience = {'cloughtocher': CloughTocher2DInterpolator,
                   'barycentric': barycentric_interpolate,
                   'regulargrid': RegularGridInterpolator,
                   'bivariate': RectBivariateSpline,
                   'akima': Akima1DInterpolator,
                   'krogh': krogh_interpolate,
                   'pchip': pchip_interpolate,
                   'interp1d': interp1d,
                   'interp2d': interp2d,
                   'griddata': griddata}
    # Handle 1-dimensional data first
    if oned is not None:
        if method not in ['interp1d', 'akima']:
            # (fixed message typo: previously said "aklima")
            raise Exception('One-dimensional interpolation must use '
                            '"interp1d" or "akima" method')
        kwargs = {'kind': kind} if method == 'interp1d' else {}
        xdat = df[oned].values
        if not df[z].isnull().values.any():
            zdat = df[z].values
        else:
            print('Missing data is interpolated with a 3rd order polynomial.')
            zdat = df[z].interpolate(method='piecewise_polynomial', order=3)
        newx = np.linspace(xdat.min(), xdat.max(), dim)
        interpz = convenience[method](xdat, zdat, **kwargs)
        newz = interpz(newx)
        return {'x': newx, 'z': newz, 'y': df[dud].unique(),
                'min': (newx[newz.argmin()], newz.min())}
    # Check that the interpolation method is supported
    if method not in convenience:
        raise Exception('method must be in {}'.format(convenience.keys()))
    # Shape the data in df.  Keyword arguments keep this compatible with
    # pandas >= 2.0, where positional pivot() arguments were removed.
    pivot = df.pivot(index=x, columns=y, values=z)
    if pivot.isnull().values.any():
        print('Missing data is interpolated with a 3rd order piecewise polynomial.\n'
              'End points are extrapolated with a fit function of 3rd order.')
        pivot.interpolate(method='piecewise_polynomial', order=3, axis=1, inplace=True)
        # Obtained from SO: http://stackoverflow.com/questions/22491628/extrapolate-values-in-pandas-dataframe
        # Cubic polynomial used to extrapolate the end points of each column.
        def func(x, a, b, c, d):
            return a * (x ** 3) + b * (x ** 2) + c * x + d
        # Initial parameter guess, just to kick off the optimization
        guess = (0.5, 0.5, 0.5, 0.5)
        # Create copy of data to remove NaNs for curve fitting
        fit_df = pivot.dropna()
        # Fitted parameters per column (locals renamed so the *x*/*y*
        # column-name parameters are no longer shadowed).
        col_params = {}
        # Curve fit each column
        for col in fit_df.columns:
            fit_x = fit_df.index.astype(float).values
            fit_y = fit_df[col].values
            params = curve_fit(func, fit_x, fit_y, guess)
            col_params[col] = params[0]
        # Extrapolate each column
        for col in pivot.columns:
            # Index values that are still NaN in this column
            nan_x = pivot[pd.isnull(pivot[col])].index.astype(float).values
            # Extrapolate those points with the fitted function
            pivot.loc[nan_x, col] = func(nan_x, *col_params[col])
    xdat = pivot.index.values
    ydat = pivot.columns.values
    zdat = pivot.values
    # New (x, y) values
    newx = np.linspace(xdat.min(), xdat.max(), dim)
    newy = np.linspace(ydat.min(), ydat.max(), dim)
    # Details of the implementation in scipy
    # First 5 are explicitly 2D interpolation
    if method == 'bivariate':
        interpz = convenience[method](xdat, ydat, zdat)
        newz = interpz(newx, newy).T
    elif method == 'interp2d':
        interpz = convenience[method](xdat, ydat, zdat.T, kind=kind)
        newz = interpz(newx, newy)
    elif method in ['griddata', 'cloughtocher', 'regulargrid']:
        meshx, meshy = np.meshgrid(xdat, ydat)
        newmeshx, newmeshy = np.meshgrid(newx, newy)
        points = np.array([meshx.flatten(order='F'),
                           meshy.flatten(order='F')]).T
        newpoints = np.array([newmeshx.flatten(order='F'),
                              newmeshy.flatten(order='F')]).T
        if method == 'cloughtocher':
            interpz = convenience[method](points, zdat.flatten())
            newz = interpz(newpoints)
            newz = newz.reshape((dim, dim), order='F')
        elif method == 'regulargrid':
            interpz = convenience[method]((xdat, ydat), zdat)
            newz = interpz(newpoints)
            newz = newz.reshape((dim, dim), order='F')
        else:
            newz = convenience[method](points, zdat.flatten(), newpoints)
            newz = newz.reshape((dim, dim), order='F')
    # 1D interpolation applied across both x and y
    else:
        # Not sure if we need this complexity but interesting to see if
        # the order of interpolation matters (based on method)
        newz = np.empty((dim, dim), dtype=np.float64)
        kwargs = {'kind': kind} if method == 'interp1d' else {}
        if yfirst:
            # First pass: along y at every original x; partz[i, j] is then
            # z(xdat[i], newy[j]).
            partz = np.empty((xdat.shape[0], dim), dtype=np.float64)
            if method in ['interp1d', 'akima']:
                for i in range(xdat.shape[0]):
                    zfunc = convenience[method](ydat, zdat[i,:], **kwargs)
                    partz[i] = zfunc(newy)
                for i in range(dim):
                    zfunc = convenience[method](xdat, partz[:,i], **kwargs)
                    # BUG FIX: the x-axis interpolator must be evaluated on
                    # the new x grid.  This previously called zfunc(newy) and
                    # then flipped the result with [::-1, ::-1], which raised
                    # a bounds error (interp1d bounds_error=True) or produced
                    # wrong values whenever the x and y ranges differed.
                    newz[i,:] = zfunc(newx)
            else:
                for i in range(xdat.shape[0]):
                    partz[i] = convenience[method](ydat, zdat[i,:], newy)
                for i in range(dim):
                    newz[i,:] = convenience[method](xdat, partz[:,i], newx)
        else:
            # First pass: along x at every original y; partz[i, j] is then
            # z(newx[j], ydat[i]).
            partz = np.empty((ydat.shape[0], dim), dtype=np.float64)
            if method in ['interp1d', 'akima']:
                for i in range(ydat.shape[0]):
                    zfunc = convenience[method](xdat, zdat[:,i], **kwargs)
                    partz[i] = zfunc(newx)
                for i in range(dim):
                    zfunc = convenience[method](ydat, partz[:,i], **kwargs)
                    newz[:,i] = zfunc(newy)
            else:
                for i in range(ydat.shape[0]):
                    partz[i] = convenience[method](xdat, zdat[:,i], newx)
                for i in range(dim):
                    newz[:,i] = convenience[method](ydat, partz[:,i], newy)
    # Find minimum values for the interpolated data set
    minima = None
    if minimum:
        minima = np.empty((dim, 3), dtype=np.float64)
        for i, arr in enumerate(newz):
            minima[i] = (newx[arr.argmin()], newy[i], arr.min())
        minima = pd.DataFrame(minima)
        # Smooth this out as it can be quite jagged
        # NOTE(review): this smooths column 1 (the evenly spaced newy values,
        # already a straight line) rather than column 0 (the jagged x
        # locations of the minima).  It looks like minima[0] was intended;
        # left unchanged pending confirmation with the original author.
        window = dim - (1 - dim % 2)
        minima[1] = savgol_filter(minima[1], window, 3)
    return {'x': newx, 'y': newy, 'z': newz, 'min': minima}
# Sample of a wrapper around the hidden function for public API
def interpolate_j2(df, method='interp2d', kind='cubic', yfirst=False,
                   dim=21, minimum=False):
    """Interpolate the j2 surface over (alpha, gamma) for plotting.

    Thin wrapper around ``_interpolate`` that renames the generic
    cartesian keys ('x', 'y', 'z') of the result to the domain-specific
    names ('alpha', 'gamma', 'j2').
    """
    result = _interpolate(df, 'alpha', 'gamma', 'j2',
                          method, kind, yfirst, dim, minimum)
    renames = {'x': 'alpha', 'y': 'gamma', 'z': 'j2'}
    for cart, domain in renames.items():
        result[domain] = result.pop(cart)
    return result
| {
"content_hash": "75255a90a0febbf5328f1bd9f016a866",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 110,
"avg_line_length": 46.88333333333333,
"alnum_prop": 0.5549235691432635,
"repo_name": "exa-analytics/exatomic",
"id": "7da5528b90e7144c6790e09d4351be0c6b492143",
"size": "8577",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "exatomic/algorithms/interpolation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "762"
},
{
"name": "JavaScript",
"bytes": "121101"
},
{
"name": "Jupyter Notebook",
"bytes": "13176"
},
{
"name": "Python",
"bytes": "1084816"
},
{
"name": "Shell",
"bytes": "711"
},
{
"name": "TypeScript",
"bytes": "953"
}
],
"symlink_target": ""
} |
import ast
import builtins
import collections
import decimal
import fractions
import io
import locale
import os
import pickle
import platform
import random
import re
import sys
import traceback
import types
import unittest
import warnings
from contextlib import ExitStack
from operator import neg
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, swap_attr, unlink)
from test.support.script_helper import assert_python_ok
from unittest.mock import MagicMock, patch
try:
import pty, signal
except ImportError:
pty = signal = None
class Squares:
    """Lazy sequence of perfect squares, valid for indices 0 <= i < max.

    Squares are computed on demand and cached; ``len()`` reports how many
    have been materialized so far, not the nominal size.
    """
    def __init__(self, max):
        self.max = max      # exclusive upper bound on valid indices
        self.sofar = []     # cache of squares computed so far
    def __len__(self):
        return len(self.sofar)
    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Extend the cache up to and including index i.
        while len(self.sofar) <= i:
            nxt = len(self.sofar)
            self.sofar.append(nxt * nxt)
        return self.sofar[i]
class StrSquares:
    """Lazy sequence of perfect squares rendered as strings.

    Same contract as ``Squares`` but each element is ``str(n*n)``.
    """
    def __init__(self, max):
        self.max = max      # exclusive upper bound on valid indices
        self.sofar = []     # cache of string squares built so far
    def __len__(self):
        return len(self.sofar)
    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Extend the cache up to and including index i.
        while len(self.sofar) <= i:
            nxt = len(self.sofar)
            self.sofar.append(str(nxt * nxt))
        return self.sofar[i]
class BitBucket:
    """Write sink that silently discards everything (stream stand-in)."""
    def write(self, line):
        # Deliberate no-op: returns None like a real file's side effect
        # being ignored.
        return None
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
    # Fixture: any truth-value test of an instance raises RuntimeError,
    # used to check that builtins propagate errors from __bool__.
    def __bool__(self):
        raise RuntimeError
class TestFailingIter:
    # Fixture: iterating an instance raises RuntimeError, used to check
    # that builtins propagate errors from __iter__.
    def __iter__(self):
        raise RuntimeError
def filter_char(arg):
    """Predicate: True for characters strictly after 'd' in code-point order."""
    threshold = ord("d")
    return ord(arg) > threshold
def map_char(arg):
    """Return the character one code point after *arg*."""
    code = ord(arg)
    return chr(code + 1)
class BuiltinTest(unittest.TestCase):
# Helper to check picklability
    def check_iter_pickle(self, it, seq, proto):
        # Helper: round-trip *it* through pickle at protocol *proto* and
        # check the restored iterator still yields *seq*; then re-pickle
        # after consuming one element and expect seq[1:].
        itorg = it
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(type(itorg), type(it))
        self.assertEqual(list(it), seq)
        # Test the iterator after dropping one element from it.
        it = pickle.loads(d)
        try:
            next(it)
        except StopIteration:
            return
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(list(it), seq[1:])
    def test_import(self):
        # __import__ accepts positional and keyword forms; bad names, bad
        # argument types, empty names and embedded NULs must raise.
        __import__('sys')
        __import__('time')
        __import__('string')
        __import__(name='sys')
        __import__(name='time', level=0)
        self.assertRaises(ImportError, __import__, 'spamspam')
        self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
        self.assertRaises(ValueError, __import__, '')
        self.assertRaises(TypeError, __import__, 'sys', name='sys')
        # embedded null character
        self.assertRaises(ModuleNotFoundError, __import__, 'string\x00')
    def test_abs(self):
        # abs() on ints, floats, bools and objects defining __abs__;
        # rejects strings and None.
        # int
        self.assertEqual(abs(0), 0)
        self.assertEqual(abs(1234), 1234)
        self.assertEqual(abs(-1234), 1234)
        self.assertTrue(abs(-sys.maxsize-1) > 0)
        # float
        self.assertEqual(abs(0.0), 0.0)
        self.assertEqual(abs(3.14), 3.14)
        self.assertEqual(abs(-3.14), 3.14)
        # str
        self.assertRaises(TypeError, abs, 'a')
        # bool
        self.assertEqual(abs(True), 1)
        self.assertEqual(abs(False), 0)
        # other
        self.assertRaises(TypeError, abs)
        self.assertRaises(TypeError, abs, None)
        class AbsClass(object):
            # abs() must return __abs__'s result verbatim, even if negative.
            def __abs__(self):
                return -5
        self.assertEqual(abs(AbsClass()), -5)
    def test_all(self):
        # all(): truthiness, error propagation, empty input, short-circuit.
        self.assertEqual(all([2, 4, 6]), True)
        self.assertEqual(all([2, None, 6]), False)
        self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
        self.assertRaises(RuntimeError, all, TestFailingIter())
        self.assertRaises(TypeError, all, 10)               # Non-iterable
        self.assertRaises(TypeError, all)                   # No args
        self.assertRaises(TypeError, all, [2, 4, 6], [])    # Too many args
        self.assertEqual(all([]), True)                     # Empty iterator
        self.assertEqual(all([0, TestFailingBool()]), False)# Short-circuit
        S = [50, 60]
        self.assertEqual(all(x > 42 for x in S), True)
        S = [50, 40, 60]
        self.assertEqual(all(x > 42 for x in S), False)
    def test_any(self):
        # any(): truthiness, error propagation, empty input, short-circuit.
        self.assertEqual(any([None, None, None]), False)
        self.assertEqual(any([None, 4, None]), True)
        self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
        self.assertRaises(RuntimeError, any, TestFailingIter())
        self.assertRaises(TypeError, any, 10)               # Non-iterable
        self.assertRaises(TypeError, any)                   # No args
        self.assertRaises(TypeError, any, [2, 4, 6], [])    # Too many args
        self.assertEqual(any([]), False)                    # Empty iterator
        self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
        S = [40, 60, 30]
        self.assertEqual(any(x > 42 for x in S), True)
        S = [10, 20, 30]
        self.assertEqual(any(x > 42 for x in S), False)
    def test_ascii(self):
        # ascii() matches repr() for ASCII-only input and escapes all
        # non-ASCII characters, including lone surrogates and astral chars.
        self.assertEqual(ascii(''), '\'\'')
        self.assertEqual(ascii(0), '0')
        self.assertEqual(ascii(()), '()')
        self.assertEqual(ascii([]), '[]')
        self.assertEqual(ascii({}), '{}')
        a = []
        a.append(a)
        self.assertEqual(ascii(a), '[[...]]')
        a = {}
        a[0] = a
        self.assertEqual(ascii(a), '{0: {...}}')
        # Advanced checks for unicode strings
        def _check_uni(s):
            self.assertEqual(ascii(s), repr(s))
        _check_uni("'")
        _check_uni('"')
        _check_uni('"\'')
        _check_uni('\0')
        _check_uni('\r\n\t .')
        # Unprintable non-ASCII characters
        _check_uni('\x85')
        _check_uni('\u1fff')
        _check_uni('\U00012fff')
        # Lone surrogates
        _check_uni('\ud800')
        _check_uni('\udfff')
        # Issue #9804: surrogates should be joined even for printable
        # wide characters (UCS-2 builds).
        self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
        # All together
        s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
        self.assertEqual(ascii(s),
            r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
    def test_neg(self):
        # Unary minus on the most negative "small" int stays an int.
        x = -sys.maxsize-1
        self.assertTrue(isinstance(x, int))
        self.assertEqual(-x, sys.maxsize+1)
    def test_callable(self):
        # callable() is True for functions, classes and instances whose
        # CLASS defines __call__; instance attributes are ignored.
        self.assertTrue(callable(len))
        self.assertFalse(callable("a"))
        self.assertTrue(callable(callable))
        self.assertTrue(callable(lambda x, y: x + y))
        self.assertFalse(callable(__builtins__))
        def f(): pass
        self.assertTrue(callable(f))
        class C1:
            def meth(self): pass
        self.assertTrue(callable(C1))
        c = C1()
        self.assertTrue(callable(c.meth))
        self.assertFalse(callable(c))
        # __call__ is looked up on the class, not the instance
        c.__call__ = None
        self.assertFalse(callable(c))
        c.__call__ = lambda self: 0
        self.assertFalse(callable(c))
        del c.__call__
        self.assertFalse(callable(c))
        class C2(object):
            def __call__(self): pass
        c2 = C2()
        self.assertTrue(callable(c2))
        c2.__call__ = None
        self.assertTrue(callable(c2))
        class C3(C2): pass
        c3 = C3()
        self.assertTrue(callable(c3))
    def test_chr(self):
        # chr() over the full Unicode range; out-of-range codes must raise.
        self.assertEqual(chr(32), ' ')
        self.assertEqual(chr(65), 'A')
        self.assertEqual(chr(97), 'a')
        self.assertEqual(chr(0xff), '\xff')
        self.assertRaises(ValueError, chr, 1<<24)
        self.assertEqual(chr(sys.maxunicode),
                         str('\\U0010ffff'.encode("ascii"), 'unicode-escape'))
        self.assertRaises(TypeError, chr)
        self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
        self.assertEqual(chr(0x00010000), "\U00010000")
        self.assertEqual(chr(0x00010001), "\U00010001")
        self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
        self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
        self.assertEqual(chr(0x00100000), "\U00100000")
        self.assertEqual(chr(0x00100001), "\U00100001")
        self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
        self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
        self.assertRaises(ValueError, chr, -1)
        self.assertRaises(ValueError, chr, 0x00110000)
        self.assertRaises((OverflowError, ValueError), chr, 2**32)
    def test_cmp(self):
        # Python 3 removed the cmp() builtin; make sure it stays gone.
        self.assertTrue(not hasattr(builtins, "cmp"))
    def test_compile(self):
        # compile(): source forms (str/bytes/BOM/memoryview/AST), keyword
        # arguments, bad modes/arguments, and the optimize= parameter.
        compile('print(1)\n', '', 'exec')
        bom = b'\xef\xbb\xbf'
        compile(bom + b'print(1)\n', '', 'exec')
        compile(source='pass', filename='?', mode='exec')
        compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
        compile('pass', '?', dont_inherit=1, mode='exec')
        compile(memoryview(b"text"), "name", "exec")
        self.assertRaises(TypeError, compile)
        self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
        self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
        self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
        self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
                          mode='eval', source='0', filename='tmp')
        compile('print("\xe5")\n', '', 'exec')
        self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
        self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
        # test the optimize argument
        codestr = '''def f():
        """doc"""
        debug_enabled = False
        if __debug__:
            debug_enabled = True
        try:
            assert False
        except AssertionError:
            return (True, f.__doc__, debug_enabled, __debug__)
        else:
            return (False, f.__doc__, debug_enabled, __debug__)
        '''
        def f(): """doc"""
        # Each row: (optimize value, expected assert result, docstring,
        # debug_enabled, __debug__) — -1 inherits the interpreter's setting.
        values = [(-1, __debug__, f.__doc__, __debug__, __debug__),
                  (0, True, 'doc', True, True),
                  (1, False, 'doc', False, False),
                  (2, False, None, False, False)]
        for optval, *expected in values:
            # test both direct compilation and compilation via AST
            codeobjs = []
            codeobjs.append(compile(codestr, "<test>", "exec", optimize=optval))
            tree = ast.parse(codestr)
            codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
            for code in codeobjs:
                ns = {}
                exec(code, ns)
                rv = ns['f']()
                self.assertEqual(rv, tuple(expected))
    def test_delattr(self):
        # delattr() removes a module attribute; wrong arity must raise.
        sys.spam = 1
        delattr(sys, 'spam')
        self.assertRaises(TypeError, delattr)
    def test_dir(self):
        # dir() on scopes, modules, types, instances, __dir__ hooks and
        # objects with broken __dict__/__class__.
        # dir(wrong number of arguments)
        self.assertRaises(TypeError, dir, 42, 42)
        # dir() - local scope
        local_var = 1
        self.assertIn('local_var', dir())
        # dir(module)
        self.assertIn('exit', dir(sys))
        # dir(module_with_invalid__dict__)
        class Foo(types.ModuleType):
            __dict__ = 8
        f = Foo("foo")
        self.assertRaises(TypeError, dir, f)
        # dir(type)
        self.assertIn("strip", dir(str))
        self.assertNotIn("__mro__", dir(str))
        # dir(obj)
        class Foo(object):
            def __init__(self):
                self.x = 7
                self.y = 8
                self.z = 9
        f = Foo()
        self.assertIn("y", dir(f))
        # dir(obj_no__dict__)
        class Foo(object):
            __slots__ = []
        f = Foo()
        self.assertIn("__repr__", dir(f))
        # dir(obj_no__class__with__dict__)
        # (an ugly trick to cause getattr(f, "__class__") to fail)
        class Foo(object):
            __slots__ = ["__class__", "__dict__"]
            def __init__(self):
                self.bar = "wow"
        f = Foo()
        self.assertNotIn("__repr__", dir(f))
        self.assertIn("bar", dir(f))
        # dir(obj_using __dir__)
        class Foo(object):
            def __dir__(self):
                return ["kan", "ga", "roo"]
        f = Foo()
        self.assertTrue(dir(f) == ["ga", "kan", "roo"])
        # dir(obj__dir__tuple)
        class Foo(object):
            def __dir__(self):
                return ("b", "c", "a")
        res = dir(Foo())
        self.assertIsInstance(res, list)
        self.assertTrue(res == ["a", "b", "c"])
        # dir(obj__dir__not_sequence)
        class Foo(object):
            def __dir__(self):
                return 7
        f = Foo()
        self.assertRaises(TypeError, dir, f)
        # dir(traceback)
        try:
            raise IndexError
        except:
            self.assertEqual(len(dir(sys.exc_info()[2])), 4)
        # test that object has a __dir__()
        self.assertEqual(sorted([].__dir__()), dir([]))
    def test_divmod(self):
        # divmod() sign conventions for ints and floats (floor division).
        self.assertEqual(divmod(12, 7), (1, 5))
        self.assertEqual(divmod(-12, 7), (-2, 2))
        self.assertEqual(divmod(12, -7), (-2, -2))
        self.assertEqual(divmod(-12, -7), (1, -5))
        self.assertEqual(divmod(-sys.maxsize-1, -1), (sys.maxsize+1, 0))
        for num, denom, exp_result in [ (3.25, 1.0, (3.0, 0.25)),
                                        (-3.25, 1.0, (-4.0, 0.75)),
                                        (3.25, -1.0, (-4.0, -0.75)),
                                        (-3.25, -1.0, (3.0, -0.25))]:
            result = divmod(num, denom)
            self.assertAlmostEqual(result[0], exp_result[0])
            self.assertAlmostEqual(result[1], exp_result[1])
        self.assertRaises(TypeError, divmod)
    def test_eval(self):
        # eval() name resolution order (globals before locals), bytes with
        # a UTF-8 BOM, and error propagation from a locals mapping.
        self.assertEqual(eval('1+1'), 2)
        self.assertEqual(eval(' 1+1\n'), 2)
        globals = {'a': 1, 'b': 2}
        locals = {'b': 200, 'c': 300}
        self.assertEqual(eval('a', globals) , 1)
        self.assertEqual(eval('a', globals, locals), 1)
        self.assertEqual(eval('b', globals, locals), 200)
        self.assertEqual(eval('c', globals, locals), 300)
        globals = {'a': 1, 'b': 2}
        locals = {'b': 200, 'c': 300}
        bom = b'\xef\xbb\xbf'
        self.assertEqual(eval(bom + b'a', globals, locals), 1)
        self.assertEqual(eval('"\xe5"', globals), "\xe5")
        self.assertRaises(TypeError, eval)
        self.assertRaises(TypeError, eval, ())
        self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
        class X:
            def __getitem__(self, key):
                raise ValueError
        self.assertRaises(ValueError, eval, "foo", {}, X())
    def test_general_eval(self):
        # Tests that general mappings can be used for the locals argument
        class M:
            "Test mapping interface versus possible calls from eval()."
            def __getitem__(self, key):
                if key == 'a':
                    return 12
                raise KeyError
            def keys(self):
                return list('xyz')
        m = M()
        g = globals()
        self.assertEqual(eval('a', g, m), 12)
        self.assertRaises(NameError, eval, 'b', g, m)
        self.assertEqual(eval('dir()', g, m), list('xyz'))
        self.assertEqual(eval('globals()', g, m), g)
        self.assertEqual(eval('locals()', g, m), m)
        self.assertRaises(TypeError, eval, 'a', m)
        class A:
            "Non-mapping"
            pass
        m = A()
        self.assertRaises(TypeError, eval, 'a', g, m)
        # Verify that dict subclasses work as well
        class D(dict):
            def __getitem__(self, key):
                if key == 'a':
                    return 12
                return dict.__getitem__(self, key)
            def keys(self):
                return list('xyz')
        d = D()
        self.assertEqual(eval('a', g, d), 12)
        self.assertRaises(NameError, eval, 'b', g, d)
        self.assertEqual(eval('dir()', g, d), list('xyz'))
        self.assertEqual(eval('globals()', g, d), g)
        self.assertEqual(eval('locals()', g, d), d)
        # Verify locals stores (used by list comps)
        eval('[locals() for i in (2,3)]', g, d)
        eval('[locals() for i in (2,3)]', g, collections.UserDict())
        class SpreadSheet:
            "Sample application showing nested, calculated lookups."
            _cells = {}
            def __setitem__(self, key, formula):
                self._cells[key] = formula
            def __getitem__(self, key):
                # Each cell is a formula evaluated with the sheet itself
                # as the locals mapping, so cells can reference cells.
                return eval(self._cells[key], globals(), self)
        ss = SpreadSheet()
        ss['a1'] = '5'
        ss['a2'] = 'a1*6'
        ss['a3'] = 'a2*7'
        self.assertEqual(ss['a3'], 210)
        # Verify that dir() catches a non-list returned by eval
        # SF bug #1004669
        class C:
            def __getitem__(self, item):
                raise KeyError(item)
            def keys(self):
                return 1 # used to be 'a' but that's no longer an error
        self.assertRaises(TypeError, eval, 'dir()', globals(), C())
    def test_exec(self):
        # exec() writes into the supplied globals/locals mappings;
        # 'global' declarations route assignments to globals.
        g = {}
        exec('z = 1', g)
        if '__builtins__' in g:
            del g['__builtins__']
        self.assertEqual(g, {'z': 1})
        exec('z = 1+1', g)
        if '__builtins__' in g:
            del g['__builtins__']
        self.assertEqual(g, {'z': 2})
        g = {}
        l = {}
        with check_warnings():
            warnings.filterwarnings("ignore", "global statement",
                    module="<string>")
            exec('global a; a = 1; b = 2', g, l)
        if '__builtins__' in g:
            del g['__builtins__']
        if '__builtins__' in l:
            del l['__builtins__']
        self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
    def test_exec_globals(self):
        # exec() with restricted or read-only __builtins__/globals mappings.
        code = compile("print('Hello World!')", "", "exec")
        # no builtin function
        self.assertRaisesRegex(NameError, "name 'print' is not defined",
                               exec, code, {'__builtins__': {}})
        # __builtins__ must be a mapping type
        self.assertRaises(TypeError,
                          exec, code, {'__builtins__': 123})
        # no __build_class__ function
        code = compile("class A: pass", "", "exec")
        self.assertRaisesRegex(NameError, "__build_class__ not found",
                               exec, code, {'__builtins__': {}})
        class frozendict_error(Exception):
            pass
        class frozendict(dict):
            # Minimal read-only dict: any write raises frozendict_error.
            def __setitem__(self, key, value):
                raise frozendict_error("frozendict is readonly")
        # read-only builtins
        if isinstance(__builtins__, types.ModuleType):
            frozen_builtins = frozendict(__builtins__.__dict__)
        else:
            frozen_builtins = frozendict(__builtins__)
        code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
        self.assertRaises(frozendict_error,
                          exec, code, {'__builtins__': frozen_builtins})
        # read-only globals
        namespace = frozendict({})
        code = compile("x=1", "test", "exec")
        self.assertRaises(frozendict_error,
                          exec, code, namespace)
    def test_exec_redirected(self):
        # exec() must raise a normal NameError even when sys.stdout cannot
        # be flushed.
        savestdout = sys.stdout
        sys.stdout = None # Whatever that cannot flush()
        try:
            # Used to raise SystemError('error return without exception set')
            exec('a')
        except NameError:
            pass
        finally:
            sys.stdout = savestdout
    def test_filter(self):
        # filter() with a predicate, with None, over sequences and
        # __getitem__-style iterables; errors must propagate lazily.
        self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
        self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
        self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
        self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
        self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
        def identity(item):
            return 1
        filter(identity, Squares(5))
        self.assertRaises(TypeError, filter)
        class BadSeq(object):
            def __getitem__(self, index):
                if index<4:
                    return 42
                raise ValueError
        self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
        def badfunc():
            pass
        self.assertRaises(TypeError, list, filter(badfunc, range(5)))
        # test bltinmodule.c::filtertuple()
        self.assertEqual(list(filter(None, (1, 2))), [1, 2])
        self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
        self.assertRaises(TypeError, list, filter(42, (1, 2)))
    def test_filter_pickle(self):
        # filter objects must survive pickling at every protocol.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            f1 = filter(filter_char, "abcdeabcde")
            f2 = filter(filter_char, "abcdeabcde")
            self.check_iter_pickle(f1, list(f2), proto)
    def test_getattr(self):
        # getattr() identity, bad argument types, and non-ASCII names.
        self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
        self.assertRaises(TypeError, getattr, sys, 1)
        self.assertRaises(TypeError, getattr, sys, 1, "foo")
        self.assertRaises(TypeError, getattr)
        self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
        # unicode surrogates are not encodable to the default encoding (utf8)
        self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
    def test_hasattr(self):
        # hasattr() only swallows AttributeError; every other exception
        # raised from __getattr__ must propagate.
        self.assertTrue(hasattr(sys, 'stdout'))
        self.assertRaises(TypeError, hasattr, sys, 1)
        self.assertRaises(TypeError, hasattr)
        self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
        # Check that hasattr propagates all exceptions outside of
        # AttributeError.
        class A:
            def __getattr__(self, what):
                raise SystemExit
        self.assertRaises(SystemExit, hasattr, A(), "b")
        class B:
            def __getattr__(self, what):
                raise ValueError
        self.assertRaises(ValueError, hasattr, B(), "b")
    def test_hash(self):
        # hash() equality across equal values, unhashable types, and
        # __hash__ results that are longs or int subclasses.
        hash(None)
        self.assertEqual(hash(1), hash(1))
        self.assertEqual(hash(1), hash(1.0))
        hash('spam')
        self.assertEqual(hash('spam'), hash(b'spam'))
        hash((0,1,2,3))
        def f(): pass
        self.assertRaises(TypeError, hash, [])
        self.assertRaises(TypeError, hash, {})
        # Bug 1536021: Allow hash to return long objects
        class X:
            def __hash__(self):
                return 2**100
        self.assertEqual(type(hash(X())), int)
        class Z(int):
            def __hash__(self):
                return self
        self.assertEqual(hash(Z(42)), hash(42))
    def test_hex(self):
        # hex() formatting of positive/negative ints; non-ints must raise.
        self.assertEqual(hex(16), '0x10')
        self.assertEqual(hex(-16), '-0x10')
        self.assertRaises(TypeError, hex, {})
    def test_id(self):
        # id() must accept any object type without raising.
        id(None)
        id(1)
        id(1.0)
        id('spam')
        id((0,1,2,3))
        id([0,1,2,3])
        id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
    def test_iter(self):
        # iter() over tuples, lists and strings; exhaustion raises
        # StopIteration; wrong arity raises TypeError.
        self.assertRaises(TypeError, iter)
        self.assertRaises(TypeError, iter, 42, 42)
        lists = [("1", "2"), ["1", "2"], "12"]
        for l in lists:
            i = iter(l)
            self.assertEqual(next(i), '1')
            self.assertEqual(next(i), '2')
            self.assertRaises(StopIteration, next, i)
    def test_isinstance(self):
        # isinstance() with subclassing; second argument must be a type.
        class C:
            pass
        class D(C):
            pass
        class E:
            pass
        c = C()
        d = D()
        e = E()
        self.assertTrue(isinstance(c, C))
        self.assertTrue(isinstance(d, C))
        self.assertTrue(not isinstance(e, C))
        self.assertTrue(not isinstance(c, D))
        self.assertTrue(not isinstance('foo', E))
        self.assertRaises(TypeError, isinstance, E, 'foo')
        self.assertRaises(TypeError, isinstance)
    def test_issubclass(self):
        # issubclass() is reflexive; both arguments must be types.
        class C:
            pass
        class D(C):
            pass
        class E:
            pass
        c = C()
        d = D()
        e = E()
        self.assertTrue(issubclass(D, C))
        self.assertTrue(issubclass(C, C))
        self.assertTrue(not issubclass(C, D))
        self.assertRaises(TypeError, issubclass, 'foo', E)
        self.assertRaises(TypeError, issubclass, E, 'foo')
        self.assertRaises(TypeError, issubclass)
    def test_len(self):
        # len() on builtin containers and on __len__ results that are
        # invalid (non-int, float, negative, out of range, missing).
        self.assertEqual(len('123'), 3)
        self.assertEqual(len(()), 0)
        self.assertEqual(len((1, 2, 3, 4)), 4)
        self.assertEqual(len([1, 2, 3, 4]), 4)
        self.assertEqual(len({}), 0)
        self.assertEqual(len({'a':1, 'b': 2}), 2)
        class BadSeq:
            def __len__(self):
                raise ValueError
        self.assertRaises(ValueError, len, BadSeq())
        class InvalidLen:
            def __len__(self):
                return None
        self.assertRaises(TypeError, len, InvalidLen())
        class FloatLen:
            def __len__(self):
                return 4.5
        self.assertRaises(TypeError, len, FloatLen())
        class NegativeLen:
            def __len__(self):
                return -10
        self.assertRaises(ValueError, len, NegativeLen())
        class HugeLen:
            def __len__(self):
                return sys.maxsize + 1
        self.assertRaises(OverflowError, len, HugeLen())
        class HugeNegativeLen:
            def __len__(self):
                return -sys.maxsize-10
        self.assertRaises(ValueError, len, HugeNegativeLen())
        class NoLenMethod(object): pass
        self.assertRaises(TypeError, len, NoLenMethod())
    def test_map(self):
        # map() with one or several iterables, varargs functions, and
        # error propagation from the function or the iterable.
        self.assertEqual(
            list(map(lambda x: x*x, range(1,4))),
            [1, 4, 9]
        )
        try:
            from math import sqrt
        except ImportError:
            def sqrt(x):
                return pow(x, 0.5)
        self.assertEqual(
            list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
            [[4.0, 2.0], [9.0, 3.0]]
        )
        self.assertEqual(
            list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
            [10, 4, 6]
        )
        def plus(*v):
            accu = 0
            for i in v: accu = accu + i
            return accu
        self.assertEqual(
            list(map(plus, [1, 3, 7])),
            [1, 3, 7]
        )
        self.assertEqual(
            list(map(plus, [1, 3, 7], [4, 9, 2])),
            [1+4, 3+9, 7+2]
        )
        self.assertEqual(
            list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
            [1+4+1, 3+9+1, 7+2+0]
        )
        self.assertEqual(
            list(map(int, Squares(10))),
            [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
        )
        def Max(a, b):
            # map() stops at the shortest iterable, so None never occurs
            # here; kept for parity with historic map(None, ...) tests.
            if a is None:
                return b
            if b is None:
                return a
            return max(a, b)
        self.assertEqual(
            list(map(Max, Squares(3), Squares(2))),
            [0, 1]
        )
        self.assertRaises(TypeError, map)
        self.assertRaises(TypeError, map, lambda x: x, 42)
        class BadSeq:
            def __iter__(self):
                raise ValueError
                yield None
        self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
        def badfunc(x):
            raise RuntimeError
        self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
    def test_map_pickle(self):
        # map objects must survive pickling at every protocol.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            m1 = map(map_char, "Is this the real life?")
            m2 = map(map_char, "Is this the real life?")
            self.check_iter_pickle(m1, list(m2), proto)
    def test_max(self):
        # max() over strings, varargs, iterables, mixed numeric types,
        # plus the key= and default= keyword arguments.
        self.assertEqual(max('123123'), '3')
        self.assertEqual(max(1, 2, 3), 3)
        self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
        self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
        self.assertEqual(max(1, 2, 3.0), 3.0)
        self.assertEqual(max(1, 2.0, 3), 3)
        self.assertEqual(max(1.0, 2, 3), 3)
        self.assertRaises(TypeError, max)
        self.assertRaises(TypeError, max, 42)
        self.assertRaises(ValueError, max, ())
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, max, BadSeq())
        for stmt in (
            "max(key=int)",                 # no args
            "max(default=None)",
            "max(1, 2, default=None)",      # require container for default
            "max(default=None, key=int)",
            "max(1, key=int)",              # single arg not iterable
            "max(1, 2, keystone=int)",      # wrong keyword
            "max(1, 2, key=int, abc=int)",  # too many keywords
            "max(1, 2, key=1)",             # keyfunc is not callable
            ):
            try:
                exec(stmt, globals())
            except TypeError:
                pass
            else:
                self.fail(stmt)
        self.assertEqual(max((1,), key=neg), 1)     # one elem iterable
        self.assertEqual(max((1,2), key=neg), 1)    # two elem iterable
        self.assertEqual(max(1, 2, key=neg), 1)     # two elems
        self.assertEqual(max((), default=None), None)    # zero elem iterable
        self.assertEqual(max((1,), default=None), 1)     # one elem iterable
        self.assertEqual(max((1,2), default=None), 2)    # two elem iterable
        self.assertEqual(max((), default=1, key=neg), 1)
        self.assertEqual(max((1, 2), default=3, key=neg), 1)
        data = [random.randrange(200) for i in range(100)]
        keys = dict((elem, random.randrange(50)) for elem in data)
        f = keys.__getitem__
        # With ties, max keeps the FIRST maximal element; sorting the
        # reversed data keeps that property for the comparison.
        self.assertEqual(max(data, key=f),
                         sorted(reversed(data), key=f)[-1])
    def test_min(self):
        """Exercise min(): positional forms, error cases, key= and default=."""
        # Single-iterable form vs. multiple positional arguments.
        self.assertEqual(min('123123'), '1')
        self.assertEqual(min(1, 2, 3), 1)
        self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
        self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
        # Mixed int/float: the winning object itself is returned unchanged.
        self.assertEqual(min(1, 2, 3.0), 1)
        self.assertEqual(min(1, 2.0, 3), 1)
        self.assertEqual(min(1.0, 2, 3), 1.0)
        self.assertRaises(TypeError, min)
        self.assertRaises(TypeError, min, 42)
        self.assertRaises(ValueError, min, ())
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        # Errors raised while iterating must propagate out of min().
        self.assertRaises(ValueError, min, BadSeq())
        # Each statement below misuses the signature and must raise TypeError.
        for stmt in (
            "min(key=int)",                 # no args
            "min(default=None)",
            "min(1, 2, default=None)",      # require container for default
            "min(default=None, key=int)",
            "min(1, key=int)",              # single arg not iterable
            "min(1, 2, keystone=int)",      # wrong keyword
            "min(1, 2, key=int, abc=int)",  # two many keywords
            "min(1, 2, key=1)",             # keyfunc is not callable
            ):
            try:
                exec(stmt, globals())
            except TypeError:
                pass
            else:
                self.fail(stmt)
        self.assertEqual(min((1,), key=neg), 1)     # one elem iterable
        self.assertEqual(min((1,2), key=neg), 2)    # two elem iterable
        self.assertEqual(min(1, 2, key=neg), 2)     # two elems
        self.assertEqual(min((), default=None), None)    # zero elem iterable
        self.assertEqual(min((1,), default=None), 1)     # one elem iterable
        self.assertEqual(min((1,2), default=None), 1)    # two elem iterable
        self.assertEqual(min((), default=1, key=neg), 1)
        self.assertEqual(min((1, 2), default=1, key=neg), 2)
        # Randomized cross-check: min with key= agrees with sorted()'s first item.
        data = [random.randrange(200) for i in range(100)]
        keys = dict((elem, random.randrange(50)) for elem in data)
        f = keys.__getitem__
        self.assertEqual(min(data, key=f),
                         sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEqual(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
self.addCleanup(unlink, TESTFN)
with fp:
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
with fp:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
# embedded null bytes and characters
self.assertRaises(ValueError, open, 'a\x00b')
self.assertRaises(ValueError, open, b'a\x00b')
    @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
    def test_open_default_encoding(self):
        """open() in text mode must use the locale encoding by default."""
        old_environ = dict(os.environ)
        try:
            # try to get a user preferred encoding different than the current
            # locale encoding to check that open() uses the current locale
            # encoding and not the user preferred encoding
            for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
                if key in os.environ:
                    del os.environ[key]
            self.write_testfile()
            current_locale_encoding = locale.getpreferredencoding(False)
            fp = open(TESTFN, 'w')
            with fp:
                self.assertEqual(fp.encoding, current_locale_encoding)
        finally:
            # Restore the environment exactly as it was before the test.
            os.environ.clear()
            os.environ.update(old_environ)
def test_open_non_inheritable(self):
fileobj = open(__file__)
with fileobj:
self.assertFalse(os.get_inheritable(fileobj.fileno()))
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
    def test_pow(self):
        """pow() on ints and floats, plus the three-argument modular form."""
        self.assertEqual(pow(0,0), 1)
        self.assertEqual(pow(0,1), 0)
        self.assertEqual(pow(1,0), 1)
        self.assertEqual(pow(1,1), 1)
        self.assertEqual(pow(2,0), 1)
        self.assertEqual(pow(2,10), 1024)
        self.assertEqual(pow(2,20), 1024*1024)
        self.assertEqual(pow(2,30), 1024*1024*1024)
        # Negative bases alternate sign with the exponent's parity.
        self.assertEqual(pow(-2,0), 1)
        self.assertEqual(pow(-2,1), -2)
        self.assertEqual(pow(-2,2), 4)
        self.assertEqual(pow(-2,3), -8)
        self.assertAlmostEqual(pow(0.,0), 1.)
        self.assertAlmostEqual(pow(0.,1), 0.)
        self.assertAlmostEqual(pow(1.,0), 1.)
        self.assertAlmostEqual(pow(1.,1), 1.)
        self.assertAlmostEqual(pow(2.,0), 1.)
        self.assertAlmostEqual(pow(2.,10), 1024.)
        self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
        self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
        self.assertAlmostEqual(pow(-2.,0), 1.)
        self.assertAlmostEqual(pow(-2.,1), -2.)
        self.assertAlmostEqual(pow(-2.,2), 4.)
        self.assertAlmostEqual(pow(-2.,3), -8.)
        # The three-argument form rejects any float operand with TypeError.
        for x in 2, 2.0:
            for y in 10, 10.0:
                for z in 1000, 1000.0:
                    if isinstance(x, float) or \
                       isinstance(y, float) or \
                       isinstance(z, float):
                        self.assertRaises(TypeError, pow, x, y, z)
                    else:
                        self.assertAlmostEqual(pow(x, y, z), 24.0)
        # A negative base with a fractional exponent gives a complex result.
        self.assertAlmostEqual(pow(-1, 0.5), 1j)
        self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
        # 3-arg pow(): negative exponent and zero modulus are ValueError here.
        self.assertRaises(ValueError, pow, -1, -2, 3)
        self.assertRaises(ValueError, pow, 1, 2, 0)
        self.assertRaises(TypeError, pow)
    def test_input(self):
        """input() reading from a redirected stdin, plus its error conditions."""
        self.write_testfile()
        fp = open(TESTFN, 'r')
        savestdin = sys.stdin
        savestdout = sys.stdout # Eats the echo
        try:
            sys.stdin = fp
            sys.stdout = BitBucket()
            self.assertEqual(input(), "1+1")
            self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
            self.assertEqual(input('testing\n'), 'Dear John')
            # SF 1535165: don't segfault on closed stdin
            # sys.stdout must be a regular file for triggering
            sys.stdout = savestdout
            sys.stdin.close()
            self.assertRaises(ValueError, input)
            sys.stdout = BitBucket()
            sys.stdin = io.StringIO("NULL\0")
            self.assertRaises(TypeError, input, 42, 42)
            # Leading whitespace is preserved verbatim.
            sys.stdin = io.StringIO("    'whitespace'")
            self.assertEqual(input(), "    'whitespace'")
            sys.stdin = io.StringIO()
            self.assertRaises(EOFError, input)
            # A missing sys.stdout / sys.stdin is reported as RuntimeError.
            del sys.stdout
            self.assertRaises(RuntimeError, input, 'prompt')
            del sys.stdin
            self.assertRaises(RuntimeError, input, 'prompt')
        finally:
            # Always restore the real streams for the rest of the suite.
            sys.stdin = savestdin
            sys.stdout = savestdout
            fp.close()
# test_int(): see test_int.py for tests of built-in function int().
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
    def test_round(self):
        """round() on floats and ints: result types, banker's rounding, errors."""
        # One-argument round() of a float returns an int.
        self.assertEqual(round(0.0), 0.0)
        self.assertEqual(type(round(0.0)), int)
        self.assertEqual(round(1.0), 1.0)
        self.assertEqual(round(10.0), 10.0)
        self.assertEqual(round(1000000000.0), 1000000000.0)
        self.assertEqual(round(1e20), 1e20)
        self.assertEqual(round(-1.0), -1.0)
        self.assertEqual(round(-10.0), -10.0)
        self.assertEqual(round(-1000000000.0), -1000000000.0)
        self.assertEqual(round(-1e20), -1e20)
        self.assertEqual(round(0.1), 0.0)
        self.assertEqual(round(1.1), 1.0)
        self.assertEqual(round(10.1), 10.0)
        self.assertEqual(round(1000000000.1), 1000000000.0)
        self.assertEqual(round(-1.1), -1.0)
        self.assertEqual(round(-10.1), -10.0)
        self.assertEqual(round(-1000000000.1), -1000000000.0)
        self.assertEqual(round(0.9), 1.0)
        self.assertEqual(round(9.9), 10.0)
        self.assertEqual(round(999999999.9), 1000000000.0)
        self.assertEqual(round(-0.9), -1.0)
        self.assertEqual(round(-9.9), -10.0)
        self.assertEqual(round(-999999999.9), -1000000000.0)
        # With an ndigits argument, a float input keeps its float type.
        self.assertEqual(round(-8.0, -1), -10.0)
        self.assertEqual(type(round(-8.0, -1)), float)
        self.assertEqual(type(round(-8.0, 0)), float)
        self.assertEqual(type(round(-8.0, 1)), float)
        # Check even / odd rounding behaviour
        self.assertEqual(round(5.5), 6)
        self.assertEqual(round(6.5), 6)
        self.assertEqual(round(-5.5), -6)
        self.assertEqual(round(-6.5), -6)
        # Check behavior on ints
        self.assertEqual(round(0), 0)
        self.assertEqual(round(8), 8)
        self.assertEqual(round(-8), -8)
        self.assertEqual(type(round(0)), int)
        self.assertEqual(type(round(-8, -1)), int)
        self.assertEqual(type(round(-8, 0)), int)
        self.assertEqual(type(round(-8, 1)), int)
        # test new kwargs
        self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
        self.assertRaises(TypeError, round)
        # test generic rounding delegation for reals
        class TestRound:
            def __round__(self):
                return 23
        class TestNoRound:
            pass
        self.assertEqual(round(TestRound()), 23)
        self.assertRaises(TypeError, round, 1, 2, 3)
        self.assertRaises(TypeError, round, TestNoRound())
        # An instance attribute __round__ is not used for the protocol lookup.
        t = TestNoRound()
        t.__round__ = lambda *args: args
        self.assertRaises(TypeError, round, t)
        self.assertRaises(TypeError, round, t, 0)
# Some versions of glibc for alpha have a bug that affects
# float -> integer rounding (floor, ceil, rint, round) for
# values in the range [2**52, 2**53). See:
#
# http://sources.redhat.com/bugzilla/show_bug.cgi?id=5350
#
# We skip this test on Linux/alpha if it would fail.
    # Evaluated once at class-definition time; consumed by the skipIf below.
    linux_alpha = (platform.system().startswith('Linux') and
                   platform.machine().startswith('alpha'))
    system_round_bug = round(5e15+1) != 5e15+1
    @unittest.skipIf(linux_alpha and system_round_bug,
                     "test will fail; failure is probably due to a "
                     "buggy system round function")
    def test_round_large(self):
        """Integral floats in [2**52, 2**53) must round to themselves."""
        # Issue #1869: integral floats should remain unchanged
        self.assertEqual(round(5e15-1), 5e15-1)
        self.assertEqual(round(5e15), 5e15)
        self.assertEqual(round(5e15+1), 5e15+1)
        self.assertEqual(round(5e15+2), 5e15+2)
        self.assertEqual(round(5e15+3), 5e15+3)
def test_bug_27936(self):
# Verify that ndigits=None means the same as passing in no argument
for x in [1234,
1234.56,
decimal.Decimal('1234.56'),
fractions.Fraction(123456, 100)]:
self.assertEqual(round(x, None), round(x))
self.assertEqual(type(round(x, None)), type(round(x)))
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
# test_str(): see test_unicode.py and test_bytes.py for str() tests.
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, sum, values, bytearray(b''))
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
empty = []
sum(([x] for x in range(10)), empty)
self.assertEqual(empty, [])
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
    @staticmethod
    def get_vars_f0():
        # No locals exist at this point, so vars() should be empty
        # (asserted by test_vars below).
        return vars()
    @staticmethod
    def get_vars_f2():
        # Calling another function must not leak its locals into ours.
        BuiltinTest.get_vars_f0()
        # NOTE: test_vars() asserts exactly {'a': 1, 'b': 2}; renaming these
        # locals would change the vars() result.
        a = 1
        b = 2
        return vars()
    # Fixture: a class whose __dict__ attribute is a property returning a
    # plain dict; test_vars() uses it to check vars() reads __dict__.
    class C_get_vars(object):
        def getDict(self):
            return {'a':2}
        __dict__ = property(fget=getDict)
    def test_vars(self):
        """vars() with no args mirrors locals; with an arg it reads __dict__."""
        self.assertEqual(set(vars()), set(dir()))
        self.assertEqual(set(vars(sys)), set(dir(sys)))
        # Helpers defined above: empty locals, then exactly a=1/b=2.
        self.assertEqual(self.get_vars_f0(), {})
        self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
        self.assertRaises(TypeError, vars, 42, 42)
        self.assertRaises(TypeError, vars, 42)
        # C_get_vars exposes __dict__ as a property returning {'a': 2}.
        self.assertEqual(vars(self.C_get_vars()), {'a':2})
    def test_zip(self):
        """zip(): truncation to the shortest input and error propagation."""
        a = (1, 2, 3)
        b = (4, 5, 6)
        t = [(1, 4), (2, 5), (3, 6)]
        self.assertEqual(list(zip(a, b)), t)
        b = [4, 5, 6]
        self.assertEqual(list(zip(a, b)), t)
        # The longer input is truncated to the shortest one.
        b = (4, 5, 6, 7)
        self.assertEqual(list(zip(a, b)), t)
        # Old-style sequence protocol (__getitem__ only) is accepted.
        class I:
            def __getitem__(self, i):
                if i < 0 or i > 2: raise IndexError
                return i + 4
        self.assertEqual(list(zip(a, I())), t)
        self.assertEqual(list(zip()), [])
        self.assertEqual(list(zip(*[])), [])
        self.assertRaises(TypeError, zip, None)
        class G:
            pass
        self.assertRaises(TypeError, zip, a, G())
        self.assertRaises(RuntimeError, zip, a, TestFailingIter())
        # Make sure zip doesn't try to allocate a billion elements for the
        # result list when one of its arguments doesn't say how long it is.
        # A MemoryError is the most likely failure mode.
        class SequenceWithoutALength:
            def __getitem__(self, i):
                if i == 5:
                    raise IndexError
                else:
                    return i
        self.assertEqual(
            list(zip(SequenceWithoutALength(), range(2**30))),
            list(enumerate(range(5)))
        )
        # Exceptions other than IndexError propagate instead of ending the zip.
        class BadSeq:
            def __getitem__(self, i):
                if i == 5:
                    raise ValueError
                else:
                    return i
        self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_zip_pickle(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z1 = zip(a, b)
self.check_iter_pickle(z1, t, proto)
    def test_format(self):
        """Basic machinery of the format() builtin and object.__format__."""
        # Test the basic machinery of the format() builtin.  Don't test
        #  the specifics of the various formatters
        self.assertEqual(format(3, ''), '3')
        # Returns some classes to use for various tests.  There's
        #  an old-style version, and a new-style version
        def classes_new():
            class A(object):
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromA(A):
                pass
            class Simple(object): pass
            class DerivedFromSimple(Simple):
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromSimple2(DerivedFromSimple): pass
            return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
        def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
            self.assertEqual(format(A(3), 'spec'), '3spec')
            self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
            self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
            self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
                             '10abcdef')
        class_test(*classes_new())
        def empty_format_spec(value):
            # test that:
            #  format(x, '') == str(x)
            #  format(x) == str(x)
            self.assertEqual(format(value, ""), str(value))
            self.assertEqual(format(value), str(value))
        # for builtin types, format(x, "") == str(x)
        empty_format_spec(17**13)
        empty_format_spec(1.0)
        empty_format_spec(3.1415e104)
        empty_format_spec(-3.1415e104)
        empty_format_spec(3.1415e-104)
        empty_format_spec(-3.1415e-104)
        empty_format_spec(object)
        empty_format_spec(None)
        # TypeError because self.__format__ returns the wrong type
        class BadFormatResult:
            def __format__(self, format_spec):
                return 1.0
        self.assertRaises(TypeError, format, BadFormatResult(), "")
        # TypeError because format_spec is not unicode or str
        self.assertRaises(TypeError, format, object(), 4)
        self.assertRaises(TypeError, format, object(), object())
        # tests for object.__format__ really belong elsewhere, but
        #  there's no good place to put them
        x = object().__format__('')
        self.assertTrue(x.startswith('<object object at'))
        # first argument to object.__format__ must be string
        self.assertRaises(TypeError, object().__format__, 3)
        self.assertRaises(TypeError, object().__format__, object())
        self.assertRaises(TypeError, object().__format__, None)
        # --------------------------------------------------------------------
        # Issue #7994: object.__format__ with a non-empty format string is
        # disallowed
        class A:
            def __format__(self, fmt_str):
                return format('', fmt_str)
        self.assertEqual(format(A()), '')
        self.assertEqual(format(A(), ''), '')
        self.assertEqual(format(A(), 's'), '')
        class B:
            pass
        class C(object):
            pass
        for cls in [object, B, C]:
            obj = cls()
            self.assertEqual(format(obj), str(obj))
            self.assertEqual(format(obj, ''), str(obj))
            # The error message must name the offending class.
            with self.assertRaisesRegex(TypeError,
                                        r'\b%s\b' % re.escape(cls.__name__)):
                format(obj, 's')
        # --------------------------------------------------------------------
        # make sure we can take a subclass of str as a format spec
        class DerivedFromStr(str): pass
        self.assertEqual(format(0, DerivedFromStr('10')), '         0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
def test_construct_singletons(self):
for const in None, Ellipsis, NotImplemented:
tp = type(const)
self.assertIs(tp(), const)
self.assertRaises(TypeError, tp, 1, 2)
self.assertRaises(TypeError, tp, a=1, b=2)
class TestBreakpoint(unittest.TestCase):
    """Tests for the breakpoint() builtin, sys.breakpointhook and the
    $PYTHONBREAKPOINT environment variable."""
    def setUp(self):
        # These tests require a clean slate environment.  For example, if the
        # test suite is run with $PYTHONBREAKPOINT set to something else, it
        # will mess up these tests.  Similarly for sys.breakpointhook.
        # Cleaning the slate here means you can't use breakpoint() to debug
        # these tests, but I think that's okay.  Just use pdb.set_trace() if
        # you must.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        self.env = self.resources.enter_context(EnvironmentVarGuard())
        del self.env['PYTHONBREAKPOINT']
        self.resources.enter_context(
            swap_attr(sys, 'breakpointhook', sys.__breakpointhook__))
    def test_breakpoint(self):
        # Default hook forwards to pdb.set_trace().
        with patch('pdb.set_trace') as mock:
            breakpoint()
            mock.assert_called_once()
    def test_breakpoint_with_breakpointhook_set(self):
        my_breakpointhook = MagicMock()
        sys.breakpointhook = my_breakpointhook
        breakpoint()
        my_breakpointhook.assert_called_once_with()
    def test_breakpoint_with_breakpointhook_reset(self):
        my_breakpointhook = MagicMock()
        sys.breakpointhook = my_breakpointhook
        breakpoint()
        my_breakpointhook.assert_called_once_with()
        # Reset the hook and it will not be called again.
        sys.breakpointhook = sys.__breakpointhook__
        with patch('pdb.set_trace') as mock:
            breakpoint()
            mock.assert_called_once_with()
        my_breakpointhook.assert_called_once_with()
    def test_breakpoint_with_args_and_keywords(self):
        # All positional and keyword arguments are forwarded to the hook.
        my_breakpointhook = MagicMock()
        sys.breakpointhook = my_breakpointhook
        breakpoint(1, 2, 3, four=4, five=5)
        my_breakpointhook.assert_called_once_with(1, 2, 3, four=4, five=5)
    def test_breakpoint_with_passthru_error(self):
        # A hook that cannot accept the arguments lets the TypeError escape.
        def my_breakpointhook():
            pass
        sys.breakpointhook = my_breakpointhook
        self.assertRaises(TypeError, breakpoint, 1, 2, 3, four=4, five=5)
    @unittest.skipIf(sys.flags.ignore_environment, '-E was given')
    def test_envar_good_path_builtin(self):
        self.env['PYTHONBREAKPOINT'] = 'int'
        with patch('builtins.int') as mock:
            breakpoint('7')
            mock.assert_called_once_with('7')
    @unittest.skipIf(sys.flags.ignore_environment, '-E was given')
    def test_envar_good_path_other(self):
        self.env['PYTHONBREAKPOINT'] = 'sys.exit'
        with patch('sys.exit') as mock:
            breakpoint()
            mock.assert_called_once_with()
    @unittest.skipIf(sys.flags.ignore_environment, '-E was given')
    def test_envar_good_path_noop_0(self):
        # PYTHONBREAKPOINT=0 disables breakpoint() entirely.
        self.env['PYTHONBREAKPOINT'] = '0'
        with patch('pdb.set_trace') as mock:
            breakpoint()
            mock.assert_not_called()
    def test_envar_good_path_empty_string(self):
        # PYTHONBREAKPOINT='' is the same as it not being set.
        self.env['PYTHONBREAKPOINT'] = ''
        with patch('pdb.set_trace') as mock:
            breakpoint()
            mock.assert_called_once_with()
    @unittest.skipIf(sys.flags.ignore_environment, '-E was given')
    def test_envar_unimportable(self):
        # An unimportable value warns (RuntimeWarning) and becomes a no-op.
        for envar in (
                '.', '..', '.foo', 'foo.', '.int', 'int.',
                'nosuchbuiltin',
                'nosuchmodule.nosuchcallable',
                ):
            with self.subTest(envar=envar):
                self.env['PYTHONBREAKPOINT'] = envar
                mock = self.resources.enter_context(patch('pdb.set_trace'))
                w = self.resources.enter_context(check_warnings(quiet=True))
                breakpoint()
                self.assertEqual(
                    str(w.message),
                    f'Ignoring unimportable $PYTHONBREAKPOINT: "{envar}"')
                self.assertEqual(w.category, RuntimeWarning)
                mock.assert_not_called()
    def test_envar_ignored_when_hook_is_set(self):
        # A programmatically-set sys.breakpointhook wins over the env var.
        self.env['PYTHONBREAKPOINT'] = 'sys.exit'
        with patch('sys.exit') as mock:
            sys.breakpointhook = int
            breakpoint()
            mock.assert_not_called()
@unittest.skipUnless(pty, "the pty and signal modules must be available")
class PtyTests(unittest.TestCase):
    """Tests that use a pseudo terminal to guarantee stdin and stdout are
    terminals in the test environment"""
    def run_child(self, child, terminal_input):
        """Fork a pty child, run child(wpipe) in it, feed terminal_input to
        the pty, and return the (exactly two) result lines piped back."""
        r, w = os.pipe()  # Pipe test results from child back to parent
        try:
            pid, fd = pty.fork()
        except (OSError, AttributeError) as e:
            os.close(r)
            os.close(w)
            self.skipTest("pty.fork() raised {}".format(e))
            raise
        if pid == 0:
            # Child
            try:
                # Make sure we don't get stuck if there's a problem
                signal.alarm(2)
                os.close(r)
                with open(w, "w") as wpipe:
                    child(wpipe)
            except:
                traceback.print_exc()
            finally:
                # We don't want to return to unittest...
                os._exit(0)
        # Parent
        os.close(w)
        os.write(fd, terminal_input)
        # Get results from the pipe
        with open(r, "r") as rpipe:
            lines = []
            while True:
                line = rpipe.readline().strip()
                if line == "":
                    # The other end was closed => the child exited
                    break
                lines.append(line)
        # Check the result was got and corresponds to the user's terminal input
        if len(lines) != 2:
            # Something went wrong, try to get at stderr
            # Beware of Linux raising EIO when the slave is closed
            child_output = bytearray()
            while True:
                try:
                    chunk = os.read(fd, 3000)
                except OSError:  # Assume EIO
                    break
                if not chunk:
                    break
                child_output.extend(chunk)
            os.close(fd)
            child_output = child_output.decode("ascii", "ignore")
            self.fail("got %d lines in pipe but expected 2, child output was:\n%s"
                      % (len(lines), child_output))
        os.close(fd)
        # Wait until the child process completes
        os.waitpid(pid, 0)
        return lines
    def check_input_tty(self, prompt, terminal_input, stdio_encoding=None):
        """Drive input() through a real tty and compare the echoed result."""
        if not sys.stdin.isatty() or not sys.stdout.isatty():
            self.skipTest("stdin and stdout must be ttys")
        def child(wpipe):
            # Check the error handlers are accounted for
            if stdio_encoding:
                sys.stdin = io.TextIOWrapper(sys.stdin.detach(),
                                             encoding=stdio_encoding,
                                             errors='surrogateescape')
                sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
                                              encoding=stdio_encoding,
                                              errors='replace')
            print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe)
            print(ascii(input(prompt)), file=wpipe)
        lines = self.run_child(child, terminal_input + b"\r\n")
        # Check we did exercise the GNU readline path
        self.assertIn(lines[0], {'tty = True', 'tty = False'})
        if lines[0] != 'tty = True':
            self.skipTest("standard IO in should have been a tty")
        input_result = eval(lines[1])   # ascii() -> eval() roundtrip
        if stdio_encoding:
            expected = terminal_input.decode(stdio_encoding, 'surrogateescape')
        else:
            expected = terminal_input.decode(sys.stdin.encoding)  # what else?
        self.assertEqual(input_result, expected)
    def test_input_tty(self):
        # Test input() functionality when wired to a tty (the code path
        # is different and invokes GNU readline if available).
        self.check_input_tty("prompt", b"quux")
    def test_input_tty_non_ascii(self):
        # Check stdin/stdout encoding is used when invoking GNU readline
        self.check_input_tty("prompté", b"quux\xe9", "utf-8")
    def test_input_tty_non_ascii_unicode_errors(self):
        # Check stdin/stdout error handler is used when invoking GNU readline
        self.check_input_tty("prompté", b"quux\xe9", "ascii")
    def test_input_no_stdout_fileno(self):
        # Issue #24402: If stdin is the original terminal but stdout.fileno()
        # fails, do not use the original stdout file descriptor
        def child(wpipe):
            print("stdin.isatty():", sys.stdin.isatty(), file=wpipe)
            sys.stdout = io.StringIO()  # Does not support fileno()
            input("prompt")
            print("captured:", ascii(sys.stdout.getvalue()), file=wpipe)
        lines = self.run_child(child, b"quux\r")
        expected = (
            "stdin.isatty(): True",
            "captured: 'prompt'",
        )
        self.assertSequenceEqual(lines, expected)
class TestSorted(unittest.TestCase):
    """Behavioural tests for the sorted() builtin."""
    def test_basic(self):
        """sorted() returns a new ordered list and honours key=/reverse=."""
        ordered = list(range(100))
        shuffled = ordered[:]
        random.shuffle(shuffled)
        self.assertEqual(ordered, sorted(shuffled))
        self.assertNotEqual(ordered, shuffled)
        ordered.reverse()
        random.shuffle(shuffled)
        self.assertEqual(ordered, sorted(shuffled, key=lambda x: -x))
        self.assertNotEqual(ordered, shuffled)
        random.shuffle(shuffled)
        self.assertEqual(ordered, sorted(shuffled, reverse=1))
        self.assertNotEqual(ordered, shuffled)
    def test_bad_arguments(self):
        """Issue #29327: the iterable is positional-only; key/reverse are
        keyword-only."""
        sorted([])
        with self.assertRaises(TypeError):
            sorted(iterable=[])
        sorted([], key=None)
        with self.assertRaises(TypeError):
            sorted([], None)
    def test_inputtypes(self):
        """sorted() gives the same answer for any container of the same items."""
        letters = 'abracadabra'
        for container in (list, tuple, str):
            self.assertEqual(sorted(letters), sorted(container(letters)))
        letters = ''.join(set(letters))  # unique letters only
        for container in (str, set, frozenset, list, tuple, dict.fromkeys):
            self.assertEqual(sorted(letters), sorted(container(letters)))
    def test_baddecorator(self):
        """A cmp-style third positional argument is rejected."""
        words = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, sorted, words, None, lambda x,y: 0)
class ShutdownTest(unittest.TestCase):
    """Check interpreter-shutdown behaviour in a subprocess."""
    def test_cleanup(self):
        # Issue #19255: builtins are still available at shutdown
        # NOTE: `code` is runtime data executed in a child interpreter;
        # its text must not be altered.
        code = """if 1:
            import builtins
            import sys
            class C:
                def __del__(self):
                    print("before")
                    # Check that builtins still exist
                    len(())
                    print("after")
            c = C()
            # Make this module survive until builtins and sys are cleaned
            builtins.here = sys.modules[__name__]
            sys.here = sys.modules[__name__]
            # Create a reference loop so that this module needs to go
            # through a GC phase.
            here = sys.modules[__name__]
            """
        # Issue #20599: Force ASCII encoding to get a codec implemented in C,
        # otherwise the codec may be unloaded before C.__del__() is called, and
        # so print("before") fails because the codec cannot be used to encode
        # "before" to sys.stdout.encoding. For example, on Windows,
        # sys.stdout.encoding is the OEM code page and these code pages are
        # implemented in Python
        rc, out, err = assert_python_ok("-c", code,
                                        PYTHONIOENCODING="ascii")
        self.assertEqual(["before", "after"], out.decode().splitlines())
class TestType(unittest.TestCase):
    """Tests for the three-argument type() constructor and type attributes."""
    def test_new_type(self):
        # Minimal dynamic class: correct metadata and instantiation.
        A = type('A', (), {})
        self.assertEqual(A.__name__, 'A')
        self.assertEqual(A.__qualname__, 'A')
        self.assertEqual(A.__module__, __name__)
        self.assertEqual(A.__bases__, (object,))
        self.assertIs(A.__base__, object)
        x = A()
        self.assertIs(type(x), A)
        self.assertIs(x.__class__, A)
        class B:
            def ham(self):
                return 'ham%d' % self
        # Multiple bases: int becomes __base__, methods resolve from both.
        C = type('C', (B, int), {'spam': lambda self: 'spam%s' % self})
        self.assertEqual(C.__name__, 'C')
        self.assertEqual(C.__qualname__, 'C')
        self.assertEqual(C.__module__, __name__)
        self.assertEqual(C.__bases__, (B, int))
        self.assertIs(C.__base__, int)
        self.assertIn('spam', C.__dict__)
        self.assertNotIn('ham', C.__dict__)
        x = C(42)
        self.assertEqual(x, 42)
        self.assertIs(type(x), C)
        self.assertIs(x.__class__, C)
        self.assertEqual(x.ham(), 'ham42')
        self.assertEqual(x.spam(), 'spam42')
        self.assertEqual(x.to_bytes(2, 'little'), b'\x2a\x00')
    def test_type_nokwargs(self):
        # type() accepts no keyword arguments in the 3-arg form.
        with self.assertRaises(TypeError):
            type('a', (), {}, x=5)
        with self.assertRaises(TypeError):
            type('a', (), dict={})
    def test_type_name(self):
        # Any str name is accepted, including non-ASCII and empty.
        for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
            with self.subTest(name=name):
                A = type(name, (), {})
                self.assertEqual(A.__name__, name)
                self.assertEqual(A.__qualname__, name)
                self.assertEqual(A.__module__, __name__)
        # NUL bytes, lone surrogates and bytes names are rejected.
        with self.assertRaises(ValueError):
            type('A\x00B', (), {})
        with self.assertRaises(ValueError):
            type('A\udcdcB', (), {})
        with self.assertRaises(TypeError):
            type(b'A', (), {})
        C = type('C', (), {})
        # __name__ can be reassigned; __qualname__ is unaffected.
        for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
            with self.subTest(name=name):
                C.__name__ = name
                self.assertEqual(C.__name__, name)
                self.assertEqual(C.__qualname__, 'C')
                self.assertEqual(C.__module__, __name__)
        # Invalid reassignments leave __name__ unchanged.
        A = type('C', (), {})
        with self.assertRaises(ValueError):
            A.__name__ = 'A\x00B'
        self.assertEqual(A.__name__, 'C')
        with self.assertRaises(ValueError):
            A.__name__ = 'A\udcdcB'
        self.assertEqual(A.__name__, 'C')
        with self.assertRaises(TypeError):
            A.__name__ = b'A'
        self.assertEqual(A.__name__, 'C')
    def test_type_qualname(self):
        A = type('A', (), {'__qualname__': 'B.C'})
        self.assertEqual(A.__name__, 'A')
        self.assertEqual(A.__qualname__, 'B.C')
        self.assertEqual(A.__module__, __name__)
        # A bytes __qualname__ is rejected at creation and on assignment.
        with self.assertRaises(TypeError):
            type('A', (), {'__qualname__': b'B'})
        self.assertEqual(A.__qualname__, 'B.C')
        A.__qualname__ = 'D.E'
        self.assertEqual(A.__name__, 'A')
        self.assertEqual(A.__qualname__, 'D.E')
        with self.assertRaises(TypeError):
            A.__qualname__ = b'B'
        self.assertEqual(A.__qualname__, 'D.E')
    def test_type_doc(self):
        # Arbitrary objects are allowed as __doc__ in the namespace dict.
        for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', b'x', 42, None:
            A = type('A', (), {'__doc__': doc})
            self.assertEqual(A.__doc__, doc)
        # A lone surrogate in the namespace __doc__ fails to encode.
        with self.assertRaises(UnicodeEncodeError):
            type('A', (), {'__doc__': 'x\udcdcy'})
        A = type('A', (), {})
        self.assertEqual(A.__doc__, None)
        # ... but direct attribute assignment accepts it.
        for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', 'x\udcdcy', b'x', 42, None:
            A.__doc__ = doc
            self.assertEqual(A.__doc__, doc)
    def test_bad_args(self):
        # Wrong arity, wrong argument types, and incompatible bases.
        with self.assertRaises(TypeError):
            type()
        with self.assertRaises(TypeError):
            type('A', ())
        with self.assertRaises(TypeError):
            type('A', (), {}, ())
        with self.assertRaises(TypeError):
            type('A', (), dict={})
        with self.assertRaises(TypeError):
            type('A', [], {})
        with self.assertRaises(TypeError):
            type('A', (), types.MappingProxyType({}))
        with self.assertRaises(TypeError):
            type('A', (None,), {})
        with self.assertRaises(TypeError):
            type('A', (bool,), {})
        with self.assertRaises(TypeError):
            type('A', (int, str), {})
    def test_bad_slots(self):
        # Invalid __slots__ values and conflicts with base-class layout.
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': b'x'})
        with self.assertRaises(TypeError):
            type('A', (int,), {'__slots__': 'x'})
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': ''})
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': '42'})
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': 'x\x00y'})
        # A name may not appear both in __slots__ and in the class dict.
        with self.assertRaises(ValueError):
            type('A', (), {'__slots__': 'x', 'x': 0})
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': ('__dict__', '__dict__')})
        with self.assertRaises(TypeError):
            type('A', (), {'__slots__': ('__weakref__', '__weakref__')})
        class B:
            pass
        with self.assertRaises(TypeError):
            type('A', (B,), {'__slots__': '__dict__'})
        with self.assertRaises(TypeError):
            type('A', (B,), {'__slots__': '__weakref__'})
    def test_namespace_order(self):
        # bpo-34320: namespace should preserve order
        od = collections.OrderedDict([('a', 1), ('b', 2)])
        od.move_to_end('a')
        # NOTE(review): 'expected' is computed but unused below — the literal
        # in the assertion duplicates it.
        expected = list(od.items())
        C = type('C', (), od)
        self.assertEqual(list(C.__dict__.items())[:2], [('b', 2), ('a', 1)])
def load_tests(loader, tests, pattern):
    """Standard unittest load_tests hook: also run the doctests in builtins."""
    from doctest import DocTestSuite
    doc_suite = DocTestSuite(builtins)
    tests.addTest(doc_suite)
    return tests
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "af4e33aaa63c8bada3e62bc9829f30be",
"timestamp": "",
"source": "github",
"line_count": 1978,
"max_line_length": 105,
"avg_line_length": 36.3422649140546,
"alnum_prop": 0.5314043263545941,
"repo_name": "FFMG/myoddweb.piger",
"id": "7c9768a337bbb022379a6c2682f82241d7c10abc",
"size": "71929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitor/api/python/Python-3.7.2/Lib/test/test_builtin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "399228"
},
{
"name": "Batchfile",
"bytes": "93889"
},
{
"name": "C",
"bytes": "32256857"
},
{
"name": "C#",
"bytes": "197461"
},
{
"name": "C++",
"bytes": "200544641"
},
{
"name": "CMake",
"bytes": "192771"
},
{
"name": "CSS",
"bytes": "441704"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "33549"
},
{
"name": "DTrace",
"bytes": "2157"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "HTML",
"bytes": "181677643"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Inno Setup",
"bytes": "9647"
},
{
"name": "JavaScript",
"bytes": "705756"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "3332"
},
{
"name": "M4",
"bytes": "259214"
},
{
"name": "Makefile",
"bytes": "1262318"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "Objective-C",
"bytes": "2167778"
},
{
"name": "Objective-C++",
"bytes": "630"
},
{
"name": "PHP",
"bytes": "59030"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "42080"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "11781"
},
{
"name": "Python",
"bytes": "30377308"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Rich Text Format",
"bytes": "6743"
},
{
"name": "Roff",
"bytes": "55661"
},
{
"name": "Ruby",
"bytes": "5532"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "783974"
},
{
"name": "TSQL",
"bytes": "1201"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "70"
},
{
"name": "XSLT",
"bytes": "552736"
},
{
"name": "Yacc",
"bytes": "19623"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import permission_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from wagtail.wagtailsearch import models, forms
from wagtail.wagtailadmin.forms import SearchForm
@permission_required('wagtailadmin.access_admin')
def index(request):
    """List search queries that have editor's picks attached.

    Supports a free-text filter (``q``) and pagination (``p``). AJAX
    requests receive just the results fragment; everything else gets the
    full index page with a search form.
    """
    query_string = request.GET.get('q', "")

    # Only queries that actually have editor's picks.
    queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()
    if query_string:
        queries = queries.filter(query_string__icontains=query_string)

    # Paginate, clamping an out-of-range or non-numeric page number.
    paginator = Paginator(queries, 20)
    try:
        queries = paginator.page(request.GET.get('p', 1))
    except PageNotAnInteger:
        queries = paginator.page(1)
    except EmptyPage:
        queries = paginator.page(paginator.num_pages)

    if request.is_ajax():
        return render(request, "wagtailsearch/editorspicks/results.html", {
            'queries': queries,
            'query_string': query_string,
        })
    return render(request, 'wagtailsearch/editorspicks/index.html', {
        'queries': queries,
        'query_string': query_string,
        'search_form': SearchForm(
            data=dict(q=query_string) if query_string else None,
            placeholder=_("Search editor's picks")),
    })
def save_editorspicks(query, new_query, editors_pick_formset):
    """Persist an editor's picks formset against ``new_query``.

    Assigns ``sort_order`` from the user-chosen ordering, saves the
    formset, and — if the query was changed — moves all picks from
    ``query`` to ``new_query``. Returns True on success, False when the
    formset fails validation.
    """
    # Number the picks according to the order the user arranged them in.
    for sort_order, form in enumerate(editors_pick_formset.ordered_forms):
        form.instance.sort_order = sort_order

    if not editors_pick_formset.is_valid():
        return False

    editors_pick_formset.save()

    # If the query was changed, move all editors picks to the new query.
    if query != new_query:
        editors_pick_formset.get_queryset().update(query=new_query)

    return True
@permission_required('wagtailadmin.access_admin')
def add(request):
    """Create a new set of editor's picks for a search query.

    GET renders empty forms. POST validates the query form and the picks
    formset, saves them, and redirects to the index on success; on any
    validation failure the bound forms are re-rendered with errors.
    """
    # BUGFIX(idiom): test the HTTP method, not the truthiness of
    # request.POST — an empty POST body made the old check fall through
    # to the GET branch.
    if request.method == 'POST':
        # Resolve the Query object the picks will belong to.
        query_form = forms.QueryForm(request.POST)
        if query_form.is_valid():
            query = models.Query.get(query_form['query_string'].value())

            # Save editors picks against that query.
            editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
            if save_editorspicks(query, query, editors_pick_formset):
                messages.success(request, _("Editor's picks for '{0}' created.").format(query))
                return redirect('wagtailsearch_editorspicks_index')
        else:
            editors_pick_formset = forms.EditorsPickFormSet()
    else:
        query_form = forms.QueryForm()
        editors_pick_formset = forms.EditorsPickFormSet()

    return render(request, 'wagtailsearch/editorspicks/add.html', {
        'query_form': query_form,
        'editors_pick_formset': editors_pick_formset,
    })
@permission_required('wagtailadmin.access_admin')
def edit(request, query_id):
    """Edit the editor's picks attached to an existing query.

    The query string itself may be changed; in that case the picks are
    moved to the (possibly newly created) Query via save_editorspicks().
    """
    query = get_object_or_404(models.Query, id=query_id)

    # BUGFIX(idiom): test the HTTP method, not the truthiness of
    # request.POST (consistent with add()).
    if request.method == 'POST':
        query_form = forms.QueryForm(request.POST)
        if query_form.is_valid():
            # Resolve the query the picks should now hang off.
            new_query = models.Query.get(query_form['query_string'].value())

            # Save editors picks.
            editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
            if save_editorspicks(query, new_query, editors_pick_formset):
                messages.success(request, _("Editor's picks for '{0}' updated.").format(new_query))
                return redirect('wagtailsearch_editorspicks_index')
        else:
            # BUGFIX: the original left editors_pick_formset unbound when the
            # query form was invalid, raising NameError at render time.
            editors_pick_formset = forms.EditorsPickFormSet(instance=query)
    else:
        query_form = forms.QueryForm(initial=dict(query_string=query.query_string))
        editors_pick_formset = forms.EditorsPickFormSet(instance=query)

    return render(request, 'wagtailsearch/editorspicks/edit.html', {
        'query_form': query_form,
        'editors_pick_formset': editors_pick_formset,
        'query': query,
    })
@permission_required('wagtailadmin.access_admin')
def delete(request, query_id):
    """Delete all editor's picks for a query, after POST confirmation.

    GET renders a confirmation page; POST deletes the picks and redirects
    back to the editor's picks index.
    """
    query = get_object_or_404(models.Query, id=query_id)

    # BUGFIX(idiom): test the HTTP method, not the truthiness of
    # request.POST (consistent with add()/edit()).
    if request.method == 'POST':
        query.editors_picks.all().delete()
        messages.success(request, _("Editor's picks deleted."))
        return redirect('wagtailsearch_editorspicks_index')

    return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {
        'query': query,
    })
| {
"content_hash": "7311e8e48e6fcdd5df184034914b5d59",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 131,
"avg_line_length": 35.01574803149607,
"alnum_prop": 0.659995502586013,
"repo_name": "CreativeOutbreak/wagtail",
"id": "e17cd9d0e7a025974d380005563d762f4da0be24",
"size": "4447",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wagtail/wagtailsearch/views/editorspicks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import re
import sys
import h5py
import numpy as np
import pandas as pd
from ggr.analyses.bioinformatics import run_gprofiler
from ggr.analyses.linking import regions_to_genes
from tronn.util.utils import DataKeys
from tronn.util.formats import array_to_bed
# Enrichment-term substrings to drop from the gprofiler summaries below:
# any term whose name CONTAINS one of these strings is filtered out
# (too generic, or off-topic for this analysis).
REMOVE_SUBSTRINGS = [
    "anatomical",
    "ameboidal",
    "animal organ",
    "multicellular organism",
    "cellular developmental",
    "tube",
    "regulation of",
    "embryonic",
    "cardiovascular",
    "angiogenesis",
    "blood vessel",
    "vasculature",
    "immune",
    "defense",
    "signaling",
    "response to",
    "movement of"]

# Terms dropped only when the name matches one of these EXACTLY.
REMOVE_EXACT_STRINGS = [
    "system process",
    "system development",
    "developmental process",
    "tissue development"]
def load_data_from_multiple_h5_files(h5_files, key, example_indices=None):
    """Read one dataset from several HDF5 files and concatenate them.

    Args:
      h5_files: list of HDF5 file paths.
      key: dataset key to read from each file.
      example_indices: optional list, parallel to h5_files, of index arrays
        selecting which examples to read from each file. If None, every
        example is read.

    Returns:
      The per-file arrays concatenated along axis 0.
    """
    key_data = []
    # BUGFIX: the original iterated range(h5_files) (a list, so TypeError),
    # indexed example_indices[h5_idx] before checking for None, and then
    # sliced with the whole `example_indices` list instead of the per-file
    # index array.
    for h5_idx, h5_file in enumerate(h5_files):
        with h5py.File(h5_file, "r") as hf:
            if example_indices is not None:
                key_data.append(hf[key][example_indices[h5_idx]])
            else:
                key_data.append(hf[key][:])
    return np.concatenate(key_data, axis=0)
def main():
    """analyze spacing

    Command line: OUT_DIR SIG_PWMS_FILE LINKS_FILE TSS_FILE
    BACKGROUND_GENE_FILE MOTIFS_FILE [MOTIFS_FILE ...].

    For each significant PWM, selects regions whose importance scores are
    dominated by that motif, stratifies them by motif hit count, writes a
    BED file and linked gene set per count threshold, and runs gprofiler
    enrichments on each gene set.
    """
    # inputs
    SCRIPT_DIR = "/users/dskim89/git/ggr-project/figs/fig_4.homotypic"
    OUT_DIR = sys.argv[1]
    sig_pwms_file = sys.argv[2]
    links_file = sys.argv[3]
    tss_file = sys.argv[4]
    background_gene_file = sys.argv[5]
    motifs_files = sys.argv[6:]

    # dirs
    tmp_dir = "{}/tmp".format(OUT_DIR)
    os.system("mkdir -p {}".format(tmp_dir))
    more_plots_dir = "{}/plots".format(OUT_DIR)
    os.system("mkdir -p {}".format(more_plots_dir))

    # keys (dataset names inside the tronn-produced h5 files)
    max_val_key = DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_VAL
    #max_idx_key = DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_IDX
    pwm_scores_key = DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH
    #raw_pwm_scores_key = DataKeys.ORIG_SEQ_PWM_SCORES_THRESH
    importances_key = DataKeys.WEIGHTED_SEQ_ACTIVE
    metadata_key = DataKeys.SEQ_METADATA
    signal_keys = [
        "ATAC_SIGNALS",
        "H3K27ac_SIGNALS"]

    # params
    left_clip = 420 + 12
    num_positions = 200
    final_extend_len = 160
    MIN_REGION_COUNT = 100  # skip any region set smaller than this
    impt_clip_start = 12    # importance track is clipped to [12, 148)
    impt_clip_end = 148
    pwm_window = 20         # bp window around a motif hit when attributing importance
    fract_thresh = 0.9      # min fraction of importance that must fall near motif hits

    # read in the sig pwms
    print "WARNING - check pwms file"
    sig_pwms = list(pd.read_csv(sig_pwms_file, sep="\t", header=0).iloc[:,0])

    # read in the list of all pwm names (shared across all motifs files)
    with h5py.File(motifs_files[0], "r") as hf:
        all_pwms = hf[pwm_scores_key].attrs["pwm_names"]

    # get max vals (use to filter for which sequences have pwm)
    max_vals_sets = []
    for motifs_file in motifs_files:
        with h5py.File(motifs_file, "r") as hf:
            max_vals_sets.append(hf[max_val_key][:]) # {N, pwm, 1}

    # analyze each pwm
    for pwm_idx in range(len(sig_pwms)):

        # name and global index
        pwm_name = sig_pwms[pwm_idx]
        pwm_name_clean = re.sub("HCLUST-\\d+_", "", pwm_name)
        pwm_name_clean = re.sub(".UNK.0.A", "", pwm_name_clean)
        pwm_global_idx = np.where(
            [1 if pwm_name in global_name else 0
             for global_name in all_pwms])[0][0]
        print pwm_name_clean, pwm_global_idx

        # skip
        #continue
        #if "SMAD3" not in pwm_name_clean:
        #    continue

        # check to see which have this pwm and adjust indices
        # (one index array per motifs file)
        example_indices = []
        for max_vals in max_vals_sets:
            example_indices.append(np.where(max_vals[:,pwm_global_idx,0] != 0)[0])

        # debug
        if False:
            total = 0
            for example_index_set in example_indices:
                total += example_index_set.shape[0]
            print total

        # collect necessary data, sliced to the examples carrying this pwm
        pwm_scores = []
        metadata = []
        importances = [] # TODO
        for motifs_file_idx in range(len(motifs_files)):
            print motifs_files[motifs_file_idx]
            with h5py.File(motifs_files[motifs_file_idx], "r") as hf:
                pwm_scores.append(
                    hf[pwm_scores_key][example_indices[motifs_file_idx], :, :, pwm_global_idx])
                metadata.append(
                    hf[metadata_key][example_indices[motifs_file_idx], 0])
                # collapse importances over task and channel axes, then clip
                importances_reduced = np.sum(
                    hf[importances_key][example_indices[motifs_file_idx],:,:,:], axis=(1,3))
                importances_reduced = importances_reduced[:,impt_clip_start:impt_clip_end]
                importances.append(importances_reduced)
        pwm_scores = np.concatenate(pwm_scores, axis=0)
        metadata = np.concatenate(metadata, axis=0)
        importances = np.concatenate(importances, axis=0)

        # adjust scores
        pwm_scores = np.max(pwm_scores, axis=1) # {N, seqlen}
        score_len = pwm_scores.shape[1]

        # now basically want to calculate a fraction of scores that fall in pwm window
        pwm_impt_coverage = np.zeros((pwm_scores.shape[0]))
        pwm_hit_count = np.zeros((pwm_scores.shape[0]))
        for example_idx in range(pwm_scores.shape[0]):
            # figure out how many positions were marked
            impt_sum_total = np.sum(importances[example_idx] > 0)

            # per index, get window around and collect importance scores
            pwm_pos_indices = np.where(pwm_scores[example_idx] > 0)[0]
            pwm_hit_count[example_idx] = pwm_pos_indices.shape[0]

            # collect importance scores within window
            # NOTE(review): overlapping windows double-count positions here —
            # presumably acceptable since only a threshold comparison follows.
            impt_sum_pwm_overlap = 0
            for pwm_pos_idx in pwm_pos_indices:
                start = max(pwm_pos_idx - pwm_window/2, 0)
                end = min(pwm_pos_idx + pwm_window/2, score_len)
                impt_sum_pwm_overlap += np.sum(importances[example_idx,start:end] > 0)

            # and check
            fract_covered = impt_sum_pwm_overlap / float(impt_sum_total)
            pwm_impt_coverage[example_idx] = fract_covered

        # filter: keep regions whose importance is dominated by this pwm
        keep_indices = np.where(pwm_impt_coverage >= fract_thresh)[0]
        metadata = metadata[keep_indices]
        pwm_hit_count = pwm_hit_count[keep_indices]

        # look at different count levels
        count_thresholds = range(1, 6)
        for count_threshold in count_thresholds:

            # get matching regions
            thresholded_indices = np.where(pwm_hit_count >= count_threshold)[0]
            thresholded_metadata = metadata[thresholded_indices]
            if thresholded_metadata.shape[0] < MIN_REGION_COUNT:
                continue

            # make bed
            bed_file = "{}/{}.count-{}.bed.gz".format(OUT_DIR, pwm_name_clean, count_threshold)
            array_to_bed(
                thresholded_metadata,
                bed_file, interval_key="active", merge=True)

            # get gene set enrichment
            gene_set_file = "{}.gene_set.txt.gz".format(bed_file.split(".bed")[0])
            regions_to_genes(
                bed_file,
                links_file,
                tss_file,
                gene_set_file,
                filter_by_score=0.5)

            # gprofiler
            gprofiler_dir = "{}/enrichments.{}".format(OUT_DIR, pwm_name_clean)
            os.system("mkdir -p {}".format(gprofiler_dir))
            if True:
                run_gprofiler(
                    gene_set_file, background_gene_file,
                    gprofiler_dir, ordered=True, header=True)

        # NOTE(review): everything from this `continue` down to quit() is
        # unreachable legacy code from an earlier spacing analysis; it
        # references names never defined in this function (position_ranges,
        # mid_idx, pwm_aligned_array, regions_to_genes_through_links,
        # background_gene_set_file). Kept verbatim.
        continue

        summary_df = None
        for position_range in position_ranges:
            print ">> range:", position_range
            prefix = "{}/{}.range_{}-{}".format(
                tmp_dir, pwm_name_clean, position_range[0], position_range[1]-1)
            position_range = np.array(position_range)

            # check positive side
            pos_position_range = position_range + mid_idx
            #print pos_position_range
            positive_present = np.sum(
                pwm_aligned_array[:,pos_position_range[0]:pos_position_range[1]] > 0, axis=1) > 0

            # check negative side
            neg_position_range = -np.flip(position_range) + mid_idx
            #print neg_position_range
            negative_present = np.sum(
                pwm_aligned_array[:,neg_position_range[0]:neg_position_range[1]] > 0, axis=1) > 0

            # combine
            distance_present = np.logical_or(positive_present, negative_present)
            distance_indices = np.where(distance_present)[0]
            print "num regions:", distance_indices.shape[0]

            pwm_counts = np.sum(pwm_aligned_array[distance_indices] > 0, axis=1)
            pwm_count_indices = np.where(pwm_counts == 2)[0]
            print pwm_count_indices.shape

            if distance_indices.shape[0] > MIN_REGION_COUNT:
                #thresholded_metadata = metadata[distance_indices]
                thresholded_metadata = metadata[distance_indices]#[pwm_count_indices]

                # build the BED file
                bed_file = "{}.bed.gz".format(prefix)
                array_to_bed(
                    thresholded_metadata,
                    bed_file, interval_key="active", merge=True)

                # track ATAC/H3K27ac?

                # then match to proximal gene set
                # TODO - think about adjusting max dist OR use distance based links
                gene_set_file = "{}.gene_set.txt.gz".format(bed_file.split(".bed")[0])
                regions_to_genes_through_links(
                    bed_file,
                    links_file,
                    gene_set_file)

                # and run gprofiler
                gprofiler_file = "{}.go_gprofiler.txt".format(gene_set_file.split(".txt.gz")[0])
                if not os.path.isfile(gprofiler_file):
                    run_gprofiler(
                        gene_set_file,
                        background_gene_set_file,
                        tmp_dir,
                        header=True)

                # read in gprofiler file and clean
                thresholded_summary = pd.read_csv(gprofiler_file, sep="\t")
                thresholded_summary = thresholded_summary[
                    (thresholded_summary["domain"] == "BP") |
                    (thresholded_summary["domain"] == "CC") |
                    (thresholded_summary["domain"] == "rea")]
                #thresholded_summary = thresholded_summary[thresholded_summary["domain"] == "rea"]
                #thresholded_summary = thresholded_summary[thresholded_summary["domain"] == "BP"]
                thresholded_summary = thresholded_summary[
                    ["term.id", "p.value", "term.name"]]
                thresholded_summary["range"] = "-".join(
                    [str(val) for val in position_range.tolist()])
                print "term count:", thresholded_summary.shape[0]

                # add to summary
                if summary_df is None:
                    summary_df = thresholded_summary.copy()
                else:
                    # filter first?
                    if thresholded_summary.shape[0] > 3:
                        summary_df = summary_df[
                            summary_df["term.id"].isin(thresholded_summary["term.id"].values)]
                    summary_df = pd.concat([summary_df, thresholded_summary], axis=0)

        summary_df = summary_df.sort_values("term.name")
        summary_df["log10pval"] = -np.log10(summary_df["p.value"].values)
        summary_df = summary_df.drop("p.value", axis=1)
        summary_file = "{}/{}.summary.txt.gz".format(tmp_dir, pwm_name_clean)
        summary_df.to_csv(summary_file, sep="\t", index=False, header=True, compression="gzip")

        # remove substrings
        for substring in REMOVE_SUBSTRINGS:
            keep = [False if substring in term else True
                    for term in summary_df["term.name"]]
            keep_indices = np.where(keep)[0]
            summary_df = summary_df.iloc[keep_indices]

        # remove exact strings
        keep = [False if term in REMOVE_EXACT_STRINGS else True
                for term in summary_df["term.name"]]
        keep_indices = np.where(keep)[0]
        summary_df = summary_df.iloc[keep_indices]

        summary_df = summary_df.sort_values(["term.name", "range"])
        summary_file = "{}/{}.summary.filt.txt.gz".format(tmp_dir, pwm_name_clean)
        summary_df.to_csv(summary_file, sep="\t", index=False, header=True, compression="gzip")

        quit()

    return
# Run the analysis when the script is executed (no __main__ guard in original).
main()
| {
"content_hash": "5eac354bca0e5a7c4334cc46a4c558d8",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 98,
"avg_line_length": 38.31044776119403,
"alnum_prop": 0.5561788997974131,
"repo_name": "vervacity/ggr-project",
"id": "0986b93a081af8b9f1b5eafecf3fe9c562866fc6",
"size": "12857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "figs/archive/fig_4.homotypic/analyze.genome.solo_regions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94993"
},
{
"name": "R",
"bytes": "50884"
}
],
"symlink_target": ""
} |
SERVE_CONTROLLER_NAME = "SERVE_CONTROLLER_ACTOR"
#: Actor name used to register HTTP proxy actor
SERVE_PROXY_NAME = "SERVE_PROXY_ACTOR"
#: HTTP Address
DEFAULT_HTTP_ADDRESS = "http://127.0.0.1:8000"
#: HTTP Host
DEFAULT_HTTP_HOST = "127.0.0.1"
#: HTTP Port
DEFAULT_HTTP_PORT = 8000
#: Max concurrency (1e6 — effectively unbounded)
ASYNC_CONCURRENCY = int(1e6)
#: Max time to wait for HTTP proxy in `serve.start()`.
HTTP_PROXY_TIMEOUT = 60
#: Default histogram buckets for latency tracker (milliseconds, per the
#: ``_MS`` suffix).
DEFAULT_LATENCY_BUCKET_MS = [
    1,
    2,
    5,
    10,
    20,
    50,
    100,
    200,
    500,
    1000,
    2000,
    5000,
]
#: Name of backend reconfiguration method implemented by user.
BACKEND_RECONFIGURE_METHOD = "reconfigure"
| {
"content_hash": "fb68afc25bc5101ad767d9c67f73891e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 62,
"avg_line_length": 18.605263157894736,
"alnum_prop": 0.669024045261669,
"repo_name": "richardliaw/ray",
"id": "52c76972f82d4d0909d81a1fd9f453920965eb36",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/serve/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
import unittest
import mock
import maccli.service.configuration
from tests.mock_data import *
class AuthTestCase(unittest.TestCase):
    """Tests for the configuration service (list and search)."""

    @mock.patch('maccli.dao.api_configuration.get_user_configuration')
    def test_list_configurations(self, mock_get):
        """list_configurations() passes through the dao's JSON payload."""
        mock_get.return_value = (200, MOCK_CONFIGURATION_LIST_JSON)
        json_response = maccli.service.configuration.list_configurations()
        self.assertTrue(mock_get.called)
        self.assertEqual(json_response, MOCK_CONFIGURATION_LIST_JSON)

    @mock.patch('maccli.dao.api_configuration.search_public_configuration')
    def test_search_configurations(self, mock_search):
        """search_configurations() passes through the dao's JSON payload.

        BUGFIX: this method was also named ``test_list_configurations``,
        which silently shadowed the method above so it never ran. The
        parameter was also renamed from ``mock``, which shadowed the
        imported ``mock`` module.
        """
        mock_search.return_value = (200, MOCK_CONFIGURATION_SEARCH_JSON)
        json_response = maccli.service.configuration.search_configurations(None)
        self.assertTrue(mock_search.called)
        self.assertEqual(json_response, MOCK_CONFIGURATION_SEARCH_JSON)
| {
"content_hash": "6d488a0935f2306c657de267c973abdc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 80,
"avg_line_length": 39.95454545454545,
"alnum_prop": 0.7406143344709898,
"repo_name": "manageacloud/manageacloud-cli",
"id": "b699b0d877f725dd26b9bdb58d5272d33526ac9f",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_service_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "845"
},
{
"name": "Python",
"bytes": "399172"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sgsclient
def readme():
    """Return the contents of README.rst, used as the long description.

    The encoding is pinned to UTF-8 so the build does not depend on the
    machine's locale (the package is Python-3-only per its classifiers).
    """
    with open("README.rst", encoding="utf-8") as f:
        return f.read()
# Package metadata for sgsclient.
# BUGFIX: the comma after the classifiers list was missing, which made this
# whole file a SyntaxError and the package uninstallable.
setup(name="sgsclient",
      version=sgsclient.version,
      description="The python client library for the Stratum Game Server.",
      long_description=readme(),
      classifiers=[
          "Development Status :: 3 - Alpha",
          "Environment :: Console",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python :: 3",
          "Topic :: Games/Entertainment :: Board Games",
          "Topic :: Games/Entertainment :: Puzzle Games",
          "Topic :: Games/Entertainment :: Turn Based Strategy"
      ],
      keywords=["sgsclient", "stratum", "game", "server", "turn", "based",
                "board", "ai", "autonomous", "tictactoe"],
      url="https://python-client.stratumgs.org",
      author="David Korhumel",
      author_email="dpk2442@gmail.com",
      license="MIT",
      packages=find_packages(),
      install_requires=[],
      include_package_data=True,
      zip_safe=False)
| {
"content_hash": "0428830f9b3df8f8b5962d58e92492c5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 32.78787878787879,
"alnum_prop": 0.5988909426987061,
"repo_name": "stratumgs/stratumgs-python-client",
"id": "25c29fe6262df618c4ace952069159954d4f34e0",
"size": "1082",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11305"
}
],
"symlink_target": ""
} |
from OSIM.Modeling.CircuitSystemEquations import CircuitSystemEquations
from OSIM.Simulation.CircuitAnalysis.CircuitAnalyser import CircuitAnalyser
from OSIM.Simulation.NetToComp import NetToComp
# NOTE(review): commented-out variant kept from an earlier run (narrower
# sweep, three plotted quantities).
#seq = CircuitSystemEquations(NetToComp('Transistor_invers.net').getComponents())
#ca = CircuitAnalyser(seq)
#ca.plot_lin(ca.getDCParamSweep('V2',0,0.3,0.001,["Q1bi","Q1ci","Q1ei"],'V1',[0.85]))

# Build the circuit system equations from the netlist, then run a DC
# parameter sweep and plot the result on linear axes. Presumably: sweep
# 'V2' from 0 to 0.5 in 0.01 steps, plotting "Q1IT" with 'V1' fixed at
# 0.85 — confirm against getDCParamSweep's signature.
seq = CircuitSystemEquations(NetToComp('Transistor_invers.net').getComponents())
ca = CircuitAnalyser(seq)
ca.plot_lin(ca.getDCParamSweep('V2',0,0.5,0.01,["Q1IT"],'V1',[0.85]))
| {
"content_hash": "cab9349cb8e41f5961ffced354edd190",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 85,
"avg_line_length": 51.81818181818182,
"alnum_prop": 0.7789473684210526,
"repo_name": "tmaiwald/OSIM",
"id": "7ea2e4e92b38674c4a2fa400fcc5323ce35283a1",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OSIM/Simulation/Simulation_Workbench/Transistor_invers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "222991"
}
],
"symlink_target": ""
} |
from random import choice
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from users.models import *
from competencies.models import Organization
from competencies import my_admin, utils
from competencies import my_admin
class UserViewTests(TestCase):
    """Tests for all views in users."""

    # May want to use simpler password hashing in tests.
    # https://docs.djangoproject.com/en/1.8/topics/testing/overview/#speeding-up-the-tests

    # Uses some functions that are copied directly from competencies.tests.
    # Watch for these getting out of sync.

    def setUp(self):
        """Create a client, two users with profiles, and element counters."""
        # Build an organization, down to the performance indicator level.
        self.num_orgs = 2
        self.num_elements = 2
        self.client = Client()

        # Create 2 test users.
        self.test_user_0 = User.objects.create_user(username='testuser0', password='pw')
        UserProfile(user=self.test_user_0).save()
        self.test_user_1 = User.objects.create_user(username='testuser1', password='pw')
        UserProfile(user=self.test_user_1).save()

        # Empty lists of elements.
        self.test_organizations = []

        # Increment for every element created.
        # Ensures a unique name for every element, which makes testing some behaviors easier.
        self.element_number = 0

    def build_to_organizations(self):
        """Build out system to the organization level.

        Creates ``self.num_orgs`` organizations; the first half owned and
        editable by user 0, the rest by user 1.
        """
        # BUGFIX: the loop previously iterated range(2) regardless of
        # self.num_orgs, contradicting the documented ability to set
        # self.num_orgs = 1 for tests that only need one org.
        for organization_num in range(self.num_orgs):
            name = "Test Organization %d" % organization_num
            if organization_num < self.num_orgs / 2:
                owner = self.test_user_0
            else:
                owner = self.test_user_1
            new_organization = Organization.objects.create(name=name, owner=owner)
            new_organization.editors.add(owner)
            self.test_organizations.append(new_organization)

    def test_login_view(self):
        """Lets user log in."""
        test_url = reverse('users:login')
        response = self.client.get(test_url)
        self.assertEqual(response.status_code, 200)

    def test_register_view(self):
        """Lets new user register an account."""
        test_url = reverse('users:register')
        self.generic_test_blank_form(test_url)

        # Test new user can be created, and userprofile connected properly.
        response = self.client.post(test_url, {'username': 'ozzy', 'email': '',
                                               'password1': 'pw', 'password2': 'pw',
                                               })
        self.assertEqual(response.status_code, 302)
        new_user = User.objects.filter(username='ozzy')[0]
        self.assertTrue(hasattr(new_user, 'userprofile'))

    def test_profile_view(self):
        """Lets user view their profile details."""
        self.build_to_organizations()
        test_url = reverse('users:profile')

        # Test that anonymous users are redirected.
        response = self.client.get(test_url)
        self.assertEqual(response.status_code, 302)

        # Test that registered users see appropriate information.
        self.client.login(username='testuser0', password='pw')
        response = self.client.get(test_url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('testuser0' in response.content.decode())

        # The profile must list every organization the user belongs to.
        # (Simplified from a linear scan over User.objects.all().)
        user = User.objects.get(username='testuser0')
        for org in user.organization_set.all():
            self.assertTrue(org.name in response.content.decode())

    def generic_test_blank_form(self, test_url):
        """A helper method to test that a form-based page returns a blank form properly."""
        # Test that a logged in user can get a blank form properly.
        self.client.login(username='testuser0', password='pw')
        response = self.client.get(test_url)
        self.assertEqual(response.status_code, 200)
class ModelTests(TestCase):
    """Test aspects of models."""

    def test_userprofile(self):
        """A saved UserProfile must be reachable from its User."""
        user = User(username='new_user', password='new_user_pw')
        user.save()
        profile = UserProfile(user=user)
        profile.save()
        self.assertEqual(user.userprofile, profile)
class MyAdminTests(TestCase):
    """Test individual functions in my_admin.py."""

    def test_add_userprofile(self):
        """add_userprofile() must attach a userprofile to a new user."""
        user = User.objects.create_user(username='randy', password='pw')
        my_admin.add_userprofile(user)
        self.assertTrue(hasattr(user, 'userprofile'))
| {
"content_hash": "19750c7578912cd5d128896b089aee8d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 97,
"avg_line_length": 38.96212121212121,
"alnum_prop": 0.6360101108302547,
"repo_name": "openlearningtools/opencompetencies",
"id": "0772e071980d5aca9bbf280fedb72b94645edc57",
"size": "5143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2544"
},
{
"name": "HTML",
"bytes": "35563"
},
{
"name": "Python",
"bytes": "129686"
}
],
"symlink_target": ""
} |
import pytest
_ = pytest.importorskip('pyaio')
import os
import unittest
import re
from tempfile import mkdtemp
from shutil import rmtree
from slimta.diskstorage import DiskStorage
from slimta.envelope import Envelope
class TestDiskStorage(unittest.TestCase):
    """Exercise DiskStorage persistence: write, update, load, get, remove."""

    # Storage ids are expected to be 32 hex characters.
    id_pattern = re.compile(r'[0-9a-fA-F]{32}')

    def setUp(self):
        # Fresh scratch directories per test so no state leaks between tests.
        self.env_dir = mkdtemp()
        self.meta_dir = mkdtemp()
        self.tmp_dir = mkdtemp()
        self.disk = DiskStorage(self.env_dir, self.meta_dir, self.tmp_dir)

    def tearDown(self):
        for scratch_dir in (self.env_dir, self.meta_dir, self.tmp_dir):
            rmtree(scratch_dir)

    def _write_test_envelope(self, rcpts=None):
        """Store one test envelope; return (storage id, envelope)."""
        envelope = Envelope('sender@example.com', rcpts or ['rcpt@example.com'])
        envelope.timestamp = 9876543210
        return self.disk.write(envelope, 1234567890), envelope

    def test_tmp_cleanup(self):
        """Writing must leave nothing behind in the tmp directory."""
        self._write_test_envelope()
        self.assertEqual([], os.listdir(self.tmp_dir))

    def test_write(self):
        envelope_id, envelope = self._write_test_envelope()
        stored_env = self.disk.ops.read_env(envelope_id)
        stored_meta = self.disk.ops.read_meta(envelope_id)
        self.assertTrue(self.id_pattern.match(envelope_id))
        self.assertEqual(vars(envelope), vars(stored_env))
        self.assertEqual(1234567890, stored_meta['timestamp'])
        self.assertEqual(0, stored_meta['attempts'])
        self.assertEqual('sender@example.com', stored_env.sender)
        self.assertEqual(['rcpt@example.com'], stored_env.recipients)
        self.assertEqual(9876543210, stored_env.timestamp)

    def test_set_timestamp(self):
        envelope_id, envelope = self._write_test_envelope()
        self.disk.set_timestamp(envelope_id, 1111)
        self.assertEqual(vars(envelope),
                         vars(self.disk.ops.read_env(envelope_id)))
        self.assertEqual(1111,
                         self.disk.ops.read_meta(envelope_id)['timestamp'])

    def test_increment_attempts(self):
        envelope_id, envelope = self._write_test_envelope()
        self.assertEqual(1, self.disk.increment_attempts(envelope_id))
        self.assertEqual(2, self.disk.increment_attempts(envelope_id))
        self.assertEqual(vars(envelope),
                         vars(self.disk.ops.read_env(envelope_id)))
        self.assertEqual(2,
                         self.disk.ops.read_meta(envelope_id)['attempts'])

    def test_set_recipients_delivered(self):
        envelope_id, envelope = self._write_test_envelope()
        for delivered in ([1], [3]):
            self.disk.set_recipients_delivered(envelope_id, delivered)
        self.assertEqual(vars(envelope),
                         vars(self.disk.ops.read_env(envelope_id)))
        self.assertEqual(
            [1, 3],
            self.disk.ops.read_meta(envelope_id)['delivered_indexes'])

    def test_load(self):
        # Map of storage id -> envelope for everything we queued.
        queued = dict(self._write_test_envelope() for _ in range(2))
        loaded = list(self.disk.load())
        self.assertEqual(len(queued), len(loaded))
        for timestamp, loaded_id in loaded:
            if loaded_id not in queued:
                raise ValueError('Queued does not match loaded')
            self.assertEqual(vars(queued[loaded_id]),
                             vars(self.disk.ops.read_env(loaded_id)))
            self.assertEqual(timestamp,
                             self.disk.ops.read_meta(loaded_id)['timestamp'])

    def test_get(self):
        envelope_id, envelope = self._write_test_envelope(
            ['rcpt1@example.com', 'rcpt2@example.com'])
        self.disk.increment_attempts(envelope_id)
        self.disk.set_recipients_delivered(envelope_id, [0])
        fetched_env, fetched_attempts = self.disk.get(envelope_id)
        self.assertEqual('sender@example.com', fetched_env.sender)
        self.assertEqual(['rcpt2@example.com'], fetched_env.recipients)
        self.assertEqual(1, fetched_attempts)

    def test_remove(self):
        # Removal must cope with a complete entry as well as entries whose
        # envelope or metadata file has already disappeared.
        envelope_id, _ = self._write_test_envelope()
        self.disk.remove(envelope_id)

        envelope_id, _ = self._write_test_envelope()
        self.disk.ops.delete_env(envelope_id)
        self.disk.remove(envelope_id)

        envelope_id, _ = self._write_test_envelope()
        self.disk.ops.delete_meta(envelope_id)
        self.disk.remove(envelope_id)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| {
"content_hash": "6b26bc010a7e21f99edd56db07cd0eb3",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 75,
"avg_line_length": 36.78813559322034,
"alnum_prop": 0.6166781847500576,
"repo_name": "slimta/python-slimta",
"id": "1eaf9f5bdc9114bb5ac760b27397cee37555aa89",
"size": "4342",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_slimta_queue_disk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "606434"
}
],
"symlink_target": ""
} |
"""Tests for homekit_controller config flow."""
import asyncio
import unittest.mock
from unittest.mock import AsyncMock, patch
import aiohomekit
from aiohomekit.exceptions import AuthenticationError
from aiohomekit.model import Accessories, Accessory
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.homekit_controller import config_flow
from homeassistant.components.homekit_controller.const import KNOWN_DEVICES
from homeassistant.components.homekit_controller.storage import async_get_entity_storage
from homeassistant.data_entry_flow import FlowResultType
from homeassistant.helpers import device_registry
from homeassistant.helpers.service_info.bluetooth import BluetoothServiceInfo
from tests.common import MockConfigEntry, mock_device_registry
# (exception raised while *starting* pairing, error key shown on the form)
PAIRING_START_FORM_ERRORS = [
    (KeyError, "pairing_failed"),
]
# (exception raised while starting pairing, abort reason for the flow)
PAIRING_START_ABORT_ERRORS = [
    (aiohomekit.AccessoryNotFoundError, "accessory_not_found_error"),
    (aiohomekit.UnavailableError, "already_paired"),
]
# Transient errors: the flow shows a "try later" step instead of aborting.
PAIRING_TRY_LATER_ERRORS = [
    (aiohomekit.BusyError, "busy_error"),
    (aiohomekit.MaxTriesError, "max_tries_error"),
    (IndexError, "protocol_error"),
]
# (exception raised while *finishing* pairing, error key shown on the form)
PAIRING_FINISH_FORM_ERRORS = [
    (aiohomekit.exceptions.MalformedPinError, "authentication_error"),
    (aiohomekit.MaxPeersError, "max_peers_error"),
    (aiohomekit.AuthenticationError, "authentication_error"),
    (aiohomekit.UnknownError, "unknown_error"),
    (KeyError, "pairing_failed"),
]
# (exception raised while finishing pairing, abort reason for the flow)
PAIRING_FINISH_ABORT_ERRORS = [
    (aiohomekit.AccessoryNotFoundError, "accessory_not_found_error")
]
# Codes that parse but are rejected as insecure unless the user opts in.
INSECURE_PAIRING_CODES = [
    "111-11-111",
    "123-45-678",
    "22222222",
    "111-11-111 ",
    " 111-11-111",
]
# Codes that do not match the XXX-XX-XXX / 8-digit pin formats at all.
INVALID_PAIRING_CODES = [
    "aaa-aa-aaa",
    "aaa-11-aaa",
    "111-aa-aaa",
    "aaa-aa-111",
    "1111-1-111",
    "a111-11-111",
    "111-11-111a",
    "1111111",
]
# Codes ensure_pin_format should accept (after normalization/stripping).
VALID_PAIRING_CODES = [
    "114-11-111",
    "123-45-679",
    "123-45-679 ",
    "11121111",
    "98765432",
    " 98765432 ",
]
# A BLE advertisement that is not HomeKit: manufacturer key 12 rather than
# 76 (the key used by the HomeKit entries below).
NOT_HK_BLUETOOTH_SERVICE_INFO = BluetoothServiceInfo(
    name="FakeAccessory",
    address="AA:BB:CC:DD:EE:FF",
    rssi=-81,
    manufacturer_data={12: b"\x06\x12\x34"},
    service_data={},
    service_uuids=[],
    source="local",
)
# HomeKit advertisement whose device id does not exist on the test controller.
HK_BLUETOOTH_SERVICE_INFO_NOT_DISCOVERED = BluetoothServiceInfo(
    name="Eve Energy Not Found",
    address="AA:BB:CC:DD:EE:FF",
    rssi=-81,
    # ID is '9b:86:af:01:af:db'
    manufacturer_data={
        76: b"\x061\x01\x9b\x86\xaf\x01\xaf\xdb\x07\x00\x06\x00\x02\x02X\x19\xb1Q"
    },
    service_data={},
    service_uuids=[],
    source="local",
)
# HomeKit advertisement for the mock accessory, flagged as unpaired.
HK_BLUETOOTH_SERVICE_INFO_DISCOVERED_UNPAIRED = BluetoothServiceInfo(
    name="Eve Energy Found Unpaired",
    address="AA:BB:CC:DD:EE:FF",
    rssi=-81,
    # ID is '00:00:00:00:00:00', pairing flag is byte 3
    manufacturer_data={
        76: b"\x061\x01\x00\x00\x00\x00\x00\x00\x07\x00\x06\x00\x02\x02X\x19\xb1Q"
    },
    service_data={},
    service_uuids=[],
    source="local",
)
# Same accessory, but the pairing flag (byte 3) is cleared: already paired.
HK_BLUETOOTH_SERVICE_INFO_DISCOVERED_PAIRED = BluetoothServiceInfo(
    name="Eve Energy Found Paired",
    address="AA:BB:CC:DD:EE:FF",
    rssi=-81,
    # ID is '00:00:00:00:00:00', pairing flag is byte 3
    manufacturer_data={
        76: b"\x061\x00\x00\x00\x00\x00\x00\x00\x07\x00\x06\x00\x02\x02X\x19\xb1Q"
    },
    service_data={},
    service_uuids=[],
    source="local",
)
@pytest.mark.parametrize("pairing_code", INVALID_PAIRING_CODES)
def test_invalid_pairing_codes(pairing_code):
"""Test ensure_pin_format raises for an invalid pin code."""
with pytest.raises(aiohomekit.exceptions.MalformedPinError):
config_flow.ensure_pin_format(pairing_code)
@pytest.mark.parametrize("pairing_code", INSECURE_PAIRING_CODES)
def test_insecure_pairing_codes(pairing_code):
"""Test ensure_pin_format raises for an invalid setup code."""
with pytest.raises(config_flow.InsecureSetupCode):
config_flow.ensure_pin_format(pairing_code)
config_flow.ensure_pin_format(pairing_code, allow_insecure_setup_codes=True)
@pytest.mark.parametrize("pairing_code", VALID_PAIRING_CODES)
def test_valid_pairing_codes(pairing_code):
"""Test ensure_pin_format corrects format for a valid pin in an alternative format."""
valid_pin = config_flow.ensure_pin_format(pairing_code).split("-")
assert len(valid_pin) == 3
assert len(valid_pin[0]) == 3
assert len(valid_pin[1]) == 2
assert len(valid_pin[2]) == 3
def get_flow_context(hass, result):
    """Return the context of the in-progress flow whose id matches *result*."""
    wanted_flow_id = result["flow_id"]
    for progress in hass.config_entries.flow.async_progress():
        if progress["flow_id"] == wanted_flow_id:
            return progress["context"]
    # Mirror next() on an exhausted generator when no flow matches.
    raise StopIteration
def get_device_discovery_info(
    device, upper_case_props=False, missing_csharp=False, paired=False
) -> zeroconf.ZeroconfServiceInfo:
    """Turn a aiohomekit format zeroconf entry into a homeassistant one."""
    description = device.description
    # Build the mDNS TXT record properties up front; all values are strings
    # except the counters taken straight from the accessory description.
    props = {
        "md": description.model,
        "pv": "1.0",
        zeroconf.ATTR_PROPERTIES_ID: description.id,
        "c#": description.config_num,
        "s#": description.state_num,
        "ff": "0",
        "ci": "7",
        "sf": "0" if paired else "1",
        "sh": "",
    }
    if missing_csharp:
        # Simulate devices that fail to advertise a config number.
        del props["c#"]
    if upper_case_props:
        # Simulate devices that advertise upper-cased TXT keys.
        props = {key.upper(): val for (key, val) in props.items()}
    return zeroconf.ZeroconfServiceInfo(
        host="127.0.0.1",
        hostname=description.name,
        name=description.name + "._hap._tcp.local.",
        addresses=["127.0.0.1"],
        port=8080,
        properties=props,
        type="_hap._tcp.local.",
    )
def setup_mock_accessory(controller):
    """Add a bridge accessory to a test controller."""
    fake_bridge = Accessories()
    lightbulb = Accessory.create_with_info(
        name="Koogeek-LS1-20833F",
        manufacturer="Koogeek",
        model="LS1",
        serial_number="12345",
        firmware_revision="1.1",
    )
    lightbulb.aid = 1
    # Give the accessory a lightbulb service with an "on" characteristic.
    bulb_service = lightbulb.add_service(ServicesTypes.LIGHTBULB)
    bulb_service.add_char(CharacteristicsTypes.ON).value = 0
    fake_bridge.add_accessory(lightbulb)
    return controller.add_device(fake_bridge)
@pytest.mark.parametrize("upper_case_props", [True, False])
@pytest.mark.parametrize("missing_csharp", [True, False])
async def test_discovery_works(hass, controller, upper_case_props, missing_csharp):
"""Test a device being discovered."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device, upper_case_props, missing_csharp)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert get_flow_context(hass, result) == {
"source": config_entries.SOURCE_ZEROCONF,
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "pair"
# Pairing doesn't error error and pairing results
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == {}
async def test_abort_duplicate_flow(hass, controller):
    """A second discovery of the same device aborts while a flow is in progress."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    # Same device is discovered again while the first flow is still open
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_in_progress"
async def test_pair_already_paired_1(hass, controller):
    """Discovery of a device advertising as already paired aborts the flow."""
    device = setup_mock_accessory(controller)
    # Flag device as already paired
    discovery_info = get_device_discovery_info(device, paired=True)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_paired"
async def test_unknown_domain_type(hass, controller):
    """Test that aiohomekit can reject discoveries it doesn't support."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Advertise an mDNS service type that is not HAP
    discovery_info.name = "TestDevice._music._tap.local."
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"
async def test_id_missing(hass, controller):
    """Discovery without the mandatory id TXT property aborts the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Remove id from device
    del discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID]
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "invalid_properties"
async def test_discovery_ignored_model(hass, controller):
    """Discovery of a model on the ignore list (HHKBridge) aborts the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # "md" is the model TXT property; this model is on the ignore list
    discovery_info.properties["md"] = "HHKBridge1,1"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"
async def test_discovery_ignored_hk_bridge(hass, controller):
    """Ensure we ignore homekit bridges and accessories created by the homekit integration."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Register a device with the same MAC under the local homekit bridge domain
    config_entry = MockConfigEntry(domain=config_flow.HOMEKIT_BRIDGE_DOMAIN, data={})
    config_entry.add_to_hass(hass)
    formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")
    dev_reg = mock_device_registry(hass)
    dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
    )
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"
async def test_discovery_does_not_ignore_non_homekit(hass, controller):
    """Do not ignore devices that are not from the homekit integration."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Same MAC as the discovery, but registered by an unrelated integration
    config_entry = MockConfigEntry(domain="not_homekit", data={})
    config_entry.add_to_hass(hass)
    formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")
    dev_reg = mock_device_registry(hass)
    dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
    )
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered - the pairing form should be offered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "form"
async def test_discovery_broken_pairing_flag(hass, controller):
    """
    There is already a config entry for the pairing and its pairing flag is wrong in zeroconf.

    We have seen this particular implementation error in 2 different devices.
    """
    await controller.add_paired_device(Accessories(), "00:00:00:00:00:00")
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)
    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Make sure that we are pairable.  "sf" is a string TXT property
    # ("0" when paired), so compare against the string - the previous
    # `!= 0x0` compared "1" to an int and was vacuously true.
    assert discovery_info.properties["sf"] != "0"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    # Should still be paired.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 1
    # Even though discovered as pairable, we bail out as already paired.
    assert result["reason"] == "already_paired"
async def test_discovery_invalid_config_entry(hass, controller):
    """There is already a config entry for the pairing id but it's invalid."""
    pairing = await controller.add_paired_device(Accessories(), "00:00:00:00:00:00")
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)
    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Device is discovered; make the stored pairing keys look invalid
    with patch.object(
        pairing,
        "list_accessories_and_characteristics",
        side_effect=AuthenticationError("Invalid pairing keys"),
    ):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_ZEROCONF},
            data=discovery_info,
        )
    # Discovery of a HKID that is in a pairable state but for which there is
    # already a config entry - in that case the stale config entry is
    # automatically removed.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 0
    # And new config flow should continue allowing user to set up a new pairing
    assert result["type"] == "form"
async def test_discovery_already_configured(hass, controller):
    """Discovery of an already configured device aborts but refreshes host/port."""
    entry = MockConfigEntry(
        domain="homekit_controller",
        data={
            "AccessoryIP": "4.4.4.4",
            "AccessoryPort": 66,
            "AccessoryPairingID": "00:00:00:00:00:00",
        },
        unique_id="00:00:00:00:00:00",
    )
    entry.add_to_hass(hass)
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Set device as already paired
    discovery_info.properties["sf"] = 0x00
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    # The stale IP/port stored on the entry are updated from the discovery
    assert entry.data["AccessoryIP"] == discovery_info.host
    assert entry.data["AccessoryPort"] == discovery_info.port
async def test_discovery_already_configured_update_csharp(hass, controller):
    """Already configured and csharp changes."""
    entry = MockConfigEntry(
        domain="homekit_controller",
        data={
            "AccessoryIP": "4.4.4.4",
            "AccessoryPort": 66,
            "AccessoryPairingID": "AA:BB:CC:DD:EE:FF",
        },
        unique_id="aa:bb:cc:dd:ee:ff",
    )
    entry.add_to_hass(hass)
    # Pretend a live connection exists for this pairing id
    connection_mock = AsyncMock()
    hass.data[KNOWN_DEVICES] = {"AA:BB:CC:DD:EE:FF": connection_mock}
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Set device as already paired, with a bumped config number ("c#")
    discovery_info.properties["sf"] = 0x00
    discovery_info.properties["c#"] = 99999
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
    await hass.async_block_till_done()
    # Host/port on the entry are refreshed from the new discovery
    assert entry.data["AccessoryIP"] == discovery_info.host
    assert entry.data["AccessoryPort"] == discovery_info.port
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
# User initiates pairing - device refuses to enter pairing mode
test_exc = exception("error")
with patch.object(device, "async_start_pairing", side_effect=test_exc):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_TRY_LATER_ERRORS)
async def test_pair_try_later_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
# User initiates pairing - device refuses to enter pairing mode but may be successful after entering pairing mode or rebooting
test_exc = exception("error")
with patch.object(device, "async_start_pairing", side_effect=test_exc):
result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result2["step_id"] == expected
assert result2["type"] == "form"
# Device is rebooted or placed into pairing mode as they have been instructed
# We start pairing again
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"], user_input={"any": "key"}
)
# .. and successfully complete pair
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result4["type"] == "create_entry"
assert result4["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User initiates pairing - device refuses to enter pairing mode
test_exc = exception("error")
with patch.object(device, "async_start_pairing", side_effect=test_exc):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User gets back the form
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["errors"] == {}
# User re-tries entering pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User initiates pairing - this triggers the device to show a pairing code
# and then HA to show a pairing form
finish_pairing = unittest.mock.AsyncMock(side_effect=exception("error"))
with patch.object(device, "async_start_pairing", return_value=finish_pairing):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User enters pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "abort"
assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": config_entries.SOURCE_ZEROCONF},
data=discovery_info,
)
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User initiates pairing - this triggers the device to show a pairing code
# and then HA to show a pairing form
finish_pairing = unittest.mock.AsyncMock(side_effect=exception("error"))
with patch.object(device, "async_start_pairing", return_value=finish_pairing):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
}
# User enters pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert get_flow_context(hass, result) == {
"title_placeholders": {"name": "TestDevice", "category": "Outlet"},
"unique_id": "00:00:00:00:00:00",
"source": config_entries.SOURCE_ZEROCONF,
"pairing": True,
}
async def test_user_works(hass, controller):
    """Test user initiated flow discovers devices and pairs."""
    setup_mock_accessory(controller)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
    }
    # User picks the discovered device from the list
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"device": "TestDevice"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
        "unique_id": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice", "category": "Other"},
    }
    # User enters the pairing code and a config entry is created
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_user_pairing_with_insecure_setup_code(hass, controller):
    """An insecure setup code is rejected until the user explicitly allows it."""
    device = setup_mock_accessory(controller)
    device.pairing_code = "123-45-678"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
    }
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"device": "TestDevice"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "source": config_entries.SOURCE_USER,
        "unique_id": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice", "category": "Other"},
    }
    # First attempt with the insecure code is rejected with a form error
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "123-45-678"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert result["errors"] == {"pairing_code": "insecure_setup_code"}
    # Opting in to insecure setup codes lets the pairing proceed
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={"pairing_code": "123-45-678", "allow_insecure_setup_codes": True},
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_user_no_devices(hass, controller):
    """A user-initiated flow aborts immediately when discovery finds nothing."""
    flow_result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    # No accessory was registered on the controller, so nothing is pairable.
    assert (flow_result["type"], flow_result["reason"]) == ("abort", "no_devices")
async def test_user_no_unpaired_devices(hass, controller):
    """Test user initiated pairing where no unpaired devices discovered."""
    device = setup_mock_accessory(controller)
    # Pair the mock device so that it shows as paired in discovery
    finish_pairing = await device.async_start_pairing(device.description.id)
    await finish_pairing(device.pairing_code)
    # Device discovery is requested - only paired devices exist
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
async def test_unignore_works(hass, controller):
    """Test rediscovery triggered by unignoring a device works."""
    device = setup_mock_accessory(controller)
    # Device is unignored
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_UNIGNORE},
        data={"unique_id": device.description.id},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "title_placeholders": {"name": "TestDevice", "category": "Other"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_UNIGNORE,
    }
    # User initiates pairing by clicking on 'configure' - device enters pairing mode and displays code
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    # Pairing finalized
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
async def test_unignore_ignores_missing_devices(hass, controller):
    """Test rediscovery triggered by unignoring handles devices that have gone away."""
    setup_mock_accessory(controller)
    # Device is unignored, but the unique_id does not match any device
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_UNIGNORE},
        data={"unique_id": "00:00:00:00:00:01"},
    )
    assert result["type"] == "abort"
    assert result["reason"] == "accessory_not_found_error"
async def test_discovery_dismiss_existing_flow_on_paired(hass, controller):
    """Test that existing flows get dismissed once paired to something else."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    # Set device as already not paired
    discovery_info.properties["sf"] = 0x01
    discovery_info.properties["c#"] = 99999
    discovery_info.properties[zeroconf.ATTR_PROPERTIES_ID] = "AA:BB:CC:DD:EE:FF"
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    await hass.async_block_till_done()
    # Exactly one flow should now be in progress for this integration
    assert (
        len(hass.config_entries.flow.async_progress_by_handler("homekit_controller"))
        == 1
    )
    # Set device as already paired
    discovery_info.properties["sf"] = 0x00
    # Device is discovered again after pairing to someone else
    result2 = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert result2["type"] == "abort"
    assert result2["reason"] == "already_paired"
    await hass.async_block_till_done()
    # The stale in-progress flow was dismissed
    assert (
        len(hass.config_entries.flow.async_progress_by_handler("homekit_controller"))
        == 0
    )
async def test_mdns_update_to_paired_during_pairing(hass, controller):
    """Test we do not abort pairing if mdns is updated to reflect paired during pairing."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)
    discovery_info_paired = get_device_discovery_info(device, paired=True)
    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info,
    )
    assert get_flow_context(hass, result) == {
        "title_placeholders": {"name": "TestDevice", "category": "Outlet"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    mdns_update_to_paired = asyncio.Event()
    original_async_start_pairing = device.async_start_pairing

    async def _async_start_pairing(*args, **kwargs):
        # Wrap the real pairing entry point so its finish step can be stalled.
        finish_pairing = await original_async_start_pairing(*args, **kwargs)

        async def _finish_pairing(*args, **kwargs):
            # Insert an event wait to make sure
            # we trigger the mdns update in the middle of the pairing
            await mdns_update_to_paired.wait()
            return await finish_pairing(*args, **kwargs)

        return _finish_pairing

    with patch.object(device, "async_start_pairing", _async_start_pairing):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert get_flow_context(hass, result) == {
        "title_placeholders": {"name": "TestDevice", "category": "Outlet"},
        "unique_id": "00:00:00:00:00:00",
        "source": config_entries.SOURCE_ZEROCONF,
    }
    # User enters pairing code; the configure call blocks on the event above
    task = asyncio.create_task(
        hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"pairing_code": "111-22-333"}
        )
    )
    # Make sure when the device is discovered as paired via mdns
    # it does not abort pairing if it happens before pairing is finished
    result2 = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery_info_paired,
    )
    assert result2["type"] == FlowResultType.ABORT
    assert result2["reason"] == "already_paired"
    # Unblock the stalled finish step and let the original flow complete
    mdns_update_to_paired.set()
    result = await task
    assert result["type"] == FlowResultType.CREATE_ENTRY
    assert result["title"] == "Koogeek-LS1-20833F"
    assert result["data"] == {}
async def test_discovery_no_bluetooth_support(hass, controller):
    """Test bluetooth discovery is ignored when BLE transport is unavailable."""
    with patch(
        "homeassistant.components.homekit_controller.config_flow.aiohomekit_const.BLE_TRANSPORT_SUPPORTED",
        False,
    ):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_BLUETOOTH},
            data=HK_BLUETOOTH_SERVICE_INFO_NOT_DISCOVERED,
        )
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == "ignored_model"
async def test_bluetooth_not_homekit(hass, controller):
    """Test bluetooth discovery with a non-homekit device."""
    with patch(
        "homeassistant.components.homekit_controller.config_flow.aiohomekit_const.BLE_TRANSPORT_SUPPORTED",
        True,
    ):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_BLUETOOTH},
            data=NOT_HK_BLUETOOTH_SERVICE_INFO,
        )
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == "ignored_model"
async def test_bluetooth_valid_device_no_discovery(hass, controller):
    """Test bluetooth discovery with a homekit device and discovery fails."""
    # The advertised id does not match any device on the controller
    with patch(
        "homeassistant.components.homekit_controller.config_flow.aiohomekit_const.BLE_TRANSPORT_SUPPORTED",
        True,
    ):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_BLUETOOTH},
            data=HK_BLUETOOTH_SERVICE_INFO_NOT_DISCOVERED,
        )
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == "accessory_not_found_error"
async def test_bluetooth_valid_device_discovery_paired(hass, controller):
    """Test bluetooth discovery with a homekit device and discovery works."""
    setup_mock_accessory(controller)
    ble_flag = (
        "homeassistant.components.homekit_controller.config_flow."
        "aiohomekit_const.BLE_TRANSPORT_SUPPORTED"
    )
    with patch(ble_flag, True):
        result = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_BLUETOOTH},
            data=HK_BLUETOOTH_SERVICE_INFO_DISCOVERED_PAIRED,
        )
    # An accessory that is already paired cannot be paired again.
    assert (result["type"], result["reason"]) == (
        FlowResultType.ABORT,
        "already_paired",
    )
async def test_bluetooth_valid_device_discovery_unpaired(hass, controller):
    """Test bluetooth discovery with a homekit device and discovery works."""
    setup_mock_accessory(controller)
    storage = await async_get_entity_storage(hass)
    ble_flag = (
        "homeassistant.components.homekit_controller.config_flow."
        "aiohomekit_const.BLE_TRANSPORT_SUPPORTED"
    )
    with patch(ble_flag, True):
        discovery = await hass.config_entries.flow.async_init(
            "homekit_controller",
            context={"source": config_entries.SOURCE_BLUETOOTH},
            data=HK_BLUETOOTH_SERVICE_INFO_DISCOVERED_UNPAIRED,
        )
    # Discovering an unpaired accessory should land on the pairing form.
    assert discovery["type"] == FlowResultType.FORM
    assert discovery["step_id"] == "pair"
    # Nothing should be cached for the accessory before pairing completes.
    assert storage.get_map("00:00:00:00:00:00") is None
    expected_context = {
        "source": config_entries.SOURCE_BLUETOOTH,
        "unique_id": "AA:BB:CC:DD:EE:FF",
        "title_placeholders": {"name": "TestDevice", "category": "Other"},
    }
    assert get_flow_context(hass, discovery) == expected_context
    pair_form = await hass.config_entries.flow.async_configure(discovery["flow_id"])
    assert pair_form["type"] == FlowResultType.FORM
    finished = await hass.config_entries.flow.async_configure(
        pair_form["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert finished["type"] == FlowResultType.CREATE_ENTRY
    assert finished["title"] == "Koogeek-LS1-20833F"
    assert finished["data"] == {}
    # Pairing data must now be persisted for the accessory.
    assert storage.get_map("00:00:00:00:00:00") is not None
| {
"content_hash": "b1596e648e6cecb6a99a237f75126105",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 130,
"avg_line_length": 35.393994540491356,
"alnum_prop": 0.6611136819373746,
"repo_name": "nkgilley/home-assistant",
"id": "5e2c8249560887605e29621b2f6cbc40f4ee530c",
"size": "38898",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/homekit_controller/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
| {
"content_hash": "55ef45c25b05843732e23d5d5f7d77e3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7857142857142857,
"repo_name": "jeremyphilemon/uniqna",
"id": "0d091e53aa86d6c73bc9559e7a97c6dd957098c4",
"size": "70",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "home/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157526"
},
{
"name": "HTML",
"bytes": "75472"
},
{
"name": "JavaScript",
"bytes": "186150"
},
{
"name": "Python",
"bytes": "69021"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from .array import to_device
from .kernelapi import Dim3, FakeCUDAModule, swapped_cuda_module
from numba.six import reraise
import numpy as np
import sys
import threading
from contextlib import contextmanager
"""
Global variable to keep track of the current "kernel context", i.e the
FakeCUDAModule. We only support one kernel launch at a time.
No support for concurrent kernel launch.
"""
_kernel_context = None
@contextmanager
def _push_kernel_context(mod):
"""
Push the current kernel context.
"""
global _kernel_context
assert _kernel_context is None, "conrrent simulated kernel not supported"
_kernel_context = mod
try:
yield
finally:
_kernel_context = None
def _get_kernel_context():
    """
    Get the current kernel context. This is usually done by a device function.
    """
    # Returns the module pushed by _push_kernel_context, or None when no
    # simulated kernel launch is in progress.
    return _kernel_context
class FakeCUDAKernel(object):
    '''
    Wraps a @cuda.jit-ed function.

    Calling the instance simulates a kernel launch using the most recently
    assigned configuration (set via ``kernel[griddim, blockdim, ...]``).
    '''
    def __init__(self, fn, device, fastmath=False):
        # fn: the Python function to simulate.
        # device: truthy for device functions, which are invoked inline
        # inside the caller's kernel context rather than launched over a grid.
        self.fn = fn
        self._device = device
        self._fastmath = fastmath
        # Initial configuration: 1 block, 1 thread, stream 0, no dynamic shared
        # memory.
        self[1, 1, 0, 0]
    def __call__(self, *args):
        # Device functions run inline within the currently-launched kernel's
        # module; no new context is pushed.
        if self._device:
            with swapped_cuda_module(self.fn, _get_kernel_context()):
                return self.fn(*args)
        fake_cuda_module = FakeCUDAModule(self.grid_dim, self.block_dim,
                                          self.dynshared_size)
        with _push_kernel_context(fake_cuda_module):
            # fake_args substitutes all numpy arrays for FakeCUDAArrays
            # because they implement some semantics differently
            def fake_arg(arg):
                if isinstance(arg, np.ndarray) and arg.ndim > 0:
                    return to_device(arg)
                return arg
            fake_args = [fake_arg(arg) for arg in args]
            with swapped_cuda_module(self.fn, fake_cuda_module):
                # Execute one block at a time
                for grid_point in np.ndindex(*self.grid_dim):
                    bm = BlockManager(self.fn, self.grid_dim, self.block_dim)
                    bm.run(grid_point, *fake_args)
    def __getitem__(self, configuration):
        # Launch-configuration syntax: kernel[grid, block, stream, dynshared].
        # Grid/block dims are normalised to 3-element lists padded with 1s.
        # NOTE(review): configuration[2] (the stream) is accepted but ignored
        # by the simulator.
        grid_dim = configuration[0]
        block_dim = configuration[1]
        if not isinstance(grid_dim, (tuple, list)):
            grid_dim = [grid_dim]
        else:
            grid_dim = list(grid_dim)
        if not isinstance(block_dim, (tuple, list)):
            block_dim = [block_dim]
        else:
            block_dim = list(block_dim)
        while len(grid_dim) < 3:
            grid_dim.append(1)
        while len(block_dim) < 3:
            block_dim.append(1)
        self.grid_dim = grid_dim
        self.block_dim = block_dim
        if len(configuration) == 4:
            self.dynshared_size = configuration[3]
        return self
    def bind(self):
        # No-op: there is nothing to compile or bind in the simulator.
        pass
    @property
    def ptx(self):
        '''
        Required in order to proceed through some tests, but serves no functional
        purpose.
        '''
        # Fabricated PTX-looking text; tests only check for these markers.
        res = '.const'
        res += '\n.local'
        if self._fastmath:
            res += '\ndiv.full.ftz.f32'
        return res
# Thread emulation
class BlockThread(threading.Thread):
    '''
    Manages the execution of a function for a single CUDA thread.
    '''
    def __init__(self, f, manager, blockIdx, threadIdx):
        super(BlockThread, self).__init__(target=f)
        # Event/flag pair used to implement the syncthreads() barrier; the
        # owning BlockManager polls syncthreads_blocked and sets the event.
        self.syncthreads_event = threading.Event()
        self.syncthreads_blocked = False
        self._manager = manager
        self.blockIdx = Dim3(*blockIdx)
        self.threadIdx = Dim3(*threadIdx)
        # Populated with (type, value, traceback) if the target raises, so
        # the BlockManager can re-raise on the launching thread.
        self.exception = None
    def run(self):
        try:
            super(BlockThread, self).run()
        except Exception as e:
            # Prefix the message with the simulated thread/block indices so
            # failures are attributable to a specific CUDA thread.
            tid = 'tid=%s' % list(self.threadIdx)
            ctaid = 'ctaid=%s' % list(self.blockIdx)
            if str(e) == '':
                msg = '%s %s' % (tid, ctaid)
            else:
                msg = '%s %s: %s' % (tid, ctaid, e)
            tb = sys.exc_info()[2]
            self.exception = (type(e), type(e)(msg), tb)
    def syncthreads(self):
        # Mark ourselves blocked and wait until the BlockManager releases the
        # barrier by setting our event.
        self.syncthreads_blocked = True
        self.syncthreads_event.wait()
        self.syncthreads_event.clear()
    def __str__(self):
        return 'Thread <<<%s, %s>>>' % (self.blockIdx, self.threadIdx)
class BlockManager(object):
    '''
    Manages the execution of a thread block.
    When run() is called, all threads are started. Each thread executes until it
    hits syncthreads(), at which point it sets its own syncthreads_blocked to
    True so that the BlockManager knows it is blocked. It then waits on its
    syncthreads_event.
    The BlockManager polls threads to determine if they are blocked in
    syncthreads(). If it finds a blocked thread, it adds it to the set of
    blocked threads. When all threads are blocked, it unblocks all the threads.
    The threads are unblocked by setting their syncthreads_blocked back to False
    and setting their syncthreads_event.
    The polling continues until no threads are alive, when execution is
    complete.
    '''
    def __init__(self, f, grid_dim, block_dim):
        # f: kernel function; grid_dim / block_dim: 3-element launch shape.
        self._grid_dim = grid_dim
        self._block_dim = block_dim
        self._f = f
    def run(self, grid_point, *args):
        # Create all threads
        threads = set()
        livethreads = set()
        blockedthreads = set()
        for block_point in np.ndindex(*self._block_dim):
            def target():
                self._f(*args)
            t = BlockThread(target, self, grid_point, block_point)
            t.start()
            threads.add(t)
            livethreads.add(t)
        # Potential optimisations:
        # 1. Continue the while loop immediately after finding a blocked thread
        # 2. Don't poll already-blocked threads
        while livethreads:
            for t in livethreads:
                if t.syncthreads_blocked:
                    blockedthreads.add(t)
                elif t.exception:
                    # Propagate the first failure seen to the caller.
                    reraise(*(t.exception))
            if livethreads == blockedthreads:
                # Every live thread reached the barrier: release them all.
                for t in blockedthreads:
                    t.syncthreads_blocked = False
                    t.syncthreads_event.set()
                blockedthreads = set()
            livethreads = set([ t for t in livethreads if t.is_alive() ])
        # Final check for exceptions in case any were set prior to thread
        # finishing, before we could check it
        for t in threads:
            if t.exception:
                reraise(*(t.exception))
| {
"content_hash": "99705a7b2eabf27a6641849af80a6e18",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 81,
"avg_line_length": 31.33177570093458,
"alnum_prop": 0.580462341536167,
"repo_name": "stefanseefeld/numba",
"id": "7dc66cd77e0971b5f7a9a654eab85f1ddc54bd87",
"size": "6705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/simulator/kernel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5535"
},
{
"name": "C",
"bytes": "303376"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "Jupyter Notebook",
"bytes": "110325"
},
{
"name": "Python",
"bytes": "3946372"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth import get_user_model
from .models import Key, KeyLog, ServiceLocation, CycleInfo, RouteInfo
class KeyLogInline(admin.TabularInline):
    """Inline editor so a key's log entries can be managed on its admin page."""
    model = KeyLog
    extra = 1  # show one blank log row for quick entry (PEP 8 spacing fixed)
class KeyAdmin(admin.ModelAdmin):
    """Admin for Key with its KeyLog entries edited inline."""
    inlines = [KeyLogInline]
class DefineRoute(RouteInfo):
    """Proxy of RouteInfo so routes get their own entry in the admin index."""
    class Meta:
        proxy = True
class DefineCycle(CycleInfo):
    """Proxy of CycleInfo so cycles get their own entry in the admin index."""
    class Meta:
        proxy = True
# Register models with the admin site; the proxy models above surface
# RouteInfo and CycleInfo under separate admin entries.
admin.site.register(Key, KeyAdmin)
admin.site.register(ServiceLocation)
admin.site.register(DefineRoute)
admin.site.register(DefineCycle)
"content_hash": "244542445f2cebf2b50deec78e35311b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 24.434782608695652,
"alnum_prop": 0.7580071174377224,
"repo_name": "jancsarc/KIM-Online",
"id": "87013163d9ae46ea57efac3609d36fbfda02d04e",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/keys/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63453"
},
{
"name": "HTML",
"bytes": "98809"
},
{
"name": "JavaScript",
"bytes": "370050"
},
{
"name": "Python",
"bytes": "87152"
}
],
"symlink_target": ""
} |
import asposeimagingcloud
from asposeimagingcloud.ImagingApi import ImagingApi
from asposeimagingcloud.ImagingApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX"  # specify App Key
appSid = "XXXXX"  # specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
# Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
# Instantiate Aspose Imaging API SDK
api_client = asposeimagingcloud.ApiClient.ApiClient(apiKey, appSid, True)
imagingApi = ImagingApi(api_client);
# set input file name
name = "sample.gif"
# GIF-specific properties to apply to the uploaded image
backgroundColorIndex = 255
colorResolution = 7
pixelAspectRatio = 10
try:
    # invoke Aspose.Imaging Cloud SDK API to update GIF specific properties without using cloud storage
    response = imagingApi.PostImageGif(file = data_folder + name, backgroundColorIndex=backgroundColorIndex, colorResolution=colorResolution, pixelAspectRatio=pixelAspectRatio)
    if response.Status == 'OK':
        # download image from API response
        outfilename = "c:/temp/" + name
        with open(outfilename, 'wb') as f:
            for chunk in response.InputStream:
                f.write(chunk)
except ApiException as ex:
    # NOTE: Python 2 print statements -- this example targets Python 2.
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
| {
"content_hash": "6bce1d4eac7c3caaaa5adcd0cb371487",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 176,
"avg_line_length": 36.04761904761905,
"alnum_prop": 0.7404227212681638,
"repo_name": "asposeimaging/Aspose_Imaging_Cloud",
"id": "28eb82ee958dc5269ffefd5a0d34056a53d6dd43",
"size": "1514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/SDK/properties/UpdateGIFImagePropertiesWithoutStorage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "207"
},
{
"name": "C#",
"bytes": "211360"
},
{
"name": "Java",
"bytes": "264819"
},
{
"name": "JavaScript",
"bytes": "146745"
},
{
"name": "Objective-C",
"bytes": "224110"
},
{
"name": "PHP",
"bytes": "150631"
},
{
"name": "Python",
"bytes": "180881"
},
{
"name": "Ruby",
"bytes": "190426"
}
],
"symlink_target": ""
} |
import codecs
import os
import setuptools
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this file.

    The file handle is closed deterministically via a context manager
    (the original left it to the garbage collector).
    """
    file_path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(file_path, encoding="utf-8") as f:
        return f.read()
# Package metadata for poyo; sources live under src/ (src-layout).
setuptools.setup(
    name="poyo",
    version="0.5.0",
    author="Raphael Pierzina",
    author_email="raphael@hackebrot.de",
    maintainer="Raphael Pierzina",
    maintainer_email="raphael@hackebrot.de",
    description="A lightweight YAML Parser for Python. 🐓",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/hackebrot/poyo",
    project_urls={
        "Repository": "https://github.com/hackebrot/poyo",
        "Issues": "https://github.com/hackebrot/poyo/issues",
    },
    license="MIT",
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    zip_safe=False,
    # Pure-Python, no runtime dependencies; supports py2.7 and py3.4+.
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
    install_requires=[],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python",
    ],
    keywords=["YAML", "parser", "cookiecutter"],
)
| {
"content_hash": "4a0a48b3dc366a8901a32fd1dc9f1cda",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 34.38461538461539,
"alnum_prop": 0.610178970917226,
"repo_name": "hackebrot/poyo",
"id": "57a5489d4c9b36169fea6dc5ccc853a826787e56",
"size": "1838",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "589"
},
{
"name": "Python",
"bytes": "23367"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from c3nav.control.models import UserPermissions
class UserPermissionsInline(admin.StackedInline):
    """Edit a user's permissions directly on the user admin page."""
    model = UserPermissions
    can_delete = False  # the permissions record is edited, never removed here
class UserAdmin(BaseUserAdmin):
    """User admin with a trimmed field layout and inline permissions."""
    fieldsets = (
        (None, {'fields': ('username', 'password', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    readonly_fields = ('last_login', 'date_joined')
    inlines = (UserPermissionsInline, )
    def get_view_on_site_url(self, obj=None):
        """Point "view on site" at the control panel's user detail page."""
        if obj is None:
            return None
        return reverse('control.users.detail', args=[obj.pk])
# Replace Django's stock User admin with the customised one above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| {
"content_hash": "5fd556759d5153d2129b136d64409a5a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 32.89655172413793,
"alnum_prop": 0.6981132075471698,
"repo_name": "c3nav/c3nav",
"id": "59216378285eb103e111858f730328039e41a9bb",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/c3nav/control/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "986"
},
{
"name": "HTML",
"bytes": "89944"
},
{
"name": "JavaScript",
"bytes": "179692"
},
{
"name": "Python",
"bytes": "1061013"
},
{
"name": "SCSS",
"bytes": "41200"
},
{
"name": "Sass",
"bytes": "11121"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
import unittest
class TddInPythonExample(unittest.TestCase):
    """Minimal example test case demonstrating the TDD workflow."""
    def test_calculator_add_method_returns_correct_result(self):
        total = 2 + 2
        self.assertEqual(4, total)
# Allow running this file directly: python tests/simple_test.py
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "145fea61c386d5ecbcc63d8d2e583050",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.6567796610169492,
"repo_name": "DongjunLee/kino-bot",
"id": "6a2f7ebe2e1fa692af5e1b553a6b634d98020b9a",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/simple_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259265"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import zhusuan as zs
import tensorflow as tf
import tensorflow.contrib.layers as layers
def get_w_names(drop_rate, net_sizes):
    """Return the list of weight variable names (currently always empty).

    Both arguments are accepted for interface consistency but are not used.
    """
    return []
@zs.reuse('model')
def p_Y_Xw(observed, X, drop_rate, n_basis, net_sizes, n_samples, task):
    """Build the generative network p(Y | X, w) as a zhusuan BayesianNet.

    Returns a ``(model, f, None)`` tuple where ``f`` is the network output:
    softmax probabilities when ``task == "classification"``, raw outputs
    otherwise.
    NOTE(review): drop_rate, n_basis and n_samples are unused here --
    presumably kept for signature parity with other models; confirm.
    """
    with zs.BayesianNet(observed=observed) as model:
        f = tf.expand_dims(X, 1)
        # Fully-connected stack; ReLU on every layer except the last.
        for i in range(len(net_sizes)-1):
            f = tf.layers.dense(f, net_sizes[i+1])
            if(i < len(net_sizes)-2):
                f = tf.nn.relu(f)
        f = tf.squeeze(f, [1])
        if(task == "classification"):
            f = tf.nn.softmax(f)
    return model, f, None
"content_hash": "8d25e0f9810b5c78e0393f93ba288104",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 29.423076923076923,
"alnum_prop": 0.6130718954248366,
"repo_name": "MaxInGaussian/ZS-VAFNN",
"id": "b30611611f6ef05e10f7ef5474a1604ddf81a523",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/DNN.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91216"
}
],
"symlink_target": ""
} |
from select2_foreign_key import test_functional
from .models import TModel
class AdminForeignKeyTestCase(test_functional.AdminForeignKeyTestCase):
    """Run the shared admin FK functional tests against this app's TModel."""
    model = TModel
| {
"content_hash": "8ba6f4ba5db4825a2ed2ebb16db6736c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.8224852071005917,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "c05d561713f5cac5f5bd9ceb657bca19b6d8f28a",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_djhacker_formfield/test_functional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.cloud.transfers.mysql_to_gcs import MySQLToGCSOperator
from airflow.utils.trigger_rule import TriggerRule
# Example system-test DAG: export the result of a MySQL query to a GCS
# bucket. The bucket is created for the test and deleted afterwards.
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT")
DAG_ID = "example_mysql_to_gcs"
BUCKET_NAME = f"bucket_{DAG_ID}_{ENV_ID}"
FILENAME = "test_file"
SQL_QUERY = "SELECT * from test_table"
with models.DAG(
    DAG_ID,
    schedule="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=["example", "mysql"],
) as dag:
    create_bucket = GCSCreateBucketOperator(
        task_id="create_bucket", bucket_name=BUCKET_NAME, project_id=PROJECT_ID
    )
    # [START howto_operator_mysql_to_gcs]
    upload_mysql_to_gcs = MySQLToGCSOperator(
        task_id="mysql_to_gcs", sql=SQL_QUERY, bucket=BUCKET_NAME, filename=FILENAME, export_format="csv"
    )
    # [END howto_operator_mysql_to_gcs]
    # ALL_DONE so the bucket is removed even if the upload task fails.
    delete_bucket = GCSDeleteBucketOperator(
        task_id="delete_bucket", bucket_name=BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
    )
    (
        # TEST SETUP
        create_bucket
        # TEST BODY
        >> upload_mysql_to_gcs
        # TEST TEARDOWN
        >> delete_bucket
    )
    from tests.system.utils.watcher import watcher
    # This test needs watcher in order to properly mark success/failure
    # when "tearDown" task with trigger rule is part of the DAG
    list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run  # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "7ca81c86c11f4e37b5f8afed36e63a33",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 105,
"avg_line_length": 30.661016949152543,
"alnum_prop": 0.699281370923162,
"repo_name": "apache/airflow",
"id": "55b4dd1b5731e07883c44eecc4efc2337e372596",
"size": "2594",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/system/providers/google/cloud/gcs/example_mysql_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
import jsonschema
from unittest import mock
from webob import exc
from senlin.api.common import util
from senlin.api.common import wsgi
from senlin.common import context
from senlin.common import policy
from senlin.objects import base as obj_base
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class FakeRequest(obj_base.SenlinObject):
    """Minimal request object used to exercise version conversion below."""
    # Declares a 2.0 object whose base API microversion is 1.3, so a caller
    # on an older microversion forces a downgrade (see test_version_conversion).
    VERSION = '2.0'
    VERSION_MAP = {
        '1.3': '2.0'
    }
    @classmethod
    def obj_from_primitive(cls, primitive):
        # Intentionally a no-op; the test patches this method with a mock.
        pass
class TestGetAllowedParams(base.SenlinTestCase):
    """Tests for util.get_allowed_params() whitelist filtering."""
    def setUp(self):
        super(TestGetAllowedParams, self).setUp()
        req = wsgi.Request({})
        self.params = req.params.copy()
        self.params.add('foo', 'foo value')
        self.whitelist = {'foo': 'single'}
    def _filtered(self):
        # Apply the whitelist currently configured on the fixture.
        return util.get_allowed_params(self.params, self.whitelist)
    def test_returns_empty_dict(self):
        self.whitelist = {}
        self.assertEqual({}, self._filtered())
    def test_only_adds_whitelisted_params_if_param_exists(self):
        self.whitelist = {'foo': 'single'}
        self.params.clear()
        self.assertNotIn('foo', self._filtered())
    def test_returns_only_whitelisted_params(self):
        self.params.add('bar', 'bar value')
        res = self._filtered()
        self.assertIn('foo', res)
        self.assertNotIn('bar', res)
    def test_handles_single_value_params(self):
        self.assertEqual('foo value', self._filtered()['foo'])
    def test_handles_multiple_value_params(self):
        self.whitelist = {'foo': 'multi'}
        self.params.add('foo', 'foo value 2')
        values = self._filtered()['foo']
        self.assertEqual(2, len(values))
        self.assertIn('foo value', values)
        self.assertIn('foo value 2', values)
    def test_handles_mixed_value_param_with_multiple_entries(self):
        self.whitelist = {'foo': 'mixed'}
        self.params.add('foo', 'foo value 2')
        values = self._filtered()['foo']
        self.assertEqual(2, len(values))
        self.assertIn('foo value', values)
        self.assertIn('foo value 2', values)
    def test_handles_mixed_value_param_with_single_entry(self):
        self.whitelist = {'foo': 'mixed'}
        self.assertEqual(['foo value'], self._filtered()['foo'])
    def test_ignores_bogus_whitelist_items(self):
        self.whitelist = {'foo': 'blah'}
        self.assertNotIn('foo', self._filtered())
class TestPolicyEnforce(base.SenlinTestCase):
    """Tests for the util.policy_enforce decorator."""
    def setUp(self):
        super(TestPolicyEnforce, self).setUp()
        self.req = wsgi.Request({})
        # Non-admin context so policy decisions are actually consulted.
        self.req.context = context.RequestContext(project='foo',
                                                  is_admin=False)
        class DummyController(object):
            REQUEST_SCOPE = 'test'
            @util.policy_enforce
            def an_action(self, req):
                return 'woot'
        self.controller = DummyController()
    @mock.patch.object(policy, 'enforce')
    def test_policy_enforce_policy_deny(self, mock_enforce):
        # A denied policy check must surface as HTTP 403 Forbidden.
        mock_enforce.return_value = False
        self.assertRaises(exc.HTTPForbidden,
                          self.controller.an_action,
                          self.req, tenant_id='foo')
class TestParseRequest(base.SenlinTestCase):
    """Tests for util.parse_request() body validation and versioning."""
    def setUp(self):
        super(TestParseRequest, self).setUp()
        self.context = utils.dummy_context()
    def test_all_okay(self):
        name = 'ClusterListRequest'
        body = {'project_safe': True}
        req = mock.Mock(context=self.context)
        res = util.parse_request(name, req, body)
        self.assertIsNotNone(res)
    def test_bad_request_name(self):
        # Unknown request object names are rejected with HTTP 400.
        name = 'BadClusterListRequest'
        body = {'project_safe': True}
        req = mock.Mock(context=self.context)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               util.parse_request,
                               name, req, body)
        self.assertEqual('Unsupported object type BadClusterListRequest',
                         str(ex))
    def test_bad_request_body(self):
        # When a wrapper key is expected (here 'cluster'), its absence fails.
        name = 'ClusterCreateRequest'
        body = {'bad_key': 'bad_value'}
        req = mock.Mock(context=self.context)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               util.parse_request,
                               name, req, body, 'cluster')
        self.assertEqual("Request body missing 'cluster' key.",
                         str(ex))
    def test_bad_primitive(self):
        # Field-level validation errors (limit < 0) become HTTP 400.
        name = 'ClusterListRequest'
        body = {'limit': -1}
        req = mock.Mock(context=self.context)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               util.parse_request,
                               name, req, body)
        self.assertEqual("Value must be >= 0 for field 'limit'.",
                         str(ex))
    def test_bad_schema(self):
        # Unknown keys are rejected by the JSON schema.
        name = 'ClusterListRequest'
        body = {'bogus_key': 'bogus_value',
                'project_safe': True}
        req = mock.Mock(context=self.context)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               util.parse_request,
                               name, req, body)
        self.assertEqual("Additional properties are not allowed ('bogus_key' "
                         "was unexpected)", str(ex))
    @mock.patch.object(jsonschema, 'validate')
    @mock.patch.object(FakeRequest, 'obj_from_primitive')
    @mock.patch.object(obj_base.SenlinObject, 'obj_class_from_name')
    def test_version_conversion(self, mock_cls, mock_construct, mock_validate):
        name = 'FakeReq'
        body = {}
        mock_cls.return_value = FakeRequest
        # The following context will force the request to be downgraded to
        # its base version (1.0)
        context = utils.dummy_context(api_version='1.2')
        req = mock.Mock(context=context)
        obj = mock.Mock()
        mock_construct.return_value = obj
        # Expected primitive emitted for a FakeRequest at its native 2.0.
        primitive = {
            'senlin_object.version': '2.0',
            'senlin_object.name': 'FakeReq',
            'senlin_object.data': {},
            'senlin_object.namespace': 'senlin'
        }
        res = util.parse_request(name, req, body)
        self.assertIsNotNone(res)
        mock_cls.assert_called_once_with('FakeReq')
        self.assertEqual(2, mock_construct.call_count)
        obj.obj_make_compatible.assert_called_once_with(primitive, '1.0')
class TestParseBool(base.SenlinTestCase):
    """Tests for util.parse_bool_param()."""
    def test_parse_bool(self):
        name = 'param'
        truthy = ('True', 'true', 'TRUE', True)
        falsy = ('False', 'false', 'FALSE', False)
        invalid = ('foo', 't', 'f', 'yes', 'no', 'y', 'n', '1', '0', None)
        for value in truthy:
            self.assertTrue(util.parse_bool_param(name, value))
        for value in falsy:
            self.assertFalse(util.parse_bool_param(name, value))
        for value in invalid:
            self.assertRaises(exc.HTTPBadRequest,
                              util.parse_bool_param, name, value)
| {
"content_hash": "614161df0a9ff8928d7ba47e33fbcd37",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 33.76605504587156,
"alnum_prop": 0.5916315717973102,
"repo_name": "openstack/senlin",
"id": "5484c3caca3534f0bb9b944258c9c5e686a9bea7",
"size": "7910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/api/common/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "69788"
},
{
"name": "Python",
"bytes": "3755028"
},
{
"name": "Shell",
"bytes": "24272"
}
],
"symlink_target": ""
} |
import sys
import os
# Limit OpenBLAS to a single thread, since it appears to spawn too many
# threads on the CI machines. (Note: no limit is set in this script itself;
# presumably it is applied via the environment, e.g. OPENBLAS_NUM_THREADS.)
import scipy
import scipy.cluster._hierarchy
import scipy.cluster._vq
import scipy.fftpack._fftpack
import scipy.fftpack.convolve
import scipy.integrate._dop
import scipy.integrate._odepack
import scipy.integrate._quadpack
import scipy.integrate._test_multivariate
import scipy.integrate._test_odeint_banded
import scipy.integrate.lsoda
import scipy.integrate.vode
import scipy.interpolate._fitpack
import scipy.interpolate._interpolate
import scipy.interpolate._ppoly
import scipy.interpolate.dfitpack
import scipy.interpolate.interpnd
import scipy.io.matlab.mio5_utils
import scipy.io.matlab.mio_utils
import scipy.io.matlab.streams
import scipy.linalg._decomp_update
import scipy.linalg._fblas
import scipy.linalg._flapack
import scipy.linalg._flinalg
import scipy.linalg._interpolative
import scipy.linalg._solve_toeplitz
import scipy.linalg.cython_blas
import scipy.linalg.cython_lapack
import scipy.ndimage._nd_image
import scipy.ndimage._ni_label
import scipy.odr.__odrpack
import scipy.optimize._cobyla
import scipy.optimize._group_columns
import scipy.optimize._lbfgsb
import scipy.optimize._lsq.givens_elimination
import scipy.optimize._minpack
import scipy.optimize._nnls
import scipy.optimize._slsqp
import scipy.optimize._zeros
import scipy.optimize.minpack2
import scipy.optimize.moduleTNC
import scipy.signal._max_len_seq_inner
import scipy.signal._spectral
import scipy.signal.sigtools
import scipy.signal.spline
import scipy.sparse._csparsetools
import scipy.sparse._sparsetools
import scipy.sparse.csgraph._min_spanning_tree
import scipy.sparse.csgraph._reordering
import scipy.sparse.csgraph._shortest_path
import scipy.sparse.csgraph._tools
import scipy.sparse.csgraph._traversal
import scipy.sparse.linalg.dsolve._superlu
import scipy.sparse.linalg.eigen.arpack._arpack
import scipy.sparse.linalg.isolve._iterative
import scipy.spatial._distance_wrap
import scipy.spatial.ckdtree
import scipy.spatial.qhull
import scipy.special._ellip_harm_2
import scipy.special._ufuncs
import scipy.special._ufuncs_cxx
import scipy.special.specfun
import scipy.stats.mvn
import scipy.stats.statlib
import scipy.stats
import scipy.special
sys.exit(scipy.test())
| {
"content_hash": "63fa61a7b0d02de8e9be25fe91c9e2dc",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 66,
"avg_line_length": 30.06578947368421,
"alnum_prop": 0.8323851203501094,
"repo_name": "jjhelmus/berryconda",
"id": "08234c602ceb6b4e8c0195201b3db9692bca07fd",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/scipy/run_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "46772"
},
{
"name": "C",
"bytes": "4355"
},
{
"name": "C++",
"bytes": "5042"
},
{
"name": "CMake",
"bytes": "13859"
},
{
"name": "Fortran",
"bytes": "12236"
},
{
"name": "Makefile",
"bytes": "16"
},
{
"name": "Perl",
"bytes": "1817"
},
{
"name": "Python",
"bytes": "47467"
},
{
"name": "Shell",
"bytes": "54210"
},
{
"name": "Tcl",
"bytes": "27"
}
],
"symlink_target": ""
} |
"""
Support for Z-Wave climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.zwave/
"""
# Because we do not compile openzwave on CI
import logging
from homeassistant.components.climate import (
DOMAIN, ClimateDevice, STATE_AUTO, STATE_COOL, STATE_HEAT,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE,
SUPPORT_OPERATION_MODE, SUPPORT_SWING_MODE)
from homeassistant.components.zwave import ( # noqa pylint: disable=unused-import
ZWaveDeviceEntity, async_setup_platform)
from homeassistant.const import (
STATE_OFF, TEMP_CELSIUS, TEMP_FAHRENHEIT, ATTR_TEMPERATURE)
_LOGGER = logging.getLogger(__name__)
CONF_NAME = 'name'
DEFAULT_NAME = 'Z-Wave Climate'
# Remotec manufacturer / ZXT-120 product identifiers (hex); compared against
# the node's (manufacturer_id, product_id) to detect the swing-mode workaround.
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120)
# Names of the extra state attributes exposed by the climate entity.
ATTR_OPERATING_STATE = 'operating_state'
ATTR_FAN_STATE = 'fan_state'
WORKAROUND_ZXT_120 = 'zxt_120'
# Maps (manufacturer_id, product_id) tuples to the workaround they require.
DEVICE_MAPPINGS = {
    REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120
}
# Maps raw Z-Wave thermostat mode strings to Home Assistant climate states.
STATE_MAPPINGS = {
    'Off': STATE_OFF,
    'Heat': STATE_HEAT,
    'Heat Mode': STATE_HEAT,
    'Heat (Default)': STATE_HEAT,
    'Cool': STATE_COOL,
    'Auto': STATE_AUTO,
}
def get_device(hass, values, **kwargs):
    """Create Z-Wave entity device."""
    return ZWaveClimate(values, hass.config.units.temperature_unit)
class ZWaveClimate(ZWaveDeviceEntity, ClimateDevice):
    """Representation of a Z-Wave Climate device."""
    def __init__(self, values, temp_unit):
        """Initialize the Z-Wave climate device.

        values -- the Z-Wave value group backing this entity
        temp_unit -- Home Assistant's configured temperature unit, used
            until the device reports its own unit
        """
        ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        # Cached state, refreshed by update_properties().
        self._target_temperature = None
        self._current_temperature = None
        self._current_operation = None
        self._operation_list = None
        # Maps HA operation states back to the device's raw mode strings.
        self._operation_mapping = None
        self._operating_state = None
        self._current_fan_mode = None
        self._fan_list = None
        self._fan_state = None
        self._current_swing_mode = None
        self._swing_list = None
        self._unit = temp_unit
        _LOGGER.debug("temp_unit is %s", self._unit)
        # Set to 1 when the device is a Remotec ZXT-120 (adds swing mode).
        self._zxt_120 = None
        # Make sure that we have values for the key before converting to int
        if (self.node.manufacturer_id.strip() and
                self.node.product_id.strip()):
            specific_sensor_key = (
                int(self.node.manufacturer_id, 16),
                int(self.node.product_id, 16))
            if specific_sensor_key in DEVICE_MAPPINGS:
                if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
                    _LOGGER.debug(
                        "Remotec ZXT-120 Zwave Thermostat workaround")
                    self._zxt_120 = 1
        self.update_properties()
    @property
    def supported_features(self):
        """Return the list of supported features."""
        support = SUPPORT_TARGET_TEMPERATURE
        if self.values.fan_mode:
            support |= SUPPORT_FAN_MODE
        if self.values.mode:
            support |= SUPPORT_OPERATION_MODE
        if self._zxt_120 == 1 and self.values.zxt_120_swing_mode:
            support |= SUPPORT_SWING_MODE
        return support
    def update_properties(self):
        """Handle the data changes for node values."""
        # Operation Mode
        if self.values.mode:
            self._operation_list = []
            self._operation_mapping = {}
            operation_list = self.values.mode.data_items
            if operation_list:
                for mode in operation_list:
                    ha_mode = STATE_MAPPINGS.get(mode)
                    # Known modes are exposed under their HA state name; the
                    # raw device string is kept for everything else.
                    if ha_mode and ha_mode not in self._operation_mapping:
                        self._operation_mapping[ha_mode] = mode
                        self._operation_list.append(ha_mode)
                        continue
                    self._operation_list.append(mode)
            current_mode = self.values.mode.data
            # Translate the raw device mode back to its HA state if mapped.
            self._current_operation = next(
                (key for key, value in self._operation_mapping.items()
                 if value == current_mode), current_mode)
        _LOGGER.debug("self._operation_list=%s", self._operation_list)
        _LOGGER.debug("self._current_operation=%s", self._current_operation)
        # Current Temp
        if self.values.temperature:
            self._current_temperature = self.values.temperature.data
            # Prefer the unit reported by the device over the HA default.
            device_unit = self.values.temperature.units
            if device_unit is not None:
                self._unit = device_unit
        # Fan Mode
        if self.values.fan_mode:
            self._current_fan_mode = self.values.fan_mode.data
            fan_list = self.values.fan_mode.data_items
            if fan_list:
                self._fan_list = list(fan_list)
        _LOGGER.debug("self._fan_list=%s", self._fan_list)
        _LOGGER.debug("self._current_fan_mode=%s",
                      self._current_fan_mode)
        # Swing mode (only present on the Remotec ZXT-120)
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                self._current_swing_mode = self.values.zxt_120_swing_mode.data
                swing_list = self.values.zxt_120_swing_mode.data_items
                if swing_list:
                    self._swing_list = list(swing_list)
            _LOGGER.debug("self._swing_list=%s", self._swing_list)
            _LOGGER.debug("self._current_swing_mode=%s",
                          self._current_swing_mode)
        # Set point
        if self.values.primary.data == 0:
            # A zero setpoint is treated as "unset"; fall back to the
            # current temperature as a sane default.
            _LOGGER.debug("Setpoint is 0, setting default to "
                          "current_temperature=%s",
                          self._current_temperature)
            if self._current_temperature is not None:
                self._target_temperature = (
                    round((float(self._current_temperature)), 1))
        else:
            self._target_temperature = round(
                (float(self.values.primary.data)), 1)
        # Operating state
        if self.values.operating_state:
            self._operating_state = self.values.operating_state.data
        # Fan operating state
        if self.values.fan_state:
            self._fan_state = self.values.fan_state.data
    @property
    def current_fan_mode(self):
        """Return the fan speed set."""
        return self._current_fan_mode
    @property
    def fan_list(self):
        """Return a list of available fan modes."""
        return self._fan_list
    @property
    def current_swing_mode(self):
        """Return the swing mode set."""
        return self._current_swing_mode
    @property
    def swing_list(self):
        """Return a list of available swing modes."""
        return self._swing_list
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # Devices report bare 'C'/'F'; normalize to the HA unit constants.
        if self._unit == 'C':
            return TEMP_CELSIUS
        if self._unit == 'F':
            return TEMP_FAHRENHEIT
        return self._unit
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def current_operation(self):
        """Return the current operation mode."""
        return self._current_operation
    @property
    def operation_list(self):
        """Return a list of available operation modes."""
        return self._operation_list
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature
    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        if kwargs.get(ATTR_TEMPERATURE) is not None:
            temperature = kwargs.get(ATTR_TEMPERATURE)
        else:
            # Nothing to do when no temperature was passed in.
            return
        self.values.primary.data = temperature
    def set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        if self.values.fan_mode:
            self.values.fan_mode.data = fan_mode
    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        if self.values.mode:
            # Translate the HA state back to the raw device mode string.
            self.values.mode.data = self._operation_mapping.get(
                operation_mode, operation_mode)
    def set_swing_mode(self, swing_mode):
        """Set new target swing mode."""
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                self.values.zxt_120_swing_mode.data = swing_mode
    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        data = super().device_state_attributes
        if self._operating_state:
            data[ATTR_OPERATING_STATE] = self._operating_state
        if self._fan_state:
            data[ATTR_FAN_STATE] = self._fan_state
        return data
| {
"content_hash": "89804e7c4fcf11e37e38e8b4aabf9098",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 82,
"avg_line_length": 35.53846153846154,
"alnum_prop": 0.5930735930735931,
"repo_name": "persandstrom/home-assistant",
"id": "77b5e111686f6f9da29fa302ebaf883af4e34ca2",
"size": "8778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/climate/zwave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
""" Pykaf setup.py package.
https://docs.python.org/2/distutils/index.html
"""
from setuptools import setup
setup(
name='pynaf',
version='2.0.2',
author='Rodrigo Agerri',
author_email='rodrigo.agerri@ehu.es',
packages=['pynaf',],
url='https://github.com/josubg/pynaf/',
license=' Apache License 2.0 (APL 2.0)',
description='Read and create NAF annotation Documents.',
long_description=open('LONG.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: XML',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
keywords='NLP XML markup NAF',
install_requires=['lxml']
)
| {
"content_hash": "801a744a0b4a01caa9d96b9c2a1508b9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 30.566666666666666,
"alnum_prop": 0.6205016357688113,
"repo_name": "jofatmofn/pynaf",
"id": "4d4689febabb994caa03a0c19eb60595b5111df4",
"size": "932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30352"
}
],
"symlink_target": ""
} |
import libvirt
import logging
import os
import uuid
from string import Template
from dwarf import config
CONF = config.Config()
LOG = logging.getLogger(__name__)
# Normalized domain states exposed by this module.
DOMAIN_NOSTATE = 0
DOMAIN_RUNNING = 1
DOMAIN_PAUSED = 2
DOMAIN_SHUTDOWN = 3
DOMAIN_CRASHED = 4
DOMAIN_SUSPENDED = 5
# Maps the fine-grained libvirt domain states onto the smaller set above.
_LIBVIRT_DOMAIN_STATE = {
    libvirt.VIR_DOMAIN_NOSTATE: DOMAIN_NOSTATE,
    libvirt.VIR_DOMAIN_RUNNING: DOMAIN_RUNNING,
    libvirt.VIR_DOMAIN_BLOCKED: DOMAIN_RUNNING,
    libvirt.VIR_DOMAIN_PAUSED: DOMAIN_PAUSED,
    libvirt.VIR_DOMAIN_SHUTDOWN: DOMAIN_SHUTDOWN,
    libvirt.VIR_DOMAIN_SHUTOFF: DOMAIN_SHUTDOWN,
    libvirt.VIR_DOMAIN_CRASHED: DOMAIN_CRASHED,
    libvirt.VIR_DOMAIN_PMSUSPENDED: DOMAIN_SUSPENDED,
}
def _name(sid):
return 'dwarf-%08x' % int(sid)
def _xml_snippet(name, enable):
xml = {}
if enable:
xml['%s_begin' % name] = ''
xml['%s_end' % name] = ''
else:
xml['%s_begin' % name] = '<!--'
xml['%s_end' % name] = '-->'
return xml
def _create_domain_xml(server, flavor, force=False):
    """
    Create a libvirt XML file for the domain

    Reuses the existing per-server libvirt.xml unless force is set, in
    which case the file is regenerated from the bundled template.
    """
    basepath = os.path.join(CONF.instances_dir, server['id'])
    xml_file = os.path.join(basepath, 'libvirt.xml')
    # Read the XML file if it exists already
    if not force and os.path.exists(xml_file):
        LOG.info('read existing libvirt.xml for server %s', server['id'])
        with open(xml_file, 'r') as fh:
            xml = fh.read()
    # Otherwise create it
    else:
        LOG.info('create libvirt.xml for server %s', server['id'])
        # The template ships alongside this module.
        with open(os.path.join(os.path.dirname(__file__),
                               'libvirt-domain.xml'), 'r') as fh:
            xml_template = fh.read()
        xml_info = {
            'domain_type': CONF.libvirt_domain_type,
            'uuid': server['id'],
            'name': _name(server['int_id']),
            'memory': int(flavor['ram']) * 1024,
            'vcpus': flavor['vcpus'],
            'basepath': basepath,
            'mac_addr': server['mac_address'],
            'bridge': CONF.libvirt_bridge_name,
        }
        # Enable/disable the config drive
        config_drive = _xml_snippet('config_drive',
                                    (CONF.force_config_drive or
                                     server['config_drive'] == 'True'))
        xml_info.update(config_drive)
        xml = Template(xml_template).substitute(xml_info)
        # Persist the rendered XML so later calls can reuse it.
        with open(xml_file, 'w') as fh:
            fh.write(xml)
    return xml
def _create_net_xml():
    """
    Create a libvirt XML for the network bridge
    """
    # The template ships alongside this module.
    with open(os.path.join(os.path.dirname(__file__),
                           'libvirt-net.xml'), 'r') as fh:
        xml_template = fh.read()
    xml_info = {
        'uuid': str(uuid.uuid4()),
        'network_name': CONF.libvirt_network_name,
        'bridge': CONF.libvirt_bridge_name,
        'ip': CONF.libvirt_bridge_ip,
        # DHCP range covers .2 through .254 of the bridge IP's first three
        # octets (assumes a /24-style network).
        'dhcp_start': '.'.join(CONF.libvirt_bridge_ip.split('.')[0:3] + ['2']),
        'dhcp_end': '.'.join(CONF.libvirt_bridge_ip.split('.')[0:3] + ['254']),
    }
    return Template(xml_template).substitute(xml_info)
class Controller(object):
    """Manage dwarf server domains and the dwarf network through libvirt.

    The connection to the libvirt daemon is opened lazily by _connect()
    and transparently re-opened whenever it is found to be broken.
    """

    def __init__(self):
        # Lazily opened libvirt connection handle (see _connect)
        self.libvirt = None

    def _test_connect(self):
        """
        Test the connection to the libvirt daemon
        """
        try:
            self.libvirt.getLibVersion()
            return True
        except libvirt.libvirtError as e:
            # A system/internal error means the connection broke and needs
            # to be re-opened; anything else is unexpected.
            if e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                      libvirt.VIR_ERR_INTERNAL_ERROR):
                LOG.debug('Connection to libvirt broke')
                return False
            raise

    def _connect(self):
        """
        Open a connection to the libvirt daemon
        """
        if self.libvirt is None or not self._test_connect():
            uri = 'qemu:///system'
            LOG.debug('Connecting to libvirt (%s)', uri)
            self.libvirt = libvirt.open(uri)

    # -------------------------------------------------------------------------
    # Libvirt domain operations (private)

    def _create_domain(self, xml):
        """
        Create the libvirt domain and start it
        """
        domain = self.libvirt.defineXML(xml)
        domain.create()
        return domain

    def _get_domain(self, server):
        """
        Get the active server domain (None if it does not exist)
        """
        try:
            domain = self.libvirt.lookupByName(_name(server['int_id']))
        except libvirt.libvirtError:
            return
        return domain

    def _destroy_domain(self, domain):
        """
        Destroy a libvirt domain
        """
        if domain is None:
            return
        try:
            domain.destroy()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
                # Check if the instance is already shut down
                if self._info_domain(domain)['state'] == DOMAIN_SHUTDOWN:
                    return
            raise

    def _undefine_domain(self, domain):
        """
        Undefine a libvirt domain
        """
        if domain is None:
            return
        domain.undefine()

    def _start_domain(self, domain):
        """
        Start a libvirt domain (no-op if it is already running)
        """
        if domain is None:
            return
        if self._info_domain(domain)['state'] == DOMAIN_RUNNING:
            return
        domain.create()

    def _info_domain(self, domain):
        """
        Return the libvirt domain info
        """
        if domain is None:
            return
        info = dict(zip(['state', 'max_mem', 'memory', 'nr_virt_cpu',
                         'cpu_time'], domain.info()))
        # Normalize the domain state
        info['state'] = _LIBVIRT_DOMAIN_STATE[info['state']]
        return info

    def _shutdown_domain(self, domain, hard):
        """
        Shutdown a libvirt domain (hard=True forces a destroy)
        """
        if domain is None:
            return
        if self._info_domain(domain)['state'] != DOMAIN_RUNNING:
            return
        if hard:
            self._destroy_domain(domain)
        else:
            domain.shutdown()

    # -------------------------------------------------------------------------
    # Server operations (public)

    def create_server(self, server, flavor):
        """
        Create a server
        """
        LOG.info('create_server(server=%s, flavor=%s)', server, flavor)
        self._connect()
        xml = _create_domain_xml(server, flavor)
        self._create_domain(xml)

    def delete_server(self, server):
        """
        Delete a server
        """
        LOG.info('delete_server(server=%s)', server)
        self._connect()
        domain = self._get_domain(server)
        self._destroy_domain(domain)
        self._undefine_domain(domain)

    def start_server(self, server):
        """
        Start a server
        """
        LOG.info('start_server(server=%s)', server)
        self._connect()
        domain = self._get_domain(server)
        self._start_domain(domain)

    def stop_server(self, server, hard=False):
        """
        Stop a server
        """
        LOG.info('stop_server(server=%s, hard=%s)', server, hard)
        self._connect()
        domain = self._get_domain(server)
        self._shutdown_domain(domain, hard)

    def info_server(self, server):
        """
        Return the server info
        """
        LOG.info('info_server(server=%s)', server)
        self._connect()
        domain = self._get_domain(server)
        info = self._info_domain(domain)
        LOG.info('info = %s', info)
        return info

    def create_network(self):
        """
        Create the network
        """
        LOG.info('create_network()')
        self._connect()
        try:
            # Check if the network already exists
            net = self.libvirt.networkLookupByName(CONF.libvirt_network_name)
        except libvirt.libvirtError as e:
            if e.get_error_code() != libvirt.VIR_ERR_NO_NETWORK:
                # Unexpected error
                raise
            # Define the network
            xml = _create_net_xml()
            net = self.libvirt.networkDefineXML(xml)
            # Configure the network to automatically start on host boot
            net.setAutostart(1)
        # Create (start) the network
        if net.isActive() == 0:
            net.create()

    def get_dhcp_lease(self, server):
        """
        Get DHCP lease information

        Return a dict with the server's IP address if exactly one lease
        matches its MAC address, otherwise None.
        """
        LOG.info('get_dhcp_lease(server=%s)', server)
        # Fix: open the libvirt connection first, like every other public
        # method. Without this, calling get_dhcp_lease() before any other
        # operation crashed because self.libvirt was still None.
        self._connect()
        net = self.libvirt.networkLookupByName(CONF.libvirt_network_name)
        lease = net.DHCPLeases(mac=server['mac_address'])
        if len(lease) == 1:
            return {'ip': lease[0]['ipaddr']}
| {
"content_hash": "b85657876d6cca192e33ed165a7b1a86",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 79,
"avg_line_length": 28.05732484076433,
"alnum_prop": 0.5312145289443814,
"repo_name": "juergh/dwarf",
"id": "d9ad99a8cb928bde473a518abdb82f0af4e3f5cc",
"size": "9487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dwarf/compute/virt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2161"
},
{
"name": "Python",
"bytes": "151324"
},
{
"name": "Shell",
"bytes": "1343"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from . import models
@admin.register(models.Voter)
class VoterAdmin(admin.ModelAdmin):
    """Django admin configuration for Voter records."""
    # List
    search_fields = [
        'first_name',
        'last_name',
        'email',
    ]
    list_display = [
        'name',
        'email',
        'birth_date',
        'zip_code',
        'email_confirmed',
        'email_subscribed',
    ]
    list_filter = [
        'regions',
        'email_confirmed',
        'email_subscribed',
    ]
    # Two-pane selector widget for the many-to-many regions field.
    filter_horizontal = [
        'regions'
    ]
    # Detail
    # Raw-id avoids rendering a select box with every user on the detail page.
    raw_id_fields = ['user']
    related_lookup_fields = {
        'fk': ['user']
    }
@admin.register(models.Status)
class StatusAdmin(admin.ModelAdmin):
    """Django admin configuration for Status records."""
    def fetch_and_update_registration(self, _request, queryset):
        """Admin action: refresh registration data for the selected rows."""
        for status in queryset:
            status.fetch_and_update_registration()
    search_fields = [
        'voter__first_name',
        'voter__last_name',
        'election__name',
    ]
    list_display = [
        'id',
        'voter',
        'election',
        'registered',
        'read_sample_ballot',
        'located_polling_location',
        'voted',
    ]
    list_filter = [
        'election',
        'registered',
        'read_sample_ballot',
        'located_polling_location',
        'voted',
    ]
    actions = [fetch_and_update_registration]
| {
"content_hash": "5431640121d6508ac197fc5b38efc13a",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 64,
"avg_line_length": 17.358974358974358,
"alnum_prop": 0.5206794682422452,
"repo_name": "citizenlabsgr/voter-engagement",
"id": "3e2f56af6c4d88cb7ee5cd7fbcd9df13949ba604",
"size": "1385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/voters/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "652"
},
{
"name": "HTML",
"bytes": "1028"
},
{
"name": "JavaScript",
"bytes": "6368"
},
{
"name": "Makefile",
"bytes": "3666"
},
{
"name": "Python",
"bytes": "54548"
},
{
"name": "Shell",
"bytes": "995"
},
{
"name": "TypeScript",
"bytes": "86686"
}
],
"symlink_target": ""
} |
import platform
class Platform:
    """Reports whether the host operating system is supported."""
    def name(self):
        """Print the OS name followed by its support status."""
        # Only Linux is supported; any other OS runs in debug mode.
        if platform.system() == 'Linux':
            print platform.system() + ':' + 'Ok'
        else:
            print platform.system() + ':' + 'Not supported, in debug mode'
| {
"content_hash": "a882090796d4cf45b4c19b45d7baef30",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.5407725321888412,
"repo_name": "gabriellacerda/setRobot-tool",
"id": "7bd4dd8b7f26dd06fd4f546f4b5fc64ddaeb86e7",
"size": "233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/Main/Platform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14808"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR04b_ConnectedLHS(HimesisPreConditionPatternLHS):
    """Auto-generated LHS precondition pattern graph (one State match node)."""
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HUnitR04b_ConnectedLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HUnitR04b_ConnectedLHS, self).__init__(name='HUnitR04b_ConnectedLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR04b_ConnectedLHS')
        self["equations"] = []
        # Set the node attributes
        # match class State(State) node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """return True"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["mm__"] = """MT_pre__State"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State')
        # Add the edges
        self.add_edges([
        ])
    # define evaluation methods for each match class.
    def eval_attr11(self, attr_value, this):
        """Attribute constraint for the State match node (always satisfied)."""
        return True
    # define evaluation methods for each match association.
    def constraint(self, PreNode, graph):
        """Global LHS constraint; this pattern imposes no extra condition."""
        return True
| {
"content_hash": "69ef770a501a873c9dd279c96f92e858",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 100,
"avg_line_length": 26.3125,
"alnum_prop": 0.6730007917656373,
"repo_name": "levilucio/SyVOLT",
"id": "a0619bf648fa4925949402aa4ca6715093b5cb24",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/unit_contracts/HUnitR04b_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe, os, json
from frappe.modules import get_module_path, scrub_dt_dn
from frappe.utils import get_datetime_str
def import_files(module, dt=None, dn=None, force=False, pre_process=None):
	"""Import one module document, or a batch of them.

	If `module` is a list, each entry must be a (module, dt, dn) tuple and
	a list of per-entry results is returned; otherwise the single result of
	import_file() is returned.
	"""
	# isinstance (not `type(...) is list`) also accepts list subclasses.
	if isinstance(module, list):
		return [import_file(m[0], m[1], m[2], force=force, pre_process=pre_process)
			for m in module]
	else:
		return import_file(module, dt, dn, force=force, pre_process=pre_process)
def import_file(module, dt, dn, force=False, pre_process=None):
	"""Sync a file from txt if modifed, return false if not updated"""
	file_path = get_file_path(module, dt, dn)
	return import_file_by_path(file_path, force, pre_process=pre_process)
def get_file_path(module, dt, dn):
	"""Return the path of the JSON document for the given doctype/name."""
	dt, dn = scrub_dt_dn(dt, dn)
	return os.path.join(get_module_path(module), dt, dn, dn + ".json")
def import_file_by_path(path, force=False, data_import=False, pre_process=None):
	"""Import the document(s) stored in the JSON file at `path`.

	Unless `force` is set, a document whose `modified` timestamp matches
	the database is skipped (returns False). Returns True after importing,
	or None when the file is missing.
	"""
	frappe.flags.in_import = True
	try:
		docs = read_doc_from_file(path)
	except IOError:
		print path + " missing"
		return
	if docs:
		if not isinstance(docs, list):
			docs = [docs]
		for doc in docs:
			if not force:
				# check if timestamps match
				db_modified = frappe.db.get_value(doc['doctype'], doc['name'], 'modified')
				if db_modified and doc.get('modified')==get_datetime_str(db_modified):
					return False
			original_modified = doc.get("modified")
			import_doc(doc, force=force, data_import=data_import, pre_process=pre_process)
			if original_modified:
				# since there is a new timestamp on the file, update timestamp in
				if doc["doctype"] == doc["name"] and doc["name"]!="DocType":
					frappe.db.sql("""update tabSingles set value=%s where field="modified" and doctype=%s""",
						(original_modified, doc["name"]))
				else:
					frappe.db.sql("update `tab%s` set modified=%s where name=%s" % \
						(doc['doctype'], '%s', '%s'),
						(original_modified, doc['name']))
	frappe.flags.in_import = False
	return True
def read_doc_from_file(path):
	"""Load and return the JSON document at `path`; raise IOError if missing."""
	doc = None
	if os.path.exists(path):
		with open(path, 'r') as f:
			try:
				doc = json.loads(f.read())
			except ValueError:
				# Surface the offending file before re-raising the parse error.
				print "bad json: {0}".format(path)
				raise
	else:
		raise IOError, '%s missing' % path
	return doc
# Fields whose current database value is preserved on re-import
# (unless the import is forced); see import_doc().
ignore_values = {
	"Report": ["disabled"],
	"Print Format": ["disabled"]
}
# Child doctypes whose existing rows are kept instead of being overwritten.
ignore_doctypes = ["Page Role", "DocPerm"]
def import_doc(docdict, force=False, data_import=False, pre_process=None):
	"""Insert `docdict` as a fresh document, replacing any existing record.

	Honours the module-level ignore_values / ignore_doctypes lists unless
	`force` is set; validation/permission checks are skipped except during
	a data import.
	"""
	frappe.flags.in_import = True
	docdict["__islocal"] = 1
	doc = frappe.get_doc(docdict)
	if pre_process:
		pre_process(doc)
	ignore = []
	if frappe.db.exists(doc.doctype, doc.name):
		old_doc = frappe.get_doc(doc.doctype, doc.name)
		if doc.doctype in ignore_values and not force:
			# update ignore values
			for key in ignore_values.get(doc.doctype) or []:
				doc.set(key, old_doc.get(key))
		# update ignored docs into new doc
		for df in doc.meta.get_table_fields():
			if df.options in ignore_doctypes and not force:
				doc.set(df.fieldname, [])
				ignore.append(df.options)
		# delete old
		frappe.delete_doc(doc.doctype, doc.name, force=1, ignore_doctypes=ignore, for_reload=True)
	doc.flags.ignore_children_type = ignore
	doc.flags.ignore_links = True
	if not data_import:
		doc.flags.ignore_validate = True
		doc.flags.ignore_permissions = True
		doc.flags.ignore_mandatory = True
	doc.insert()
	frappe.flags.in_import = False
| {
"content_hash": "6e05399b2ba1ce5052ac3ea9487c9d5e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 94,
"avg_line_length": 28.533333333333335,
"alnum_prop": 0.6799065420560748,
"repo_name": "anandpdoshi/frappe",
"id": "3738a049001ed032999ea13cbe830a227523844f",
"size": "3525",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "frappe/modules/import_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285216"
},
{
"name": "HTML",
"bytes": "1349168"
},
{
"name": "JavaScript",
"bytes": "1092822"
},
{
"name": "Python",
"bytes": "1259016"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""This module provides some common test case bases for
SparklingPandasTestCases"""
from sparklingpandas.utils import add_pyspark_path
import pandas
add_pyspark_path()
from pyspark import SparkConf
from sparklingpandas.pcontext import PSparkContext
import unittest2
import sys
from pandas.util.testing import assert_frame_equal
import logging
class SparklingPandasTestCase(unittest2.TestCase):
    """Basic SparklingPandasTestCase, inherit from this class to get a
    PSparkContext as spark_ctx."""
    def setUp(self):
        """Setup the basic panda spark test case. This right now just creates a
        PSparkContext."""
        logging.info("Setting up spark context")
        # Remember sys.path so tearDown can undo any path munging.
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        # Small local Spark context, one per test case class.
        conf = SparkConf()
        conf.set("spark.cores.max", "4")
        conf.set("spark.master", "local[4]")
        conf.set("spark.app-name", class_name)
        conf.set("spark.driver.allowMultipleContexts", "true")
        self.psc = PSparkContext.simple(conf=conf)
        # Add a common basic input and basicpframe we can reuse in testing
        self.basicinput = [
            ("tea", "happy"),
            ("water", "sad"),
            ("coffee", "happiest"),
            ("tea", "water")]
        self.basiccolumns = ['magic', 'thing']
        self.basicpframe = self.psc.DataFrame(
            self.basicinput, columns=self.basiccolumns)
        self.basicframe = pandas.DataFrame(
            self.basicinput, columns=self.basiccolumns)
        # Add a numeric frame
        self.numericinput = [
            (1, 2), (3, 4), (1, 3), (2, 6), (3, 100), (3, 20), (8, 9)]
        self.numericpframe = self.psc.DataFrame(
            self.numericinput, columns=['a', 'b'])
        self.numericframe = pandas.DataFrame(
            self.numericinput, columns=['a', 'b'])
        # A three column numeric frame
        self.numericthreeinput = [
            (1, 2, -100.5),
            (3, 4, 93),
            (1, 3, 100.2),
            (2, 6, 0.5),
            (3, 100, 1.5),
            (3, 20, 80),
            (8, 9, 20)]
        self.numericthreepframe = self.psc.DataFrame(
            self.numericthreeinput, columns=['a', 'b', 'c'])
        self.numericthreeframe = pandas.DataFrame(
            self.numericthreeinput, columns=['a', 'b', 'c'])
        # A frame mixing numeric and string columns
        self.mixedinput = [(1, 2, "coffee"), (4, 5, "cheese")]
        self.mixedpframe = self.psc.DataFrame(self.mixedinput,
                                              columns=['a', 'b', 'c'])
        self.mixedframe = pandas.DataFrame(self.mixedinput,
                                           columns=['a', 'b', 'c'])
        # Mixed NA frame
        self.mixednainput = [(1, 2, "coffee", None), (4, 5, "cheese", None)]
        self.mixednapframe = self.psc.DataFrame(self.mixednainput,
                                                columns=['a', 'b', 'c', 'd'])
        self.mixednaframe = pandas.DataFrame(self.mixednainput,
                                             columns=['a', 'b', 'c', 'd'])
    def tearDown(self):
        """
        Tear down the basic panda spark test case. This stops the running
        context and does a hack to prevent Akka rebinding on the same port.
        """
        logging.info("tearing down spark context")
        self.psc.stop()
        sys.path = self._old_sys_path
        # To avoid Akka rebinding to the same port, since it doesn't unbind
        # immediately on shutdown
        self.psc.spark_ctx._jvm.System.clearProperty("spark.driver.port")
    @staticmethod
    def _compare_dfs(df1, df2):
        """
        Compare two DataFrames for equality
        """
        assert_frame_equal(df1, df2)
if __name__ == "__main__":
unittest2.main()
| {
"content_hash": "c4eccbc19c53994656acaaafe45932aa",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 39.020833333333336,
"alnum_prop": 0.5621996796583022,
"repo_name": "holdenk/sparklingpandas",
"id": "ca174ee32277184078b8d2a72d51cf5389cbe2f6",
"size": "4528",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sparklingpandas/test/sp_test_case.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97395"
},
{
"name": "Scala",
"bytes": "6153"
},
{
"name": "Shell",
"bytes": "2774"
}
],
"symlink_target": ""
} |
import os
import shutil
import urllib
import urllib2
import json
from subprocess import call, Popen
from time import sleep
from nosuch.oscutil import *
# Choose the MIDI loopback driver: loopMIDI when True, LoopBe otherwise.
UseLoopMIDI = False
# Root of the VizBench installation, taken from the environment.
root = os.getenv("VIZBENCH")
print "root = ",root
def killtask(nm):
	"""Force-kill every running Windows process whose image name is nm."""
	cmd = ["c:/windows/system32/taskkill", "/f", "/im", nm]
	call(cmd)
call(["c:/python27/python.exe",os.path.join(root,"bin","killall.py")])
call(["c:/python27/python.exe",os.path.join(root,"bin","debugcycle.py")])
if UseLoopMIDI:
loopmidi = Popen(["/Program Files (x86)/Tobias Erichsen/loopMIDI/loopMIDI.exe"])
print "loopMIDI has been started."
bidulepatch = "patches\\bidule\\viz_loopMIDI.bidule"
else:
print "NOTE: loopMIDI has NOT been started, and LoopBe is used."
bidulepatch = "patches\\bidule\\viz.bidule"
bidulepatchpath = os.path.join(root,bidulepatch)
sleep(1) # ???
bidule = Popen([
"C:\\Program Files\\Plogue\\Bidule\\PlogueBidule_x64.exe",
bidulepatchpath])
# Wait for Bidule to load
sleep(10)
### patches="c:\\local\\manifold\\bin\\config\\palette"
### shutil.copy(patches+"\\default_burn.mnf",patches+"\\default.mnf")
# XXX - there's a hardcoded path in tofile
fromfile=os.path.join(root,"patches\\resolume\\resolume_config.xml")
tmpfile=os.path.join(root,"patches\\resolume\\tmp_resolume_config.xml")
# replace %VIZBENCH% with the root
fin = open(fromfile)
ftmp = open(tmpfile,"w")
lines = fin.readlines()
for line in lines:
line = line.replace("%VIZBENCH%",root)
ftmp.write(line)
fin.close()
ftmp.close()
tofile="c:\\users\\tjt\\documents\\resolume avenue 4\\preferences\\config.xml"
shutil.copy(tmpfile,tofile)
arena = Popen(["C:\\Program Files (x86)\\Resolume Avenue 4.1.11\\Avenue.exe"])
## cd \local\python\nosuch_oscutil
global resolume
resolume = OscRecipient("127.0.0.1",7000)
# Activate the clips in Resolume.
# IMPORTANT!! The last clip activated MUST be layer1, so that the
# Osc enabling/disabling eof FFGL plugins works as intended.
sleep(12)
## print "Sending OSC to activate Resolume."
# resolume.sendosc("/layer2/clip1/connect",[1])
# resolume.sendosc("/layer1/clip1/connect",[1])
# Keep sending - Resolume might not be up yet
for i in range(5):
sleep(2)
resolume.sendosc("/layer2/clip1/connect",[1])
resolume.sendosc("/layer1/clip1/connect",[1])
# call(["c:/local/bin/nircmd.exe","win","min","stitle","Plogue"])
print "DONE!"
| {
"content_hash": "843cfdf6fbbadfb5ce4fd6ad2b6c4cef",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 81,
"avg_line_length": 27.36470588235294,
"alnum_prop": 0.7166809974204643,
"repo_name": "nosuchtim/VizBench",
"id": "d9662c1afcc554af691aa142d91a638462a0ae81",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/startviz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "6312"
},
{
"name": "Batchfile",
"bytes": "26273"
},
{
"name": "C",
"bytes": "14955654"
},
{
"name": "C++",
"bytes": "6558816"
},
{
"name": "CMake",
"bytes": "279444"
},
{
"name": "CSS",
"bytes": "581261"
},
{
"name": "ChucK",
"bytes": "1310"
},
{
"name": "Common Lisp",
"bytes": "16457"
},
{
"name": "DIGITAL Command Language",
"bytes": "35809"
},
{
"name": "GLSL",
"bytes": "67424"
},
{
"name": "Gnuplot",
"bytes": "424"
},
{
"name": "Groff",
"bytes": "2333"
},
{
"name": "HTML",
"bytes": "5256465"
},
{
"name": "Inno Setup",
"bytes": "2160"
},
{
"name": "Java",
"bytes": "40393"
},
{
"name": "JavaScript",
"bytes": "2426586"
},
{
"name": "Lua",
"bytes": "1660"
},
{
"name": "M",
"bytes": "7648"
},
{
"name": "M4",
"bytes": "42898"
},
{
"name": "Makefile",
"bytes": "199739"
},
{
"name": "Mathematica",
"bytes": "54820"
},
{
"name": "Objective-C",
"bytes": "127445"
},
{
"name": "Perl",
"bytes": "54178"
},
{
"name": "Pure Data",
"bytes": "1091"
},
{
"name": "Python",
"bytes": "2151201"
},
{
"name": "QMake",
"bytes": "971"
},
{
"name": "Shell",
"bytes": "379773"
},
{
"name": "SuperCollider",
"bytes": "2139"
},
{
"name": "XSLT",
"bytes": "12347"
}
],
"symlink_target": ""
} |
import pymongo
class MongoPipeline(object):
    """Scrapy item pipeline that stores scraped items in MongoDB.

    Items are written to one collection per item class, named after the
    item's class name.
    """

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the MONGO_URI / MONGO_DATABASE settings."""
        return cls(
            mongo_uri = crawler.settings.get('MONGO_URI'),
            mongo_db = crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        """Connect to MongoDB when the spider starts."""
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        """Release the MongoDB client when the spider closes."""
        self.client.close()

    def process_item(self, item, spider):
        """Persist the item and pass it along the pipeline unchanged."""
        collection_name = item.__class__.__name__
        # insert_one replaces the deprecated Collection.insert (PyMongo 3+).
        self.db[collection_name].insert_one(dict(item))
        return item
| {
"content_hash": "dc5fe25db935f438c318dd8e17b8377a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 29.28,
"alnum_prop": 0.6079234972677595,
"repo_name": "aeoluswing/scrapy",
"id": "c32b72ea53537de54b86266877d9c3a3b27b4090",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiubai/qiubai/pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "59766"
},
{
"name": "Python",
"bytes": "22995"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Lyst Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABCMeta, abstractmethod
from .base import PegasosBase
from . import constants
import numpy as np
class SVMPegasosBase(PegasosBase):
    """Abstract base for Pegasos estimators restricted to the SVM learner.

    Construction fails fast with ValueError for any other learner type.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self,
                 iterations,
                 lambda_reg,
                 learner_type,
                 loop_type,
                 verbose):
        # Only the Pegasos SVM learner is accepted by this hierarchy.
        if learner_type == constants.LEARNER_PEGASOS_SVM:
            super(SVMPegasosBase, self).__init__(
                iterations, lambda_reg, learner_type, loop_type, verbose)
        else:
            raise ValueError('%s only supports SVM learners' % self.__class__.__name__)
class LogisticPegasosBase(PegasosBase):
    """Abstract base for Pegasos estimators using the logistic learner.

    Enforces at construction time that ``learner_type`` is the logistic
    learner, and adds probability estimates on top of the decision function.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self,
                 iterations,
                 lambda_reg,
                 learner_type,
                 loop_type,
                 verbose):
        if learner_type != constants.LEARNER_PEGASOS_LOGREG:
            raise ValueError('%s only supports logistic learners' % self.__class__.__name__)
        super(LogisticPegasosBase, self).__init__(
            iterations,
            lambda_reg,
            learner_type,
            loop_type,
            verbose)

    def predict_proba(self, X):
        """Return class probabilities for the samples in X.

        Output has shape (n_samples, 2) with columns (positive, negative).

        Raises:
            ValueError: if the model has not been fit yet.
        """
        # NOTE(review): assumes the fitted weight vector is truthy; a
        # falsy-but-valid weight vector would be rejected here too.
        if not self.weight_vector:
            raise ValueError('must call `fit` before `predict_proba`')
        p = self.decision_function(X) * self.weight_vector.scale
        # Numerically stable sigmoid.  The original exp(p) / (1 + exp(p))
        # overflows for large positive p, producing inf/inf = nan; using
        # exp(-|p|) keeps every exponent non-positive while computing the
        # mathematically identical value.
        z = np.exp(-np.abs(p))
        positive = np.where(p >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
        # Return positive and negative class probabilities to
        # satisfy sklearn pipelines such as OneVsRestClassifier.
        return np.vstack((positive, 1 - positive)).T
| {
"content_hash": "20babd0ca529045676b2d2b765e1d6ac",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 92,
"avg_line_length": 29.974358974358974,
"alnum_prop": 0.5902480752780154,
"repo_name": "animeshramesh/incremental-learning",
"id": "dc23322a4da4a0de04e08461b62f4fb5930db18a",
"size": "2338",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pegasos/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4691316"
},
{
"name": "Python",
"bytes": "32991"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
} |
import amici
import sys
import petab
import subprocess
import os
from amici.petab_import import import_model
def main():
    """Run one AMICI performance scenario chosen by the first CLI argument.

    'compilation' clones, validates and installs the large
    CS_Signalling_ERBB_RAS_AKT PEtab model; any other argument simulates
    the previously installed model with a specific solver configuration
    and prints solver diagnostic counters.
    """
    arg = sys.argv[1]
    if arg == 'compilation':
        # Fetch the benchmark model repository once; reuse it on re-runs.
        git_dir = os.path.join(os.curdir, 'CS_Signalling_ERBB_RAS_AKT')
        if not os.path.exists(git_dir):
            subprocess.run([
                'git', 'clone', '--depth', '1',
                'https://github.com/ICB-DCM/CS_Signalling_ERBB_RAS_AKT']
            )
        os.chdir(os.path.join(os.curdir, 'CS_Signalling_ERBB_RAS_AKT'))
        # Load and lint the PEtab problem definition before importing it.
        pp = petab.Problem.from_yaml('FroehlichKes2018/PEtab/FroehlichKes2018.yaml')
        petab.lint_problem(pp)
        os.chdir(os.path.dirname(os.path.abspath(os.curdir)))
        # Generate the AMICI model code; compile=False so the build happens
        # in the setup.py step below.
        import_model(model_name='CS_Signalling_ERBB_RAS_AKT_petab',
                     sbml_model=pp.sbml_model,
                     condition_table=pp.condition_df,
                     observable_table=pp.observable_df,
                     measurement_table=pp.measurement_df,
                     compile=False,
                     verbose=True)
        os.chdir(os.path.join(os.curdir, 'CS_Signalling_ERBB_RAS_AKT_petab'))
        subprocess.run(['python', 'setup.py', 'install'])
        return
    else:
        import CS_Signalling_ERBB_RAS_AKT_petab as model_module
    model = model_module.getModel()
    solver = model.getSolver()
    # TODO
    # Minimal synthetic experiment: a single observation at one timepoint.
    edata = amici.ExpData(model)
    edata.setTimepoints([1e8])
    edata.setObservedData([1.0])
    edata.setObservedDataStdDev([1.0])
    # Each branch below configures the solver for one benchmark scenario;
    # the '*steadystate*' ones additionally request steady-state output
    # via an infinite timepoint.
    if arg == 'forward_simulation':
        solver.setSensitivityMethod(amici.SensitivityMethod.none)
        solver.setSensitivityOrder(amici.SensitivityOrder.none)
    elif arg == 'forward_sensitivities':
        model.setParameterList(list(range(100)))
        solver.setSensitivityMethod(amici.SensitivityMethod.forward)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
    elif arg == 'adjoint_sensitivities':
        solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
    elif arg == 'forward_simulation_non_optimal_parameters':
        tmpPar = model.getParameters()
        model.setParameters([0.1 for _ in tmpPar])
        solver.setSensitivityMethod(amici.SensitivityMethod.none)
        solver.setSensitivityOrder(amici.SensitivityOrder.none)
    elif arg == 'adjoint_sensitivities_non_optimal_parameters':
        tmpPar = model.getParameters()
        model.setParameters([0.1 for _ in tmpPar])
        solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
    elif arg == 'forward_steadystate_sensitivities_non_optimal_parameters':
        tmpPar = model.getParameters()
        model.setParameters([0.1 for _ in tmpPar])
        solver.setSensitivityMethod(amici.SensitivityMethod.forward)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
        edata.setTimepoints([float('inf')])
    elif arg == 'adjoint_steadystate_sensitivities_non_optimal_parameters':
        tmpPar = model.getParameters()
        model.setParameters([0.1 for _ in tmpPar])
        solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
        edata.setTimepoints([float('inf')])
    else:
        print("Unknown argument:", arg)
        sys.exit(1)
    rdata = amici.runAmiciSimulation(model, solver, edata)
    # Report solver effort/timing counters for the benchmark harness.
    diagnostics = ['numsteps', 'numstepsB', 'numrhsevals', 'numrhsevalsB',
                   'numerrtestfails', 'numerrtestfailsB',
                   'numnonlinsolvconvfails', 'numnonlinsolvconvfailsB',
                   'preeq_cpu_time', 'preeq_cpu_timeB',
                   'cpu_time', 'cpu_timeB',
                   'posteq_cpu_time', 'posteq_cpu_timeB']
    for d in diagnostics:
        print(d, rdata[d])
    assert rdata['status'] == amici.AMICI_SUCCESS
# Script entry point: the scenario name is taken from sys.argv[1] in main().
if __name__ == '__main__':
    main()
| {
"content_hash": "d585d6f45870a91d0a6dd792cf9292d4",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 84,
"avg_line_length": 41.510416666666664,
"alnum_prop": 0.6434127979924718,
"repo_name": "AMICI-developer/AMICI",
"id": "0f853c4a665e463b57172f768d7a4a8edb63feb1",
"size": "4009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/performance/test.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6507412"
},
{
"name": "C++",
"bytes": "4515"
},
{
"name": "CMake",
"bytes": "89132"
},
{
"name": "FORTRAN",
"bytes": "115964"
},
{
"name": "Makefile",
"bytes": "196218"
},
{
"name": "Matlab",
"bytes": "436730"
},
{
"name": "Perl",
"bytes": "9412"
},
{
"name": "TeX",
"bytes": "131408"
}
],
"symlink_target": ""
} |
from __future__ import print_function # Use print() instead of print
from flask import url_for
from StringIO import StringIO
def test_page_urls(client):
    """Smoke-test the main pages: landing, user login/profile, admin.

    Walks through the app as an ordinary user (login, view/edit profile
    with an avatar upload, logout) and then as an admin (login, admin
    page, logout), asserting on a marker string from each page.
    """
    # Visit landing page
    response = client.get(url_for('landing_page'))
    assert b'<h1>Not sure' in response.data
    # Login as user and visit User page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='user@example.com', password='Password1'))
    assert b'<h1>Welcome</h1>' in response.data
    response = client.get(url_for('user_page', id=2))
    assert b'<h1>User Example</h1>' in response.data
    # Edit User Profile page
    response = client.get(url_for('user_profile_page'))
    assert b'<h1>User Profile</h1>' in response.data
    # Use a context manager so the file handle is closed even if an
    # assertion below fails (the original leaked the handle on error).
    # NOTE(review): the PNG is read in text mode (Python 2 str, matching
    # the StringIO import) — confirm binary mode if ported to Python 3.
    with open('app/static/images/default.png') as f:
        lines = f.read()
    response = client.post(url_for('user_profile_page'), follow_redirects=True,
                           data=dict(first_name='User', last_name='User',
                                     photo=(StringIO(lines),
                                            'test.png')))
    response = client.get(url_for('user_page', id=2))
    assert b'<h1>User User</h1>' in response.data
    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert b'<h1>Sign in</h1>' in response.data
    # Login as admin and visit Admin page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='admin@example.com', password='Password1'))
    assert b'<h1>Welcome</h1>' in response.data
    response = client.get(url_for('admin_page'))
    assert b'<h1>Admin page</h1>' in response.data
    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert b'<h1>Sign in</h1>' in response.data
| {
"content_hash": "61e099589384462eee91a3a4aac019b4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 42.34090909090909,
"alnum_prop": 0.6210413311862587,
"repo_name": "xke/nash",
"id": "4b7c8e44ad0e830dc039227fe8348e47540b05ee",
"size": "1963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_page_urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4074"
},
{
"name": "HTML",
"bytes": "26321"
},
{
"name": "JavaScript",
"bytes": "28540"
},
{
"name": "Makefile",
"bytes": "278"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "45436"
},
{
"name": "Shell",
"bytes": "426"
}
],
"symlink_target": ""
} |
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):

  def testTransformerAEOnDVQ(self):
    """Builds a small TransformerAE with a DVQ bottleneck and checks the
    shape of the logits after one forward pass."""
    batch_size = 3
    input_length = 5
    target_length = 16
    vocab_size = 9
    hparams = transformer_vae.transformer_ae_small()
    hparams.bottleneck_kind = "dvq"
    hparams.dp_strength = 0
    p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                     vocab_size,
                                                     hparams)
    hparams.problem_hparams = p_hparams
    # np.random.random_integers is deprecated (NumPy >= 1.11); randint with
    # an exclusive upper bound of vocab_size + 1 draws from the same
    # [1, vocab_size] range, which the leading -1 shifts to
    # [0, vocab_size - 1].
    inputs = -1 + np.random.randint(
        1, vocab_size + 1, size=(batch_size, input_length, 1, 1))
    targets = -1 + np.random.randint(
        1, vocab_size + 1, size=(batch_size, target_length, 1, 1))
    features = {
        "inputs": tf.constant(inputs, dtype=tf.int32),
        "targets": tf.constant(targets, dtype=tf.int32),
        "target_space_id": tf.constant(1, dtype=tf.int32),
    }
    tf.train.create_global_step()
    model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
                                          p_hparams)
    logits, _ = model(features)
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      logits_val = session.run(logits)
    self.assertEqual(logits_val.shape,
                     (batch_size, target_length, 1, 1, vocab_size))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| {
"content_hash": "7047e79bb6279410181b5e5d8e565ab8",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 37.78260869565217,
"alnum_prop": 0.6242807825086306,
"repo_name": "mlperf/training_results_v0.5",
"id": "c47c485b57feb4799d8e42fdda9790876ef2f5f7",
"size": "2344",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/models/research/transformer_vae_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import time
import unittest
from pycopia import anypath
from pycopia import asyncio
from pycopia import asyncserver
from pycopia import basicconfig
from pycopia import benchmarks
from pycopia import charbuffer
from pycopia import cliutils
from pycopia import combinatorics
from pycopia import daemonize
from pycopia import environ
from pycopia import ezmail
from pycopia import fsm
from pycopia import guid
from pycopia import interactive
from pycopia import ipv4
from pycopia import logfile
from pycopia import makepassword
from pycopia import md5lib
from pycopia import methodholder
from pycopia import netstring
from pycopia import rot13
from pycopia import scheduler
from pycopia import sharedbuffer
from pycopia import smtp_envelope
from pycopia import sourcegen
from pycopia import shparser
from pycopia import table
from pycopia import texttools
from pycopia import passwd
from pycopia import re_inverse
if os.environ.get("DISPLAY"):
from pycopia import gtktools
from pycopia.inet import ABNF
from pycopia.inet import DICT
from pycopia.inet import fcgi
from pycopia.inet import HTTP
from pycopia.inet import httputils
from pycopia.inet import mailaliases
from pycopia.inet import rfc2822
from pycopia.inet import SMTP
from pycopia.inet import telnet
from pycopia.inet import toc
from pycopia.ISO import iso3166
from pycopia.ISO import iso639a
import pycopia.OS
import pycopia.OS.sequencer
class CoreTests(unittest.TestCase):
    """Exercises assorted pycopia core modules: charbuffer, ipv4, passwd,
    shparser, re_inverse and the OS sequencer."""

    def test_charbuffer(self):
        """Charbuffer uses mmap module."""
        b = charbuffer.Buffer()
        b += "testing"
        self.assertEqual(str(b), "testing")

    def test_ipv4(self):
        """basic test of ipv4 module."""
        r1 = ipv4.IPRange("172.22.1.11/24", "172.22.1.21/24")
        r2 = ipv4.IPRange("172.22.1.21/24", "172.22.1.11/24")
        r3 = ipv4.IPRange("172.22.1.55/24", "172.22.1.55/24")
        l1 = list(r1)
        l2 = list(r2)
        print(l1)
        print(l1 == l2)
        print(r3, list(r3))
        ip = ipv4.IPv4("172.22.4.1/24")
        self.assertEqual(ip.mask, 0b11111111111111111111111100000000)
        print(ip.address)
        ip.address = "172.22.4.2/24"
        print(ip.address)
        ip.address = -1407843325
        print(ip.CIDR)
        ip = ipv4.IPv4("1.1.1.1/30")
        print(len(ip))
        print(len(ipv4.IPv4("1.1.1.1/29")))
        print(len(ipv4.IPv4("1.1.1.1/28")))
        print(len(ipv4.IPv4("1.1.1.1/24")))
        for each_ip in ip:
            print(each_ip)
        self.assertEqual(ip.mask, 0b11111111111111111111111111111100)
        self.assertEqual(ip.address, 0x01010101)

    def test_passwd(self):
        """Smoke-test passwd entry accessors for the current user."""
        # Bug fix: the original signature was 'def test_passwd(argv):',
        # so unittest passed the instance in as 'argv'.  Name the first
        # parameter 'self' like every other test method.
        pwent = passwd.getpwself()
        print(repr(pwent))
        print(str(pwent))
        print(int(pwent))
        print(pwent.name)
        print(pwent.home)
        print(pwent.uid)
        print(pwent[3])

    def test_shparser(self):
        """Verify shell-style tokenizing, quoting and $VAR expansion."""
        def _check_argv(argv):
            self.assertEqual(argv[0], "echo")
            self.assertEqual(argv[1], "-q")
            self.assertEqual(argv[3], "")
            self.assertEqual(argv[9], "bogus one")
            self.assertEqual(argv[10], argv[11])
        sh = shparser.ShellParser(_check_argv)
        sh.feedline('echo -q -N "" -t tparm -b 1024 -f "bogus one" $PATH ${PATH}')

    def test_re_inverse(self):
        """Generate matching and non-matching strings for a sample regex."""
        import sre_parse
        RE = r'(firstleft|)somestring(\s.*|) \S(a|b) [fgh]+ {2,3}R(\S)'
        print(sre_parse.parse(RE))
        for i in range(20):
            re_inverse.make_match_string(RE)
        for i in range(20):
            re_inverse.make_nonmatch_string(RE)

    def test_sequencer(self):
        """End-to-end timing test of OS.sequencer tasks (runs ~1 minute)."""
        counters = [0, 0, 0, 0, 0]
        starttimes = [None, None, None, None, None]
        def _test_job(counters):
            print("test job 1")
            counters[0] += 1
        def _test_job2(counters):
            print("test job 2")
            counters[1] += 1
        def _test_delay_job(counters, starttimes):
            if counters[2] == 0:
                starttimes[2] = time.time()
            print("test delay job")
            counters[2] += 1
        def _test_delay_job2(counters, starttimes):
            if counters[3] == 0:
                starttimes[3] = time.time()
            print("test delay job 2 at", time.time())
            counters[3] += 1
        def _test_oneshot(counters, starttimes):
            thetime = time.time()
            counters[4] += 1
            starttimes[4] = thetime
            print("test oneshot at", thetime)
        s = pycopia.OS.sequencer.Sequencer()
        start = time.time()
        s.add_task(_test_job, 2.0, duration=20.0, callback_args=(counters,))
        s.add_task(_test_job2, 3.0, duration=30.0, callback_args=(counters,))
        s.add_task(_test_delay_job, 3.1, delay=35, duration=18.0, callback_args=(counters,starttimes))
        s.add_task(_test_delay_job2, 2.0, delay=55, duration=3.0, callback_args=(counters,starttimes))
        s.add_task(_test_oneshot, 0.0, delay=15, callback_args=(counters,starttimes))
        s.run()
        s.close()
        endtime = time.time()
        self.assertAlmostEqual(endtime-start, 58.0, places=2) # job2 delay plus duration (max time)
        self.assertAlmostEqual(starttimes[2] - start, 35.0, places=2) # delay_job start delay
        self.assertAlmostEqual(starttimes[3] - start, 55.0, places=2) # delay_job2 start delay
        self.assertAlmostEqual(endtime - starttimes[3], 3.0, places=2) # test_delay_job2
        self.assertAlmostEqual(starttimes[4] - start, 15.0, places=2) # oneshot delay
        self.assertEqual(counters[0], 10)
        self.assertEqual(counters[1], 10)
        self.assertEqual(counters[2], 6)
        self.assertEqual(counters[3], 2)
        self.assertEqual(counters[4], 1)
# Run the core test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6205e4b1965b47505b8f1ed417656c7f",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 102,
"avg_line_length": 32.75824175824176,
"alnum_prop": 0.6296544783629654,
"repo_name": "kdart/pycopia",
"id": "c92113271567d845e61038c1f4569bc7c6e34cb4",
"size": "6033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423794"
},
{
"name": "CSS",
"bytes": "19522"
},
{
"name": "JavaScript",
"bytes": "91759"
},
{
"name": "Makefile",
"bytes": "6958"
},
{
"name": "Perl",
"bytes": "271"
},
{
"name": "Python",
"bytes": "6098633"
},
{
"name": "Roff",
"bytes": "7289"
},
{
"name": "Shell",
"bytes": "12778"
},
{
"name": "Vim script",
"bytes": "50421"
}
],
"symlink_target": ""
} |
def extractIvorycrescentWordpressCom(item):
    """Release-message parser for the 'ivorycrescent.wordpress.com' feed."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip items with no chapter/volume info, and preview posts.
    if not (chp or vol) or "preview" in title.lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    hits = (
        (name, tl_type)
        for tagname, name, tl_type in tagmap
        if tagname in item['tags']
    )
    for name, tl_type in hits:
        # First matching tag wins, exactly as the original loop did.
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                           postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "31a29ffae3c28bb5bcb2bf3876da309f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.6389380530973451,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b7d8b14b69efaec1ef0abed87aeab901b17bdec9",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractIvorycrescentWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Cryptograhpy routines used by HAP."""
from typing import Optional
from pyatv.support.chacha20 import Chacha20Cipher
class HAPSession:
    """Manages cryptography for a HAP session according to IP in specification.

    HAP transmits ciphertext in frames of at most 1024 bytes, each
    preceded by a two-byte little-endian length and followed by a 16-byte
    auth tag.  Until enable() installs the session keys, both encrypt()
    and decrypt() pass data through untouched, so the object can sit
    transparently in the I/O path before encryption is negotiated.
    """

    FRAME_LENGTH = 1024  # As specified by HAP, section 5.2.2 (Release R1)
    AUTH_TAG_LENGTH = 16

    def __init__(
        self,
    ) -> None:
        """Initialize a new HAPSession instance."""
        self._encrypted_data = b""
        self.chacha20: Optional[Chacha20Cipher] = None

    def enable(self, output_key: bytes, input_key: bytes) -> None:
        """Enable encryption with specified keys."""
        self.chacha20 = Chacha20Cipher(output_key, input_key)

    def decrypt(self, data: bytes) -> bytes:
        """Decrypt incoming data."""
        cipher = self.chacha20
        if cipher is None:
            return data
        self._encrypted_data += data
        plaintext = b""
        while self._encrypted_data:
            prefix = self._encrypted_data[0:2]
            frame_size = (
                int.from_bytes(prefix, byteorder="little") + self.AUTH_TAG_LENGTH
            )
            if len(self._encrypted_data) < frame_size + 2:
                # Incomplete frame: keep the remainder buffered for the next
                # call and return whatever has been decrypted so far.
                break
            frame = self._encrypted_data[2 : 2 + frame_size]
            plaintext += cipher.decrypt(frame, aad=prefix)
            self._encrypted_data = self._encrypted_data[2 + frame_size :]
        return plaintext

    def encrypt(self, data: bytes) -> bytes:
        """Encrypt outgoing data."""
        cipher = self.chacha20
        if cipher is None:
            return data
        frames = []
        for offset in range(0, len(data), self.FRAME_LENGTH):
            frame = data[offset : offset + self.FRAME_LENGTH]
            prefix = int.to_bytes(len(frame), 2, byteorder="little")
            frames.append(prefix + cipher.encrypt(frame, aad=prefix))
        return b"".join(frames)
| {
"content_hash": "582b2c57f8ad2a4266135fa61c9f0196",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 81,
"avg_line_length": 32.78787878787879,
"alnum_prop": 0.6016635859519408,
"repo_name": "postlund/pyatv",
"id": "0ad9f91df014919f0b8bdadede2295d09f95d486",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyatv/auth/hap_session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "456"
},
{
"name": "Python",
"bytes": "1432120"
},
{
"name": "Shell",
"bytes": "2108"
}
],
"symlink_target": ""
} |
def helper_one():
    """Print a fixed greeting; used to verify helper-module imports."""
    message = "HELLO MY FRIEND FROM HELPER ONE!!"
    print(message)
def testing(a):
print("HELPING TO PUSH ARGS: {0}".format(a))
| {
"content_hash": "45994fcc1952c030ed416f7ffdea6b08",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 25,
"alnum_prop": 0.672,
"repo_name": "mrchristine/dbc-notebooks",
"id": "9b6fba34f0536f2dd85b2ae9cce1a9e8a5a460ee",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/pyspark_sync/helper1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "32953"
},
{
"name": "Python",
"bytes": "7390"
}
],
"symlink_target": ""
} |
from flask import g
from mongoengine.connection import get_db
from .model_document import ModelDocument, ModelQuerySet
################################################################################
# Names exported via 'from <this module> import *'.
__all__ = ('MultipleDatabaseModelDocument',)
################################################################################
class MultipleDatabaseModelQuerySet(ModelQuerySet):
    """QuerySet that freezes the active database of its Document class.

    The db alias and collection are snapshotted at construction time onto a
    private copy of the Document class, so later database switches do not
    affect queries issued through this QuerySet.
    """
    def __init__(self, document, collection):
        # make a local copy of the Document class for this QuerySet, to prevent
        # database, so that new attributes can be set on it
        new_document = self._copy_class(document)
        # this copies what may be class-level attributes from 'document',
        # to instance-level attributes on 'new_document', freezing them
        current_db_alias = document._get_db_alias()
        new_document._get_db_alias = staticmethod(lambda: current_db_alias)
        current_collection = document._get_collection()
        new_document._get_collection = staticmethod(lambda: current_collection)
        super(MultipleDatabaseModelQuerySet, self).__init__(new_document, collection)
    @staticmethod
    def _copy_class(cls):
        # Shallow-copies a class: same name, bases and namespace.  '_meta'
        # is renamed back to 'meta' — presumably so the document metaclass
        # re-processes it on the copy; confirm against ModelDocument.
        # TODO: move this to a common utils
        new_cls_dict = dict(cls.__dict__)
        new_cls_dict['meta'] = new_cls_dict.pop('_meta')
        return type(cls.__name__, cls.__bases__, new_cls_dict)
class MultipleDatabaseModelDocument(ModelDocument):
    """
    An abstract class for documents that may reside in one of multiple databases.

    The active database is resolved dynamically per request context (see
    '_get_db_alias'), so subclasses must be used inside a
    MultipleDatabaseImageStore context manager.
    """
    # TODO: prevent this class from being instantiated directly
    meta = {
        'abstract': True,
        'allow_inheritance': False,
        'db_alias': None, # this shouldn't actually be used
        'queryset_class': MultipleDatabaseModelQuerySet,
        'auto_create_index': False, # don't change; see '_get_collection' for why this is set
    }

    @property
    def database(self):
        """The image store this document instance was loaded from."""
        # the import is required here to prevent circular imports
        # TODO: remove this import statement
        from ..image_store import MultipleDatabaseImageStore
        return MultipleDatabaseImageStore.objects.with_id(self._db_alias)

    @classmethod
    def _get_db_alias(cls):
        """
        Helper method to provide the current database, as set by a
        MultipleDatabaseImageStore context manager.
        This would be better as a property, but Python has poor support for
        classmethod descriptors, particularly with mutators.
        """
        try:
            return g.multiple_database_connection_aliases[-1]
        except (AttributeError, IndexError):
            # Bug fix: this previously raised the 'NotImplemented' singleton,
            # which is not an exception class, so Python raised a confusing
            # TypeError instead of the intended message.
            raise NotImplementedError('A "%s" must be used inside a "MultipleDatabaseImageStoreMixin" context (\'with\' statement).' % cls.__name__)

    @classmethod
    def _get_db(cls):
        """
        Overrides the Document._get_collection classmethod.
        This will only be called on class instances, as instantiated objects
        have this method patched by 'self.switch_db'.
        """
        return get_db(cls._get_db_alias())

    @classmethod
    def _get_collection(cls):
        """
        Overrides the 'Document._get_collection' classmethod.
        This method attempts to provide some degree of caching, preventing a
        new collection from having to be created on every access, while still
        allowing the database to change.
        Unlike for databases, MongoEngine doesn't store an internal cache for
        multiple collections per class, so one is created here, and used
        instead of the single '_collection' cache.
        This will only be called on class instances, as instantiated objects
        have this method patched by 'self.switch_db'.
        """
        if issubclass(MultipleDatabaseModelDocument, cls):
            # setting the '_collections' property on one of the common base
            # classes would prevent the derived classes from having their own
            # seperate instances of the property
            raise NotImplementedError('"_get_collection" should only be called on concrete model classes.')
        if not hasattr(cls, '_collections'):
            cls._collections = dict()
        db_alias = cls._get_db_alias()
        try:
            cls._collection = cls._collections[db_alias]
        except KeyError:
            cls._collection = None
            # 'cls._collection' is set as a side effect of the superclass
            # '_get_collection'
            cls._collections[db_alias] = super(MultipleDatabaseModelDocument, cls)._get_collection()
            # unless meta['auto_create_index'] is false, the superclass
            # '_get_collection' will attempt to call 'ensure_indexes', which
            # in turn calls '_get_collection', leading to infinite recursion
            # so, wait until the necessary '_collection' / '_collections' values
            # are set after the return, and only then call 'ensure_indexes'
            cls.ensure_indexes()
        return cls._collection

    def __init__(self, *args, **kwargs):
        super(MultipleDatabaseModelDocument, self).__init__(*args, **kwargs)
        # make the new database persistent to this instance
        cls_db_alias = self._get_db_alias()
        self._db_alias = cls_db_alias # save the value for use in the 'database' property
        self.switch_db(cls_db_alias) # this patches over 'self._get_db'
| {
"content_hash": "7e6f0f97878443446bbad3bfcd64a1ce",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 143,
"avg_line_length": 42.93700787401575,
"alnum_prop": 0.6310287914909224,
"repo_name": "SlideAtlas/SlideAtlas-Server",
"id": "cae60f7de5ddb79651b8ed08e0ca5f0021b51b48",
"size": "5469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slideatlas/models/common/multiple_database_model_document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "80540"
},
{
"name": "HTML",
"bytes": "191521"
},
{
"name": "JavaScript",
"bytes": "2008727"
},
{
"name": "Jupyter Notebook",
"bytes": "4818"
},
{
"name": "Python",
"bytes": "548349"
},
{
"name": "Shell",
"bytes": "6978"
}
],
"symlink_target": ""
} |
from django import forms
class UploadForm(forms.Form):
    """Single-field Django form for uploading one file."""
    file = forms.FileField()
| {
"content_hash": "6ab1b96d455002d4e7a649551393a9ee",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 17.2,
"alnum_prop": 0.7325581395348837,
"repo_name": "webkom/noetikon",
"id": "3d54bda26aef7a0a6f419fd100ebdc9a234933f8",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noetikon/files/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2927"
},
{
"name": "HTML",
"bytes": "3086"
},
{
"name": "Makefile",
"bytes": "688"
},
{
"name": "Python",
"bytes": "31713"
}
],
"symlink_target": ""
} |
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
# User-Agent header template; the '%s' is filled in by the calling service.
USER_AGENT = '%s GData-Python/2.0.18'
class Error(Exception):
  """Base exception for errors raised by this module."""
  pass
class UnparsableUrlObject(Error):
  """Raised when a URL argument cannot be parsed."""
  pass
class ContentLengthRequired(Error):
  """Raised when a request body is missing a determinable Content-Length."""
  pass
class HttpResponse(object):
  """Mirrors httplib.HTTPResponse for responses produced by an HttpClient."""

  def __init__(self, body=None, status=None, reason=None, headers=None):
    """Constructor for an HttpResponse object.

    HttpResponse represents the server's response to an HTTP request from
    the client. The HttpClient.request method returns a httplib.HTTPResponse
    object and this HttpResponse class is designed to mirror the interface
    exposed by httplib.HTTPResponse.

    Args:
      body: A file like object, with a read() method. The body could also
          be a string, and the constructor will wrap it so that
          HttpResponse.read(self) will return the full string.
      status: The HTTP status code as an int. Example: 200, 201, 404.
      reason: The HTTP status message which follows the code. Example:
          OK, Created, Not Found
      headers: A dictionary containing the HTTP headers in the server's
          response. A common header in the response is Content-Length.
    """
    if body:
      if hasattr(body, 'read'):
        self._body = body
      else:
        self._body = StringIO.StringIO(body)
    else:
      self._body = None
    if status is not None:
      self.status = int(status)
    else:
      self.status = None
    self.reason = reason
    self._headers = headers or {}

  def getheader(self, name, default=None):
    """Returns the value of header 'name', or 'default' if it is unset."""
    if name in self._headers:
      return self._headers[name]
    else:
      return default

  def read(self, amt=None):
    """Reads the response body, or at most 'amt' bytes of it.

    Bug fix: the original condition was 'if not amt', so read(0) returned
    the entire remaining body instead of an empty string as
    httplib.HTTPResponse.read does.
    """
    if amt is None:
      return self._body.read()
    else:
      return self._body.read(amt)
class GenericHttpClient(object):
  """Delegates HTTP requests to a wrapped client, adding persistent headers.

  The wrapped object only needs a request(operation, url, data, headers)
  method; every call made through this class merges the instance's
  persistent headers (for example 'User-Agent') into the per-request
  headers before delegating.
  """
  debug = False

  def __init__(self, http_client, headers=None):
    """Stores the delegate client and optional persistent headers.

    Args:
      http_client: Object exposing a request method which performs the
          actual HTTP request.
      headers: Dictionary of HTTP headers to include in every request.
    """
    self.http_client = http_client
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    merged_headers = dict(self.headers)
    merged_headers.update(headers or {})
    return self.http_client.request(
        operation, url, data=data, headers=merged_headers)

  def get(self, url, headers=None):
    """Issues an HTTP GET."""
    return self.request('GET', url, headers=headers)

  def post(self, url, data, headers=None):
    """Issues an HTTP POST with the given body."""
    return self.request('POST', url, data=data, headers=headers)

  def put(self, url, data, headers=None):
    """Issues an HTTP PUT with the given body."""
    return self.request('PUT', url, data=data, headers=headers)

  def delete(self, url, headers=None):
    """Issues an HTTP DELETE."""
    return self.request('DELETE', url, headers=headers)
class GenericToken(object):
  """Represents an Authorization token to be added to HTTP requests.

  Some Authorization headers include calculated fields (for example,
  digital signatures) derived from the parameters of the HTTP request,
  so a token is responsible for signing the request and attaching the
  Authorization header itself. This generic base adds nothing.
  """

  def perform_request(self, http_client, operation, url, data=None,
                      headers=None):
    """Issues the request unchanged; GenericToken sets no Authorization."""
    return http_client.request(operation, url, data=data, headers=headers)

  def valid_for_scope(self, url):
    """Reports whether this token authorizes access to the given URL.

    The generic token never adds an auth header, so it is valid for no
    scope at all.
    """
    return False
| {
"content_hash": "fd73fc354fae69c7e09c7b25943c326c",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 32.72142857142857,
"alnum_prop": 0.6902423051735429,
"repo_name": "HadiOfBBG/pegasusrises",
"id": "d8e8495aa1bf0923eb0d2e694a9770118bb98138",
"size": "5182",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "atom/http_interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5995"
},
{
"name": "CSS",
"bytes": "399268"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "104324"
},
{
"name": "JavaScript",
"bytes": "3316602"
},
{
"name": "Python",
"bytes": "4274594"
}
],
"symlink_target": ""
} |
"""Plot relative-error convergence curves for example 1.

Reads out/ex1.txt (one row per maximum expansion order N = 1..25) and
produces two semilog figures comparing ellipsoidal vs spherical harmonic
expansions of the Coulomb potential, inside and outside the reference
ellipsoid. EPS figures are written to figs/ and shown interactively.
"""
# Wildcard imports (`from numpy import *`, `from time import *`) and the
# unused time/numpy.random/cm/Axes3D/Ellipse imports have been replaced by
# the two imports this script actually uses.
from numpy import loadtxt
import matplotlib.pyplot as plt

# Columns of out/ex1.txt:
#   0: ellipsoidal expansion, inside the reference ellipsoid (should converge)
#   1: spherical expansion, inside (should diverge)
#   2: ellipsoidal expansion, outside (should converge)
#   3: spherical expansion, outside (should converge, more slowly)
D = loadtxt('out/ex1.txt')
x = range(1, 26)  # maximum expansion orders N

# Figure 0: convergence inside the reference ellipsoid.
fig0 = plt.figure(0)
fig0.set_size_inches(5, 2.5, forward=True)
plt.rcParams.update({"font.size": 8})
plt.semilogy(x, D[:, 0], linestyle='-', color='black', linewidth=2)
plt.semilogy(x, D[:, 1], linestyle='--', color='black', linewidth=2)
plt.legend(['Ellipsoidal expansion', 'Spherical expansion'])
plt.title('Coulomb Convergence Inside Reference Ellipsoid')
plt.xlabel('Maximum expansion order (N)')
plt.ylabel('Relative error')
plt.tight_layout()
plt.savefig('figs/CoulCompInside.eps', format='eps', dpi=2000)

# Figure 1: convergence outside the reference ellipsoid.
fig1 = plt.figure(1)
fig1.set_size_inches(5, 2.5, forward=True)
plt.rcParams.update({"font.size": 8})
plt.semilogy(x, D[:, 2], linestyle='-', color='black', linewidth=2)
plt.semilogy(x, D[:, 3], linestyle='--', color='black', linewidth=2)
plt.legend(['Ellipsoidal expansion', 'Spherical expansion'])
plt.xlabel('Maximum expansion order (N)')
plt.title('Coulomb Convergence Outside Reference Ellipsoid')
plt.ylabel('Relative error')
plt.tight_layout()
plt.savefig('figs/CoulCompOutside.eps', format='eps', dpi=2000)

plt.show()
| {
"content_hash": "caaa40e7a4848f223db7959795b175cf",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 67,
"avg_line_length": 37.073170731707314,
"alnum_prop": 0.7421052631578947,
"repo_name": "tom-klotz/ellipsoid-solvation",
"id": "a53121636dc846a414d67cc05aee7f020143e0fb",
"size": "1520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/examples/ex1plot.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "358893"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Matlab",
"bytes": "191"
},
{
"name": "Python",
"bytes": "13996"
}
],
"symlink_target": ""
} |
import json
import yaml
from conda_build_missing import build, find_all_recipes, sort_dependency_order
import os
from conda_build.config import config as conda_bld_config
import conda_build
import conda.api
from conda.utils import url_path
from contextlib import contextmanager
import logging
import conda_manifest.core_vn_matrix as vn_matrix
import conda_manifest.config
import argparse
import conda_manifest.config
import conda.config
from conda.api import get_index
from conda_manifest.env_recipes import load_envs
from conda_manifest.sources import load_sources
stdoutlog = logging.getLogger('conda-manager.stdoutlog')
def build_null(meta):
    """Build *meta* with its build script replaced by a no-op echo."""
    build_section = meta.meta.setdefault('build', {})
    build_section['script'] = 'echo "Hello!"'
    build(meta, test=False)
def resolve_index(src_indices, env_sources):
    """
    Given the indices for all sources, produce a single index with
    packages filtered according to the sources specification.

    Levels earlier in ``env_sources`` take priority: once a package
    *name* has been provided by an earlier level, later levels cannot
    contribute any distribution of that name.  Two sources within the
    same level must not provide the same distribution file.
    """
    resolved = {}
    handled_names = []
    for level in env_sources:
        names_this_level = []
        for source in level:
            for dist_name, pkg_info in src_indices[source].items():
                pkg_name = pkg_info['name']
                if pkg_name in handled_names:
                    # A higher-priority level already supplied this
                    # package name - ignore this distribution entirely.
                    continue
                names_this_level.append(pkg_name)
                if dist_name in resolved:
                    raise ValueError('Conflicting package information for {} '
                                     'from {} and {}.'
                                     ''.format(dist_name,
                                               resolved[dist_name]['channel'],
                                               pkg_info['channel']))
                # Record a copy of the package info, tagged with its source.
                resolved[dist_name] = dict(pkg_info, source=source)
        handled_names.extend(names_this_level)
    return resolved
@contextmanager
def fixed_get_index(desired_index):
    """
    No matter what, get_index should return the desired_index, and nothing else.

    Monkey-patches both ``conda.api.get_index`` and
    ``conda_build.build.get_index`` for the lifetime of the context.
    The try/finally guarantees the originals are restored even when the
    managed block raises (the previous version left the patch in place
    on error).
    """
    orig_get_index = conda.api.get_index

    def new_get_index(*args, **kwargs):
        # Ignore whatever arguments callers pass; always hand back the
        # fixed index.
        return desired_index

    conda.api.get_index = conda_build.build.get_index = new_get_index
    try:
        yield
    finally:
        conda.api.get_index = conda_build.build.get_index = orig_get_index
@contextmanager
def conda_build_croot_for_source(source_name):
    """
    Change the conda build build_root/croot for the lifetime of the context
    manager, pointing it at the distribution directory of *source_name*.

    Both ``croot`` and the derived ``bldpkgs_dir`` are switched.  The
    try/finally guarantees they are restored even when the managed block
    raises (the previous version left them modified on error).
    """
    orig_build_root = conda_bld_config.croot
    conda_bld_config.croot = conda_manifest.config.src_distributions_dir(source_name)
    conda_bld_config.bldpkgs_dir = os.path.join(conda_bld_config.croot,
                                                conda.config.subdir)
    try:
        yield
    finally:
        conda_bld_config.croot = orig_build_root
        conda_bld_config.bldpkgs_dir = os.path.join(conda_bld_config.croot,
                                                    conda.config.subdir)
def compute_source_indices(env_sources):
    """Generate a dictionary mapping source name to source index.

    For each source, the index covers only the packages already built
    under that source's build root; sources with no build directory get
    an empty index.
    """
    indices = {}
    for level in env_sources:
        for source_name in level:
            with conda_build_croot_for_source(source_name):
                if not os.path.exists(conda_bld_config.bldpkgs_dir):
                    # Nothing has been built for this source yet.
                    indices[source_name] = {}
                else:
                    # Index just the packages built under this croot.
                    src_urls = [url_path(conda_bld_config.croot)]
                    indices[source_name] = conda.api.get_index(src_urls,
                                                               prepend=False)
    return indices
if __name__ == '__main__':
    # Command-line entry point: for every requested environment, build each
    # recipe source-by-source, skipping distributions that already exist.
    parser = argparse.ArgumentParser("Pull together the environment recipes "
                                     "directory.")
    parser.add_argument("--sources", default='sources.yaml',
                        help="Location of sources.yaml")
    parser.add_argument("--envs", nargs='+', default=['env.specs/*.yaml'],
                        help="Glob pattern of environment yamls.")
    # NOTE(review): '1 or ...' always takes the hard-coded-arguments branch,
    # ignoring the real command line - looks like a debugging leftover;
    # confirm before relying on this script in production.
    if 1 or conda_manifest.config.DEBUG:
        args = parser.parse_args(['--envs', '../env.specs/lts.yaml',
                                  '--sources', '../sources.yaml'])
    else:
        args = parser.parse_args()
    sources = load_sources(args.sources)
    envs = load_envs(args.envs)
    for env in envs:
        env_sources = env['sources']
        orig_build_root = conda_bld_config.croot
        # Ensure every source has an indexed (possibly empty) package
        # directory, and register each one as a conda channel.
        channels = []
        for sources in env_sources:
            for source_name in sources:
                source_build_directory = conda_manifest.config.src_distributions_dir(source_name)
                s = os.path.join(source_build_directory, conda.config.subdir)
                if not os.path.exists(s):
                    os.makedirs(s)
                import conda_build.index
                conda_build.index.update_index(s)
                channels.append(url_path(source_build_directory))
        conda.config.rc['channels'] = channels
        print 'Channels:', channels
        # Discover the recipes for this environment and order them so that
        # dependencies are built before their dependents.
        env_recipe_dir = conda_manifest.config.env_recipes_dir(env=env)
        metas = list(find_all_recipes([env_recipe_dir]))
        stdoutlog.debug('Found the following recipes:\n{}\n-------------------'
                        ''.format('\n'.join(meta.name() for meta in metas)))
        metas = sort_dependency_order(metas)
        stdoutlog.debug('Metas sorted into the following order:\n{}\n---------'
                        ''.format('\n'.join(meta.name() for meta in metas)))
        src_index = {}
        src_index = compute_source_indices(env_sources)
        index = resolve_index(src_index, env_sources)
        r = conda.resolve.Resolve(index)
        for meta in metas:
            stdoutlog.debug('Starting to look at: ', meta.name())
            # source.json records which source this recipe came from.
            with open(os.path.join(meta.path, 'source.json'), 'r') as fh:
                source = json.load(fh)
            source_name = source['name']
            version_matrix = vn_matrix.special_case_version_matrix(meta, index)
            # version_matrix = vn_matrix.filter_cases(version_matrix, index, env['packages'])
            for case in vn_matrix.conda_special_versions(meta, index, version_matrix):
                # Build only distributions absent from this source's index.
                if meta.dist() + '.tar.bz2' not in src_index[source_name]:
                    stdoutlog.info('Building {} from {}.\n'
                                   ''.format(meta.name(), source_name))
                    with conda_build_croot_for_source(source_name):
                        print conda_bld_config.croot
                        print conda.config.rc['channels']
                        print 'BUILDING IN:', conda_bld_config.croot
                        # Tee the build output into a per-recipe log file.
                        with conda_manifest.config.pipe_check_call(os.path.join(meta.path,
                                             'build.{}.log'.format(conda.config.subdir))):
                            # with fixed_get_index(index):
                            build(meta, channels, test=True)
                            # src_index = compute_source_indices(env_sources)
                            # index = resolve_index(src_index, env_sources)
                            # with fixed_get_index(index):
                            #     build(meta, channels, test=True)
                else:
                    stdoutlog.info('Not building {} from {}, as it has already been '
                                   'built.\n'.format(meta.name(), source_name))
| {
"content_hash": "fa79e81846fd64c71aaaa4025b37c2cd",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 124,
"avg_line_length": 38.95336787564767,
"alnum_prop": 0.5597233306730514,
"repo_name": "marqh/conda-manifest",
"id": "d6657cff4f76d8c2c791bd3da898136c9afc9f09",
"size": "7518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_manifest/build_recipes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "42643"
}
],
"symlink_target": ""
} |
import pandas
from nltools.file_reader import onsets_to_dm
import os.path
import csv
import pickle
import warnings
from regress_rois import *
from scipy.stats import ttest_1samp, t, norm
import datetime
import time
# --- Run configuration ------------------------------------------------------
# Version stamps used to locate the matching onset files and ROI time-series
# extractions, plus a timestamp that tags every output file from this run.
self_onset_file_version='20180220T031755'
action_timestamp=datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%dT%H%M%S')
roi_data_version = '20180720'
#server (older freesurfer-based paths, kept for reference)
# roi_data_dir = '/expdata/bensmith/joint-modeling/data/msm/freesurfer/reversallearning/segstats_out/'
# roi_event_data_dir='/expdata/bensmith/joint-modeling/data/msm/freesurfer/reversallearning/roi_event_data'
# self_onset_dir='/expdata/bensmith/joint-modeling/data/msm//reversallearning/runfiles'
# run_id_map_path=
# Active (server) paths for the FSL ROI pipeline.
roi_data_dir='/expdata/bensmith/joint-modeling/data/msm/reversallearning/roits/'
roi_event_data_dir='/expdata/bensmith/joint-modeling/data/msm/reversallearning/roi_event_data/fsl_roi_event_data'
run_id_map_path='/expdata/bensmith/joint-modeling/data/msm/freesurfer/functional_preprocessed/run_id_key.csv'
self_onset_dir='/expdata/bensmith/joint-modeling/data/msm/reversallearning/runfiles'
motion_param_dir = "/expdata/xfgavin/MSM"
motion_param_file = 'mc/prefiltered_func_data_mcf.par'
#local equivalents (uncomment to run on a workstation)
#roi_data_dir='/Users/benjaminsmith/Dropbox/joint-modeling/data/fsl_roi_local/'
#roi_event_data_dir='/Users/benjaminsmith/Dropbox/joint-modeling/data/fsl_roi_event_data'
#run_id_map_path='/Users/benjaminsmith/Dropbox/joint-modeling/data/freesurfer-testing-local/run_id_key.csv'
#self_onset_dir='/Users/benjaminsmith/Dropbox/joint-modeling/data/freesurfer-testing-local/runfiles'
#motion_param_dir = "/expdata/xfgavin/MSM"
#motion_param_file = 'mc/prefiltered_func_data_mcf.par'
# #this is a table containing all the names of all the freesurfer items.
# freesurfer_LUT_table_path="/Users/benjaminsmith/Dropbox/joint-modeling/data/freesurfer-testing-local/FreeSurferColorLUT.txt"
# how to map rois using the two numbering systems here?
# Get the csv file mapping subjects/runs to data locations.
#run_id_map=pandas.read_csv('/expdata/bensmith/joint-modeling/data/msm/freesurfer/functional_preprocessed/run_id_key.csv')
# Get a list of the folders (one per ROI) we'll be looking at.
roi_folders=os.listdir(roi_data_dir+roi_data_version)
# freesurfer_LUT_df=pandas.read_table(freesurfer_LUT_table_path,
#                                     delim_whitespace=True,comment="#",
#                                     names=["RegionId","Label","R","G","B","A"])
# Table of subject/run ids driving the main loop below.
run_id_map=pandas.read_csv(run_id_map_path)
# Main loop: for every subject/run listed in the run-id map, regress the
# convolved task-event design (plus drift and motion regressors) against
# each ROI time series, then write the per-event statistics out as CSVs.
for idx, row in run_id_map.iterrows():
    #idx=0
    #row=run_id_map[0,:]
    sid=row['subid']
    #freesurferRunId=row['freesurferRunId']
    runid=row['runid'] #this is just the simple runid which is accessible in the behavioral data.
    # Map the motivation label to the long name used in onset filenames (m)
    # and the short name used in ROI/motion filenames (m_short).
    if row['motivation'] == 'Reward':
        m="Reward"
        m_short = "Reward"
    elif row['motivation'] == 'Punish':
        m="Punishment"
        m_short = "Punish"
    else: raise Exception("Unrecognized motivation type")
    onset_file=self_onset_dir + '/runfiledetail'+self_onset_file_version+'_s'+str(sid)+'_' + m.lower() + '_r'+str(runid)+'.txt'
    print(onset_file)
    #concatenate all the ROIs for this subject/run from the files
    # (one column per ROI folder).
    sub_r_m_roi_dt = pandas.concat([pandas.read_table(roi_data_dir+roi_data_version+'/'+folder +'/s'+str(sid)+"r" + str(runid ) + "_" + m_short + ".txt",
                                                      delim_whitespace=True, header=None,names=[folder])
                                    for folder in roi_folders],axis=1)
    #sub_r_m_roi_dt=pandas.read_csv(roi_file,delim_whitespace=True, header=None)
    #let's get the sum file as well which gives us ROI names.
    # sub_r_m_roi_sum_dt = pandas.read_table(
    #     roi_data_dir + roi_data_version + '/sub' + str(sid) + '_run' + str(freesurferRunId )+ '_sum.dat',
    #     delim_whitespace=True, header=None, comment='#',
    #     names=["Index","SegId","NVoxels","Volume_mm3","StructName","Mean","StdDev","Min","Max","Range"])
    # great and now add the labels to the summary file.
    # sub_r_m_roi_sum_dt=sub_r_m_roi_sum_dt.merge(freesurfer_LUT_df[["RegionId","Label"]],how='left',left_on='SegId',right_on='RegionId')
    colnames = sub_r_m_roi_dt.columns
    #onset_file='/Users/benjaminsmith/Dropbox/joint-modeling/data/runfiledetail20171020T012224_s214_reward_r1.txt'
    # Only process runs for which a behavioral onset file exists.
    if (os.path.isfile(onset_file)):
        print('we have a match; ' + onset_file)
        print("importing onsets")
        # import the onset file into a design matrix (one column per event).
        onsets = onsets_to_dm(
            onset_file,
            TR=2,
            runLength=sub_r_m_roi_dt.shape[0] #we don't know this for sure. But in this instance it doesn't matter.
            # import the run_length data from the data.
        )
        onsets.sampling_rate = 2
        # Convolve each event regressor with the haemodynamic response.
        onsets_convolved = onsets.convolve()
        #removing columns with nothing in them.
        for c in onsets_convolved.columns:
            if sum(onsets_convolved.loc[:, c]) <= 0:
                print('deleting ' + str(c))
                del onsets_convolved[c]
        rowcount = onsets_convolved.__len__()
        if rowcount != 360:
            warnings.warn("Just a friendly FYI: expected number of rows is 360 but this subject had " + str(
                rowcount) + ". Probably this subject got cut off the task half-way through.")
        # high pass filters: polynomial drift regressors.
        onsets_convolved['linearterm'] = range(1, rowcount + 1)
        onsets_convolved['quadraticterm'] = [pow(x, 2) for x in onsets_convolved['linearterm']]
        onsets_convolved['cubicterm'] = [pow(x, 3) for x in onsets_convolved['linearterm']]
        #mean center
        onsets_convolved['ones'] = [1] * rowcount
        #every row in the onsets files is timepoint; every column is a specific event with the mapping of that
        #column's expected activity
        #In principle, we can treat the data in sub_r_m_roi_dt just as we could a 4D nii.gz fMRI time series
        #it's not exactly like that though because it is a pandas dt and not a nifti object so
        #we might or might not be able to use the same functions. Let's see.
        # Append the six motion parameters as nuisance regressors.
        motion_param_path = motion_param_dir + "/sub" + str(sid) + "/analysis/ReversalLearning_" + m_short + "_run" + str(runid) + "_pre.feat/" + motion_param_file
        #print motion_param_path
        motion_params = pandas.read_table(motion_param_path, names=["Motion" + str(i) for i in range(1, 7)],
                                          delim_whitespace=True)
        onsets_convolved=pandas.concat([onsets_convolved, motion_params],axis=1)
        # Regress the full design matrix against every ROI time series.
        roi_data = regress_rois(onsets_convolved,sub_r_m_roi_dt)
        #so this has given us a beta value with a row for each event, and a column for each ROI. that's what we want!
        #we could have just multiplied each ROI time course with the convolution...in other words, done the dot product
        #but without the regression equation. However the regression equation buys us a few nice things,
        # including throwing in the linear, quadratic, cubic, and ones terms.
        # it also gives us a beta value, which is a measure of the degree to which each convolution (X) *predicts*
        # activity in each ROI (Y).
        #normally the code would be measuring the degree to which each convolution predicts activity in each
        #voxel (allowing for all the others, which is important and helpful) but we have replaced voxels here with ROIs.
        #I think this is what we want.
        roi_dfs={}
        roi_output_partialpath =roi_event_data_dir + '/' + action_timestamp + 'sub' + str(sid) + '_' + m.lower() + '_r' + str(runid) + '_'
        #give these row and column names.
        # NOTE(review): 'pd' is not imported in this file's visible imports;
        # presumably supplied via 'from regress_rois import *' - verify.
        for event_by_roi_t in ['beta_vals','t_vals','p_vals']:
            roi_dfs[event_by_roi_t]= pd.DataFrame(data=roi_data[event_by_roi_t],
                                                  columns = colnames)
            roi_dfs[event_by_roi_t]['EventName']=pd.Series(onsets_convolved.columns)
            roi_dfs[event_by_roi_t].set_index('EventName',inplace=True)
            # NOTE(review): the three statements above are repeated verbatim
            # below; the first pass appears redundant - confirm before removing.
            roi_dfs[event_by_roi_t] = pd.DataFrame(data=roi_data[event_by_roi_t],
                                                   columns=colnames)
            roi_dfs[event_by_roi_t]['EventName'] = pd.Series(onsets_convolved.columns)
            roi_dfs[event_by_roi_t].set_index('EventName', inplace=True)
            roi_dfs[event_by_roi_t].to_csv(roi_output_partialpath + event_by_roi_t + '.csv')
        # Scalar-per-ROI outputs (degrees of freedom, residual sigma).
        for roi_v in ['df_vals', 'sigma_vals']:
            roi_dfs[roi_v] = pd.DataFrame(roi_data[roi_v])
            roi_dfs[roi_v].to_csv(roi_output_partialpath + roi_v + '.csv')
        #we ddn't get the column and row names in but I don't care.
        roi_dfs['residual_vals']=pd.DataFrame(data=roi_data['residual_vals'])
        roi_dfs['residual_vals'].to_csv(roi_output_partialpath + 'residual_vals' + '.csv')
#what's the next thing we'll want to do with these things????
#I think once we have the betas stored for each event, then we will want to connect the events to the event records in R.
| {
"content_hash": "5eff31400fb855892bbdf330ad242c16",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 163,
"avg_line_length": 49.916666666666664,
"alnum_prop": 0.6558708959376739,
"repo_name": "bjsmith/reversallearning",
"id": "f312199baedf4d368ba98df8e4e8424a64757ff9",
"size": "9185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neural_data/get_all_run_fsl_rois.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3054"
},
{
"name": "C++",
"bytes": "161"
},
{
"name": "CSS",
"bytes": "5603"
},
{
"name": "HTML",
"bytes": "51177018"
},
{
"name": "JavaScript",
"bytes": "84962"
},
{
"name": "Jupyter Notebook",
"bytes": "144"
},
{
"name": "Matlab",
"bytes": "83615"
},
{
"name": "Python",
"bytes": "81217"
},
{
"name": "R",
"bytes": "12440712"
},
{
"name": "Rebol",
"bytes": "3791"
},
{
"name": "Shell",
"bytes": "15103"
},
{
"name": "Stan",
"bytes": "2442341"
},
{
"name": "TeX",
"bytes": "361947"
}
],
"symlink_target": ""
} |
from ctypes import byref
# Other GDAL imports.
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.feature import Feature
from django.contrib.gis.gdal.field import FIELD_CLASSES
from django.contrib.gis.gdal.geometries import OGRGeomType
from django.contrib.gis.gdal.srs import SpatialReference
# GDAL ctypes function prototypes.
from django.contrib.gis.gdal.prototypes.ds import \
get_extent, get_fd_geom_type, get_fd_name, get_feature, get_feature_count, \
get_field_count, get_field_defn, get_field_name, get_field_precision, \
get_field_width, get_field_type, get_layer_defn, get_layer_srs, \
get_next_feature, reset_reading
from django.contrib.gis.gdal.prototypes.srs import clone_srs
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_L_* routines are relevant here.
class Layer(object):
    "A class that wraps an OGR Layer, needs to be instantiated from a DataSource object."

    #### Python 'magic' routines ####
    def __init__(self, layer_ptr):
        "Needs a C pointer (Python/ctypes integer) in order to initialize."
        self._ptr = None # Initially NULL
        if not layer_ptr:
            raise OGRException('Cannot create Layer, invalid pointer given')
        self._ptr = layer_ptr
        # Cache the layer's feature definition; every Feature built from
        # this layer needs it.
        self._ldefn = get_layer_defn(self._ptr)

    def __getitem__(self, index):
        "Gets the Feature at the specified index (an int or a slice)."
        if not isinstance(index, (slice, int)):
            raise TypeError
        end = self.num_feat
        if isinstance(index, int):
            # An integer index was given
            if index < 0:
                # Negative indices count back from the end, per the usual
                # Python sequence semantics.  (Previously this computed
                # `end - index`, which maps e.g. -1 to end+1 and always
                # raised for valid negative indices.)
                index = end + index
            if index < 0 or index >= end:
                raise OGRIndexError('index out of range')
            return self._make_feature(index)
        else:
            # A slice was given
            start, stop, stride = index.indices(end)
            return [self._make_feature(offset) for offset in range(start, stop, stride)]

    def __iter__(self):
        "Iterates over each Feature in the Layer."
        # ResetReading() must be called before iteration is to begin.
        reset_reading(self._ptr)
        for i in range(self.num_feat):
            yield Feature(get_next_feature(self._ptr), self._ldefn)

    def __len__(self):
        "The length is the number of features."
        return self.num_feat

    def __str__(self):
        "The string name of the layer."
        return self.name

    def _make_feature(self, offset):
        "Helper routine for __getitem__ that makes a feature from an offset."
        return Feature(get_feature(self._ptr, offset), self._ldefn)

    #### Layer properties ####
    @property
    def extent(self):
        "Returns the extent (an Envelope) of this layer."
        env = OGREnvelope()
        get_extent(self._ptr, byref(env), 1)
        return Envelope(env)

    @property
    def name(self):
        "Returns the name of this layer in the Data Source."
        return get_fd_name(self._ldefn)

    @property
    def num_feat(self, force=1):
        "Returns the number of features in the Layer."
        # NOTE: properties are accessed without arguments, so `force`
        # always takes its default value of 1 here.
        return get_feature_count(self._ptr, force)

    @property
    def num_fields(self):
        "Returns the number of fields in the Layer."
        return get_field_count(self._ldefn)

    @property
    def geom_type(self):
        "Returns the geometry type (OGRGeomType) of the Layer."
        return OGRGeomType(get_fd_geom_type(self._ldefn))

    @property
    def srs(self):
        "Returns the Spatial Reference used in this Layer, or None if unset."
        try:
            ptr = get_layer_srs(self._ptr)
            return SpatialReference(clone_srs(ptr))
        except SRSException:
            return None

    @property
    def fields(self):
        """
        Returns a list of string names corresponding to each of the Fields
        available in this Layer.
        """
        return [get_field_name(get_field_defn(self._ldefn, i))
                for i in xrange(self.num_fields) ]

    @property
    def field_types(self):
        """
        Returns a list of the types of fields in this Layer.  For example,
        the list [OFTInteger, OFTReal, OFTString] would be returned for
        an OGR layer that had an integer, a floating-point, and string
        fields.
        """
        return [FIELD_CLASSES[get_field_type(get_field_defn(self._ldefn, i))]
                for i in xrange(self.num_fields)]

    @property
    def field_widths(self):
        "Returns a list of the maximum field widths for the features."
        return [get_field_width(get_field_defn(self._ldefn, i))
                for i in xrange(self.num_fields)]

    @property
    def field_precisions(self):
        "Returns the field precisions for the features."
        return [get_field_precision(get_field_defn(self._ldefn, i))
                for i in xrange(self.num_fields)]

    #### Layer Methods ####
    def get_fields(self, field_name):
        """
        Returns a list containing the given field name for every Feature
        in the Layer.

        Raises OGRException if the field name is not present in this layer.
        """
        if not field_name in self.fields:
            raise OGRException('invalid field name: %s' % field_name)
        return [feat.get(field_name) for feat in self]

    def get_geoms(self, geos=False):
        """
        Returns a list containing the OGRGeometry for every Feature in
        the Layer; with geos=True the geometries are converted to
        GEOSGeometry objects instead.
        """
        if geos:
            # Imported lazily to avoid a hard dependency on GEOS.
            from django.contrib.gis.geos import GEOSGeometry
            return [GEOSGeometry(feat.geom.wkb) for feat in self]
        else:
            return [feat.geom for feat in self]
| {
"content_hash": "04528aeda974ce0c8a833b61a33fdc8f",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 89,
"avg_line_length": 36.125786163522015,
"alnum_prop": 0.6241295264623955,
"repo_name": "paulsmith/geodjango",
"id": "a41b28819be425330eba954bed17d8fd41820f0a",
"size": "5769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/gdal/layer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "71605"
},
{
"name": "Python",
"bytes": "3433375"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class Tox(TestCommand):
    """setuptools ``test`` command that delegates the test run to tox."""

    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Raw --tox-args string; parsed lazily in run_tests.
        self.tox_args = None

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import tox
        import shlex
        tox_args = shlex.split(self.tox_args) if self.tox_args else self.tox_args
        sys.exit(tox.cmdline(args=tox_args))
# Distribution metadata for the django-statsd-influx package.
setup(
    name='django-statsd-influx',
    packages=['influx'],
    version='0.1.7',
    description='Django Statsd Influx Client ',
    author='Jure Ham, Zemanta',
    author_email='jure.ham@zemanta.com',
    url='https://github.com/Zemanta/django-statsd-influx',
    download_url='https://github.com/Zemanta/django-statsd-influx/tarball/0.1',
    keywords=['statsd', 'influx', 'influxdb', 'django'],
    install_requires=[
        'statsd==3.2.1',
        'decorator==4.0.11',
    ],
    tests_require=['tox', 'virtualenv', 'statsd==3.2.1', 'decorator==4.0.11'],
    # Route `python setup.py test` through the Tox command defined above.
    cmdclass={'test': Tox},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
)
| {
"content_hash": "4bbe02192943ccab36c49d9b7b7c82ee",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 30.46153846153846,
"alnum_prop": 0.6117424242424242,
"repo_name": "Zemanta/django-statsd-influx",
"id": "037800585f3b803fe39cceef93430b6da4a9c035",
"size": "1584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7102"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.