| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from unittest import skip
from nose.tools import *
from .context import ask
from .fixtures.requests import TEST_FULL_REQUEST_DICT, TEST_SPARSE_REQUEST_DICT
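# The fixture dicts are imported above but not shown in this file. Judging from the
# assertions below, TEST_FULL_REQUEST_DICT is expected to describe an IntentRequest
# named "YesIntent" with slots example1=value1 / example2=value2 plus userId,
# accessToken and sessionId fields, while TEST_SPARSE_REQUEST_DICT is expected to
# omit the intent, access token and slots.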
class TestStandardRequest(object):
def setUp(self):
self.example = ask.Request(TEST_FULL_REQUEST_DICT)
def tearDown(self):
self.example = None
def test_request_stores_request_dict(self):
assert_equal(self.example.request, TEST_FULL_REQUEST_DICT)
def test_request_stores_metadata(self):
metadata = {'cute': 'puppy'}
r = ask.Request(TEST_FULL_REQUEST_DICT, metadata=metadata)
assert_equal(r.metadata, metadata)
def test_request_metadata_is_blank_if_not_provided(self):
assert_equal(self.example.metadata, {})
def test_request_returns_request_type(self):
req_type = self.example.request_type()
assert_equal(req_type, 'IntentRequest')
def test_request_returns_intent_name(self):
intent_name = self.example.intent_name()
assert_equal(intent_name, 'YesIntent')
def test_request_is_intent(self):
res = self.example.is_intent()
assert_true(res)
def test_request_returns_user_id(self):
user_id = self.example.user_id()
assert_equal(user_id, "amzn1.account.AGBATYSC32Y2QVDQKOWJUUJNEYFA")
def test_request_returns_access_token(self):
token = self.example.access_token()
assert_equal(token, "fillertoken-fix-later")
def test_request_returns_session_id(self):
session_id = self.example.session_id()
assert_equal(session_id, "SessionId.d461672c-2997-4d9d-9a8c-a67834acb9aa")
def test_request_returns_slot_value(self):
val1 = self.example.get_slot_value("example1")
val2 = self.example.get_slot_value("example2")
assert_equal(val1, "value1")
assert_equal(val2, "value2")
def test_request_returns_slot_names(self):
names = self.example.get_slot_names()
assert_items_equal(names, ["example1", "example2"])
def test_request_returns_slot_map(self):
slot_map = self.example.get_slot_map()
expected = {'example1': 'value1', 'example2': 'value2'}
assert_equal(slot_map, expected)
def test_request_slots_property_assigned_on_init(self):
slot_map = self.example.get_slot_map()
slots = self.example.slots
assert_equal(slots, slot_map)
assert_is_not_none(slots)
class TestSparseRequest(object):
def setUp(self):
self.example = ask.Request(TEST_SPARSE_REQUEST_DICT)
def tearDown(self):
self.example = None
def test_intent_name_with_no_intent(self):
assert_is_none(self.example.intent_name())
def test_is_intent_returns_False_with_no_intent(self):
assert_false(self.example.is_intent())
def test_access_token_returns_None(self):
assert_is_none(self.example.access_token())
def test_slot_value_returns_None(self):
assert_is_none(self.example.get_slot_value("example1"))
def test_slot_names_returns_empty_list(self):
assert_equal(self.example.get_slot_names(), [])
def test_slot_map_returns_empty_dict(self):
assert_equal(self.example.get_slot_map(), {})
class TestEmptyRequest(object):
#@raises(KeyError)
@skip('Unsure proper functionality. Pass or raise better error?')
def test_empty_request(self):
ask.Request({})
|
{
"content_hash": "8f780a3213999a1152f2929244eba296",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 82,
"avg_line_length": 29.128205128205128,
"alnum_prop": 0.6616784037558685,
"repo_name": "bhairavmehta95/flashcard-helper-alexa-skill",
"id": "58d4c03e30d69ccd53fbfb1819941f4811a7be5c",
"size": "3408",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6373"
},
{
"name": "JavaScript",
"bytes": "6392"
},
{
"name": "Python",
"bytes": "14180413"
},
{
"name": "Shell",
"bytes": "3530"
}
],
"symlink_target": ""
}
|
'''
@author: YYK
'''
import shell
import ssh
import os.path
import log
import subprocess
import lock
import time
import json
logger = log.get_logger(__name__)
class SaltError(Exception):
'''salt error'''
def prepare_salt_state(state_path, salt_state_path='/srv/salt'):
try:
subprocess.call(['salt', '--version'])
except Exception as e:
print "Execute `salt --version` failed. Probably there isn't salt installed"
raise e
if not os.path.exists(salt_state_path):
os.makedirs(salt_state_path, 0755)
shell.call('rm -rf %s' % os.path.join(salt_state_path, os.path.basename(state_path)))
shell.call('cp -r %s %s' % (state_path, salt_state_path))
def is_salt_failed(salt_json_output):
json_data = json.loads(salt_json_output)
if isinstance(json_data, list):
return True
if isinstance(json_data, dict):
for value in json_data.values():
if isinstance(value, dict):
for item in value.values():
if item.has_key('result'):
if item['result'] == False:
return True
elif value == False:
return True
elif isinstance(value, list):
return True
return False
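# Illustrative output shapes handled above (placeholder values, not captured from a
# real run): a passing highstate looks like
#   {"minion-id": {"<state id>": {"result": True, ...}}}
# while failures surface either as a nested dict whose 'result' is False, as
# {"minion-id": False} when the minion is unreachable, or as a bare list of error
# strings -- all of which is_salt_failed() reports as True.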
def execute_salt_state(hostname, username, password, state_name, master_name, machine_id=None):
with lock.FileLock(hostname):
ssh.execute('''ip=`env | grep SSH_CLIENT | cut -d '=' -f 2 | cut -d ' ' -f 1`; [ $ip == ::1 ] && ip=127.0.0.1; sed -i "/%s/d" /etc/hosts; sed -i "/$ip/d" /etc/hosts; echo "$ip %s" >> /etc/hosts''' % (master_name, master_name), hostname, username, password)
if not machine_id:
(retcode, machine_id, err) = ssh.execute('cat /sys/class/dmi/id/product_uuid', hostname, username, password, exception_if_error=False)
if not machine_id:
raise SaltError("Can't find machine-id on %s" % hostname)
machine_id = machine_id.strip()
if not wait_for_salt_minion_daemon(machine_id, 1, False):
ssh.execute('which salt-minion; [ $? -ne 0 ] && curl -L http://bootstrap.saltstack.org | sudo sh ;sed -i "/^id/d" /etc/salt/minion; sed -i "/^master/d" /etc/salt/minion; echo "id: %s" >>/etc/salt/minion; echo "master: %s" >> /etc/salt/minion; rm -f /etc/salt/pki/minion/minion_master.pub ; service salt-minion restart' % (machine_id, master_name), hostname, username, password, exception_if_error=False)
wait_for_salt_minion_daemon(machine_id)
print 'salt %s %s' % (machine_id, state_name)
output = shell.call('salt --out=json %s %s' % (machine_id, state_name))
if not is_salt_failed(output):
print '%s' % output
print "salt has deployed %s" % state_name
else:
raise SaltError('salt execution failure: %s' % output)
#need wait for a while for salt_minion to register into master, after its service is restarted.
def wait_for_salt_minion_daemon(salt_minion_id, timeout_times=10, exception=True):
def _salt_ping():
cmd = shell.ShellCmd('salt -t 1 --out=json %s test.ping' % salt_minion_id)
cmd(False)
return cmd.return_code == 0 and cmd.stdout != ''
import time
while timeout_times > 0:
if _salt_ping():
return True
time.sleep(1)
timeout_times -= 1
print 'Wait for salt minion: %s registration to master' % salt_minion_id
else:
print 'Command fail: `salt %s test.ping`' % salt_minion_id
if exception:
raise SaltError('Salt minion daemon: %s failed to register to master, after trying %s times.' % (salt_minion_id, timeout_times))
else:
return False
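# Minimal usage sketch (host, credentials and state name below are placeholders, not
# values from the original project):
#   prepare_salt_state('/path/to/mystate')
#   execute_salt_state('10.0.0.5', 'root', 'secret', 'mystate', 'salt-master.local')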
|
{
"content_hash": "d6ea75a6a04d734a57c2edf2aa838497",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 413,
"avg_line_length": 39.96808510638298,
"alnum_prop": 0.6012776151184456,
"repo_name": "mingjian2049/zstack-utility",
"id": "1297ec94691550d83cdc67f42f8f0c8304d609d8",
"size": "3757",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "zstacklib/zstacklib/utils/salt.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2380992"
},
{
"name": "Shell",
"bytes": "235730"
}
],
"symlink_target": ""
}
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from ..utils import get_user_model
class RegistrationForm(forms.Form):
username = forms.RegexField(
regex=r'^[\w.@+-]+$', max_length=30, label=_('Username'),
error_messages={'invalid': _('The username must contain only letters, '
'numbers and @/./+/-/_ characters.')},
)
email = forms.EmailField(label=_('Email'))
password1 = forms.CharField(label=_('Password'),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_('Password (again)'),
widget=forms.PasswordInput)
def clean(self):
data = self.cleaned_data
if ('password1' in data and 'password2' in data):
if data['password1'] != data['password2']:
raise forms.ValidationError(
_("The two passwords didn't match."),
)
return self.cleaned_data
def save(self):
user = get_user_model().objects.create_user(
self.cleaned_data['username'],
self.cleaned_data['email'],
self.cleaned_data['password1'],
)
user.is_active = False
user.save()
return user
|
{
"content_hash": "9382c7dea76d921de8781c23ec9eaae2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 35.888888888888886,
"alnum_prop": 0.5557275541795665,
"repo_name": "brutasse/django-le-social",
"id": "be234d2e405bad94ac9513fb2ade16f4734149d4",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "le_social/registration/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "456"
},
{
"name": "Python",
"bytes": "36495"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Dec 1 18:52:57 2016
@author: aishwaryakamath
"""
from keras.models import Sequential
from keras.layers import Bidirectional, BatchNormalization
from keras.layers.core import Dense, Activation, Merge, Dropout, Flatten, Reshape
from keras.layers.convolutional import MaxPooling2D, MaxPooling1D
from keras.layers.convolutional import Convolution1D, Convolution2D
class TextCNN(object):
# lstm_hidden_dim, bidirect and filter_size are used below but were missing from the
# signature; the defaults added here are placeholders, not values from the original.
def __init__(self, img_dim=1664, word_dim=300, max_sent_len=26, nb_classes=1000, fc_hidden_dim=2014, dropout=0.5, num_filters=128, lstm_hidden_dim=512, bidirect=True, filter_size=3):
self.img_dim = img_dim
self.word_dim = word_dim
self.max_sent_len = max_sent_len
self.nb_classes = nb_classes
self.lstm_hidden_dim = lstm_hidden_dim
self.fc_hidden_dim = fc_hidden_dim
self.bidirect = bidirect
self.dropout = dropout
self.filter_size=filter_size
self.num_filters=num_filters
def build(self):
self.img_model = Sequential()
self.img_model.add(MaxPooling2D(input_shape=(14, 14, 512)))
self.img_model.add(Flatten())
for i in xrange(3):
self.img_model.add(Dense(self.img_dim, activation='tanh'))
self.img_model.add(BatchNormalization())
self.txt_model = Sequential()
#
#
self.txt_model.add(Convolution1D(128, 3, activation='relu', border_mode='same', input_shape=(self.max_sent_len, self.word_dim)))
# self.txt_model.add(BatchNormalization())
# #self.txt_model.add(Activation('relu'))
self.txt_model.add(Convolution1D(128, 4, activation='relu', border_mode='same'))
# self.txt_model.add(BatchNormalization())
# #self.txt_model.add(Activation('relu'))
self.txt_model.add(Convolution1D(128, 5, activation='relu', border_mode='same'))
# self.txt_model.add(BatchNormalization())
# #self.txt_model.add(Activation('relu'))
self.txt_model.add(MaxPooling1D(pool_length=2, stride=None, border_mode='valid'))
self.txt_model.add(Flatten())
self.model = Sequential()
self.model.add(Merge([self.txt_model, self.img_model], mode='mul', concat_axis=1))
self.model.add(BatchNormalization())
for i in xrange(2):
self.model.add(Dense(self.fc_hidden_dim, init='he_normal', activation='relu'))
self.model.add(BatchNormalization())
self.model.add(Dropout(self.dropout))
self.model.add(Dense(self.nb_classes, activation='softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
self.model.summary()
def fit(self, X_ques, X_img, y, nb_epoch=50, batch_size=50, shuffle=True):
return self.model.fit([X_ques, X_img], y, nb_epoch=nb_epoch, batch_size=batch_size, shuffle=shuffle)
def evaluate(self, X_ques_test, X_im_test, y_test, batch_size=50):
return self.model.evaluate([X_ques_test, X_im_test], y_test, batch_size=batch_size)
def train_on_batch(self, X_ques, X_img, y):
return self.model.train_on_batch([X_ques, X_img], y)
def test_on_batch(self, X_ques_test, X_img_test, y_test):
return self.model.test_on_batch([X_ques_test, X_img_test], y_test)
def save(self):
params = {
'img_dim': self.img_dim,
'word_dim': self.word_dim,
'max_sent_len': self.max_sent_len,
'nb_classes': self.nb_classes,
'lstm_hidden_dim': self.lstm_hidden_dim,
'fc_hidden_dim': self.fc_hidden_dim,
'bidirect': self.bidirect,
'dropout': self.dropout
}
fn = '../models/'+"".join(["{0}={1},".format(k,v) for k,v in params.iteritems()])
open(fn+'.json', 'w').write(self.model.to_json())
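# Minimal usage sketch (array shapes follow the defaults above; the data arrays are
# placeholders, not from the original project):
#   cnn = TextCNN()
#   cnn.build()
#   cnn.fit(X_ques, X_img, y)  # X_ques: (N, 26, 300), X_img: (N, 14, 14, 512), y: (N, 1000)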
|
{
"content_hash": "1a4e96af785e7c97078622a4f342befb",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 136,
"avg_line_length": 42.05555555555556,
"alnum_prop": 0.6277410832232496,
"repo_name": "ashkamath/VQA",
"id": "a394e9fb27dcbcd9d591724ec950f8b1d8d1cb8b",
"size": "3809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VQA/CNN model files/mymodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2194445"
},
{
"name": "Python",
"bytes": "88498"
}
],
"symlink_target": ""
}
|
import os
import sys
import pickle
import pytest
import functools
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key', ['total_elements',
'E_solv_kJ',
'E_coul_kcal',
'E_coul_kJ',
'E_solv_kcal'])
def test_PGB_mut_sensor(key):
results = get_results()
with open('pgbmut.pickle', 'rb') as f:
base_results = pickle.load(f)
assert abs(base_results[key] - results[key]) / abs(base_results[key]) < 1e-12
def test_pgbmut_iterations():
results = get_results()
with open('pgbmut.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results['iterations'] == results['iterations']
@functools.lru_cache(6)
def get_results():
print('Generating results for 1PGBmut example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/1PGBmut_sensor'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/1PGBmut_sensor'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
{
"content_hash": "15c00cd3e4848f13c25c6a89d5401b40",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 106,
"avg_line_length": 31.07843137254902,
"alnum_prop": 0.5488958990536278,
"repo_name": "barbagroup/pygbe",
"id": "f81e0154df93ec55a1ef3840926ef551bcc81533",
"size": "1585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pgbmut.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "217312"
},
{
"name": "C++",
"bytes": "72804"
},
{
"name": "Dockerfile",
"bytes": "2273"
},
{
"name": "Jupyter Notebook",
"bytes": "2311"
},
{
"name": "Python",
"bytes": "566209"
},
{
"name": "TeX",
"bytes": "6758"
}
],
"symlink_target": ""
}
|
from logging import getLogger
from galaxy.util import parse_xml_string
from ..job import (
BaseJobExec,
job_states,
)
log = getLogger(__name__)
argmap = {
"destination": "-q",
"Execution_Time": "-a",
"Account_Name": "-A",
"Checkpoint": "-c",
"Error_Path": "-e",
"Group_List": "-g",
"Hold_Types": "-h",
"Join_Paths": "-j",
"Keep_Files": "-k",
"Resource_List": "-l",
"Mail_Points": "-m",
"Mail_Users": "-M",
"Job_Name": "-N",
"Output_Path": "-o",
"Priority": "-p",
"Rerunable": "-r",
"Shell_Path_List": "-S",
"job_array_request": "-t",
"User_List": "-u",
"Variable_List": "-v",
}
class Torque(BaseJobExec):
ERROR_MESSAGE_UNRECOGNIZED_ARG = "Unrecognized long argument passed to Torque CLI plugin: %s"
def job_script_kwargs(self, ofile, efile, job_name):
pbsargs = {"-o": ofile, "-e": efile, "-N": job_name}
for k, v in self.params.items():
if k == "plugin":
continue
try:
if not k.startswith("-"):
k = argmap[k]
pbsargs[k] = v
except KeyError:
log.warning(self.ERROR_MESSAGE_UNRECOGNIZED_ARG, k)
template_pbsargs = ""
for k, v in pbsargs.items():
template_pbsargs += f"#PBS {k} {v}\n"
return dict(headers=template_pbsargs)
def submit(self, script_file):
return f"qsub {script_file}"
def delete(self, job_id):
return f"qdel {job_id}"
def get_status(self, job_ids=None):
return "qstat -x"
def get_single_status(self, job_id):
return f"qstat -f {job_id}"
def parse_status(self, status, job_ids):
# in case there's noise in the output, find the big blob of XML
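# Illustrative qstat -x payload (shape inferred from the parsing below; the job id
# and state are placeholders):
#   <Data><Job><Job_Id>123.master</Job_Id><job_state>R</job_state></Job></Data>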
tree = None
rval = {}
for line in status.strip().splitlines():
try:
tree = parse_xml_string(line.strip())
assert tree.tag == "Data"
break
except Exception:
tree = None
if tree is None:
log.warning(f"No valid qstat XML return from `qstat -x`, got the following: {status}")
return None
else:
for job in tree.findall("Job"):
id = job.find("Job_Id").text
if id in job_ids:
state = job.find("job_state").text
# map PBS job states to Galaxy job states.
rval[id] = self._get_job_state(state)
return rval
def parse_single_status(self, status, job_id):
for line in status.splitlines():
line = line.split(" = ")
if line[0].strip() == "job_state":
return self._get_job_state(line[1].strip())
# no state found, job has exited
return job_states.OK
def _get_job_state(self, state):
try:
return {"E": job_states.RUNNING, "R": job_states.RUNNING, "Q": job_states.QUEUED, "C": job_states.OK}.get(
state
)
except KeyError:
raise KeyError(f"Failed to map torque status code [{state}] to job state.")
__all__ = ("Torque",)
|
{
"content_hash": "0b2f8e236f9197b01808a86854e124fc",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 118,
"avg_line_length": 29.431192660550458,
"alnum_prop": 0.5152743142144638,
"repo_name": "galaxyproject/pulsar",
"id": "12c843583c92ea0c56d2a49f56aac308dd44f4b0",
"size": "3208",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pulsar/managers/util/cli/job/torque.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "241"
},
{
"name": "Dockerfile",
"bytes": "5337"
},
{
"name": "Makefile",
"bytes": "5892"
},
{
"name": "Python",
"bytes": "688213"
},
{
"name": "Shell",
"bytes": "17057"
}
],
"symlink_target": ""
}
|
"""
Support for DEB packages
"""
import datetime
import logging
import os
import re
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "lowpkg"
def __virtual__():
"""
Confirm this module is on a Debian based system
"""
if __grains__["os_family"] == "Debian":
return __virtualname__
return (
False,
"The dpkg execution module cannot be loaded: "
"only works on Debian family systems.",
)
def bin_pkg_info(path, saltenv="base"):
"""
.. versionadded:: 2015.8.0
Parses DEB metadata and returns a dictionary of information about the
package (name, version, etc.).
path
Path to the file. Can either be an absolute path to a file on the
minion, or a salt fileserver URL (e.g. ``salt://path/to/file.deb``).
If a salt fileserver URL is passed, the file will be cached to the
minion so that it can be examined.
saltenv : base
Salt fileserver environment from which to retrieve the package. Ignored
if ``path`` is a local file path on the minion.
CLI Example:
.. code-block:: bash
salt '*' lowpkg.bin_pkg_info /root/foo-1.2.3-1ubuntu1_all.deb
salt '*' lowpkg.bin_pkg_info salt://foo-1.2.3-1ubuntu1_all.deb
"""
# If the path is a valid protocol, pull it down using cp.cache_file
if __salt__["config.valid_fileproto"](path):
newpath = __salt__["cp.cache_file"](path, saltenv)
if not newpath:
raise CommandExecutionError(
"Unable to retrieve {} from saltenv '{}'".format(path, saltenv)
)
path = newpath
else:
if not os.path.exists(path):
raise CommandExecutionError("{} does not exist on minion".format(path))
elif not os.path.isabs(path):
raise SaltInvocationError("{} does not exist on minion".format(path))
cmd = ["dpkg", "-I", path]
result = __salt__["cmd.run_all"](cmd, output_loglevel="trace")
if result["retcode"] != 0:
msg = "Unable to get info for " + path
if result["stderr"]:
msg += ": " + result["stderr"]
raise CommandExecutionError(msg)
ret = {}
for line in result["stdout"].splitlines():
line = line.strip()
if re.match(r"^Package[ ]*:", line):
ret["name"] = line.split()[-1]
elif re.match(r"^Version[ ]*:", line):
ret["version"] = line.split()[-1]
elif re.match(r"^Architecture[ ]*:", line):
ret["arch"] = line.split()[-1]
missing = [x for x in ("name", "version", "arch") if x not in ret]
if missing:
raise CommandExecutionError(
"Unable to get {} for {}".format(", ".join(missing), path)
)
if __grains__.get("cpuarch", "") == "x86_64":
osarch = __grains__.get("osarch", "")
arch = ret["arch"]
if arch != "all" and osarch == "amd64" and osarch != arch:
ret["name"] += ":{}".format(arch)
return ret
def unpurge(*packages):
"""
Change package selection for each package specified to 'install'
CLI Example:
.. code-block:: bash
salt '*' lowpkg.unpurge curl
"""
if not packages:
return {}
old = __salt__["pkg.list_pkgs"](purge_desired=True)
ret = {}
__salt__["cmd.run"](
["dpkg", "--set-selections"],
stdin=r"\n".join(["{} install".format(x) for x in packages]),
python_shell=False,
output_loglevel="trace",
)
__context__.pop("pkg.list_pkgs", None)
new = __salt__["pkg.list_pkgs"](purge_desired=True)
return salt.utils.data.compare_dicts(old, new)
def list_pkgs(*packages, **kwargs):
"""
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
External dependencies::
Virtual package resolution requires aptitude. Because this function
uses dpkg, virtual packages will be reported as not installed.
CLI Example:
.. code-block:: bash
salt '*' lowpkg.list_pkgs
salt '*' lowpkg.list_pkgs hostname
salt '*' lowpkg.list_pkgs hostname mount
"""
cmd = [
"dpkg-query",
"-f=${db:Status-Status}\t${binary:Package}\t${Version}\n",
"-W",
] + list(packages)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] != 0:
msg = "Error: " + out["stderr"]
log.error(msg)
return msg
lines = [line.split("\t", 1) for line in out["stdout"].splitlines()]
pkgs = dict([line.split("\t") for status, line in lines if status == "installed"])
return pkgs
def file_list(*packages, **kwargs):
"""
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list hostname
salt '*' lowpkg.file_list hostname mount
salt '*' lowpkg.file_list
"""
errors = []
ret = set()
cmd = ["dpkg-query", "-f=${db:Status-Status}\t${binary:Package}\n", "-W"] + list(
packages
)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] != 0:
msg = "Error: " + out["stderr"]
log.error(msg)
return msg
lines = [line.split("\t") for line in out["stdout"].splitlines()]
pkgs = [package for (status, package) in lines if status == "installed"]
for pkg in pkgs:
output = __salt__["cmd.run"](["dpkg", "-L", pkg], python_shell=False)
fileset = set(output.splitlines())
ret = ret.union(fileset)
return {"errors": errors, "files": sorted(ret)}
def file_dict(*packages, **kwargs):
"""
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_dict hostname
salt '*' lowpkg.file_dict hostname mount
salt '*' lowpkg.file_dict
"""
errors = []
ret = {}
cmd = ["dpkg-query", "-f=${db:Status-Status}\t${binary:Package}\n", "-W"] + list(
packages
)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] != 0:
msg = "Error: " + out["stderr"]
log.error(msg)
return msg
lines = [line.split("\t") for line in out["stdout"].splitlines()]
pkgs = [package for (status, package) in lines if status == "installed"]
for pkg in pkgs:
cmd = ["dpkg", "-L", pkg]
ret[pkg] = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
return {"errors": errors, "packages": ret}
def _get_pkg_info(*packages, **kwargs):
"""
Return list of package information. If 'packages' parameter is empty,
then data about all installed packages will be returned.
:param packages: Specified packages.
:param failhard: Throw an exception if no packages found.
:return:
"""
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop("failhard", True)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if __grains__["os"] == "Ubuntu" and __grains__["osrelease_info"] < (12, 4):
bin_var = "${binary}"
else:
bin_var = "${Package}"
ret = []
cmd = (
"dpkg-query -W -f='package:" + bin_var + "\\n"
"revision:${binary:Revision}\\n"
"architecture:${Architecture}\\n"
"maintainer:${Maintainer}\\n"
"summary:${Summary}\\n"
"source:${source:Package}\\n"
"version:${Version}\\n"
"section:${Section}\\n"
"installed_size:${Installed-size}\\n"
"size:${Size}\\n"
"MD5:${MD5sum}\\n"
"SHA1:${SHA1}\\n"
"SHA256:${SHA256}\\n"
"origin:${Origin}\\n"
"homepage:${Homepage}\\n"
"status:${db:Status-Abbrev}\\n"
"description:${Description}\\n"
"\\n*/~^\\\\*\\n'"
)
cmd += " {}".format(" ".join(packages))
cmd = cmd.strip()
call = __salt__["cmd.run_all"](cmd, python_shell=False)
if call["retcode"]:
if failhard:
raise CommandExecutionError(
"Error getting packages information: {}".format(call["stderr"])
)
else:
return ret
for pkg_info in [
elm
for elm in re.split(r"\r?\n\*/~\^\\\*(\r?\n|)", call["stdout"])
if elm.strip()
]:
pkg_data = {}
pkg_info, pkg_descr = pkg_info.split("\ndescription:", 1)
for pkg_info_line in [
el.strip() for el in pkg_info.split(os.linesep) if el.strip()
]:
key, value = pkg_info_line.split(":", 1)
if value:
pkg_data[key] = value
install_date = _get_pkg_install_time(pkg_data.get("package"))
if install_date:
pkg_data["install_date"] = install_date
pkg_data["description"] = pkg_descr
ret.append(pkg_data)
return ret
def _get_pkg_license(pkg):
"""
Try to get a license from the package.
Based on https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
:param pkg:
:return:
"""
licenses = set()
cpr = "/usr/share/doc/{}/copyright".format(pkg)
if os.path.exists(cpr):
with salt.utils.files.fopen(cpr, errors="ignore") as fp_:
for line in salt.utils.stringutils.to_unicode(fp_.read()).split(os.linesep):
if line.startswith("License:"):
licenses.add(line.split(":", 1)[1].strip())
return ", ".join(sorted(licenses))
def _get_pkg_install_time(pkg):
"""
Return package install time, based on the /var/lib/dpkg/info/<package>.list
:return:
"""
iso_time = None
if pkg is not None:
location = "/var/lib/dpkg/info/{}.list".format(pkg)
if os.path.exists(location):
iso_time = (
datetime.datetime.utcfromtimestamp(
int(os.path.getmtime(location))
).isoformat()
+ "Z"
)
return iso_time
def _get_pkg_ds_avail():
"""
Get the package information of the available packages, maintained by dselect.
Note, this will be not very useful, if dselect isn't installed.
:return:
"""
avail = "/var/lib/dpkg/available"
if not salt.utils.path.which("dselect") or not os.path.exists(avail):
return dict()
# Do not update with dselect, just read what is.
ret = dict()
pkg_mrk = "Package:"
pkg_name = "package"
with salt.utils.files.fopen(avail) as fp_:
for pkg_info in salt.utils.stringutils.to_unicode(fp_.read()).split(pkg_mrk):
nfo = dict()
for line in (pkg_mrk + pkg_info).split(os.linesep):
line = line.split(": ", 1)
if len(line) != 2:
continue
key, value = line
if value.strip():
nfo[key.lower()] = value
if nfo.get(pkg_name):
ret[nfo[pkg_name]] = nfo
return ret
def info(*packages, **kwargs):
"""
Returns a detailed summary of package information for provided package names.
If no packages are specified, all packages will be returned.
.. versionadded:: 2015.8.1
packages
The names of the packages for which to return information.
failhard
Whether to throw an exception if none of the packages are installed.
Defaults to True.
.. versionadded:: 2016.11.3
CLI Example:
.. code-block:: bash
salt '*' lowpkg.info
salt '*' lowpkg.info apache2 bash
salt '*' lowpkg.info 'php5*' failhard=false
"""
# Get the missing information from the /var/lib/dpkg/available, if it is there.
# However, this file is operated by dselect which has to be installed.
dselect_pkg_avail = _get_pkg_ds_avail()
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop("failhard", True)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
for pkg in _get_pkg_info(*packages, failhard=failhard):
# Merge extra information from the dselect, if available
for pkg_ext_k, pkg_ext_v in dselect_pkg_avail.get(pkg["package"], {}).items():
if pkg_ext_k not in pkg:
pkg[pkg_ext_k] = pkg_ext_v
# Remove "technical" keys
for t_key in [
"installed_size",
"depends",
"recommends",
"provides",
"replaces",
"conflicts",
"bugs",
"description-md5",
"task",
]:
if t_key in pkg:
del pkg[t_key]
lic = _get_pkg_license(pkg["package"])
if lic:
pkg["license"] = lic
ret[pkg["package"]] = pkg
return ret
|
{
"content_hash": "b73340eb9dadea6dbe93933de157ba3a",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 88,
"avg_line_length": 30.10909090909091,
"alnum_prop": 0.5641606280193237,
"repo_name": "saltstack/salt",
"id": "2bd12a7e615050658caf94da01e071f88bff98f0",
"size": "13248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/modules/dpkg_lowpkg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
import pytest
from inventory_control import storage
def get_config():
"""
Create my simple localhost config
:return:
"""
config = {'host': 'localhost', 'user': 'wce',
'password': 'thispasswordisobjectivelyterrible',
'db': 'inventory_control'}
return config
def test_integration_storage():
engine = storage.StorageEngine(config=get_config())
result = engine.cursor.execute("SELECT COUNT(*) FROM test")
assert result == 1
try:
#engine._drop_tables()
engine._create_tables()
res = engine.cursor.execute("SELECT * FROM components")
assert res == 0
res = engine.cursor.execute("SELECT * FROM projects")
assert res == 0
except Exception as ex:
print(ex)
finally:
engine._drop_tables()
def test_integration_component_creation():
"""
Create a component type, get it, and delete it.
:return:
"""
name = 'some_name'
engine = storage.StorageEngine(config=get_config())
try:
engine._create_tables()
engine.add_component_type(type_name=name)
result = engine.get_component_type(type_name=name)
assert result['type'] == name
engine.remove_component_type(type_name=name)
result = engine.get_component_type(type_name=name)
assert result is None
finally:
engine._drop_tables()
def test_project():
"""
Create a project, delete a project, and possibly
rank them in order
:return:
"""
engine = storage.StorageEngine(config=get_config())
project_number = 1001
try:
engine._create_tables()
engine.add_project(project_number=project_number)
engine.delete_project(project_number=project_number)
finally:
engine._drop_tables()
|
{
"content_hash": "15522b29908f7747b1e90ead9dc53b6f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 63,
"avg_line_length": 26.318840579710145,
"alnum_prop": 0.6200440528634361,
"repo_name": "codeforsanjose/inventory-control",
"id": "f79d4e285f5d426ab721dd0aaf81fe334b3e8d63",
"size": "1816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7897"
}
],
"symlink_target": ""
}
|
"""From flight data, gather info about distance between airports and check for consistency. Then
write to distance.csv"""
from sets import Set
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None  # Suppress SettingWithCopyWarning.
def get_distancefreq():
#Empty dataframe to accumulate frequency, based on Origin, Dest, and Distance.
freqdf=pd.DataFrame({
'OriginAirportId':[],
'DestAirportId':[],
'Distance':[],
'Freq':[]})
freqdf.set_index(['OriginAirportId', 'DestAirportId', 'Distance'], inplace=True)
for year in range(1987, 2016):
if(year == 1987):
startmonth = 10
else:
startmonth = 1
for month in range(startmonth, 13):
flightfile='../data/%d_%d.csv' %(year, month)
df = pd.read_csv(flightfile)[['OriginAirportId', 'DestAirportId', 'Distance']]
#Create a freq column and calculate frequency for a given month.
df['Freq'] = np.nan
df['Freq'] = df.groupby(['OriginAirportId', 'DestAirportId', 'Distance']).transform(len)
df.drop_duplicates(inplace=True)
df.set_index(['OriginAirportId', 'DestAirportId', 'Distance'], inplace=True)
#Align rows by index, and sum frequencies.
freqdf, df = freqdf.align(df, fill_value=0)
freqdf += df
return freqdf.reset_index(level=2)
def checkdistance(distdf):
#Input dataframe has index=Origin, Dest; columns = Distance, Freq.
#Freq = frequency of each distance given origin, dest.
flights = list(distdf.index.unique())
roundtrips = [x for x in flights if (x[1], x[0]) in flights and x[0] < x[1]]
#Check, store, and print various errors.
print "Same Origin and Dest Airports:"
same_origindest = [x[0] for x in flights if x[0] == x[1]]
print same_origindest
print "Multiple Distance Errors:"
multipledist = [(x[0], x[1]) for x in flights if distdf.Distance[x[0], x[1]].size > 1]
for x in multipledist:
print "Flight: ", x
print "Distances: ", list(distdf.Distance[x])
print "Frequency: ", list(distdf.Freq[x])
print "Round Trip Errors:"
roundtriperrors = [x for x in roundtrips if
(True if distdf.Distance[x].size != distdf.Distance[x[1], x[0]].size
else (distdf.Distance[x] != distdf.Distance[x[1], x[0]])
if (distdf.Distance[x].size == 1)
else (Set(distdf.Distance[x]) != Set(distdf.Distance[x[1], x[0]])))]
for x in roundtriperrors:
print "Flight: ", x
print "Distance to: ", list(distdf.Distance[x])
print "Distance back: ", list(distdf.Distance[x[1], x[0]])
return [same_origindest, multipledist, roundtriperrors]
def getdistance():
#Get distance info from flight data and check for errors.
distfreq_df = get_distancefreq()
errors = checkdistance(distfreq_df)
#Remove same origin/dest flights.
distfreq_df.drop([(x,x) for x in errors[0]], inplace=True)
#Choose distance which occurs more than half the time.
distfreq_df.reset_index(inplace=True)
distfreq_df['TotalFreq'] = distfreq_df.groupby(['OriginAirportId', 'DestAirportId']).Freq.transform(sum)
distfreq_df = distfreq_df[distfreq_df.Freq/distfreq_df.TotalFreq > 0.5]
#Write to csv.
distfreq_df.drop(['Freq', 'TotalFreq'], axis=1, inplace=True)
distfreq_df.to_csv('../data/distance.csv', index=False)
|
{
"content_hash": "af387e873b57e8028e39cf390a0eadda",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 108,
"avg_line_length": 39.28409090909091,
"alnum_prop": 0.6355221290135956,
"repo_name": "mtb0/flightmodel",
"id": "b6231e28cd99fc6a938dc743a0f2ee6a3a7af998",
"size": "3479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/download/get_dist_from_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29189"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from yolk.__init__ import __version__ as VERSION
setup(name="yolk",
license = "BSD License",
version=VERSION,
description="Command-line tool for querying PyPI and Python packages installed on your system.",
long_description=open("README", "r").read(),
maintainer="Rob Cakebread",
author="Rob Cakebread",
author_email="cakebread @ gmail",
url="https://github.com/cakebread/yolk",
keywords="PyPI setuptools cheeseshop distutils eggs package management",
classifiers=["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=["setuptools"],
packages=['yolk', 'yolk.plugins'],
package_dir={'yolk':'yolk'},
entry_points={'console_scripts': ['yolk = yolk.cli:main',]},
)
|
{
"content_hash": "3b110f9294f0e258411fac94e314ca81",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 100,
"avg_line_length": 37.44444444444444,
"alnum_prop": 0.6261127596439169,
"repo_name": "cakebread/yolk",
"id": "2e08c6b0a1cde96a9c144615386e230d6b9f7e93",
"size": "1035",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "88377"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from PIL import Image
from singa import device
from singa import tensor
from singa import sonnx
import onnx
from utils import download_model, check_exist_or_download
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
def preprocess(img):
img = img.resize((256, 256))
img = img.crop((16, 16, 240, 240))
img = np.array(img).astype(np.float32) / 255.
img = np.rollaxis(img, 2, 0)
for channel, mean, std in zip(range(3), [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]):
img[channel, :, :] -= mean
img[channel, :, :] /= std
img = np.expand_dims(img, axis=0)
return img
def get_image_label():
# download label
label_url = 'https://s3.amazonaws.com/onnx-model-zoo/synset.txt'
with open(check_exist_or_download(label_url), 'r') as f:
labels = [l.rstrip() for l in f]
# download image
image_url = 'https://s3.amazonaws.com/model-server/inputs/kitten.jpg'
img = Image.open(check_exist_or_download(image_url))
return img, labels
class MyModel(sonnx.SONNXModel):
def __init__(self, onnx_model):
super(MyModel, self).__init__(onnx_model)
def forward(self, *x):
y = super(MyModel, self).forward(*x)
return y[0]
def train_one_batch(self, x, y):
pass
if __name__ == "__main__":
url = 'https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg16/vgg16.tar.gz'
download_dir = '/tmp/'
model_path = os.path.join(download_dir, 'vgg16', 'vgg16.onnx')
logging.info("onnx load model...")
download_model(url)
onnx_model = onnx.load(model_path)
# inference
logging.info("preprocessing...")
img, labels = get_image_label()
img = preprocess(img)
# sg_ir = sonnx.prepare(onnx_model) # run without graph
# y = sg_ir.run([img])
logging.info("model compling...")
dev = device.create_cuda_gpu()
x = tensor.PlaceHolder(img.shape, device=dev)
model = MyModel(onnx_model)
model.compile([x], is_train=False, use_graph=True, sequential=True)
# verify the test
# from utils import load_dataset
# inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'vgg16', 'test_data_set_0'))
# x_batch = tensor.Tensor(device=dev, data=inputs[0])
# outputs = sg_ir.run([x_batch])
# for ref_o, o in zip(ref_outputs, outputs):
# np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4)
logging.info("model running...")
x = tensor.Tensor(device=dev, data=img)
y = model.forward(x)
logging.info("postprocessing...")
y = tensor.softmax(y)
scores = tensor.to_numpy(y)
scores = np.squeeze(scores)
a = np.argsort(scores)[::-1]
for i in a[0:5]:
logging.info('class=%s ; probability=%f' % (labels[i], scores[i]))
|
{
"content_hash": "3681889caf4afd2aac941321336829bf",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 90,
"avg_line_length": 30.483870967741936,
"alnum_prop": 0.6204585537918871,
"repo_name": "apache/incubator-singa",
"id": "369cee9e58e21508d5e879b82256e13e0a1973cf",
"size": "3613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/onnx/vgg16.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "179197"
},
{
"name": "C++",
"bytes": "2270126"
},
{
"name": "CMake",
"bytes": "35412"
},
{
"name": "Cuda",
"bytes": "23993"
},
{
"name": "Dockerfile",
"bytes": "19274"
},
{
"name": "Java",
"bytes": "2578"
},
{
"name": "Python",
"bytes": "450209"
},
{
"name": "Shell",
"bytes": "11607"
}
],
"symlink_target": ""
}
|
import os
from collections import defaultdict
from flask import Flask
from flask import json
from flask import request
from flask import render_template
app = Flask(__name__)
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
RESULTS_DIR = os.path.join(PROJECT_DIR, '../results')
def visualise_file():
versions = request.args.getlist('version') or ['1.4.10', '1.5.5', '1.6']
db_vendors = request.args.getlist('vendor') or [
'postgresql', 'mysql', 'sqlite']
app_names = request.args.getlist('app_name') or [
'polymorphic_test', 'model_utils_test', 'generic_m2m']
fn_tmpl = '{app_name}_{db_vendor}_Django-{version}_benchmark_results.csv'
grouped_data = defaultdict(list)
for app_name in app_names:
for version in versions:
for db_vendor in db_vendors:
name = '{}_{}_{}'.format(app_name, version, db_vendor)
filename = fn_tmpl.format(
app_name=app_name, db_vendor=db_vendor, version=version)
path = os.path.join(RESULTS_DIR, filename)
if not os.path.exists(path):
continue
with open(path) as fh:
for line in fh:
try:
line = json.loads(line)
grouped_data[name].append(line)
except ValueError:
pass
return json.dumps(grouped_data)
@app.route('/')
def index():
return render_template(
'graph.html', parsecom_app_id=os.getenv('PARSECOM_APP_ID'),
parsecom_js_key=os.getenv('PARSECOM_JAVASCRIPT_KEY'))
if __name__ == '__main__':
app.run(debug=True)
|
{
"content_hash": "2a06648c090d838a03b5f06eec29d328",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 32.20754716981132,
"alnum_prop": 0.5629759812536614,
"repo_name": "elbaschid/mti-lightbulb",
"id": "05330392dfa0e08d71fab756f3f7519c24446a9a",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualize/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12835"
},
{
"name": "JavaScript",
"bytes": "780324"
},
{
"name": "Python",
"bytes": "36910"
},
{
"name": "Scheme",
"bytes": "5332"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
name = "launch_volume"
verbose_name = _("Launch as Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
vol_id = "%s:vol" % self.table.get_object_id(datum)
params = urlencode({"source_type": "volume_id",
"source_id": vol_id})
return "?".join([base_url, params])
def allowed(self, request, volume=None):
if getattr(volume, 'bootable', '') == 'true':
return volume.status == "available"
return False
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Volume",
u"Delete Volumes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Volume",
u"Scheduled deletion of Volumes",
count
)
policy_rules = (("volume", "volume:delete"),)
def delete(self, request, obj_id):
cinder.volume_delete(request, obj_id)
def allowed(self, request, volume=None):
if volume:
return (volume.status in DELETABLE_STATES and
not getattr(volume, 'has_snapshot', False))
return True
class CreateVolume(tables.LinkAction):
name = "create"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:volumes:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("volume", "volume:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(CreateVolume, self).__init__(attrs, **kwargs)
def allowed(self, request, volume=None):
limits = api.cinder.tenant_absolute_limits(request)
gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
- limits.get('totalGigabytesUsed', 0))
volumes_available = (limits.get('maxTotalVolumes', float("inf"))
- limits.get('totalVolumesUsed', 0))
if gb_available <= 0 or volumes_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Create Volume")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
return True
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "extend"
verbose_name = _("Extend Volume")
url = "horizon:project:volumes:volumes:extend"
classes = ("ajax-modal", "btn-extend")
policy_rules = (("volume", "volume:extend"),)
def allowed(self, request, volume=None):
return volume.status == "available"
class EditAttachments(tables.LinkAction):
name = "attachments"
verbose_name = _("Manage Attachments")
url = "horizon:project:volumes:volumes:attach"
classes = ("ajax-modal",)
icon = "pencil"
def allowed(self, request, volume=None):
if volume:
project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
attach_allowed = \
policy.check((("compute", "compute:attach_volume"),),
request,
{"project_id": project_id})
detach_allowed = \
policy.check((("compute", "compute:detach_volume"),),
request,
{"project_id": project_id})
if attach_allowed or detach_allowed:
return volume.status in ("available", "in-use")
return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
name = "snapshots"
verbose_name = _("Create Snapshot")
url = "horizon:project:volumes:volumes:create_snapshot"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("volume", "volume:create_snapshot"),)
def allowed(self, request, volume=None):
try:
limits = api.cinder.tenant_absolute_limits(request)
except Exception:
exceptions.handle(request, _('Unable to retrieve tenant limits.'))
limits = {}
snapshots_available = (limits.get('maxTotalSnapshots', float("inf"))
- limits.get('totalSnapshotsUsed', 0))
if snapshots_available <= 0 and "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
return volume.status in ("available", "in-use")
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
name = "backups"
verbose_name = _("Create Backup")
url = "horizon:project:volumes:volumes:create_backup"
classes = ("ajax-modal",)
policy_rules = (("volume", "backup:create"),)
def allowed(self, request, volume=None):
return (cinder.volume_backup_supported(request) and
volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
name = "upload_to_image"
verbose_name = _("Upload to Image")
url = "horizon:project:volumes:volumes:upload_to_image"
classes = ("ajax-modal",)
icon = "cloud-upload"
policy_rules = (("volume", "volume:upload_to_image"),)
def allowed(self, request, volume=None):
has_image_service_perm = \
request.user.has_perm('openstack.services.image')
return (volume.status in ("available", "in-use") and
has_image_service_perm)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Volume")
url = "horizon:project:volumes:volumes:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:update"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "retype"
verbose_name = _("Change Volume Type")
url = "horizon:project:volumes:volumes:retype"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:retype"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, volume_id):
volume = cinder.volume_get(request, volume_id)
return volume
def get_size(volume):
return _("%sGB") % volume.size
def get_attachment_name(request, attachment):
server_id = attachment.get("server_id", None)
if "instance" in attachment and attachment['instance']:
name = attachment["instance"].name
else:
try:
server = api.nova.server_get(request, server_id)
name = server.name
except Exception:
name = None
exceptions.handle(request, _("Unable to retrieve "
"attachment information."))
try:
url = reverse("horizon:project:instances:detail", args=(server_id,))
instance = '<a href="%s">%s</a>' % (url, html.escape(name))
except NoReverseMatch:
instance = name
return instance
class AttachmentColumn(tables.Column):
"""Customized column class.
So it that does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, volume):
request = self.table.request
link = _('Attached to %(instance)s on %(dev)s')
attachments = []
# Filter out "empty" attachments which the client returns...
for attachment in [att for att in volume.attachments if att]:
# When a volume is attached it may return the server_id
# without the server name...
instance = get_attachment_name(request, attachment)
vals = {"instance": instance,
"dev": html.escape(attachment.get("device", ""))}
attachments.append(link % vals)
return safestring.mark_safe(", ".join(attachments))
def get_volume_type(volume):
return volume.volume_type if volume.volume_type != "None" else None
def get_encrypted_value(volume):
if not hasattr(volume, 'encrypted') or volume.encrypted is None:
return _("-")
elif volume.encrypted is False:
return _("No")
else:
return _("Yes")
class VolumesTableBase(tables.DataTable):
STATUS_CHOICES = (
("in-use", True),
("available", True),
("creating", None),
("error", False),
("error_extending", False),
)
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
description = tables.Column("description",
verbose_name=_("Description"),
truncate=40)
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
filters=(filters.title,),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES)
def get_object_display(self, obj):
return obj.name
class VolumesFilterAction(tables.FilterAction):
def filter(self, table, volumes, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [volume for volume in volumes
if q in volume.name.lower()]
class VolumesTable(VolumesTableBase):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
volume_type = tables.Column(get_volume_type,
verbose_name=_("Type"))
attachments = AttachmentColumn("attachments",
verbose_name=_("Attached To"))
availability_zone = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
bootable = tables.Column('is_bootable',
verbose_name=_("Bootable"),
filters=(filters.yesno, filters.capfirst))
encryption = tables.Column(get_encrypted_value,
verbose_name=_("Encrypted"))
class Meta:
name = "volumes"
verbose_name = _("Volumes")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (CreateVolume, DeleteVolume, VolumesFilterAction)
row_actions = (EditVolume, ExtendVolume, LaunchVolume, EditAttachments,
CreateSnapshot, CreateBackup, RetypeVolume,
UploadToImage, DeleteVolume)
class DetachVolume(tables.BatchAction):
name = "detach"
classes = ('btn-danger', 'btn-detach')
policy_rules = (("compute", "compute:detach_volume"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Detach Volume",
u"Detach Volumes",
count
)
# This action is asynchronous.
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Detaching Volume",
u"Detaching Volumes",
count
)
def action(self, request, obj_id):
attachment = self.table.get_object_by_id(obj_id)
api.nova.instance_volume_detach(request,
attachment.get('server_id', None),
obj_id)
def get_success_url(self, request):
return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
"""Customized column class that does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, attachment):
request = self.table.request
return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
instance = AttachedInstanceColumn(get_attachment_name,
verbose_name=_("Instance"))
device = tables.Column("device",
verbose_name=_("Device"))
def get_object_id(self, obj):
return obj['id']
def get_object_display(self, attachment):
instance_name = get_attachment_name(self.request, attachment)
vals = {"volume_name": attachment['volume_name'],
"instance_name": html.strip_tags(instance_name)}
return _("Volume %(volume_name)s on instance %(instance_name)s") % vals
def get_object_by_id(self, obj_id):
for obj in self.data:
if self.get_object_id(obj) == obj_id:
return obj
raise ValueError('No match found for the id "%s".' % obj_id)
class Meta:
name = "attachments"
verbose_name = _("Attachments")
table_actions = (DetachVolume,)
row_actions = (DetachVolume,)
|
{
"content_hash": "cdb6c8dae34aeba2213290ca513db55d",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 79,
"avg_line_length": 34.72985781990521,
"alnum_prop": 0.5912254366812227,
"repo_name": "CiscoSystems/avos",
"id": "a4389f989c3a370c6346b8ec9dc3089a00d4bd71",
"size": "15261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/volumes/volumes/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "85008"
},
{
"name": "HTML",
"bytes": "457426"
},
{
"name": "JavaScript",
"bytes": "904618"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4318649"
},
{
"name": "Scala",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "17503"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'todolist.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
|
{
"content_hash": "807410c36bba244c2c2c8088edb90279",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 37,
"avg_line_length": 21.076923076923077,
"alnum_prop": 0.5912408759124088,
"repo_name": "Joneyviana/todolist-django-angular",
"id": "94c9b48e086a5fdbf12a81f78d53ec41b68a6b58",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todolist/users/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78607"
},
{
"name": "HTML",
"bytes": "23085"
},
{
"name": "JavaScript",
"bytes": "144311"
},
{
"name": "Python",
"bytes": "60162"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
}
|
from twisted.web import resource
class Logout(resource.Resource):
"""
Log the user out.
@param cookieDict: A C{dict} mapping cookie values to Twitter usernames.
"""
allowedMethods = ('GET',)
def __init__(self, cookieDict, conf):
resource.Resource.__init__(self)
self._cookieDict = cookieDict
self._conf = conf
def render_GET(self, request):
"""
Forget about the user's cookie and redirect them to our home page.
@param request: A twisted.web HTTP C{Request}.
"""
try:
del self._cookieDict[request.getCookie(self._conf.cookie_name)]
except KeyError:
pass
request.redirect('/')
return ''
|
{
"content_hash": "f665d27924eb539d074470ccb5698dba",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.5901639344262295,
"repo_name": "fluidinfo/lastpage-server",
"id": "f2c9f5d9939af3fa47b2c8c8209a73d11a6fddc5",
"size": "1309",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lastpage/logout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32706"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_warren_cyborg_style_01.iff"
result.attribute_template_id = 9
result.stfName("theme_park_name","1h_trainer")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "e9eabb86018180bfd7ab94dd02115967",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 24,
"alnum_prop": 0.6987179487179487,
"repo_name": "obi-two/Rebelion",
"id": "c7ec2d8035253b5964c50ccd9c99fc760cebaf5a",
"size": "457",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_warren_cyborg_style_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from django.core.management import call_command
from django.test import TestCase
from mock import call
from mock import patch
from kolibri.core.content import models as content
class DeleteChannelTestCase(TestCase):
"""
Testcase for delete channel management command
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def delete_channel(self):
call_command("deletechannel", self.the_channel_id)
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_remove_metadata_object(
self, os_remove_mock, content_file_path
):
self.delete_channel()
self.assertEquals(0, content.ChannelMetadata.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_remove_contentnodes(
self, os_remove_mock, content_file_path
):
self.delete_channel()
self.assertEquals(0, content.ContentNode.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_leave_unrelated_contentnodes(
self, os_remove_mock, content_file_path
):
c2c1 = content.ContentNode.objects.get(title="c2c1")
new_id = c2c1.id[:-1] + "1"
content.ContentNode.objects.create(
id=new_id,
content_id=c2c1.content_id,
kind=c2c1.kind,
channel_id=c2c1.channel_id,
available=True,
title=c2c1.title,
)
self.delete_channel()
self.assertEquals(1, content.ContentNode.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_remove_file_objects(
self, os_remove_mock, content_file_path
):
self.delete_channel()
self.assertEquals(0, content.File.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_files(self, os_remove_mock, content_file_path):
path = "testing"
content_file_path.return_value = path
num_files = content.LocalFile.objects.filter(available=True).count()
self.delete_channel()
os_remove_mock.assert_has_calls([call(path)] * num_files)
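# Note on the patching: paths.get_content_storage_file_path and os.remove are
# mocked so the channel deletion logic can run against the fixture data without
# touching real files; the final test asserts the expected os.remove calls
# instead of inspecting the filesystem.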
|
{
"content_hash": "09bc3a6554face371a45c41a50fc115a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 83,
"avg_line_length": 38.17391304347826,
"alnum_prop": 0.6814730447987851,
"repo_name": "lyw07/kolibri",
"id": "6b96bfef58b4e0f3892eca2a78609d7e86527bfb",
"size": "2634",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "kolibri/core/content/test/test_deletechannel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "2007902"
},
{
"name": "Dockerfile",
"bytes": "6930"
},
{
"name": "Gherkin",
"bytes": "199214"
},
{
"name": "HTML",
"bytes": "34393"
},
{
"name": "JavaScript",
"bytes": "1376767"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "1896793"
},
{
"name": "Shell",
"bytes": "11350"
},
{
"name": "Vue",
"bytes": "1278479"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
from schedprof.coroutine import Coroutine
from schedprof.cpu import CPU, State
class Scheduler(object):
__metaclass__ = ABCMeta
def __init__(self):
self._now = 0
self.cpus = []
self.coros = []
self._idle_cputime = 0
self._context_switches = 0
self._cache_hits = 0
def status_string(self):
stats = [i.state.value + (" " if i.coro is None else "{0:02}".format(i.coro.n)) for i in self.cpus]
ret = "Now is {}\n".format(self._now)
ret += " CPU stats:\n"
ret += " {0:3}: {1}".format(self._now, (" ".join(stats)).strip())
idle_coroutines = [str(i) for i in self.coros if i._cpu is None]
if idle_coroutines:
ret += "\n Idle coroutines:\n "
ret += " ".join([str(i) for i in idle_coroutines])
return ret
@abstractmethod
def map_(self):
"""Scheduler function.
        The returned list of (CPU, Coroutine) tuples specifies which coroutine each
        CPU should execute next.
        Subclasses can use the self.cpus and self.coros attributes to make a decision.
        The Coroutine.peek_instruction() method might also be useful.
"""
pass
def run_program(self, program, ncpu, cpu_monitor=None):
assert isinstance(program, Coroutine)
        cpu_monitor = cpu_monitor or (lambda *args: None)
self._now = 0
self._idle_cputime = 0
self._context_switches = 0
self._cache_hits = 0
self.coros = [program]
self.cpus = [CPU() for _ in range(ncpu)]
mutexes_to_unlock = []
while True:
# Finalize completed tasks
for cpu in filter(lambda cpu: not cpu.coro is None and cpu.due <= self._now, self.cpus):
cpu.suspend()
# Perform pending mutex unlocks, resume coroutines that were waiting for these mutexes
# Scheduler can use Coroutine.peek_instruction() to find all coroutines that want to acquire
            # a particular mutex.
for mutex in [i for when, i in mutexes_to_unlock if when <= self._now]:
for coro in mutex.wait_queue:
coro.resume()
# Map pending coros to CPUs by doing actual scheduling
# Scheduler.map_() method should be implemented in the subclass
schedule = self.map_()
# Sort schedule by cpu number
schedule = sorted(schedule, key=lambda x: x[0].n)
# Ensure returned CPUs are ready to accept new tasks
for cpu, _ in schedule:
if cpu.burning(self._now):
raise RuntimeError("Coroutine was mapped to burning CPU")
# Fetch next instruction from coroutine, assign CPU according to the
# schedule and compute execution due time
for cpu, coro in schedule:
task = coro.pop_instruction()
# Provide CPU Monitor with information it requires (used mostly is unit tests)
cpu_monitor(self._now, cpu, coro, task)
if task[0] == 'lock':
mutex = task[2]
if not mutex.acquired_by is None:
mutex.wait_queue.append(coro)
coro.suspend(task)
else:
mutex.acquired_by = coro
if task[0] == 'unlock':
mutex = task[2]
if mutex.acquired_by != coro:
raise RuntimeError("An attempt to unlock non-owned mutex")
mutex.acquired_by = None
mutexes_to_unlock.append((self._now + task[1], mutex))
if task[0] == 'spawn':
spawned_coro = task[2]
self.coros.append(spawned_coro)
if task[0] == 'terminate':
coro_to_kill = task[2]
self.coros.remove(coro_to_kill)
if task[0] in ['spawn', 'io', 'cpu', 'terminate', 'lock', 'unlock']:
self._context_switches += 1
cachehit = cpu.wakeup(coro, State.RUNNING, self._now + task[1])
if cachehit:
self._cache_hits += 1
if all([cpu.idle(self._now) for cpu in self.cpus]) and len(self.coros) == 0:
# All CPUs are idle and there are no pending coros -
                # the program has reached its end
break
            # Nearest point in the future that may require re-scheduling
nextnow = min([cpu.due for cpu in self.cpus if cpu.due > self._now])
# Compute CPU idle time
nidlecpus = len([None for cpu in self.cpus if cpu.idle(self._now)])
self._idle_cputime += nidlecpus * (nextnow - self._now)
# Jump to the next scheduling point
self._now = nextnow
total_cputime = len(self.cpus) * self._now
burning_cputime = total_cputime - self._idle_cputime
return (self._now, total_cputime, burning_cputime, self._context_switches, self._cache_hits)
class DumbScheduler(Scheduler):
"""Maps first N idle coroutines to first M idle CPU resulting
into min(N, M) scheduled tasks.
"""
def map_(self):
idle_cpus = [i for i in self.cpus if i.idle(self._now)]
ready_coros = [i for i in self.coros if i.ready()]
return list(zip(idle_cpus, ready_coros))
def print_stat(stat):
print("Elapsed time: {}\nTotal CPU time: {}\nBurning CPU time: {}\nContext switches: {}\nCache hits: {}".format(*stat))
print("Cache hit rate: {:.2%}".format(stat[4] / stat[3]))
print("CPU utilization: {:.2%}".format(stat[2] / stat[1]))
print("Parallel speedup: {:.4}".format(stat[2] / stat[0]))
|
{
"content_hash": "172b5222628dce03cc1887d40e6d292b",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 123,
"avg_line_length": 45.4609375,
"alnum_prop": 0.5512974737927478,
"repo_name": "isn-/dfk",
"id": "9dfec888c781f0f888cbb977de23ce0a161cc5db",
"size": "5866",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/schedprof/schedprof/scheduler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "195817"
},
{
"name": "C++",
"bytes": "3080"
},
{
"name": "CMake",
"bytes": "13949"
},
{
"name": "Python",
"bytes": "658"
},
{
"name": "Shell",
"bytes": "3053"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from typing import List, Optional, Union
from dbt.contracts.graph.manifest import WritableManifest
from dbt.contracts.rpc import (
GetManifestParameters,
GetManifestResult,
RPCCompileParameters,
RPCDocsGenerateParameters,
RPCRunOperationParameters,
RPCSeedParameters,
RPCTestParameters,
RemoteCatalogResults,
RemoteExecutionResult,
RemoteRunOperationResult,
RPCSnapshotParameters,
RPCSourceFreshnessParameters,
)
from dbt.rpc.method import (
Parameters, RemoteManifestMethod
)
from dbt.task.base import BaseTask
from dbt.task.compile import CompileTask
from dbt.task.freshness import FreshnessTask
from dbt.task.generate import GenerateTask
from dbt.task.run import RunTask
from dbt.task.run_operation import RunOperationTask
from dbt.task.seed import SeedTask
from dbt.task.snapshot import SnapshotTask
from dbt.task.test import TestTask
from .base import RPCTask
from .cli import HasCLI
class RPCCommandTask(
RPCTask[Parameters],
HasCLI[Parameters, RemoteExecutionResult],
BaseTask,
):
@staticmethod
def _listify(
value: Optional[Union[str, List[str]]]
) -> Optional[List[str]]:
if value is None:
return None
elif isinstance(value, str):
return [value]
else:
return value
def handle_request(self) -> RemoteExecutionResult:
return self.run()
class RemoteCompileProjectTask(
RPCCommandTask[RPCCompileParameters], CompileTask
):
METHOD_NAME = 'compile'
def set_args(self, params: RPCCompileParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteRunProjectTask(RPCCommandTask[RPCCompileParameters], RunTask):
METHOD_NAME = 'run'
def set_args(self, params: RPCCompileParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteSeedProjectTask(RPCCommandTask[RPCSeedParameters], SeedTask):
METHOD_NAME = 'seed'
def set_args(self, params: RPCSeedParameters) -> None:
# select has an argparse `dest` value of `models`.
self.args.models = self._listify(params.select)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
self.args.show = params.show
class RemoteTestProjectTask(RPCCommandTask[RPCTestParameters], TestTask):
METHOD_NAME = 'test'
def set_args(self, params: RPCTestParameters) -> None:
self.args.models = self._listify(params.models)
self.args.exclude = self._listify(params.exclude)
self.args.data = params.data
self.args.schema = params.schema
if params.threads is not None:
self.args.threads = params.threads
class RemoteDocsGenerateProjectTask(
RPCCommandTask[RPCDocsGenerateParameters],
GenerateTask,
):
METHOD_NAME = 'docs.generate'
def set_args(self, params: RPCDocsGenerateParameters) -> None:
self.args.models = None
self.args.exclude = None
self.args.compile = params.compile
def get_catalog_results(
self, nodes, sources, generated_at, compile_results, errors
) -> RemoteCatalogResults:
return RemoteCatalogResults(
nodes=nodes,
sources=sources,
generated_at=datetime.utcnow(),
_compile_results=compile_results,
errors=errors,
logs=[],
)
class RemoteRunOperationTask(
RunOperationTask,
RemoteManifestMethod[RPCRunOperationParameters, RemoteRunOperationResult],
HasCLI[RPCRunOperationParameters, RemoteRunOperationResult],
):
METHOD_NAME = 'run-operation'
def __init__(self, args, config, manifest):
super().__init__(args, config)
RemoteManifestMethod.__init__(
self, args, config, manifest # type: ignore
)
def load_manifest(self):
# we started out with a manifest!
pass
def set_args(self, params: RPCRunOperationParameters) -> None:
self.args.macro = params.macro
self.args.args = params.args
def _get_kwargs(self):
if isinstance(self.args.args, dict):
return self.args.args
else:
return RunOperationTask._get_kwargs(self)
def _runtime_initialize(self):
return RunOperationTask._runtime_initialize(self)
def handle_request(self) -> RemoteRunOperationResult:
base = RunOperationTask.run(self)
result = RemoteRunOperationResult(
results=base.results,
generated_at=base.generated_at,
logs=[],
success=base.success,
elapsed_time=base.elapsed_time
)
return result
def interpret_results(self, results):
return results.success
class RemoteSnapshotTask(RPCCommandTask[RPCSnapshotParameters], SnapshotTask):
METHOD_NAME = 'snapshot'
def set_args(self, params: RPCSnapshotParameters) -> None:
# select has an argparse `dest` value of `models`.
self.args.models = self._listify(params.select)
self.args.exclude = self._listify(params.exclude)
if params.threads is not None:
self.args.threads = params.threads
class RemoteSourceFreshnessTask(
RPCCommandTask[RPCSourceFreshnessParameters],
FreshnessTask
):
METHOD_NAME = 'snapshot-freshness'
def set_args(self, params: RPCSourceFreshnessParameters) -> None:
self.args.selected = self._listify(params.select)
if params.threads is not None:
self.args.threads = params.threads
self.args.output = None
# this is a weird and special method.
class GetManifest(
RemoteManifestMethod[GetManifestParameters, GetManifestResult]
):
METHOD_NAME = 'get-manifest'
def set_args(self, params: GetManifestParameters) -> None:
self.args.models = None
self.args.exclude = None
def handle_request(self) -> GetManifestResult:
task = RemoteCompileProjectTask(self.args, self.config, self.manifest)
task.handle_request()
manifest: Optional[WritableManifest] = None
if task.manifest is not None:
manifest = task.manifest.writable_manifest()
return GetManifestResult(
logs=[],
manifest=manifest,
)
def interpret_results(self, results):
return results.manifest is not None
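# For reference, RPCCommandTask._listify normalizes selector parameters that may
# arrive either as a single string or as a list (behaviour visible in the
# staticmethod above):
#
#     RPCCommandTask._listify(None)         # -> None
#     RPCCommandTask._listify("my_model")   # -> ["my_model"]
#     RPCCommandTask._listify(["a", "b"])   # -> ["a", "b"]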
|
{
"content_hash": "fabb270dbc994ce1bb48a600b62903e1",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 78,
"avg_line_length": 30.563636363636363,
"alnum_prop": 0.6763831052944675,
"repo_name": "fishtown-analytics/dbt",
"id": "99a67f8a36b90863356b53b9c03d427477c05bf3",
"size": "6724",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/octavius-catto",
"path": "core/dbt/task/rpc/project_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1247"
},
{
"name": "HTML",
"bytes": "1343185"
},
{
"name": "Makefile",
"bytes": "997"
},
{
"name": "PLpgSQL",
"bytes": "1649"
},
{
"name": "Python",
"bytes": "2059566"
},
{
"name": "Shell",
"bytes": "2419"
},
{
"name": "TSQL",
"bytes": "396955"
}
],
"symlink_target": ""
}
|
""" RHEAS module for retrieving meteorological forecasts/hindcasts
from the NMME model suite.
.. module:: nmme
:synopsis: Retrieve NMME meteorological forecast data
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
import datasets
import rpath
import dbio
import subprocess
import tempfile
import os
import sys
import shutil
import zipfile
import random
import string
import numpy as np
from datetime import datetime, timedelta
import logging
def dates(dbname):
dts = datasets.dates(dbname, "precip.nmme")
return dts
def _writeCservConfig(bbox, startdate, enddate, varname, ens):
"""Write ClimateSERV configuration file."""
log = logging.getLogger(__name__)
with tempfile.NamedTemporaryFile(dir=".", delete=False) as fcfg:
fcfg.write("[DEFAULT]\n")
fcfg.write("APIAccessKey = 1dd4d855e8b64a35b65b4841dcdbaa8b_as\n")
fcfg.write("DatasetType = Seasonal_Forecast\n")
fcfg.write("OperationType = Download\n")
fcfg.write("EarliestDate = {0}\n".format(startdate.strftime("%m/%d/%Y")))
if (enddate - startdate).days > 180:
enddate = startdate + timedelta(180)
log.warning("NMME forecast range cannot be longer than 180 days. Resetting end date!")
fcfg.write("LatestDate = {0}\n".format(enddate.strftime("%m/%d/%Y")))
fcfg.write("SeasonalEnsemble = ens{0:02d}\n".format(ens))
fcfg.write("SeasonalVariable = {0}\n".format(varname))
coords = "[{0},{1}],[{2},{1}],[{2},{3}],[{0},{3}],[{0},{1}]".format(*bbox)
fcfg.write("GeometryCoords = [{0}]\n".format(coords))
fcfg.write("BaseURL = https://climateserv.servirglobal.net/chirps/scriptAccess")
return fcfg.name
def _setEnsemble(dbname, sname, ens):
"""Set ensemble column in NMME data table."""
db = dbio.connect(dbname)
cur = db.cursor()
cur.execute("select * from raster_resampled where sname='{0}' and tname like 'nmme_%'".format(sname))
tables = [r[1] for r in cur.fetchall()]
for table in tables:
if not dbio.columnExists(dbname, sname, table, "ensemble"):
cur.execute("alter table {0}.{1} add column ensemble int".format(sname, table))
db.commit()
sql = "update {0}.{1} set ensemble = {2} where ensemble is null".format(sname, table, ens)
cur.execute(sql)
db.commit()
cur.close()
db.close()
def ingest(dbname, varname, filename, dt, ens):
"""Imports Geotif *filename* into database *dbname*."""
schema = {'Precipitation': 'precip', 'Temperature': 'tmax'}
db = dbio.connect(dbname)
cur = db.cursor()
cur.execute(
"select * from information_schema.tables where table_schema='{0}' and table_name='nmme'".format(schema[varname]))
if not bool(cur.rowcount):
cur.execute("create table {0}.nmme (rid serial not null primary key, fdate date, ensemble int, rast raster)".format(
schema[varname]))
db.commit()
cur.execute("select * from {0}.nmme where fdate='{1}' and ensemble = {2}".format(schema[varname], dt.strftime("%Y-%m-%d"), ens))
if bool(cur.rowcount):
cur.execute("delete from {0}.nmme where fdate='{1}' and ensemble = {2}".format(schema[varname], dt.strftime("%Y-%m-%d"), ens))
db.commit()
dbio.ingest(dbname, filename, dt, "{0}.nmme".format(schema[varname]), False, False)
sql = "update {0}.nmme set ensemble = {1} where ensemble is null".format(schema[varname], ens)
cur.execute(sql)
db.commit()
cur.execute("select * from raster_resampled where sname='{0}' and tname like 'nmme_%'".format(schema[varname]))
tables = [r[1] for r in cur.fetchall()]
for table in tables:
cur.execute("select * from {0}.{1} where fdate='{2}' and ensemble = {3}".format(schema[varname], table, dt.strftime("%Y-%m-%d"), ens))
if bool(cur.rowcount):
cur.execute("delete from {0}.{1} where fdate='{2}' and ensemble = {3}".format(schema[varname], table, dt.strftime("%Y-%m-%d"), ens))
db.commit()
tilesize = (10, 10)
dbio.createResampledTables(dbname, schema[varname], "nmme", dt, tilesize, False, "and ensemble={0}".format(ens))
_setEnsemble(dbname, schema[varname], ens)
cur.close()
db.close()
def download(dbname, dts, bbox=None):
"""Downloads NMME ensemble forecast data from the SERVIR ClimateSERV
data server, and imports them into the database *dbname*. Optionally uses
a bounding box to limit the region with [minlon, minlat, maxlon, maxlat]."""
log = logging.getLogger(__name__)
nens = 10
varnames = ["Precipitation", "Temperature"]
outpath = tempfile.mkdtemp()
for varname in varnames:
for e in range(nens):
configfile = _writeCservConfig(bbox, dts[0], dts[-1], varname, e+1)
proc = subprocess.Popen(["python", "{0}/ClimateSERV_API_Access.py".format(rpath.scripts), "-config", configfile, "-outfile", "{0}/{1}_{2}.zip".format(outpath, varname, e+1)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = proc.communicate()
log.debug(out)
f = zipfile.ZipFile("{0}/{1}_{2}.zip".format(outpath, varname, e+1))
filenames = filter(lambda s: s.endswith("tif"), f.namelist())
f.extractall(outpath, filenames)
for filename in filenames:
dt = datetime.strptime(filename.split("_")[-1][1:-4], "%Y%m%d")
if varname == "Temperature":
# convert from Kelvin to Celsius
proc = subprocess.Popen(["gdal_calc.py", "-A", "{0}/{1}".format(outpath, filename), "--calc=A-273.15", "--outfile={0}/C{1}".format(outpath, filename)])
out, err = proc.communicate()
log.debug(out)
filename = "C" + filename
ingest(dbname, varname, "{0}/{1}".format(outpath, filename), dt, e+1)
os.remove(configfile)
shutil.rmtree(outpath)
def _queryDataset(dbname, tablename, name, startyear, startmonth, startday, endyear, endmonth, endday, ens=None):
"""Retrieve meteorological forcing dataset from database."""
temptable = ''.join(random.SystemRandom().choice(string.ascii_letters) for _ in range(8))
if ens is None:
sql = "create table {0}_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from {4},{5}.basin where fdate=date'{1}-{2}-{3}' and st_intersects(rast,geom))".format(temptable, startyear, startmonth, startday, tablename, name)
else:
sql = "create table {0}_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from {4},{5}.basin where fdate=date'{1}-{2}-{3}' and st_intersects(rast,geom) and ensemble={6})".format(temptable, startyear, startmonth, startday, tablename, name, ens)
db = dbio.connect(dbname)
cur = db.cursor()
cur.execute(sql)
cur.execute("create index {0}_xy_r on {0}_xy(tile)".format(temptable))
db.commit()
if ens is None:
sql = "select gid,fdate,st_nearestvalue(rast,x,y) from {0},{1}_xy where rid=tile and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' order by gid,fdate".format(tablename, temptable, startyear, startmonth, startday, endyear, endmonth, endday)
else:
sql = "select gid,fdate,st_nearestvalue(rast,x,y) from {0},{1}_xy where rid=tile and fdate>=date'{2}-{3}-{4}' and fdate<=date'{5}-{6}-{7}' and ensemble={8} order by gid,fdate".format(tablename, temptable, startyear, startmonth, startday, endyear, endmonth, endday, ens)
cur.execute(sql)
data = [r for r in cur.fetchall()]
cur.execute("drop table {0}_xy".format(temptable))
db.commit()
cur.close()
db.close()
return data
def _getForcings(options, models, res):
"""Retrieve meteorological forcings for ensemble."""
nens = len(models)
db = dbio.connect(models.dbname)
cur = db.cursor()
rtables = dbio.getResampledTables(models.dbname, options, res)
rsmp = rtables['precip'].split("_")[1]
prec = [None] * nens
tmax = [None] * nens
tmin = [None] * nens
temp = [None] * nens
for e in range(nens):
prec[e] = _queryDataset(models.dbname, "precip.nmme_{0}".format(rsmp), models.name, models.startyear, models.startmonth, models.startday, models.endyear, models.endmonth, models.endday, e+1)
temp[e] = _queryDataset(models.dbname, "tmax.nmme_{0}".format(rsmp), models.name, models.startyear, models.startmonth, models.startday, models.endyear, models.endmonth, models.endday, e+1)
sql = "select distinct(date_part('year',fdate)) from tmax.{0}".format(rtables['tmax'])
cur.execute(sql)
years = [r[0] for r in cur.fetchall()]
if len(years) > 2:
years.remove(min(years))
years.remove(max(years))
if len(years) > 0:
ndays = (datetime(models.endyear, models.endmonth, models.endday) - datetime(models.startyear, models.startmonth, models.startday)).days
yr = int(np.random.choice(years))
t0 = datetime(yr, models.startmonth, models.startday)
t1 = t0 + timedelta(ndays)
vtmax = _queryDataset(models.dbname, "tmax.{0}".format(rtables['tmax']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
vtmin = _queryDataset(models.dbname, "tmin.{0}".format(rtables['tmin']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
wind = _queryDataset(models.dbname, "wind.{0}".format(rtables['wind']), models.name, t0.year, t0.month, t0.day, t1.year, t1.month, t1.day)
for e in range(nens):
tmax[e] = [(vtmax[i][0], vtmax[i][1], temp[e][i][2] + 0.5 * (vtmax[i][2] - vtmin[i][2])) for i in range(len(vtmax))]
tmin[e] = [(vtmin[i][0], vtmin[i][1], temp[e][i][2] - 0.5 * (vtmax[i][2] - vtmin[i][2])) for i in range(len(vtmin))]
else:
prec = tmax = tmin = wind = None
return prec, tmax, tmin, wind
def generate(options, models):
"""Generate meteorological forecast forcings from downscaled NMME data."""
log = logging.getLogger(__name__)
options['vic']['tmax'] = options['vic']['temperature']
options['vic']['tmin'] = options['vic']['temperature']
db = dbio.connect(models.dbname)
cur = db.cursor()
dt0 = datetime(models.startyear, models.startmonth, models.startday)
dt1 = datetime(models.endyear, models.endmonth, models.endday)
# check if forecast period exists in NMME data
sql = "select count(distinct(fdate)) from precip.nmme where fdate>=date'{0}' and fdate<=date'{1}'".format(dt0.strftime("%Y-%m-%d"), dt1.strftime("%Y-%m-%d"))
cur.execute(sql)
ndata = cur.fetchone()[0]
if ndata == (dt1 - dt0).days + 1:
prec, tmax, tmin, wind = _getForcings(options, models, models.res)
if tmax is None or tmin is None or wind is None:
log.error("No data found to generate VIC forcings for NMME forecast. Exiting...")
sys.exit()
else:
for e in range(len(models)):
models[e].writeForcings(prec[e], tmax[e], tmin[e], wind)
else:
log.error("Not enough data found for requested forecast period! Exiting...")
sys.exit()
cur.close()
db.close()
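# For illustration: given bbox = [minlon, minlat, maxlon, maxlat], the
# configuration written by _writeCservConfig has roughly the shape below
# (dates, ensemble number, variable and coordinates are placeholders):
#
#     [DEFAULT]
#     APIAccessKey = <key>
#     DatasetType = Seasonal_Forecast
#     OperationType = Download
#     EarliestDate = 01/01/2014
#     LatestDate = 06/30/2014
#     SeasonalEnsemble = ens01
#     SeasonalVariable = Precipitation
#     GeometryCoords = [[minlon,minlat],[maxlon,minlat],[maxlon,maxlat],[minlon,maxlat],[minlon,minlat]]
#     BaseURL = https://climateserv.servirglobal.net/chirps/scriptAccess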
|
{
"content_hash": "dcd3c8b5fd0e4bc61457a7c783cce949",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 309,
"avg_line_length": 51.342465753424655,
"alnum_prop": 0.6359836357168267,
"repo_name": "nasa/RHEAS",
"id": "db957c130576df5c976383f0862de1e937aee675",
"size": "11244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datasets/nmme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "412"
},
{
"name": "Python",
"bytes": "356616"
}
],
"symlink_target": ""
}
|
""" P1 tests for memory resource limits
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.lib.base import (
Account,
ServiceOffering,
VirtualMachine,
Resources,
Domain
)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
wait_for_cleanup,
findSuitableHostForMigration,
get_resource_type,
update_resource_count
)
from marvin.lib.utils import cleanup_resources
from marvin.codes import ERROR_NO_HOST_FOR_MIGRATION
class Services:
"""Test memory resource limit services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "resource",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 2048, # In MBs
},
"virtual_machine": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'KVM',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"project": {
"name": "Project",
"displaytext": "Test project",
},
"domain": {
"name": "Domain",
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10,
"mode": 'advanced',
# Networking mode: Advanced, Basic
}
class TestDomainMemoryLimits(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDomainMemoryLimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering, ]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def createInstance(self, service_off, networks=None, api_client=None):
"""Creates an instance in account"""
self.debug("Deploying an instance in account: %s" %
self.account.name)
if api_client is None:
api_client = self.apiclient
try:
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
networkids=networks,
serviceofferingid=service_off.id)
vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
self.assertIsInstance(vms,
list,
"List VMs should return a valid response")
self.assertEqual(vms[0].state, "Running",
"Vm state should be running after deployment")
return vm
except Exception as e:
self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self):
self.debug("Creating a sub-domain under: %s" % self.domain.name)
self.child_domain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.child_do_admin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_1.id
)
# Cleanup the resources created at end of test
self.cleanup.append(self.child_do_admin_1)
self.cleanup.append(self.child_domain_1)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=6144,
account=self.child_do_admin_1.name,
domainid=self.child_do_admin_1.domainid)
self.child_domain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.child_do_admin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_2.id)
# Cleanup the resources created at end of test
self.cleanup.append(self.child_do_admin_2)
self.cleanup.append(self.child_domain_2)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=6144,
account=self.child_do_admin_2.name,
domainid=self.child_do_admin_2.domainid)
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_01_change_service_offering(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Upgrade and downgrade service offering
# 4. Resource count should list properly for the domain
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
#Resetting memory count in service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
self.debug("Stopping instance: %s" % vm.name)
try:
vm.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_stop = account_list[0].memorytotal
self.assertEqual(resource_count_after_stop, expected_resource_count,
"Resource count should be same after stopping the instance")
self.debug("Creating service offering with 5 GB RAM")
self.services["service_offering"]["memory"] = 5120
self.service_offering_5gb = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering_5gb)
self.debug(
"Upgrade service offering of instance %s from %s to %s" %
(vm.name,
self.service_offering.name,
self.service_offering_5gb.name))
try:
vm.change_service_offering(self.apiclient,
serviceOfferingId=self.service_offering_5gb.id)
except Exception as e:
self.fail("Failed to change service offering of vm %s - %s" %
(vm.name, e))
update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_upgrade = account_list[0].memorytotal
self.debug(resource_count_after_upgrade)
self.assertTrue(resource_count_after_upgrade > resource_count_after_stop,
"Resource count should be more than before, after upgrading service offering")
self.debug(
"Down grade service offering of instance %s from %s to %s" %
(vm.name,
self.service_offering_5gb.name,
self.service_offering.name))
try:
vm.change_service_offering(self.apiclient,
serviceOfferingId=self.service_offering.id)
except Exception as e:
self.fail("Failed to change service offering of vm %s - %s" %
(vm.name, e))
update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_downgrade = account_list[0].memorytotal
self.debug(resource_count_after_downgrade)
self.assertTrue(resource_count_after_downgrade < resource_count_after_upgrade,
"Resource count should be less than before, after downgrading service offering")
self.debug("Starting instance: %s" % vm.name)
try:
vm.start(self.apiclient)
except Exception as e:
self.fail("Failed to start instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_start = account_list[0].memorytotal
self.assertTrue(resource_count_after_start == resource_count_after_downgrade,
"Resource count should be same after starting the instance")
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="true")
def test_02_migrate_vm(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Migrate vm to another host, resource count should list properly.
#Resetting memory count in service offering
self.hypervisor = self.testClient.getHypervisorInfo()
if self.hypervisor.lower() in ['lxc']:
self.skipTest("vm migrate is not supported in %s" % self.hypervisor)
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
host = findSuitableHostForMigration(self.apiclient, vm.id)
if host is None:
self.skipTest(ERROR_NO_HOST_FOR_MIGRATION)
self.debug("Migrating instance: %s to host: %s" %
(vm.name, host.name))
try:
vm.migrate(self.apiclient, host.id)
except Exception as e:
self.fail("Failed to migrate instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_migrate = account_list[0].memorytotal
self.assertTrue(resource_count_after_migrate == resource_count,
"Resource count should be same after migrating the instance")
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_03_delete_vm(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Delete vm, resource count should list as 0 after delete operation.
# Resetting the memory count of service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
self.debug("Destroying instance: %s" % vm.name)
try:
vm.delete(self.apiclient)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
# Wait for expunge interval to cleanup Memory
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_delete = account_list[0].memorytotal
self.assertEqual(resource_count_after_delete, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_04_deploy_multiple_vm(self):
"""Test Deploy multiple VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM
# 2. Deploy multiple VMs with this service offering
# 3. List Resource count for the root admin Memory usage
# 4. Memory usage should list properly
# Resetting the memory count of service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm_1 = self.createInstance(service_off=self.service_offering, api_client=api_client)
vm_2 = self.createInstance(service_off=self.service_offering, api_client=api_client)
vm_3 = self.createInstance(service_off=self.service_offering, api_client=api_client)
self.debug("Deploying instance - Memory capacity is fully utilized")
with self.assertRaises(Exception):
self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"]) * 3 #Total 3 VMs
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
vm_2.delete(self.apiclient)
vm_3.delete(self.apiclient)
return
class TestMultipleChildDomainsMemory(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMultipleChildDomainsMemory, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def createInstance(self, account, service_off, networks=None, api_client=None):
"""Creates an instance in account"""
self.debug("Deploying an instance in account: %s" %
account.name)
if api_client is None:
api_client = self.apiclient
try:
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=account.name,
domainid=account.domainid,
networkids=networks,
serviceofferingid=service_off.id)
vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
self.assertIsInstance(vms,
list,
"List VMs should return a valid response")
self.assertEqual(vms[0].state, "Running",
"Vm state should be running after deployment")
return vm
except Exception as e:
self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self):
self.debug("Creating a domain under: %s" % self.domain.name)
self.parent_domain = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.parentd_admin = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.domain.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=4096,
account=self.parentd_admin.name,
domainid=self.parentd_admin.domainid)
self.debug("Creating a sub-domain under: %s" % self.parent_domain.name)
self.cdomain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
self.debug("Creating a sub-domain under: %s" % self.parent_domain.name)
self.cdomain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
self.cadmin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_1.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.cdomain_1.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
domainid=self.cadmin_1.domainid)
self.debug("Updating the Memory resource count for account: %s" %
self.cadmin_1.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
account=self.cadmin_1.name,
domainid=self.cadmin_1.domainid)
self.cadmin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_2.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.cdomain_2.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
domainid=self.cadmin_2.domainid)
self.debug("Updating the Memory resource count for domain: %s" %
self.cadmin_2.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
account=self.cadmin_2.name,
domainid=self.cadmin_2.domainid)
# Cleanup the resources created at end of test
self.cleanup.append(self.cadmin_1)
self.cleanup.append(self.cadmin_2)
self.cleanup.append(self.cdomain_1)
self.cleanup.append(self.cdomain_2)
self.cleanup.append(self.parentd_admin)
self.cleanup.append(self.parent_domain)
users = {
self.parent_domain: self.parentd_admin,
self.cdomain_1: self.cadmin_1,
self.cdomain_2: self.cadmin_2
}
return users
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_01_multiple_child_domains(self):
"""Test memory limits with multiple child domains"""
# Validate the following
# 1. Create Domain1 with 4 GB RAM and 2 child domains with 2 GB
# each.
# 2. Deploy VM's by Domain1 admin1/user1/ Domain2 user1/Admin1 account
# and verify the resource updates
# 3. Deploy VM by admin account after reaching max parent domain limit
# 4. Deploy VM with child account after reaching max child domain limit
# 5. Delete user account and verify the resource updates
# 6. Destroy user/admin account VM's and verify the child & Parent
# domain resource updates
self.debug("Creating service offering with 2 GB RAM")
self.services["service_offering"]["memory"] = 2048
self.service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
api_client_cadmin_1 = self.testClient.getUserApiClient(
UserName=self.cadmin_1.name,
DomainName=self.cadmin_1.domain)
api_client_cadmin_2 = self.testClient.getUserApiClient(
UserName=self.cadmin_2.name,
DomainName=self.cadmin_2.domain)
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
vm_1 = self.createInstance(account=self.cadmin_1,
service_off=self.service_offering, api_client=api_client_cadmin_1)
vm_2 = self.createInstance(account=self.cadmin_2,
service_off=self.service_offering, api_client=api_client_cadmin_2)
account_list = Account.list(self.apiclient, id=self.cadmin_1.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_1 = account_list[0].memorytotal
self.debug(resource_count_cadmin_1)
account_list = Account.list(self.apiclient, id=self.cadmin_2.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_2 = account_list[0].memorytotal
self.debug(resource_count_cadmin_2)
self.debug(
"Creating instance when Memory limit is fully used in parent domain")
with self.assertRaises(Exception):
self.createInstance(account=self.cadmin_1,
service_off=self.service_offering, api_client=api_client_cadmin_1)
self.debug(
"Creating instance when Memory limit is fully used in child domain")
with self.assertRaises(Exception):
self.createInstance(account=self.cadmin_2,
service_off=self.service_offering, api_client=api_client_cadmin_2)
self.debug("Destroying instances: %s, %s" % (vm_1.name, vm_2.name))
try:
vm_1.delete(self.apiclient)
vm_2.delete(self.apiclient)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
self.debug("Checking resource count for account: %s" % self.cadmin_1.name)
account_list = Account.list(self.apiclient, id=self.cadmin_1.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_1 = account_list[0].memorytotal
self.assertEqual(resource_count_cadmin_1, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
self.debug("Checking resource count for account: %s" % self.cadmin_2.name)
        account_list = Account.list(self.apiclient, id=self.cadmin_2.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_2 = account_list[0].memorytotal
self.assertEqual(resource_count_cadmin_2, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
return
|
{
"content_hash": "3c3a9edb833856364a74176e574c7b8f",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 137,
"avg_line_length": 45.575129533678755,
"alnum_prop": 0.5067075943610733,
"repo_name": "wido/cloudstack",
"id": "51e92ce67f18c53abed5987bc97190bcf01e714a",
"size": "35969",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/integration/component/test_mm_domain_limits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10890"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "358651"
},
{
"name": "Dockerfile",
"bytes": "2374"
},
{
"name": "FreeMarker",
"bytes": "4887"
},
{
"name": "Groovy",
"bytes": "146420"
},
{
"name": "HTML",
"bytes": "149088"
},
{
"name": "Java",
"bytes": "36088724"
},
{
"name": "JavaScript",
"bytes": "7976318"
},
{
"name": "Python",
"bytes": "13363686"
},
{
"name": "Ruby",
"bytes": "37714"
},
{
"name": "Shell",
"bytes": "784058"
},
{
"name": "XSLT",
"bytes": "58008"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import base64
import datetime
import boto
from boto.ec2.instance import Reservation, InstanceAttribute
from boto.exception import EC2ResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_ec2
from tests.helpers import requires_boto_gte
################ Test Readme ###############
def add_servers(ami_id, count):
conn = boto.connect_ec2()
for index in range(count):
conn.run_instances(ami_id)
@mock_ec2
def test_add_servers():
add_servers('ami-1234abcd', 2)
conn = boto.connect_ec2()
reservations = conn.get_all_instances()
assert len(reservations) == 2
instance1 = reservations[0].instances[0]
assert instance1.image_id == 'ami-1234abcd'
############################################
@freeze_time("2014-01-01 05:00:00")
@mock_ec2
def test_instance_launch_and_terminate():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
instance.state.should.equal('pending')
reservations = conn.get_all_instances()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instance = instances[0]
instance.id.should.equal(instance.id)
instance.state.should.equal('running')
instance.launch_time.should.equal("2014-01-01T05:00:00.000Z")
instance.vpc_id.should.equal(None)
instance.placement.should.equal('us-east-1a')
root_device_name = instance.root_device_name
instance.block_device_mapping[root_device_name].status.should.equal('in-use')
volume_id = instance.block_device_mapping[root_device_name].volume_id
volume_id.should.match(r'vol-\w+')
volume = conn.get_all_volumes(volume_ids=[volume_id])[0]
volume.attach_data.instance_id.should.equal(instance.id)
volume.status.should.equal('in-use')
conn.terminate_instances([instance.id])
reservations = conn.get_all_instances()
instance = reservations[0].instances[0]
instance.state.should.equal('terminated')
@freeze_time("2014-01-01 05:00:00")
@mock_ec2
def test_instance_attach_volume():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
vol1 = conn.create_volume(size=36, zone=conn.region.name)
vol1.attach(instance.id, "/dev/sda1")
vol1.update()
vol2 = conn.create_volume(size=65, zone=conn.region.name)
vol2.attach(instance.id, "/dev/sdb1")
vol2.update()
vol3 = conn.create_volume(size=130, zone=conn.region.name)
vol3.attach(instance.id, "/dev/sdc1")
vol3.update()
reservations = conn.get_all_instances()
instance = reservations[0].instances[0]
instance.block_device_mapping.should.have.length_of(3)
for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]):
v.attach_data.instance_id.should.equal(instance.id)
        v.attach_data.attach_time.should.equal(instance.launch_time)  # comparable because of the freeze_time decorator
        v.create_time.should.equal(instance.launch_time)  # comparable because of the freeze_time decorator
v.region.name.should.equal(instance.region.name)
v.status.should.equal('in-use')
@mock_ec2
def test_get_instances_by_id():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=2)
instance1, instance2 = reservation.instances
reservations = conn.get_all_instances(instance_ids=[instance1.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(1)
reservation.instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(2)
instance_ids = [instance.id for instance in reservation.instances]
instance_ids.should.equal([instance1.id, instance2.id])
# Call get_all_instances with a bad id should raise an error
with assert_raises(EC2ResponseError) as cm:
conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"])
cm.exception.code.should.equal('InvalidInstanceID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_get_instances_filtering_by_state():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
conn.terminate_instances([instance1.id])
reservations = conn.get_all_instances(filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1)
# Since we terminated instance1, only instance2 and instance3 should be returned
instance_ids = [instance.id for instance in reservations[0].instances]
set(instance_ids).should.equal(set([instance2.id, instance3.id]))
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1)
instance_ids = [instance.id for instance in reservations[0].instances]
instance_ids.should.equal([instance2.id])
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'})
list(reservations).should.equal([])
# get_all_instances should still return all 3
reservations = conn.get_all_instances()
reservations[0].instances.should.have.length_of(3)
conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
@mock_ec2
def test_get_instances_filtering_by_instance_id():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
reservations = conn.get_all_instances(filters={'instance-id': instance1.id})
# get_all_instances should return just instance1
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_instances(filters={'instance-id': [instance1.id, instance2.id]})
# get_all_instances should return two
reservations[0].instances.should.have.length_of(2)
reservations = conn.get_all_instances(filters={'instance-id': 'non-existing-id'})
reservations.should.have.length_of(0)
@mock_ec2
def test_get_instances_filtering_by_instance_type():
conn = boto.connect_ec2()
reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small')
instance1 = reservation1.instances[0]
reservation2 = conn.run_instances('ami-1234abcd', instance_type='m1.small')
instance2 = reservation2.instances[0]
reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro')
instance3 = reservation3.instances[0]
reservations = conn.get_all_instances(filters={'instance-type': 'm1.small'})
# get_all_instances should return instance1,2
reservations.should.have.length_of(2)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
    instance_ids = [reservations[0].instances[0].id,
                    reservations[1].instances[0].id]
set(instance_ids).should.equal(set([instance1.id, instance2.id]))
reservations = conn.get_all_instances(filters={'instance-type': 't1.micro'})
# get_all_instances should return one
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
reservations = conn.get_all_instances(filters={'instance-type': ['t1.micro', 'm1.small']})
reservations.should.have.length_of(3)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
reservations[2].instances.should.have.length_of(1)
instance_ids = [
reservations[0].instances[0].id,
reservations[1].instances[0].id,
reservations[2].instances[0].id,
]
set(instance_ids).should.equal(set([instance1.id, instance2.id, instance3.id]))
reservations = conn.get_all_instances(filters={'instance-type': 'bogus'})
    # A bogus instance-type should return no reservations
reservations.should.have.length_of(0)
@mock_ec2
def test_get_instances_filtering_by_reason_code():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
reservations = conn.get_all_instances(filters={'state-reason-code': 'Client.UserInitiatedShutdown'})
# get_all_instances should return instance1 and instance2
reservations[0].instances.should.have.length_of(2)
set([instance1.id, instance2.id]).should.equal(set([i.id for i in reservations[0].instances]))
reservations = conn.get_all_instances(filters={'state-reason-code': ''})
# get_all_instances should return instance 3
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
@mock_ec2
def test_get_instances_filtering_by_source_dest_check():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=2)
instance1, instance2 = reservation.instances
conn.modify_instance_attribute(instance1.id, attribute='sourceDestCheck', value=False)
source_dest_check_false = conn.get_all_instances(filters={'source-dest-check': 'false'})
source_dest_check_true = conn.get_all_instances(filters={'source-dest-check': 'true'})
source_dest_check_false[0].instances.should.have.length_of(1)
source_dest_check_false[0].instances[0].id.should.equal(instance1.id)
source_dest_check_true[0].instances.should.have.length_of(1)
source_dest_check_true[0].instances[0].id.should.equal(instance2.id)
@mock_ec2
def test_get_instances_filtering_by_vpc_id():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc1 = conn.create_vpc("10.0.0.0/16")
subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27")
reservation1 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet1.id)
instance1 = reservation1.instances[0]
vpc2 = conn.create_vpc("10.1.0.0/16")
subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27")
reservation2 = conn.run_instances('ami-1234abcd', min_count=1, subnet_id=subnet2.id)
instance2 = reservation2.instances[0]
reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id})
reservations1.should.have.length_of(1)
reservations1[0].instances.should.have.length_of(1)
reservations1[0].instances[0].id.should.equal(instance1.id)
reservations2 = conn.get_all_instances(filters={'vpc-id': vpc2.id})
reservations2.should.have.length_of(1)
reservations2[0].instances.should.have.length_of(1)
reservations2[0].instances[0].id.should.equal(instance2.id)
@mock_ec2
def test_get_instances_filtering_by_architecture():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=1)
instance = reservation.instances
reservations = conn.get_all_instances(filters={'architecture': 'x86_64'})
# get_all_instances should return the instance
reservations[0].instances.should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_tag():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag('tag1', 'value1')
instance1.add_tag('tag2', 'value2')
instance2.add_tag('tag1', 'value1')
instance2.add_tag('tag2', 'wrong value')
instance3.add_tag('tag2', 'value2')
reservations = conn.get_all_instances(filters={'tag:tag0' : 'value0'})
# get_all_instances should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1'})
# get_all_instances should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_instances(filters={'tag:tag1' : 'value1', 'tag:tag2' : 'value2'})
# get_all_instances should return the instance with both tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_instances(filters={'tag:tag2' : ['value2', 'bogus']})
# get_all_instances should return both instances with one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2
def test_get_instances_filtering_by_tag_value():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag('tag1', 'value1')
instance1.add_tag('tag2', 'value2')
instance2.add_tag('tag1', 'value1')
instance2.add_tag('tag2', 'wrong value')
instance3.add_tag('tag2', 'value2')
reservations = conn.get_all_instances(filters={'tag-value' : 'value0'})
# get_all_instances should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_instances(filters={'tag-value' : 'value1'})
# get_all_instances should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'value1']})
# get_all_instances should return both instances with one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
reservations = conn.get_all_instances(filters={'tag-value' : ['value2', 'bogus']})
# get_all_instances should return both instances with one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2
def test_get_instances_filtering_by_tag_name():
conn = boto.connect_ec2()
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag('tag1')
instance1.add_tag('tag2')
instance2.add_tag('tag1')
instance2.add_tag('tag2X')
instance3.add_tag('tag3')
reservations = conn.get_all_instances(filters={'tag-key' : 'tagX'})
# get_all_instances should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_instances(filters={'tag-key' : 'tag1'})
# get_all_instances should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_instances(filters={'tag-key' : ['tag1', 'tag3']})
# get_all_instances should return both instances with one of the acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
@mock_ec2
def test_instance_start_and_stop():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', min_count=2)
instances = reservation.instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
stopped_instances = conn.stop_instances(instance_ids)
for instance in stopped_instances:
instance.state.should.equal('stopping')
started_instances = conn.start_instances([instances[0].id])
started_instances[0].state.should.equal('pending')
@mock_ec2
def test_instance_reboot():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.reboot()
instance.state.should.equal('pending')
@mock_ec2
def test_instance_attribute_instance_type():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("instanceType", "m1.small")
instance_attribute = instance.get_attribute("instanceType")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get('instanceType').should.equal("m1.small")
@mock_ec2
def test_modify_instance_attribute_security_groups():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
sg_id = 'sg-1234abcd'
sg_id2 = 'sg-abcd4321'
instance.modify_attribute("groupSet", [sg_id, sg_id2])
instance_attribute = instance.get_attribute("groupSet")
instance_attribute.should.be.a(InstanceAttribute)
group_list = instance_attribute.get('groupSet')
any(g.id == sg_id for g in group_list).should.be.ok
any(g.id == sg_id2 for g in group_list).should.be.ok
@mock_ec2
def test_instance_attribute_user_data():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("userData", "this is my user data")
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("userData").should.equal("this is my user data")
@mock_ec2
def test_instance_attribute_source_dest_check():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
# Default value is true
instance.sourceDestCheck.should.equal('true')
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
    # Set to false (note: Boto converts bool to string, e.g. 'false')
instance.modify_attribute("sourceDestCheck", False)
instance.update()
instance.sourceDestCheck.should.equal('false')
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(False)
# Set back to true
instance.modify_attribute("sourceDestCheck", True)
instance.update()
instance.sourceDestCheck.should.equal('true')
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
@mock_ec2
def test_user_data_with_run_instance():
user_data = b"some user data"
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', user_data=user_data)
instance = reservation.instances[0]
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
retrieved_user_data = instance_attribute.get("userData").encode('utf-8')
decoded_user_data = base64.decodestring(retrieved_user_data)
decoded_user_data.should.equal(b"some user data")
@mock_ec2
def test_run_instance_with_security_group_name():
conn = boto.connect_ec2('the_key', 'the_secret')
group = conn.create_security_group('group1', "some description")
reservation = conn.run_instances('ami-1234abcd',
security_groups=['group1'])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2
def test_run_instance_with_security_group_id():
conn = boto.connect_ec2('the_key', 'the_secret')
group = conn.create_security_group('group1', "some description")
reservation = conn.run_instances('ami-1234abcd',
security_group_ids=[group.id])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2
def test_run_instance_with_instance_type():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro")
instance = reservation.instances[0]
instance.instance_type.should.equal("t1.micro")
@mock_ec2
def test_run_instance_with_default_placement():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1a")
@mock_ec2
def test_run_instance_with_placement():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b")
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1b")
@mock_ec2
def test_run_instance_with_subnet():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id)
instance = reservation.instances[0]
instance.subnet_id.should.equal(subnet.id)
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
@mock_ec2
def test_run_instance_with_nic_autocreated():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
private_ip = "54.0.0.1"
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id,
security_groups=[security_group1.name],
security_group_ids=[security_group2.id],
private_ip_address=private_ip)
instance = reservation.instances[0]
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
eni = all_enis[0]
instance.interfaces.should.have.length_of(1)
instance.interfaces[0].id.should.equal(eni.id)
instance.subnet_id.should.equal(subnet.id)
instance.groups.should.have.length_of(2)
set([group.id for group in instance.groups]).should.equal(set([security_group1.id,security_group2.id]))
eni.subnet_id.should.equal(subnet.id)
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
eni.private_ip_addresses.should.have.length_of(1)
eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@mock_ec2
def test_run_instance_with_nic_preexisting():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
private_ip = "54.0.0.1"
eni = conn.create_network_interface(subnet.id, private_ip, groups=[security_group1.id])
# Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications...
# annoying, but generates the desired querystring.
from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection
interface = NetworkInterfaceSpecification(network_interface_id=eni.id, device_index=0)
interfaces = NetworkInterfaceCollection(interface)
# end Boto objects
reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces,
security_group_ids=[security_group2.id])
instance = reservation.instances[0]
instance.subnet_id.should.equal(subnet.id)
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
instance.interfaces.should.have.length_of(1)
instance_eni = instance.interfaces[0]
instance_eni.id.should.equal(eni.id)
instance_eni.subnet_id.should.equal(subnet.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
instance_eni.private_ip_addresses.should.have.length_of(1)
instance_eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@requires_boto_gte("2.32.0")
@mock_ec2
def test_instance_with_nic_attach_detach():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
reservation = conn.run_instances('ami-1234abcd', security_group_ids=[security_group1.id])
instance = reservation.instances[0]
eni = conn.create_network_interface(subnet.id, groups=[security_group2.id])
# Check initial instance and ENI data
instance.interfaces.should.have.length_of(1)
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Attach
conn.attach_network_interface(eni.id, instance.id, device_index=1)
# Check attached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(2)
instance_eni = instance.interfaces[1]
instance_eni.id.should.equal(eni.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
# Detach
conn.detach_network_interface(instance_eni.attachment.id)
# Check detached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(1)
eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Detach with invalid attachment ID
with assert_raises(EC2ResponseError) as cm:
conn.detach_network_interface('eni-attach-1234abcd')
cm.exception.code.should.equal('InvalidAttachmentID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_ec2_classic_has_public_ip_address():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
instance = reservation.instances[0]
instance.ip_address.should_not.equal(None)
instance.public_dns_name.should.contain(instance.ip_address)
instance.private_ip_address.should_not.equal(None)
instance.private_dns_name.should.contain(instance.private_ip_address)
@mock_ec2
def test_run_instance_with_keypair():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
instance = reservation.instances[0]
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2('the_key', 'the_secret')
all_status = conn.get_all_instance_status()
len(all_status).should.equal(0)
@mock_ec2
def test_describe_instance_status_with_instances():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.run_instances('ami-1234abcd', key_name="keypair_name")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(1)
all_status[0].instance_status.status.should.equal('ok')
all_status[0].system_status.status.should.equal('ok')
@mock_ec2
def test_describe_instance_status_with_instance_filter():
conn = boto.connect_ec2('the_key', 'the_secret')
# We want to filter based on this one
reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
instance = reservation.instances[0]
# This is just to setup the test
conn.run_instances('ami-1234abcd', key_name="keypair_name")
all_status = conn.get_all_instance_status(instance_ids=[instance.id])
len(all_status).should.equal(1)
all_status[0].id.should.equal(instance.id)
    # Calling get_all_instance_status with a bad id should raise an error
with assert_raises(EC2ResponseError) as cm:
conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"])
cm.exception.code.should.equal('InvalidInstanceID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@requires_boto_gte("2.32.0")
@mock_ec2
def test_describe_instance_status_with_non_running_instances():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd', min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
all_running_status = conn.get_all_instance_status()
all_running_status.should.have.length_of(1)
all_running_status[0].id.should.equal(instance3.id)
all_running_status[0].state_name.should.equal('running')
all_status = conn.get_all_instance_status(include_all_instances=True)
all_status.should.have.length_of(3)
status1 = next((s for s in all_status if s.id == instance1.id), None)
status1.state_name.should.equal('stopped')
status2 = next((s for s in all_status if s.id == instance2.id), None)
status2.state_name.should.equal('terminated')
status3 = next((s for s in all_status if s.id == instance3.id), None)
status3.state_name.should.equal('running')
@mock_ec2
def test_get_instance_by_security_group():
conn = boto.connect_ec2('the_key', 'the_secret')
conn.run_instances('ami-1234abcd')
instance = conn.get_only_instances()[0]
security_group = conn.create_security_group('test', 'test')
conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id])
security_group_instances = security_group.instances()
assert len(security_group_instances) == 1
assert security_group_instances[0].id == instance.id
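# A minimal sketch of running just this module (assumption: a nose-style runner,
# which matches the assert_raises / sure-style assertions used above; substitute
# whatever test command the surrounding project actually uses):
#
#   nosetests tests/test_ec2/test_instances.py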
|
{
"content_hash": "4e39753cdf0ce33842c22ee06d99b35e",
"timestamp": "",
"source": "github",
"line_count": 807,
"max_line_length": 123,
"avg_line_length": 40.45105328376704,
"alnum_prop": 0.7073275333905159,
"repo_name": "tootedom/moto",
"id": "58b1f693a26510c448483a05ed162dd27689fa66",
"size": "32644",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_ec2/test_instances.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "230"
},
{
"name": "Python",
"bytes": "2098156"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import A, URL
from gluon.storage import Storage
from s3 import s3_fullname
T = current.T
settings = current.deployment_settings
"""
Template settings for NYC Prepared
"""
# Pre-Populate
settings.base.prepopulate = ("NYC",)
settings.base.system_name = T("NYC Prepared")
settings.base.system_name_short = T("NYC Prepared")
# Theme (folder to use for views/layout.html)
settings.base.theme = "NYC"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
settings.msg.parser = "NYC"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
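# e.g. with the two separators above, 1234567.89 is rendered as 1,234,567.89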
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
settings.fin.currencies = {
"USD" : T("United States Dollars"),
}
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
settings.security.self_registration = "index"
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# Uncomment this to request the Mobile Phone when a user registers
settings.auth.registration_requests_mobile_phone = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Roles that newly-registered users get automatically
#settings.auth.registration_roles = { 0: ["comms_dispatch"]}
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# #"volunteer":T("Volunteer")
# }
settings.auth.registration_link_user_to_default = "staff"
settings.security.policy = 5 # Controller, Function & Table ACLs
# Enable this to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
# Uncomment to disable checking that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "eden_nyc"
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",)
# -----------------------------------------------------------------------------
# Audit
def audit_write(method, tablename, form, record, representation):
if not current.auth.user:
# Don't include prepop
return False
if tablename in ("cms_post",
"org_facility",
"org_organisation",
"req_req",
):
# Perform normal Audit
return True
else:
# Don't Audit non user-visible resources
return False
settings.security.audit_write = audit_write
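# e.g. a logged-in user creating a cms_post or org_facility record is audited,
# while prepop imports (no current.auth.user) and all other tables are skipped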
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to have the Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
1: T("Other Warehouse")
}
settings.inv.send_types = {
#21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
#0: current.messages["NONE"],
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = { \
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
"location_id$addr_street",
)
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
"""
Function to decide which Marker to use for Facilities Map
@ToDo: Legend
"""
db = current.db
s3db = current.s3db
table = db.org_facility_type
ltable = db.org_site_facility_type
query = (ltable.site_id == record.site_id) & \
(ltable.facility_type_id == table.id)
rows = db(query).select(table.name)
types = [row.name for row in rows]
# Use Marker in preferential order
if "Hub" in types:
marker = "warehouse"
elif "Medical Clinic" in types:
marker = "hospital"
elif "Food" in types:
marker = "food"
elif "Relief Site" in types:
marker = "asset"
elif "Residential Building" in types:
marker = "residence"
#elif "Shelter" in types:
# marker = "shelter"
else:
# Unknown
marker = "office"
if settings.has_module("req"):
# Colour code by open/priority requests
reqs = record.reqs
if reqs == 3:
# High
marker = "%s_red" % marker
elif reqs == 2:
# Medium
marker = "%s_yellow" % marker
elif reqs == 1:
# Low
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "office").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
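# Illustrative sketch (hypothetical record): a facility typed "Food" with a
# high-priority open request (record.reqs == 3) resolves to the "food_red"
# marker name before the gis_marker lookup above.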
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
"""
Default the name to the Street Address
"""
form_vars = form.vars
name = form_vars.get("name", None)
if name:
return
address = form_vars.get("address", None)
if address:
form_vars.name = address
else:
# We need a default
form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)
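# e.g. a facility submitted with no name but an address (hypothetically
# "123 Main St") is saved with that address as its name; with neither,
# the represented location_id is used instead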
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Tell the client to request per-feature markers
s3db.configure("org_facility", marker_fn=facility_marker_fn)
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method not in ("read", "update"):
types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
if not types:
# Hide Private Residences
from s3 import FS
s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"
if r.interactive:
tablename = "org_facility"
table = s3db[tablename]
if not r.component and r.method in (None, "create", "update"):
from s3 import IS_LOCATION, S3LocationSelector, S3MultiSelectWidget
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION()
field.widget = S3LocationSelector(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
table.organisation_id.widget = S3MultiSelectWidget(multiple=False)
if r.get_vars.get("format", None) == "popup":
# Coming from req/create form
# Hide most Fields
from s3 import S3SQLCustomForm, S3SQLInlineComponent
# We default this onvalidation
table.name.notnull = False
table.name.requires = None
crud_form = S3SQLCustomForm(S3SQLInlineComponent(
"site_facility_type",
label = T("Facility Type"),
fields = [("", "facility_type_id")],
multiple = False,
required = True,
),
"name",
"location_id",
)
s3db.configure(tablename,
crud_form = crud_form,
onvalidation = org_facility_onvalidation,
)
return True
s3.prep = custom_prep
return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon.html import DIV, INPUT
from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
s3db = current.s3db
if r.tablename == "org_organisation":
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
else:
# Component
if r.component_id:
# Update form
db = current.db
otable = s3db.org_organisation
org = db(otable.id == r.component_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pe_id = org.pe_id
except:
current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
# Default
rss_import = None
else:
ctable = s3db.pr_contact
query = (ctable.pe_id == pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
mtable = s3db.org_group_membership
mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
create=dict(c="org",
f="group_membership_status",
label=str(T("Add New Status")),
parent="group_membership",
child="status_id"
))
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink(
"organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = False,
#widget = "hierarchy",
),
S3SQLInlineComponentMultiSelectWidget(
# activate hierarchical org_service:
#S3SQLInlineLink(
"service",
label = T("Services"),
field = "service_id",
# activate hierarchical org_service:
#leafonly = False,
#widget = "hierarchy",
),
S3SQLInlineComponent(
"group_membership",
label = T("Network"),
fields = [("", "group_id"),
("", "status_id"),
],
),
S3SQLInlineComponent(
"address",
label = T("Address"),
multiple = False,
# This is just Text - put into the Comments box for now
# Ultimately should go into location_id$addr_street
fields = [("", "comments")],
),
S3SQLInlineComponentMultiSelectWidget(
"location",
label = T("Neighborhoods Served"),
field = "location_id",
filterby = dict(field = "level",
options = "L4"
),
# @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
#cols = 5,
),
"phone",
S3SQLInlineComponent(
"contact",
name = "phone2",
label = T("Phone2"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value"),
#(T("Don't Import Feed"), "poll"),
],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"comments",
postprocess = pr_contact_postprocess,
)
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
# activate hierarchical org_service:
#from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter
filter_widgets = [
S3TextFilter(["name", "acronym"],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("group_membership.group_id",
label = T("Network"),
represent = "%(name)s",
#hidden = True,
),
S3LocationFilter("organisation_location.location_id",
label = T("Neighborhood"),
levels = ("L3", "L4"),
#hidden = True,
),
S3OptionsFilter("service_organisation.service_id",
#label = T("Service"),
#hidden = True,
),
# activate hierarchical org_service:
#S3HierarchyFilter("service_organisation.service_id",
# #label = T("Service"),
# #hidden = True,
# ),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#hidden = True,
),
]
list_fields = ["name",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
(T("Services"), "service.name"),
"phone",
(T("Email"), "email.value"),
"website"
#(T("Neighborhoods Served"), "location.name"),
]
s3db.configure("org_organisation",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive:
if r.component_name == "facility":
if r.method in (None, "create", "update"):
from s3 import IS_LOCATION, S3LocationSelector
table = s3db.org_facility
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION()
field.widget = S3LocationSelector(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
elif r.component_name == "human_resource":
# Don't assume that user is from same org/site as Contacts they create
r.component.table.site_id.default = None
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "rheader" in output:
# Custom Tabs
tabs = [(T("Basic Details"), None),
(T("Contacts"), "human_resource"),
(T("Facilities"), "facility"),
(T("Projects"), "project"),
(T("Assets"), "asset"),
]
output["rheader"] = s3db.org_rheader(r, tabs=tabs)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component:
table = s3db.org_group
list_fields = ["name",
"mission",
"website",
"meetings",
]
s3db.configure("org_group",
list_fields = list_fields,
)
if r.interactive:
from gluon.html import DIV, INPUT
from s3 import S3SQLCustomForm, S3SQLInlineComponent
if r.method != "read":
from gluon.validators import IS_EMPTY_OR
from s3 import IS_LOCATION, S3LocationSelector
field = table.location_id
field.label = "" # Gets replaced by widget
#field.requires = IS_LOCATION()
#field.requires = IS_EMPTY_OR(IS_LOCATION()) # That's the default!
field.widget = S3LocationSelector(levels = ("L2",),
points = True,
polygons = True,
)
# Default location to Manhattan
db = current.db
gtable = db.gis_location
query = (gtable.name == "New York") & \
(gtable.level == "L2")
manhattan = db(query).select(gtable.id,
limitby=(0, 1)).first()
if manhattan:
field.default = manhattan.id
table.mission.readable = table.mission.writable = True
table.meetings.readable = table.meetings.writable = True
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
crud_form = S3SQLCustomForm(
"name",
"location_id",
"mission",
S3SQLInlineComponent(
"contact",
name = "phone",
label = T("Phone"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"meetings",
"comments",
postprocess = pr_contact_postprocess,
)
s3db.configure("org_group",
crud_form = crud_form,
)
elif r.component_name == "pr_group":
list_fields = [#(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
list_fields = list_fields,
)
elif r.component_name == "organisation":
# Add Network Status to List Fields
list_fields = s3db.get_config("org_organisation", "list_fields")
list_fields.insert(1, "group_membership.status_id")
return result
s3.prep = custom_prep
if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to break out from tabs
attr["native"] = True
return attr
settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False
# Only show Private Contacts Tab (Public is done via Basic Details tab)
settings.pr.contacts_tabs = ("private",)
# -----------------------------------------------------------------------------
# Persons
def customise_pr_person_controller(**attr):
"""
Non-logged in users can access pr/person
Logged-in users access via hrm/person
"""
s3db = current.s3db
s3 = current.response.s3
AUTHENTICATED = current.auth.is_logged_in()
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
#if r.method == "validate":
# # Can't validate image without the file
# image_field = s3db.pr_image.image
# image_field.requires = None
if r.interactive or r.representation == "aadata":
if not r.component:
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
if r.method in ("create", "update"):
get_vars = r.get_vars
                    # Context from a Profile page?
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
site_id = get_vars.get("(site)", None)
if site_id:
field = s3db.hrm_human_resource.site_id
field.default = site_id
field.readable = field.writable = False
hr_fields.remove("site_id")
else:
s3db.hrm_human_resource.site_id.default = None
# ImageCrop widget doesn't currently work within an Inline Form
#image_field = s3db.pr_image.image
#from gluon.validators import IS_IMAGE
#image_field.requires = IS_IMAGE()
#image_field.widget = None
from s3 import S3SQLCustomForm, S3SQLInlineComponent
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
s3_sql_custom_fields = ["first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
#multiple = True,
fields = [("", "value")],
filterby = [dict(field = "contact_method",
options = "EMAIL"),
dict(field = "access",
options = 2),
]
),
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
#multiple = True,
fields = [("", "value")],
filterby = [dict(field = "contact_method",
options = "SMS"),
dict(field = "access",
options = 2),
]
),
#S3SQLInlineComponent(
# "image",
# name = "image",
# label = T("Photo"),
# multiple = False,
# fields = [("", "image")],
# filterby = dict(field = "profile",
# options=[True]
# )
# ),
]
if r.method != "update":
other_contact_opts = current.msg.CONTACT_OPTS.keys()
other_contact_opts.remove("EMAIL")
other_contact_opts.remove("SMS")
s3_sql_custom_fields.append(S3SQLInlineComponent("contact",
name = "contact",
label = T("Additional Public Contact Info"),
#multiple = True,
fields = [("", "contact_method"),
("", "value"),
],
filterby = [dict(field = "access",
options = 2),
dict(field = "contact_method",
options = other_contact_opts),
]
))
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Office"), "human_resource.site_id"),
]
if AUTHENTICATED:
# @ToDo: Filter these to Public to allow access to ANONYMOUS too
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3db.configure(r.tablename,
crud_form = crud_form,
list_fields = list_fields,
)
elif r.component_name == "group_membership":
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
if not AUTHENTICATED:
# Remove RHeader Tabs
tabs = None
attr["rheader"] = lambda r: s3db.pr_rheader(r, tabs=tabs)
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
"""
Virtual Field to show the chairperson of a group
"""
if hasattr(row, "pr_group"):
row = row.pr_group
try:
group_id = row.id
except:
# not available
return current.messages["NONE"]
db = current.db
mtable = db.pr_group_membership
ptable = db.pr_person
query = (mtable.group_id == group_id) & \
(mtable.group_head == True) & \
(mtable.person_id == ptable.id)
chairs = db(query).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.id)
if chairs:
# Only used in list view so HTML is OK
if current.auth.is_logged_in():
controller = "hrm"
else:
controller = "pr"
return ",".join([A(s3_fullname(chair),
_href=URL(c=controller, f="person", args=chair.id)).xml()
for chair in chairs])
else:
return current.messages["NONE"]
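# Note: in list views this renders each chairperson as a link to hrm/person/<id>
# (pr/person/<id> for anonymous users), comma-separated when a group has more
# than one member flagged as group_head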
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent
s3db = current.s3db
s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group",
show_link=True)
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("group_team",
label = T("Network"),
fields = [("", "org_group_id")],
# @ToDo: Make this optional?
multiple = False,
),
"meetings",
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"group_team.org_group_id$name",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
#_class = "filter-search",
),
S3OptionsFilter("group_team.org_group_id",
label = T("Network"),
#hidden = True,
),
]
        # Need to re-do list_fields as they get overwritten by hrm_group_controller()
list_fields = [(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
if r.component_name == "group_membership":
from s3layouts import S3AddResourceLink
s3db.pr_group_membership.person_id.comment = \
S3AddResourceLink(c="pr", f="person",
title=T("Create Person"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
#else:
# # RHeader wants a simplified version, but don't want inconsistent across tabs
# s3db.pr_group_membership.group_head.label = T("Chairperson")
return True
s3.prep = custom_prep
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
"""
Customise pr_group resource (in group & org_group controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
table = s3db.pr_group
field = table.group_type
field.default = 3 # Relief Team, to show up in hrm/group
field.readable = field.writable = False
table.name.label = T("Name")
table.description.label = T("Description")
table.meetings.readable = table.meetings.writable = True
# Increase size of widget
from s3 import s3_comments_widget
table.description.widget = s3_comments_widget
from gluon import Field
table.chairperson = Field.Method("chairperson", chairperson)
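    # Referenced as a virtual field, e.g. (T("Chairperson"), "chairperson") in
    # the list_fields configured by the group controllers above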
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_group_team = "group_id",
)
s3db.configure("pr_group",
# Redirect to member list when a new group has been created
create_next = URL(c="hrm", f="group",
args=["[id]", "group_membership"]),
)
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
"""
Import Organisation/Network RSS Feeds
"""
s3db = current.s3db
form_vars = form.vars
rss_url = form_vars.rsscontact_i_value_edit_0 or \
form_vars.rsscontact_i_value_edit_none
if not rss_url:
if form.record:
# Update form
old_rss = form.record.sub_rsscontact
import json
            data = json.loads(old_rss)["data"]
if data:
# RSS feed is being deleted, so we should disable it
old_rss = data[0]["value"]["value"]
table = s3db.msg_rss_channel
old = current.db(table.url == old_rss).select(table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if old and old.enabled:
s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
return
else:
# Nothing to do :)
return
# Check if we already have a channel for this Contact
db = current.db
name = form_vars.name
table = s3db.msg_rss_channel
name_exists = db(table.name == name).select(table.id,
table.channel_id,
table.enabled,
table.url,
limitby = (0, 1)
).first()
no_import = current.request.post_vars.get("rss_no_import", None)
if name_exists:
if name_exists.url == rss_url:
# No change to either Contact Name or URL
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# We have 2 feeds: 1 for the Contact & 1 for the URL
# Disable the old Contact one and link the URL one to this Contact
# and ensure active or not as appropriate
# Name field is unique so rename old one
name_exists.update_record(name="%s (Old)" % name)
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
else:
# Update the URL
name_exists.update_record(url=rss_url)
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
else:
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# Either Contact has changed Name or this feed is associated with
# another Contact
# - update Feed name
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
elif no_import:
# Nothing to do :)
return
#else:
# # Create a new Feed
# pass
# Add RSS Channel
_id = table.insert(name=name, enabled=True, url=rss_url)
record = dict(id=_id)
s3db.update_super(table, record)
# Enable
channel_id = record["channel_id"]
s3db.msg_channel_enable("msg_rss_channel", channel_id)
# Setup Parser
table = s3db.msg_parser
_id = table.insert(channel_id=channel_id,
function_name="parse_rss",
enabled=True)
s3db.msg_parser_enable(_id)
# Check Now
async = current.s3task.async
async("msg_poll", args=["msg_rss_channel", channel_id])
async("msg_parse", args=[channel_id, "parse_rss"])
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Custom label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component:
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
],
label = T("Name"),
),
S3OptionsFilter("organisation_id",
filter = True,
header = "",
hidden = True,
),
S3OptionsFilter("group_person.group_id",
label = T("Network"),
#filter = True,
#header = "",
hidden = True,
),
S3LocationFilter("location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
hidden = True,
),
S3OptionsFilter("site_id",
hidden = True,
),
S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
),
S3OptionsFilter("group_membership.group_id",
label = T("Team"),
filter = True,
header = "",
hidden = True,
),
]
s3db = current.s3db
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
)
s3db.pr_contact.access.default = 2 # Primary contacts should be Public
field = r.table.site_id
# Don't assume that user is from same org/site as Contacts they create
field.default = None
# Use a hierarchical dropdown instead of AC
field.widget = None
script = \
'''$.filterOptionsS3({
'trigger':'organisation_id',
'target':'site_id',
'lookupResource':'site',
'lookupURL':'/%s/org/sites_for_org/',
'optional':true
})''' % r.application
s3.jquery_ready.append(script)
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
"""
Customise hrm_human_resource resource (in facility, human_resource, organisation & person controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("person_id",
"organisation_id",
"site_id",
S3SQLInlineComponent(
"group_person",
label = T("Network"),
link = False,
fields = [("", "group_id")],
multiple = False,
),
"job_title_id",
"start_date",
)
list_fields = ["id",
"person_id",
"job_title_id",
"organisation_id",
(T("Network"), "group_person.group_id"),
(T("Groups"), "person_id$group_membership.group_id"),
"site_id",
#"site_contact",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
s3db.configure("hrm_human_resource",
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
table = current.s3db.hrm_job_title
table.organisation_id.readable = table.organisation_id.writable = False
table.type.readable = table.type.writable = False
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Projects
# Use codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Uncomment this to use settings suitable for detailed Task management
settings.project.mode_task = False
# Uncomment this to use Activities for projects
settings.project.activities = True
# Uncomment this to use Milestones in project/task.
settings.project.milestones = False
# Uncomment this to disable Sectors in projects
settings.project.sectors = False
# Multiple partner organizations
settings.project.multiple_organisations = True
def customise_project_project_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component and (r.interactive or r.representation == "aadata"):
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
s3db = current.s3db
table = r.table
tablename = "project_project"
table.code.label = T("Project blurb (max. 100 characters)")
table.code.max_length = 100
table.comments.label = T("How people can help")
script = '''$('#project_project_code').attr('maxlength','100')'''
s3.jquery_ready.append(script)
crud_form = S3SQLCustomForm(
"organisation_id",
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
"calendar",
#"drr.hfa",
#"objectives",
"human_resource_id",
# Activities
S3SQLInlineComponent(
"location",
label = T("Location"),
fields = [("", "location_id")],
),
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
"comments", # NB This is labelled 'Role' in DRRPP
],
filterby = dict(field = "role",
options = "2"
)
),
S3SQLInlineComponent(
"document",
name = "media",
label = T("URLs (media, fundraising, website, social media, etc."),
fields = ["document_id",
"name",
"url",
"comments",
],
filterby = dict(field = "name")
),
S3SQLInlineComponentCheckbox(
"activity_type",
label = T("Categories"),
field = "activity_type_id",
cols = 3,
# Filter Activity Type by Project
filter = {"linktable": "project_activity_type_project",
"lkey": "project_id",
"rkey": "activity_type_id",
},
),
#"budget",
#"currency",
"comments",
)
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
filter_widgets = [
S3TextFilter(["name",
"code",
"description",
"organisation.name",
"organisation.acronym",
],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("status_id",
label = T("Status"),
# Not translateable
#represent = "%(name)s",
cols = 3,
),
#S3OptionsFilter("theme_project.theme_id",
# label = T("Theme"),
# #hidden = True,
# ),
S3LocationFilter("location.location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
#hidden = True,
),
# @ToDo: Widget to handle Start & End in 1!
S3DateFilter("start_date",
label = T("Start Date"),
hide_time = True,
#hidden = True,
),
S3DateFilter("end_date",
label = T("End Date"),
hide_time = True,
#hidden = True,
),
]
list_fields = ["id",
"name",
"code",
"organisation_id",
"start_date",
"end_date",
(T("Locations"), "location.location_id"),
]
s3db.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]#, "Summary"]
settings.req.prompt_match = False
#settings.req.use_commit = False
settings.req.requester_optional = True
settings.req.date_writable = False
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
"""
Runs after crud_form completes
- creates a cms_post in the newswire
- @ToDo: Send out Tweets
"""
req_id = form.vars.id
db = current.db
s3db = current.s3db
rtable = s3db.req_req
# Read the full record
row = db(rtable.id == req_id).select(rtable.type,
rtable.site_id,
rtable.requester_id,
rtable.priority,
rtable.date_required,
rtable.purpose,
rtable.comments,
limitby=(0, 1)
).first()
# Build Title & Body from the Request details
priority = rtable.priority.represent(row.priority)
date_required = row.date_required
if date_required:
date = rtable.date_required.represent(date_required)
title = "%(priority)s by %(date)s" % dict(priority=priority,
date=date)
else:
title = priority
body = row.comments
if row.type == 1:
# Items
ritable = s3db.req_req_item
items = db(ritable.req_id == req_id).select(ritable.item_id,
ritable.item_pack_id,
ritable.quantity)
item_represent = s3db.supply_item_represent
pack_represent = s3db.supply_item_pack_represent
for item in items:
item = "%s %s %s" % (item.quantity,
pack_represent(item.item_pack_id),
item_represent(item.item_id))
body = "%s\n%s" % (item, body)
else:
# Skills
body = "%s\n%s" % (row.purpose, body)
rstable = s3db.req_req_skill
skills = db(rstable.req_id == req_id).select(rstable.skill_id,
rstable.quantity)
skill_represent = s3db.hrm_multi_skill_represent
for skill in skills:
item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
body = "%s\n%s" % (item, body)
# Lookup series_id
stable = s3db.cms_series
try:
series_id = db(stable.name == "Request").select(stable.id,
cache=s3db.cache,
limitby=(0, 1)
).first().id
except:
# Prepop hasn't been run
series_id = None
# Location is that of the site
otable = s3db.org_site
location_id = db(otable.site_id == row.site_id).select(otable.location_id,
limitby=(0, 1)
).first().location_id
# Create Post
ptable = s3db.cms_post
_id = ptable.insert(series_id=series_id,
title=title,
body=body,
location_id=location_id,
person_id=row.requester_id,
)
record = dict(id=_id)
s3db.update_super(ptable, record)
# Add source link
url = "%s%s" % (settings.get_base_public_url(),
URL(c="req", f="req", args=req_id))
s3db.doc_document.insert(doc_id=record["doc_id"],
url=url,
)
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
from s3layouts import S3AddResourceLink
current.s3db.req_req.site_id.comment = \
S3AddResourceLink(c="org", f="facility",
vars = dict(child="site_id"),
title=T("Create Facility"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
current.response.s3.req_req_postprocess = req_req_postprocess
if not r.component and r.method in ("create", "update"):
script = \
'''$('#req_req_site_id').change(function(){
var url=$('#person_add').attr('href')
url=url.split('?')
var q=S3.queryString.parse(url[1])
q['(site)']=$(this).val()
url=url[0]+'?'+S3.queryString.stringify(q)
$('#person_add').attr('href',url)})'''
current.response.s3.jquery_ready.append(script)
settings.customise_req_req_resource = customise_req_req_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Admin"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
# Uncomment to enable internal support requests
#("support", Storage(
# name_nice = T("Support"),
# #description = "Support Requests",
# restricted = True,
# module_type = None # This item is handled separately for the menu
# )),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 9, # 8th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Locations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 4
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Contacts"),
#description = "Human Resources Management",
restricted = True,
module_type = 3,
)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
# )),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Inventory"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 10
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 10,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = T("Vehicles"),
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 1,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 10
)),
("assess", Storage(
name_nice = T("Assessments"),
#description = "Rapid Assessments & Flexible Impact Assessments",
restricted = True,
module_type = 5,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("survey", Storage(
name_nice = T("Surveys"),
#description = "Create, enter, and manage surveys.",
restricted = True,
module_type = 5,
)),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
# )),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = False,
# module_type = 10,
# )),
#("member", Storage(
# name_nice = T("Members"),
# #description = "Membership Management System",
# restricted = True,
# module_type = 10,
# )),
# @ToDo: Rewrite in a modern style
#("budget", Storage(
# name_nice = T("Budgeting Module"),
# #description = "Allows a Budget to be drawn up",
# restricted = True,
# module_type = 10
# )),
# @ToDo: Port these Assessments to the Survey module
#("building", Storage(
# name_nice = T("Building Assessments"),
# #description = "Building Safety Assessments",
# restricted = True,
# module_type = 10,
# )),
])
|
{
"content_hash": "6f54c08e6e545e6a0dde8aa611b48197",
"timestamp": "",
"source": "github",
"line_count": 2052,
"max_line_length": 187,
"avg_line_length": 39.77436647173489,
"alnum_prop": 0.4514990749476212,
"repo_name": "collective/eden",
"id": "d9113a8253b4d481a237bb5bc5d4b75d86b59419",
"size": "81643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "private/templates/NYC/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2030949"
},
{
"name": "JavaScript",
"bytes": "19162817"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "666"
},
{
"name": "Python",
"bytes": "28358306"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4846"
},
{
"name": "XSLT",
"bytes": "2644035"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect
from django.conf import settings
class LoginRequiredMiddleware:
def process_request(self, request):
if not request.user.is_authenticated():
path = request.path_info
if not path.startswith('/accounts/'):
return HttpResponseRedirect(settings.LOGIN_URL)
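# A hedged usage sketch, not part of the original module: the dotted module
# path and LOGIN_URL value below are assumptions for illustration only. This
# is old-style (pre-Django 1.10) middleware, so it would be listed under
# MIDDLEWARE_CLASSES rather than MIDDLEWARE.
#
#   MIDDLEWARE_CLASSES = (
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'apps.accounts.middleware.LoginRequiredMiddleware',
#   )
#   LOGIN_URL = '/accounts/login/'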
|
{
"content_hash": "11734a0a9f99fb0f0545839e99cdc46d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 63,
"avg_line_length": 35,
"alnum_prop": 0.6942857142857143,
"repo_name": "sychsergiy/DocFlow",
"id": "876287170056f5efce009a7b0837b56e29f1abab",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc_flow/apps/accounts/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "30665"
},
{
"name": "Python",
"bytes": "51117"
}
],
"symlink_target": ""
}
|
"""Repository access.
This module contains the base class for git repositories
(BaseRepo) and an implementation which uses a repository on
local disk (Repo).
"""
from io import BytesIO
import errno
import os
import sys
from dulwich.errors import (
NoIndexPresent,
NotBlobError,
NotCommitError,
NotGitRepository,
NotTreeError,
NotTagError,
CommitError,
RefFormatError,
HookError,
)
from dulwich.file import (
GitFile,
)
from dulwich.object_store import (
DiskObjectStore,
MemoryObjectStore,
ObjectStoreGraphWalker,
)
from dulwich.objects import (
check_hexsha,
Blob,
Commit,
ShaFile,
Tag,
Tree,
)
from dulwich.hooks import (
PreCommitShellHook,
PostCommitShellHook,
CommitMsgShellHook,
)
from dulwich.refs import (
check_ref_format,
RefsContainer,
DictRefsContainer,
InfoRefsContainer,
DiskRefsContainer,
read_packed_refs,
read_packed_refs_with_peeled,
write_packed_refs,
SYMREF,
)
import warnings
OBJECTDIR = 'objects'
REFSDIR = 'refs'
REFSDIR_TAGS = 'tags'
REFSDIR_HEADS = 'heads'
INDEX_FILENAME = "index"
BASE_DIRECTORIES = [
["branches"],
[REFSDIR],
[REFSDIR, REFSDIR_TAGS],
[REFSDIR, REFSDIR_HEADS],
["hooks"],
["info"]
]
def parse_graftpoints(graftpoints):
"""Convert a list of graftpoints into a dict
:param graftpoints: Iterator of graftpoint lines
Each line is formatted as:
<commit sha1> <parent sha1> [<parent sha1>]*
Resulting dictionary is:
<commit sha1>: [<parent sha1>*]
https://git.wiki.kernel.org/index.php/GraftPoint
"""
grafts = {}
for l in graftpoints:
raw_graft = l.split(None, 1)
commit = raw_graft[0]
if len(raw_graft) == 2:
parents = raw_graft[1].split()
else:
parents = []
for sha in [commit] + parents:
check_hexsha(sha, 'Invalid graftpoint')
grafts[commit] = parents
return grafts
def serialize_graftpoints(graftpoints):
"""Convert a dictionary of grafts into string
The graft dictionary is:
<commit sha1>: [<parent sha1>*]
Each line is formatted as:
<commit sha1> <parent sha1> [<parent sha1>]*
https://git.wiki.kernel.org/index.php/GraftPoint
"""
graft_lines = []
for commit, parents in graftpoints.items():
if parents:
graft_lines.append(commit + b' ' + b' '.join(parents))
else:
graft_lines.append(commit)
return b'\n'.join(graft_lines)
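# Illustrative round trip of the two helpers above (doctest-style sketch;
# the SHAs are dummy 40-character hex strings, not real commits):
#
#   >>> line = b"a" * 40 + b" " + b"b" * 40
#   >>> grafts = parse_graftpoints([line])
#   >>> grafts == {b"a" * 40: [b"b" * 40]}
#   True
#   >>> serialize_graftpoints(grafts) == line
#   True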
class BaseRepo(object):
"""Base class for a git repository.
:ivar object_store: Dictionary-like object for accessing
the objects
:ivar refs: Dictionary-like object with the refs in this
repository
"""
def __init__(self, object_store, refs):
"""Open a repository.
This shouldn't be called directly, but rather through one of the
base classes, such as MemoryRepo or Repo.
:param object_store: Object store to use
:param refs: Refs container to use
"""
self.object_store = object_store
self.refs = refs
self._graftpoints = {}
self.hooks = {}
def _init_files(self, bare):
"""Initialize a default set of named files."""
from dulwich.config import ConfigFile
self._put_named_file('description', b"Unnamed repository")
f = BytesIO()
cf = ConfigFile()
cf.set(b"core", b"repositoryformatversion", b"0")
cf.set(b"core", b"filemode", b"true")
cf.set(b"core", b"bare", bare)
cf.set(b"core", b"logallrefupdates", True)
cf.write_to_file(f)
self._put_named_file('config', f.getvalue())
self._put_named_file(os.path.join('info', 'exclude'), b'')
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
raise NotImplementedError(self.get_named_file)
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
raise NotImplementedError(self._put_named_file)
def open_index(self):
"""Open the index for this repository.
:raise NoIndexPresent: If no index is present
:return: The matching `Index`
"""
raise NotImplementedError(self.open_index)
def fetch(self, target, determine_wants=None, progress=None):
"""Fetch objects into another repository.
:param target: The target repository
:param determine_wants: Optional function to determine what refs to
fetch.
:param progress: Optional progress function
:return: The local refs
"""
if determine_wants is None:
determine_wants = target.object_store.determine_wants_all
target.object_store.add_objects(
self.fetch_objects(determine_wants, target.get_graph_walker(),
progress))
return self.get_refs()
def fetch_objects(self, determine_wants, graph_walker, progress,
get_tagged=None):
"""Fetch the missing objects required for a set of revisions.
:param determine_wants: Function that takes a dictionary with heads
and returns the list of heads to fetch.
:param graph_walker: Object that can iterate over the list of revisions
to fetch and has an "ack" method that will be called to acknowledge
that a revision is present.
:param progress: Simple progress function that will be called with
updated progress strings.
:param get_tagged: Function that returns a dict of pointed-to sha -> tag
sha for including tags.
:return: iterator over objects, with __len__ implemented
"""
wants = determine_wants(self.get_refs())
if not isinstance(wants, list):
raise TypeError("determine_wants() did not return a list")
shallows = getattr(graph_walker, 'shallow', frozenset())
unshallows = getattr(graph_walker, 'unshallow', frozenset())
if wants == []:
# TODO(dborowitz): find a way to short-circuit that doesn't change
# this interface.
if shallows or unshallows:
# Do not send a pack in shallow short-circuit path
return None
return []
# If the graph walker is set up with an implementation that can
# ACK/NAK to the wire, it will write data to the client through
# this call as a side-effect.
haves = self.object_store.find_common_revisions(graph_walker)
# Deal with shallow requests separately because the haves do
# not reflect what objects are missing
if shallows or unshallows:
haves = [] # TODO: filter the haves commits from iter_shas.
# the specific commits aren't missing.
def get_parents(commit):
if commit.id in shallows:
return []
return self.get_parents(commit.id, commit)
return self.object_store.iter_shas(
self.object_store.find_missing_objects(
haves, wants, progress,
get_tagged,
get_parents=get_parents))
def get_graph_walker(self, heads=None):
"""Retrieve a graph walker.
A graph walker is used by a remote repository (or proxy)
to find out which objects are present in this repository.
:param heads: Repository heads to use (optional)
:return: A graph walker object
"""
if heads is None:
heads = self.refs.as_dict(b'refs/heads').values()
return ObjectStoreGraphWalker(heads, self.get_parents)
def get_refs(self):
"""Get dictionary with all refs.
:return: A ``dict`` mapping ref names to SHA1s
"""
return self.refs.as_dict()
def head(self):
"""Return the SHA1 pointed at by HEAD."""
return self.refs[b'HEAD']
def _get_object(self, sha, cls):
assert len(sha) in (20, 40)
ret = self.get_object(sha)
if not isinstance(ret, cls):
if cls is Commit:
raise NotCommitError(ret)
elif cls is Blob:
raise NotBlobError(ret)
elif cls is Tree:
raise NotTreeError(ret)
elif cls is Tag:
raise NotTagError(ret)
else:
raise Exception("Type invalid: %r != %r" % (
ret.type_name, cls.type_name))
return ret
def get_object(self, sha):
"""Retrieve the object with the specified SHA.
:param sha: SHA to retrieve
:return: A ShaFile object
:raise KeyError: when the object can not be found
"""
return self.object_store[sha]
def get_parents(self, sha, commit=None):
"""Retrieve the parents of a specific commit.
If the specific commit is a graftpoint, the graft parents
will be returned instead.
:param sha: SHA of the commit for which to retrieve the parents
:param commit: Optional commit matching the sha
:return: List of parents
"""
try:
return self._graftpoints[sha]
except KeyError:
if commit is None:
commit = self[sha]
return commit.parents
def get_config(self):
"""Retrieve the config object.
:return: `ConfigFile` object for the ``.git/config`` file.
"""
raise NotImplementedError(self.get_config)
def get_description(self):
"""Retrieve the description for this repository.
:return: String with the description of the repository
as set by the user.
"""
raise NotImplementedError(self.get_description)
def set_description(self, description):
"""Set the description for this repository.
:param description: Text to set as description for this repository.
"""
raise NotImplementedError(self.set_description)
def get_config_stack(self):
"""Return a config stack for this repository.
This stack accesses the configuration for both this repository
itself (.git/config) and the global configuration, which usually
lives in ~/.gitconfig.
:return: `Config` instance for this repository
"""
from dulwich.config import StackedConfig
backends = [self.get_config()] + StackedConfig.default_backends()
return StackedConfig(backends, writable=backends[0])
def get_peeled(self, ref):
"""Get the peeled value of a ref.
:param ref: The refname to peel.
:return: The fully-peeled SHA1 of a tag object, after peeling all
intermediate tags; if the original ref does not point to a tag, this
will equal the original SHA1.
"""
cached = self.refs.get_peeled(ref)
if cached is not None:
return cached
return self.object_store.peel_sha(self.refs[ref]).id
def get_walker(self, include=None, *args, **kwargs):
"""Obtain a walker for this repository.
:param include: Iterable of SHAs of commits to include along with their
ancestors. Defaults to [HEAD]
:param exclude: Iterable of SHAs of commits to exclude along with their
ancestors, overriding includes.
:param order: ORDER_* constant specifying the order of results. Anything
other than ORDER_DATE may result in O(n) memory usage.
:param reverse: If True, reverse the order of output, requiring O(n)
memory.
:param max_entries: The maximum number of entries to yield, or None for
no limit.
:param paths: Iterable of file or subtree paths to show entries for.
:param rename_detector: diff.RenameDetector object for detecting
renames.
:param follow: If True, follow path across renames/copies. Forces a
default rename_detector.
:param since: Timestamp to list commits after.
:param until: Timestamp to list commits before.
:param queue_cls: A class to use for a queue of commits, supporting the
iterator protocol. The constructor takes a single argument, the
Walker.
:return: A `Walker` object
"""
from dulwich.walk import Walker
if include is None:
include = [self.head()]
if isinstance(include, str):
include = [include]
kwargs['get_parents'] = lambda commit: self.get_parents(commit.id, commit)
return Walker(self.object_store, include, *args, **kwargs)
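    # Hedged sketch of walking history with the method above (the repo object
    # and commit messages are illustrative, not from this module):
    #
    #   >>> for entry in repo.get_walker(max_entries=10):
    #   ...     print(entry.commit.message)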
def __getitem__(self, name):
"""Retrieve a Git object by SHA1 or ref.
:param name: A Git object SHA1 or a ref name
:return: A `ShaFile` object, such as a Commit or Blob
:raise KeyError: when the specified ref or object does not exist
"""
if not isinstance(name, bytes):
raise TypeError("'name' must be bytestring, not %.80s" %
type(name).__name__)
if len(name) in (20, 40):
try:
return self.object_store[name]
except (KeyError, ValueError):
pass
try:
return self.object_store[self.refs[name]]
except RefFormatError:
raise KeyError(name)
def __contains__(self, name):
"""Check if a specific Git object or ref is present.
:param name: Git object SHA1 or ref name
"""
if len(name) in (20, 40):
return name in self.object_store or name in self.refs
else:
return name in self.refs
def __setitem__(self, name, value):
"""Set a ref.
:param name: ref name
:param value: Ref value - either a ShaFile object, or a hex sha
"""
if name.startswith(b"refs/") or name == b'HEAD':
if isinstance(value, ShaFile):
self.refs[name] = value.id
elif isinstance(value, bytes):
self.refs[name] = value
else:
raise TypeError(value)
else:
raise ValueError(name)
def __delitem__(self, name):
"""Remove a ref.
:param name: Name of the ref to remove
"""
if name.startswith(b"refs/") or name == b"HEAD":
del self.refs[name]
else:
raise ValueError(name)
def _get_user_identity(self):
"""Determine the identity to use for new commits.
"""
config = self.get_config_stack()
return (config.get((b"user", ), b"name") + b" <" +
config.get((b"user", ), b"email") + b">")
def _add_graftpoints(self, updated_graftpoints):
"""Add or modify graftpoints
:param updated_graftpoints: Dict of commit shas to list of parent shas
"""
# Simple validation
for commit, parents in updated_graftpoints.items():
for sha in [commit] + parents:
check_hexsha(sha, 'Invalid graftpoint')
self._graftpoints.update(updated_graftpoints)
def _remove_graftpoints(self, to_remove=[]):
"""Remove graftpoints
:param to_remove: List of commit shas
"""
for sha in to_remove:
del self._graftpoints[sha]
def do_commit(self, message=None, committer=None,
author=None, commit_timestamp=None,
commit_timezone=None, author_timestamp=None,
author_timezone=None, tree=None, encoding=None,
ref=b'HEAD', merge_heads=None):
"""Create a new commit.
:param message: Commit message
:param committer: Committer fullname
:param author: Author fullname (defaults to committer)
:param commit_timestamp: Commit timestamp (defaults to now)
:param commit_timezone: Commit timestamp timezone (defaults to GMT)
:param author_timestamp: Author timestamp (defaults to commit timestamp)
:param author_timezone: Author timestamp timezone
(defaults to commit timestamp timezone)
:param tree: SHA1 of the tree root to use (if not specified the
current index will be committed).
:param encoding: Encoding
:param ref: Optional ref to commit to (defaults to current branch)
:param merge_heads: Merge heads (defaults to .git/MERGE_HEADS)
:return: New commit SHA1
"""
import time
c = Commit()
if tree is None:
index = self.open_index()
c.tree = index.commit(self.object_store)
else:
if len(tree) != 40:
raise ValueError("tree must be a 40-byte hex sha string")
c.tree = tree
try:
self.hooks['pre-commit'].execute()
except HookError as e:
raise CommitError(e)
except KeyError: # no hook defined, silent fallthrough
pass
if merge_heads is None:
# FIXME: Read merge heads from .git/MERGE_HEADS
merge_heads = []
if committer is None:
# FIXME: Support GIT_COMMITTER_NAME/GIT_COMMITTER_EMAIL environment
# variables
committer = self._get_user_identity()
c.committer = committer
if commit_timestamp is None:
# FIXME: Support GIT_COMMITTER_DATE environment variable
commit_timestamp = time.time()
c.commit_time = int(commit_timestamp)
if commit_timezone is None:
# FIXME: Use current user timezone rather than UTC
commit_timezone = 0
c.commit_timezone = commit_timezone
if author is None:
# FIXME: Support GIT_AUTHOR_NAME/GIT_AUTHOR_EMAIL environment
# variables
author = committer
c.author = author
if author_timestamp is None:
# FIXME: Support GIT_AUTHOR_DATE environment variable
author_timestamp = commit_timestamp
c.author_time = int(author_timestamp)
if author_timezone is None:
author_timezone = commit_timezone
c.author_timezone = author_timezone
if encoding is not None:
c.encoding = encoding
if message is None:
# FIXME: Try to read commit message from .git/MERGE_MSG
raise ValueError("No commit message specified")
try:
c.message = self.hooks['commit-msg'].execute(message)
if c.message is None:
c.message = message
except HookError as e:
raise CommitError(e)
except KeyError: # no hook defined, message not modified
c.message = message
if ref is None:
# Create a dangling commit
c.parents = merge_heads
self.object_store.add_object(c)
else:
try:
old_head = self.refs[ref]
c.parents = [old_head] + merge_heads
self.object_store.add_object(c)
ok = self.refs.set_if_equals(ref, old_head, c.id)
except KeyError:
c.parents = merge_heads
self.object_store.add_object(c)
ok = self.refs.add_if_new(ref, c.id)
if not ok:
# Fail if the atomic compare-and-swap failed, leaving the commit and
# all its objects as garbage.
raise CommitError("%s changed during commit" % (ref,))
try:
self.hooks['post-commit'].execute()
except HookError as e: # silent failure
warnings.warn("post-commit hook failed: %s" % e, UserWarning)
except KeyError: # no hook defined, silent fallthrough
pass
return c.id
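# Hedged example of BaseRepo.do_commit against the in-memory backend defined
# later in this module (identity string and message are placeholders; with an
# explicit tree no index is involved):
#
#   >>> r = MemoryRepo()
#   >>> tree = Tree()
#   >>> r.object_store.add_object(tree)
#   >>> sha = r.do_commit(b"initial commit",
#   ...                   committer=b"Jane Doe <jane@example.com>",
#   ...                   tree=tree.id)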
class Repo(BaseRepo):
"""A git repository backed by local disk.
    To open an existing repository, call the constructor with
the path of the repository.
To create a new repository, use the Repo.init class method.
"""
def __init__(self, root):
if os.path.isdir(os.path.join(root, ".git", OBJECTDIR)):
self.bare = False
self._controldir = os.path.join(root, ".git")
elif (os.path.isdir(os.path.join(root, OBJECTDIR)) and
os.path.isdir(os.path.join(root, REFSDIR))):
self.bare = True
self._controldir = root
elif (os.path.isfile(os.path.join(root, ".git"))):
import re
with open(os.path.join(root, ".git"), 'r') as f:
_, path = re.match('(gitdir: )(.+$)', f.read()).groups()
self.bare = False
self._controldir = os.path.join(root, path)
else:
raise NotGitRepository(
"No git repository was found at %(path)s" % dict(path=root)
)
self.path = root
object_store = DiskObjectStore(os.path.join(self.controldir(),
OBJECTDIR))
refs = DiskRefsContainer(self.controldir())
BaseRepo.__init__(self, object_store, refs)
self._graftpoints = {}
graft_file = self.get_named_file(os.path.join("info", "grafts"))
if graft_file:
with graft_file:
self._graftpoints.update(parse_graftpoints(graft_file))
graft_file = self.get_named_file("shallow")
if graft_file:
with graft_file:
self._graftpoints.update(parse_graftpoints(graft_file))
self.hooks['pre-commit'] = PreCommitShellHook(self.controldir())
self.hooks['commit-msg'] = CommitMsgShellHook(self.controldir())
self.hooks['post-commit'] = PostCommitShellHook(self.controldir())
def controldir(self):
"""Return the path of the control directory."""
return self._controldir
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
path = path.lstrip(os.path.sep)
with GitFile(os.path.join(self.controldir(), path), 'wb') as f:
f.write(contents)
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
# TODO(dborowitz): sanitize filenames, since this is used directly by
# the dumb web serving code.
path = path.lstrip(os.path.sep)
try:
return open(os.path.join(self.controldir(), path), 'rb')
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
return None
raise
def index_path(self):
"""Return path to the index file."""
return os.path.join(self.controldir(), INDEX_FILENAME)
def open_index(self):
"""Open the index for this repository.
:raise NoIndexPresent: If no index is present
:return: The matching `Index`
"""
from dulwich.index import Index
if not self.has_index():
raise NoIndexPresent()
return Index(self.index_path())
def has_index(self):
"""Check if an index is present."""
# Bare repos must never have index files; non-bare repos may have a
# missing index file, which is treated as empty.
return not self.bare
def stage(self, fs_paths):
"""Stage a set of paths.
:param fs_paths: List of paths, relative to the repository path
"""
root_path_bytes = self.path.encode(sys.getfilesystemencoding())
if not isinstance(fs_paths, list):
fs_paths = [fs_paths]
from dulwich.index import (
blob_from_path_and_stat,
index_entry_from_stat,
_fs_to_tree_path,
)
index = self.open_index()
for fs_path in fs_paths:
if not isinstance(fs_path, bytes):
fs_path = fs_path.encode(sys.getfilesystemencoding())
tree_path = _fs_to_tree_path(fs_path)
full_path = os.path.join(root_path_bytes, fs_path)
try:
st = os.lstat(full_path)
except OSError:
# File no longer exists
try:
del index[tree_path]
except KeyError:
pass # already removed
else:
blob = blob_from_path_and_stat(full_path, st)
self.object_store.add_object(blob)
index[tree_path] = index_entry_from_stat(st, blob.id, 0)
index.write()
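    # Hedged worktree sketch combining stage() with do_commit() (the path,
    # message and identity below are illustrative only):
    #
    #   >>> repo = Repo.init("/tmp/example", mkdir=True)
    #   >>> # ... create /tmp/example/hello.txt ...
    #   >>> repo.stage([b"hello.txt"])
    #   >>> repo.do_commit(b"add hello",
    #   ...                committer=b"Jane Doe <jane@example.com>")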
def clone(self, target_path, mkdir=True, bare=False,
origin=b"origin"):
"""Clone this repository.
:param target_path: Target path
:param mkdir: Create the target directory
:param bare: Whether to create a bare repository
:param origin: Base name for refs in target repository
cloned from this repository
:return: Created repository as `Repo`
"""
if not bare:
target = self.init(target_path, mkdir=mkdir)
else:
target = self.init_bare(target_path)
self.fetch(target)
target.refs.import_refs(
b'refs/remotes/' + origin, self.refs.as_dict(b'refs/heads'))
target.refs.import_refs(
b'refs/tags', self.refs.as_dict(b'refs/tags'))
try:
target.refs.add_if_new(
b'refs/heads/master',
self.refs[b'refs/heads/master'])
except KeyError:
pass
# Update target head
head, head_sha = self.refs._follow(b'HEAD')
if head is not None and head_sha is not None:
target.refs.set_symbolic_ref(b'HEAD', head)
target[b'HEAD'] = head_sha
if not bare:
# Checkout HEAD to target dir
target.reset_index()
return target
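    # Hedged illustration of clone() (the target path is a placeholder):
    #
    #   >>> target = repo.clone("/tmp/example-clone", mkdir=True)
    #   >>> target.path
    #   '/tmp/example-clone'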
def reset_index(self, tree=None):
"""Reset the index back to a specific tree.
:param tree: Tree SHA to reset to, None for current HEAD tree.
"""
from dulwich.index import (
build_index_from_tree,
validate_path_element_default,
validate_path_element_ntfs,
)
if tree is None:
tree = self[b'HEAD'].tree
config = self.get_config()
honor_filemode = config.get_boolean('core', 'filemode', os.name != "nt")
        if config.get_boolean('core', 'protectNTFS', os.name == "nt"):
validate_path_element = validate_path_element_ntfs
else:
validate_path_element = validate_path_element_default
return build_index_from_tree(self.path, self.index_path(),
self.object_store, tree, honor_filemode=honor_filemode,
validate_path_element=validate_path_element)
def get_config(self):
"""Retrieve the config object.
:return: `ConfigFile` object for the ``.git/config`` file.
"""
from dulwich.config import ConfigFile
path = os.path.join(self._controldir, 'config')
try:
return ConfigFile.from_path(path)
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
ret = ConfigFile()
ret.path = path
return ret
def get_description(self):
"""Retrieve the description of this repository.
:return: A string describing the repository or None.
"""
path = os.path.join(self._controldir, 'description')
try:
with GitFile(path, 'rb') as f:
return f.read()
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
return None
def __repr__(self):
return "<Repo at %r>" % self.path
def set_description(self, description):
"""Set the description for this repository.
:param description: Text to set as description for this repository.
"""
self._put_named_file('description', description)
@classmethod
def _init_maybe_bare(cls, path, bare):
for d in BASE_DIRECTORIES:
os.mkdir(os.path.join(path, *d))
DiskObjectStore.init(os.path.join(path, OBJECTDIR))
ret = cls(path)
ret.refs.set_symbolic_ref(b'HEAD', b"refs/heads/master")
ret._init_files(bare)
return ret
@classmethod
def init(cls, path, mkdir=False):
"""Create a new repository.
:param path: Path in which to create the repository
:param mkdir: Whether to create the directory
:return: `Repo` instance
"""
if mkdir:
os.mkdir(path)
controldir = os.path.join(path, ".git")
os.mkdir(controldir)
cls._init_maybe_bare(controldir, False)
return cls(path)
@classmethod
def init_bare(cls, path):
"""Create a new bare repository.
        ``path`` should already exist and be an empty directory.
:param path: Path to create bare repository in
:return: a `Repo` instance
"""
return cls._init_maybe_bare(path, True)
create = init_bare
def close(self):
"""Close any files opened by this repository."""
self.object_store.close()
class MemoryRepo(BaseRepo):
"""Repo that stores refs, objects, and named files in memory.
MemoryRepos are always bare: they have no working tree and no index, since
those have a stronger dependency on the filesystem.
"""
def __init__(self):
from dulwich.config import ConfigFile
BaseRepo.__init__(self, MemoryObjectStore(), DictRefsContainer({}))
self._named_files = {}
self.bare = True
self._config = ConfigFile()
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
self._named_files[path] = contents
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
        the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
contents = self._named_files.get(path, None)
if contents is None:
return None
return BytesIO(contents)
def open_index(self):
"""Fail to open index for this repo, since it is bare.
:raise NoIndexPresent: Raised when no index is present
"""
raise NoIndexPresent()
def get_config(self):
"""Retrieve the config object.
:return: `ConfigFile` object.
"""
return self._config
def get_description(self):
"""Retrieve the repository description.
This defaults to None, for no description.
"""
return None
@classmethod
def init_bare(cls, objects, refs):
"""Create a new bare repository in memory.
:param objects: Objects for the new repository,
as iterable
:param refs: Refs as dictionary, mapping names
to object SHA1s
"""
ret = cls()
for obj in objects:
ret.object_store.add_object(obj)
for refname, sha in refs.items():
ret.refs[refname] = sha
ret._init_files(bare=True)
return ret
|
{
"content_hash": "def6a5c8f41e410a6c13ad0426c380ce",
"timestamp": "",
"source": "github",
"line_count": 968,
"max_line_length": 84,
"avg_line_length": 33.96797520661157,
"alnum_prop": 0.58857698975092,
"repo_name": "Sorsly/subtle",
"id": "12faefcc56aea786607e4d72149af3f091f11dde",
"size": "33773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/third_party/dulwich/repo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
}
|
"""Fixer that changes buffer(...) into memoryview(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixBuffer(fixer_base.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = """
power< name='buffer' trailer< '(' [any] ')' > any* >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name("memoryview", prefix=name.prefix))
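# Illustrative before/after for this fixer (it only renames the call; the
# memoryview API is not a drop-in replacement for buffer, which is why the
# fixer is marked explicit). Invocation sketch: `2to3 -f buffer example.py`
#
#   before:  buf = buffer(data)
#   after:   buf = memoryview(data)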
|
{
"content_hash": "321b90bb8755f090626dc219cc115ecd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.5948275862068966,
"repo_name": "MalloyPower/parsing-python",
"id": "21d04aed0b3c456aa9ab3f290ed49fe21e97ef87",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/lib2to3/fixes/fix_buffer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
''' Run pylint against each python file with changes '''
import os
import re
import sys
import common
PYLINT_RCFILE = "jenkins/test/validators/.pylintrc"
LINT_EXCLUDE_PATTERN_LIST = [
    r'prometheus_client',
    r'ansible/inventory/aws/hosts/ec2.py',
    r'ansible/inventory/gce/hosts/gce.py',
    r'docs/*']
def linter(diff_file_list):
'''Use pylint to lint all python files changed in the pull request'''
file_list = []
# For each file in the diff, confirm it should be linted
for dfile in diff_file_list.split(","):
# Skip linting for specific files
skip = False
for exclude_pattern in LINT_EXCLUDE_PATTERN_LIST:
if re.match(exclude_pattern, dfile):
skip = True
break
if skip:
continue
# Skip linting if dfile is a directory or other non-file type
if not os.path.isfile(dfile):
continue
# Skip linting if the file does not have a python extension
_, ext = os.path.splitext(dfile)
if ext != ".py":
continue
file_list.append(dfile)
if len(file_list) == 0:
print "No python files have changed or all files are excluded, skipping running python linter"
return True, ""
print "Running pylint against " + " ".join(file_list)
pylint_cmd = ["/usr/bin/pylint", "--rcfile=" + PYLINT_RCFILE] + file_list
success, stdout = common.run_cli_cmd(pylint_cmd, exit_on_fail=False)
if not success:
return False, "Pylint failed:\n" + stdout
return True, ""
def usage():
''' Print usage '''
print """usage: python lint.py [file_list...]
file_list: Comma-separated list of files to run pylint against
Arguments can be provided through the following environment variables:
file_list: PRV_CHANGED_FILES"""
def main():
''' Get base and remote SHA from arguments and run linter '''
if len(sys.argv) == 2:
file_list = sys.argv[1]
elif len(sys.argv) > 2:
print len(sys.argv)-1, "arguments provided, expected 1."
usage()
sys.exit(2)
else:
file_list = os.getenv("PRV_CHANGED_FILES", "")
if file_list == "":
print "file list must be provided"
usage()
sys.exit(3)
success, error_message = linter(file_list)
if not success:
print "Pylint failed:"
print error_message
sys.exit(1)
print "Pylint succeeded!"
if __name__ == '__main__':
main()
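# Example invocation (the file list below is illustrative):
#   python jenkins/test/validators/lint.py "some_module.py,another/script.py"
# or, equivalently, via the environment variable documented in usage():
#   PRV_CHANGED_FILES="some_module.py,another/script.py" python jenkins/test/validators/lint.py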
|
{
"content_hash": "f0358af85902a087c43c57744d8a6247",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 102,
"avg_line_length": 30.06024096385542,
"alnum_prop": 0.6072144288577155,
"repo_name": "drewandersonnz/openshift-tools",
"id": "62793a33ec4e1f5447010b5866442804b09ec45b",
"size": "2495",
"binary": false,
"copies": "11",
"ref": "refs/heads/prod",
"path": "jenkins/test/validators/lint.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24919"
},
{
"name": "Dockerfile",
"bytes": "10248"
},
{
"name": "Go",
"bytes": "127388"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "67678"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "PHP",
"bytes": "30017"
},
{
"name": "Python",
"bytes": "19774421"
},
{
"name": "Shell",
"bytes": "553874"
}
],
"symlink_target": ""
}
|
from google.api_core import datetime_helpers
def undelete_table(table_id, recovered_table_id):
# [START bigquery_undelete_table]
import time
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# TODO(developer): Choose a table to recover.
# table_id = "your-project.your_dataset.your_table"
# TODO(developer): Choose a new table ID for the recovered table data.
# recovery_table_id = "your-project.your_dataset.your_table_recovered"
# TODO(developer): Choose an appropriate snapshot point as epoch
# milliseconds. For this example, we choose the current time as we're about
# to delete the table immediately afterwards.
snapshot_epoch = int(time.time() * 1000)
# [START_EXCLUDE]
    # Due to the very short lifecycle of the table, ensure we're not picking a time
# prior to the table creation due to time drift between backend and client.
table = client.get_table(table_id)
created_epoch = datetime_helpers.to_milliseconds(table.created)
if created_epoch > snapshot_epoch:
snapshot_epoch = created_epoch
# [END_EXCLUDE]
# "Accidentally" delete the table.
client.delete_table(table_id) # Make an API request.
# Construct the restore-from table ID using a snapshot decorator.
snapshot_table_id = "{}@{}".format(table_id, snapshot_epoch)
# Construct and run a copy job.
job = client.copy_table(
snapshot_table_id,
recovered_table_id,
        # Must match the source and destination tables' location.
location="US",
) # Make an API request.
job.result() # Wait for the job to complete.
print(
"Copied data from deleted table {} to {}".format(table_id, recovered_table_id)
)
# [END bigquery_undelete_table]
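# Hedged example call of the sample above (the project, dataset and table IDs
# are placeholders):
#
#   undelete_table("my-project.my_dataset.my_table",
#                  "my-project.my_dataset.my_table_recovered")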
|
{
"content_hash": "fc6adec3787a2ef95df94d8ce179409e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 86,
"avg_line_length": 35,
"alnum_prop": 0.6824175824175824,
"repo_name": "tswast/google-cloud-python",
"id": "18b15801ffee0065a1fd77f06db7d7373dc887c7",
"size": "2396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigquery/samples/undelete_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.random import random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : array or list of array of shape = [n_classes]
Class labels for each output.
n_classes_ : array or list of array of shape = [n_classes]
        Number of labels for each output.
class_prior_ : array or list of array of shape = [n_classes]
Probability of each class for each output.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
`sparse_output_` : bool,
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant", "prior"):
raise ValueError("Unknown strategy type.")
if self.strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if (self.strategy == "constant" and
any(constant[k] not in self.classes_[k]
for k in range(self.n_outputs_))):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self.strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self.strategy == "stratified":
class_prob = class_prior_
elif self.strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self.strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self.strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self.strategy == "stratified":
y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)).T
elif self.strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self.strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
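# --- Illustration added for this document (not part of the original scikit-learn module) ---
# A minimal sketch of the strategies documented on DummyClassifier, run on a
# tiny made-up dataset; it only uses the class defined above.
def _demo_dummy_classifier():  # pragma: no cover - illustrative only
    X = np.zeros((6, 1))                  # features are ignored by DummyClassifier
    y = np.array([0, 0, 0, 1, 1, 2])      # class 0 is the most frequent label
    majority = DummyClassifier(strategy="most_frequent").fit(X, y).predict(X)
    # majority -> array([0, 0, 0, 0, 0, 0])
    prior_proba = DummyClassifier(strategy="prior").fit(X, y).predict_proba(X)
    # every row of prior_proba is the class prior [1/2, 1/3, 1/6]
    constant = DummyClassifier(strategy="constant", constant=2).fit(X, y).predict(X)
    # constant -> array([2, 2, 2, 2, 2, 2])
    return majority, prior_proba, constant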
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0]
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : float or array of shape [n_outputs]
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int,
Number of outputs.
    output_2d_ : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
"'mean', 'median', 'quantile' or 'constant'"
% self.strategy)
y = check_array(y, ensure_2d=False)
if len(y) == 0:
raise ValueError("y must not be empty.")
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
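# --- Illustration added for this document (not part of the original scikit-learn module) ---
# A companion sketch for DummyRegressor with made-up targets, showing the
# "mean" and "quantile" strategies producing constant predictions.
def _demo_dummy_regressor():  # pragma: no cover - illustrative only
    X = np.zeros((4, 1))                  # features are ignored
    y = np.array([1.0, 2.0, 3.0, 10.0])
    mean_pred = DummyRegressor(strategy="mean").fit(X, y).predict(X)
    # mean_pred -> array([4., 4., 4., 4.]) because the target mean is 4.0
    q90_pred = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, y).predict(X)
    # q90_pred is constant and equal to the 90th percentile of y
    return mean_pred, q90_pred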
|
{
"content_hash": "150b7745f7838c157b7c62617174d076",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 79,
"avg_line_length": 36.563169164882225,
"alnum_prop": 0.5503953147877013,
"repo_name": "schets/scikit-learn",
"id": "f87a6d0def9816313f1e6aa4556185774be384e6",
"size": "17252",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sklearn/dummy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1370"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5796835"
},
{
"name": "Shell",
"bytes": "5968"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the Hyper-V utils V2.
"""
import mock
from neutron.plugins.hyperv.agent import utils
from neutron.plugins.hyperv.agent import utilsv2
from neutron.tests import base
class TestHyperVUtilsV2(base.BaseTestCase):
_FAKE_VSWITCH_NAME = "fake_vswitch_name"
_FAKE_PORT_NAME = "fake_port_name"
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_VSWITCH = "fake_vswitch"
_FAKE_VLAN_ID = "fake_vlan_id"
_FAKE_CLASS_NAME = "fake_class_name"
_FAKE_ELEMENT_NAME = "fake_element_name"
_FAKE_HYPERV_VM_STATE = 'fake_hyperv_state'
_FAKE_ACL_ACT = 'fake_acl_action'
_FAKE_ACL_DIR = 'fake_acl_dir'
_FAKE_ACL_TYPE = 'fake_acl_type'
_FAKE_LOCAL_PORT = 'fake_local_port'
_FAKE_PROTOCOL = 'fake_port_protocol'
_FAKE_REMOTE_ADDR = '0.0.0.0/0'
_FAKE_WEIGHT = 'fake_weight'
def setUp(self):
super(TestHyperVUtilsV2, self).setUp()
self._utils = utilsv2.HyperVUtilsV2()
self._utils._wmi_conn = mock.MagicMock()
def test_connect_vnic_to_vswitch_found(self):
self._test_connect_vnic_to_vswitch(True)
def test_connect_vnic_to_vswitch_not_found(self):
self._test_connect_vnic_to_vswitch(False)
def _test_connect_vnic_to_vswitch(self, found):
self._utils._get_vnic_settings = mock.MagicMock()
if not found:
mock_vm = mock.MagicMock()
self._utils._get_vm_from_res_setting_data = mock.MagicMock(
return_value=mock_vm)
self._utils._add_virt_resource = mock.MagicMock()
else:
self._utils._modify_virt_resource = mock.MagicMock()
self._utils._get_vswitch = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock()
mock_port = mock.MagicMock()
self._utils._get_switch_port_allocation.return_value = (mock_port,
found)
self._utils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME,
self._FAKE_PORT_NAME)
if not found:
self._utils._add_virt_resource.assert_called_with(mock_vm,
mock_port)
else:
self._utils._modify_virt_resource.assert_called_with(mock_port)
def test_add_virt_resource(self):
self._test_virt_method('AddResourceSettings', 3, '_add_virt_resource',
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def test_add_virt_feature(self):
self._test_virt_method('AddFeatureSettings', 3, '_add_virt_feature',
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def test_modify_virt_resource(self):
self._test_virt_method('ModifyResourceSettings', 3,
'_modify_virt_resource', False,
ResourceSettings=[self._FAKE_RES_DATA])
def test_remove_virt_resource(self):
self._test_virt_method('RemoveResourceSettings', 2,
'_remove_virt_resource', False,
ResourceSettings=[self._FAKE_RES_PATH])
def test_remove_virt_feature(self):
self._test_virt_method('RemoveFeatureSettings', 2,
'_remove_virt_feature', False,
FeatureSettings=[self._FAKE_RES_PATH])
def _test_virt_method(self, vsms_method_name, return_count,
utils_method_name, with_mock_vm, *args, **kwargs):
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
vsms_method = getattr(mock_svc, vsms_method_name)
mock_rsd = self._mock_vsms_method(vsms_method, return_count)
if with_mock_vm:
mock_vm = mock.MagicMock()
mock_vm.path_.return_value = self._FAKE_VM_PATH
getattr(self._utils, utils_method_name)(mock_vm, mock_rsd)
else:
getattr(self._utils, utils_method_name)(mock_rsd)
if args:
vsms_method.assert_called_once_with(*args)
else:
vsms_method.assert_called_once_with(**kwargs)
def _mock_vsms_method(self, vsms_method, return_count):
args = None
if return_count == 3:
args = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
else:
args = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
vsms_method.return_value = args
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._utils._check_job_status = mock.MagicMock()
return mock_res_setting_data
def test_disconnect_switch_port_delete_port(self):
self._test_disconnect_switch_port(True)
def test_disconnect_switch_port_modify_port(self):
self._test_disconnect_switch_port(False)
def _test_disconnect_switch_port(self, delete_port):
self._utils._get_switch_port_allocation = mock.MagicMock()
mock_sw_port = mock.MagicMock()
self._utils._get_switch_port_allocation.return_value = (mock_sw_port,
True)
if delete_port:
self._utils._remove_virt_resource = mock.MagicMock()
else:
self._utils._modify_virt_resource = mock.MagicMock()
self._utils.disconnect_switch_port(self._FAKE_VSWITCH_NAME,
self._FAKE_PORT_NAME,
delete_port)
if delete_port:
self._utils._remove_virt_resource.assert_called_with(mock_sw_port)
else:
self._utils._modify_virt_resource.assert_called_with(mock_sw_port)
def test_get_vswitch(self):
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = [
self._FAKE_VSWITCH]
vswitch = self._utils._get_vswitch(self._FAKE_VSWITCH_NAME)
self.assertEqual(self._FAKE_VSWITCH, vswitch)
def test_get_vswitch_not_found(self):
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = []
self.assertRaises(utils.HyperVException, self._utils._get_vswitch,
self._FAKE_VSWITCH_NAME)
def test_get_vswitch_external_port(self):
mock_vswitch = mock.MagicMock()
mock_sw_port = mock.MagicMock()
mock_vswitch.associators.return_value = [mock_sw_port]
mock_le = mock_sw_port.associators.return_value
mock_le.__len__.return_value = 1
mock_le1 = mock_le[0].associators.return_value
mock_le1.__len__.return_value = 1
vswitch_port = self._utils._get_vswitch_external_port(mock_vswitch)
self.assertEqual(mock_sw_port, vswitch_port)
def test_set_vswitch_port_vlan_id(self):
mock_port_alloc = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port_alloc, True))
self._utils._get_vlan_setting_data_from_port_alloc = mock.MagicMock()
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.RemoveFeatureSettings.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
mock_vlan_settings = mock.MagicMock()
self._utils._get_vlan_setting_data = mock.MagicMock(return_value=(
mock_vlan_settings, True))
mock_svc.AddFeatureSettings.return_value = (self._FAKE_JOB_PATH,
None,
self._FAKE_RET_VAL)
self._utils.set_vswitch_port_vlan_id(self._FAKE_VLAN_ID,
self._FAKE_PORT_NAME)
self.assertTrue(mock_svc.RemoveFeatureSettings.called)
self.assertTrue(mock_svc.AddFeatureSettings.called)
def test_get_setting_data(self):
self._utils._get_first_item = mock.MagicMock(return_value=None)
mock_data = mock.MagicMock()
self._utils._get_default_setting_data = mock.MagicMock(
return_value=mock_data)
ret_val = self._utils._get_setting_data(self._FAKE_CLASS_NAME,
self._FAKE_ELEMENT_NAME,
True)
self.assertEqual(ret_val, (mock_data, False))
def test_enable_port_metrics_collection(self):
mock_port = mock.MagicMock()
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port, True))
mock_acl = mock.MagicMock()
with mock.patch.multiple(
self._utils,
_get_default_setting_data=mock.MagicMock(return_value=mock_acl),
_add_virt_feature=mock.MagicMock()):
self._utils.enable_port_metrics_collection(self._FAKE_PORT_NAME)
self.assertEqual(4, len(self._utils._add_virt_feature.mock_calls))
self._utils._add_virt_feature.assert_called_with(
mock_port, mock_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_ok(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
mock_metric_def = mock.MagicMock()
mock_port = mock.MagicMock()
mock_get_port_allocation.return_value = (mock_port, True)
mock_metrics_def_source.return_value = [mock_metric_def]
m_call = mock.call(Subject=mock_port.path_.return_value,
Definition=mock_metric_def.path_.return_value,
MetricCollectionEnabled=self._utils._METRIC_ENABLED)
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
mock_metrics_svc.ControlMetrics.assert_has_calls([m_call, m_call])
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_no_port(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_get_port_allocation.return_value = (None, False)
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_enable_control_metrics_no_def(self, mock_get_port_allocation):
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
mock_port = mock.MagicMock()
mock_get_port_allocation.return_value = (mock_port, True)
mock_metrics_def_source.return_value = None
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._is_port_vm_started')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_can_enable_control_metrics_true(self, mock_get, mock_is_started):
mock_acl = mock.MagicMock()
mock_acl.Action = self._utils._ACL_ACTION_METER
self._test_can_enable_control_metrics(mock_get, mock_is_started,
[mock_acl, mock_acl], True)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._is_port_vm_started')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._get_switch_port_allocation')
def test_can_enable_control_metrics_false(self, mock_get, mock_is_started):
self._test_can_enable_control_metrics(mock_get, mock_is_started, [],
False)
def _test_can_enable_control_metrics(self, mock_get_port, mock_vm_started,
acls, expected_result):
mock_port = mock.MagicMock()
mock_acl = mock.MagicMock()
mock_acl.Action = self._utils._ACL_ACTION_METER
mock_port.associators.return_value = acls
mock_get_port.return_value = (mock_port, True)
mock_vm_started.return_value = True
result = self._utils.can_enable_control_metrics(self._FAKE_PORT_NAME)
self.assertEqual(expected_result, result)
def test_is_port_vm_started_true(self):
self._test_is_port_vm_started(self._utils._HYPERV_VM_STATE_ENABLED,
True)
def test_is_port_vm_started_false(self):
self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False)
def _test_is_port_vm_started(self, vm_state, expected_result):
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
mock_port = mock.MagicMock()
mock_vmsettings = mock.MagicMock()
mock_summary = mock.MagicMock()
mock_summary.EnabledState = vm_state
mock_vmsettings.path_.return_value = self._FAKE_RES_PATH
mock_port.associators.return_value = [mock_vmsettings]
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
[mock_summary])
result = self._utils._is_port_vm_started(mock_port)
self.assertEqual(expected_result, result)
mock_svc.GetSummaryInformation.assert_called_once_with(
[self._utils._VM_SUMMARY_ENABLED_STATE],
[self._FAKE_RES_PATH])
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._bind_security_rule')
def test_create_default_reject_all_rules(self, mock_bind, mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
m_acl.Action = self._utils._ACL_ACTION_DENY
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
calls = []
ipv4_pair = (self._utils._ACL_TYPE_IPV4, self._utils._IPV4_ANY)
ipv6_pair = (self._utils._ACL_TYPE_IPV6, self._utils._IPV6_ANY)
for direction in [self._utils._ACL_DIR_IN, self._utils._ACL_DIR_OUT]:
for acl_type, address in [ipv4_pair, ipv6_pair]:
for protocol in [self._utils._TCP_PROTOCOL,
self._utils._UDP_PROTOCOL,
self._utils._ICMP_PROTOCOL]:
calls.append(mock.call(m_port, direction, acl_type,
self._utils._ACL_ACTION_DENY,
self._utils._ACL_DEFAULT,
protocol, address, mock.ANY))
self._utils._remove_virt_feature.assert_called_once_with(m_acl)
self._utils._bind_security_rule.assert_has_calls(calls)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._bind_security_rule')
def test_create_default_reject_all_rules_already_added(self, mock_bind,
mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
m_acl.Action = self._utils._ACL_ACTION_DENY
m_port.associators.return_value = [
m_acl] * self._utils._REJECT_ACLS_COUNT
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
self.assertFalse(self._utils._remove_virt_feature.called)
self.assertFalse(self._utils._bind_security_rule.called)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._add_virt_feature')
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._create_security_acl')
def test_bind_security_rule(self, mock_create_acl, mock_add, mock_remove):
(m_port, m_acl) = self._setup_security_rule_test()
mock_create_acl.return_value = m_acl
self._utils._bind_security_rule(
m_port, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
self._FAKE_ACL_ACT, self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL,
self._FAKE_REMOTE_ADDR, self._FAKE_WEIGHT)
self._utils._add_virt_feature.assert_called_once_with(m_port, m_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_virt_feature')
def test_remove_security_rule(self, mock_remove_feature):
mock_acl = self._setup_security_rule_test()[1]
self._utils.remove_security_rule(
self._FAKE_PORT_NAME, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)
self._utils._remove_virt_feature.assert_called_once_with(mock_acl)
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
'._remove_multiple_virt_features')
def test_remove_all_security_rules(self, mock_remove_feature):
mock_acl = self._setup_security_rule_test()[1]
self._utils.remove_all_security_rules(self._FAKE_PORT_NAME)
self._utils._remove_multiple_virt_features.assert_called_once_with(
[mock_acl])
def _setup_security_rule_test(self):
mock_port = mock.MagicMock()
mock_acl = mock.MagicMock()
mock_port.associators.return_value = [mock_acl]
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
mock_port, True))
self._utils._filter_security_acls = mock.MagicMock(
return_value=[mock_acl])
return (mock_port, mock_acl)
def test_filter_acls(self):
mock_acl = mock.MagicMock()
mock_acl.Action = self._FAKE_ACL_ACT
mock_acl.Applicability = self._utils._ACL_APPLICABILITY_LOCAL
mock_acl.Direction = self._FAKE_ACL_DIR
mock_acl.AclType = self._FAKE_ACL_TYPE
mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR
acls = [mock_acl, mock_acl]
good_acls = self._utils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR,
self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR)
bad_acls = self._utils._filter_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE)
self.assertEqual(acls, good_acls)
self.assertEqual([], bad_acls)
class TestHyperVUtilsV2R2(base.BaseTestCase):
_FAKE_ACL_ACT = 'fake_acl_action'
_FAKE_ACL_DIR = 'fake_direction'
_FAKE_ACL_TYPE = 'fake_acl_type'
_FAKE_LOCAL_PORT = 'fake_local_port'
_FAKE_PROTOCOL = 'fake_port_protocol'
_FAKE_REMOTE_ADDR = '10.0.0.0/0'
def setUp(self):
super(TestHyperVUtilsV2R2, self).setUp()
self._utils = utilsv2.HyperVUtilsV2R2()
def test_filter_security_acls(self):
self._test_filter_security_acls(
self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)
def test_filter_security_acls_default(self):
default = self._utils._ACL_DEFAULT
self._test_filter_security_acls(
default, default, self._FAKE_REMOTE_ADDR)
def _test_filter_security_acls(self, local_port, protocol, remote_addr):
acls = []
default = self._utils._ACL_DEFAULT
for port, proto in [(default, default), (local_port, protocol)]:
mock_acl = mock.MagicMock()
mock_acl.Action = self._utils._ACL_ACTION_ALLOW
mock_acl.Direction = self._FAKE_ACL_DIR
mock_acl.LocalPort = port
mock_acl.Protocol = proto
mock_acl.RemoteIPAddress = remote_addr
acls.append(mock_acl)
right_acls = [a for a in acls if a.LocalPort == local_port]
good_acls = self._utils._filter_security_acls(
acls, mock_acl.Action, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
local_port, protocol, remote_addr)
bad_acls = self._utils._filter_security_acls(
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
local_port, protocol, remote_addr)
self.assertEqual(right_acls, good_acls)
self.assertEqual([], bad_acls)
def test_get_new_weight(self):
mockacl1 = mock.MagicMock()
mockacl1.Weight = self._utils._MAX_WEIGHT - 1
mockacl2 = mock.MagicMock()
mockacl2.Weight = self._utils._MAX_WEIGHT - 3
self.assertEqual(self._utils._MAX_WEIGHT - 2,
self._utils._get_new_weight([mockacl1, mockacl2]))
def test_get_new_weight_no_acls(self):
self.assertEqual(self._utils._MAX_WEIGHT - 1,
self._utils._get_new_weight([]))
def test_get_new_weight_default_acls(self):
mockacl1 = mock.MagicMock()
mockacl1.Weight = self._utils._MAX_WEIGHT - 1
mockacl2 = mock.MagicMock()
mockacl2.Weight = self._utils._MAX_WEIGHT - 2
mockacl2.Action = self._utils._ACL_ACTION_DENY
self.assertEqual(self._utils._MAX_WEIGHT - 2,
self._utils._get_new_weight([mockacl1, mockacl2]))
|
{
"content_hash": "a0c398a65928399fa90ce991d5b0a5bb",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 79,
"avg_line_length": 43.41317365269461,
"alnum_prop": 0.6000459770114942,
"repo_name": "sajuptpm/neutron-ipam",
"id": "c020f16e0f6910eae137c2efd46b3e73416b7aff",
"size": "22489",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/tests/unit/hyperv/test_hyperv_utilsv2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import mxnet as mx
import re
def load_checkpoint(prefix, epoch):
"""
Load model checkpoint from file.
:param prefix: Prefix of model name.
:param epoch: Epoch number of model we would like to load.
:return: (arg_params, aux_params)
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
"""
save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
def convert_context(params, ctx):
"""
:param params: dict of str to NDArray
:param ctx: the context to convert to
:return: dict of str of NDArray with context ctx
"""
new_params = dict()
for k, v in params.items():
new_params[k] = v.as_in_context(ctx)
return new_params
def load_param(prefix, epoch, convert=False, ctx=None, process=False):
"""
wrapper for load checkpoint
:param prefix: Prefix of model name.
:param epoch: Epoch number of model we would like to load.
:param convert: reference model should be converted to GPU NDArray first
:param ctx: if convert then ctx must be designated.
    :param process: rename parameters containing '_test' by stripping that suffix
:return: (arg_params, aux_params)
"""
arg_params, aux_params = load_checkpoint(prefix, epoch)
if convert:
if ctx is None:
ctx = mx.cpu()
arg_params = convert_context(arg_params, ctx)
aux_params = convert_context(aux_params, ctx)
if process:
tests = [k for k in arg_params.keys() if '_test' in k]
for test in tests:
arg_params[test.replace('_test', '')] = arg_params.pop(test)
return arg_params, aux_params
def load_param_file(model_path, process=True):
    _ = re.search(r'-\d{4}\.params', model_path)
assert _ is not None, (r"Model params should be in format xxx-%04d.params")
ext_ = _.group(0)
model_path_base = model_path.replace(ext_,"")
model_path_epoch = int(ext_.replace("-","").replace(".params",""))
arg_params, aux_params = load_param(model_path_base, model_path_epoch, process=process)
return arg_params, aux_params
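# --- Illustration added for this document (not part of the original module) ---
# A minimal sketch of the two entry points above. The checkpoint path/prefix
# is hypothetical; it only has to follow the "<prefix>-%04d.params" naming
# convention that load_param_file checks for.
def _demo_load_checkpoint():  # pragma: no cover - illustrative only
    # Load by prefix/epoch and move the weights to a GPU context...
    arg_params, aux_params = load_param('model', 10, convert=True, ctx=mx.gpu(0))
    # ...or load the same checkpoint directly from its file name.
    arg_params2, aux_params2 = load_param_file('model-0010.params', process=True)
    # Both return dicts mapping parameter names to mx.nd.NDArray values.
    return arg_params, aux_params, arg_params2, aux_params2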
|
{
"content_hash": "cfb4482908454650ba7b40b25c7bc5f2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 91,
"avg_line_length": 36.05882352941177,
"alnum_prop": 0.6264274061990212,
"repo_name": "vincentlooi/FCIS",
"id": "7c78cb60d3b52cb6fcbc3415412887ac30436829",
"size": "2452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/utils/load_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9632"
},
{
"name": "C++",
"bytes": "48901"
},
{
"name": "Cuda",
"bytes": "634496"
},
{
"name": "Makefile",
"bytes": "366"
},
{
"name": "Python",
"bytes": "554802"
},
{
"name": "Shell",
"bytes": "347"
}
],
"symlink_target": ""
}
|
from twisted.web import resource
from coherence.upnp.core.soap_service import UPnPPublisher
from coherence.upnp.core import service
class FakeMediaReceiverRegistrarBackend:
def upnp_IsAuthorized(self, *args, **kwargs):
r = {'Result': 1}
return r
def upnp_IsValidated(self, *args, **kwargs):
r = {'Result': 1}
return r
def upnp_RegisterDevice(self, *args, **kwargs):
""" in parameter RegistrationReqMsg """
RegistrationReqMsg = kwargs['RegistrationReqMsg']
""" FIXME: check with WMC and WMP """
r = {'RegistrationRespMsg': 'WTF should be in here?'}
return r
class MediaReceiverRegistrarControl(service.ServiceControl, UPnPPublisher):
def __init__(self, server):
service.ServiceControl.__init__(self)
UPnPPublisher.__init__(self)
self.service = server
self.variables = server.get_variables()
self.actions = server.get_actions()
class MediaReceiverRegistrarServer(service.ServiceServer, resource.Resource):
implementation = 'optional'
def __init__(self, device, backend=None):
self.device = device
        if backend is None:
backend = self.device.backend
resource.Resource.__init__(self)
self.version = 1
self.namespace = 'microsoft.com'
self.id_namespace = 'microsoft.com'
service.ServiceServer.__init__(self, 'X_MS_MediaReceiverRegistrar', self.version, backend)
self.device_description_tmpl = 'xbox-description-1.xml'
self.control = MediaReceiverRegistrarControl(self)
self.putChild('scpd.xml', service.scpdXML(self, self.control))
self.putChild('control', self.control)
def listchilds(self, uri):
cl = ''
for c in self.children:
cl += '<li><a href=%s/%s>%s</a></li>' % (uri, c, c)
return cl
def render(self, request):
return '<html><p>root of the MediaReceiverRegistrar</p><p><ul>%s</ul></p></html>' % self.listchilds(request.uri)
|
{
"content_hash": "83ca03e79a7b9a8af00cdd8e10be1409",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 120,
"avg_line_length": 32.74193548387097,
"alnum_prop": 0.6364532019704433,
"repo_name": "coherence-project/Coherence",
"id": "9b85ac82b366902d83356cbde541d394bc3587c1",
"size": "2198",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "coherence/upnp/services/servers/media_receiver_registrar_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1305048"
},
{
"name": "Roff",
"bytes": "712"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
}
|
import click
from markov_slackbot.main import markov_slackbot
from markov_slackbot.main import generate_example_config_file
from markov_slackbot.main import prepare_environment
def main():
cli.add_command(run_bot)
cli.add_command(generate_example_config)
cli.add_command(prepare_env)
cli()
@click.group()
def cli():
pass
@click.command()
@click.option('--config_file', default='config.json',
help='Configuration filepath.')
def run_bot(config_file):
"""Start the bot."""
markov_slackbot(config_file)
@click.command()
def generate_example_config():
"""Generate an example config file."""
generate_example_config_file()
@click.command()
def prepare_env():
"""Prepare the environment for the bot."""
prepare_environment()
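# --- Illustration added for this document (not part of the original module) ---
# A minimal sketch of exercising the group above without a console script,
# using click's test runner. The command name is read from the command object
# itself, so no naming convention is assumed.
def _demo_cli_invoke():  # pragma: no cover - illustrative only
    from click.testing import CliRunner
    cli.add_command(generate_example_config)
    result = CliRunner().invoke(cli, [generate_example_config.name])
    return result.exit_code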
if __name__ == "__main__":
main()
|
{
"content_hash": "148149a9d5b89262278ba31d7ab31137",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 61,
"avg_line_length": 20.195121951219512,
"alnum_prop": 0.678743961352657,
"repo_name": "StuartJSquires/markov_slackbot",
"id": "2946456ed15a8b05c2be4535429c66d34dac05ec",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "markov_slackbot/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "36630"
}
],
"symlink_target": ""
}
|
import math
import torch
from ..constraints import Positive
from .kernel import Kernel
class PeriodicKernel(Kernel):
r""" Computes a covariance matrix based on the periodic kernel
between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
.. math::
\begin{equation*}
k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
            -\frac{2 \sin^2 \left( \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_1 / p \right) }
{ \ell^2 } \right)
\end{equation*}
where
    * :math:`p` is the period length parameter.
* :math:`\ell` is a lengthscale parameter.
.. note::
This kernel does not have an `outputscale` parameter. To add a scaling parameter,
decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
.. note::
This kernel does not have an ARD lengthscale option.
Args:
:attr:`batch_shape` (torch.Size, optional):
Set this if you want a separate lengthscale for each
batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
:attr:`active_dims` (tuple of ints, optional):
Set this if you want to compute the covariance of only a few input dimensions. The ints
corresponds to the indices of the dimensions. Default: `None`.
:attr:`period_length_prior` (Prior, optional):
Set this if you want to apply a prior to the period length parameter. Default: `None`.
:attr:`lengthscale_prior` (Prior, optional):
Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
:attr:`lengthscale_constraint` (Constraint, optional):
Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.
:attr:`period_length_constraint` (Constraint, optional):
Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.
:attr:`eps` (float):
The minimum value that the lengthscale/period length can take
(prevents divide by zero errors). Default: `1e-6`.
Attributes:
:attr:`lengthscale` (Tensor):
The lengthscale parameter. Size = `*batch_shape x 1 x 1`.
:attr:`period_length` (Tensor):
The period length parameter. Size = `*batch_shape x 1 x 1`.
Example:
>>> x = torch.randn(10, 5)
>>> # Non-batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
>>>
>>> batch_x = torch.randn(2, 10, 5)
>>> # Batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
>>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_shape=torch.Size([2])))
>>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
"""
has_lengthscale = True
def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):
super(PeriodicKernel, self).__init__(**kwargs)
if period_length_constraint is None:
period_length_constraint = Positive()
self.register_parameter(
name="raw_period_length", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))
)
if period_length_prior is not None:
self.register_prior(
"period_length_prior",
period_length_prior,
lambda: self.period_length,
lambda v: self._set_period_length(v),
)
self.register_constraint("raw_period_length", period_length_constraint)
@property
def period_length(self):
return self.raw_period_length_constraint.transform(self.raw_period_length)
@period_length.setter
def period_length(self, value):
self._set_period_length(value)
def _set_period_length(self, value):
if not torch.is_tensor(value):
value = torch.as_tensor(value).to(self.raw_period_length)
self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))
def forward(self, x1, x2, diag=False, **params):
x1_ = x1.div(self.period_length)
x2_ = x2.div(self.period_length)
diff = self.covar_dist(x1_, x2_, diag=diag, **params)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
if diag:
res = res.squeeze(0)
return res
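# --- Illustration added for this document (not part of the original GPyTorch module) ---
# A minimal sketch evaluating the kernel on toy 1-D inputs and mirroring the
# forward() computation above element-wise, i.e.
# k(a, b) = exp(-2 * sin^2(pi * |a - b| / p) / lengthscale).
# Input shapes and values are made up for demonstration.
def _demo_periodic_kernel():  # pragma: no cover - illustrative only
    kernel = PeriodicKernel()
    x1 = torch.linspace(0, 1, 5).unsqueeze(-1)    # 5 x 1 inputs
    x2 = torch.linspace(0, 1, 3).unsqueeze(-1)    # 3 x 1 inputs
    covar = kernel.forward(x1, x2)                # 5 x 3 covariance matrix
    diff = x1 / kernel.period_length - (x2 / kernel.period_length).transpose(-2, -1)
    manual = torch.sin(diff.abs().mul(math.pi)).pow(2).mul(-2 / kernel.lengthscale).exp()
    return covar, manual                          # the two computations agree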
|
{
"content_hash": "2365e136a791128bd001bf786f51098b",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 116,
"avg_line_length": 40.56140350877193,
"alnum_prop": 0.6176470588235294,
"repo_name": "jrg365/gpytorch",
"id": "65c1f1b085733dfa12fe58526a06c675b99ab9dd",
"size": "4648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpytorch/kernels/periodic_kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6005"
},
{
"name": "C++",
"bytes": "242"
},
{
"name": "Python",
"bytes": "338860"
}
],
"symlink_target": ""
}
|
"""Tests for the unified diff parser process."""
from __future__ import unicode_literals
import codecs
import os.path
import unittest
from unidiff import PatchSet
from unidiff.patch import PY2
from unidiff.errors import UnidiffParseError
if not PY2:
unicode = str
class TestUnidiffParser(unittest.TestCase):
"""Tests for Unified Diff Parser."""
def setUp(self):
super(TestUnidiffParser, self).setUp()
self.samples_dir = os.path.dirname(os.path.realpath(__file__))
self.sample_file = os.path.join(
self.samples_dir, 'samples/sample0.diff')
self.sample_bad_file = os.path.join(
self.samples_dir, 'samples/sample1.diff')
def test_missing_encoding(self):
utf8_file = os.path.join(self.samples_dir, 'samples/sample3.diff')
# read bytes
with open(utf8_file, 'rb') as diff_file:
if PY2:
self.assertRaises(UnicodeDecodeError, PatchSet, diff_file)
else:
# unicode expected
self.assertRaises(TypeError, PatchSet, diff_file)
def test_encoding_param(self):
utf8_file = os.path.join(self.samples_dir, 'samples/sample3.diff')
with open(utf8_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
# 3 files updated by diff
self.assertEqual(len(res), 3)
added_unicode_line = res.added_files[0][0][1]
self.assertEqual(added_unicode_line.value, 'holá mundo!\n')
def test_no_newline_at_end_of_file(self):
utf8_file = os.path.join(self.samples_dir, 'samples/sample3.diff')
with open(utf8_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
# 3 files updated by diff
self.assertEqual(len(res), 3)
added_unicode_line = res.added_files[0][0][4]
self.assertEqual(added_unicode_line.line_type, '\\')
self.assertEqual(added_unicode_line.value, ' No newline at end of file\n')
added_unicode_line = res.modified_files[0][0][8]
self.assertEqual(added_unicode_line.line_type, '\\')
self.assertEqual(added_unicode_line.value, ' No newline at end of file\n')
def test_preserve_dos_line_endings(self):
utf8_file = os.path.join(self.samples_dir, 'samples/sample4.diff')
with open(utf8_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
# 3 files updated by diff
self.assertEqual(len(res), 3)
added_unicode_line = res.added_files[0][0][1]
self.assertEqual(added_unicode_line.value, 'holá mundo!\r\n')
def test_preserve_dos_line_endings_empty_line_type(self):
utf8_file = os.path.join(self.samples_dir, 'samples/sample5.diff')
with open(utf8_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
# 2 files updated by diff
self.assertEqual(len(res), 2)
modified_unicode_line = res.modified_files[0][0][6]
self.assertEqual(modified_unicode_line.value, '\r\n')
self.assertEqual(modified_unicode_line.line_type, ' ')
modified_unicode_line = res.modified_files[1][0][6]
self.assertEqual(modified_unicode_line.value, '\n')
self.assertEqual(modified_unicode_line.line_type, ' ')
def test_print_hunks_without_gaps(self):
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
res = PatchSet(diff_file)
lines = unicode(res).splitlines()
self.assertEqual(lines[12], '@@ -5,16 +11,10 @@')
self.assertEqual(lines[31], '@@ -22,3 +22,7 @@')
def _test_parse_sample(self, metadata_only):
"""Parse sample file."""
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
res = PatchSet(diff_file, metadata_only=metadata_only)
        # three files in the patch
self.assertEqual(len(res), 3)
# three hunks
self.assertEqual(len(res[0]), 3)
# first file is modified
self.assertTrue(res[0].is_modified_file)
self.assertFalse(res[0].is_removed_file)
self.assertFalse(res[0].is_added_file)
self.assertFalse(res[0].is_binary_file)
        # Hunk 1: six additions, no deletions, a section header
self.assertEqual(res[0][0].added, 6)
self.assertEqual(res[0][0].removed, 0)
self.assertEqual(res[0][0].section_header, 'Section Header')
# Hunk 2: 2 additions, 8 deletions, no section header
self.assertEqual(res[0][1].added, 2)
self.assertEqual(res[0][1].removed, 8)
self.assertEqual(res[0][1].section_header, '')
# Hunk 3: four additions, no deletions, no section header
self.assertEqual(res[0][2].added, 4)
self.assertEqual(res[0][2].removed, 0)
self.assertEqual(res[0][2].section_header, '')
# Check file totals
self.assertEqual(res[0].added, 12)
self.assertEqual(res[0].removed, 8)
# second file is added
self.assertFalse(res[1].is_modified_file)
self.assertFalse(res[1].is_removed_file)
self.assertTrue(res[1].is_added_file)
self.assertFalse(res[1].is_binary_file)
# third file is removed
self.assertFalse(res[2].is_modified_file)
self.assertTrue(res[2].is_removed_file)
self.assertFalse(res[2].is_added_file)
self.assertFalse(res[2].is_binary_file)
self.assertEqual(res.added, 21)
self.assertEqual(res.removed, 17)
def test_parse_sample_full(self):
self._test_parse_sample(metadata_only=False)
def test_parse_sample_metadata_only(self):
self._test_parse_sample(metadata_only=True)
def test_patchset_compare(self):
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
ps1 = PatchSet(diff_file)
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
ps2 = PatchSet(diff_file)
other_file = os.path.join(self.samples_dir, 'samples/sample3.diff')
with open(other_file, 'rb') as diff_file:
ps3 = PatchSet(diff_file, encoding='utf-8')
self.assertEqual(ps1, ps2)
self.assertNotEqual(ps1, ps3)
def test_patchset_from_string(self):
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
diff_data = diff_file.read()
ps1 = PatchSet.from_string(diff_data)
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
ps2 = PatchSet(diff_file)
self.assertEqual(ps1, ps2)
def test_patchset_from_bytes_string(self):
with codecs.open(self.sample_file, 'rb') as diff_file:
diff_data = diff_file.read()
ps1 = PatchSet.from_string(diff_data, encoding='utf-8')
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
ps2 = PatchSet(diff_file)
self.assertEqual(ps1, ps2)
def test_patchset_string_input(self):
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
diff_data = diff_file.read()
ps1 = PatchSet(diff_data)
with codecs.open(self.sample_file, 'r', encoding='utf-8') as diff_file:
ps2 = PatchSet(diff_file)
self.assertEqual(ps1, ps2)
def test_parse_malformed_diff(self):
"""Parse malformed file."""
with open(self.sample_bad_file) as diff_file:
self.assertRaises(UnidiffParseError, PatchSet, diff_file)
def test_parse_malformed_diff_longer_than_expected(self):
"""Parse malformed file with non-terminated hunk."""
utf8_file = os.path.join(self.samples_dir, 'samples/sample6.diff')
with open(utf8_file, 'r') as diff_file:
self.assertRaises(UnidiffParseError, PatchSet, diff_file)
def test_parse_malformed_diff_shorter_than_expected(self):
"""Parse malformed file with non-terminated hunk."""
utf8_file = os.path.join(self.samples_dir, 'samples/sample7.diff')
with open(utf8_file, 'r') as diff_file:
self.assertRaises(UnidiffParseError, PatchSet, diff_file)
def test_from_filename_with_cr_in_diff_text_files(self):
"""Parse git diff text files that contain CR"""
utf8_file = os.path.join(self.samples_dir, 'samples/git_cr.diff')
self.assertRaises(UnidiffParseError, PatchSet.from_filename, utf8_file)
ps1 = PatchSet.from_filename(utf8_file, newline='\n')
import io
with io.open(utf8_file, 'r', newline='\n') as diff_file:
ps2 = PatchSet(diff_file)
self.assertEqual(ps1, ps2)
def test_parse_diff_with_new_and_modified_binary_files(self):
"""Parse git diff file with newly added and modified binaries files."""
utf8_file = os.path.join(self.samples_dir, 'samples/sample8.diff')
with open(utf8_file, 'r') as diff_file:
res = PatchSet(diff_file)
        # five files in the patch
self.assertEqual(len(res), 5)
# first empty file is added
self.assertFalse(res[0].is_modified_file)
self.assertFalse(res[0].is_removed_file)
self.assertTrue(res[0].is_added_file)
self.assertFalse(res[0].is_binary_file)
# second file is added
self.assertFalse(res[1].is_modified_file)
self.assertFalse(res[1].is_removed_file)
self.assertTrue(res[1].is_added_file)
self.assertTrue(res[1].is_binary_file)
# third file is modified
self.assertTrue(res[2].is_modified_file)
self.assertFalse(res[2].is_removed_file)
self.assertFalse(res[2].is_added_file)
self.assertTrue(res[2].is_binary_file)
# fourth file is removed
self.assertFalse(res[3].is_modified_file)
self.assertTrue(res[3].is_removed_file)
self.assertFalse(res[3].is_added_file)
self.assertTrue(res[3].is_binary_file)
# fifth empty file is added
self.assertFalse(res[4].is_modified_file)
self.assertFalse(res[4].is_removed_file)
self.assertTrue(res[4].is_added_file)
self.assertFalse(res[4].is_binary_file)
def test_parse_round_trip_with_binary_files_in_diff(self):
"""Parse git diff with binary files though round trip"""
utf8_file = os.path.join(self.samples_dir, 'samples/sample8.diff')
with open(utf8_file, 'r') as diff_file:
res1 = PatchSet(diff_file)
res2 = PatchSet(str(res1))
self.assertEqual(res1, res2)
def test_parse_diff_git_no_prefix(self):
utf8_file = os.path.join(self.samples_dir, 'samples/git_no_prefix.diff')
with open(utf8_file, 'r') as diff_file:
res = PatchSet(diff_file)
self.assertEqual(len(res), 3)
self.assertEqual(res[0].source_file, 'file1')
self.assertEqual(res[0].target_file, '/dev/null')
self.assertTrue(res[0].is_removed_file)
self.assertEqual(res[0].path, 'file1')
self.assertEqual(res[1].source_file, 'file2')
self.assertEqual(res[1].target_file, 'file2')
self.assertTrue(res[1].is_modified_file)
self.assertEqual(res[1].path, 'file2')
self.assertEqual(res[2].source_file, '/dev/null')
self.assertEqual(res[2].target_file, 'file3')
self.assertTrue(res[2].is_added_file)
self.assertEqual(res[2].path, 'file3')
def test_parse_filename_with_spaces(self):
filename = os.path.join(self.samples_dir, 'samples/git_filenames_with_spaces.diff')
with open(filename) as f:
res = PatchSet(f)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].source_file, '/dev/null')
self.assertEqual(res[0].target_file, 'b/has spaces/t.sql')
self.assertTrue(res[0].is_added_file)
self.assertEqual(res[0].path, 'has spaces/t.sql')
def test_parse_filename_prefix_with_spaces(self):
filename = os.path.join(self.samples_dir, 'samples/git_filenames_with_spaces_prefix.diff')
with open(filename) as f:
res = PatchSet(f)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].source_file, '/dev/null')
self.assertEqual(res[0].target_file, 'dst://foo bar/baz')
self.assertTrue(res[0].is_added_file)
self.assertEqual(res[0].path, 'dst://foo bar/baz')
def test_deleted_file(self):
filename = os.path.join(self.samples_dir, 'samples/git_delete.diff')
with open(filename) as f:
res = PatchSet(f)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].source_file, 'a/somefile.c')
self.assertEqual(res[0].target_file, '/dev/null')
self.assertTrue(res[0].is_removed_file)
def test_diff_lines_linenos(self):
with open(self.sample_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
target_line_nos = []
source_line_nos = []
diff_line_nos = []
for diff_file in res:
for hunk in diff_file:
for line in hunk:
target_line_nos.append(line.target_line_no)
source_line_nos.append(line.source_line_no)
diff_line_nos.append(line.diff_line_no)
expected_target_line_nos = [
# File: 1, Hunk: 1
1, 2, 3, 4, 5, 6, 7, 8, 9,
# File: 1, Hunk: 2
11, 12, 13, None, None, None, None, None, None, None, 14, 15, 16, None, 17, 18, 19, 20,
# File: 1, Hunk: 3
22, 23, 24, 25, 26, 27, 28,
# File: 2, Hunk 1
1, 2, 3, 4, 5, 6, 7, 8, 9,
# File: 3, Hunk 1
None, None, None, None, None, None, None, None, None,
]
expected_source_line_nos = [
# File: 1, Hunk: 1
None, None, None, None, None, None, 1, 2, 3,
# File: 1, Hunk: 2
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, None, 15, 16, 17, None, 18, 19, 20,
# File: 1, Hunk: 3
22, 23, 24, None, None, None, None,
# File: 2, Hunk 1
None, None, None, None, None, None, None, None, None,
# File: 3, Hunk 1
1, 2, 3, 4, 5, 6, 7, 8, 9,
]
expected_diff_line_nos = [
# File: 1, Hunk: 1
4, 5, 6, 7, 8, 9, 10, 11, 12,
# File: 1, Hunk: 2
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
# File: 1, Hunk: 3
33, 34, 35, 36, 37, 38, 39,
# File: 2, Hunk 1
43, 44, 45, 46, 47, 48, 49, 50, 51,
# File: 3, Hunk 1
55, 56, 57, 58, 59, 60, 61, 62, 63,
]
self.assertEqual(target_line_nos, expected_target_line_nos)
self.assertEqual(source_line_nos, expected_source_line_nos)
self.assertEqual(diff_line_nos, expected_diff_line_nos)
def test_diff_hunk_positions(self):
with open(self.sample_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8')
self.do_test_diff_hunk_positions(res)
def test_diff_metadata_only(self):
with open(self.sample_file, 'rb') as diff_file:
res = PatchSet(diff_file, encoding='utf-8', metadata_only=True)
self.do_test_diff_hunk_positions(res)
def do_test_diff_hunk_positions(self, res):
hunk_positions = []
for diff_file in res:
for hunk in diff_file:
hunk_positions.append((hunk.source_start, hunk.target_start,
hunk.source_length, hunk.target_length))
expected_hunk_positions = [
# File: 1, Hunk: 1
(1, 1, 3, 9),
# File: 1, Hunk: 2
(5, 11, 16, 10),
# File: 1, Hunk: 3
(22, 22, 3, 7),
# File: 2, Hunk: 1
(0, 1, 0, 9),
# File: 3, Hunk: 1
(1, 0, 9, 0)
]
self.assertEqual(hunk_positions, expected_hunk_positions)
class TestVCSSamples(unittest.TestCase):
"""Tests for real examples from VCS."""
samples = ['bzr.diff', 'git.diff', 'hg.diff', 'svn.diff']
def test_samples(self):
tests_dir = os.path.dirname(os.path.realpath(__file__))
for fname in self.samples:
file_path = os.path.join(tests_dir, 'samples', fname)
with codecs.open(file_path, 'r', encoding='utf-8') as diff_file:
res = PatchSet(diff_file)
# 3 files updated by diff
self.assertEqual(len(res), 3)
# 1 added file
added_files = res.added_files
self.assertEqual(len(added_files), 1)
self.assertEqual(added_files[0].path, 'added_file')
# 1 hunk, 4 lines
self.assertEqual(len(added_files[0]), 1)
self.assertEqual(added_files[0].added, 4)
self.assertEqual(added_files[0].removed, 0)
# 1 removed file
removed_files = res.removed_files
self.assertEqual(len(removed_files), 1)
self.assertEqual(removed_files[0].path, 'removed_file')
# 1 hunk, 3 removed lines
self.assertEqual(len(removed_files[0]), 1)
self.assertEqual(removed_files[0].added, 0)
self.assertEqual(removed_files[0].removed, 3)
# 1 modified file
modified_files = res.modified_files
self.assertEqual(len(modified_files), 1)
self.assertEqual(modified_files[0].path, 'modified_file')
# 1 hunk, 3 added lines, 1 removed line
self.assertEqual(len(modified_files[0]), 1)
self.assertEqual(modified_files[0].added, 3)
self.assertEqual(modified_files[0].removed, 1)
self.assertEqual(res.added, 7)
self.assertEqual(res.removed, 4)
# check that original diffs and those produced
# by unidiff are the same
with codecs.open(file_path, 'r', encoding='utf-8') as diff_file:
self.assertEqual(diff_file.read(), str(res))
def test_git_renaming(self):
tests_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(tests_dir, 'samples/git_rename.diff')
with codecs.open(file_path, 'r', encoding='utf-8') as diff_file:
res = PatchSet(diff_file)
self.assertEqual(len(res), 3)
self.assertEqual(len(res.modified_files), 3)
self.assertEqual(len(res.added_files), 0)
self.assertEqual(len(res.removed_files), 0)
# renamed and modified files
for patch in res[:2]:
self.assertTrue(patch.is_rename)
self.assertEqual(patch.added, 1)
self.assertEqual(patch.removed, 1)
# renamed file under sub-path
patch = res[2]
self.assertTrue(patch.is_rename)
self.assertEqual(patch.added, 0)
self.assertEqual(patch.removed, 0)
# confirm the full path is in source/target filenames
self.assertEqual(patch.source_file, 'a/sub/onefile')
self.assertEqual(patch.target_file, 'b/sub/otherfile')
# check path is the target path
self.assertEqual(patch.path, 'sub/otherfile')
# check that original diffs and those produced
# by unidiff are the same
with codecs.open(file_path, 'r', encoding='utf-8') as diff_file:
self.assertEqual(diff_file.read(), str(res))
|
{
"content_hash": "2774e5d41a979ed473c948c75960bca8",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 99,
"avg_line_length": 39.45748987854251,
"alnum_prop": 0.5934229427457418,
"repo_name": "matiasb/python-unidiff",
"id": "221e5fa1e6b1c7ea3985dacc6e52181ee4f3643a",
"size": "20643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63678"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from pyos8 import *
from socket import *
from sockwrap import Socket
def handle_client(client,addr):
print "Connection from", addr
while True:
data = yield client.recv(65536)
if not data:
break
yield client.send(data)
print "Client closed"
yield client.close()
def server(port):
print "Server starting"
rawsock = socket(AF_INET,SOCK_STREAM)
rawsock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
rawsock.bind(("",port))
rawsock.listen(1024)
sock = Socket(rawsock)
while True:
client,addr = yield sock.accept()
yield NewTask(handle_client(client,addr))
sched = Scheduler()
sched.new(server(45000))
sched.mainloop()
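# --- Editor's note: a minimal client for the echo server above (sketch) ---
# sched.mainloop() blocks, so the lines below are shown as comments; run them
# from a separate process while the server is listening on port 45000.
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(("localhost", 45000))
#   c.sendall(b"hello")
#   print(c.recv(65536))
#   c.close()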
|
{
"content_hash": "5b4bc05124196b8cf7f509088090a7e7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 49,
"avg_line_length": 24.310344827586206,
"alnum_prop": 0.6581560283687943,
"repo_name": "iamaris/xpython",
"id": "edcdaabf6bb3321a5b37434b128ba112288d6ef7",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coroutines/echoserver2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3135"
},
{
"name": "CSS",
"bytes": "2844"
},
{
"name": "HTML",
"bytes": "31759"
},
{
"name": "JavaScript",
"bytes": "4056"
},
{
"name": "Jupyter Notebook",
"bytes": "127311"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "OpenEdge ABL",
"bytes": "54"
},
{
"name": "Python",
"bytes": "288160"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
import sys
import numpy
import math
import pygame
import pygame.freetype
from pathlib import Path
def getfont_mono(pathstr=None):
if pathstr is None:
#d = Path(sys.argv[0]).resolve().parent
f = 'liberation_mono.ttf'
paths = [
#d / f,
#d.parent / 'share' / 'pyflightcontrol' / f,
Path('.').resolve() / f,
Path('/usr/share/pyflightcontrol') / f,
Path('/usr/local/share/pyflightcontrol') / f
]
paths = filter(lambda x: x.exists(), paths)
for x in paths:
pathstr = str(x)
return pathstr
        # No candidate font file exists; fail loudly instead of returning None.
        raise FileNotFoundError()
return pathstr
def getfont_mono_obj(obj=None):
if obj is None:
obj = pygame.freetype.Font(getfont_mono())
return obj
# Font notes: width is ~60% of size, height is ~114% of size
fontfile = getfont_mono()
fontobj_dict = {}
class text(object):
def __init__(self, text, color, size=10, rotation=0):
self._fontobj = fontobj_dict.get(size)
if self._fontobj is None:
self._fontobj = pygame.font.Font(fontfile, size)
fontobj_dict[size] = self._fontobj
self._text = self._fontobj.render(text, True, color)
rect = self._text.get_rect()
txt_x = rect.width/2.0
txt_y = rect.height/2.0
if rotation != 0:
self._text = pygame.transform.rotate(self._text, rotation)
vec = numpy.array([math.cos(rotation*math.pi/180),
-math.sin(rotation*math.pi/180)])
self._left = -txt_x*vec
self._right = txt_x*vec
rect = self._text.get_rect()
vec = numpy.array([rect.width/2.0, rect.height/2.0])
self._left = vec + self._left
self._right = vec + self._right
else:
self._left = numpy.array((0, txt_y))
self._right = numpy.array((txt_x*2, txt_y))
def blitTo(self, surf, loc, left=True):
loc = numpy.array(loc)
if left:
loc = loc - self._left
else:
loc = loc - self._right
loc = (int(round(loc[0])), int(round(loc[1])))
surf.blit(self._text, loc)
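# --- Editor's usage sketch (illustrative; assumes the mono font is
# discoverable by getfont_mono() and that a display is available) ---
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    # Render a label and anchor its left edge at (10, 120).
    label = text('demo label', (0, 255, 0), size=18)
    label.blitTo(screen, (10, 120), left=True)
    pygame.display.flip()
    pygame.time.wait(1000)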
|
{
"content_hash": "7305b9d34006a481ea34f69f24974523",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 33.803030303030305,
"alnum_prop": 0.5369789332138055,
"repo_name": "rbmj/pyflightcontrol",
"id": "874675a832843ac493e057dae648e4d5a89d61b2",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyflightcontrol/base/font.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "235"
},
{
"name": "Protocol Buffer",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "37100"
},
{
"name": "Shell",
"bytes": "169"
}
],
"symlink_target": ""
}
|
"""
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter
in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains
lowercase letters separated by a single space.
"""
class Solution(object):
def wordPattern(self, pattern, words):
"""
:type pattern: str
:type words: str
:rtype: bool
"""
words_list = words.split()
if len(words_list) != len(pattern):
return False
word_dict, pattern_dict = {}, {}
for p, w in zip(pattern, words_list):
if p not in pattern_dict:
pattern_dict[p] = w
if w not in word_dict:
word_dict[w] = p
if word_dict[w] != p or pattern_dict[p] != w:
return False
return True
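# Editor's addition (illustrative alternative, not part of the original
# solution): the bijection holds exactly when the counts of distinct letters,
# distinct words and distinct (letter, word) pairs agree and the lengths match.
def word_pattern_alt(pattern, words):
    ws = words.split()
    return (len(ws) == len(pattern) and
            len(set(pattern)) == len(set(ws)) == len(set(zip(pattern, ws))))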
if __name__ == '__main__':
s = Solution()
print s.wordPattern('abba', 'dog cat cat dog')
print s.wordPattern('abba', 'dog cat cat fish')
|
{
"content_hash": "861d63b36afe628e087c5c93a499eb7e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 31.372093023255815,
"alnum_prop": 0.6004447739065975,
"repo_name": "fantuanmianshi/Daily",
"id": "07a8fd80afda3b363ced77eaa8c86023080e5b88",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LeetCode/word_pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "908"
},
{
"name": "Python",
"bytes": "137913"
}
],
"symlink_target": ""
}
|
"""Tests for subscription management."""
from __future__ import annotations
from core import feconf
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
from typing import Final, List
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import user_models
(user_models,) = models.Registry.import_models([models.Names.USER])
COLLECTION_ID: Final = 'col_id'
COLLECTION_ID_2: Final = 'col_id_2'
EXP_ID: Final = 'exp_id'
EXP_ID_2: Final = 'exp_id_2'
FEEDBACK_THREAD_ID: Final = 'fthread_id'
FEEDBACK_THREAD_ID_2: Final = 'fthread_id_2'
USER_ID: Final = 'user_id'
USER_ID_2: Final = 'user_id_2'
class SubscriptionsTest(test_utils.GenericTestBase):
"""Tests for subscription management."""
OWNER_2_EMAIL: Final = 'owner2@example.com'
OWNER2_USERNAME: Final = 'owner2'
def setUp(self) -> None:
super().setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.owner_2_id = self.get_user_id_from_email(self.OWNER_2_EMAIL)
self.owner = user_services.get_user_actions_info(self.owner_id)
def _get_thread_ids_subscribed_to(self, user_id: str) -> List[str]:
"""Returns the feedback thread ids to which the user corresponding to
        the given user id is subscribed.
Args:
user_id: str. The user id.
Returns:
List(str). The list containing all the feedback thread ids to
            which the user is subscribed.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
# TODO(#15621): The explicit declaration of type for ndb properties
# should be removed. Currently, these ndb properties are annotated with
# Any return type. Once we have proper return type we can remove this.
if subscriptions_model:
feedback_thread_ids: List[str] = (
subscriptions_model.general_feedback_thread_ids
)
return feedback_thread_ids
else:
return []
def _get_exploration_ids_subscribed_to(self, user_id: str) -> List[str]:
"""Returns all the exploration ids of the explorations to which the user
        has subscribed.
Args:
user_id: str. The user id.
Returns:
List(str). The list containing all the exploration ids of the
            explorations to which the user has subscribed.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
# TODO(#15621): The explicit declaration of type for ndb properties
# should be removed. Currently, these ndb properties are annotated with
# Any return type. Once we have proper return type we can remove this.
if subscriptions_model:
exploration_ids: List[str] = subscriptions_model.exploration_ids
return exploration_ids
else:
return []
def _get_collection_ids_subscribed_to(self, user_id: str) -> List[str]:
"""Returns all the collection ids of the collections to which the user
        has subscribed.
Args:
user_id: str. The user id.
Returns:
List(str). The list containing all the collection ids of the
            collections to which the user has subscribed.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
# TODO(#15621): The explicit declaration of type for ndb properties
# should be removed. Currently, these ndb properties are annotated with
# Any return type. Once we have proper return type we can remove this.
if subscriptions_model:
collection_ids: List[str] = subscriptions_model.collection_ids
return collection_ids
else:
return []
def test_subscribe_to_feedback_thread(self) -> None:
self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
# Repeated subscriptions to the same thread have no effect.
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
subscription_services.subscribe_to_thread(
USER_ID, FEEDBACK_THREAD_ID_2)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID),
[FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2])
def test_subscribe_to_exploration(self) -> None:
self.assertEqual(self._get_exploration_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
# Repeated subscriptions to the same exploration have no effect.
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2)
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID, EXP_ID_2])
def test_get_exploration_ids_subscribed_to(self) -> None:
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID])
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2)
self.assertEqual(
subscription_services.get_exploration_ids_subscribed_to(USER_ID),
[EXP_ID, EXP_ID_2])
def test_get_all_threads_subscribed_to(self) -> None:
self.assertEqual(
subscription_services.get_all_threads_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
self.assertEqual(
subscription_services.get_all_threads_subscribed_to(USER_ID),
[FEEDBACK_THREAD_ID])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID_2)
self.assertEqual(
subscription_services.get_all_threads_subscribed_to(USER_ID),
[FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2])
def test_thread_and_exp_subscriptions_are_tracked_individually(
self
) -> None:
self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID)
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID)
self.assertEqual(
self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
def test_posting_to_feedback_thread_results_in_subscription(self) -> None:
# The viewer posts a message to the thread.
message_text = 'text'
feedback_services.create_thread(
feconf.ENTITY_TYPE_EXPLORATION, 'exp_id',
self.viewer_id, 'subject', message_text)
thread_ids_subscribed_to = self._get_thread_ids_subscribed_to(
self.viewer_id)
self.assertEqual(len(thread_ids_subscribed_to), 1)
thread_id = thread_ids_subscribed_to[0]
self.assertEqual(
feedback_services.get_messages(thread_id)[0].text,
message_text)
# The editor posts a follow-up message to the thread.
new_message_text = 'new text'
feedback_services.create_message(
thread_id, self.editor_id, '', '', new_message_text)
# The viewer and editor are now both subscribed to the thread.
self.assertEqual(
self._get_thread_ids_subscribed_to(self.viewer_id), [thread_id])
self.assertEqual(
self._get_thread_ids_subscribed_to(self.editor_id), [thread_id])
def test_creating_exploration_results_in_subscription(self) -> None:
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [])
exp_services.save_new_exploration(
USER_ID, exp_domain.Exploration.create_default_exploration(EXP_ID))
self.assertEqual(
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID])
def test_adding_new_exploration_owner_or_editor_role_results_in_subscription( # pylint: disable=line-too-long
self
) -> None:
exploration = exp_domain.Exploration.create_default_exploration(EXP_ID)
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [])
rights_manager.assign_role_for_exploration(
self.owner, EXP_ID, self.owner_2_id, rights_domain.ROLE_OWNER)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.editor_id), [])
rights_manager.assign_role_for_exploration(
self.owner, EXP_ID, self.editor_id, rights_domain.ROLE_EDITOR)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.editor_id), [EXP_ID])
def test_adding_new_exploration_viewer_role_does_not_result_in_subscription(
self
) -> None:
exploration = exp_domain.Exploration.create_default_exploration(EXP_ID)
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.viewer_id), [])
rights_manager.assign_role_for_exploration(
self.owner, EXP_ID, self.viewer_id, rights_domain.ROLE_VIEWER)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.viewer_id), [])
def test_deleting_exploration_does_not_delete_subscription(self) -> None:
exploration = exp_domain.Exploration.create_default_exploration(EXP_ID)
exp_services.save_new_exploration(self.owner_id, exploration)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID])
exp_services.delete_exploration(self.owner_id, EXP_ID)
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID])
def test_subscribe_to_collection(self) -> None:
self.assertEqual(self._get_collection_ids_subscribed_to(USER_ID), [])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
# Repeated subscriptions to the same collection have no effect.
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID, COLLECTION_ID_2])
def test_get_collection_ids_subscribed_to(self) -> None:
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID)
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID])
subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2)
self.assertEqual(
subscription_services.get_collection_ids_subscribed_to(USER_ID),
[COLLECTION_ID, COLLECTION_ID_2])
def test_creating_collection_results_in_subscription(self) -> None:
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [])
self.save_new_default_collection(COLLECTION_ID, USER_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID])
def test_adding_new_collection_owner_or_editor_role_results_in_subscription(
self
) -> None:
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_2_id), [])
rights_manager.assign_role_for_collection(
self.owner, COLLECTION_ID, self.owner_2_id,
rights_domain.ROLE_OWNER)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_2_id),
[COLLECTION_ID])
self.assertEqual(
self._get_collection_ids_subscribed_to(self.editor_id), [])
rights_manager.assign_role_for_collection(
self.owner, COLLECTION_ID, self.editor_id,
rights_domain.ROLE_EDITOR)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.editor_id),
[COLLECTION_ID])
def test_adding_new_collection_viewer_role_does_not_result_in_subscription(
self
) -> None:
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.viewer_id), [])
rights_manager.assign_role_for_collection(
self.owner, COLLECTION_ID, self.viewer_id,
rights_domain.ROLE_VIEWER)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.viewer_id), [])
def test_deleting_collection_does_not_delete_subscription(self) -> None:
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
collection_services.delete_collection(self.owner_id, COLLECTION_ID)
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
def test_adding_exploration_to_collection_does_not_create_subscription(
self
) -> None:
self.save_new_default_collection(COLLECTION_ID, self.owner_id)
# The author is subscribed to the collection but to no explorations.
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_id), [])
# Another author creates an exploration.
self.save_new_valid_exploration(EXP_ID, self.owner_2_id)
# If the collection author adds the exploration to his/her collection,
# the collection author should not be subscribed to the exploration nor
# should the exploration author be subscribed to the collection.
collection_services.update_collection(
self.owner_id, COLLECTION_ID, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': EXP_ID
}], 'Add new exploration to collection.')
# Ensure subscriptions are as expected.
self.assertEqual(
self._get_collection_ids_subscribed_to(self.owner_id),
[COLLECTION_ID])
self.assertEqual(
self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID])
class UserSubscriptionsTest(test_utils.GenericTestBase):
"""Tests for subscription management."""
OWNER_2_EMAIL: Final = 'owner2@example.com'
OWNER2_USERNAME: Final = 'owner2'
def setUp(self) -> None:
super().setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner_2_id = self.get_user_id_from_email(self.OWNER_2_EMAIL)
def _get_all_subscribers_of_creator(self, user_id: str) -> List[str]:
"""Returns all the ids of the subscribers that have subscribed to the
creator.
Args:
user_id: str. The user id.
Returns:
List(str). The list containing all the ids of the subscribers that
have subscribed to the creator.
"""
subscribers_model = user_models.UserSubscribersModel.get(
user_id, strict=False)
# TODO(#15621): The explicit declaration of type for ndb properties
# should be removed. Currently, these ndb properties are annotated with
# Any return type. Once we have proper return type we can remove this.
if subscribers_model:
subscriber_ids: List[str] = subscribers_model.subscriber_ids
return subscriber_ids
else:
return []
def _get_all_creators_subscribed_to(self, user_id: str) -> List[str]:
"""Returns the ids of the creators the given user has subscribed to.
Args:
user_id: str. The user id.
Returns:
List(str). The list containing all the creator ids the given user
has subscribed to.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
# TODO(#15621): The explicit declaration of type for ndb properties
# should be removed. Currently, these ndb properties are annotated with
# Any return type. Once we have proper return type we can remove this.
if subscriptions_model:
creator_ids: List[str] = subscriptions_model.creator_ids
return creator_ids
else:
return []
def test_exception_is_raised_when_user_self_subscribes(self) -> None:
with self.assertRaisesRegex(
Exception, 'User %s is not allowed to self subscribe.' % USER_ID):
subscription_services.subscribe_to_creator(USER_ID, USER_ID)
def test_subscribe_to_creator(self) -> None:
self.assertEqual(self._get_all_subscribers_of_creator(
self.owner_id), [])
# Subscribe a user to a creator.
subscription_services.subscribe_to_creator(USER_ID, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id), [USER_ID])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID),
[self.owner_id])
        # Repeated subscriptions to the same creator have no effect.
subscription_services.subscribe_to_creator(USER_ID, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id), [USER_ID])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID),
[self.owner_id])
        # Subscribe another user to the same creator.
subscription_services.subscribe_to_creator(
USER_ID_2, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id),
[USER_ID, USER_ID_2])
self.assertEqual(
self._get_all_creators_subscribed_to(
USER_ID_2), [self.owner_id])
def test_unsubscribe_from_creator(self) -> None:
self.assertEqual(self._get_all_subscribers_of_creator(
self.owner_id), [])
# Add subscribers to a creator.
subscription_services.subscribe_to_creator(USER_ID, self.owner_id)
subscription_services.subscribe_to_creator(USER_ID_2, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id), [
USER_ID, USER_ID_2])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID),
[self.owner_id])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID_2),
[self.owner_id])
# Unsubscribing a user from a creator.
subscription_services.unsubscribe_from_creator(USER_ID, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id), [USER_ID_2])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID), [])
# Unsubscribing the same user again has no effect.
subscription_services.unsubscribe_from_creator(USER_ID, self.owner_id)
self.assertEqual(
self._get_all_subscribers_of_creator(self.owner_id), [USER_ID_2])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID), [])
# Unsubscribing the second user.
subscription_services.unsubscribe_from_creator(
USER_ID_2, self.owner_id)
self.assertEqual(self._get_all_subscribers_of_creator(
self.owner_id), [])
self.assertEqual(
self._get_all_creators_subscribed_to(USER_ID_2),
[])
def test_get_all_subscribers_of_creator(self) -> None:
self.assertEqual(
subscription_services.get_all_subscribers_of_creator(
self.owner_id), [])
subscription_services.subscribe_to_creator(USER_ID, self.owner_id)
self.assertEqual(
subscription_services.get_all_subscribers_of_creator(self.owner_id),
[USER_ID])
subscription_services.subscribe_to_creator(USER_ID_2, self.owner_id)
self.assertEqual(
subscription_services.get_all_subscribers_of_creator(self.owner_id),
[USER_ID, USER_ID_2])
def test_get_all_creators_subscribed_to(self) -> None:
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
USER_ID), [])
subscription_services.subscribe_to_creator(USER_ID, self.owner_id)
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
USER_ID), [self.owner_id])
subscription_services.subscribe_to_creator(USER_ID, self.owner_2_id)
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
USER_ID), [self.owner_id, self.owner_2_id])
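# Editor's sketch (hypothetical helper, not Oppia code): the idempotence that
# the tests above assert repeatedly -- subscribing twice leaves a single
# entry -- boils down to an append-if-absent operation like this one.
def _append_once(id_list, new_id):
    """Append new_id to id_list only if it is not already present."""
    if new_id not in id_list:
        id_list.append(new_id)
    return id_list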
|
{
"content_hash": "e71973472a38742888f94d03e3984f55",
"timestamp": "",
"source": "github",
"line_count": 558,
"max_line_length": 114,
"avg_line_length": 41.4910394265233,
"alnum_prop": 0.6415428472702143,
"repo_name": "oppia/oppia",
"id": "74dff6a33a919dad5012877e59cbafbbdc0df381",
"size": "23775",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/subscription_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class DiskInstanceView(Model):
"""The instance view of the disk.
:param name: The disk name.
:type name: str
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2015_06_15.models.InstanceViewStatus]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, *, name: str=None, statuses=None, **kwargs) -> None:
super(DiskInstanceView, self).__init__(**kwargs)
self.name = name
self.statuses = statuses
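# Editor's usage sketch (illustrative values): the model is a plain data
# holder and, per the signature above, must be constructed with keywords.
#
#   view = DiskInstanceView(name='osdisk')
#   view.name        # 'osdisk'
#   view.statuses    # None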
|
{
"content_hash": "25a6726fd5549d799d68b2b8ee25cdfb",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 29.863636363636363,
"alnum_prop": 0.6133942161339422,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "cc4b78b3c2b377bca721f77ae5b7f04e045ff783",
"size": "1131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/models/disk_instance_view_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import sys
from optparse import make_option
from scaffolder.core.utils import import_class
from scaffolder.core.optparser import CommandMeta
from scaffolder.core.optparser import CommandOptionParser
from scaffolder import get_version
from clint.textui.colored import red
class CommandController():
DEFAULT_ARGUMENT = '__DEFAULT__'
def __init__(self, stdout=None, stderr=None):
self.stdout = stdout or sys.stdout
self.stderr = stderr or sys.stderr
#Hold a ref to all Cmd's id
self.command_ids = []
self.prog = ''
self.command = ''
self.argv = ()
#Make CliApplication, instantiate a new CommandController and set
self.description = "The program %prog has the following options and commands"
self.parser = CommandOptionParser(description=self.description)
def register_command_names(self, command_names):
"""
        Note that we insert instead of appending, so that the commands
        are listed in help in the order they were introduced.
TODO: Add support for aliases(?)
@param command_names: list of command names
@return: void
"""
for id in command_names:
self.command_ids.insert(0, id)
def execute(self, argv=None):
argv = sys.argv if not argv else argv
#TODO: We might want to use default command!!
if len(argv) == 1:
return self.show_help()
command, arguments = self.parse_argv(argv)
self.run_command(command, arguments)
def run_command(self, cmd, argv):
try:
Command = self.get_command_class(cmd)
except Exception, e:
self.exception_handler(e)
command = Command(cmd, stdout=self.stdout, stderr=self.stderr)
command.run_from_argv(argv)
def parse_argv(self, argv):
# This should always be here
self.prog = argv[0]
        #Expect a subcommand here; otherwise show_help was already called above
self.command = argv[1]
# We want to store the arguments or show help
self.argv = argv[2:] or CommandController.DEFAULT_ARGUMENT
return self.command, self.argv
def get_command_class(self, cmd):
try:
#module name
module = cmd.lower()
#class name
command = "{0}Command".format(module.title())
cmd_path = self.build_command_package(module, command)
if isinstance(cmd_path, basestring):
Command = import_class(cmd_path)
return Command
except Exception, e:
#TODO: Here we should try to load all Commands and
#get their aliases. OR Register the alias as well.
self.exception_handler(e)
def build_command_package(self, module, command):
return 'scaffolder.commands.' + module+'.'+command
def exception_handler(self, e):
self.stderr.write(str(e)+'\n')
self.show_help()
sys.exit(-1)
def show_help(self, load_commands=True):
#let's load all registered commands and register them
for cmd in self.command_ids:
Command = self.get_command_class(cmd)
command = Command(cmd)
self.parser.add_subcommand(command)
return self.stdout.write(self.parser.format_help())
class BaseCommand(CommandMeta):
option_list = (
make_option('--debug',
action='store_true',
dest='debug',
default=False,
help='Debug mode'
),
make_option('--traceback',
action='store_true',
dest='traceback',
default=True,
help='Print traceback in error'
)
)
def __init__(self, name, parser=None, help=None, aliases=(), stdout=None, stderr=None):
self.stdout = stdout or sys.stdout
self.stderr = stderr or sys.stderr
CommandMeta.__init__(self, name, parser=parser, help=help, aliases=aliases)
def execute(self, *args, **options):
try:
self.run(*args, **options)
except Exception, e:
if options['debug']:
self.stderr.write(red('%s\n' % e))
sys.exit(1)
else:
self.exit_with_help(message=e.message)
def run_from_argv(self, argv):
argv = self.check_values(argv)
options, args = self.parser.parse_args(argv)
self.execute(*args, **options.__dict__)
def run(self, *args, **options):
raise NotImplementedError()
def get_default_option(self):
return ['--help']
def get_version(self):
return get_version()
def exit_with_help(self, message=None, color=red):
if message:
print "%s\n" % color(message)
self.parser.print_help()
self.parser.exit()
def check_values(self, argv):
if CommandController.DEFAULT_ARGUMENT in argv:
argv = self.get_default_option()
return argv
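# Editor's sketch of a hypothetical command (illustrative only). A concrete
# command overrides run(); option parsing, --debug/--traceback handling and
# error reporting come from BaseCommand above. build_command_package() would
# expect this class to live at scaffolder.commands.echo.EchoCommand.
class EchoCommand(BaseCommand):
    def run(self, *args, **options):
        self.stdout.write(' '.join(args) + '\n')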
|
{
"content_hash": "72bd7b263e9cbef8708a0b95b53ad02e",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 91,
"avg_line_length": 30.830303030303032,
"alnum_prop": 0.5803027324552782,
"repo_name": "goliatone/minions",
"id": "07e9ebf6035296d030b16b06916207eefc23b256",
"size": "5134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scaffolder/core/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45202"
}
],
"symlink_target": ""
}
|
"""Empty revision
This is the empty revision that can be used as the base for future
migrations.
Initial database creation shall be done via `metadata.create_all()` and
`alembic stamp head`.
Revision ID: 4784a128a6dd
Revises:
Create Date: 2017-12-13 00:48:12.079431
"""
from alembic import op
import sqlalchemy as sa
import pycroft
# revision identifiers, used by Alembic.
revision = '4784a128a6dd'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
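# Editor's note (illustrative, not executed by this revision): the bootstrap
# described in the docstring amounts to creating all tables directly from the
# SQLAlchemy metadata and then marking the database as up to date, e.g.
#
#   metadata.create_all(bind=engine)   # exact metadata/engine objects assumed
#   # followed by, on the command line:
#   #   alembic stamp head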
|
{
"content_hash": "b9d58ef49179b010e4fc0dfc2eb97792",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 16.25,
"alnum_prop": 0.7442307692307693,
"repo_name": "lukasjuhrich/pycroft",
"id": "b0fb50c7523783314d432bc17264f23d43543436",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycroft/model/alembic/versions/4784a128a6dd_empty_revision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9709"
},
{
"name": "Dockerfile",
"bytes": "2877"
},
{
"name": "HTML",
"bytes": "98163"
},
{
"name": "JavaScript",
"bytes": "66723"
},
{
"name": "Mako",
"bytes": "509"
},
{
"name": "Python",
"bytes": "907170"
},
{
"name": "Shell",
"bytes": "12435"
}
],
"symlink_target": ""
}
|
def test_init(medius_client):
assert medius_client.access_token == 'ACCESS_TOKEN'
    assert medius_client.mocked_request_method.called
assert medius_client.mocked_request_method.call_args[0][0] == 'https://api.medium.com/v1/me'
assert medius_client.user_id == 'ID'
assert medius_client.username == 'USERNAME'
def test_default_kwargs(medius_client):
assert 'user_id' in medius_client.default_kwargs
assert medius_client.default_kwargs['user_id'] == 'ID'
def test_get_current_user(medius_client):
response = medius_client.get_current_user()
assert response['id'] == 'ID'
assert response['username'] == 'USERNAME'
def test_get_posts(medius_client):
response = medius_client.get('posts')
assert 'posts' in response.data
assert len(response.data['posts']) == 1
assert medius_client.mocked_request_method.call_args[0][0] == 'https://api.medium.com/v1/users/ID/posts'
def test_create_post(medius_client, post_payload):
medius_client.post('posts', post_payload)
expected_content = '<h1>{}</h1>{}'.format(post_payload['title'], post_payload['content'])
assert medius_client.mocked_request_method.call_args[1]['json']['content'] == expected_content
assert medius_client.mocked_request_method.call_args[0][0] == 'https://api.medium.com/v1/users/ID/posts'
def test_create_post_into_publication(medius_client, post_payload):
medius_client.post_into_publication('PUBLICATION', post_payload)
expected_content = '<h1>{}</h1>{}'.format(post_payload['title'], post_payload['content'])
assert medius_client.mocked_request_method.call_args[1]['json']['content'] == expected_content
assert medius_client.mocked_request_method.call_args[0][0] == 'https://api.medium.com/v1/publications/PUBLICATION_ID/posts'
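# Editor's sketch (hypothetical fixture data): the assertions above only rely
# on the 'title' and 'content' keys of post_payload, which the client wraps as
# '<h1>{title}</h1>{content}'. A minimal fixture could therefore look like:
#
#   @pytest.fixture
#   def post_payload():
#       return {'title': 'My post', 'content': '<p>Body</p>'}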
|
{
"content_hash": "f99ab5acda4ba93325938a0031352d8e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 127,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.7053571428571429,
"repo_name": "cleberzavadniak/medius",
"id": "f26be385af607b4d9189419286dada4fafe74692",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_medius.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7505"
}
],
"symlink_target": ""
}
|
import glob
import sys
def main():
hits_dir = sys.argv[1]
input_files = [inp for inp in glob.glob(hits_dir + '/input*') if '.txt' not in inp]
inputs = ['urls\n']
for input_file in input_files:
with open(input_file) as fh:
inputs.extend(fh.readlines()[1:])
with open(hits_dir + '/input', 'w') as fh:
fh.writelines(inputs)
if __name__ == '__main__':
main()
|
{
"content_hash": "fc9ba882b19266f0338dcdad22c992af",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 25.625,
"alnum_prop": 0.5658536585365853,
"repo_name": "andriluka/mechturk-public",
"id": "8d28824dad6446daab1154f1c23829c602ff4a78",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/merge_inputs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8181"
},
{
"name": "HTML",
"bytes": "9251"
},
{
"name": "JavaScript",
"bytes": "136454"
},
{
"name": "Matlab",
"bytes": "64045"
},
{
"name": "Python",
"bytes": "40794"
},
{
"name": "Shell",
"bytes": "17771"
}
],
"symlink_target": ""
}
|
import cPickle
from diamond.handler.Handler import Handler
from cloudify_handler.format import jsonify
class TestHandler(Handler):
def process(self, metric):
if self.config.get('output_cloudify_format', False):
metric = jsonify(metric)
with open(self.config['log_path'], 'a') as f:
cPickle.dump(metric, f)
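# Editor's sketch (illustrative helper, not used by the plugin): because
# process() appends one pickled object per metric, a test can read them back
# by unpickling until EOF.
def read_metrics(log_path):
    metrics = []
    with open(log_path) as f:
        while True:
            try:
                metrics.append(cPickle.load(f))
            except EOFError:
                break
    return metrics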
|
{
"content_hash": "d6e9e662b69b6b55c24a5a2bff2f2aca",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.672316384180791,
"repo_name": "geokala/cloudify-diamond-plugin",
"id": "b17ceeda977d7e895b0004b0a8fec741ebc72821",
"size": "992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "diamond_agent/tests/resources/blueprint/handlers/test_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "37713"
}
],
"symlink_target": ""
}
|
from sqlalchemy.orm import class_mapper
from gearshift import config, identity
from gearshift.database import session
from gearshift.util import load_class
import logging
log = logging.getLogger("gearshift.identity.saprovider")
# Global class references --
# these will be set when the provider is initialised.
user_class = None
group_class = None
permission_class = None
visit_class = None
class SqlAlchemyIdentity(object):
"""Identity that uses a model from a database (via SQLAlchemy)."""
def __init__(self, visit_key=None, user=None):
self.visit_key = visit_key
if user:
self._user = user
if visit_key is not None:
self.login()
@property
def user(self):
"""Get user instance for this identity."""
try:
return self._user
except AttributeError:
# User hasn't already been set
pass
# Attempt to load the user. After this code executes, there *will* be
# a _user attribute, even if the value is None.
visit = self.visit_link
self._user = visit and user_class.query.get(visit.user_id)
return self._user
@property
def user_name(self):
"""Get user name of this identity."""
if not self.user:
return None
return self.user.user_name
@property
def user_id(self):
"""Get user id of this identity."""
if not self.user:
return None
return self.user.user_id
@property
def anonymous(self):
"""Return true if not logged in."""
return not self.user
@property
def permissions(self):
"""Get set of permission names of this identity."""
try:
return self._permissions
except AttributeError:
# Permissions haven't been computed yet
pass
if not self.user:
self._permissions = frozenset()
else:
self._permissions = frozenset(
p.permission_name for p in self.user.permissions)
return self._permissions
@property
def groups(self):
"""Get set of group names of this identity."""
try:
return self._groups
except AttributeError:
# Groups haven't been computed yet
pass
if not self.user:
self._groups = frozenset()
else:
self._groups = frozenset(g.group_name for g in self.user.groups)
return self._groups
@property
def group_ids(self):
"""Get set of group IDs of this identity."""
try:
return self._group_ids
except AttributeError:
# Groups haven't been computed yet
pass
if not self.user:
self._group_ids = frozenset()
else:
self._group_ids = frozenset(g.group_id for g in self.user.groups)
return self._group_ids
@property
def visit_link(self):
"""Get the visit link to this identity."""
if self.visit_key is None:
return None
return visit_class.query.filter_by(visit_key=self.visit_key).first()
@property
def login_url(self):
"""Get the URL for the login page."""
return identity.get_failure_url()
def login(self):
"""Set the link between this identity and the visit."""
visit = self.visit_link
if visit:
visit.user_id = self._user.user_id
else:
visit = visit_class()
visit.visit_key = self.visit_key
visit.user_id = self._user.user_id
session.flush()
def logout(self):
"""Remove the link between this identity and the visit."""
visit = self.visit_link
if visit:
session.delete(visit)
session.flush()
# Clear the current identity
identity.set_current_identity(SqlAlchemyIdentity())
class SqlAlchemyIdentityProvider(object):
"""IdentityProvider that uses a model from a database (via SQLAlchemy)."""
def __init__(self):
super(SqlAlchemyIdentityProvider, self).__init__()
get = config.get
global user_class, group_class, permission_class, visit_class
user_class_path = get("tools.identity.saprovider.model.user", None)
user_class = load_class(user_class_path)
group_class_path = get("tools.identity.saprovider.model.group", None)
group_class = load_class(group_class_path)
permission_class_path = get(
"tools.identity.saprovider.model.permission", None)
permission_class = load_class(permission_class_path)
visit_class_path = get("tools.identity.saprovider.model.visit", None)
log.info("Loading: %s", visit_class_path)
visit_class = load_class(visit_class_path)
# Default encryption algorithm is to use plain text passwords
algorithm = get("tools.identity.saprovider.encryption_algorithm", None)
self.encrypt_password = lambda pw: \
identity.encrypt_pw_with_algorithm(algorithm, pw)
def create_provider_model(self):
"""Create the database tables if they don't already exist."""
class_mapper(user_class).local_table.create(checkfirst=True)
class_mapper(group_class).local_table.create(checkfirst=True)
class_mapper(permission_class).local_table.create(checkfirst=True)
class_mapper(visit_class).local_table.create(checkfirst=True)
def validate_identity(self, user_name, password, visit_key):
"""Validate the identity represented by user_name using the password.
Must return either None if the credentials weren't valid or an object
with the following properties:
user_name: original user name
user: a provider dependant object (TG_User or similar)
groups: a set of group names
permissions: a set of permission names
"""
user = user_class.query.filter_by(user_name=user_name).first()
if not user:
log.warning("No such user: %s", user_name)
return None
if not self.validate_password(user, user_name, password):
log.info("Passwords don't match for user: %s", user_name)
return None
log.info("Associating user (%s) with visit (%s)",
user_name, visit_key)
return SqlAlchemyIdentity(visit_key, user)
def validate_password(self, user, user_name, password):
"""Check the user_name and password against existing credentials.
Note: user_name is not used here, but is required by external
password validation schemes that might override this method.
If you use SqlAlchemyIdentityProvider, but want to check the passwords
against an external source (i.e. PAM, LDAP, Windows domain, etc),
subclass SqlAlchemyIdentityProvider, and override this method.
"""
return user.password == self.encrypt_password(password)
def load_identity(self, visit_key):
"""Lookup the principal represented by user_name.
Return None if there is no principal for the given user ID.
Must return an object with the following properties:
user_name: original user name
user: a provider dependant object (TG_User or similar)
groups: a set of group names
permissions: a set of permission names
"""
return SqlAlchemyIdentity(visit_key)
def anonymous_identity(self):
"""Return anonymous identity.
Must return an object with the following properties:
user_name: original user name
user: a provider dependant object (TG_User or similar)
groups: a set of group names
permissions: a set of permission names
"""
return SqlAlchemyIdentity()
def authenticated_identity(self, user):
"""Construct Identity object for users with no visit_key."""
return SqlAlchemyIdentity(user=user)
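# Editor's sketch (hypothetical subclass, following the validate_password
# docstring above): checking credentials against an external source only
# requires overriding that one method. The stand-in check below is a
# placeholder for a real PAM/LDAP/domain call.
def _external_password_check(user_name, password):
    """Stand-in for a call to an external authentication source (assumption)."""
    return False  # deny by default in this sketch
class ExternalPasswordIdentityProvider(SqlAlchemyIdentityProvider):
    def validate_password(self, user, user_name, password):
        return _external_password_check(user_name, password)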
|
{
"content_hash": "86acaa755e6fd6217e8504ed798ba4f7",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 35.04347826086956,
"alnum_prop": 0.6202233250620347,
"repo_name": "dbrattli/python-gearshift",
"id": "d2e0e3e9a0969373c884cfa84d40de2d3054758f",
"size": "8060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gearshift/identity/saprovider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7127"
},
{
"name": "Python",
"bytes": "606764"
}
],
"symlink_target": ""
}
|
"""
WSGI config for imdel_backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "imdel_backend.settings")
application = get_wsgi_application()
|
{
"content_hash": "acfa4eaaf5d1250dc5c790360ea7d04a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.25,
"alnum_prop": 0.7722772277227723,
"repo_name": "aleksanderhan/imdel_backend",
"id": "4ad21b29dd9432e9b5a3458ac5eea014c2da3e2d",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imdel_backend/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16973"
}
],
"symlink_target": ""
}
|
'''
Generate some random frames from a movie, and also plot out the colors
'''
# coding: utf-8
import matplotlib
import cv2
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy
import sys
import argparse
import random
import os
parser = argparse.ArgumentParser(description="Plot the color profiles of movies from Gareth's goPros")
parser.add_argument('-f', '--file', help='Movie file', required=True)
parser.add_argument('-o', '--out', help='output file name to draw the graph to', required=True)
parser.add_argument('-n', '--number', help='Number of images to print', required=True)
parser.add_argument('-m', '--frames', help='Stop after this number of frames (default == all)', type=int)
parser.add_argument('-d', '--median', help='Calculate and plot the median color intensity instead of the mean color intensity. Note that the median is noisier and slower to compute than the mean', action='store_true')
parser.add_argument('-w', '--window', help='Window size to average the numbers over (try 1/100 * # images). If not provided the numbers are not averaged.')
args = parser.parse_args()
def printImages(imgs, band):
keys = imgs.keys()
print "Choosing from " + str(len(keys)) + " images"
if not os.path.exists(str(band)):
os.mkdir(str(band))
for i in range(int(args.number)):
r = random.randint(0, len(imgs)-1)
print("Wrote the file to: " + str(band) + os.path.sep + str(keys[r]))
cv2.imwrite(str(band) + os.path.sep + str(keys[r]) + ".JPG", imgs[keys[r]])
def movingaverage(interval, window_size):
window= numpy.ones(int(window_size))/float(window_size)
return numpy.convolve(interval, window, 'same')
vid = cv2.VideoCapture(args.file)
ret, img = vid.read()
average=[]
count=0
band=1
imgset = {}
while (ret):
rgb=[]
for i in range(3):
channel = img[:,:,i]
if args.median:
rgb.append(numpy.median(channel))
else:
rgb.append(numpy.average(channel))
if (count % 200) < 1:
sys.stderr.write(str(count) + ": " + str(rgb[0]) + " " + str(rgb[1]) + " " + str(rgb[2]) + "\n")
if rgb[2] > 150:
print("Length of the image set is " + str(len(imgset)))
if len(imgset) > 10: # this is because we may have multiple images with BGR >150
print("Writing images at " + str(count))
printImages(imgset, band)
band+=1
imgset = {}
rint = random.randint(1, 100)
    if rint == 1: # choose 1 in 100 images first, and then randomly from those
imgset[count]=img
print("Saving frame: " + str(count) + " rint: " + str(rint))
average.append(rgb)
count += 1
ret, img = vid.read()
if args.frames > 0 and count > args.frames:
ret = False
dtc=None
if args.window:
dt=numpy.transpose(average)
for i in range(dt.shape[0]):
dt[i]=movingaverage(dt[i], args.window)
dtc=numpy.transpose(dt)
else:
dtc=average
fontP = FontProperties()
fontP.set_size('small')
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(dtc)
#ax.set_xticklabels(xlabels, rotation=45, fontproperties=fontP)
#ax.set_xlabel('Image number in the series')
ax.set_ylabel('Reef colors')
box = ax.get_position()
#ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
#ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height])
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height *0.85])
header=["blue", "green", "red"]
ax.legend((header), loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4, prop=fontP)
fig.savefig(args.out)
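# Editor's usage note (example invocation; the file names are made up):
#
#   python randomFrames.py -f dive.MP4 -o colors.png -n 5 -m 5000 -w 50
#
# plots the smoothed blue/green/red profile to colors.png, stops after 5000
# frames, and writes 5 randomly chosen frames per detected band into
# per-band numbered folders.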
|
{
"content_hash": "86513449edcad4c415ed26ad55601585",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 219,
"avg_line_length": 33.42727272727273,
"alnum_prop": 0.6464509110688061,
"repo_name": "linsalrob/depth-profile-video-processing",
"id": "15a9a86bf394ad10fada0594f4fc4949ea49000c",
"size": "3677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "randomFrames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46316"
}
],
"symlink_target": ""
}
|
import argparse
import pytest
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import fresh_config
local_config = fresh_config()
local_config.retries = 2
@python_app
def sleep_fail(sleep_dur, sleep_rand_max, fail_prob, inputs=[]):
import time
import random
s = sleep_dur + random.randint(-sleep_rand_max, sleep_rand_max)
time.sleep(s)
x = float(random.randint(0, 100)) / 100
if x <= fail_prob:
# print("Fail")
raise Exception("App failure")
else:
pass
# print("Succeed")
@python_app
def double(x):
return x * 2
@pytest.mark.local
def test_simple(n=10):
import time
start = time.time()
x = double(n)
print("Result : ", x.result())
assert x.result() == n * \
2, "Expected double to return:{0} instead got:{1}".format(
n * 2, x.result())
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return True
@pytest.mark.skip('broken')
def test_no_deps(numtasks=10):
"""Test basic error handling, with no dependent failures
"""
fus = []
for i in range(0, 10):
fu = sleep_fail(0.1, 0, .8)
fus.extend([fu])
count = 0
for fu in fus:
try:
fu.result()
except Exception as e:
print("Caught exception : ", "*" * 20)
print(e)
print("*" * 20)
count += 1
print("Caught failures of {0}/{1}".format(count, len(fus)))
@pytest.mark.skip('broken')
def test_fail_sequence(numtasks=10):
"""Test failure in a sequence of dependencies
App1 -> App2 ... -> AppN
"""
sleep_dur = 0.1
fail_prob = 0.4
fus = {0: None}
for i in range(0, numtasks):
print("Chaining {0} to {1}".format(i + 1, fus[i]))
fus[i + 1] = sleep_fail(sleep_dur, 0, fail_prob, inputs=[fus[i]])
# time.sleep(numtasks*sleep_dur)
for k in sorted(fus.keys()):
try:
            x = fus[k].result()
print("{0} : {1}".format(k, x))
except Exception as e:
print("{0} : {1}".format(k, e))
return
@pytest.mark.skip('broken')
def test_deps(numtasks=10):
"""Random failures in branches of Map -> Map -> reduce
App1 App2 ... AppN
"""
fus = []
for i in range(0, numtasks):
fu = sleep_fail(0.2, 0, .4)
fus.extend([fu])
# App1 App2 ... AppN
# | | |
# V V V
# App1 App2 ... AppN
fus_2 = []
for fu in fus:
fu = sleep_fail(0, 0, .8, inputs=[fu])
fus_2.extend([fu])
# App1 App2 ... AppN
# | | |
# V V V
# App1 App2 ... AppN
# \ | /
# \ | /
# App_Final
fu_final = sleep_fail(1, 0, 0, inputs=fus_2)
try:
print("Final status : ", fu_final.result())
except parsl.dataflow.error.DependencyError as e:
print("Caught the right exception")
print("Exception : ", e)
except Exception as e:
assert 5 == 1, "Expected DependencyError got : %s" % e
else:
print("Shoot! no errors ")
@python_app
def sleep_then_fail(sleep_dur=0.1):
import time
import math
time.sleep(sleep_dur)
math.ceil("Trigger TypeError")
return 0
@pytest.mark.skip('broken')
def test_fail_nowait(numtasks=10):
"""Test basic error handling, with no dependent failures
"""
import time
fus = []
for i in range(0, numtasks):
fu = sleep_then_fail(sleep_dur=0.1)
fus.extend([fu])
try:
[x.result() for x in fus]
except Exception as e:
assert isinstance(e, TypeError), "Expected a TypeError, got {}".format(e)
# fus[0].result()
time.sleep(1)
print("Done")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
test_simple()
# test_fail_nowait(numtasks=int(args.count))
# test_no_deps(numtasks=int(args.count))
# test_fail_sequence(numtasks=int(args.count))
# test_deps(numtasks=int(args.count))
|
{
"content_hash": "3ec1f2161782ccd39a6f184ba70cd209",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 81,
"avg_line_length": 23.579787234042552,
"alnum_prop": 0.544777802842319,
"repo_name": "Parsl/parsl",
"id": "e065dba9e65e41b20ba0adace3631800a4734497",
"size": "4433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/tests/test_error_handling/test_rand_fail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
}
|
import sys, os
import re
from subprocess import call, Popen, PIPE
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
from repo_util import run_cmd_get_output
# Call Doxygen to get XML files from the header files
print "Calling Doxygen to generate latest XML files"
call('doxygen')
# Generate 'api_name.inc' files using the XML files by Doxygen
os.system("python gen-dxd.py")
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
#
suppress_warnings = ['image.nonlocal_uri']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe', 'link-roles']
# Breathe extension variables
breathe_projects = { "esp32-idf": "xml/" }
breathe_default_project = "esp32-idf"
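# Editor's note (illustrative): with the mapping above, .rst pages can pull in
# Doxygen output through breathe directives, e.g. (the symbol name below is
# hypothetical):
#
#   .. doxygenfunction:: esp_example_init
#      :project: esp32-idf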
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ESP-IDF Programming Guide'
copyright = u'2016 - 2017, Espressif'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Readthedocs largely ignores 'version' and 'release', and displays one of
# 'latest', tag name, or branch name, depending on the build type.
# Still, this is useful for non-RTD builds.
# This is supposed to be "the short X.Y version", but it's the only version
# visible when you open index.html.
# Display full version to make things less confusing.
version = run_cmd_get_output('git describe')
# The full version, including alpha/beta/rc tags.
# If needed, nearest tag is returned by 'git describe --abbrev=0'.
release = version
print 'Version: {0} Release: {1}'.format(version, release)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Use sphinx_rtd_theme for local builds --------------------------------
# ref. https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
#
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
|
{
"content_hash": "22d4c26af2faa5b78597284686766276",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 106,
"avg_line_length": 33.514084507042256,
"alnum_prop": 0.711914267703299,
"repo_name": "hwmaier/esp-idf",
"id": "120de169a7921db3d7fbbba55b4b978fac257e1a",
"size": "9953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "138074"
},
{
"name": "C",
"bytes": "21269427"
},
{
"name": "C++",
"bytes": "2641475"
},
{
"name": "CMake",
"bytes": "4504"
},
{
"name": "Makefile",
"bytes": "63156"
},
{
"name": "Objective-C",
"bytes": "112775"
},
{
"name": "Python",
"bytes": "274902"
},
{
"name": "Shell",
"bytes": "17899"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from peewee import ForeignKeyField, DateTimeField
from wx.app import database
from wx.models.station import Station
class Report(database.Model):
station = ForeignKeyField(Station, related_name='reports')
timestamp = DateTimeField(default=datetime.now)
class Meta:
order_by = ('-timestamp',)
|
{
"content_hash": "2ec1451d0f1d29b940fcedfc93d5d87d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 23.2,
"alnum_prop": 0.75,
"repo_name": "zeckalpha/wx",
"id": "119582858572904690407fe7ba34bb523432ac0c",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wx/models/report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5728"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
import sys
import os
# -- Allow Markdown -----------------------------------------------------
# source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# Fake modules requiring C libraries
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.colors',
'matplotlib.pyplot', 'matplotlib.cm', 'matplotlib.path', 'matplotlib.patches', 'matplotlib.projections', 'matplotlib.projections.geo', 'healpy', 'astropy', 'astropy.io', 'pylibmc', 'HMpTy', 'HMpTy.mysql', 'ligo', 'ligo.gracedb', 'ligo.gracedb.rest']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
moduleDirectory = os.path.dirname(os.path.realpath(__file__))
exec(open(moduleDirectory + "/../../tastic/__version__.py").read())
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', 'sphinx.ext.graphviz']
# Generate Summaries
autosummary_generate = True
# Show Todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from datetime import datetime, date, time
now = datetime.now()
now = now.strftime("%Y")
project = u'tastic'
copyright = u'%(now)s, Dave Young' % locals()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "v" + str(__version__)
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates',
'**__version__.py', '**setup.py', 'api/tastic.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/thespacedoctor_icon_white_circle.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
html_add_permalinks = u" ∞"
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tasticdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tastic.tex', u'tastic Documentation',
u'Dave Young', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_images/thespacedoctor_icon_dark.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tastic', u'tastic Documentation',
[u'Dave Young'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tastic', u'tastic Documentation',
u'Dave Young', 'tastic', 'A python package for working with taskpaper documents',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Added to the start of every source file
# rst_prolog = """
# """
# The name of the default domain
primary_domain = "py"
trim_footnote_reference_space = True
def updateUsageRST():
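# Regenerate _includes/usage.rst from the command-line usage string found
# in tastic.cl_utils.__doc__, so the docs always show the current CLI help.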
from tastic import cl_utils
usage = cl_utils.__doc__
if not "Usage:" in usage or "todo:" in usage:
return None
usageString = ""
for l in usage.split("\n"):
usageString += " " + l + "\n"
usage = """Command-Line Usage
==================
.. code-block:: bash
%(usageString)s""" % locals()
moduleDirectory = os.path.dirname(__file__)
uFile = moduleDirectory + "/_includes/usage.rst"
exists = os.path.exists(uFile)
if exists:
import codecs
writeFile = codecs.open(uFile, encoding='utf-8', mode='w')
writeFile.write(usage)
writeFile.close()
return None
updateUsageRST()
def generateAutosummaryIndex():
import tastic
import inspect
import os.path
import time
# CHECK FOR LAST MODIFIED TIME - DON'T UPDATE IF < 5 SEC
# autobuild GOES INTO INFINITE LOOP OTHERWISE
moduleDirectory = os.path.dirname(__file__)
file = moduleDirectory + "/autosummary.rst"
pathToWriteFile = file
exists = os.path.exists(file)
if not exists:
pathToWriteFile = file
try:
writeFile = open(pathToWriteFile, 'w')
writeFile.write("")
writeFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToWriteFile,)
raise IOError(message)
now = time.time()
delta = now - os.path.getmtime(file)
if delta < 5:
return None
# GET ALL SUBPACKAGES
allSubpackages = ["tastic"]
allSubpackages += findAllSubpackges(
pathToPackage="tastic"
)
# INSPECT TO FIND ALL MODULES, CLASSES AND FUNCTIONS
allModules = []
allClasses = []
allFunctions = []
for sp in allSubpackages:
for name, obj in inspect.getmembers(__import__(sp, fromlist=[''])):
if inspect.ismodule(obj):
if name in ["numpy"]:
continue
thisMod = sp + "." + name
if thisMod not in allSubpackages and len(name) and name[0] != "_" and name[-5:] != "tests":
allModules.append(sp + "." + name)
for spm in allSubpackages + allModules:
for name, obj in inspect.getmembers(__import__(spm, fromlist=[''])):
if inspect.isclass(obj):
thisClass = spm + "." + name
if (thisClass == obj.__module__ or spm == obj.__module__) and len(name) and name[0] != "_":
allClasses.append(thisClass)
if inspect.isfunction(obj):
thisFunction = spm + "." + name
if (spm == obj.__module__ or obj.__module__ == thisFunction) and len(name) and name != "main" and name[0] != "_":
allFunctions.append(thisFunction)
allSubpackages = ("\n ").join(allSubpackages)
allModules = ("\n ").join(allModules)
allClasses = ("\n ").join(allClasses)
allFunctions = ("\n ").join(allFunctions)
thisText = u"""
Subpackages
-----------
.. autosummary::
:toctree: _autosummary
:nosignatures:
:template: autosummary/subpackage.rst
%(allSubpackages)s
Modules
-----------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allModules)s
Classes
-----------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allClasses)s
Functions
-----------
.. autosummary::
:toctree: _autosummary
:nosignatures:
%(allFunctions)s
""" % locals()
import codecs
moduleDirectory = os.path.dirname(__file__)
writeFile = codecs.open(
moduleDirectory + "/autosummary.rst", encoding='utf-8', mode='w')
writeFile.write(thisText)
writeFile.close()
import re
regex = re.compile(r'\n\s*.*?utKit\.utKit(\n|$)', re.I)
allClasses = regex.sub("\n", allClasses)
classAndFunctions = u"""
**Classes**
.. autosummary::
:nosignatures:
%(allClasses)s
**Functions**
.. autosummary::
:nosignatures:
%(allFunctions)s
""" % locals()
moduleDirectory = os.path.dirname(__file__)
writeFile = codecs.open(
moduleDirectory + "/classes_and_functions.rst", encoding='utf-8', mode='w')
writeFile.write(classAndFunctions)
writeFile.close()
return thisText
def findAllSubpackges(
pathToPackage
):
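# Walk the package tree and return fully-qualified subpackage names,
# skipping test packages and anything private (containing "._").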
import pkgutil
importedPackage = __import__(
pathToPackage, fromlist=[''])
subPackages = []
for importer, modname, ispkg in pkgutil.walk_packages(importedPackage.__path__, prefix=importedPackage.__name__ + '.',
onerror=lambda x: None):
if ispkg and "tests" != modname[-5:] and "._" not in modname and ".tests." not in modname:
subPackages.append(modname)
return subPackages
autosummaryText = generateAutosummaryIndex()
# Add substitutions here
rst_epilog = u"""
.. |tsd| replace:: thespacedoctor
""" % locals()
|
{
"content_hash": "de862f666cb648c808294a081f81e2f7",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 265,
"avg_line_length": 29.574786324786324,
"alnum_prop": 0.6508200274546637,
"repo_name": "thespacedoctor/tastic",
"id": "9db819255864f176eba954d62a99336dff8fa1a2",
"size": "14260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112624"
},
{
"name": "Shell",
"bytes": "2233"
}
],
"symlink_target": ""
}
|
import unittest
import pytest
from airflow.models import Connection
from airflow.models.dag import DAG
from airflow.providers.mongo.hooks.mongo import MongoHook
from airflow.providers.mongo.sensors.mongo import MongoSensor
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
@pytest.mark.integration("mongo")
class TestMongoSensor(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(conn_id='mongo_test', conn_type='mongo', host='mongo', port='27017', schema='test')
)
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
hook = MongoHook('mongo_test')
hook.insert_one('foo', {'bar': 'baz'})
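# seed a document that matches the sensor's query below so poke() returns True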
self.sensor = MongoSensor(
task_id='test_task',
mongo_conn_id='mongo_test',
dag=self.dag,
collection='foo',
query={'bar': 'baz'},
)
def test_poke(self):
assert self.sensor.poke(None)
|
{
"content_hash": "f85b0871adbd25e2412d07a7364da795",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 106,
"avg_line_length": 28.63888888888889,
"alnum_prop": 0.6304558680892337,
"repo_name": "dhuang/incubator-airflow",
"id": "6623631550855bccb6b162e6e7f56a76e1fdd814",
"size": "1820",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/mongo/sensors/test_mongo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: BSD
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'pytds://user:password@server:port/database' # python-tds
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from ._compat import (
PY2,
pickle,
hashlib_md5,
pjoin,
copyreg,
integer_types,
with_metaclass,
long,
unquote,
iteritems,
)
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import (
Serializable,
SQLCallableList,
BasicStorage,
RecordUpdater,
RecordDeleter,
TimingHandler,
)
from .helpers.methods import hide_password, smart_query, auto_validators, auto_represent, uuidstr
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
from .default_validators import default_validators
TABLE_ARGS = set(
(
"migrate",
"primarykey",
"fake_migrate",
"format",
"redefine",
"singular",
"plural",
"trigger_name",
"sequence_name",
"fields",
"common_filter",
"polymodel",
"table_class",
"on_define",
"rname",
)
)
class MetaDAL(type):
def __call__(cls, *args, **kwargs):
#: intercept arguments for DAL customisation on call
intercepts = [
"logger",
"representers",
"serializers",
"uuid",
"validators",
"validators_method",
"Table",
"Row",
]
intercepted = []
for name in intercepts:
val = kwargs.get(name)
if val:
intercepted.append((name, val))
del kwargs[name]
for tup in intercepted:
setattr(cls, tup[0], tup[1])
obj = super(MetaDAL, cls).__call__(*args, **kwargs)
return obj
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
changes you can still use the old .tables if they have db_hash
as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries to automatically import table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can be a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
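A sketch combining a few of the optional keyword arguments documented
above (the particular values shown here are illustrative, not defaults)::
db = DAL('sqlite://test.db', pool_size=10, check_reserved=['common'],
lazy_tables=True, migrate_enabled=False)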
"""
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = uuidstr
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
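# One group of DAL instances is kept per thread, keyed by db_uid (an md5 of
# the uri unless given explicitly); the special "<zombie>" uri, used when
# unpickling, reattaches to an existing instance or parks a placeholder
# until the real connection is created.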
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
THREAD_LOCAL._pydal_db_instances_ = {}
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
THREAD_LOCAL._pydal_db_instances_zombie_ = {}
if uri == "<zombie>":
db_uid = kwargs["db_uid"] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
else:
db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, "_pydal_db_instances_", {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
"defined": sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
"lazy": sorted(db._LAZY_TABLES.keys()),
},
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
instances = list(enumerate(instances))  # materialize so it can be iterated more than once
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = list(enumerate(instances))  # materialize so it can be iterated more than once
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
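# two-phase commit: prepare every participant first; if any prepare
# fails, roll back all prepared transactions, otherwise commit them all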
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(
self,
uri="sqlite://dummy.db",
pool_size=0,
folder=None,
db_codec="UTF-8",
check_reserved=None,
migrate=True,
fake_migrate=False,
migrate_enabled=True,
fake_migrate_all=False,
decode_credentials=False,
driver_args=None,
adapter_args=None,
attempts=5,
auto_import=False,
bigint_id=False,
debug=False,
lazy_tables=False,
db_uid=None,
after_connection=None,
tables=None,
ignore_field_case=True,
entity_quoting=True,
table_hash=None,
):
if uri == "<zombie>" and db_uid is not None:
return
super(DAL, self).__init__()
if not issubclass(self.Rows, Rows):
raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
if not issubclass(self.Row, Row):
raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._pending_references = {}
self._request_tenant = "request_tenant"
self._common_fields = []
self._referee_name = "%(table)s"
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._aliased_tables = threading.local()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
connected = False
for k in range(attempts):
for uri in uris:
try:
from .adapters import adapters
if is_jdbc and not uri.startswith("jdbc:"):
uri = "jdbc:" + uri
self._dbname = REGEX_DBNAME.match(uri).group()
# pass driver_args or {} (a fresh dict per call); relying on a shared
# global {} default would leak state between connections, which is not correct
kwargs = dict(
db=self,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
after_connection=after_connection,
entity_quoting=entity_quoting,
)
adapter = adapters.get_for(self._dbname)
self._adapter = adapter(**kwargs)
# self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
self._adapter.dialect._force_bigints()
# if there are multiple URIs to try in sequence, do not defer connection
if len(uris) > 1:
self._adapter.connector()
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug(
"DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
)
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError(
"Failure to connect, tried %d times:\n%s" % (attempts, tb)
)
else:
self._adapter = NullAdapter(
db=self,
pool_size=0,
uri="None",
folder=folder,
db_codec=db_codec,
after_connection=after_connection,
entity_quoting=entity_quoting,
)
migrate = fake_migrate = False
self.validators_method = None
self.validators = None
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
if check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder, tables=tables)
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, "_pydal_timings_", [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
def import_table_definitions(
self, path, migrate=False, fake_migrate=False, tables=None
):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path, self._uri_hash + "_*.table")
for filename in glob.glob(pattern):
tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern) - 7 : -6]
mf = [
(
value["sortable"],
Field(
key,
type=value["type"],
length=value.get("length", None),
notnull=value.get("notnull", False),
unique=value.get("unique", False),
),
)
for key, value in iteritems(sql_fields)
]
mf.sort(key=lambda a: a[0])
self.define_table(
name,
*[item[1] for item in mf],
**dict(migrate=migrate, fake_migrate=fake_migrate)
)
finally:
self._adapter.migrator.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL/NOSQL reserved keywords.
Uses self._check_reserved, which is the list of adapter keyword sets to
check against.
"""
for backend in self._check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
% (name, backend.upper())
)
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
return RestParser(self).parse(patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **kwargs):
invalid_kwargs = set(kwargs) - TABLE_ARGS
if invalid_kwargs:
raise SyntaxError(
'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
)
if not fields and "fields" in kwargs:
fields = kwargs.get("fields", ())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
redefine = kwargs.get("redefine", False)
if tablename in self.tables:
if redefine:
try:
delattr(self, tablename)
except:
pass
else:
raise SyntaxError("table already defined: %s" % tablename)
elif (
tablename.startswith("_")
or tablename in dir(self)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError("invalid table name: %s" % tablename)
elif self._check_reserved:
self.check_reserved_keyword(tablename)
if self._lazy_tables:
if tablename not in self._LAZY_TABLES or redefine:
self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **kwargs)
if tablename not in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(self, tablename, *fields, **kwargs):
kwargs_get = kwargs.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + [
f if isinstance(f, Table) else f.clone() for f in common_fields
]
table_class = kwargs_get("table_class", Table)
table = table_class(self, tablename, *fields, **kwargs)
table._actual = True
self[tablename] = table
# must follow above line to handle self references
table._create_references()
for field in table:
if field.requires is DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
if (
migrate
and self._uri not in (None, "None")
or self._adapter.dbengine == "google:datastore"
):
fake_migrate = self._fake_migrate_all or kwargs_get(
"fake_migrate", self._fake_migrate
)
polymodel = kwargs_get("polymodel", None)
try:
GLOBAL_LOCKER.acquire()
self._adapter.create_table(
table,
migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel,
)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = kwargs_get("on_define", None)
if on_define:
on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[
(k, getattr(self, "_" + k, None))
for k in [
"pool_size",
"folder",
"db_codec",
"check_reserved",
"migrate",
"fake_migrate",
"migrate_enabled",
"fake_migrate_all",
"decode_credentials",
"driver_args",
"adapter_args",
"attempts",
"bigint_id",
"debug",
"lazy_tables",
]
]
)
)
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(
self, "_lazy_tables"
) and key in object.__getattribute__(self, "_LAZY_TABLES"):
tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **kwargs)
aliased_tables = object.__getattribute__(self, "_aliased_tables")
aliased = getattr(aliased_tables, key, None)
if aliased:
return aliased
return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object %s exists and cannot be redefined" % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, "_uri"):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
def where(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query != None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf:
ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def rollback(self):
self._adapter.rollback()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
self._adapter._clean_tlocals()
def executesql(
self,
query,
placeholders=None,
as_dict=False,
fields=None,
colnames=None,
as_ordered_dict=False,
):
"""
Executes an arbitrary query
Args:
query (str): the query to submit to the backend
placeholders: optional; defaults to None.
If using raw SQL with placeholders, placeholders may be
a sequence of values to be substituted in,
or (if supported by the DB driver) a dictionary with keys
matching named placeholders in your SQL.
as_dict: defaults to False.
If using raw SQL, it can be set to True, and the results cursor
returned by the DB driver will be converted to a sequence of
dictionaries keyed with the db field names. Results returned
with as_dict=True are the same as those returned when applying
.to_list() to a DAL query. If "as_ordered_dict"=True the
behaviour is the same as when "as_dict"=True with the keys
(field names) guaranteed to be in the same order as returned
by the select name executed on the database.
fields: list of DAL Fields that match the fields returned from the
DB. The Field objects should be part of one or more Table
objects defined on the DAL object. The "fields" list can include
one or more DAL Table objects in addition to or instead of
including Field objects, or it can be just a single table
(not in a list). In that case, the Field objects will be
extracted from the table(s).
Note:
if either `fields` or `colnames` is provided, the results
will be converted to a DAL `Rows` object using the
`db._adapter.parse()` method
colnames: list of field names in tablename.fieldname format
Note:
It is also possible to specify both "fields" and the associated
"colnames". In that case, "fields" can also include DAL Expression
objects in addition to Field objects. For Field objects in "fields",
the associated "colnames" must still be in tablename.fieldname
format. For Expression objects in "fields", the associated
"colnames" can be any arbitrary labels.
DAL Table objects referred to by "fields" or "colnames" can be dummy
tables and do not have to represent any real tables in the database.
Also, note that the "fields" and "colnames" must be in the
same order as the fields in the results cursor returned from the DB.
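Example:
A minimal sketch (assumes a table named 'person' with a 'name' field
has already been defined on this DAL instance; the table and field
names here are placeholders, not part of the API)::
raw_rows = db.executesql('SELECT name FROM person;')
dict_rows = db.executesql('SELECT name FROM person;', as_dict=True)
dal_rows = db.executesql('SELECT person.id, person.name FROM person;',
fields=[db.person.id, db.person.name])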
"""
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor, "description"):
raise RuntimeError(
"database does not support executesql(...,as_dict=True)"
)
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = adapter.cursor.description
# reduce the column info down to just the field names
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError(
"Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
)
#: avoid bytes strings in columns names (py3)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
# will hold our finished resultset in a list
data = adapter.fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields, row)) for row in data]
try:
data = adapter.fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = [f.sqlsafe for f in extracted_fields]
else:
#: extracted_fields is empty we should make it from colnames
# what 'col_fields' is for
col_fields = [] # [[tablename, fieldname], ....]
newcolnames = []
for tf in colnames:
if "." in tf:
t_f = tf.split(".")
tf = ".".join(adapter.dialect.quote(f) for f in t_f)
else:
t_f = None
if not extracted_fields:
col_fields.append(t_f)
newcolnames.append(tf)
colnames = newcolnames
data = adapter.parse(
data,
fields=extracted_fields or [tf and self[tf[0]][tf[1]] for tf in col_fields],
colnames=colnames
)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get("max_fetch_rows", 500))
write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write("TABLE %s\r\n" % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs["write_colnames"] = write_colnames
for k in range(0, nrows, step):
self(query).select(limitby=(k, k + step)).export_to_csv_file(
ofile, *args, **kwargs
)
kwargs["write_colnames"] = False
ofile.write("\r\n\r\n")
ofile.write("END")
def import_from_csv_file(
self,
ifile,
id_map=None,
null="<NULL>",
unique="uuid",
map_tablenames=None,
ignore_missing_tables=False,
*args,
**kwargs
):
# if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == "END":
return
elif not line.startswith("TABLE "):
raise SyntaxError("Invalid file format")
elif not line[6:] in self.tables:
raise SyntaxError("Unknown table : %s" % line[6:])
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename, tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset, *args, **kwargs
)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError(
"Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
)
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
return DAL("<zombie>", db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
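# A sketch of what the registration above enables (assumes `db` is a DAL
# instance created earlier in the same thread): pickle.dumps(db) stores only
# db._db_uid, and pickle.loads() rebuilds it as DAL("<zombie>", db_uid=...),
# which reattaches to the live per-thread instance in DAL.__new__.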
|
{
"content_hash": "30221cd8f010fb0de3542ab859dba478",
"timestamp": "",
"source": "github",
"line_count": 1026,
"max_line_length": 169,
"avg_line_length": 36.95711500974659,
"alnum_prop": 0.5365525607890712,
"repo_name": "willimoa/pydal",
"id": "b1bd7cf49dea93c1f298faa0bc9db83838259804",
"size": "37970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydal/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "334"
},
{
"name": "Python",
"bytes": "1291647"
}
],
"symlink_target": ""
}
|
"""sorno_locate_git gets the remote location of a local file/directory from a
local git repository.
Copyright 2016 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import os
import sys
class App(object):
def __init__(self, args):
self.args = args
def run(self):
loc = self.args.file_or_dir
if not loc:
loc = "."
loc_path = os.path.abspath(loc)
if os.path.isdir(loc_path):
path = loc_path
else:
path = os.path.dirname(loc_path)
# find git config file
git_config_file = None
# The local root directory of your local repo, e.g. /User/user1/mygit
root_dir = None
while path and path != "/":
if ".git" in os.listdir(path):
root_dir = path
git_dir = os.path.join(path, ".git")
if "config" in os.listdir(git_dir):
git_config_file = os.path.join(git_dir, "config")
break
path = os.path.dirname(path)
if not git_config_file:
print("Cannot find git config file", file=sys.stderr)
return 1
prefix = "url = "
with open(git_config_file) as file_obj:
for line in file_obj:
line = line.strip()
if line.startswith(prefix):
# E.g. found "http://github.com/xxx/yyy" in the git config
# file
git_repo_url = line[len(prefix):]
# E.g. /User/user1/mygit/my/awesome/file becomes
# my/awesome/file
relative_path = loc_path[len(root_dir):].lstrip("/")
if relative_path and "github.com" in git_repo_url:
relative_path = os.path.join(
"tree/master",
relative_path,
)
remote_loc = os.path.join(
# take out .git from the end
git_repo_url[:-4],
relative_path,
)
remote_loc = remote_loc.rstrip("/")
# found it!
print(remote_loc)
return 0
print("Cannot find the remote url", file=sys.stderr)
return 1
def parse_args(cmd_args):
description = __doc__.split("Copyright 2016")[0].strip()
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("file_or_dir", nargs="?")
args = parser.parse_args(cmd_args)
return args
def main():
args = parse_args(sys.argv[1:])
app = App(args)
sys.exit(app.run())
if __name__ == '__main__':
main()
|
{
"content_hash": "c7d2a66f6e067f6863d585fdfdfa0627",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 30.791304347826088,
"alnum_prop": 0.542219711945778,
"repo_name": "hermantai/sorno-py-scripts",
"id": "77078bf4c27dffa6816f315df4932bbd4570d561",
"size": "3563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/sorno_locate_git.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "318513"
},
{
"name": "Shell",
"bytes": "954"
}
],
"symlink_target": ""
}
|
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class S198_Handler(BaseHTTPRequestHandler):
def do_GET(self):
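# Serve index.html and append the request path (minus the leading '/')
# as a <p id='username'> element before closing the HTML document.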
print self.path
f = open(curdir + sep + "index.html")
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
self.wfile.write("<p id='username'>"+self.path[1:]+"</p>")
self.wfile.write("\n</HTML>")
f.close()
return
def main():
try:
server = HTTPServer(('', 8000), S198_Handler)
print 'Started Server on port 8000'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == "__main__":
main()
|
{
"content_hash": "c16daa5078c74e2ec987a7a237e09b30",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 30.26923076923077,
"alnum_prop": 0.5781448538754765,
"repo_name": "amin10/scripts",
"id": "5db66d226bcdb30af6d9e8f70f2a5d25f9f6b5f0",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonserver/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34"
},
{
"name": "Python",
"bytes": "787"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
import pandas as pd
from confparser import load_config
from expyriment import stimuli, misc
def launch_instructions(instructions_ini, exp):
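# Present the instruction slides read from the CSV referenced by the .ini
# settings. ENTER advances, LEFT goes back one slide, "h" returns to the
# main menu; the configured YES/NO keys answer the embedded example trials
# and receive correct/incorrect feedback.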
# Select .ini file for instructions
setting = load_config(instructions_ini)
# Define the pathway of the instructions file
instructions_fname = ''.join((setting["inst_filename"], ".csv"))
instructions_dir = os.path.abspath((setting["inputs_dir"]))
instructions_path = os.path.join(instructions_dir, instructions_fname)
# Generate a dataframe containing the instructions
df_inst = pd.read_csv(instructions_path, sep='|')
# Convert the dataframe into a list
instructions = df_inst.values.tolist()
# Convert each element of the dataframe into a string
instructions = [[''.join(instructions[i][j])
for j in np.arange(len(instructions[i]))]
for i in np.arange(len(df_inst))]
# Initialization of variable containing the value of the key pressed
found_key = 0
response_key = 0
# While "h" key to return to main menu is not pressed...
while not (found_key == misc.constants.K_h or response_key == 'h'):
# Read the instructions file, line by line
ldx = 0
while ldx < len(instructions):
line = instructions[ldx]
# ... and item by item
for word in line:
# For lines with one item
if word in ("no_item", "no_probe", "fdbk_yes",
"fdbk_no"):
pass
# For lines corresponding to the examples, i.e. containing
# more than one item
else:
text_display = stimuli.TextBox(
word.decode('utf-8'),
map(int, setting["box_size"]),
position=map(int, setting["box_position"]),
text_size=setting["txtsize"],
text_colour=map(int, setting["txtcolour"]))
text_display.present()
exp.clock.wait(300)
# Check whether "h" key was pressed
found_key = exp.keyboard.check([misc.constants.K_h])
# If yes, breaks the loop
if found_key == misc.constants.K_h:
break
# If "h" key was pressed during the presentation of the example,
# it breaks the loop and return to main menu
if found_key == misc.constants.K_h:
break
# After the display of the last word of sentence's example,
# goes straight to the next line of instructions
elif line[-1] not in ("no_item", "fdbk_yes", "fdbk_no"):
exp.clock.wait(300)
# Waits for the participant's response and gives feedback whether
# the answer was correct or not
elif line[-1] in ("fdbk_yes", "fdbk_no"):
response_key, _ = exp.keyboard.wait_char([setting["YES"],
setting["NO"], 'h'])
if response_key == 'h':
break
elif ((response_key == setting["YES"] and
line[-1] == "fdbk_yes") or
(response_key == setting["NO"] and
line[-1] == "fdbk_no")):
message_display = stimuli.TextLine(
"Correct!", text_size=setting["txtsize"],
text_colour=(0, 204, 0))
message_display.present()
exp.clock.wait(2000)
else:
message_display = stimuli.TextLine(
"Incorrect!", text_size=setting["txtsize"],
text_colour=(255, 0, 0))
message_display.present()
exp.clock.wait(2000)
            # Checks whether the "ENTER", "LEFT" or "h" key was pressed.
            # If "ENTER", goes to the next line;
            # if "LEFT", goes to the previous slide;
            # if "h", returns to the main menu.
else:
found_key, _ = exp.keyboard.wait([misc.constants.K_RETURN,
misc.constants.K_LEFT,
misc.constants.K_h])
if found_key == misc.constants.K_LEFT:
ldx = ldx - 2
if ldx < 0:
ldx = -1
elif found_key == misc.constants.K_h:
break
ldx = ldx + 1
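# A minimal calling sketch (comments only; the expyriment initialisation calls
# and the .ini path below are assumptions, not part of this module):
#   from expyriment import control, design
#   exp = design.Experiment(name="rsvp_language")
#   control.initialize(exp)
#   launch_instructions("config/instructions.ini", exp)
#   control.end()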
|
{
"content_hash": "839dcfa8d4c868cb8b95907d1f8bca2e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 47.22222222222222,
"alnum_prop": 0.4954010695187166,
"repo_name": "hbp-brain-charting/public_protocols",
"id": "074ee3a74ff4f44d58409aa57838b5c2f7a99d16",
"size": "4700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsvp_language/rsvp_language_protocol/langexpy_script/instdisplay.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "19731"
},
{
"name": "C",
"bytes": "1437"
},
{
"name": "CSS",
"bytes": "108589"
},
{
"name": "Cython",
"bytes": "46795"
},
{
"name": "HTML",
"bytes": "7261"
},
{
"name": "JavaScript",
"bytes": "608582"
},
{
"name": "Jupyter Notebook",
"bytes": "10815"
},
{
"name": "MATLAB",
"bytes": "387974"
},
{
"name": "Python",
"bytes": "864626"
},
{
"name": "Scilab",
"bytes": "230686"
},
{
"name": "Shell",
"bytes": "1053"
}
],
"symlink_target": ""
}
|
"""
Allows downloading a Plex media item from a local or shared library. You
may specify the item by the PlexWeb url (everything after !) or by
manually searching the items from the command line wizard.
Original contribution by lad1337.
"""
import argparse
import os
import re
from urllib.parse import unquote
from plexapi import utils
from plexapi.video import Episode, Movie, Show
VALID_TYPES = (Movie, Episode, Show)
def search_for_item(url=None):
    if url: return get_item_from_url(url)
servers = [s for s in account.resources() if 'server' in s.provides]
server = utils.choose('Choose a Server', servers, 'name').connect()
query = input('What are you looking for?: ')
item = []
items = [i for i in server.search(query) if i.__class__ in VALID_TYPES]
items = utils.choose('Choose result', items, lambda x: '(%s) %s' % (x.type.title(), x.title[0:60]))
if not isinstance(items, list):
items = [items]
for i in items:
if isinstance(i, Show):
display = lambda i: '%s %s %s' % (i.grandparentTitle, i.seasonEpisode, i.title)
selected_eps = utils.choose('Choose episode', i.episodes(), display)
if isinstance(selected_eps, list):
item += selected_eps
else:
item.append(selected_eps)
else:
item.append(i)
if not isinstance(item, list):
item = [item]
return item
def get_item_from_url(url):
# Parse the ClientID and Key from the URL
clientid = re.findall('[a-f0-9]{40}', url)
key = re.findall('key=(.*?)(&.*)?$', url)
if not clientid or not key:
raise SystemExit('Cannot parse URL: %s' % url)
clientid = clientid[0]
key = unquote(key[0][0])
# Connect to the server and fetch the item
servers = [r for r in account.resources() if r.clientIdentifier == clientid]
if len(servers) != 1:
raise SystemExit('Unknown or ambiguous client id: %s' % clientid)
server = servers[0].connect()
return server.fetchItem(key)
if __name__ == '__main__':
# Command line parser
from plexapi import CONFIG
from tqdm import tqdm
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-u', '--username', help='Your Plex username',
default=CONFIG.get('auth.myplex_username'))
parser.add_argument('-p', '--password', help='Your Plex password',
default=CONFIG.get('auth.myplex_password'))
parser.add_argument('--url', default=None, help='Download from URL (only paste after !)')
opts = parser.parse_args()
# Search item to download
account = utils.getMyPlexAccount(opts)
items = search_for_item(opts.url)
for item in items:
for part in item.iterParts():
# We do this manually since we don't want to add a progress to Episode etc
filename = '%s.%s' % (item._prettyfilename(), part.container)
url = item._server.url('%s?download=1' % part.key)
filepath = utils.download(url, token=account.authenticationToken, filename=filename, savepath=os.getcwd(),
session=item._server._session, showstatus=True)
#print(' %s' % filepath)
|
{
"content_hash": "baa07e40492940208b0f3033264056f0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 118,
"avg_line_length": 38.32941176470588,
"alnum_prop": 0.6243093922651933,
"repo_name": "pkkid/python-plexapi",
"id": "69a39b8134ce608ed7516104768953dad8750c16",
"size": "3305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/plex-download.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "932008"
}
],
"symlink_target": ""
}
|
"""Provides yaml parser Python APIs for Matter."""
from . import parser
|
{
"content_hash": "6caed77d47a4bc99117a4d3232cd2dfc",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 36,
"alnum_prop": 0.7361111111111112,
"repo_name": "project-chip/connectedhomeip",
"id": "055bec97cafac42d1c5829eb9901eb5b7ac7834a",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/controller/python/chip/yaml/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
}
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2022 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from pysces.version import __version__
__doc__ = '''network and internet oriented utilities'''
from time import strftime
from getpass import getuser
class PyscesHTML:
"""PySCeS HTML formatting class: contains some basic html elements that can be used in generated reports."""
__version__ = __version__
def HTML_header(self, File):
"""
HTML_header(File)
Write an HTML page header to file (use with HTML_footer)
Arguments:
=========
File: an open, writable Python file object
"""
header = '\n'
header += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
header += '<html>\n'
header += '<head>\n'
header += (
'<title>PySCeS data generated at '
+ strftime("%H:%M:%S (%Z)")
+ '</title>\n'
)
header += (
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\n'
)
header += '</head>\n'
header += '<body>\n\n'
header += '<h4><a href="http://pysces.sourceforge.net">PySCeS</a></h4>\n\n'
File.write(header)
File.write('<!-- PySCeS data generated at ' + strftime("%H:%M:%S") + '-->\n\n')
return File
def HTML_footer(self, File):
"""
HTML_footer(File)
Write an HTML page footer to file (use with HTML_header)
Arguments:
=========
File: an open, writable Python file object
"""
File.write(
'\n<p><a href="http://pysces.sourceforge.net"><font size="3">PySCeS '
+ __version__
+ '</font></a><font size="2"> output\n generated at '
+ strftime("%H:%M:%S")
+ ' by <i>'
)
try:
File.write(getuser())
except:
File.write('PySCeS')
File.write('</i>)</font></p>\n')
File.write('</body>\n')
File.write('</html>\n\n')
return File
def par(self, str, File=None, align='l', txtout=0):
"""
par(str,File=None,align='l',txtout=0)
Format <par> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
txtout [default=0]: do not write to file (1) return formatted HTML string
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<p align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</p>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h1(self, str, File=None, align='l', txtout=0):
"""
h1(str,File=None,align='l',txtout=0)
Format <h1> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
txtout [default=0]: do not write to file (1) return formatted HTML string
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h1 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
print(str[x])
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</h1>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h2(self, str, File=None, align='l', txtout=0):
"""
h2(str,File=None,align='l',txtout=0)
Format <h2> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
txtout [default=0]: do not write to file (1) return formatted HTML string
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h2 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
print(str[x])
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n '
else:
seeker_active = 1
cntr = 0
strout += '\n</h2>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
def h3(self, str, File=None, align='l', txtout=0):
"""
h3(str,File=None,align='l',txtout=0)
Format <h3> text and write it to a file (or string)
Arguments:
=========
str: the string of text to be written
File [default=None]: an open, writable, Python file object
align [default='l']: HTML alignment attribute ('l','c','r')
txtout [default=0]: do not write to file (1) return formatted HTML string
"""
if not txtout:
            assert hasattr(File, 'write'), 'The 2nd argument needs to be an open file'
if align == 'l':
align = 'left'
elif align == 'r':
            align = 'right'
elif align == 'c':
align = 'center'
else:
align = ''
strout = '\n<h3 align="' + align + '">'
cntr = 0
max_str_len = 75
seeker_active = 0
for x in range(len(str)):
cntr += 1
strout += str[x]
if seeker_active:
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
cntr = max_str_len
seeker_active = 0
if cntr >= max_str_len:
print(str[x])
if str[x] == ' ' or str[x] == '.' or str[x] == ',':
strout += '\n'
else:
seeker_active = 1
cntr = 0
strout += '\n</h3>\n'
if txtout:
return strout
else:
File.write(strout)
del str
del strout
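# Minimal usage sketch for PyscesHTML (comments only; the output filename and
# the report text are illustrative):
#   html = PyscesHTML()
#   F = open('report.html', 'w')
#   F = html.HTML_header(F)
#   html.h1('Steady state report', File=F)
#   html.par('All fluxes converged.', File=F)
#   html.HTML_footer(F)
#   F.close()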
import email
import email.utils
import mimetypes
import smtplib
from email.mime.text import MIMEText
from time import sleep, strftime
from getpass import getuser
import os
class PyscesSMTP:
"""A purely experimental class that extends PySCeS with SMTP mailer capabilities. Initialise with
sender address and local mail server name."""
__smtp_active = 0
def __init__(self, fromadd, server):
self.server = server
try:
self.userstr = getuser()
except:
self.userstr = 'PySCeS '
self.msgintro = ''
self.fromhead = self.userstr + ' <' + fromadd + '>'
self.signature = (
3 * '\n'
+ '---\nSent using PySCeS 0.2.2 (http://pysces.sourceforge.net/)\n '
)
# auto-open connection now closed
# self.SMTPOpen()
def GenericMail(self, toadd, msgtxt, subj='PySCeS generated email'):
"""
GenericMail( toadd, msgtxt, subj='PySCeS generated email')
Generate and send a text (non-mime) email message
Arguments:
=========
toadd: recipient address
msgtxt: the message body as a string
subj [default='PySCeS generated email']: message subject line
"""
assert type(msgtxt) == str, '\nMessage text must be a string'
assert self.__smtp_active, 'SMTP Server not active\n'
msgtxt = self.msgintro + msgtxt
msgtxt += self.signature
outer = MIMEText(msgtxt)
outer['Subject'] = subj
outer['To'] = toadd
outer['From'] = self.fromhead
        outer['Date'] = email.utils.formatdate(localtime=True)
outer.epilogue = ' '
if self.CheckGo():
try:
self.__SMTPserver.sendmail(self.fromhead, toadd, outer.as_string())
            except smtplib.SMTPServerDisconnected as e:
print(e)
self.SMTPOpen()
self.__SMTPserver.sendmail(self.fromhead, toadd, outer.as_string())
sleep(0.2)
else:
print('\nEmail send aborted')
def CheckGo(self):
"""
CheckGo()
Do you want to continue yes or no?
Returns 1 or 0
Arguments:
None
"""
GO = 1
while GO:
resp = input('\nDo you want to continue (yes/no): ')
if resp.lower() == 'yes':
print('OK.')
GO = 0
return 1
elif resp.lower() == 'no':
print('Skipped.')
GO = 0
return 0
else:
print('\nyes to continue, no to exit')
## def GenericMailHTML(self, toadd, msgtxt, htmltxt, subj='PySCeS generated email'):
## """
## GenericMailHTML( toadd, msgtxt, htmltxt, subj='PySCeS generated email')
##
## Generate a mime-compliant HTML email message
##
## Arguments:
## =========
## toadd: recipient address
## msgtxt: text only message string
## htmltxt: html formatted message string
## subj [default='PySCeS generated email']: the subject line
##
## """
## assert type(msgtxt) == str, '\nMessage text must be a string'
## assert self.__smtp_active, 'SMTP Server not active\n'
## # Create the enclosing (outer) message
## outer = email.MIMEMultipart.MIMEMultipart()
## outer['Subject'] = subj
## outer['To'] = toadd
## outer['From'] = self.fromhead
## outer['Date'] = email.Utils.formatdate(localtime='true')
## outer.preamble = ' \n'
## outer.epilogue = '\n---\nGenerated by PySCeS 0.2.2\n '
##
## msgtxt += self.signature
## msg = email.MIMEText.MIMEText(msgtxt)
## msg.add_header('Content-Disposition', 'inline')
## outer.attach(msg)
##
## self.__SMTPserver.sendmail(self.fromhead,toadd,outer.as_string())
##
## ctype='text/plain'
## maintype, subtype = ctype.split('/', 1)
## fp = open(infile, 'r')
## att = email.MIMEBase.MIMEBase(maintype, subtype)
## att.set_payload(fp.read())
## fp.close()
## # Encode the payload using Base64
## #email.Encoders.encode_base64(att)
## # Set the filename parameter
## att.add_header('Content-Disposition', 'attachment', filename=infile)
## outer.attach(att)
##
## SMTPserver.sendmail(fromhead,toadd,outer.as_string())
##
## sleep(0.2) #seconds
def SMTPOpen(self):
"""
SMTPOpen()
Start client and connect to an SMTP server
Arguments:
None
"""
self.__SMTPserver = smtplib.SMTP(self.server)
self.__smtp_active = 1
print('\nSMTP server connection opened\n')
def SMTPClose(self):
"""
SMTPClose()
Close connection to SMTP server
Arguments:
None
"""
self.__SMTPserver.close()
self.__smtp_active = 0
print('\nSMTP server connection closed\n')
if __name__ == '__main__':
replyTo = 'bgoli@sun.ac.za'
server = 'mail.sun.ac.za'
print('Reply to:', replyTo)
print('SMTP server:', server)
smtp = PyscesSMTP(replyTo, server)
smtp.GenericMail(
'bgoli@sun.ac.za',
'This test message created: ' + strftime("%a, %d %b %Y %H:%M:%S"),
)
# smtp.GenericMail('jr@sun.ac.za','This test message created: '+ strftime("%a, %d %b %Y %H:%M:%S"))
smtp.SMTPClose()
|
{
"content_hash": "812180271c5ef51d51776f7021a71568",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 112,
"avg_line_length": 29.818930041152264,
"alnum_prop": 0.5002760143527464,
"repo_name": "PySCeS/pysces",
"id": "e22afda484a99d13bf9c5fc06226b5d3ed3e64da",
"size": "14492",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pysces/PyscesWeb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1932"
},
{
"name": "CMake",
"bytes": "2761"
},
{
"name": "Fortran",
"bytes": "1182461"
},
{
"name": "Papyrus",
"bytes": "11983"
},
{
"name": "Python",
"bytes": "1931888"
},
{
"name": "Shell",
"bytes": "2667"
}
],
"symlink_target": ""
}
|
from caravan.commands import run_swf_command
from caravan.commands.base import BaseCommand
class Command(BaseCommand):
description = 'Terminate a workflow execution'
def setup_arguments(self, parser):
parser.add_argument('-d', '--domain', required=True)
parser.add_argument('-i', '--id', required=True)
parser.add_argument('--run-id')
parser.add_argument('--reason')
parser.add_argument('--details')
parser.add_argument('--child-policy', choices=self.CHILD_POLICIES)
def run(self):
run_swf_command(
'terminate_workflow_execution',
domain=self.args.domain,
workflowId=self.args.id,
runId=self.args.run_id,
reason=self.args.reason,
details=self.args.details,
childPolicy=self.args.child_policy,
)
return 'Execution terminated.'
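# Illustrative command-line use (the executable name, domain and workflow id
# below are assumptions; only the option flags are defined above):
#   caravan terminate --domain my-domain --id my-workflow-id --reason "operator request"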
|
{
"content_hash": "cc31e71e1b21f964a92d018a56261920",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 33.44444444444444,
"alnum_prop": 0.6190476190476191,
"repo_name": "pior/caravan",
"id": "6fd1c3ea7baa984f53878caf79ac0239eaf98d43",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caravan/commands/terminate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63052"
}
],
"symlink_target": ""
}
|
'''Setup for filament_watch'''
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name="filament_watch",
version="1.0",
author="Richard L. Lynch",
author_email="rich@richlynch.com",
description=("Monitors filament motion and pauses/cancels OctoPrint if the filament stops feeding."),
long_description=README,
license="MIT",
keywords="3d_printer 3d printer filament watch monitor jam safety",
url="https://github.com/rllynch/filament_watch",
packages=['filament_watch'],
include_package_data=True,
entry_points={
'console_scripts': ['filament_watch = filament_watch.filament_watch:main'],
},
install_requires=[
'requests',
'pyserial',
'cherrypy>=3.1',
'pyyaml'
]
)
|
{
"content_hash": "1c747349e203704f4103546ad2ba1305",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 105,
"avg_line_length": 26.272727272727273,
"alnum_prop": 0.6551326412918108,
"repo_name": "rllynch/filament_watch",
"id": "59a25d6c5e72fa5c23f352e42866bc6f8b048141",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1198"
},
{
"name": "CSS",
"bytes": "191"
},
{
"name": "HTML",
"bytes": "1176"
},
{
"name": "JavaScript",
"bytes": "4241"
},
{
"name": "Makefile",
"bytes": "196"
},
{
"name": "Python",
"bytes": "43386"
},
{
"name": "Shell",
"bytes": "426"
}
],
"symlink_target": ""
}
|
from unittest import mock
from ironic.common import exception
from ironic.objects import base
from ironic.objects import fields
from ironic.objects import notification
from ironic.tests import base as test_base
class TestNotificationBase(test_base.TestCase):
@base.IronicObjectRegistry.register_if(False)
class TestObject(base.IronicObject):
VERSION = '1.0'
fields = {
'fake_field_1': fields.StringField(nullable=True),
'fake_field_2': fields.IntegerField(nullable=True)
}
@base.IronicObjectRegistry.register_if(False)
class TestObjectMissingField(base.IronicObject):
VERSION = '1.0'
fields = {
'fake_field_1': fields.StringField(nullable=True),
}
@base.IronicObjectRegistry.register_if(False)
class TestObjectMaskSecrets(base.IronicObject):
VERSION = '1.0'
fields = {
'instance_info': fields.FlexibleDictField(nullable=True),
'driver_info': fields.FlexibleDictField(nullable=True),
'driver_internal_info': fields.FlexibleDictField(nullable=True),
'some_dict': fields.FlexibleDictField(nullable=True),
}
@base.IronicObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'fake_field_a': ('test_obj', 'fake_field_1'),
'fake_field_b': ('test_obj', 'fake_field_2')
}
fields = {
'fake_field_a': fields.StringField(nullable=True),
'fake_field_b': fields.IntegerField(nullable=False),
'an_extra_field': fields.StringField(nullable=False),
'an_optional_field': fields.IntegerField(nullable=True)
}
@base.IronicObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'fake_field': fields.StringField()
}
@base.IronicObjectRegistry.register_if(False)
class TestNotificationPayloadMaskSecrets(
notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'instance_info': ('test_obj', 'instance_info'),
'driver_info': ('test_obj', 'driver_info'),
'driver_internal_info': ('test_obj', 'driver_internal_info'),
'some_dict': ('test_obj', 'some_dict'),
}
fields = {
'instance_info': fields.FlexibleDictField(nullable=True),
'driver_info': fields.FlexibleDictField(nullable=True),
'driver_internal_info': fields.FlexibleDictField(nullable=True),
'some_dict': fields.FlexibleDictField(nullable=True),
}
@base.IronicObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.IronicObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
def setUp(self):
super(TestNotificationBase, self).setUp()
self.fake_obj = self.TestObject(fake_field_1='fake1', fake_field_2=2)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type, expected_payload,
expected_publisher, notif_level):
mock_notifier.prepare.assert_called_once_with(
publisher_id=expected_publisher)
# Handler actually sending out the notification depends on the
# notification level
mock_notify = getattr(mock_notifier.prepare.return_value, notif_level)
self.assertTrue(mock_notify.called)
self.assertEqual(mock_context, mock_notify.call_args[0][0])
self.assertEqual(expected_event_type,
mock_notify.call_args[1]['event_type'])
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('ironic.common.rpc.VERSIONED_NOTIFIER', autospec=True)
def test_emit_notification(self, mock_notifier):
self.config(notification_level='debug')
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
payload.populate_schema(test_obj=self.fake_obj)
notif = self.TestNotification(
event_type=notification.EventType(
object='test_object', action='test',
status=fields.NotificationStatus.START),
level=fields.NotificationLevel.DEBUG,
publisher=notification.NotificationPublisher(
service='ironic-conductor',
host='host'),
payload=payload)
mock_context = mock.Mock()
notif.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='baremetal.test_object.test.start',
expected_payload={
'ironic_object.name': 'TestNotificationPayload',
'ironic_object.data': {
'fake_field_a': 'fake1',
'fake_field_b': 2,
'an_extra_field': 'extra',
'an_optional_field': 1
},
'ironic_object.version': '1.0',
'ironic_object.namespace': 'ironic'},
expected_publisher='ironic-conductor.host',
notif_level=fields.NotificationLevel.DEBUG)
@mock.patch('ironic.common.rpc.VERSIONED_NOTIFIER', autospec=True)
def test_no_emit_level_too_low(self, mock_notifier):
# Make sure notification doesn't emit when set notification
# level < config level
self.config(notification_level='warning')
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
payload.populate_schema(test_obj=self.fake_obj)
notif = self.TestNotification(
event_type=notification.EventType(
object='test_object', action='test',
status=fields.NotificationStatus.START),
level=fields.NotificationLevel.DEBUG,
publisher=notification.NotificationPublisher(
service='ironic-conductor',
host='host'),
payload=payload)
mock_context = mock.Mock()
notif.emit(mock_context)
self.assertFalse(mock_notifier.called)
@mock.patch('ironic.common.rpc.VERSIONED_NOTIFIER', autospec=True)
def test_no_emit_notifs_disabled(self, mock_notifier):
# Make sure notifications aren't emitted when notification_level
# isn't defined, indicating notifications should be disabled
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
payload.populate_schema(test_obj=self.fake_obj)
notif = self.TestNotification(
event_type=notification.EventType(
object='test_object', action='test',
status=fields.NotificationStatus.START),
level=fields.NotificationLevel.DEBUG,
publisher=notification.NotificationPublisher(
service='ironic-conductor',
host='host'),
payload=payload)
mock_context = mock.Mock()
notif.emit(mock_context)
self.assertFalse(mock_notifier.called)
@mock.patch('ironic.common.rpc.VERSIONED_NOTIFIER', autospec=True)
def test_no_emit_schema_not_populated(self, mock_notifier):
self.config(notification_level='debug')
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
notif = self.TestNotification(
event_type=notification.EventType(
object='test_object', action='test',
status=fields.NotificationStatus.START),
level=fields.NotificationLevel.DEBUG,
publisher=notification.NotificationPublisher(
service='ironic-conductor',
host='host'),
payload=payload)
mock_context = mock.Mock()
self.assertRaises(exception.NotificationPayloadError, notif.emit,
mock_context)
self.assertFalse(mock_notifier.called)
@mock.patch('ironic.common.rpc.VERSIONED_NOTIFIER', autospec=True)
def test_emit_notification_empty_schema(self, mock_notifier):
self.config(notification_level='debug')
payload = self.TestNotificationPayloadEmptySchema(fake_field='123')
notif = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object', action='test',
status=fields.NotificationStatus.ERROR),
level=fields.NotificationLevel.ERROR,
publisher=notification.NotificationPublisher(
service='ironic-conductor',
host='host'),
payload=payload)
mock_context = mock.Mock()
notif.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='baremetal.test_object.test.error',
expected_payload={
'ironic_object.name': 'TestNotificationPayloadEmptySchema',
'ironic_object.data': {
'fake_field': '123',
},
'ironic_object.version': '1.0',
'ironic_object.namespace': 'ironic'},
expected_publisher='ironic-conductor.host',
notif_level=fields.NotificationLevel.ERROR)
def test_populate_schema(self):
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
payload.populate_schema(test_obj=self.fake_obj)
self.assertEqual('extra', payload.an_extra_field)
self.assertEqual(1, payload.an_optional_field)
self.assertEqual(self.fake_obj.fake_field_1, payload.fake_field_a)
self.assertEqual(self.fake_obj.fake_field_2, payload.fake_field_b)
def test_populate_schema_missing_required_obj_field(self):
test_obj = self.TestObject(fake_field_1='populated')
# this payload requires missing fake_field_b
payload = self.TestNotificationPayload(an_extra_field='too extra')
self.assertRaises(exception.NotificationSchemaKeyError,
payload.populate_schema,
test_obj=test_obj)
def test_populate_schema_nullable_field_auto_populates(self):
"""Test that nullable fields always end up in the payload."""
test_obj = self.TestObject(fake_field_2=123)
payload = self.TestNotificationPayload()
payload.populate_schema(test_obj=test_obj)
self.assertIsNone(payload.fake_field_a)
def test_populate_schema_no_object_field(self):
test_obj = self.TestObjectMissingField(fake_field_1='foo')
payload = self.TestNotificationPayload()
self.assertRaises(exception.NotificationSchemaKeyError,
payload.populate_schema,
test_obj=test_obj)
def test_event_type_with_status(self):
event_type = notification.EventType(
object="some_obj", action="some_action", status="success")
self.assertEqual("baremetal.some_obj.some_action.success",
event_type.to_event_type_field())
def test_event_type_without_status_fails(self):
event_type = notification.EventType(
object="some_obj", action="some_action")
self.assertRaises(NotImplementedError,
event_type.to_event_type_field)
def test_event_type_invalid_status_fails(self):
self.assertRaises(ValueError,
notification.EventType, object="some_obj",
action="some_action", status="invalid")
def test_event_type_make_status_invalid(self):
def make_status_invalid():
event_type.status = "Roar"
event_type = notification.EventType(
object='test_object', action='test', status='start')
self.assertRaises(ValueError, make_status_invalid)
def test_mask_secrets_not_affected(self):
payload = self.TestNotificationPayload(an_extra_field='extra',
an_optional_field=1)
payload.populate_schema(test_obj=self.fake_obj)
notification.mask_secrets(payload)
self.assertEqual('extra', payload.an_extra_field)
self.assertEqual(1, payload.an_optional_field)
self.assertEqual(self.fake_obj.fake_field_1, payload.fake_field_a)
self.assertEqual(self.fake_obj.fake_field_2, payload.fake_field_b)
def test_mask_secrets_no_secrets(self):
instance_info = {'inst1': 'v1'}
driver_info = {'driver_i1': 'd1'}
driver_internal_info = {'driver_int1': 'dii1'}
some_dict = {'key1': 'v1'}
test_obj = self.TestObjectMaskSecrets(
instance_info=instance_info,
driver_info=driver_info,
driver_internal_info=driver_internal_info,
some_dict=some_dict)
payload = self.TestNotificationPayloadMaskSecrets()
payload.populate_schema(test_obj=test_obj)
notification.mask_secrets(payload)
self.assertEqual(test_obj.instance_info, payload.instance_info)
self.assertEqual(test_obj.driver_info, payload.driver_info)
self.assertEqual(test_obj.driver_internal_info,
payload.driver_internal_info)
self.assertEqual(test_obj.some_dict, payload.some_dict)
def test_mask_secrets_has_secrets(self):
instance_info = {'configdrive': 'somestuffhere',
'image_url': 'http://image_to_fetch'}
driver_info = {'password': 'some password'}
driver_internal_info = {'agent_secret_token': '123532234145'}
some_dict = {'password': 'another password'}
test_obj = self.TestObjectMaskSecrets(
instance_info=instance_info,
driver_info=driver_info,
driver_internal_info=driver_internal_info,
some_dict=some_dict)
payload = self.TestNotificationPayloadMaskSecrets()
payload.populate_schema(test_obj=test_obj)
notification.mask_secrets(payload)
self.assertNotEqual(test_obj.instance_info, payload.instance_info)
self.assertEqual('******', payload.instance_info['configdrive'])
self.assertEqual('******', payload.instance_info['image_url'])
self.assertNotEqual(test_obj.driver_info, payload.driver_info)
self.assertEqual('******', payload.driver_info['password'])
self.assertNotEqual(test_obj.driver_internal_info,
payload.driver_internal_info)
self.assertEqual('******',
payload.driver_internal_info['agent_secret_token'])
self.assertEqual(test_obj.some_dict, payload.some_dict)
|
{
"content_hash": "bb8ed612d8e7406337430138bb140143",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 79,
"avg_line_length": 43.69774011299435,
"alnum_prop": 0.6145840067231236,
"repo_name": "openstack/ironic",
"id": "255845debf757a599c5127b75557bdf04c20c832",
"size": "16042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/objects/test_notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "PowerShell",
"bytes": "1676"
},
{
"name": "Python",
"bytes": "9506176"
},
{
"name": "Shell",
"bytes": "188127"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from pybtex.richtext import Symbol, Text
from pybtex.style.formatting import BaseStyle, toplevel
from pybtex.style.template import (
field, first_of, href, join, names, optional, optional_field, sentence,
tag, together, words
)
def dashify(text):
dash_re = re.compile(r'-+')
return Text(Symbol('ndash')).join(text.split(dash_re))
pages = field('pages', apply_func=dashify)
date = words [optional_field('month'), field('year')]
class Style(BaseStyle):
def format_names(self, role, as_sentence=True):
formatted_names = names(role, sep=', ', sep2 = ' and ', last_sep=', and ')
if as_sentence:
return sentence [formatted_names]
else:
return formatted_names
def get_article_template(self, e):
volume_and_pages = first_of [
# volume and pages, with optional issue number
optional [
join [
field('volume'),
optional['(', field('number'),')'],
':', pages
],
],
# pages only
words ['pages', pages],
]
template = toplevel [
self.format_names('author'),
self.format_title(e, 'title'),
sentence [
tag('em') [field('journal')],
optional[ volume_and_pages ],
date],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def format_author_or_editor(self, e):
return first_of [
optional[ self.format_names('author') ],
self.format_editor(e),
]
def format_editor(self, e, as_sentence=True):
editors = self.format_names('editor', as_sentence=False)
if 'editor' not in e.persons:
# when parsing the template, a FieldIsMissing exception
# will be thrown anyway; no need to do anything now,
# just return the template that will throw the exception
return editors
if len(e.persons['editor']) > 1:
word = 'editors'
else:
word = 'editor'
result = join(sep=', ') [editors, word]
if as_sentence:
return sentence [result]
else:
return result
def format_volume_and_series(self, e, as_sentence=True):
volume_and_series = optional [
words [
together ['Volume' if as_sentence else 'volume', field('volume')], optional [
words ['of', field('series')]
]
]
]
number_and_series = optional [
words [
join(sep=Symbol('nbsp')) ['Number' if as_sentence else 'number', field('number')],
optional [
words ['in', field('series')]
]
]
]
series = optional_field('series')
result = first_of [
volume_and_series,
number_and_series,
series,
]
if as_sentence:
return sentence(capfirst=True) [result]
else:
return result
def format_chapter_and_pages(self, e):
return join(sep=', ') [
optional [together ['chapter', field('chapter')]],
optional [together ['pages', pages]],
]
def format_edition(self, e):
return optional [
words [
field('edition', apply_func=lambda x: x.lower()),
'edition',
]
]
def format_title(self, e, which_field, as_sentence=True):
formatted_title = field(
which_field, apply_func=lambda text: text.capitalize()
)
if as_sentence:
return sentence [ formatted_title ]
else:
return formatted_title
def format_btitle(self, e, which_field, as_sentence=True):
formatted_title = tag('em') [ field(which_field) ]
if as_sentence:
return sentence[ formatted_title ]
else:
return formatted_title
def format_address_organization_publisher_date(
self, e, include_organization=True):
"""Format address, organization, publisher, and date.
Everything is optional, except the date.
"""
# small difference from unsrt.bst here: unsrt.bst
# starts a new sentence only if the address is missing;
# for simplicity here we always start a new sentence
if include_organization:
organization = optional_field('organization')
else:
organization = None
return first_of[
# this will be rendered if there is an address
optional [
join(sep=' ') [
sentence[
field('address'),
date,
],
sentence[
organization,
optional_field('publisher'),
],
],
],
# if there is no address then we have this
sentence[
organization,
optional_field('publisher'),
date,
],
]
def get_book_template(self, e):
template = toplevel [
self.format_author_or_editor(e),
self.format_btitle(e, 'title'),
self.format_volume_and_series(e),
sentence [
field('publisher'),
optional_field('address'),
self.format_edition(e),
date
],
optional[ sentence [ self.format_isbn(e) ] ],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_booklet_template(self, e):
template = toplevel [
self.format_names('author'),
self.format_title(e, 'title'),
sentence [
optional_field('howpublished'),
optional_field('address'),
date,
optional_field('note'),
],
self.format_web_refs(e),
]
return template
def get_inbook_template(self, e):
template = toplevel [
self.format_author_or_editor(e),
sentence [
self.format_btitle(e, 'title', as_sentence=False),
self.format_chapter_and_pages(e),
],
self.format_volume_and_series(e),
sentence [
field('publisher'),
optional_field('address'),
optional [
words [field('edition'), 'edition']
],
date,
optional_field('note'),
],
self.format_web_refs(e),
]
return template
def get_incollection_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_title(e, 'title'),
words [
'In',
sentence [
optional[ self.format_editor(e, as_sentence=False) ],
self.format_btitle(e, 'booktitle', as_sentence=False),
self.format_volume_and_series(e, as_sentence=False),
self.format_chapter_and_pages(e),
],
],
sentence [
optional_field('publisher'),
optional_field('address'),
self.format_edition(e),
date,
],
self.format_web_refs(e),
]
return template
def get_inproceedings_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_title(e, 'title'),
words [
'In',
sentence [
optional[ self.format_editor(e, as_sentence=False) ],
self.format_btitle(e, 'booktitle', as_sentence=False),
self.format_volume_and_series(e, as_sentence=False),
optional[ pages ],
],
self.format_address_organization_publisher_date(e),
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_manual_template(self, e):
# TODO this only corresponds to the bst style if author is non-empty
# for empty author we should put the organization first
template = toplevel [
optional [ sentence [ self.format_names('author') ] ],
self.format_btitle(e, 'title'),
sentence [
optional_field('organization'),
optional_field('address'),
self.format_edition(e),
optional[ date ],
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_mastersthesis_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_title(e, 'title'),
sentence[
"Master's thesis",
field('school'),
optional_field('address'),
date,
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_misc_template(self, e):
template = toplevel [
optional[ sentence [self.format_names('author')] ],
optional[ self.format_title(e, 'title') ],
sentence[
optional[ field('howpublished') ],
optional[ date ],
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_online_template(self, e):
return self.get_misc_template(e)
def get_phdthesis_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_btitle(e, 'title'),
sentence[
first_of [
optional_field('type'),
'PhD thesis',
],
field('school'),
optional_field('address'),
date,
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_proceedings_template(self, e):
if 'editor' in e.persons:
main_part = [
self.format_editor(e),
sentence [
self.format_btitle(e, 'title', as_sentence=False),
self.format_volume_and_series(e, as_sentence=False),
self.format_address_organization_publisher_date(e),
],
]
else:
main_part = [
optional [ sentence [ field('organization') ] ],
sentence [
self.format_btitle(e, 'title', as_sentence=False),
self.format_volume_and_series(e, as_sentence=False),
self.format_address_organization_publisher_date(
e, include_organization=False),
],
]
template = toplevel [
main_part + [
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
]
return template
def get_techreport_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_title(e, 'title'),
sentence [
words[
first_of [
optional_field('type'),
'Technical Report',
],
optional_field('number'),
],
field('institution'),
optional_field('address'),
date,
],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
def get_unpublished_template(self, e):
template = toplevel [
sentence [self.format_names('author')],
self.format_title(e, 'title'),
sentence [
field('note'),
optional[ date ]
],
self.format_web_refs(e),
]
return template
def format_web_refs(self, e):
# based on urlbst output.web.refs
return sentence [
optional [ self.format_url(e),
optional [ ' (visited on ', field('urldate'), ')' ] ],
optional [ self.format_eprint(e) ],
optional [ self.format_pubmed(e) ],
optional [ self.format_doi(e) ],
]
def format_url(self, e):
# based on urlbst format.url
url = field('url', raw=True)
return words [
'URL:',
href(url) [ url ]
]
def format_pubmed(self, e):
# based on urlbst format.pubmed
url = join [ 'https://www.ncbi.nlm.nih.gov/pubmed/', field('pubmed', raw=True) ]
return href(url) [
join [
'PMID:',
field('pubmed', raw=True)
]
]
def format_doi(self, e):
# based on urlbst format.doi
url = join [ 'https://doi.org/', field('doi', raw=True) ]
return href(url) [
join [
'doi:',
field('doi', raw=True)
]
]
def format_eprint(self, e):
# based on urlbst format.eprint
url = join [ 'https://arxiv.org/abs/', field('eprint', raw=True) ]
return href(url) [
join [
'arXiv:',
field('eprint', raw=True)
]
]
def format_isbn(self, e):
return join(sep=' ') [ 'ISBN', field('isbn') ]
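# Illustrative use of this style through the pybtex API (comments only; the
# .bib path is a placeholder):
#   from pybtex.database import parse_file
#   style = Style()
#   for entry in style.format_entries(parse_file('refs.bib').entries.values()):
#       print(entry.text.render_as('text'))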
|
{
"content_hash": "59fce48540172d0eb80575337a7b3957",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 98,
"avg_line_length": 32.35374149659864,
"alnum_prop": 0.4761003644519204,
"repo_name": "live-clones/pybtex",
"id": "fd48230d9bcd6e5bd98896b4cfdd24d92319ce6a",
"size": "15372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybtex/style/formatting/unsrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "796787"
},
{
"name": "TeX",
"bytes": "25253"
}
],
"symlink_target": ""
}
|
SHARED_DATA_USERNAME = "admin_user"
SHARED_DATA_PASSWORD = "admin_password"
|
{
"content_hash": "35264abbf9da29ff9b062d251e93aec0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 39,
"avg_line_length": 38,
"alnum_prop": 0.7631578947368421,
"repo_name": "telerik/cloudbase-init",
"id": "66ec8cfb17446f7b5b8113b1c6f9365ff3713cc1",
"size": "737",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudbaseinit/plugins/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "684477"
}
],
"symlink_target": ""
}
|
import os
import csv
import pickle
from functools import wraps
from io import StringIO
from operator import itemgetter
from flask.ext.heroku import Heroku
from flask import (
Flask, Response, redirect, request, render_template, send_from_directory,
url_for
)
import youtube
app = Flask(__name__)
class AuthError(Exception):
pass
EXCEPTIONS = {'authError': AuthError}
def login_required(func):
@wraps(func)
def wrapper(*args, **kw):
session = get_session()
return (
func(session, *args, **kw)
if session
else
redirect(url_for('auth'))
)
return wrapper
@app.route("/auth")
def auth():
return redirect(youtube.get_authorize_url())
@app.route('/oauth2callback')
def oauth2callback():
code = request.args.get('code')
session = youtube.get_auth_session(code)
resp = redirect(url_for('index'))
resp.set_cookie('session', pickle.dumps(session).decode('latin-1'))
return resp
def paginate(func, *args, **kw):
data = {'nextPageToken'} # just so it enters the loop initially
while 'nextPageToken' in data:
data = func(*args, **kw).json()
error = data.get('error')
for error in data.get('error', {}).get('errors', []):
error_type = EXCEPTIONS.get(error['reason'], Exception)
raise error_type(error['message'])
kw['params']['pageToken'] = data.get('nextPageToken')
yield from data['items']
def get_subs(session):
return paginate(
session.get,
'https://www.googleapis.com/youtube/v3/subscriptions',
params={
'part': 'id,snippet,contentDetails',
'mine': True,
'maxResults': 50
}
)
def get_session():
session = request.cookies.get('session')
return (
pickle.loads(session.encode('latin-1'))
if session
else None
)
def to_csv(headers, data):
fh = StringIO()
writ = csv.DictWriter(fh, headers)
writ.writeheader()
writ.writerows(data)
return Response(fh.getvalue(), mimetype='application/csv')
@app.route('/subscriptions.csv')
@login_required
def subscriptions_csv(session):
items = get_subs(session)
snippets = map(itemgetter('snippet'), items)
snippets = sorted(snippets, key=itemgetter('title'))
return to_csv(
['title', 'url'],
(
{
'title': snip['title'],
'url': 'https://www.youtube.com/channel/{}'.format(
snip['resourceId']['channelId']
)
}
for snip in snippets
)
)
@app.route('/subscriptions')
@login_required
def subscriptions(session):
try:
items = list(get_subs(session))
except AuthError:
return redirect(url_for('auth'))
return render_template(
'subscriptions.html',
items=sorted(items, key=lambda q: q['snippet']['title'])
)
@app.route('/logout')
def logout():
resp = redirect(url_for('index'))
resp.delete_cookie('session')
return resp
@app.route('/')
def index():
return (
redirect(url_for('subscriptions'))
if get_session()
else
render_template('index.html')
)
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
if __name__ == "__main__":
Heroku().init_app(app)
port = os.environ.get('PORT', 5000)
app.run(debug=True, host="0.0.0.0", port=int(port))
|
{
"content_hash": "175739787cf0751fe7c51fc2e58f567b",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 77,
"avg_line_length": 21.041666666666668,
"alnum_prop": 0.5889674681753889,
"repo_name": "Mause/yt_sub",
"id": "e81c4f203036cff46844a0382bc3629cb23dac67",
"size": "3583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_subscriptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "HTML",
"bytes": "2938"
},
{
"name": "Python",
"bytes": "5171"
}
],
"symlink_target": ""
}
|
import random, unittest, collections
from treedict import TreeDict, getTree
import treedict
from copy import deepcopy, copy
from hashlib import md5
import random
from common import *
class TestBadValues(unittest.TestCase):
def test_KeyNotEmpty(self):
p = sample_tree()
self.assertRaises(NameError, lambda: p.set("", 0))
def test_KeyAttributeName(self):
p = sample_tree()
self.assertRaises(NameError, lambda: p.set("0393", 0))
def test_GetBadName_01(self):
p = sample_tree()
self.assert_(KeyError, lambda: p.get("0393"))
def test_GetBadName_02(self):
p = sample_tree()
self.assert_(KeyError, lambda: p.get("0393.a.b.c"))
def test_GetBadName_03(self):
p = sample_tree()
self.assert_(KeyError, lambda: p.get("0393.a.b.c"))
def test_getattr_BadName_01(self): # ??????
p = sample_tree()
self.assert_(KeyError, lambda: p.a_nonexistant)
def testAttackWithNone_prune(self):
p = makeTDInstance()
self.assertRaises(TypeError, lambda: p.prune(None))
def testAttackWithNone_attach(self):
p = sample_tree()
self.assertRaises(TypeError, lambda: p.attach(None))
def testAttackWithNone_equals(self):
p = sample_tree()
self.assert_(p != None)
def testAttackWithNone_getClosest(self):
p = sample_tree()
self.assertRaises(TypeError, lambda: p.getClosest(None))
def testAttackWithNone_branch(self):
p = sample_tree()
self.assertRaises(TypeError, lambda: p.makeBranch(None))
def testAttackWithNone_fullNameOf(self):
p = sample_tree()
self.assertRaises(TypeError, lambda: p.fullNameOf(None))
def testAttackWithNone_hash(self):
p = sample_tree()
self.assertRaises(TypeError, lambda: p.hash(keys=[None]))
def testBadKey_01_hash(self):
p = sample_tree()
self.assertRaises(KeyError, lambda: p.hash(key="keynothere"))
def testBadKey_02_hash(self):
p = sample_tree()
self.assertRaises(KeyError, lambda: p.hash(keys=["keynothere"]))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "788a15af05d76c98c8b56ee5066636dd",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 72,
"avg_line_length": 28.6375,
"alnum_prop": 0.633347883020515,
"repo_name": "hoytak/treedict",
"id": "2c6bf031fcb6ec73a0745e00f7f6abd7922484b4",
"size": "3840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_badvalues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "402"
},
{
"name": "Python",
"bytes": "377116"
},
{
"name": "Shell",
"bytes": "459"
}
],
"symlink_target": ""
}
|
"""A wrapper around ggplot2 ( http://had.co.nz/ggplot2/ ) and
plotnine (https://plotnine.readthedocs.io/en/stable/index.html )
Takes a pandas.DataFrame object, then add layers with the various add_xyz
functions (e.g. add_scatter).
Refer to the ggplot/plotnine documentation about the layers (geoms), and simply
replace geom_* with add_*.
See http://docs.ggplot2.org/0.9.3.1/index.html or
https://plotnine.readthedocs.io/en/stable/index.html
You do not need to separate aesthetics from values - the wrapper
will treat a parameter as a value if and only if it is not a column name
(so y = 0 is a value, color = 'blue' is a value - except if you have a column
'blue', then it's a column! And y = 'value' doesn't work, but that seems to be a ggplot issue).
When the DataFrame is passed to the plotting library:
- row indices are turned into columns with 'reset_index'
- multi level column indices are flattened by concatenating them with ' '
-> ('X', 'mean') becomes 'X mean'
R error messages are not great - most of them translate to 'one or more columns were not found',
but they can appear as a lot of different actual messages such as
- argument "env" is missing, with no default
- object 'y' not found
- object 'dat_0' not found
- requires the following missing aesthetics: x
- non numeric argument to binary operator
without actually pointing at the offending value.
Also, the error appears when rendering (or printing in an ipython notebook),
not when adding the layer.
"""
#from .plot_r import Plot, plot_heatmap, multiplot, MultiPagePlot, convert_dataframe_to_r
from .base import _PlotBase
#from . import plot_nine
from .plot_nine import Plot, Expression, Scalar
all = [Plot, Expression, Scalar]
|
{
"content_hash": "8d70ef641fa9ff5c144735d7f52b6b05",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 96,
"avg_line_length": 46.55263157894737,
"alnum_prop": 0.7377049180327869,
"repo_name": "TyberiusPrime/pyggplot",
"id": "3bfe10706172220fc2b3b93f4b334eec437e17b1",
"size": "3306",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/pyggplot/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "342805"
}
],
"symlink_target": ""
}
|
import xlrd
def open_db(filename):
try:
data = xlrd.open_workbook(filename)
return data
except Exception, e:
print str(e)
def read_excel(filename):
data = open_db(filename)
table = data.sheet_by_index(0)
# rows
nrows = table.nrows
# columns
ncols = table.ncols
# print "load original dataset..."
items = []
for i in range(nrows):
for j in range(ncols):
items.append(table.cell_value(i, j))
print items
#print len(items)
return items
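# Minimal usage sketch; the workbook name below is a placeholder.
if __name__ == '__main__':
    sample_items = read_excel('example_dataset.xls')
    print 'loaded %d cells in total' % len(sample_items)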
|
{
"content_hash": "bad8f5992166c77f7a7347327f81ba18",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 48,
"avg_line_length": 20.96153846153846,
"alnum_prop": 0.5798165137614679,
"repo_name": "ClarkYan/msc-thesis",
"id": "8d27404f44d313f22aaaf726c949b428904d1862",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/shtp1/load_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "1874"
},
{
"name": "Python",
"bytes": "68708"
}
],
"symlink_target": ""
}
|
import random
import numpy as np
import math
class gpsarsa:
def __init__(self, actions, nstates=0, epsilon=0.1, gamma=0.9, sigma=5, p=4):
self.mean_q = {}
self.cov_q = {}
self.states_vectors = {} # states vector representation
self.action_vectors = {} # actions vector representation
self.pairs = []
self.actions = actions
self.nstates = nstates # num of states
self.epsilon = epsilon
self.gamma = gamma
self.sigma = sigma # Gaussian noise
self.p = p
self.rewards = [0, -1]
self.Kmatrix = None
self.Hmatrix = None
pass
def getMeanQ(self, state, action):
return self.mean_q.get((state, action), 0.0)
def getCovQ(self, state, action):
return self.cov_q.get((state, action), 0.0)
def startEpisode(self, s_state, s_action):
pair = (s_state, s_action)
# self.pairs.append(pair)
self.Kmatrix = np.array([[self.kernel(pair, pair)]])
self.Hmatrix = np.array([[1, -1*self.gamma]])
pass
def learn(self, state1, action1, reward, state2, action2):
pair = (state2, action2)
self.pairs.append(pair)
# expand H matrix
H = self.Hmatrix
t = len(H[0])-1
H = np.hstack([H, np.zeros((t, 1))])
H = np.vstack([H, np.zeros((1, t+2))])
H[t][t] = 1
H[t][t+1] = -1*self.gamma
self.Hmatrix = H
# calculate k vector for later use
kvector = self.kernel_vector(pair)
# expand K matrix
K = self.Kmatrix
t = len(K)-1
K = np.hstack([K, np.transpose([kvector])])
K = np.vstack([K, [np.append(kvector, 0)]])
knew = self.kernel(pair, pair)
K[t+1][t+1] = knew
kvector = np.append(kvector, knew)
self.Kmatrix = K
# append reward
self.rewards.append(reward)
# calculate Q-function posterior
self.calcQposterior(pair, kvector, H, K, self.rewards)
pass
# terminal step in episode
def endEpisode(self):
pass
def chooseAction(self, state):
if random.random() < self.epsilon:
action = random.choice(self.actions)
else:
q = [self.getMeanQ(state, a) for a in self.actions]
maxQ = min(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
return action
def kernel_vector(self, pair):
V = np.zeros(len(self.pairs))
for i, pair0 in enumerate(self.pairs):
print 'i: %d' % i
V[i] = self.kernel(pair0, pair)
return V
def kernel(self, pair1, pair2):
# state kernel (s, s') * action kernel (a, a')
state1 = pair1[0]
action1 = pair1[1]
state2 = pair2[0]
action2 = pair2[1]
state1vec = self.stateToVector(state1)
state2vec = self.stateToVector(state2)
action1vec = self.actionToVector(action1)
action2vec = self.actionToVector(action2)
ka = self.gaussian_kernel(action1vec, action2vec)
ks = self.gaussian_kernel(state1vec, state2vec)
return ks * ka
def calcQposterior(self, pair, kvec, H, K, rvec):
# H*K*H_trans
HT = np.transpose(H)
hkh = np.dot(np.dot(HT, K), H)
# sigma^2*H*H_trans
shh = math.pow(self.sigma, 2) * np.dot(HT, H)
W = np.linalg.inv(np.add(hkh, shh))
val = np.dot(np.dot(H, W), rvec)
mu = np.dot(np.transpose(kvec), val)
self.mean_q[pair] = mu
pass
def gaussian_kernel(self, vec1, vec2):
dist = np.linalg.norm(vec1-vec2)
val = -1 * dist/(2*math.pow(self.sigma, 2))
return math.pow(self.p, 2) * math.exp(val)
def stateToVector(self, state):
if self.states_vectors.get(state) is None:
v = np.zeros(self.nstates)
v[len(self.states_vectors)] = 1
self.states_vectors[state] = v
return self.states_vectors[state]
def actionToVector(self, action):
if len(self.action_vectors) == 0:
for i, act in enumerate(self.actions):
v = np.zeros(len(self.actions))
v[i] = 1
self.action_vectors[act] = v
return self.action_vectors[action]
def main():
    # small smoke test: build an agent and print the one-hot action encodings
    actions = (1, 2, 3, 4)
    agent = gpsarsa(actions, 48)
    # for j in range(4):
    #     for i in range(12):
    #         print agent.stateToVector((i, j))
    # print 'states count: %d' % len(agent.states_vectors)
    for i in range(4):
        print agent.actionToVector(i+1)
if __name__ == "__main__":
main()
|
{
"content_hash": "b51b6097f7079660cc15776a11d982aa",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 81,
"avg_line_length": 30.81761006289308,
"alnum_prop": 0.5424489795918367,
"repo_name": "hkhpub/cliff-walking",
"id": "bc1408c8971cc1067c68093f041405a99a8785e3",
"size": "4900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpsarsa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32570"
}
],
"symlink_target": ""
}
|
#exclusions = ('__weakref__', # special-members
#'__doc__', '__module__', '__dict__', # undoc-members
#)
#exclude = name in exclusions
#inclusions = ('_src')
#include = name in inclusions
#if include:
#print app, what, name, obj, skip, options
#return False
#return skip or exclude
#def setup(app):
##app.connect('autodoc-process-docstring', cut_lines(2))
##app.connect('autodoc_default_flags', autodoc_default_flags)
##app.connect('autodoc_member_order', autodoc_member_order)
#app.connect('autodoc-skip-member', autodoc_skip_member)
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
    import IPython
    print "ipython: %s, %s" % (IPython.__version__, IPython.__file__)
except ImportError:
print "no ipython"
try:
import sphinx
print "sphinx: %s, %s" % (sphinx.__version__, sphinx.__file__)
except ImportError:
print "no sphinx"
print "sys.path:", sys.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('../GPy'))
#print "sys.path.after:", sys.path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('./sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
extensions = ['sphinx.ext.autodoc',
#'sphinx.ext.doctest'
'sphinx.ext.viewcode',
'sphinx.ext.pngmath',
'ipython_directive',
'ipython_console_highlighting'
#'matplotlib.sphinxext.plot_directive'
]
plot_formats = [('png', 80), ('pdf', 50)]
print "finished importing"
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
#############################################################################
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
#import mock
print "Mocking"
MOCK_MODULES = ['sympy',
'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache',
'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser', 'Tango', 'numdifftools'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# ----------------------- READTHEDOCS ------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#on_rtd = True
if on_rtd:
sys.path.append(os.path.abspath('../GPy'))
import subprocess
proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
proc = subprocess.Popen("ls ../", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#Lets regenerate our rst files from the source, -P adds private modules (i.e kern._src)
proc = subprocess.Popen("sphinx-apidoc -P -f -o . ../GPy", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#proc = subprocess.Popen("whereis numpy", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
#proc = subprocess.Popen("whereis matplotlib", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
print "Compiled files"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\\usepackage{MnSymbol}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GPy.tex', u'GPy Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gpy', u'GPy Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GPy', u'GPy Documentation',
u'Author', 'GPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
{
"content_hash": "0a1b4d44f1288eb3f222d68bdf7ff8bd",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 98,
"avg_line_length": 31.773399014778324,
"alnum_prop": 0.663953488372093,
"repo_name": "fivejjs/GPy",
"id": "91a6c75b164a628847bec427e1ced7b73283d392",
"size": "13613",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "707"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "1415164"
}
],
"symlink_target": ""
}
|
"""
Parse FindBugs -xml:withMessages output.
FindBugs XML output is pretty straightforward: a sequence of <BugInstance> tags,
with type and rank (severity) attributes. Each has one or more <SourceLine>
tags; if more, one will have an attribute of primary="true". However, it groups
nearby bugs of the same type via another <SourceLine> with
role="SOURCE_LINE_ANOTHER_INSTANCE". Also, each <SourceLine> can be a range of
lines, so we just pick the first.
The only other issues are that FindBugs works on compiled binaries, so the
source file names may not line up with your repo blame filenames; and it
sometimes outputs duplicate bugs.
"""
from xml.etree import ElementTree
from itertools import ifilter
from blamethrower import Analyne
__all__ = ['analyze', 'HELP', 'OPTIONS']
HELP = 'FindBugs -xml:withMessages'
OPTIONS = {'prefix': 'add path prefix to FindBugs filenames'}
def rank2severity(rank):
""":Return: the BlameThrower severity for a bug of the given `rank`."""
if 1 <= rank <= 4:
return 'high'
elif rank <= 12:
return 'med'
elif rank <= 20:
return 'low'
else:
assert False
def analyze(bugsfile, prefix=''):
""":Return: an iterable of :class:`Analyne` objects read from FindBugs
-xml:withMessages file `bugsfile`.
:param str prefix: A path prefix to prepend to every filename.
"""
for _, bug in ifilter(lambda event_elt: event_elt[1].tag == 'BugInstance', ElementTree.iterparse(bugsfile)):
bugtype = bug.get('type')
severity = rank2severity(int(bug.get('rank')))
sourcelines = bug.findall('SourceLine')
assert sourcelines, "No SourceLine for bug: {}".format(bug.attrib)
if len(sourcelines) > 1:
sourcelines = [line for line in sourcelines if line.attrib.get('primary') or line.attrib.get('role') == 'SOURCE_LINE_ANOTHER_INSTANCE']
assert sourcelines, "No SourceLine for bug: {}".format(bug.attrib)
for sourceline in sourcelines:
filename = prefix + sourceline.get('sourcepath')
linenum = sourceline.get('start')
yield Analyne(filename, int(linenum), bugtype, severity, None)
bug.clear()
|
{
"content_hash": "f0e30ad382a0cfd3e1a75b93a23d4c64",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 147,
"avg_line_length": 39.25,
"alnum_prop": 0.6801637852593266,
"repo_name": "jkleint/blamethrower",
"id": "8b1ac351b23d1b27de0225d4636ca667404beb26",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blamethrower/analyzers/findbugs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42702"
},
{
"name": "Shell",
"bytes": "5976"
}
],
"symlink_target": ""
}
|
import numpy as np
from . import Graph # prevent circular import in Python < 3.5
class FullConnected(Graph):
r"""Fully connected graph.
    All weights are set to 1. There are no self-connections.
Parameters
----------
N : int
Number of vertices (default = 10)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.FullConnected(N=20)
>>> G.set_coordinates(kind='spring', seed=42)
>>> fig, axes = plt.subplots(1, 2)
>>> _ = axes[0].spy(G.W, markersize=5)
>>> _ = G.plot(ax=axes[1])
"""
def __init__(self, N=10, **kwargs):
W = np.ones((N, N)) - np.identity(N)
plotting = {'limits': np.array([-1, 1, -1, 1])}
super(FullConnected, self).__init__(W, plotting=plotting, **kwargs)
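# For example (not part of the library itself), with N=3 the construction above
# yields
#
#   W = [[0., 1., 1.],
#        [1., 0., 1.],
#        [1., 1., 0.]]
#
# i.e. every pair of distinct vertices is connected with weight 1.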
|
{
"content_hash": "d5faf62718e51089ff061dbaae36afaf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 24.625,
"alnum_prop": 0.5609137055837563,
"repo_name": "epfl-lts2/pygsp",
"id": "0f4c1deee0abcc9a06e09a784b417af3ca0d8c1e",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygsp/graphs/fullconnected.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2796"
},
{
"name": "Makefile",
"bytes": "966"
},
{
"name": "Python",
"bytes": "392112"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
}
|
from setuptools import setup, Extension
long_description = open('README.rst').read()
setup(name="jellyfish",
version="0.1.2",
platforms=["any"],
description=("a library for doing approximate and "
"phonetic matching of strings."),
url="http://github.com/sunlightlabs/jellyfish",
long_description=long_description,
classifiers=["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Text Processing :: Linguistic"],
ext_modules=[Extension("jellyfish", ['jellyfishmodule.c', 'jaro.c',
'hamming.c', 'levenshtein.c',
'damerau_levenshtein.c', 'mra.c',
'soundex.c', 'metaphone.c',
'nysiis.c', 'porter.c'])])
|
{
"content_hash": "1527bab147a81575c4c1868665caff07",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 48.69565217391305,
"alnum_prop": 0.49464285714285716,
"repo_name": "sdiehl/pycraig",
"id": "a966cc92b4fb5662e38a840eaf409208e6baa92a",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycraig/jellyfish/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42740"
},
{
"name": "Python",
"bytes": "13173"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from rest_framework import routers
from inventory.api.views import HostViewSet, InterfaceViewSet, IPAddressViewSet
from api.views import InventoryRootView
router = routers.DefaultRouter()
# rewrite default view name
router.root_view_name = 'inventory-root'
router.APIRootView = InventoryRootView
router.register(r'hosts', HostViewSet)
router.register(r'interfaces', InterfaceViewSet)
router.register(r'ip_address', IPAddressViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
]
|
{
"content_hash": "9a1f1beaae49d74c5b19535b5318077d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.7919254658385093,
"repo_name": "H0neyBadger/cmdb",
"id": "6eb373c4cc6cdea7352d3b9ae7ba7d5b500267c8",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41045"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
}
|
"""Basic tests for basic deterministic model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.models.video import basic_recurrent
from tensor2tensor.models.video import tests_utils
import tensorflow as tf
class NextFrameTest(tests_utils.BaseNextFrameTest):
def testBasicDeterministic(self):
self.TestOnVariousInputOutputSizes(
basic_recurrent.next_frame_basic_recurrent(),
basic_recurrent.NextFrameBasicRecurrent,
256,
False)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "8fe6f210b775ffe8809f6fe31e37cd12",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 54,
"avg_line_length": 26,
"alnum_prop": 0.7357859531772575,
"repo_name": "mlperf/training_results_v0.5",
"id": "d9deb5c987af37f19de9984ac8c19b6417335e58",
"size": "1204",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/models/video/basic_recurrent_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
from novaclient.v1_1 import aggregates
from novaclient.tests import utils
from novaclient.tests.v1_1 import fakes
cs = fakes.FakeClient()
class AggregatesTest(utils.TestCase):
def test_list_aggregates(self):
result = cs.aggregates.list()
cs.assert_called('GET', '/os-aggregates')
for aggregate in result:
self.assertTrue(isinstance(aggregate, aggregates.Aggregate))
def test_create_aggregate(self):
body = {"aggregate": {"name": "test", "availability_zone": "nova1"}}
aggregate = cs.aggregates.create("test", "nova1")
cs.assert_called('POST', '/os-aggregates', body)
self.assertTrue(isinstance(aggregate, aggregates.Aggregate))
def test_get(self):
aggregate = cs.aggregates.get("1")
cs.assert_called('GET', '/os-aggregates/1')
self.assertTrue(isinstance(aggregate, aggregates.Aggregate))
aggregate2 = cs.aggregates.get(aggregate)
cs.assert_called('GET', '/os-aggregates/1')
self.assertTrue(isinstance(aggregate2, aggregates.Aggregate))
def test_get_details(self):
aggregate = cs.aggregates.get_details("1")
cs.assert_called('GET', '/os-aggregates/1')
self.assertTrue(isinstance(aggregate, aggregates.Aggregate))
aggregate2 = cs.aggregates.get_details(aggregate)
cs.assert_called('GET', '/os-aggregates/1')
self.assertTrue(isinstance(aggregate2, aggregates.Aggregate))
def test_update(self):
aggregate = cs.aggregates.get("1")
values = {"name": "foo"}
body = {"aggregate": values}
result1 = aggregate.update(values)
cs.assert_called('PUT', '/os-aggregates/1', body)
self.assertTrue(isinstance(result1, aggregates.Aggregate))
result2 = cs.aggregates.update(2, values)
cs.assert_called('PUT', '/os-aggregates/2', body)
self.assertTrue(isinstance(result2, aggregates.Aggregate))
def test_update_with_availability_zone(self):
aggregate = cs.aggregates.get("1")
values = {"name": "foo", "availability_zone": "new_zone"}
body = {"aggregate": values}
result3 = cs.aggregates.update(aggregate, values)
cs.assert_called('PUT', '/os-aggregates/1', body)
self.assertTrue(isinstance(result3, aggregates.Aggregate))
def test_add_host(self):
aggregate = cs.aggregates.get("1")
host = "host1"
body = {"add_host": {"host": "host1"}}
result1 = aggregate.add_host(host)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result1, aggregates.Aggregate))
result2 = cs.aggregates.add_host("2", host)
cs.assert_called('POST', '/os-aggregates/2/action', body)
self.assertTrue(isinstance(result2, aggregates.Aggregate))
result3 = cs.aggregates.add_host(aggregate, host)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result3, aggregates.Aggregate))
def test_remove_host(self):
aggregate = cs.aggregates.get("1")
host = "host1"
body = {"remove_host": {"host": "host1"}}
result1 = aggregate.remove_host(host)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result1, aggregates.Aggregate))
result2 = cs.aggregates.remove_host("2", host)
cs.assert_called('POST', '/os-aggregates/2/action', body)
self.assertTrue(isinstance(result2, aggregates.Aggregate))
result3 = cs.aggregates.remove_host(aggregate, host)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result3, aggregates.Aggregate))
def test_set_metadata(self):
aggregate = cs.aggregates.get("1")
metadata = {"foo": "bar"}
body = {"set_metadata": {"metadata": metadata}}
result1 = aggregate.set_metadata(metadata)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result1, aggregates.Aggregate))
result2 = cs.aggregates.set_metadata(2, metadata)
cs.assert_called('POST', '/os-aggregates/2/action', body)
self.assertTrue(isinstance(result2, aggregates.Aggregate))
result3 = cs.aggregates.set_metadata(aggregate, metadata)
cs.assert_called('POST', '/os-aggregates/1/action', body)
self.assertTrue(isinstance(result3, aggregates.Aggregate))
def test_delete_aggregate(self):
aggregate = cs.aggregates.list()[0]
aggregate.delete()
cs.assert_called('DELETE', '/os-aggregates/1')
cs.aggregates.delete('1')
cs.assert_called('DELETE', '/os-aggregates/1')
cs.aggregates.delete(aggregate)
cs.assert_called('DELETE', '/os-aggregates/1')
|
{
"content_hash": "76979e963d48162564dfba29e474cfd1",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 76,
"avg_line_length": 39.26829268292683,
"alnum_prop": 0.6459627329192547,
"repo_name": "citrix-openstack-build/python-novaclient",
"id": "bbd7f2c10e0316d3eab81005808662e412a79542",
"size": "5466",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "novaclient/tests/v1_1/test_aggregates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "837455"
},
{
"name": "Shell",
"bytes": "4466"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import itertools
import os
from lib.core.common import checkFile
from lib.core.common import parseXmlFile
from lib.core.data import kb
from lib.core.data import paths
from lib.parse.handler import FingerprintHandler
def headersParser(headers):
"""
This function calls a class that parses the input HTTP headers to
    fingerprint the back-end database management system, operating system
and the web application technology
"""
if not kb.headerPaths:
kb.headerPaths = {
"cookie": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "cookie.xml"),
"microsoftsharepointteamservices": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "sharepoint.xml"),
"server": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "server.xml"),
"servlet-engine": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "servlet.xml"),
"set-cookie": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "cookie.xml"),
"x-aspnet-version": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "x-aspnet-version.xml"),
"x-powered-by": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "x-powered-by.xml"),
}
for header in itertools.ifilter(lambda x: x in kb.headerPaths, headers):
value = headers[header]
xmlfile = kb.headerPaths[header]
checkFile(xmlfile)
handler = FingerprintHandler(value, kb.headersFp)
parseXmlFile(xmlfile, handler)
parseXmlFile(paths.GENERIC_XML, handler)
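# Illustrative only -- the kind of header dict this function expects (the values
# are made up); each recognised header name is matched against its XML signature
# file. Note the keys must match the lowercase names in kb.headerPaths:
#
#   headersParser({
#       "server": "Apache/2.2.22 (Ubuntu)",
#       "x-powered-by": "PHP/5.3.10",
#   })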
|
{
"content_hash": "f2d2f0834ff74190032a27db23841c7d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 114,
"avg_line_length": 40.80952380952381,
"alnum_prop": 0.6371061843640606,
"repo_name": "JeyZeta/Dangerous",
"id": "7384b14b9b6869b536aa0eb2038f624163f3065d",
"size": "1737",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/lib/parse/headers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
}
|
"""Tests for perfkitbenchmarker.vm_util."""
import os
import psutil
import subprocess
import threading
import time
import unittest
import mock
from perfkitbenchmarker import vm_util
class ShouldRunOnInternalIpAddressTestCase(unittest.TestCase):
def setUp(self):
p = mock.patch(vm_util.__name__ + '.FLAGS')
self.flags = p.start()
self.flags_patch = p
self.sending_vm = mock.MagicMock()
self.receiving_vm = mock.MagicMock()
def tearDown(self):
self.flags_patch.stop()
def _RunTest(self, expectation, ip_addresses, is_reachable=True):
self.flags.ip_addresses = ip_addresses
self.sending_vm.IsReachable.return_value = is_reachable
self.assertEqual(
expectation,
vm_util.ShouldRunOnInternalIpAddress(
self.sending_vm, self.receiving_vm))
def testExternal_Reachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, True)
def testExternal_Unreachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, False)
def testInternal_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, True)
def testInternal_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, False)
def testBoth_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, True)
def testBoth_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, False)
def testReachable_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.REACHABLE, True)
def testReachable_Unreachable(self):
self._RunTest(
False, vm_util.IpAddressSubset.REACHABLE, False)
def HaveSleepSubprocess():
"""Checks if the current process has a sleep subprocess."""
for child in psutil.Process(os.getpid()).children(recursive=True):
if 'sleep' in child.cmdline():
return True
return False
class WaitUntilSleepTimer(threading.Thread):
"""Timer that waits for a sleep subprocess to appear.
This is intended for specific tests that want to trigger timer
expiry as soon as it detects that a subprocess is executing a
"sleep" command.
It assumes that the test driver is not parallelizing the tests using
this method since that may lead to inconsistent results.
TODO(klausw): If that's an issue, could add a unique fractional part
to the sleep command args to distinguish them.
"""
def __init__(self, interval, function):
threading.Thread.__init__(self)
self.end_time = time.time() + interval
self.function = function
self.finished = threading.Event()
self.have_sleep = threading.Event()
def WaitForSleep():
while not self.finished.is_set():
if HaveSleepSubprocess():
self.have_sleep.set()
break
time.sleep(0) # yield to other Python threads
threading.Thread(target=WaitForSleep).run()
def cancel(self):
self.finished.set()
def run(self):
while time.time() < self.end_time and not self.have_sleep.is_set():
time.sleep(0) # yield to other Python threads
if not self.finished.is_set():
self.function()
self.finished.set()
class IssueCommandTestCase(unittest.TestCase):
def testTimeoutNotReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'])
self.assertEqual(retcode, 0)
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '2s'], timeout=1)
self.assertEqual(retcode, -9)
self.assertFalse(HaveSleepSubprocess())
def testNoTimeout(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'], timeout=None)
self.assertEqual(retcode, 0)
def testNoTimeout_ExceptionRaised(self):
with mock.patch('subprocess.Popen', spec=subprocess.Popen) as mock_popen:
mock_popen.return_value.wait.side_effect = KeyboardInterrupt()
with self.assertRaises(KeyboardInterrupt):
vm_util.IssueCommand(['sleep', '2s'], timeout=None)
self.assertFalse(HaveSleepSubprocess())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "d0fae4c398d56c581045dc3e3a8756b9",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 30.261194029850746,
"alnum_prop": 0.7055487053020961,
"repo_name": "meteorfox/PerfKitBenchmarker",
"id": "1cdc9734be14feb5327ff720ac940e9573290de4",
"size": "4666",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/vm_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1843285"
},
{
"name": "Shell",
"bytes": "23474"
}
],
"symlink_target": ""
}
|
"""Test for the wordcount example."""
# pytype: skip-file
import collections
import logging
import re
import tempfile
import unittest
from apache_beam.examples import wordcount_dataframe
from apache_beam.testing.util import open_shards
class WordCountTest(unittest.TestCase):
SAMPLE_TEXT = """
a
a b
a b c
loooooonger words
"""
def create_temp_file(self, contents):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(contents.encode('utf-8'))
return f.name
def test_basics(self):
temp_path = self.create_temp_file(self.SAMPLE_TEXT)
expected_words = collections.defaultdict(int)
for word in re.findall(r'[\w]+', self.SAMPLE_TEXT):
expected_words[word] += 1
wordcount_dataframe.run(
['--input=%s*' % temp_path, '--output=%s.result' % temp_path])
# Parse result file and compare.
results = []
with open_shards(temp_path + '.result-*') as result_file:
for line in result_file:
match = re.search(r'(\S+),([0-9]+)', line)
if match is not None:
results.append((match.group(1), int(match.group(2))))
elif line.strip():
self.assertEqual(line.strip(), 'word,count')
self.assertEqual(sorted(results), sorted(expected_words.items()))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "0d3866e0e9990bfb3482ce9827cd250d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 27.2,
"alnum_prop": 0.6551470588235294,
"repo_name": "axbaretto/beam",
"id": "aa2fcc568012acc0758803bfc63c81fd5708df9a",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/wordcount_dataframe_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from ctypes import POINTER
from ctypes import c_void_p
from ctypes import cdll
import ctypes.util
import platform
# LLVM_VERSION: sync with PACKAGE_VERSION in CMakeLists.txt
# but leave out the 'svn' suffix.
LLVM_VERSION = '10.0.0'
__all__ = [
'c_object_p',
'get_library',
]
c_object_p = POINTER(c_void_p)
class LLVMObject(object):
"""Base class for objects that are backed by an LLVM data structure.
This class should never be instantiated outside of this package.
"""
def __init__(self, ptr, ownable=True, disposer=None):
assert isinstance(ptr, c_object_p)
self._ptr = self._as_parameter_ = ptr
self._self_owned = True
self._ownable = ownable
self._disposer = disposer
self._owned_objects = []
def take_ownership(self, obj):
"""Take ownership of another object.
When you take ownership of another object, you are responsible for
destroying that object. In addition, a reference to that object is
placed inside this object so the Python garbage collector will not
collect the object while it is still alive in libLLVM.
This method should likely only be called from within modules inside
this package.
"""
assert isinstance(obj, LLVMObject)
self._owned_objects.append(obj)
obj._self_owned = False
def from_param(self):
"""ctypes function that converts this object to a function parameter."""
return self._as_parameter_
def __del__(self):
if not hasattr(self, '_self_owned') or not hasattr(self, '_disposer'):
return
if self._self_owned and self._disposer:
self._disposer(self)
class CachedProperty(object):
"""Decorator that caches the result of a property lookup.
This is a useful replacement for @property. It is recommended to use this
decorator on properties that invoke C API calls for which the result of the
call will be idempotent.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except: # pragma: no cover
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
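# Illustrative only -- a hypothetical consumer of CachedProperty (the class and
# the C call below are placeholders, not part of this module):
#
#   class Module(LLVMObject):
#       @CachedProperty
#       def triple(self):
#           return expensive_c_api_call(self)  # evaluated only on first access
#
# After the first lookup, __get__ replaces the descriptor with the computed
# value on the instance, so subsequent reads are plain attribute accesses.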
def get_library():
"""Obtain a reference to the llvm library."""
# On Linux, ctypes.cdll.LoadLibrary() respects LD_LIBRARY_PATH
# while ctypes.util.find_library() doesn't.
# See http://docs.python.org/2/library/ctypes.html#finding-shared-libraries
#
    # To make it possible to run the unit tests without installing the LLVM shared
    # library into a default linker search path, always try ctypes.cdll.LoadLibrary()
    # with all possible library names first, then fall back to ctypes.util.find_library().
names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
t = platform.system()
if t == 'Darwin':
pfx, ext = 'lib', '.dylib'
elif t == 'Windows':
pfx, ext = '', '.dll'
else:
pfx, ext = 'lib', '.so'
for i in names:
try:
lib = cdll.LoadLibrary(pfx + i + ext)
except OSError:
pass
else:
return lib
for i in names:
t = ctypes.util.find_library(i)
if t:
return cdll.LoadLibrary(t)
raise Exception('LLVM shared library not found!')
|
{
"content_hash": "49d1fbb5c435aad97638d8e9ecda248b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 86,
"avg_line_length": 30.128205128205128,
"alnum_prop": 0.6204255319148936,
"repo_name": "epiqc/ScaffCC",
"id": "9c6c6d433458c1f4223ed76c37037c64df435331",
"size": "3890",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "llvm/bindings/python/llvm/common.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3493637"
},
{
"name": "Batchfile",
"bytes": "753"
},
{
"name": "C",
"bytes": "3272269"
},
{
"name": "C++",
"bytes": "56117969"
},
{
"name": "CMake",
"bytes": "204481"
},
{
"name": "CSS",
"bytes": "55547"
},
{
"name": "Cuda",
"bytes": "5785"
},
{
"name": "Emacs Lisp",
"bytes": "20994"
},
{
"name": "HTML",
"bytes": "3200864"
},
{
"name": "JavaScript",
"bytes": "17391"
},
{
"name": "LLVM",
"bytes": "10223782"
},
{
"name": "M",
"bytes": "578"
},
{
"name": "M4",
"bytes": "189436"
},
{
"name": "MATLAB",
"bytes": "22305"
},
{
"name": "Makefile",
"bytes": "413012"
},
{
"name": "Mercury",
"bytes": "1195"
},
{
"name": "OCaml",
"bytes": "343061"
},
{
"name": "Objective-C",
"bytes": "18301489"
},
{
"name": "Objective-C++",
"bytes": "317800"
},
{
"name": "PHP",
"bytes": "1128"
},
{
"name": "Perl",
"bytes": "200404"
},
{
"name": "Python",
"bytes": "1043548"
},
{
"name": "Roff",
"bytes": "18799"
},
{
"name": "Shell",
"bytes": "566849"
},
{
"name": "Vim script",
"bytes": "27002"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SynCookie(A10BaseClass):
"""Class Description::
Global Syn-Cookie Protection.
    Class syn-cookie supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param on_threshold: {"description": "on-threshold for Syn-cookie (Decimal number)", "partition-visibility": "shared", "default": 0, "optional": true, "format": "number", "maximum": 2147483647, "minimum": 0, "type": "number"}
:param enable: {"default": 0, "optional": true, "type": "number", "description": "Global Syn-Cookie Protection", "format": "flag"}
:param off_threshold: {"description": "off-threshold for Syn-cookie (Decimal number)", "partition-visibility": "shared", "default": 0, "optional": true, "format": "number", "maximum": 2147483647, "minimum": 0, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/syn-cookie`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "syn-cookie"
self.a10_url="/axapi/v3/syn-cookie"
self.DeviceProxy = ""
self.on_threshold = ""
self.enable = ""
self.off_threshold = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
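# Illustrative only -- a sketch of typical construction, assuming an already
# configured device proxy (the threshold values are placeholders within the
# documented 0..2147483647 range):
#
#   syn_cookie = SynCookie(DeviceProxy=proxy, enable=1,
#                          on_threshold=50000, off_threshold=40000)
#
# The keyword arguments are copied onto the instance by the setattr() loop above
# and used by the inherited CRUD operations against /axapi/v3/syn-cookie.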
|
{
"content_hash": "3a67fb9be1f2ed607458a3195677307b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 231,
"avg_line_length": 41,
"alnum_prop": 0.6329565734681737,
"repo_name": "amwelch/a10sdk-python",
"id": "2c3c49da0e5b211ff603ebb6f112b7b182c2cbb2",
"size": "1681",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/syn/syn_cookie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
import pytest
from commitizen import BaseCommitizen, defaults, factory
from commitizen.config import BaseConfig
from commitizen.exceptions import NoCommitizenFoundException
def test_factory():
config = BaseConfig()
config.settings.update({"name": defaults.DEFAULT_SETTINGS["name"]})
r = factory.commiter_factory(config)
assert isinstance(r, BaseCommitizen)
def test_factory_fails():
config = BaseConfig()
config.settings.update({"name": "Nothing"})
with pytest.raises(NoCommitizenFoundException) as excinfo:
factory.commiter_factory(config)
assert "The committer has not been found in the system." in str(excinfo)
|
{
"content_hash": "30a4532f66ad37a4454cc008d90f43d0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.75,
"repo_name": "Woile/commitizen",
"id": "5fbd2deebbbc8e2beede3ac3caa8072ed0bbe403",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100927"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
"""
Does all steps required for initialising the application
Fetches list of cnx 50 stocks
and populates the redis cache
"""
import sys
import requests
import redis
companies = []
sectors = []
try:
redis_obj = redis.Redis(host='10.193.124.239', port=6379, db=0, password=None)
except Exception, e:
print >> sys.stderr, """Please ensure redis is running on localhost on 6379.
If not change config accordingly"""
raise(e)
def get_cnx_100_scrips():
"""
Fetches cnx100 scrips for nseindia.com
    Warning: the link gets deprecated pretty often. Watch out for errors.
Order of columns from NSEIndia script:
0 : Company Name
1 : Industry
2 : Symbol
    Stores a Redis data structure of the following format:
'companies' = 'cn1,cn2,cn3'.. #comma separated list of companies
'sectors' = 's1,s2,s3'.. #Comma separated list of distinct sectors
'scrips' = 'c1,c2,c3'.. # comma separated list of scrips
'<c1>:sector'= s1 # Given scrip as key, throws its sector
'<s1>:companies' : 'c1,c2,c3' # Given sector as key, throws comma sep. list of scrips
'<c1>:name' : Given c1, throws cn1
    This is better than storing a JSON blob against each company, as this approach
    helps retrieval significantly.
"""
companies = []
sectors = []
scrips = []
companies_sector_map = {}
sector_companies_map = {}
scrip_company_map = {}
nseindia_url = 'https://www.nseindia.com/content/indices/ind_niftylist.csv'
resp = requests.get(nseindia_url)
if resp.status_code != requests.codes.ok:
raise Exception('Error fetching scrip from NseIndia')
content = resp.text
    lines = content.split('\n') # Did not use a csvreader since this is a one-off script
lines = lines[1:]
for line in lines:
print line
line_parts = line.split(',')
if line_parts and len(line_parts) > 1:
print line_parts
companies.append(line_parts[0])
sectors.append(line_parts[1])
scrips.append(line_parts[2])
companies_sector_map[line_parts[2]] = line_parts[1]
redis_obj.set('%s:sector'%(line_parts[2]),line_parts[1])
if sector_companies_map.get(line_parts[1]):
sector_companies_map[line_parts[1]].append(line_parts[2])
else:
sector_companies_map[line_parts[1]] = [line_parts[2]]
scrip_company_map[line_parts[2]] = line_parts[0]
redis_obj.set('%s:name'%(line_parts[2]),line_parts[0])
# build redis cache out of local dictionaries
redis_obj.set('companies',','.join(companies))
redis_obj.set('scrips',','.join(scrips))
redis_obj.set('sectors',','.join(list(set(sectors))))
for k,v in sector_companies_map.iteritems():
redis_obj.set('%s:companies'%(k),','.join(v))
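def _example_lookup(scrip='INFY'):
    """Illustrative helper, not used by the app: shows how a consumer would read
    back the cache written above. The scrip symbol is only a placeholder."""
    sector = redis_obj.get('%s:sector' % scrip)      # industry of the scrip
    name = redis_obj.get('%s:name' % scrip)          # full company name
    peers = redis_obj.get('%s:companies' % sector)   # comma separated scrips in that sector
    return name, sector, peers.split(',') if peers else []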
if __name__ == '__main__':
get_cnx_100_scrips()
|
{
"content_hash": "8220e10e52bd47b53dd9b12b52f1315f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 93,
"avg_line_length": 40.726027397260275,
"alnum_prop": 0.6115035317860746,
"repo_name": "rajaram1990/Nifty50Cards",
"id": "37d4d74f0a548a4d62b8857e59b5d292b4776bae",
"size": "2973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9060"
},
{
"name": "JavaScript",
"bytes": "3247"
},
{
"name": "Python",
"bytes": "22458"
},
{
"name": "Shell",
"bytes": "1041"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MyProfile.area'
db.alter_column(u'accounts_myprofile', 'area', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'MyProfile.mobile'
db.alter_column(u'accounts_myprofile', 'mobile', self.gf('django.db.models.fields.BigIntegerField')(default=1))
# Changing field 'MyProfile.compound'
db.alter_column(u'accounts_myprofile', 'compound', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'MyProfile.bldg_num'
db.alter_column(u'accounts_myprofile', 'bldg_num', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'MyProfile.apt_num'
db.alter_column(u'accounts_myprofile', 'apt_num', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# Changing field 'MyProfile.area'
db.alter_column(u'accounts_myprofile', 'area', self.gf('django.db.models.fields.IntegerField')(default=1))
# Changing field 'MyProfile.mobile'
db.alter_column(u'accounts_myprofile', 'mobile', self.gf('django.db.models.fields.BigIntegerField')(null=True))
# Changing field 'MyProfile.compound'
db.alter_column(u'accounts_myprofile', 'compound', self.gf('django.db.models.fields.IntegerField')(default=1))
# Changing field 'MyProfile.bldg_num'
db.alter_column(u'accounts_myprofile', 'bldg_num', self.gf('django.db.models.fields.IntegerField')(default=1))
# Changing field 'MyProfile.apt_num'
db.alter_column(u'accounts_myprofile', 'apt_num', self.gf('django.db.models.fields.IntegerField')(default=1))
models = {
u'accounts.myprofile': {
'Meta': {'object_name': 'MyProfile'},
'apt_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bldg_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'compound': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cross': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.BigIntegerField', [], {}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'street_num': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
|
{
"content_hash": "b098c6bf3ffdb4623c3b789795815805",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 187,
"avg_line_length": 65.36082474226804,
"alnum_prop": 0.5884858044164037,
"repo_name": "joeyjy/ayi-django",
"id": "af2b9115e8aaaf996f50b08660dd5ff1dad1d36e",
"size": "6364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ms/accounts/migrations/0005_auto__chg_field_myprofile_area__chg_field_myprofile_mobile__chg_field_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28391"
},
{
"name": "JavaScript",
"bytes": "71898"
},
{
"name": "Makefile",
"bytes": "5612"
},
{
"name": "Python",
"bytes": "425280"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.jvm_dependency_usage import JvmDependencyUsage
from pants.goal.products import MultipleRootedProducts
from pants.util.dirutil import safe_mkdir, touch
from pants_test.tasks.task_test_base import TaskTestBase
class TestJvmDependencyUsage(TaskTestBase):
@classmethod
def task_type(cls):
return JvmDependencyUsage
def _setup(self, target_classfiles):
"""Takes a dict mapping targets to lists of classfiles."""
context = self.context(target_roots=target_classfiles.keys())
# Create classfiles in a target-specific directory, and add it to the classpath for the target.
classpath_products = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))
for target, classfiles in target_classfiles.items():
target_dir = os.path.join(self.test_workdir, target.id)
safe_mkdir(target_dir)
for classfile in classfiles:
touch(os.path.join(target_dir, classfile))
classpath_products.add_for_target(target, [('default', target_dir)])
product_deps_by_src = context.products.get_data('product_deps_by_src', dict)
return self.create_task(context), product_deps_by_src
def make_java_target(self, *args, **kwargs):
assert 'target_type' not in kwargs
return self.make_target(target_type=JavaLibrary, *args, **kwargs)
def _cover_output(self, graph):
# coverage of the output code
self.assertNotEqual(graph.to_json(), "")
self.assertNotEqual(graph.to_summary(), "")
def test_simple_dep_usage_graph(self):
t1 = self.make_java_target(spec=':t1', sources=['a.java', 'b.java'])
t2 = self.make_java_target(spec=':t2', sources=['c.java'], dependencies=[t1])
t3 = self.make_java_target(spec=':t3', sources=['d.java', 'e.java'], dependencies=[t1])
self.set_options(size_estimator='filecount')
dep_usage, product_deps_by_src = self._setup({
t1: ['a.class', 'b.class'],
t2: ['c.class'],
t3: ['d.class', 'e.class'],
})
product_deps_by_src[t1] = {}
product_deps_by_src[t2] = {'c.java': ['a.class']}
product_deps_by_src[t3] = {'d.java': ['a.class', 'b.class'],
'e.java': ['a.class', 'b.class']}
graph = dep_usage.create_dep_usage_graph([t1, t2, t3], '')
self.assertEqual(graph._nodes[t1].products_total, 2)
self.assertEqual(graph._nodes[t2].products_total, 1)
self.assertEqual(graph._nodes[t3].products_total, 2)
self.assertEqual(graph._nodes[t1].dep_edges, {})
self.assertEqual(len(graph._nodes[t2].dep_edges[t1].products_used), 1)
self.assertEqual(len(graph._nodes[t3].dep_edges[t1].products_used), 2)
self.assertEqual(graph._trans_cost(t1), 2)
self.assertEqual(graph._trans_cost(t2), 3)
self.assertEqual(graph._trans_cost(t3), 4)
self._cover_output(graph)
def test_dep_usage_graph_with_synthetic_targets(self):
t1 = self.make_java_target(spec=':t1', sources=['t1.thrift'])
t1_x = self.make_java_target(spec=':t1.x', derived_from=t1)
t1_y = self.make_java_target(spec=':t1.y', derived_from=t1)
t1_z = self.make_java_target(spec=':t1.z', derived_from=t1)
t2 = self.make_java_target(spec=':t2',
sources=['a.java', 'b.java'],
dependencies=[t1, t1_x, t1_y, t1_z])
self.set_options(size_estimator='nosize')
dep_usage, product_deps_by_src = self._setup({
t1_x: ['x1.class'],
t1_y: ['y1.class'],
t1_z: ['z1.class', 'z2.class', 'z3.class'],
t2: ['a.class', 'b.class'],
})
product_deps_by_src[t1] = {}
product_deps_by_src[t1_x] = {}
product_deps_by_src[t1_y] = {}
product_deps_by_src[t1_z] = {}
product_deps_by_src[t2] = {'a.java': ['x1.class'],
'b.java': ['z1.class', 'z2.class']}
graph = dep_usage.create_dep_usage_graph([t1, t1_x, t1_y, t1_z, t2], '')
self.assertEqual(graph._nodes[t1].products_total, 5)
self.assertEqual(len(graph._nodes[t2].dep_edges[t1].products_used), 3)
self._cover_output(graph)
|
{
"content_hash": "c7c00d7f4f9b125c4b3178a099cc95a9",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 120,
"avg_line_length": 42.81553398058252,
"alnum_prop": 0.6471655328798186,
"repo_name": "qma/pants",
"id": "b13f9986dffc3fec22f84d109d0cd097d1e7dc36",
"size": "4557",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/test_jvm_dependency_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "315576"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4166893"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49622"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
"""The tests for reproduction of state."""
import pytest
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
DOMAIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
MediaPlayerEntityFeature,
)
from homeassistant.components.media_player.reproduce_state import async_reproduce_states
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
STATE_BUFFERING,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import Context, State
from tests.common import async_mock_service
ENTITY_1 = "media_player.test1"
ENTITY_2 = "media_player.test2"
@pytest.mark.parametrize(
"service,state,supported_feature",
[
(SERVICE_TURN_ON, STATE_ON, MediaPlayerEntityFeature.TURN_ON),
(SERVICE_TURN_OFF, STATE_OFF, MediaPlayerEntityFeature.TURN_OFF),
(SERVICE_MEDIA_PLAY, STATE_BUFFERING, MediaPlayerEntityFeature.PLAY),
(SERVICE_MEDIA_PLAY, STATE_PLAYING, MediaPlayerEntityFeature.PLAY),
(SERVICE_MEDIA_STOP, STATE_IDLE, MediaPlayerEntityFeature.STOP),
(SERVICE_MEDIA_PAUSE, STATE_PAUSED, MediaPlayerEntityFeature.PAUSE),
],
)
async def test_state(hass, service, state, supported_feature):
"""Test that we can turn a state into a service call."""
calls_1 = async_mock_service(hass, DOMAIN, service)
if service != SERVICE_TURN_ON:
async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
    # If the entity does not support the feature, the service should not be called.
hass.states.async_set(ENTITY_1, "something", {ATTR_SUPPORTED_FEATURES: 0})
await async_reproduce_states(hass, [State(ENTITY_1, state)])
await hass.async_block_till_done()
assert len(calls_1) == 0
hass.states.async_set(
ENTITY_1, "something", {ATTR_SUPPORTED_FEATURES: supported_feature}
)
await async_reproduce_states(hass, [State(ENTITY_1, state)])
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1}
async def test_turn_on_with_mode(hass):
"""Test that state with additional attributes call multiple services."""
hass.states.async_set(
ENTITY_1,
"something",
{
ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.SELECT_SOUND_MODE
},
)
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_SELECT_SOUND_MODE)
await async_reproduce_states(
hass, [State(ENTITY_1, "on", {ATTR_SOUND_MODE: "dummy"})]
)
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1}
assert len(calls_2) == 1
assert calls_2[0].data == {"entity_id": ENTITY_1, ATTR_SOUND_MODE: "dummy"}
async def test_multiple_same_state(hass):
"""Test that multiple states with same state gets calls."""
for entity in ENTITY_1, ENTITY_2:
hass.states.async_set(
entity,
"something",
{ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON},
)
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await async_reproduce_states(hass, [State(ENTITY_1, "on"), State(ENTITY_2, "on")])
await hass.async_block_till_done()
assert len(calls_1) == 2
# order is not guaranteed
assert any(call.data == {"entity_id": "media_player.test1"} for call in calls_1)
assert any(call.data == {"entity_id": "media_player.test2"} for call in calls_1)
async def test_multiple_different_state(hass):
"""Test that multiple states with different state gets calls."""
for entity in ENTITY_1, ENTITY_2:
hass.states.async_set(
entity,
"something",
{
ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
},
)
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
await async_reproduce_states(hass, [State(ENTITY_1, "on"), State(ENTITY_2, "off")])
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": "media_player.test1"}
assert len(calls_2) == 1
assert calls_2[0].data == {"entity_id": "media_player.test2"}
async def test_state_with_context(hass):
"""Test that context is forwarded."""
hass.states.async_set(
ENTITY_1,
"something",
{ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON},
)
calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
context = Context()
await async_reproduce_states(hass, [State(ENTITY_1, "on")], context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {"entity_id": ENTITY_1}
assert calls[0].context == context
async def test_attribute_no_state(hass):
"""Test that no state service call is made with none state."""
hass.states.async_set(
ENTITY_1,
"something",
{
ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOUND_MODE
},
)
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
calls_3 = async_mock_service(hass, DOMAIN, SERVICE_SELECT_SOUND_MODE)
value = "dummy"
await async_reproduce_states(
hass, [State(ENTITY_1, None, {ATTR_SOUND_MODE: value})]
)
await hass.async_block_till_done()
assert len(calls_1) == 0
assert len(calls_2) == 0
assert len(calls_3) == 1
assert calls_3[0].data == {"entity_id": ENTITY_1, ATTR_SOUND_MODE: value}
@pytest.mark.parametrize(
"service,attribute,supported_feature",
[
(
SERVICE_VOLUME_SET,
ATTR_MEDIA_VOLUME_LEVEL,
MediaPlayerEntityFeature.VOLUME_SET,
),
(
SERVICE_VOLUME_MUTE,
ATTR_MEDIA_VOLUME_MUTED,
MediaPlayerEntityFeature.VOLUME_MUTE,
),
(
SERVICE_SELECT_SOURCE,
ATTR_INPUT_SOURCE,
MediaPlayerEntityFeature.SELECT_SOURCE,
),
(
SERVICE_SELECT_SOUND_MODE,
ATTR_SOUND_MODE,
MediaPlayerEntityFeature.SELECT_SOUND_MODE,
),
],
)
async def test_attribute(hass, service, attribute, supported_feature):
"""Test that service call is made for each attribute."""
hass.states.async_set(
ENTITY_1,
"something",
{ATTR_SUPPORTED_FEATURES: supported_feature},
)
calls_1 = async_mock_service(hass, DOMAIN, service)
value = "dummy"
await async_reproduce_states(hass, [State(ENTITY_1, None, {attribute: value})])
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1, attribute: value}
async def test_play_media(hass):
"""Test playing media."""
hass.states.async_set(
ENTITY_1,
"something",
{ATTR_SUPPORTED_FEATURES: MediaPlayerEntityFeature.PLAY_MEDIA},
)
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_PLAY_MEDIA)
value_1 = "dummy_1"
value_2 = "dummy_2"
await async_reproduce_states(
hass,
[
State(
ENTITY_1,
None,
{ATTR_MEDIA_CONTENT_TYPE: value_1, ATTR_MEDIA_CONTENT_ID: value_2},
)
],
)
await async_reproduce_states(
hass,
[
State(
ENTITY_1,
None,
{
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
},
)
],
)
await hass.async_block_till_done()
assert len(calls_1) == 2
assert calls_1[0].data == {
"entity_id": ENTITY_1,
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
}
assert calls_1[1].data == {
"entity_id": ENTITY_1,
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
}
|
{
"content_hash": "8e97587acb11d16187cf10dd825af88d",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 88,
"avg_line_length": 29.316326530612244,
"alnum_prop": 0.6190973430792436,
"repo_name": "mezz64/home-assistant",
"id": "cc30058d4b1e6945581c32f70e7b8ee42924ad54",
"size": "8619",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/media_player/test_reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from wheelcms_axle.registries.registry import Registry
from UserDict import IterableUserDict
class WorkflowRegistry(IterableUserDict):
_default = None
def get_default(self):
return self._default
def set_default(self, workflow):
self._default = workflow
def register(self, spoke, workflow):
self[spoke] = workflow
def get(self, spoke, default=None):
return IterableUserDict.get(self, spoke, default) or self.get_default()
def __getitem__(self, spoke):
try:
return IterableUserDict.__getitem__(self, spoke)
except KeyError:
return self._default
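# Hedged usage sketch (illustrative only; SpokeA, SpokeB, ReviewWorkflow and
# DefaultWorkflow are hypothetical names, not part of wheelcms_axle):
#
#   registry = WorkflowRegistry()
#   registry.set_default(DefaultWorkflow)
#   registry.register(SpokeA, ReviewWorkflow)
#   registry[SpokeA]      # -> ReviewWorkflow
#   registry[SpokeB]      # -> DefaultWorkflow (missing keys fall back)
#   registry.get(SpokeB)  # -> DefaultWorkflow (None/falsy values also fall back)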
|
{
"content_hash": "9077d544787dc14e47620f20ccee69ad",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 28.043478260869566,
"alnum_prop": 0.662015503875969,
"repo_name": "wheelcms/wheelcms_axle",
"id": "f73b99ebfc78639201ae1f78b9db64e1dc91e483",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wheelcms_axle/registries/workflow.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8772"
},
{
"name": "HTML",
"bytes": "48850"
},
{
"name": "JavaScript",
"bytes": "1663405"
},
{
"name": "Python",
"bytes": "484553"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
}
|
"""while_v2 and gradient.
This is a version of while_loop that emits a single While op, as well as the
gradient function for While ops produced by while_loop. This will eventually
replace the current tf.while_loop implementation once it reaches feature and
performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import backprop_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util_v2 as util
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import while_v2_indexed_slices_rewriter
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
# pylint: disable=protected-access
def while_loop(cond,
body,
loop_vars,
shape_invariants=None,
parallel_iterations=10,
maximum_iterations=None,
name=None,
return_same_structure=True,
back_prop=True):
"""Like tf.while_loop, except emits a single While op."""
# Keep the original loop_vars around to know which args were TensorArrays.
orig_loop_vars = loop_vars
# Cache its length since we use it at multiple places below.
len_orig_loop_vars = len(orig_loop_vars)
# Convert TensorArrays to their flow variables. These get converted back to
# TensorArrays before calling `cond` and `body`. See `wrapped_cond` and
# `wrapped_body` below.
loop_vars = list(_tensor_array_to_flow(orig_loop_vars))
loop_vars = nest.map_structure(
ops.internal_convert_to_tensor_or_indexed_slices, loop_vars,
expand_composites=True)
if shape_invariants is not None:
nest.assert_same_structure(orig_loop_vars, shape_invariants,
expand_composites=False)
signature = nest.map_structure(
control_flow_ops._shape_invariant_to_type_spec, loop_vars,
list(shape_invariants), expand_composites=False)
shape_invariants = nest.map_structure(
control_flow_ops._get_shape_invariant, loop_vars,
list(shape_invariants), expand_composites=False)
else:
signature = nest.map_structure(
type_spec.type_spec_from_value, loop_vars, expand_composites=False)
shape_invariants = nest.map_structure(
control_flow_ops._get_shape_invariant, loop_vars,
expand_composites=False)
if not name:
name = "while"
with ops.name_scope(name) as scope:
with ops.name_scope(None):
cond_name = util.unique_fn_name(scope, "cond")
body_name = util.unique_fn_name(scope, "body")
maximum_iterations_loop_var = _build_maximum_iterations_loop_var(
maximum_iterations)
loop_counter = constant_op.constant(
0,
dtype=maximum_iterations_loop_var.dtype
if maximum_iterations is not None else None,
name="loop_counter")
# Add loop counter needed for computing gradients.
loop_vars = [loop_counter, maximum_iterations_loop_var] + loop_vars
shape_invariants = [tensor_shape.TensorShape([])] * 2 + shape_invariants
signature = (
[tensor_spec.TensorSpec.from_tensor(loop_counter),
tensor_spec.TensorSpec.from_tensor(maximum_iterations_loop_var)] +
signature)
# Automatic control dependencies are added in defuns, but not in v1
# graphs. Propagate that behavior here.
add_control_dependencies = ops.get_default_graph()._add_control_dependencies
def wrapped_cond(loop_counter, maximum_iterations_arg, *args):
"""Extra `cond` wrapper that can handle the extra counter loop_var."""
# Convert the flow variables in `args` to TensorArrays. `args` should
# already have the same structure as `orig_loop_vars` but currently there
# is no nest.zip so we call `_pack_sequence_as` which flattens both
# `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
# and packs it into the structure of `orig_loop_vars`.
pred = cond(*_pack_sequence_as(orig_loop_vars, args))
if (tensor_util.is_tensor(pred) and
(pred.shape.dims is None or pred.shape.dims)):
pred = array_ops.squeeze_v2(pred)
if maximum_iterations is None:
return pred
else:
return math_ops.logical_and(
loop_counter < maximum_iterations_arg, pred)
# NOTE(skyewm): we set collections to the outer graph's collections for
# compatibility with TPUEstimator.
cond_graph = func_graph_module.func_graph_from_py_func(
cond_name,
wrapped_cond,
[], # We provide signature instead of args.
{},
signature=signature,
func_graph=util.WhileCondFuncGraph(
cond_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
add_control_dependencies=add_control_dependencies)
def wrapped_body(loop_counter, maximum_iterations_arg, *args):
"""Loop body augmented with counter update.
Args:
loop_counter: Loop counter which needs to be incremented in the body.
maximum_iterations_arg: Maximum iterations of the loop.
*args: List of args
Returns:
A list of tensors the same length as args.
"""
# Capture the tensors already captured in cond_graph so that they appear
# in the same order in body_graph.external_captures.
for t in cond_graph.external_captures:
ops.get_default_graph().capture(t)
# Convert the flow variables in `args` to TensorArrays. `args` should
# already have the same structure as `orig_loop_vars` but currently there
# is no nest.zip so we call `_pack_sequence_as` which flattens both
# `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
# and packs it into the structure of `orig_loop_vars`.
outputs = body(*_pack_sequence_as(orig_loop_vars, args))
if not nest.is_sequence_or_composite(outputs):
outputs = [outputs]
# Compare the structure of input and output of body converting the
# top-level tuples to list to be compatible with legacy while_loop.
nest.assert_same_structure(list(outputs), list(orig_loop_vars),
expand_composites=True)
outputs = _tensor_array_to_flow(outputs)
# TODO(srbs): Update lowering code to create _Enter nodes with
# is_constant=True for inputs that are directly passed to outputs.
return [loop_counter + 1, maximum_iterations_arg] + list(outputs)
body_graph = func_graph_module.func_graph_from_py_func(
body_name,
wrapped_body,
[], # We provide signature instead of args.
{},
signature=signature,
func_graph=util.WhileBodyFuncGraph(
body_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
add_control_dependencies=add_control_dependencies)
# Add external captures of body to the list of loop vars.
# Note that external tensors will be treated as loop invariants, i.e.,
# the value of that tensor in each iteration is the same as it was at the
# beginning of the loop execution.
loop_vars = loop_vars + body_graph.external_captures
# TODO(srbs): Update lowering code to create _Enter nodes with
# is_constant=True for inputs that are directly passed to outputs.
body_graph.outputs.extend(body_graph.internal_captures)
# Capture the extra `external_captures` of `body_graph` in `cond_graph` so
# that it expects to receive those as arguments.
with cond_graph.as_default():
num_cond_captures = len(cond_graph.external_captures)
assert (cond_graph.external_captures ==
body_graph.external_captures[:num_cond_captures])
cond_graph_captures = object_identity.ObjectIdentitySet(
cond_graph.external_captures)
_duplicate_body_captures_in_cond(
cond_graph, body_graph.external_captures[num_cond_captures:])
# Make sure that the shapes of the loop outputs are compatible with the
# shape invariants, or the shapes of the loop vars if the invariants are not
# specified.
num_flattened_outputs = len(nest.flatten(orig_loop_vars,
expand_composites=True))
# First var is loop counter and second var is maximum_iterations.
first_loop_var_index = 2
_check_shapes_compat(
body_graph.outputs[first_loop_var_index:first_loop_var_index +
num_flattened_outputs],
nest.flatten(
shape_invariants[first_loop_var_index:first_loop_var_index +
len_orig_loop_vars], expand_composites=True),
nest.flatten(loop_vars[first_loop_var_index:first_loop_var_index +
len_orig_loop_vars], expand_composites=True))
num_original_outputs = len(body_graph.outputs)
if back_prop and util.output_all_intermediates():
# Export all tensors in the loop body that may be needed for gradient
# computation. We do this by accumulating the intermediate values in
# TensorLists.
intermediate_tensors = _get_intermediates(body_graph)
for intermediate_tensor in intermediate_tensors:
tensor_list = list_ops.empty_tensor_list(
element_dtype=intermediate_tensor.dtype,
element_shape=intermediate_tensor.shape,
max_num_elements=maximum_iterations)
loop_vars.append(tensor_list)
with cond_graph.as_default():
# Add a placeholder to cond_graph's inputs corresponding to the
# tensor_list.
cond_graph.capture(tensor_list)
with body_graph.as_default():
# Push the intermediate tensor to the tensor list. This captures the
# `tensor_list` as well.
appended_tensor_list = list_ops.tensor_list_push_back(
tensor_list, intermediate_tensor)
# Add this modified tensor list to the list of outputs.
body_graph.outputs.append(appended_tensor_list)
flattened_loop_vars = nest.flatten(loop_vars, expand_composites=True)
_check_num_inputs_outputs(cond_graph, body_graph,
len(flattened_loop_vars))
_check_inputs_outputs_types_match(body_graph, flattened_loop_vars)
with ops.control_dependencies(
list(cond_graph.control_captures) + list(body_graph.control_captures)):
output_shapes = [t.shape for t in body_graph.outputs]
orig_loop_vars_range = slice(first_loop_var_index,
first_loop_var_index + num_flattened_outputs)
output_shapes[orig_loop_vars_range] = nest.flatten(
shape_invariants, expand_composites=True)[orig_loop_vars_range]
cond_stateful_ops = [
op for op in cond_graph.get_operations() if op._is_stateful
]
body_stateful_ops = [
op for op in body_graph.get_operations() if op._is_stateful
]
if (cond_stateful_ops or body_stateful_ops):
op_fn = gen_functional_ops._while
else:
op_fn = gen_functional_ops.stateless_while
outputs = op_fn(
flattened_loop_vars,
util.create_new_tf_function(cond_graph),
util.create_new_tf_function(body_graph),
output_shapes=output_shapes,
parallel_iterations=parallel_iterations,
name=scope)
# This is needed so we do not compute derivative wrt these extra outputs.
outputs[0].op._set_attr("_num_original_outputs",
attr_value_pb2.AttrValue(i=num_original_outputs))
outputs[0].op._cond_graph = cond_graph
outputs[0].op._body_graph = body_graph
_copy_handle_data(body_graph.outputs, outputs)
util.maybe_set_lowering_attr(outputs[0].op)
util.maybe_propagate_compile_time_consts_in_xla(outputs[0].op)
if not ops.get_default_graph().building_function:
# In V1 graph mode, return identities for each output of the While op,
# rather than the output of the While op directly. This makes pruning work
# if the output of while_loop() is fetched: the lowering pass converts the
# While outputs into IdentityN outputs, which if fetched will cause all
# ops in the body to be run (since it takes all exit ops as input). After
# lowering, each output identity op will end up with only the appropriate
# exit op as input.
outputs = tuple(array_ops.identity(t) for t in outputs)
outputs = _pack_sequence_as(
orig_loop_vars, outputs[first_loop_var_index:first_loop_var_index +
num_flattened_outputs])
if return_same_structure:
return outputs
flattened_outputs = nest.flatten(outputs, expand_composites=True)
if len(flattened_outputs) == 1:
return flattened_outputs[0]
else:
return outputs
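# Minimal usage sketch (not part of this module; graph mode, names are
# illustrative). while_loop above follows the tf.while_loop calling
# convention, e.g. summing the integers below 10:
#
#   i0 = constant_op.constant(0)
#   s0 = constant_op.constant(0)
#   i_final, s_final = while_loop(
#       lambda i, s: i < 10,
#       lambda i, s: (i + 1, s + i),
#       [i0, s0])
#
# A single While op (StatelessWhile when cond and body contain no stateful
# ops) is emitted; the internal loop counter and maximum_iterations loop vars
# added above are stripped from the returned structure.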
@ops.RegisterGradient("StatelessWhile")
@ops.RegisterGradient("While")
def _WhileGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of a While op produced by while_loop."""
# Note that op is not always the same as while_op because the gradient tape,
# for eager mode compatibility, forgets information about the proper op. Since
# the loop cannot run in eager mode, however, we can safely introspect into
# the graph here.
while_op = op.outputs[0].op
cond_graph = _get_graph(while_op, "cond", "_cond_graph")
body_graph = _get_graph(while_op, "body", "_body_graph")
orig_num_params = len(body_graph.outputs)
maximum_iterations = op.inputs[1]
parallel_iterations = op.get_attr("parallel_iterations")
try:
num_original_outputs = while_op.get_attr("_num_original_outputs")
except: # pylint: disable=bare-except
num_original_outputs = len(while_op.outputs)
num_intermediates = len(while_op.outputs) - num_original_outputs
grads = [
_preprocess_grad(grad, body_out, while_in, while_out) # pylint: disable=g-complex-comprehension
for grad, body_out, while_in, while_out in zip(
grads[:num_original_outputs],
body_graph.outputs[:num_original_outputs],
while_op.inputs[:num_original_outputs],
while_op.outputs[:num_original_outputs])
] + [None] * num_intermediates
# We compute the gradient for the sub-graph between trainable ys and xs
# with non-None incoming gradients. We later pad the None's to the list of
# outputs.
ys, xs, non_none_grads = zip(*[(y, x, grad) for (y, x, grad) in zip(
body_graph.outputs, body_graph.inputs, grads) if grad is not None])
body_grad_graph, args = _create_grad_func(
ys, xs, non_none_grads, cond_graph, body_graph,
util.unique_grad_fn_name(body_graph.name), op, maximum_iterations)
if body_grad_graph.while_op_needs_rewrite:
# Modify 'op' to output the intermediate accumulators needed by the grad
# function.
# NOTE(skyewm): if there are any active sessions, this modification to `op`
# may make them unrunnable!
cond_graph.name += "_rewritten"
body_graph.name += "_rewritten"
new_inputs = body_grad_graph.extra_inputs
new_outputs = body_graph.outputs[orig_num_params:]
while_op._set_func_attr("cond", util.create_new_tf_function(cond_graph))
while_op._set_func_attr("body", util.create_new_tf_function(body_graph))
while_op._set_type_list_attr("T", body_graph.output_types)
while_op._set_shape_list_attr("output_shapes", body_graph.output_shapes)
while_op._add_while_inputs(new_inputs)
while_op._add_outputs([t.dtype for t in new_outputs],
[t.shape for t in new_outputs])
_copy_handle_data(new_outputs, op.outputs[orig_num_params:])
  # Do not ignore grads wrt extra outputs when computing higher order
# derivatives.
while_op._set_attr("_num_original_outputs",
attr_value_pb2.AttrValue(i=len(while_op.outputs)))
captured_inputs = _resolve_grad_captures(body_graph, body_grad_graph,
while_op)
loop_vars = args + captured_inputs
# This modifies body_grad_graph.
loop_vars = while_v2_indexed_slices_rewriter.rewrite_grad_indexed_slices(
grads, body_grad_graph, loop_vars, while_op.inputs)
def grad_cond(counter, unused_maximum_iterations_arg, forward_loop_iters,
*unused_args):
return counter < forward_loop_iters
grad_cond_name = util.unique_grad_fn_name(op.get_attr("cond").name)
cond_grad_graph = func_graph_module.func_graph_from_py_func(
grad_cond_name, grad_cond, loop_vars, {},
func_graph=util.WhileCondFuncGraph(grad_cond_name))
_check_num_inputs_outputs(cond_grad_graph, body_grad_graph, len(loop_vars))
outputs = gen_functional_ops._while(
loop_vars,
util.create_new_tf_function(cond_grad_graph),
util.create_new_tf_function(body_grad_graph),
output_shapes=[t.shape for t in body_grad_graph.outputs],
parallel_iterations=parallel_iterations,
name="%s_grad" % while_op.name)
grad_op = outputs[0].op
_copy_handle_data(body_grad_graph.outputs, outputs)
util.maybe_set_lowering_attr(grad_op)
util.maybe_propagate_compile_time_consts_in_xla(grad_op)
# See comment in while_loop.
outputs = [array_ops.identity(t) for t in outputs]
return _get_structured_grad_output(outputs, grads, body_grad_graph)
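# Conceptual sketch (hypothetical; graph mode): the gradient registered above
# fires when differentiating through a while_v2 loop, e.g.
#
#   x = constant_op.constant(2.0)
#   y = while_loop(lambda v: v < 100., lambda v: v * v, [x],
#                  return_same_structure=False)
#   dy_dx = gradients_util._GradientsHelper([y], [x])[0]
#
# (tf.gradients routes through the same helper.) The backward While op runs
# for as many iterations as the forward loop did, popping intermediate values
# from the accumulators that are wired up below.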
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that should be accumulated."""
# We currently accumulate output tensors of most ops in the function and rely
# on the pruning pass to get rid of the unused accumulators at runtime.
# However, this can bloat the GraphDef and make debugging harder so we perform
# some optimizations.
#
# Optimization we currently perform:
# 1. We do not accumulate tensors which already have an accumulator
# in the loop body.
# 2. We do not accumulate outputs of Identity nodes. When building the
# FuncGraph, we add an Identity node for each output (see
# `AutomaticControlDependencies.mark_as_return`). Accumulating outputs
# of all these nodes bloats the GraphDef quite a bit so we remove those.
# Since the gradient of an Identity node does not rely on its forward op's
# input this is safe to do.
#
# Other possible optimizations:
# 1. Only accumulate tensors that will be required by the backward pass.
# This will require running the gradient pass and hence would increase the
# graph building time for the forward pass.
# 2. Do not accumulate Const nodes created inside the loop body.
# 3. Do not accumulate loop vars that are returned as-is just like captured
# tensors.
intermediates = []
reverse_captures = dict(
(v.experimental_ref(), k) for k, v in func_graph.captures)
for op in func_graph.get_operations():
if op.type == "Identity":
continue
# Accumulating mutexes can cause deadlock.
if op.type == "MutexLock":
continue
for o in op.outputs:
if (o is not func_graph.inputs[0] and # Loop counter.
o.dtype != dtypes.resource and # Do not accumulate resource tensors.
_get_accumulator(o) is None and # Has existing accumulator.
o.experimental_ref() not in reverse_captures
): # Captured value, hence loop invariant.
intermediates.append(o)
return intermediates
def _preprocess_grad(grad, body_graph_output, while_op_input, while_op_output):
"""Returns the initial gradient to be used for a given output tensor.
Args:
grad: the original gradient Tensor passed to the gradient function.
body_graph_output: the corresponding Tensor in the body graph.
while_op_input: the corresponding Tensor input of the While op.
while_op_output: the corresponding Tensor output of the While op.
Returns:
A Tensor or None.
"""
# Set the incoming gradient of non-trainable inputs to None. It is possible
# that we receive non-None gradients for non-trainable types in nested while
# loops because we accumulate outputs of the inner while as variant tensors
# which are trainable and hence receive zeros_like tensors in the gradient
# pass. The non-trainable tensors then receive the popped zeros tensor from
# this zeros variant. The gradient for the loop vars corresponding to these
# tensors is None or zeros (this happens only if the loop var is accumulated
# as well) in _grad_fn so we reset these.
# TODO(b/118712257): Remove once we can handle None output grads in _grad_fn.
if not _is_trainable(body_graph_output):
return None
# GradientTape initializes resource and variant grads as None instead of
# zeros. Set to zeros so _GradientsHelper computes the gradients instead of
# returning None.
# TODO(b/143286622): The supports_default_grad check is needed
# because While op emits non-differentiable resource tensors
# as outputs. Remove this check when that is not the case.
# Note: We use `while_op_input` instead of `while_op_output` for the call
# to `supports_default_grad` because `while_op_output` may be missing
# handle_data if the While is in a restored saved model.
if (while_op_output.dtype in (dtypes.resource, dtypes.variant) and
default_gradient.supports_default_grad(while_op_input) and grad is None):
return _zeros_like(while_op_input, while_op_output)
return grad
# TODO(skyewm): make this return constants if op_output's shape is fully
# defined (this can be done by checking the "shape" attr of resource vars).
def _zeros_like(op_input, op_output):
"""Like array_ops.zeros_like() but also accepts resource var handles."""
if op_output.dtype == dtypes.resource:
# Note: We use `op_input` instead of `op_output` to get the zeros dtype
# because `op_output` may be missing handle_data if the While is in a
# restored saved model.
return array_ops.zeros(
gen_resource_variable_ops.variable_shape(op_output),
dtype=default_gradient.get_zeros_dtype(op_input))
return array_ops.zeros_like(op_output)
def _is_trainable(tensor):
"""Returns whether the given tensor is trainable."""
if not backprop_util.IsTrainable(tensor):
return False
# Special case: untrainable accumulator output. The gradients algorithm
# doesn't know about tensor lists of untrainable elements. In theory the
# tensor list gradient functions should return None as appropriate, but
# because we can't return None from the gradient function we filter out
# untrainable accumulator output here to avoid computing the gradient at all.
if tensor.op.type == "TensorListPopBack" and tensor.value_index == 0:
assert tensor.dtype == dtypes.variant
element_type = tensor.op.get_attr("element_dtype")
return backprop_util.IsTrainable(element_type)
return True
def _get_graph(while_op, func_attr_name, attr_graph_name):
"""Returns `FuncGraph` for the given function attribute.
Args:
while_op: The While Operation.
func_attr_name: string
attr_graph_name: cached forward graph name
Returns:
`FuncGraph`
"""
func_graph = getattr(while_op, attr_graph_name, None)
if func_graph is None:
# TODO(srbs): Handle TensorShapeProto in function_def_to_graph.input_shapes.
input_shapes = [
tensor_shape.TensorShape(s) for s in while_op.get_attr("output_shapes")
]
func_name = while_op.get_attr(func_attr_name).name
func_graph = util.get_func_graph(while_op, input_shapes, func_name)
func_graph._while = while_op
return func_graph
def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op,
maximum_iterations):
"""Builds and returns the gradient FuncGraph of `func_graph` and its args.
The returned grad_func_graph must be called with the returned
args + grad_func_graph.captures.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grads: The incoming grads for `ys`.
cond_graph: FuncGraph for the forward cond function.
body_graph: FuncGraph for the forward body function.
name: Name of the returned gradient function.
while_op: The forward While op.
maximum_iterations: Tensor. The maximum number of iterations.
Returns:
2-tuple of (grad_func_graph, args).
"""
assert len(ys) == len(grads)
total_iters = while_op.outputs[0]
counter = constant_op.constant(
0, dtype=total_iters.dtype, name="grad_counter")
# Build frozen sets so that we do not have linear time lookups in
# `_is_loop_invariant`. Note: `body_graph.inputs` and `body_graph.outputs`
# may get updated during gradient computation because we add accumulators to
# the forward op. However, those are not loop invariants so wouldn't affect
# the output of `_is_loop_invariant`. Also we would never attempt to capture
# those accumulators so `_is_loop_invariant` should never receive those new
# tensors as args.
body_graph_inputs = object_identity.ObjectIdentitySet(body_graph.inputs)
body_graph_outputs = object_identity.ObjectIdentitySet(body_graph.outputs)
args = [counter, maximum_iterations, total_iters] + list(grads)
# Note: The returned function does not have `args` in the list of
# `external_captures`.
grad_func_graph = func_graph_module.func_graph_from_py_func(
name,
lambda *args: _grad_fn(ys, xs, args, body_graph),
args, {},
func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph,
maximum_iterations, while_op,
body_graph_inputs, body_graph_outputs))
# Update the list of outputs with tensors corresponding to the captured
# tensors. We capture 3 types of tensors when building the grad fn:
# 1. Accumulators for forward graph intermediates which are not loop
# invariants. The outputs corresponding to these are populated in
# `popped_tensor_lists` by `_WhileBodyGradFuncGraph`.
# 2. Resources, which are output as is.
# 3. Forward graph loop invariants, which are output as is.
for external_capture, internal_capture in grad_func_graph.captures:
if ops.tensor_id(internal_capture) in grad_func_graph.popped_tensor_lists:
new_output = grad_func_graph.popped_tensor_lists[ops.tensor_id(
internal_capture)]
elif (internal_capture.dtype == dtypes.resource or _is_loop_invariant(
external_capture, body_graph_inputs, body_graph_outputs)):
new_output = internal_capture
else:
raise ValueError("Tensor %s which captures %s is in list of "
"internal_captures but is not a resource, is not in "
"popped_tensor_lists and does not capture a loop "
"invariant." %
(str(internal_capture), str(external_capture)))
grad_func_graph.outputs.append(new_output)
grad_func_graph.structured_outputs.append(new_output)
return grad_func_graph, args
def _grad_fn(ys, xs, args, func_graph):
"""Computes the gradient of `func_graph` in the current graph.
This function builds the gradient graph of the corresponding forward-pass
`func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
args: The input arguments.
args[0] - Loop counter
      args[1] - maximum_iterations.
      args[2] - Total number of iterations.
args[3:] - Incoming gradients for `ys`.
func_graph: function.FuncGraph. The corresponding forward-pass function.
Returns:
The output gradient Tensors.
"""
grad_ys = args[3:]
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
# after the forward While op has been rewritten in _resolve_grad_captures.
# TODO(srbs): Mark GradientsHelper as public?
grad_outs = gradients_util._GradientsHelper(
ys, xs, grad_ys=grad_ys, src_graph=func_graph,
unconnected_gradients="zero")
# TODO(b/118712257): Handle the case when grad_outs has None's e.g. when there
# is a tf.StopGradient in the loop body.
assert all(g is not None for g in grad_outs)
counter = args[0]
maximum_iterations = args[1]
total_iters = args[2]
return [counter + 1, maximum_iterations, total_iters] + grad_outs
def _resolve_grad_captures(body_graph, body_grad_graph, while_op):
"""Returns the tensors to pass as captured inputs to `body_grad_graph`.
`body_grad_graph` may have external references to:
1. Its outer graph containing the input gradients. These are left as-is.
2. Accumulators captured from the forward-pass graph. These should have been
added as `while_op` outputs after the gradient graph was built. We replace
these with the corresponding output of `while_op`, i.e. a tensor in
`body_graph.outer_graph`. In the case of nested control flow or functions,
the gradient logic handling `body_grad_graph.outer_graph` will make sure
the tensor from `body_graph.outer_graph` is also correctly captured.
Args:
body_graph: FuncGraph. The forward-pass body function.
body_grad_graph: FuncGraph. The body gradients function.
while_op: The forward-pass While Operation calling `body_graph`.
Returns:
A list of input tensors to be passed as the captured inputs to
`body_grad_graph`.
"""
new_capture_inputs = []
for t in body_grad_graph.external_captures:
# All values captured by gradient computation should be from the forward
# graph or a captured resource variable (note that input gradients are
# regular non-captured inputs).
if t.graph == body_graph:
# Captured accumulator or loop invariant.
for i, output in enumerate(t.graph.outputs):
if output is t:
t = while_op.outputs[i]
break
# Note: We rely on the capturing logic of the gradient While op graph to
# correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2
# and while_v2 handle this while building their gradient functions.
assert t.graph == body_graph.outer_graph
else:
# Captured resource variable
assert t.dtype == dtypes.resource
new_capture_inputs.append(t)
return new_capture_inputs
def _get_structured_grad_output(outputs, grads, body_grad_graph):
"""Returns the values that should be returned from the while grad function.
Args:
outputs: the raw Tensor outputs of the grad While op.
grads: the input gradients to the gradient function.
body_grad_graph: _WhileBodyGradFuncGraph.
Returns:
A list of gradient values. May include Nones.
"""
result = []
# outputs[0] is the loop counter.
# outputs[1] is maximum_iterations.
# outputs[2] is the total number of loop iterations.
outputs_idx = 3
structured_outputs_idx = 3
for g in grads:
# Set None as the output gradient for tensors with None input gradient.
if g is None:
result.append(None)
continue
output = body_grad_graph.structured_outputs[structured_outputs_idx]
structured_outputs_idx += 1
if isinstance(output, ops.IndexedSlices):
# TODO(skyewm): is there a more robust way to determine the order of
# flattened IndexedSlices components?
result.append(ops.IndexedSlices(
values=outputs[outputs_idx],
indices=outputs[outputs_idx + 1],
dense_shape=outputs[outputs_idx + 2]))
outputs_idx += 3
else:
assert isinstance(output, ops.Tensor)
result.append(outputs[outputs_idx])
outputs_idx += 1
return result
def _get_accumulator(tensor):
r"""Returns TensorList if any containing accumulated values of tensor.
We try to find a pattern of the form:
     input_tl   tensor
        \         /
      (TensorListPushBack)
              |
          output_tl
which satisfies the following conditions:
1. input_tl must be in tensor.graph.inputs.
2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.
  3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_tl).
  output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is
  returned if such a pattern is found, else None is returned.
Args:
tensor: The Tensor to be accumulated.
Returns:
A variant tensor in the same graph as `tensor` or None if no accumulator is
found.
"""
assert isinstance(tensor.graph, func_graph_module.FuncGraph)
def get_func_graph_output(t):
"""Returns t or Identity(t) whichever exists in graph outputs else None."""
for output in tensor.graph.outputs:
if output is t:
return t
# tf.defun adds an Identity for each output, check whether that is the case.
identity_op = t.consumers()[0]
if (identity_op.type == "Identity" and
identity_op.outputs[0] in tensor.graph.outputs):
return identity_op.outputs[0]
return None
for consumer in tensor.consumers():
# Find the consumer that is a TensorListPushBack node whose TensorList input
# is in the list of function inputs.
if consumer.type != "TensorListPushBack":
continue
accum_input_idx = -1
for accum_input_idx, inp in enumerate(tensor.graph.inputs):
if inp is consumer.inputs[0]:
break
else:
continue
output = get_func_graph_output(consumer.outputs[0])
if output is None:
# The TensorList output of `consumer` is not in the list of function
# outputs.
continue
for accum_output_idx, out in enumerate(tensor.graph.outputs):
if out is output:
if accum_input_idx == accum_output_idx:
return output
break
return None
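# Illustrative shape of the pattern _get_accumulator looks for inside a
# forward body FuncGraph (input_tl here is hypothetical shorthand for
# body_graph.inputs[k]):
#
#   output_tl = list_ops.tensor_list_push_back(input_tl, tensor)
#   # ... with output_tl (or Identity(output_tl)) ending up at
#   # body_graph.outputs[k], i.e. at the same index k.
#
# Only when the input and output indices line up is output_tl treated as the
# accumulator for `tensor`; otherwise None is returned and the gradient
# machinery creates a fresh accumulator.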
class _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph):
"""FuncGraph for the gradient function of the body of a While op.
Contains the logic for capturing the tensors from the body of the forward
While op which is as follows:
1. If the tensor is of resource type (these are not accumulated):
a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop
inputs and outputs at the same index.
b. Lookup the corresponding resource tensor in the forward outer graph and
try to capture that.
2. If the tensor is not of resource type:
a. Create an accumulator for that tensor and output it from the forward
pass. Note this also requires adding it as an input to the forward pass.
b. Capture the accumulator from the forward pass in this FuncGraph. This
will later be resolved to the correct output of the forward While op.
c. Pop a value from the captured placeholder and use it as the captured
value for the forward pass tensor.
This only allows capturing tensors in the forward graph. A ValueError is
raised if an attempt is made to capture a tensor not in the forward graph.
  To manually capture a tensor that is not in the forward graph, call
`capture` with `whitelisted=True`.
Note: The `captures` dict does not contain the forward tensor since it is not
directly captured. It contains the accumulator corresponding to this forward
tensor.
Attributes:
while_op_needs_rewrite: True if any non-resource intermediates were
captured, meaning the forward While op needs to be rewritten to output the
corresponding accumulators.
extra_inputs: list of EmptyTensorList tensors to be used as initial input to
the new accumulators in the forward graph. It may also contain external
captures of the custom gradient function.
popped_tensor_lists: dict from the captured accumulator placeholder to the
TensorList obtained after popping the intermediate tensor from it. The
values of this dict need to be added to the list of outputs.
"""
def __init__(self, name, forward_cond_graph, forward_body_graph,
maximum_iterations, forward_while_op, body_graph_inputs,
body_graph_outputs):
super(_WhileBodyGradFuncGraph, self).__init__(name)
self.extra_inputs = []
self.popped_tensor_lists = {}
# FuncGraph for the body of the forward While op.
self._forward_graph = forward_body_graph
# FuncGraph for the cond of the forward While op.
self._forward_cond_graph = forward_cond_graph
self._maximum_iterations = maximum_iterations
self._forward_while_op = forward_while_op
# Only for use in `_is_loop_invariant`. These are not updated when
# additional tensors are added to `forward_body_graph.inputs` and
# `forward_body_graph.outputs` in `_capture_helper`.
self._forward_graph_inputs = body_graph_inputs
self._forward_graph_outputs = body_graph_outputs
# Dict from forward intermediate tensor to its indirectly captured tensor
# in this graph. Indirect capturing happens in two ways:
# 1. For non-resource tensors we capture their accumulators from the forward
# outer graph and pop values from that accumulator inside this graph
# using TensorListPopBack.
# 2. For resource tensors we directly capture their corresponding tensor
# in the forward outer graph.
self._indirect_captures = {}
@property
def while_op_needs_rewrite(self):
return self.extra_inputs
def capture(self, tensor, name=None, whitelisted=False):
"""Selectively captures external tensors.
If `whitelisted` is False only allows capturing tensors in the
`_forward_graph`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
whitelisted: If False (default), only allows capturing tensors from the
forward graph.
Returns:
The placeholder in this graph for the tensor.
Raises:
ValueError: If attempting to capture an external tensor not in the forward
graph with `whitelisted` set to False.
"""
if not whitelisted and (isinstance(tensor, ops.EagerTensor) or
(tensor.graph is not self and
tensor.graph != self._forward_graph)):
with self._forward_cond_graph.as_default():
self._forward_cond_graph.capture(tensor)
with self._forward_graph.as_default():
already_captured = self._forward_graph.captured(tensor)
if not already_captured:
self.extra_inputs.append(tensor)
tensor = self._forward_graph.capture(tensor)
if not already_captured:
self._forward_graph.outputs.append(tensor)
return super(_WhileBodyGradFuncGraph, self).capture(tensor, name)
def _capture_helper(self, tensor, name):
if tensor.graph is not self._forward_graph:
return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name)
while tensor.op.type == "Identity":
# We do not accumulate the output of identity nodes so we try to capture
# the input of the Identity node instead.
tensor = tensor.op.inputs[0]
captured_tensor = self._indirect_captures.get(ops.tensor_id(tensor))
if captured_tensor is not None:
return captured_tensor
# Do not accumulate loop invariants.
if (any(tensor is t for t in self._forward_graph.inputs) and
any(tensor is t for t in self._forward_graph.outputs)):
captured_tensor = super(_WhileBodyGradFuncGraph,
self)._capture_helper(tensor, name)
# Add to `popped_tensor_lists` so that this gets added to the list of
# outputs.
# TODO(srbs): Rename popped_tensor_lists.
self.popped_tensor_lists[ops.tensor_id(captured_tensor)] = captured_tensor
self._indirect_captures[ops.tensor_id(tensor)] = captured_tensor
return captured_tensor
# Do not accumulate Const nodes. Instead copy them directly in the backward
# graph.
# TODO(srbs): This just checks for `Const` nodes. Consider checking for
# graph compile time consts in general.
# TODO(srbs): Consider making this a loop input.
if constant_op.is_constant(tensor):
real_value = constant_op.constant(
tensor_util.constant_value(tensor), dtype=tensor.dtype)
self._indirect_captures[ops.tensor_id(tensor)] = real_value
return real_value
# Resource tensors are not accumulated and handled specially.
if tensor.dtype == dtypes.resource:
return self._resource_capture_helper(tensor)
# No need to accumulate loop invariants. Capture them directly.
# The captured tensor gets resolved to the corresponding while output in
# `_resolve_grad_captures`.
if _is_loop_invariant(tensor, self._forward_graph_inputs,
self._forward_graph_outputs):
captured_tensor = super(_WhileBodyGradFuncGraph,
self)._capture_helper(tensor, name)
return captured_tensor
# Create or find an existing accumulator output for `tensor` in the forward
# graph, and fetch from this accumulator in the gradient graph to get the
# raw intermediate value.
accumulator = _get_accumulator(tensor)
if accumulator is None:
# Create the initial empty tensor list.
#
# Note: We clear the control dependencies to avoid a cycle in case a
# control tensor has an input path to an output of the forward While.
#
# E.g.:
# x = tf.while_loop(...)
# y = f(x)
# with tf.control_dependencies([y]):
# tf.gradients(y, x)
#
# Since the EmptyTensorList is fed back into the forward While, not
# removing the control edge would cause a cycle.
with self._forward_graph.outer_graph.as_default():
with util.clear_control_inputs():
tensor_list = list_ops.empty_tensor_list(
element_dtype=tensor.dtype,
element_shape=tensor.shape,
max_num_elements=self._maximum_iterations,
name=_build_accumulator_name(tensor))
self.extra_inputs.append(tensor_list)
# Push the intermediate tensor to the tensor list. This captures
# `tensor_list`.
with self._forward_graph.as_default():
accumulator = list_ops.tensor_list_push_back(tensor_list, tensor)
# Add the modified tensor list to the list of outputs. This output will be
# all the accumulated values.
self._forward_graph.outputs.append(accumulator)
# Capture in the cond graph as well so the forward cond and body inputs
# match.
with self._forward_cond_graph.as_default():
self._forward_cond_graph.capture(tensor_list)
# Capture the accumulator tensor list in the gradient graph directly from
# the forward graph -- we'll later modify this to capture the final list
# output by the forward While op instead.
captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper(
accumulator, name)
# Pop the intermediate value from the tensor list in the gradient graph.
new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back(
captured_accumulator, element_dtype=tensor.dtype)
self._indirect_captures[ops.tensor_id(tensor)] = captured_tensor
self.popped_tensor_lists[ops.tensor_id(
captured_accumulator)] = new_tensor_list
return captured_tensor
def _resource_capture_helper(self, tensor):
"""Returns the captured resource tensor.
Resource-type tensors are not accumulated. If a resource tensor exists in
the loop body it must either be a loop input or an output of a nested While
op inside the loop body which had captured the external resource.
Args:
tensor: the external resource Tensor to be captured.
Returns:
Tensor in this graph.
"""
assert tensor.dtype == dtypes.resource
index = util.resource_input_index(
tensor.name, [t.name for t in self._forward_graph.inputs],
{op.name: op.node_def for op in self._forward_graph.get_operations()},
self._forward_graph._functions)
input_placeholder = self._forward_graph.inputs[index]
tensor_in_outer_graph = self._forward_graph._while.inputs[index]
assert input_placeholder.dtype == dtypes.resource
assert tensor_in_outer_graph.dtype == dtypes.resource
# This must be a loop invariant.
assert input_placeholder is self._forward_graph.outputs[index], (
"Resource tensors must be loop invariants %s." % tensor_in_outer_graph)
self._indirect_captures[ops.tensor_id(tensor)] = self.capture(
tensor_in_outer_graph, whitelisted=True)
return self._indirect_captures[ops.tensor_id(tensor)]
def _check_shapes_compat(output_tensors, shape_invariants, input_tensors):
for (t, shape, input_t) in zip(output_tensors, shape_invariants,
input_tensors):
if not control_flow_ops._ShapeLessThanOrEqual(t.shape, shape):
raise ValueError(
"Input tensor '%s' enters the loop with shape %s, but has "
"shape %s after one iteration. To allow the shape to vary across "
"iterations, use the `shape_invariants` argument of tf.while_loop to "
"specify a less-specific shape." % (input_t.name, shape, t.shape))
def _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars):
"""Checks the number of inputs/outputs of `cond_graph` and `body_graph`."""
assert len(cond_graph.inputs) == num_flattened_loop_vars, (
"cond_graph takes %d inputs; Expected: %d" % (len(cond_graph.inputs),
num_flattened_loop_vars))
assert len(cond_graph.outputs) == 1, (
"cond_graph has %d outputs; Expected: 1" % len(cond_graph.outputs))
assert len(body_graph.inputs) == num_flattened_loop_vars, (
"body_graph takes %d inputs; Expected: %d" % (len(body_graph.inputs),
num_flattened_loop_vars))
assert len(body_graph.outputs) == num_flattened_loop_vars, (
"body_graph has %d outputs; Expected: %d" % (len(body_graph.outputs),
num_flattened_loop_vars))
def _check_inputs_outputs_types_match(body_graph, flattened_loop_vars):
for inp, out, loop_var in zip(body_graph.inputs, body_graph.outputs,
flattened_loop_vars):
if inp.dtype != out.dtype:
raise TypeError("Loop var {} enters the loop with type {} "
"but has type {} after 1 iteration.".format(
loop_var.name, inp.dtype, out.dtype))
def _build_cond_placeholders_name_prefix(cond_graph):
return cond_graph.unique_name(cond_graph.name + "___redundant_placeholder")
def _duplicate_body_captures_in_cond(cond_graph, body_graph_captures):
"""Creates placeholders for body captures in cond_graph.
This is needed to match signatures of cond and body graphs.
Args:
cond_graph: cond branch graph
body_graph_captures: Tensors which were captured when building the
`body_graph`.
"""
types = [t.dtype.as_datatype_enum for t in body_graph_captures]
# TODO(srbs): Providing a unique prefix does not ensure that there is no
# conflict between the placeholder names and existing nodes in the graph.
# However passing a list of strings may not be performant.
# Ideally we should move `Graph.unique_name` to C++ or make
# `Graph._names_in_use` a trie so that we can find a unique prefix.
# TODO(b/143286622): This should not be required once captures are separated
# from regular loop vars.
placeholders = c_api.TF_CreatePlaceholders(
cond_graph._c_graph, types,
compat.as_str(_build_cond_placeholders_name_prefix(cond_graph)))
placeholder_ops = [
_OperationWithOutputs(ph.oper, cond_graph)
for ph in placeholders
]
tensors = []
for op, ph, dtype in zip(placeholder_ops, placeholders, types):
tensor = ops.Tensor._create_with_tf_output(op, 0, dtype, ph)
op._outputs = [tensor]
tensors.append(tensor)
# Update `cond_graph._captures` and `cond_graph.inputs` to contain the
# newly created placeholders.
tuples = zip(body_graph_captures, tensors)
keys = [id(t) for t in body_graph_captures]
cond_graph._captures.update(zip(keys, tuples))
cond_graph.inputs.extend(tensors)
def _copy_handle_data(src_tensors, tgt_tensors):
for src_t, tgt_t in zip(src_tensors, tgt_tensors):
custom_gradient.copy_handle_data(src_t, tgt_t)
def _graph_name(graph):
if isinstance(graph, func_graph_module.FuncGraph):
return graph.name
return "Base"
def _pack_sequence_as(structure_with_tas, loop_vars):
"""Like `nest.pack_sequence_as` but also replaces flows with TensorArrays."""
def flow_to_tensor_array(flow, ta): # pylint: disable=missing-docstring
return (tensor_array_ops.build_ta_with_new_flow(ta, flow) if isinstance( # pylint: disable=g-long-ternary
ta, tensor_array_ops.TensorArray) else flow)
flattened_loop_vars = [
flow_to_tensor_array(*z)
for z in zip(nest.flatten(loop_vars, expand_composites=True),
nest.flatten(structure_with_tas, expand_composites=True))
]
return nest.pack_sequence_as(structure_with_tas, flattened_loop_vars,
expand_composites=True)
def _tensor_array_to_flow(loop_vars):
def f(maybe_ta):
if isinstance(maybe_ta, tensor_array_ops.TensorArray):
return maybe_ta.flow
return maybe_ta
return nest.map_structure(f, loop_vars, expand_composites=True)
def _build_maximum_iterations_loop_var(maximum_iterations):
if maximum_iterations is None:
# Default value for max_num_elements to EmptyTensorList meaning that the
# list size is unbounded.
maximum_iterations = -1
# EmptyTensorList expects `max_num_elements` to be of type int32.
return ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
def _build_accumulator_name(tensor):
# Tensor name may be of the form "pow/y:0". Name scope does not allow ":".
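  # e.g. "pow/y:0" -> "pow/y_0/accumulator".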
return "{}/accumulator".format(tensor.name).replace(":", "_")
def _is_loop_invariant(tensor, inputs, outputs):
return tensor in inputs and tensor in outputs
class _OperationWithOutputs(ops.Operation):
"""Operation with pre-built `TF_Output`s.
The C API for creating the extra placeholders for the cond graph returns
SWIG wrapped TF_Output* pointers which we can use directly for
`Operation.outputs`. The default constructor for `Operation` does not provide
a way of specifying pre-built output tensors and always creates them. This is
a performance overhead. It is not clear if adding that feature to the
`Operation` API would be generally useful so for now we just have our own
lightweight `Operation` implementation. Note that this does not extract a
stacktrace as well since we don't expect this operation to be used.
TODO(b/143286622): This should not be required once captures are separated
from regular loop vars.
"""
def __init__(self, c_op, g):
self._c_op = c_op
self._graph = g
self._outputs = None # Initialized by _duplicate_body_captures_in_cond().
self._id_value = g._add_op(self, self.name)
self._is_stateful = False
# pylint: enable=protected-access
|
{
"content_hash": "7591d5e4f42c77d085a7fa597d3d6fc6",
"timestamp": "",
"source": "github",
"line_count": 1216,
"max_line_length": 110,
"avg_line_length": 43.0296052631579,
"alnum_prop": 0.689377723415641,
"repo_name": "arborh/tensorflow",
"id": "a4ef39038a1c36d4b896ef39d18c23e74313f6d1",
"size": "53012",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/while_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76730781"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299305"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38757009"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
import os.path, os
import ujson as json
from flask import Flask, make_response, url_for, redirect, render_template, request, send_from_directory
from flask_mail import Mail
from flask_user import current_user, UserManager, SQLAlchemyAdapter
import pytz
#import yaml
#import redis
#import requests
from classes import blueprint as classes_blueprint
from discussion import blueprint as discussion_blueprint
from user import blueprint as user_blueprint
from models import *
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'lWxOiKqKPNwJmSldbiSkEbkNjgh2uRSNAb+SK00P3R')
SQLALCHEMY_DATABASE_URI = "sqlite:///app.db"
# SQLALCHEMY_ECHO = True # print all SQL requests
CSRF_ENABLED = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'reg@skuuper.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'SophisticatedRegistrator69')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"ClassInTouch" <reg@skuuper.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.zone.ee')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = 'ClassInTouch' # Used in email templates
UPLOAD_FOLDER = '/tmp'
# Custom variables
DATABASE_TYPE = "sqlite3"
SESSION_DURATION = 86400
BONUS_TIMEOUT = 115200
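# Hedged sketch, not part of the original app: ConfigClass reads os.getenv() at
# class-definition time, so environment overrides must exist before this module
# is imported; once app.config.from_object() has run (see create_app below), the
# effective values can be read back from app.config as shown here.
def _example_effective_mail_server(flask_app):
    # 'MAIL_SERVER' falls back to 'smtp.zone.ee' when no override is present.
    return flask_app.config['MAIL_SERVER']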
def create_app():
""" Flask application factory """
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
app.config.update(
DEBUG=True,
FILES_ROOT='.',
)
app.register_blueprint(discussion_blueprint)
app.register_blueprint(classes_blueprint)
app.register_blueprint(user_blueprint)
db.app = app
db.init_app(app)
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User) # Register the User model
user_manager = UserManager(db_adapter, app) # Initialize Flask-User
db.session.commit()
return app
app = create_app()
mail = Mail(app)
def change_tz(user, time):
    # Use the timezone of the user passed in, not the global current_user.
    tz = pytz.timezone(user.timezone)
return time.replace(tzinfo=pytz.timezone('UTC')).astimezone(tz)
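# Hedged usage sketch, not part of the original app: convert a naive UTC
# datetime into a user's local zone via change_tz(). The 'Europe/Tallinn'
# timezone and the fake user object are assumptions for illustration only.
def _example_change_tz():
    from datetime import datetime
    class _FakeUser(object):
        timezone = 'Europe/Tallinn'
    return change_tz(_FakeUser(), datetime(2017, 1, 1, 12, 0))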
@app.route('/')
def main():
resp = make_response(render_template('index.html'))
# track all users
cookie = request.cookies.get("token", None)
if not cookie:
resp.set_cookie("token", generate_token())
#resp.set_cookie("anonymous", str(get_user_state()))
#resp.mimetype = 'application/json'
return resp
@app.route('/favicon.ico')
@app.route('/favicon.png')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/png')
if __name__ == '__main__':
# Create all database tables
# print app.url_map # some debug
app.run(host='0.0.0.0', port=5100, debug=True)
|
{
"content_hash": "d02a90f48f03318d8535d6b1c19da84b",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 104,
"avg_line_length": 30.87878787878788,
"alnum_prop": 0.6833496892378148,
"repo_name": "NetBUG/classintouch",
"id": "e88a238800c11499a35e32c4b7b1c58963b62553",
"size": "3178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1640"
},
{
"name": "CSS",
"bytes": "137129"
},
{
"name": "HTML",
"bytes": "99880"
},
{
"name": "JavaScript",
"bytes": "122072"
},
{
"name": "Objective-C",
"bytes": "23284"
},
{
"name": "Python",
"bytes": "24261"
},
{
"name": "Ruby",
"bytes": "133"
},
{
"name": "Shell",
"bytes": "29"
},
{
"name": "Swift",
"bytes": "67416"
}
],
"symlink_target": ""
}
|
def _status(device, **kwarg):
#print device.full()
pass
def _config(device, **kwarg):
#print device.full()
pass
# function variables holding current callbacks
status = _status
config = _config
def register_status_cb(callback):
global status
if callback:
def newstatus(device, **kwarg):
try:
callback(device, kwarg)
except:
pass
status = newstatus
else:
status = _status
def register_config_cb(callback):
global config
if callback:
def newconfig(device, **kwarg):
try:
callback(device, kwarg)
except:
pass
config = newconfig
else:
config = _config
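# Hedged usage sketch, not part of the original module: register a status
# callback and dispatch through the module-level `status` hook. The _FakeDevice
# class and the `value` keyword are invented for illustration.
def _example_register_status_cb():
    seen = []
    def my_callback(device, extra):
        seen.append((device, extra))
    register_status_cb(my_callback)
    class _FakeDevice(object):
        pass
    status(_FakeDevice(), value=1)  # forwards to my_callback(device, {'value': 1})
    return seen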
|
{
"content_hash": "6509ecb2e1b0cde2375891af89b7b590",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 46,
"avg_line_length": 18.047619047619047,
"alnum_prop": 0.5435356200527705,
"repo_name": "UniPiTechnology/evok",
"id": "cbf86b7a9afd70e56870243c65f127a7b83c3ecd",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "evok/devents.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "896959"
},
{
"name": "Dockerfile",
"bytes": "951"
},
{
"name": "HTML",
"bytes": "5251"
},
{
"name": "JavaScript",
"bytes": "52771"
},
{
"name": "Makefile",
"bytes": "5494"
},
{
"name": "Python",
"bytes": "653387"
},
{
"name": "Roff",
"bytes": "481694"
},
{
"name": "Shell",
"bytes": "70720"
},
{
"name": "Vim Script",
"bytes": "641"
}
],
"symlink_target": ""
}
|
from typing import Callable
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from confirmation.models import Confirmation, ConfirmationKeyError, get_object_from_key
from zerver.actions.user_settings import do_change_user_setting
from zerver.context_processors import common_context
from zerver.lib.send_email import clear_scheduled_emails
from zerver.models import ScheduledEmail, UserProfile
def process_unsubscribe(
request: HttpRequest,
confirmation_key: str,
subscription_type: str,
unsubscribe_function: Callable[[UserProfile], None],
) -> HttpResponse:
try:
user_profile = get_object_from_key(
confirmation_key, [Confirmation.UNSUBSCRIBE], mark_object_used=False
)
except ConfirmationKeyError:
return render(request, "zerver/unsubscribe_link_error.html")
assert isinstance(user_profile, UserProfile)
unsubscribe_function(user_profile)
context = common_context(user_profile)
context.update(subscription_type=subscription_type)
return render(request, "zerver/unsubscribe_success.html", context=context)
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile: UserProfile) -> None:
do_change_user_setting(
user_profile, "enable_offline_email_notifications", False, acting_user=user_profile
)
def do_welcome_unsubscribe(user_profile: UserProfile) -> None:
clear_scheduled_emails(user_profile.id, ScheduledEmail.WELCOME)
def do_digest_unsubscribe(user_profile: UserProfile) -> None:
do_change_user_setting(user_profile, "enable_digest_emails", False, acting_user=user_profile)
def do_login_unsubscribe(user_profile: UserProfile) -> None:
do_change_user_setting(user_profile, "enable_login_emails", False, acting_user=user_profile)
def do_marketing_unsubscribe(user_profile: UserProfile) -> None:
do_change_user_setting(user_profile, "enable_marketing_emails", False, acting_user=user_profile)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe),
"login": ("login", do_login_unsubscribe),
"marketing": ("marketing", do_marketing_unsubscribe),
}
# Login NOT required. These are for one-click unsubscribes.
@csrf_exempt
def email_unsubscribe(request: HttpRequest, email_type: str, confirmation_key: str) -> HttpResponse:
if email_type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[email_type]
return process_unsubscribe(request, confirmation_key, display_name, unsubscribe_function)
return render(request, "zerver/unsubscribe_link_error.html")
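# Hedged sketch, not part of the Zulip codebase: a hypothetical extra category
# would follow the same (display name, unsubscribe function) shape documented
# above; the "onboarding" key is invented and simply reuses the welcome handler.
def _example_register_onboarding_unsubscribe() -> None:
    email_unsubscribers["onboarding"] = ("onboarding", do_welcome_unsubscribe)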
|
{
"content_hash": "d3f55381ab4b9d03398fcf7b2a42cf1a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 100,
"avg_line_length": 39.265822784810126,
"alnum_prop": 0.7537072856221793,
"repo_name": "zulip/zulip",
"id": "b071bdc90d9f746343986b5afcce9dde6323925c",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/views/unsubscribe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509211"
},
{
"name": "Dockerfile",
"bytes": "4219"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "696430"
},
{
"name": "Handlebars",
"bytes": "384277"
},
{
"name": "JavaScript",
"bytes": "4098367"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112433"
},
{
"name": "Python",
"bytes": "10336945"
},
{
"name": "Ruby",
"bytes": "3166"
},
{
"name": "Shell",
"bytes": "147162"
},
{
"name": "TypeScript",
"bytes": "286785"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from django.utils.translation import ugettext_lazy as _
from rapidsms.contrib.handlers.handlers.keyword import KeywordHandler
class AppointmentHandler(KeywordHandler):
"Base keyword handler for the APPT prefix."
prefix = 'APPT'
form = None
success_text = ''
@classmethod
def _keyword(cls):
if hasattr(cls, "keyword"):
pattern = r"^\s*(?:%s)\s*(?:%s)(?:[\s,;:]+(.+))?$" % (cls.prefix, cls.keyword)
else:
pattern = r"^\s*(?:%s)\s*?$" % cls.prefix
return re.compile(pattern, re.IGNORECASE)
def handle(self, text):
"Parse text, validate data, and respond."
parsed = self.parse_message(text)
form = self.form(data=parsed, connection=self.msg.connection)
if form.is_valid():
params = form.save()
self.respond(self.success_text % params)
else:
error = form.error()
if error is None:
self.unknown()
else:
self.respond(error)
return True
def help(self):
"Return help mesage."
if self.help_text:
keyword = self.keyword.split('|')[0].upper()
help_text = self.help_text % {'prefix': self.prefix, 'keyword': keyword}
self.respond(help_text)
def unknown(self):
"Common fallback for unknown errors."
keyword = self.keyword.split('|')[0].upper()
params = {'prefix': self.prefix, 'keyword': keyword}
self.respond(_('Sorry, we cannot understand that message. '
'For additional help send: %(prefix)s %(keyword)s') % params)
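# Hedged sketch, not part of the original package: a minimal subclass showing how
# AppointmentHandler is meant to be specialized. The keyword, form placeholder,
# and message strings below are invented for illustration only.
class ExampleNewHandler(AppointmentHandler):
    "Handle messages of the form: APPT NEW <name>."
    keyword = 'new|n'
    form = None  # a real handler would point this at a Django form class
    help_text = _('To create an appointment send: %(prefix)s %(keyword)s <name>')
    success_text = _('Thanks, your appointment was registered.')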
|
{
"content_hash": "b2f158730b25b2f5bc81f1955592068d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 90,
"avg_line_length": 32.40384615384615,
"alnum_prop": 0.5750741839762611,
"repo_name": "caktus/rapidsms-appointments",
"id": "4b0b4da35c5fb96591c180499040700125245fc5",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appointments/handlers/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "118181"
},
{
"name": "Shell",
"bytes": "5126"
}
],
"symlink_target": ""
}
|
from six import moves
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class CreateRegisterImagesTest(base.BaseV1ImageTest):
"""Here we test the registration and creation of images."""
@test.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
body = self.create_image(name='New Name',
container_format='bare',
disk_format='raw',
is_public=False,
properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
# Now try uploading an image file
image_file = moves.cStringIO(data_utils.random_bytes())
body = self.client.update_image(image_id, data=image_file)['image']
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
def test_register_remote_image(self):
# Register a new remote image
body = self.create_image(name='New Remote Image',
container_format='bare',
disk_format='raw', is_public=False,
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
properties = body.get('properties')
self.assertEqual(properties['key1'], 'value1')
self.assertEqual(properties['key2'], 'value2')
@test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
def test_register_http_image(self):
body = self.create_image(name='New Http Image',
container_format='bare',
disk_format='raw', is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
self.client.show_image(image_id)
@test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
def test_register_image_with_min_ram(self):
# Register an image with min ram
properties = {'prop1': 'val1'}
body = self.create_image(name='New_image_with_min_ram',
container_format='bare',
disk_format='raw',
is_public=False,
min_ram=40,
properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
img1 = cls._create_remote_image('one', 'bare', 'raw')
img2 = cls._create_remote_image('two', 'ami', 'ami')
img3 = cls._create_remote_image('dup', 'bare', 'raw')
img4 = cls._create_remote_image('dup', 'bare', 'raw')
img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
cls.created_set = set(cls.created_images)
# 4x-4x remote image
cls.remote_set = set((img1, img2, img3, img4))
cls.standard_set = set((img5, img6, img7, img8))
# 5x bare, 3x ami
cls.bare_set = set((img1, img3, img4, img7, img8))
cls.ami_set = set((img2, img5, img6))
# 1x with size 42
cls.size42_set = set((img5,))
# 3x with size 142
cls.size142_set = set((img6, img7, img8,))
# dup named
cls.dup_set = set((img3, img4))
@classmethod
def _create_remote_image(cls, name, container_format, disk_format):
"""
Create a new remote image and return the ID of the newly-registered
image
"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False,
location=location)
image_id = image['id']
return image_id
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
        Create a new standard image and return the ID of the newly-registered
        image. The image data consists of `size` random bytes supplied by the
        caller.
"""
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file)
image_id = image['id']
return image_id
@test.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
def test_index_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.list_images()['images']
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
def test_index_disk_format(self):
images_list = self.client.list_images(disk_format='ami')['images']
for image in images_list:
self.assertEqual(image['disk_format'], 'ami')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.ami_set <= result_set)
self.assertFalse(self.created_set - self.ami_set <= result_set)
@test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
def test_index_container_format(self):
images_list = (self.client.list_images(container_format='bare')
['images'])
for image in images_list:
self.assertEqual(image['container_format'], 'bare')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.bare_set <= result_set)
self.assertFalse(self.created_set - self.bare_set <= result_set)
@test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
def test_index_max_size(self):
images_list = self.client.list_images(size_max=42)['images']
for image in images_list:
self.assertTrue(image['size'] <= 42)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size42_set <= result_set)
self.assertFalse(self.created_set - self.size42_set <= result_set)
@test.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
def test_index_min_size(self):
images_list = self.client.list_images(size_min=142)['images']
for image in images_list:
self.assertTrue(image['size'] >= 142)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size142_set <= result_set)
self.assertFalse(self.size42_set <= result_set)
@test.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
def test_index_status_active_detail(self):
images_list = self.client.list_images(detail=True,
status='active',
sort_key='size',
sort_dir='desc')['images']
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
self.assertTrue(size <= top_size)
top_size = size
self.assertEqual(image['status'], 'active')
@test.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
def test_index_name(self):
images_list = self.client.list_images(
detail=True,
name='New Remote Image dup')['images']
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
self.assertTrue(self.dup_set <= result_set)
self.assertFalse(self.created_set - self.dup_set <= result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
image.
"""
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file,
properties={'key1': 'value1'})
image_id = image['id']
return image_id
@test.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
def test_list_image_metadata(self):
# All metadata key/value pairs for an image should be returned
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'value1'}
self.assertEqual(expected, resp_metadata['properties'])
@test.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
def test_update_image_metadata(self):
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
metadata = self.client.get_image_meta(self.image_id)
self.assertEqual(metadata['properties'], {'key1': 'value1'})
metadata['properties'].update(req_metadata)
metadata = self.client.update_image(
self.image_id, properties=metadata['properties'])['image']
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'alt1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata['properties'])
|
{
"content_hash": "20ecef333f2a1445a1e5c732b35c7c98",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 77,
"avg_line_length": 44.64528301886792,
"alnum_prop": 0.5741695545600541,
"repo_name": "tudorvio/tempest",
"id": "7739d160295809782d98be73238b1203ff9929fa",
"size": "12467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/image/v1/test_images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2734396"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import threading
from netmiko import ConnectHandler
from net_system.models import NetworkDevice, Credentials
import django
def show_version(n_device):
creds = n_device.credentials
remote_conn = ConnectHandler(device_type=n_device.device_type, ip=n_device.ip_address,
username=creds.username, password=creds.password,
port=n_device.port)
print '*' * 100
print remote_conn.send_command("show version")
print '*' * 100
def main():
django.setup()
start_time = datetime.now()
network_devices = NetworkDevice.objects.all()
for n_device in network_devices:
# Creating and starting thread with show_version function
network_thread = threading.Thread(target=show_version, args=(n_device,))
network_thread.start()
main_thread = threading.currentThread()
for n_thread in threading.enumerate():
if n_thread != main_thread:
print n_thread
n_thread.join()
total_time = datetime.now() - start_time
print "Total time is :{}".format(total_time)
if __name__ == "__main__":
main()
|
{
"content_hash": "c85607592e2b0823890e0248307364f4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 90,
"avg_line_length": 27.511627906976745,
"alnum_prop": 0.6398985629754861,
"repo_name": "linkdebian/pynet_course",
"id": "6493744e9c56d2a4fdd1dec6126225a81140085f",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class8/exercise6.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20547"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class StoryConfig(AppConfig):
name = 'story'
|
{
"content_hash": "42dffef5457d900685e667bdb56aa5fc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17,
"alnum_prop": 0.7411764705882353,
"repo_name": "DjangoGirls/djangogirls",
"id": "d8c9522ac024b4fd44de1d15938e7c8f88c27d92",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "story/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "428291"
},
{
"name": "JavaScript",
"bytes": "13711"
},
{
"name": "Python",
"bytes": "422267"
},
{
"name": "Stylus",
"bytes": "32803"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
class BaseModelSerializer(serializers.ModelSerializer):
id = serializers.SerializerMethodField()
def get_id(self, instance):
return str(instance.id)
|
{
"content_hash": "6fad90d307025b684512231f97d9327d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.7621359223300971,
"repo_name": "mnazim/django-rest-kickstart",
"id": "1f872de6d3ae01e67bad9a10e71ee192fe7881ca",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5089"
},
{
"name": "Python",
"bytes": "19681"
}
],
"symlink_target": ""
}
|
from agents import AGENTS
import random
class CustomUserAgentMiddleware(object):
def process_request(self, request, spider):
agent = random.choice(AGENTS)
request.headers['User-Agent'] = agent
|
{
"content_hash": "97f6f31481cd7d505985f90a0e7c5aa7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 47,
"avg_line_length": 26.875,
"alnum_prop": 0.7209302325581395,
"repo_name": "carpedm20/hali",
"id": "2773ab6df4c63cd566aec4639e3b6a34b96f8f95",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/spider/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "61404"
},
{
"name": "Python",
"bytes": "58616"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
}
|
__author__ = 'rogerjiang'
'''
Purposes:
1. Visualization of training data
2. Evaluation of training data augmentation
'''
'''
Notes on the data files:
train_wkt_v4.csv: training labels with ImageId, ClassType, MultipolygonWKT
train_geoson_v3 (similar to train_wkt_v4.csv): training labels with ImageId
(folder name), ClassType (detailed, name of .geojson files), Multipolygon
(data of .geojson files, also contains detailed ClassType information)
grid_size.csv: sizes of all images with ImageId, 0<Xmax<1, -1<Ymin<0
(size of images, assuming origin (0,0) is at the upper left corner)
three_band: all 3-band images, in name of ImageId.tif
sixteen_band: all 16-band images, in name of ImageId_{A,M,P}.tif
sample_submission.csv: submission with ImageId, ClassType, MultipolygonWKT
If the order of dimensions in all the image data is x-y, this order is switched
to y-x in grid_sizes and in the wkt data from train_wkt_v4.
-------------
'''
'''
Basically, the combination of ClassType and MultipolygonWKT gives the voxel-wise
class labels.
The 'three_band' and 'sixteen_band' folders are the input for training.
ImageId connects the class labels with the training data.
MultipolygonWKT is a relative position in the figure and can be converted to
pixel coordinates with the grid_size (Xmax, Ymin).
There is a slight mismatch between the three_band and sixteen_band data, due to
a delay between measurements, so they need to be aligned.
'''
import tifffile
import shapely.wkt as wkt
import pandas as pd
import cv2
import numpy as np
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch
from matplotlib.patches import Patch
import random
from matplotlib import cm
from shapely import affinity
from shapely.affinity import scale
from shapely.geometry import MultiPolygon, Polygon
from collections import defaultdict
import sys
import seaborn as sns
import os
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CLASSES = {
1: 'Bldg',
2: 'Struct',
3: 'Road',
4: 'Track',
5: 'Trees',
6: 'Crops',
7: 'Fast H2O',
8: 'Slow H2O',
9: 'Truck',
10: 'Car',
}
COLORS = {
1: '0.7',
2: '0.4',
3: '#b35806',
4: '#dfc27d',
5: '#1b7837',
6: '#a6dba0',
7: '#74add1',
8: '#4575b4',
9: '#f46d43',
10: '#d73027',
}
# ZORDER defines the priority for plotting overlay of class labels.
ZORDER = {
1: 6,
2: 5,
3: 4,
4: 1,
5: 3,
6: 2,
7: 7,
8: 8,
9: 9,
10: 10,
}
# train_wkt_v4.csv stores the polygon data for all images and classes. The polygons
# uses relative coordinate positions.
_df = pd.read_csv(data_dir + '/data/train_wkt_v4.csv',
names=['ImageId', 'ClassId', 'MultipolygonWKT'], skiprows = 1)
# grid_sizes.csv stores the relative size of for each image. The origin is at the
# upper left corner, which means Xmax is positive and Ymin is negative.
_df1 = pd.read_csv(data_dir + '/data/grid_sizes.csv',
names = ['ImageId', 'Xmax', 'Ymin'], skiprows = 1)
# sample_submission.csv is the file for submission
_df2 = pd.read_csv(data_dir + '/data/sample_submission.csv',
names=['ImageId', 'ClassId', 'MultipolygonWKT'], skiprows = 1)
# Two of the training images were photographed at the same spot at different
# times, under different weather conditions. It is up to you to decide whether
# to exclude the duplicates ('6110_1_2', '6110_3_1'). Here I exclude none of them.
duplicates = []
train_wkt_v4 = _df[np.invert(np.in1d(_df.ImageId, duplicates))]
grid_sizes = _df1[np.invert(np.in1d(_df1.ImageId, duplicates))]
test_wkt = _df2
all_train_names = sorted(train_wkt_v4.ImageId.unique())
all_test_names = sorted(test_wkt.ImageId.unique())
train_IDs_dict = dict(zip(np.arange(len(all_train_names)), all_train_names))
train_IDs_dict_r = dict(zip(all_train_names, np.arange(len(all_train_names))))
test_IDs_dict = dict(zip(np.arange(len(all_test_names)), all_test_names))
test_IDs_dict_r = dict(zip(all_test_names, np.arange(len(all_test_names))))
def resize(im, shape_out):
'''
Resize an image using cv2.
Note: x and y are switched in cv2.resize
:param im:
:param shape_out:
:return:
'''
return cv2.resize(im, (shape_out[1], shape_out[0]),
interpolation=cv2.INTER_CUBIC)
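# Hedged sanity check, not part of the original module: cv2.resize takes its
# size as (width, height), i.e. reversed relative to numpy's (rows, cols), which
# is why shape_out is swapped above. The shapes here are arbitrary.
def _example_resize_shape():
    im = np.zeros((100, 50, 3), np.uint8)
    return resize(im, [30, 60]).shape[:2]  # -> (30, 60)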
def affine_transform(img, warp_matrix, out_shape):
'''
Apply affine transformation using warp_matrix to img, and perform
interpolation as needed
:param img:
:param warp_matrix:
:param out_shape:
:return:
'''
new_img = cv2.warpAffine(img, warp_matrix, (out_shape[1], out_shape[0]),
flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
borderMode= cv2.BORDER_REPLICATE)
# new_img[new_img == 0] = np.average(new_img)
return new_img
def get_polygon_list(image_id, class_type):
'''
    Load the wkt data (relative coordinates of polygons) from the csv file and
returns a list of polygons (in the format of shapely multipolygon)
:param image_id:
:param class_type:
:return:
'''
all_polygon = train_wkt_v4[train_wkt_v4.ImageId == image_id]
polygon = all_polygon[all_polygon.ClassId == class_type].MultipolygonWKT
# For empty polygon, polygon is a string of 'MULTIPOLYGON EMPTY'
# wkt.loads will automatically handle this and len(polygon_list) returns 0
# But polygon_list will never be None!
polygon_list = wkt.loads(polygon.values[0])
return polygon_list
def convert_coordinate_to_raster(coords, img_size, xymax):
'''
Converts the relative coordinates of contours into raster coordinates.
:param coords:
:param img_size:
:param xymax:
:return:
'''
xmax, ymax = xymax
width, height = img_size
coords[:, 0] *= (height + 1) / xmax
coords[:, 1] *= (width + 1) / ymax
coords = np.round(coords).astype(np.int32)
return coords
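# Hedged worked example, not part of the original module: one relative vertex
# converted into raster coordinates for an invented 400x400 image with
# (Xmax, Ymin) = (1.0, -0.5), following the scaling above.
def _example_convert_coordinate():
    coords = np.array([[0.5, -0.25]], np.float32)  # (x_rel, y_rel)
    return convert_coordinate_to_raster(coords, (400, 400), (1.0, -0.5))  # ~[[200, 200]]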
def generate_contours(polygon_list, img_size, xymax):
'''
    Convert shapely MultiPolygon data (relative coordinates) into lists of
    polygon raster coordinates.
:param polygon_list:
:param img_size:
:param xymax:
:return:
'''
if len(polygon_list) == 0:
return [], []
to_ind = lambda x: np.array(list(x)).astype(np.float32)
perim_list = [convert_coordinate_to_raster(to_ind(poly.exterior.coords),
img_size, xymax)
for poly in polygon_list]
inter_list = [convert_coordinate_to_raster(
to_ind(poly.coords), img_size, xymax)
for poly_ex in polygon_list for poly in poly_ex.interiors]
return perim_list, inter_list
def generate_mask_from_contours(img_size, perim_list, inter_list, class_id = 1):
'''
    Create a pixel-wise mask from polygon contours given in raster coordinates.
:param img_size:
:param perim_list:
:param inter_list:
:param class_id:
:return:
'''
mask = np.zeros(img_size, np.uint8)
if perim_list is None:
return mask
    # mask should match the dimensions of the image;
    # however, cv2.fillPoly assumes the x and y axes are opposite between mask
    # and perim_list (inter_list)
cv2.fillPoly(mask, perim_list, class_id)
cv2.fillPoly(mask, inter_list, 0)
return mask
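# Hedged sketch, not part of the original module: rasterize one tiny made-up
# square contour into a 5x5 mask. As the comments above note, cv2.fillPoly reads
# each point as (col, row), i.e. swapped relative to the numpy mask indexing.
def _example_small_mask():
    square = [np.array([[1, 1], [1, 3], [3, 3], [3, 1]], np.int32)]
    return generate_mask_from_contours((5, 5), square, [], class_id=1)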
def plot_polygon(polygon_list, ax, scaler = None, alpha = 0.7):
'''
    polygon_list is a dictionary of polygon lists for all class types:
    the key is the class id, and the value is the polygon list.
:param polygon_list:
:param ax:
:param scaler:
:param alpha:
:return:
'''
legend_list = []
for cl in CLASSES:
# Patch is a function in the matplotlib.patches module
legend_list.append(Patch(
color = COLORS[cl],
label = '{}: ({})'.format(CLASSES[cl], len(polygon_list[cl]))))
for polygon in polygon_list[cl]:
if scaler is not None:
# affinity is a function from shapely
polygon_rescale = affinity.scale(polygon, xfact = scaler[1],
yfact = scaler[0],
origin = [0., 0., 0.])
else:
polygon_rescale = polygon
# PolygonPatch is a function from descartes.patch module
# polygon_list is in relative coordinates and they are
# generated from get_polygon_list and are further
# converted to raster coordinates through scaler.
patch = PolygonPatch(polygon = polygon_rescale, color = COLORS[cl],
lw = 0, alpha = alpha, zorder = ZORDER[cl])
ax.add_patch(patch)
ax.autoscale_view()
ax.set_title('Objects')
ax.set_xticks([])
ax.set_yticks([])
return legend_list
def plot_image(img, ax, image_id, image_key, selected_channel = None):
'''
    Plot selected channels of img into ax.
:param img:
:param ax:
:param image_id:
:param image_key:
:param selected_channel:
:return:
'''
title_suffix = ''
if selected_channel is not None:
img = img[:, :, selected_channel]
title_suffix = '(' + ','.join(repr(i) for i in selected_channel) + ')'
ax.imshow(img)
ax.set_title(image_id + '-' + image_key + title_suffix)
ax.set_xlabel(img.shape[0])
ax.set_ylabel(img.shape[1])
ax.set_xticks([])
ax.set_yticks([])
def plot_overlay(img, ax, image_id, image_key, polygon_list, scaler = [1., 1.],
x_range = None, y_range = None, label = None, alpha = 1.0,
rgb = False):
'''
Plot image with polygon overlays
:param img:
:param ax:
:param image_id:
:param image_key:
:param polygon_list:
:param scaler:
:return:
'''
# cm is a function from matplotlib
if not x_range:
x_range = [0, img.shape[0]]
if not y_range:
y_range = [0, img.shape[1]]
if rgb:
ax.imshow(scale_percentile(img), vmax=1., vmin=0.)
else:
ax.imshow(scale_percentile(rgb2gray(img)),
cmap = cm.gray, vmax = 1., vmin = 0.)
ax.set_xlabel(x_range[1] - x_range[0])
ax.set_ylabel(y_range[1] - y_range[0])
legend = plot_polygon(polygon_list, ax, scaler, alpha = alpha)
ax.set_title(image_id + '-' + image_key + '-Overlay')
return legend
def scale_percentile(img):
'''
Scale an image's 1 - 99 percentiles into 0 - 1 for display
:param img:
:return:
'''
orig_shape = img.shape
if len(orig_shape) == 3:
img = np.reshape(img,
[orig_shape[0] * orig_shape[1], orig_shape[2]]
).astype(np.float32)
elif len(orig_shape) == 2:
img = np.reshape(img, [orig_shape[0] * orig_shape[1]]).astype(np.float32)
mins = np.percentile(img, 1, axis = 0)
maxs = np.percentile(img, 99, axis = 0) - mins
img = (img - mins) / maxs
img.clip(0., 1.)
img = np.reshape(img, orig_shape)
return img
def rgb2gray(rgb):
'''
Converts rgb images to grey scale images
:param rgb:
:return:
'''
    # Standard ITU-R BT.601 luma weights.
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def crop(img, crop_coord):
'''
    Crop out a patch from img, given the coordinates
:param img:
:param crop_coord:
:return:
'''
width, height = img.shape[0], img.shape[1]
x_lim = crop_coord[0].astype(np.int)
y_lim = crop_coord[1].astype(np.int)
assert 0 <= x_lim[0] < x_lim[1] <= width
assert 0 <= y_lim[0] < y_lim[1] <= height
return img[x_lim[0]: x_lim[1], y_lim[0]: y_lim[1]]
def get_image_area(image_id):
'''
Calculate the area of an image
:param image_id:
:return:
'''
xmax = grid_sizes[grid_sizes.ImageId == image_id].Xmax.values[0]
ymin = grid_sizes[grid_sizes.ImageId == image_id].Ymin.values[0]
return abs(xmax * ymin)
def image_stat(image_id):
'''
    Return the statistics of an image as a pd dataframe
:param image_id:
:return:
'''
counts, total_area, mean_area, std_area = {}, {}, {}, {}
img_area = get_image_area(image_id)
for cl in CLASSES:
polygon_list = get_polygon_list(image_id, cl)
counts[cl] = len(polygon_list)
if len(polygon_list) > 0:
total_area[cl] = np.sum([poly.area for poly in polygon_list])\
/ img_area * 100.
mean_area[cl] = np.mean([poly.area for poly in polygon_list])\
/ img_area * 100.
std_area[cl] = np.std([poly.area for poly in polygon_list])\
/ img_area * 100.
return pd.DataFrame({'Class': CLASSES, 'Counts': counts,
'TotalArea': total_area, 'MeanArea': mean_area,
'STDArea': std_area})
def collect_stats():
'''
Collect the area statistics for all images and concatenate them
:return:
'''
stats = []
total_no = len(all_train_names) - 1
for image_no, image_id in enumerate(all_train_names):
stat = image_stat(image_id)
stat['ImageId'] = image_id
stats.append(stat)
sys.stdout.write('\rCollecting class stats [{}{}] {}%'.\
format('=' * image_no,
' ' * (total_no - image_no),
100 * image_no / total_no))
sys.stdout.flush()
sys.stdout.write('\n')
return pd.concat(stats)
def calculate_class_weights():
'''
:return: class-wise true-label-area / false-label-area as a dictionary
'''
df = collect_stats()
df = df.fillna(0)
df = df.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
df = df.sum(axis=1)
df = df / (2500. - df)
return df.to_dict()
def plot_stats(value, title):
'''
    Plot a 2D grid of the given statistic (MeanArea, Counts, TotalArea, or
    STDArea) per class and image.
:param value:
:param title:
:return:
'''
stats = collect_stats()
pvt = stats.pivot(index='Class', columns='ImageId', values = value)
pvt.fillna(0., inplace = True)
fig, ax = plt.subplots(figsize = (10, 4))
im = ax.imshow(pvt, interpolation = 'nearest', cmap = plt.cm.plasma,
extent = [0 ,25, 10, 0])
ax.set_xlabel('Image')
ax.set_ylabel('Class Type')
ax.set_xticks(np.arange(0.5, 25.4, 1))
ax.set_yticks(np.arange(0.5, 10.4, 1))
ax.set_xticklabels(np.arange(1, 26))
ax.set_yticklabels(pvt.index)
ax.set_title(title)
fig.colorbar(im)
def plot_bar_stats():
stats = collect_stats()
pvt = stats.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
perc_area = np.cumsum(pvt, axis = 0)
class_r = {}
sns.set_style('white')
sns.set_context({'figure.figsize': (12, 8)})
for cl in CLASSES: class_r[CLASSES[cl]] = cl
for cl in np.arange(1, 11):
class_name = perc_area.index[-cl]
class_id = class_r[class_name]
ax = sns.barplot(x = perc_area.columns, y = perc_area.loc[class_name],
color = COLORS[class_id], label = class_name)
ax.legend(loc = 2)
sns.despine(left = True)
ax.set_xlabel('Image ID')
ax.set_ylabel('Class Type')
ax.set_xticklabels(perc_area.columns, rotation = -60)
def jaccard_index(mask_1, mask_2):
'''
Calculate jaccard index between two masks
:param mask_1:
:param mask_2:
:return:
'''
assert len(mask_1.shape) == len(mask_2.shape) == 2
assert 0 <= np.amax(mask_1) <=1
assert 0 <= np.amax(mask_2) <=1
intersection = np.sum(mask_1.astype(np.float32) * mask_2.astype(np.float32))
union = np.sum(mask_1.astype(np.float32) + mask_2.astype(np.float32)) - \
intersection
if union == 0:
return 1.
return intersection / union
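# Hedged worked example, not part of the original module: a full 2x2 mask against
# a half-filled one gives intersection 2 and union 4, i.e. a jaccard index of 0.5.
def _example_jaccard_index():
    full = np.array([[1, 1], [1, 1]])
    half = np.array([[1, 1], [0, 0]])
    return jaccard_index(full, half)  # -> 0.5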
def mask_to_polygons(mask, img_id, epsilon = 1, min_area = 1., test = True):
'''
Generate polygons from mask
:param mask:
:param epsilon:
:param min_area:
:return:
'''
    # find contours; cv2 switches the x-y coordinates of the mask to y-x in the
    # contours. This matches the wkt data in train_wkt_v4, which is desirable
    # for submission.
image, contours, hierarchy = cv2.findContours(
((mask == 1) * 255).astype(np.uint8),
cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
# create approximate contours
approx_contours = [cv2.approxPolyDP(cnt, epsilon, True)
for cnt in contours]
if not contours:
return MultiPolygon()
cnt_children = defaultdict(list)
child_contours = set()
assert hierarchy.shape[0] == 1
for idx, (_, _, _, parent_idx) in enumerate(hierarchy[0]):
if parent_idx != -1:
child_contours.add(idx)
cnt_children[parent_idx].append(approx_contours[idx])
# create actual polygon filtering by area (remove artifacts)
all_polygons = []
for idx, cnt in enumerate(approx_contours):
if idx not in child_contours and cv2.contourArea(cnt) >= min_area:
assert cnt.shape[1] == 1
poly = Polygon(shell = cnt[:, 0, :],
holes = [c[:, 0, :] for c in cnt_children.get(idx, [])
if cv2.contourArea(c) >= min_area])
all_polygons.append(poly)
# approximating polygons might have created invalid ones, fix them
all_polygons = MultiPolygon(all_polygons)
if not all_polygons.is_valid:
all_polygons = all_polygons.buffer(0)
# Sometimes buffer() converts a simple Multipolygon to just a Polygon,
# need to keep it a Multi throughout
if all_polygons.type == 'Polygon':
all_polygons = MultiPolygon([all_polygons])
id = test_IDs_dict[img_id] if test else train_IDs_dict[img_id]
x_max = grid_sizes[grid_sizes.ImageId == id].Xmax.values[0]
y_min = grid_sizes[grid_sizes.ImageId == id].Ymin.values[0]
x_scaler, y_scaler = x_max / mask.shape[1], y_min / mask.shape[0]
scaled_pred_polygons = scale(all_polygons, xfact=x_scaler,
yfact=y_scaler, origin=(0., 0., 0.))
return scaled_pred_polygons
def polygon_jaccard(final_polygons, train_polygons):
'''
    Calculate the jaccard index of two polygons, based on the
    shapely.geometry.MultiPolygon data type.
:param final_polygons:
:param train_polygons:
:return:
'''
return final_polygons.intersection(train_polygons).area /\
final_polygons.union(train_polygons).area
class ImageData():
def __init__(self, image_id, phase = 'train'):
self.image_id = train_IDs_dict[image_id] \
if phase == 'train' else test_IDs_dict[image_id]
self.stat = image_stat(self.image_id) if phase == 'train' else None
self.three_band_image = None
self.sixteen_band_image = None
self.image = None
self.image_size = None
self._xymax = None
self.label = None
self.crop_image = None
self.train_feature = None
self.pred_mask = None
def load_pre_mask(self):
self.pred_mask = None
def load_image(self):
'''
Load three band and sixteen band images, registered and at the same
resolution
Assign value for image_size
:return:
'''
im = self.image_stack()
self.three_band_image = im[..., 0: 3]
self.sixteen_band_image = im[..., 3:]
self.image = im
self.image_size = np.shape(im)[0: 2]
xmax = grid_sizes[grid_sizes.ImageId == self.image_id].Xmax.values[0]
ymax = grid_sizes[grid_sizes.ImageId == self.image_id].Ymin.values[0]
self._xymax = [xmax, ymax]
def get_image_path(self):
'''
Returns the paths for all images
:return:
'''
return {
'3': '{}/data/three_band/{}.tif'.format(data_dir, self.image_id),
'A': '{}/data/sixteen_band/{}_A.tif'.format(data_dir, self.image_id),
'M': '{}/data/sixteen_band/{}_M.tif'.format(data_dir, self.image_id),
'P': '{}/data/sixteen_band/{}_P.tif'.format(data_dir, self.image_id)
}
def read_image(self):
'''
Read all original images
:return:
'''
images = {}
path = self.get_image_path()
for key in path:
im = tifffile.imread(path[key])
if key != 'P':
images[key] = np.transpose(im, (1, 2, 0))
elif key == 'P':
images[key] = im
im3 = images['3']
ima = images['A']
imm = images['M']
imp = images['P']
[nx, ny, _] = im3.shape
images['A'] = resize(ima, [nx, ny])
images['M'] = resize(imm, [nx, ny])
images['P'] = resize(imp, [nx, ny])
return images
def image_stack(self):
'''
Resample all images to highest resolution and align all images
:return:
'''
images = self.read_image()
im3 = images['3']
ima = images['A']
imm = images['M']
imp = images['P']
imp = np.expand_dims(imp, 2)
[nx, ny, _] = im3.shape
warp_matrix_a = np.load(
(data_dir +
'/utils/image_alignment/{}_warp_matrix_a.npz').format(self.image_id)
)
warp_matrix_m = np.load(
(data_dir +
'/utils/image_alignment/{}_warp_matrix_m.npz').format(self.image_id)
)
ima = affine_transform(ima, warp_matrix_a, [nx, ny])
imm = affine_transform(imm, warp_matrix_m, [nx, ny])
im = np.concatenate((im3, ima, imm, imp), axis = -1)
return im
def create_label(self):
'''
Create the class labels
:return:
'''
if self.image is None:
self.load_image()
labels = np.zeros(np.append(self.image_size, len(CLASSES)), np.uint8)
for cl in CLASSES:
polygon_list = get_polygon_list(self.image_id, cl)
perim_list, inter_list = generate_contours(
polygon_list, self.image_size, self._xymax)
mask = generate_mask_from_contours(
self.image_size, perim_list, inter_list, class_id = 1)
labels[..., cl - 1] = mask
self.label = labels
def create_train_feature(self):
'''
Create synthesized features
:return:
'''
if self.three_band_image is None:
self.load_image()
m = self.sixteen_band_image[..., 8:].astype(np.float32)
rgb = self.three_band_image.astype(np.float32)
image_r = rgb[..., 0]
image_g = rgb[..., 1]
image_b = rgb[..., 2]
nir = m[..., 7]
re = m[..., 5]
L, C1, C2 = 1.0, 6.0, 7.5
evi = np.nan_to_num(
(nir - image_r) / (nir + C1 * image_r - C2 * image_b + L))
evi = evi.clip(max=np.percentile(evi, 99), min=np.percentile(evi, 1))
evi = np.expand_dims(evi, 2)
ndwi = (image_g - nir) / (image_g + nir)
ndwi = np.expand_dims(ndwi, 2)
savi = (nir - image_r) / (image_r + nir)
savi = np.expand_dims(savi, 2)
# binary = (ccci > 0.11).astype(np.float32) marks water fairly well
ccci = np.nan_to_num(
(nir - re) / (nir + re) * (nir - image_r) / (nir + image_r))
ccci = ccci.clip(
max=np.percentile(ccci, 99.9),
min=np.percentile(ccci, 0.1))
ccci = np.expand_dims(ccci, 2)
feature = np.concatenate([m, rgb, evi, ndwi, savi, ccci], 2)
feature[feature == np.inf] = 0
feature[feature == -np.inf] = 0
self.train_feature = feature
def visualize_image(self, plot_all = True):
'''
Visualize all images and class labels
:param plot_all:
:return:
'''
if self.label is None:
self.create_label()
if not plot_all:
fig, axarr = plt.subplots(figsize = [10, 10])
ax = axarr
else:
fig, axarr = plt.subplots(figsize = [20, 20], ncols = 3, nrows = 3)
ax = axarr[0][0]
polygon_list = {}
for cl in CLASSES:
polygon_list[cl] = get_polygon_list(self.image_id, cl)
print '{}: {} \t\tcount = {}'.format(
cl, CLASSES[cl], len(polygon_list[cl]))
legend = plot_polygon(polygon_list = polygon_list, ax = ax)
ax.set_xlim(0, self._xymax[0])
ax.set_ylim(self._xymax[1], 0)
ax.set_xlabel(self.image_size[0])
ax.set_ylabel(self.image_size[1])
if plot_all:
three_band_rescale = scale_percentile(self.three_band_image)
sixteen_band_rescale = scale_percentile(self.sixteen_band_image)
plot_image(three_band_rescale, axarr[0][1], self.image_id, '3')
plot_overlay(three_band_rescale, axarr[0][2], self.image_id, '3',
polygon_list,
scaler = self.image_size / np.array([self._xymax[1],
self._xymax[0]]))
axarr[0][2].set_ylim(self.image_size[0], 0)
axarr[0][2].set_xlim(0, self.image_size[1])
plot_image(sixteen_band_rescale, axarr[1][0], self.image_id, 'A',
selected_channel = [0, 3, 6])
plot_image(sixteen_band_rescale, axarr[1][1], self.image_id, 'A',
selected_channel = [1, 4, 7])
plot_image(sixteen_band_rescale, axarr[1][2], self.image_id, 'A',
selected_channel = [2, 5, 0])
plot_image(sixteen_band_rescale, axarr[2][0], self.image_id, 'M',
selected_channel = [8, 11, 14])
plot_image(sixteen_band_rescale, axarr[2][1], self.image_id, 'M',
selected_channel = [9, 12, 15])
plot_image(sixteen_band_rescale, axarr[2][2], self.image_id, 'M',
selected_channel = [10, 13, 8])
ax.legend(handles = legend,
bbox_to_anchor = (0.9, 0.95),
bbox_transform = plt.gcf().transFigure,
ncol = 5,
fontsize = 'large',
title = 'Objects-' + self.image_id,
framealpha = 0.3)
def visualize_label(self, x_range = None, y_range = None, alpha = 1.0):
'''
Visualize labels
:param plot_all:
:return:
'''
if self.label is None:
self.create_label()
if not x_range:
x_range = [0, self.image_size[0]]
if not y_range:
y_range = [0, self.image_size[1]]
fig, ax= plt.subplots(figsize = [10, 10])
polygon_list = {}
for cl in CLASSES:
polygon_list[cl] = get_polygon_list(self.image_id, cl)
print '{}: {} \t\tcount = {}'.format(
cl, CLASSES[cl], len(polygon_list[cl]))
three_band_rescale = scale_percentile(self.three_band_image)
legend = plot_overlay(
three_band_rescale, ax, self.image_id, 'P',polygon_list,
scaler=self.image_size / np.array([self._xymax[1], self._xymax[0]]),
alpha = alpha, rgb = True)
ax.set_xlim(x_range[0], x_range[1])
ax.set_ylim(y_range[0], y_range[1])
ax.set_xlabel(x_range[1] - x_range[0])
        ax.set_ylabel(y_range[1] - y_range[0])
ax.legend(handles = legend,
bbox_to_anchor = (0.95, 0.99),
bbox_transform = plt.gcf().transFigure,
ncol = 5,
fontsize = 'large',
title = 'Objects-' + self.image_id,
framealpha = 0.3)
def apply_crop(self, patch_size, ref_point = [0, 0], method = 'random'):
if self.image is None:
self.load_image()
crop_area = np.zeros([2, 2])
width = self.image_size[0]
height = self.image_size[1]
assert width >= patch_size > 0 and patch_size <= height
if method == 'random':
ref_point[0] = random.randint(0, width - patch_size)
ref_point[1] = random.randint(0, height - patch_size)
crop_area[0][0] = ref_point[0]
crop_area[1][0] = ref_point[1]
crop_area[0][1] = ref_point[0] + patch_size
crop_area[1][1] = ref_point[1] + patch_size
elif method == 'grid':
assert width > ref_point[0] + patch_size
assert height > ref_point[1] + patch_size
crop_area[0][0] = ref_point[0]
crop_area[1][0] = ref_point[1]
crop_area[0][1] = ref_point[0] + patch_size
crop_area[1][1] = ref_point[1] + patch_size
else:
raise NotImplementedError(
'"method" should either be "random" or "grid"')
self.crop_image = crop(self.image, crop_area)
|
{
"content_hash": "25f307630c714e3958018c40860b2a70",
"timestamp": "",
"source": "github",
"line_count": 942,
"max_line_length": 83,
"avg_line_length": 30.82908704883227,
"alnum_prop": 0.5682999896697772,
"repo_name": "jiangxu87/dstl_unet",
"id": "e9350ea2a6aba6e13184107a15139d90ca535338",
"size": "29041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6568963"
},
{
"name": "Python",
"bytes": "72996"
}
],
"symlink_target": ""
}
|
class CookeryWrongMatch(Exception):
pass
class CookeryWrongNumberOfArguments(Exception):
pass
class CookeryCannotImportModule(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
{
"content_hash": "fa5200a212eab9130f24c13bdea08789",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 47,
"avg_line_length": 21.181818181818183,
"alnum_prop": 0.6909871244635193,
"repo_name": "mikolajb/cookery",
"id": "6b51d55982125f998a807a91dc32844b5d132e3b",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cookery/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43226"
}
],
"symlink_target": ""
}
|