text stringlengths 4 1.02M | meta dict |
|---|---|
import collections
import operator
import pytest
from pandas.compat import PY36
import pandas as pd
from pandas.tests.extension import base
import pandas.util.testing as tm
from .array import JSONArray, JSONDtype, make_data
@pytest.fixture
def dtype():
    """The JSON extension dtype instance under test."""
    return JSONDtype()
@pytest.fixture
def data():
    """Length-100 PeriodArray for semantics test."""
    # Regenerate until the first two elements have different lengths.
    # NumPy cannot construct an object ndarray from equal-length sequences,
    # many operations coerce the EA to an ndarray, and several tests only
    # look at the first two elements — so we guarantee those differ.
    while True:
        candidate = make_data()
        if len(candidate[0]) != len(candidate[1]):
            return JSONArray(candidate)
@pytest.fixture
def data_missing():
    """Length 2 array with [NA, Valid] — the empty dict is this dtype's NA."""
    na, valid = {}, {"a": 10}
    return JSONArray([na, valid])
@pytest.fixture
def data_for_sorting():
    """Three distinct, sortable dict elements."""
    elements = [{"b": 1}, {"c": 4}, {"a": 2, "c": 3}]
    return JSONArray(elements)
@pytest.fixture
def data_missing_for_sorting():
    """Two valid elements with an NA (empty dict) between them."""
    elements = [{"b": 1}, {}, {"a": 4}]
    return JSONArray(elements)
@pytest.fixture
def na_value(dtype):
    """The scalar NA sentinel exposed by the dtype."""
    return dtype.na_value
@pytest.fixture
def na_cmp():
    """NA values for this dtype compare equal with plain ``==``."""
    return operator.eq
@pytest.fixture
def data_for_grouping():
    """Elements arranged as B B NA NA A A B C for groupby tests."""
    groups = [
        {"b": 1}, {"b": 1},
        {}, {},
        {"a": 0, "c": 2}, {"a": 0, "c": 2},
        {"b": 1},
        {"c": 2},
    ]
    return JSONArray(groups)
class BaseJSON:
    # NumPy doesn't handle an array of equal-length UserDicts.
    # The default assert_series_equal eventually does a
    # Series.values, which raises. We work around it by
    # converting the UserDicts to dicts.
    def assert_series_equal(self, left, right, **kwargs):
        """Series comparison that tolerates dict/UserDict elements."""
        if left.dtype.name == "json":
            assert left.dtype == right.dtype
            # Rebuild both sides from plain-object ndarrays so the
            # comparison machinery never has to box UserDicts.
            left = pd.Series(
                JSONArray(left.values.astype(object)), index=left.index, name=left.name
            )
            right = pd.Series(
                JSONArray(right.values.astype(object)),
                index=right.index,
                name=right.name,
            )
        tm.assert_series_equal(left, right, **kwargs)

    def assert_frame_equal(self, left, right, *args, **kwargs):
        """Frame comparison: JSON columns go through assert_series_equal."""
        # Compare the column index up front, mirroring the defaults of
        # tm.assert_frame_equal.
        tm.assert_index_equal(
            left.columns,
            right.columns,
            exact=kwargs.get("check_column_type", "equiv"),
            check_names=kwargs.get("check_names", True),
            check_exact=kwargs.get("check_exact", False),
            check_categorical=kwargs.get("check_categorical", True),
            obj="{obj}.columns".format(obj=kwargs.get("obj", "DataFrame")),
        )
        # NOTE(review): `(left.dtypes == "json").index` is the *full* column
        # index, not just the json columns — works here because these test
        # frames are all-json; confirm if mixed-dtype frames ever reach this.
        jsons = (left.dtypes == "json").index
        for col in jsons:
            self.assert_series_equal(left[col], right[col], *args, **kwargs)
        left = left.drop(columns=jsons)
        right = right.drop(columns=jsons)
        tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
    # Inherit the standard dtype tests unchanged.
    pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
    def test_custom_asserts(self):
        """Self-test for the assert_* overrides defined on BaseJSON."""
        # This would always trigger the KeyError from trying to put
        # an array of equal-length UserDicts inside an ndarray.
        data = JSONArray(
            [
                collections.UserDict({"a": 1}),
                collections.UserDict({"b": 2}),
                collections.UserDict({"c": 3}),
            ]
        )
        a = pd.Series(data)
        self.assert_series_equal(a, a)
        self.assert_frame_equal(a.to_frame(), a.to_frame())
        # differing contents must still be reported as unequal
        b = pd.Series(data.take([0, 0, 1]))
        with pytest.raises(AssertionError):
            self.assert_series_equal(a, b)
        with pytest.raises(AssertionError):
            self.assert_frame_equal(a.to_frame(), b.to_frame())
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
    @pytest.mark.skip(reason="not implemented constructor from dtype")
    def test_from_dtype(self, data):
        # construct from our dtype & string dtype
        pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
    @pytest.mark.skip(reason="Different definitions of NA")
    def test_stack(self):
        """
        The test does .astype(object).stack(). If we happen to have
        any missing values in `data`, then we'll end up with different
        rows since we consider `{}` NA, but `.astype(object)` doesn't.
        """

    @pytest.mark.xfail(reason="dict for NA")
    def test_unstack(self, data, index):
        # The base test has NaN for the expected NA value.
        # this matches otherwise
        return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
    # Inherit the standard indexing tests unchanged.
    pass
class TestMissing(BaseJSON, base.BaseMissingTests):
    @pytest.mark.skip(reason="Setting a dict as a scalar")
    def test_fillna_series(self):
        """We treat dictionaries as a mapping in fillna, not a scalar."""

    @pytest.mark.skip(reason="Setting a dict as a scalar")
    def test_fillna_frame(self):
        """We treat dictionaries as a mapping in fillna, not a scalar."""
# Shared markers: many tests below hinge on dict elements being unhashable,
# or on dictionary iteration order (only guaranteed on 3.6+).
unhashable = pytest.mark.skip(reason="Unhashable")
unstable = pytest.mark.skipif(
    not PY36, reason="Dictionary order unstable"  # 3.6 or higher
)
class TestReduce(base.BaseNoReduceTests):
    # JSONArray defines no reductions; inherit the checks that they raise.
    pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
    # Method tests, skipped where they require hashable elements, stable
    # dict ordering, or operations (combine/searchsorted) that JSONArray
    # does not support.
    @unhashable
    def test_value_counts(self, all_data, dropna):
        pass

    @unhashable
    def test_sort_values_frame(self):
        # TODO (EA.factorize): see if _values_for_factorize allows this.
        pass

    @unstable
    def test_argsort(self, data_for_sorting):
        super().test_argsort(data_for_sorting)

    @unstable
    def test_argsort_missing(self, data_missing_for_sorting):
        super().test_argsort_missing(data_missing_for_sorting)

    @unstable
    @pytest.mark.parametrize("ascending", [True, False])
    def test_sort_values(self, data_for_sorting, ascending):
        super().test_sort_values(data_for_sorting, ascending)

    @unstable
    @pytest.mark.parametrize("ascending", [True, False])
    def test_sort_values_missing(self, data_missing_for_sorting, ascending):
        super().test_sort_values_missing(data_missing_for_sorting, ascending)

    @pytest.mark.skip(reason="combine for JSONArray not supported")
    def test_combine_le(self, data_repeated):
        pass

    @pytest.mark.skip(reason="combine for JSONArray not supported")
    def test_combine_add(self, data_repeated):
        pass

    @pytest.mark.skip(reason="combine for JSONArray not supported")
    def test_combine_first(self, data):
        pass

    @unhashable
    def test_hash_pandas_object_works(self, data, kind):
        super().test_hash_pandas_object_works(data, kind)

    @pytest.mark.skip(reason="broadcasting error")
    def test_where_series(self, data, na_value):
        # Fails with
        # *** ValueError: operands could not be broadcast together
        # with shapes (4,) (4,) (0,)
        super().test_where_series(data, na_value)

    @pytest.mark.skip(reason="Can't compare dicts.")
    def test_searchsorted(self, data_for_sorting):
        super().test_searchsorted(data_for_sorting)
class TestCasting(BaseJSON, base.BaseCastingTests):
    @pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
    def test_astype_str(self):
        """This currently fails in NumPy on np.array(self, dtype=str) with
        *** ValueError: setting an array element with a sequence
        """


# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
    @unhashable
    def test_groupby_extension_transform(self):
        """
        This currently fails in Series.name.setter, since the
        name must be hashable, but the value is a dictionary.
        I think this is what we want, i.e. `.name` should be the original
        values, and not the values for factorization.
        """

    @unhashable
    def test_groupby_extension_apply(self):
        """
        This fails in Index._do_unique_check with

        >   hash(val)
        E   TypeError: unhashable type: 'UserDict' with

        I suspect that once we support Index[ExtensionArray],
        we'll be able to dispatch unique.
        """

    @unstable
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        super().test_groupby_extension_agg(as_index, data_for_grouping)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
    # Arithmetic is not defined for JSONArray; the base error check is
    # disabled and the EA-addition test pins the TypeError instead.
    def test_error(self, data, all_arithmetic_operators):
        pass

    def test_add_series_with_extension_array(self, data):
        ser = pd.Series(data)
        with pytest.raises(TypeError, match="unsupported"):
            ser + data

    def test_divmod_series_array(self):
        # GH 23287
        # skipping because it is not implemented
        pass

    def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
        # divmod on JSONArray raises TypeError rather than NotImplementedError
        return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
    # Inherit the standard comparison tests unchanged.
    pass
class TestPrinting(BaseJSON, base.BasePrintingTests):
    # Inherit the standard repr/printing tests unchanged.
    pass
| {
"content_hash": "e8ed34d4c626348c9acebce2e82a6210",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 87,
"avg_line_length": 30.08974358974359,
"alnum_prop": 0.6362377503195569,
"repo_name": "kushalbhola/MyStuff",
"id": "bc75ec6aeb2df0ac2a4007a540eba8ee4189cf22",
"size": "9388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/tests/extension/json/test_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
} |
from __future__ import division # For type safety in gaussian_kl_divergence
from functools import partial
from math import erfc
import numpy as np
from numpy.random import RandomState
import kl
import threshold
def gaussian_kl_divergence(mu1, s1, mu2, s2):
    """Return KL(N(mu1,s1)||N(mu2,s2)) in closed form."""
    # http://stats.stackexchange.com/a/7443/40686
    log_ratio = np.log(s2 / s1)
    quadratic = (s1 ** 2 + (mu1 - mu2) ** 2) / (2 * s2 ** 2)
    return log_ratio + quadratic - 0.5
def gaussian_log_pdf(mu, s):
    """Return a function x -> log density of N(mu, s) at x."""
    # The normalizer depends only on s, so hoist it out of the closure body.
    log_normalizer = -(np.log(2 * np.pi) / 2) - np.log(s)

    def lpdf(x):
        return log_normalizer - ((x - mu) ** 2 / (2 * s ** 2))
    return lpdf
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
    """Compute the lowest of the survival function and the CDF of the exact KL
    divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
    KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
    over a sample x~N(mu1,s1). If we are computing the KL divergence
    accurately, the exact value should fall squarely in the sample, and the
    tail probabilities should be relatively large.
    """
    if prngstate is None:
        raise TypeError('Must explicitly specify numpy.random.RandomState')
    mu1, mu2 = 0, 0
    s1, s2 = 1, 2
    true_divergence = gaussian_kl_divergence(mu1, s1, mu2, s2)
    draws = prngstate.normal(mu1, s1, n)
    estimate, std = kl.kullback_leibler(
        draws, gaussian_log_pdf(mu1, s1), gaussian_log_pdf(mu2, s2))
    # Minimum of the left and right tail probabilities of the exact KL
    # divergence vs a gaussian fit to the sample estimate. There is a
    # distinct negative skew to the samples used to compute `estimate`, so
    # this statistic is not uniform. Nonetheless, we do not expect it to get
    # too small.
    return erfc(abs(true_divergence - estimate) / std) / 2
def kl_test_stat():
    """Return a nullary callable drawing the KL check statistic with a fixed seed."""
    return partial(
        compute_kullback_leibler_check_statistic,
        prngstate=RandomState(17),
    )
def compute_kl_threshold():
    """Compute the values used in test_kullback_leibler

    >>> threshold.compute_sufficiently_stringent_threshold(
            kl_test_stat(), 6, 1e-20)
    ...
    TestThreshold(
        threshold=4.3883148424367044e-13,
        failprob=9.724132259513859e-21,
        sample_size=252135
    )

    This means that after generating 252135 check statistics, it was found that
    the least value of six samples will be less than 4.3883148424367044e-13
    with probability less than 9.724132259513859e-21 (< 1e-20).
    """
    # Expensive offline computation; test_kullback_leibler hard-codes its result.
    return threshold.compute_sufficiently_stringent_threshold(
        kl_test_stat(), 6, 1e-20)
def test_kullback_leibler():
    """Check kullback_leibler_check_statistic doesn't give absurdly low
    values."""
    # See compute_kl_threshold for derivation
    kl_threshold = threshold.TestThreshold(
        threshold=4.3883148424367044e-13,
        failprob=9.724132259513859e-21,
        sample_size=252135
    )
    # Draw 6 statistics from kl_test_stat and fail if any falls below the
    # precomputed threshold.
    threshold.check_generator(kl_test_stat(), 6, kl_threshold.threshold, 1e-20)
| {
"content_hash": "aa72da120526bbf4276054ab291749f9",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 34.32222222222222,
"alnum_prop": 0.6785367432826157,
"repo_name": "probcomp/bayeslite",
"id": "74f0475fb2dc4c2ac5945b4984624a7c3c8ac046",
"size": "3744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_kl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2007"
},
{
"name": "Python",
"bytes": "1079798"
},
{
"name": "Shell",
"bytes": "2560"
},
{
"name": "Yacc",
"bytes": "42578"
}
],
"symlink_target": ""
} |
import os
import utilities.paths as paths
from keras.preprocessing import image
import numpy as np
import json
DRIVE = paths.get_drive(2)  # root path of the external drive holding the datasets
# Setup vars
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression (Malisiewicz et al.).

    boxes is an (N, 4) array of [x1, y1, x2, y2]; boxes whose overlap ratio
    with an already-picked box exceeds overlapThresh are discarded.
    Returns the surviving boxes as an int array (or [] for empty input).
    """
    if len(boxes) == 0:
        return []
    # Work in float: the overlap computation below relies on true division.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = boxes[:, 2], boxes[:, 3]
    # Areas of all boxes; candidates are processed by ascending
    # bottom-right y-coordinate (last entry first).
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    picked = []
    while len(order) > 0:
        last = len(order) - 1
        current = order[last]
        picked.append(current)
        # Intersection of the current box with all remaining candidates.
        ix1 = np.maximum(x1[current], x1[order[:last]])
        iy1 = np.maximum(y1[current], y1[order[:last]])
        ix2 = np.minimum(x2[current], x2[order[:last]])
        iy2 = np.minimum(y2[current], y2[order[:last]])
        iw = np.maximum(0, ix2 - ix1 + 1)
        ih = np.maximum(0, iy2 - iy1 + 1)
        overlap = (iw * ih) / area[order[:last]]
        # Drop the current box plus everything overlapping it too much.
        order = np.delete(
            order,
            np.concatenate(([last], np.where(overlap > overlapThresh)[0])),
        )
    return boxes[picked].astype("int")
# For each tennis video: run YOLO ('person' class) over every full frame plus
# overlapping sub-crops, merge the detections with non-max suppression, then
# save the boxes (.npy) and an annotated copy of each frame.
# Fix: the progress print used Python-2 statement syntax (`print str(i)...`),
# a SyntaxError under Python 3 and inconsistent with `print(...)` used
# elsewhere in this file; it now uses call syntax (same output either way).
for v in ['V006','V007','V008','V009','V010']:
    in_img_dir = DRIVE + 'DATASETS/VIDEO/TENNIS/FULL_VIDEOS/FRAMES/'+v+'/1200x700_512x512'
    # img_save_path = 'darkflow-master/out/'
    img_save_path = DRIVE + 'DATASETS/VIDEO/TENNIS/BBS/img/'
    # npy_save_path = 'darkflow-master/out/'
    npy_save_path = DRIVE + 'DATASETS/VIDEO/TENNIS/BBS/npy/'
    jsn = False  # NOTE(review): unused in this script — confirm before removing
    split_ways = 2  # should be multiple of 2
    # Start from clean scratch directories.
    if os.path.exists('darkflow-master/test/'):
        os.system('rm -r darkflow-master/test/')
    if os.path.exists('darkflow-master/out/'):
        os.system('rm -r darkflow-master/out/')
    # Run through images
    img_list = []
    for file in os.listdir(in_img_dir):
        if file.endswith(".png"):
            img_list.append(file)
    print(img_list)
    # Process images in any way - cropping, splitting, resizing
    for i in range(1,len(img_list)+1):
        if not os.path.exists('darkflow-master/test/'):
            os.makedirs('darkflow-master/test/')
        file = img_list[i-1]
        img = image.img_to_array(image.load_img(os.path.join(in_img_dir, file)))
        img_width = np.shape(img)[1]
        if img_width != 512:
            print('ERROR WITH IMAGE: '+os.path.join(in_img_dir, file))
            continue
        # Write overlapping crops (plus the full frame) for the detector.
        new_img_size = int(round(img_width/float(split_ways)))
        step_size = int(round(new_img_size/2.0))
        for tlx in range(2*split_ways-1):
            for tly in range(2*split_ways-1):
                sub_img = img[tlx*step_size:tlx*step_size+new_img_size,tly*step_size:tly*step_size+new_img_size,:]
                sub_img = image.array_to_img(sub_img)
                sub_img.save('darkflow-master/test/'+file[:-4]+'_'+str(tlx)+'_'+str(tly) + '.png')
        img = image.array_to_img(img)
        img.save('darkflow-master/test/' + file)
        # break
        # NOTE(review): frames after the last full batch of 50 are written but
        # never detected — confirm whether a tail flush is needed.
        if i % 50 == 0:
            print(str(i) + '/' + str(len(img_list)))
            # Run obj detection scipt
            os.system('./darkflow-master/flow --test darkflow-master/test --threshold 0.05 --model darkflow-master/cfg/yolo-voc.cfg --load darkflow-master/bin/yolo-voc.weights --json')
            for j in range(i-50,i):
                file = img_list[j]
                bbs = []
                # Load + process results: crop detections are offset back into
                # full-frame coordinates before merging.
                for tlx in range(2*split_ways-1):
                    for tly in range(2*split_ways-1):
                        if os.path.isfile('darkflow-master/test/out/'+file[:-4]+'_'+str(tlx)+'_'+str(tly) + '.json'):
                            with open('darkflow-master/test/out/'+file[:-4]+'_'+str(tlx)+'_'+str(tly) + '.json') as data_file:
                                try:
                                    data = json.load(data_file)
                                    for d in data:
                                        if d['label'] == 'person':
                                            bbs.append([int(d['topleft']['x']) + tly * step_size,
                                                        int(d['topleft']['y']) + tlx * step_size,
                                                        int(d['bottomright']['x']) + tly * step_size,
                                                        int(d['bottomright']['y']) + tlx * step_size])
                                except ValueError:
                                    pass
                if os.path.isfile('darkflow-master/test/out/' + file[:-4] + '.json'):
                    with open('darkflow-master/test/out/' + file[:-4] + '.json') as data_file:
                        try:
                            data = json.load(data_file)
                            for d in data:
                                if d['label'] == 'person':
                                    bbs.append([int(d['topleft']['x']),
                                                int(d['topleft']['y']),
                                                int(d['bottomright']['x']),
                                                int(d['bottomright']['y'])])
                        except ValueError:
                            pass
                # Non-Max Supp with .5 overlap
                bbs = non_max_suppression_fast(np.asarray(bbs),0.5)
                if not os.path.exists(npy_save_path):
                    os.makedirs(npy_save_path)
                np.save(npy_save_path + file[:-4]+'.npy',bbs)
                if not os.path.exists(img_save_path):
                    os.makedirs(img_save_path)
                # Draw green 1px box edges onto a fresh copy of the frame.
                img = image.img_to_array(image.load_img(os.path.join(in_img_dir, file)))
                for bb in bbs:
                    img[bb[1]:bb[3],bb[0]:bb[0]+1,:] = (0,200,0)
                    img[bb[1]:bb[3],bb[2]:bb[2]+1,:] = (0,200,0)
                    img[bb[1]:bb[1]+1,bb[0]:bb[2],:] = (0,200,0)
                    img[bb[3]:bb[3]+1,bb[0]:bb[2],:] = (0,200,0)
                img = image.array_to_img(img)
                img.save(img_save_path + file)
            if os.path.exists('darkflow-master/test/'):
                os.system('rm -r darkflow-master/test/')
    # if i > 15:
    #     break
| {
"content_hash": "8c8fbffb8d4ae01aeca297bcf38d1f5a",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 184,
"avg_line_length": 39.027027027027025,
"alnum_prop": 0.49778393351800554,
"repo_name": "HaydenFaulkner/phd",
"id": "e9e5739a86933af397a2a6a75e2c1a1a1e606ab3",
"size": "7229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "object_detection/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1243227"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import argparse
import ast
import collections
import enum
import io
import itertools
import os
import re
import sys
from typing import Generator
from typing import NamedTuple
from typing import Sequence
from classify_imports import Import
from classify_imports import import_obj_from_str
from classify_imports import ImportFrom
from classify_imports import Settings
from classify_imports import sort
# Chunk/token classifications used by the partitioner below.
CodeType = enum.Enum('CodeType', 'PRE_IMPORT_CODE IMPORT NON_CODE CODE')
Tok = enum.Enum('Tok', 'IMPORT STRING NEWLINE ERROR')
# GENERATED VIA generate-tokenize
COMMENT = r'#[^\r\n]*'
NAME = r'\w+'
PREFIX = r'[RrUu]?'
DOUBLE_3 = r'"""[^"\\]*(?:(?:\\.|\\\n|"(?!""))[^"\\]*)*"""'
SINGLE_3 = r"'''[^'\\]*(?:(?:\\.|\\\n|'(?!''))[^'\\]*)*'''"
DOUBLE_1 = r'"[^"\\]*(?:\\.[^"\\]*)*"'
SINGLE_1 = r"'[^'\\]*(?:\\.[^'\\]*)*'"
# END GENERATED
# Hand-written fragments composed from the generated pieces above.
WS = r'[ \f\t]+'
IMPORT = fr'(?:from|import)(?={WS})'
EMPTY = fr'[ \f\t]*(?=\n|{COMMENT})'
OP = '[,.*]'
ESCAPED_NL = r'\\\n'
NAMES = fr'\((?:\s+|,|{NAME}|{ESCAPED_NL}|{COMMENT})*\)'
STRING = fr'{PREFIX}(?:{DOUBLE_3}|{SINGLE_3}|{DOUBLE_1}|{SINGLE_1})'
def _pat(base: str, pats: tuple[str, ...]) -> re.Pattern[str]:
    """Compile a whole-line pattern: `base`, any number of `pats`
    alternatives, an optional trailing comment (captured), then EOL."""
    alternatives = "|".join(pats)
    return re.compile(
        fr'{base}'
        fr'(?:{alternatives})*'
        fr'(?P<comment>(?:{COMMENT})?)'
        fr'(?:\n|$)',
    )
# Patterns tried in order by _tokenize; IMPORT first so import lines win.
TOKENIZE: tuple[tuple[Tok, re.Pattern[str]], ...] = (
    (Tok.IMPORT, _pat(IMPORT, (WS, NAME, OP, ESCAPED_NL, NAMES))),
    (Tok.NEWLINE, _pat(EMPTY, ())),
    (Tok.STRING, _pat(STRING, (WS, STRING, ESCAPED_NL))),
)
def _tokenize(s: str) -> Generator[tuple[Tok, str], None, None]:
    """Yield (token type, matched text) chunks of `s`.

    Yields (Tok.ERROR, <rest of source>) and stops at the first position no
    pattern matches, or when a matched chunk's trailing comment contains
    `noreorder` (the opt-out marker).
    """
    pos = 0
    while True:
        for tp, reg in TOKENIZE:
            match = reg.match(s, pos)
            if match is not None:
                # `# noreorder` freezes everything from here to EOF
                if 'noreorder' in match['comment']:
                    yield (Tok.ERROR, s[pos:])
                    return
                else:
                    yield (tp, match[0])
                pos = match.end()
                break
        else:
            yield (Tok.ERROR, s[pos:])
            return
def partition_source(src: str) -> tuple[str, list[str], str, str]:
    """Split source into (pre-import code, import lines, trailing code, nl).

    The text is read through universal-newline translation so processing can
    assume '\\n'; the detected original newline is returned for restoration.
    """
    sio = io.StringIO(src, newline=None)
    src = sio.read().rstrip() + '\n'
    # StringIO records which newline styles it translated; default to '\n'.
    if sio.newlines is None:
        nl = '\n'
    elif isinstance(sio.newlines, str):
        nl = sio.newlines
    else:
        nl = sio.newlines[0]
    chunks = []
    pre_import = True
    for token_type, s in _tokenize(src):
        if token_type is Tok.IMPORT:
            pre_import = False
            chunks.append((CodeType.IMPORT, s))
        elif token_type is Tok.NEWLINE:
            if s.isspace():
                tp = CodeType.NON_CODE
            elif pre_import:
                tp = CodeType.PRE_IMPORT_CODE
            else:
                tp = CodeType.CODE
            chunks.append((tp, s))
        elif pre_import and token_type is Tok.STRING:
            # a string before any import (e.g. module docstring) stays on top
            chunks.append((CodeType.PRE_IMPORT_CODE, s))
        else:
            chunks.append((CodeType.CODE, s))
    # index of the last pre-import/import chunk: NON_CODE chunks before it are
    # dropped, everything after it counts as trailing code
    last_idx = 0
    for i, (tp, _) in enumerate(chunks):
        if tp in (CodeType.PRE_IMPORT_CODE, CodeType.IMPORT):
            last_idx = i
    pre = []
    imports = []
    code = []
    for i, (tp, src) in enumerate(chunks):
        if tp is CodeType.PRE_IMPORT_CODE:
            pre.append(src)
        elif tp is CodeType.IMPORT:
            imports.append(src)
        elif tp is CodeType.CODE or i > last_idx:
            code.append(src)
    return ''.join(pre), imports, ''.join(code), nl
def parse_imports(
    imports: list[str],
    *,
    to_add: tuple[str, ...] = (),
) -> list[tuple[str, Import | ImportFrom]]:
    """Parse import source lines (prepending `to_add`) to (source, obj) pairs.

    Multi-name statements are split into one entry per imported name.
    """
    ret = []
    for s in itertools.chain(to_add, imports):
        obj = import_obj_from_str(s)
        if not obj.is_multiple:
            ret.append((s, obj))
        else:
            ret.extend((str(new), new) for new in obj.split())
    return ret
class Replacements(NamedTuple):
    """Rewrite tables built from ``old_mod=new_mod[:attr]`` specs.

    exact maps (orig_mod, attr) to the module the attr now lives in;
    mods maps a whole original module to its replacement.
    """
    # (orig_mod, attr) => new_mod
    exact: dict[tuple[str, str], str]
    # orig_mod => new_mod (no attr)
    mods: dict[str, str]

    @classmethod
    def make(cls, args: list[tuple[str, str, str]]) -> Replacements:
        exact: dict[tuple[str, str], str] = {}
        mods: dict[str, str] = {}
        for mod_from, mod_to, attr in args:
            if attr:
                exact[mod_from, attr] = mod_to
                continue
            from_base, _, from_attr = mod_from.rpartition('.')
            to_base, _, to_attr = mod_to.rpartition('.')
            # for example `six.moves.urllib.request=urllib.request` also
            # implies the exact rewrite (six.moves.urllib, request) -> urllib
            if from_attr and to_base and from_attr == to_attr:
                exact[from_base, from_attr] = to_base
            mods[mod_from] = mod_to
        return cls(exact=exact, mods=mods)
def replace_imports(
    imports: list[tuple[str, Import | ImportFrom]],
    to_replace: Replacements,
) -> list[tuple[str, Import | ImportFrom]]:
    """Apply the `to_replace` rewrite tables to parsed imports.

    Entries with no applicable replacement pass through unchanged.
    """
    ret = []
    for s, import_obj in imports:
        if isinstance(import_obj, Import):
            mod, asname = import_obj.key
            if asname:
                # `import a.b as c`: rewrite the whole module...
                if mod in to_replace.mods:
                    node_i = ast.Import(
                        names=[ast.alias(to_replace.mods[mod], asname)],
                    )
                    obj_i = Import(node_i)
                    ret.append((str(obj_i), obj_i))
                else:
                    # ...or a parent package, keeping the dotted suffix
                    for mod_name in _module_to_base_modules(mod):
                        if mod_name in to_replace.mods:
                            new_mod = to_replace.mods[mod_name]
                            new_mod_s = f'{new_mod}{mod[len(mod_name):]}'
                            node_i = ast.Import(
                                names=[ast.alias(new_mod_s, asname)],
                            )
                            obj_i = Import(node_i)
                            ret.append((str(obj_i), obj_i))
                            break
                    else:
                        ret.append((s, import_obj))
            else:
                # NOTE(review): un-aliased `import a.b` is never rewritten in
                # this branch — confirm that is intentional.
                ret.append((s, import_obj))
        else:
            mod, symbol, asname = import_obj.key
            mod_symbol = f'{mod}.{symbol}'
            # from a.b.c import d => from e.f.g import d
            if (mod, symbol) in to_replace.exact:
                node = ast.ImportFrom(
                    module=to_replace.exact[mod, symbol],
                    names=import_obj.node.names,
                    level=0,
                )
                obj = ImportFrom(node)
                ret.append((str(obj), obj))
            # from a.b.c import d as e => from f import g as e
            # from a.b.c import d as e => import f as e
            # from a.b import c => import c
            elif (
                    mod_symbol in to_replace.mods and
                    (asname or to_replace.mods[mod_symbol] == symbol)
            ):
                new_mod = to_replace.mods[mod_symbol]
                new_mod, dot, new_sym = new_mod.rpartition('.')
                if new_mod:
                    node = ast.ImportFrom(
                        module=new_mod,
                        names=[ast.alias(new_sym, asname)],
                        level=0,
                    )
                    obj = ImportFrom(node)
                    ret.append((str(obj), obj))
                elif not dot:
                    # replacement is a bare module: becomes a plain `import`
                    node_i = ast.Import(names=[ast.alias(new_sym, asname)])
                    obj_i = Import(node_i)
                    ret.append((str(obj_i), obj_i))
                else:
                    ret.append((s, import_obj))
            # from a.b.c import d => from e import d
            elif mod in to_replace.mods:
                node = ast.ImportFrom(
                    module=to_replace.mods[mod],
                    names=import_obj.node.names,
                    level=0,
                )
                obj = ImportFrom(node)
                ret.append((str(obj), obj))
            else:
                # finally, try rewriting a parent package of `mod`
                for mod_name in _module_to_base_modules(mod):
                    if mod_name in to_replace.mods:
                        new_mod = to_replace.mods[mod_name]
                        node = ast.ImportFrom(
                            module=f'{new_mod}{mod[len(mod_name):]}',
                            names=import_obj.node.names,
                            level=0,
                        )
                        obj = ImportFrom(node)
                        ret.append((str(obj), obj))
                        break
                else:
                    ret.append((s, import_obj))
    return ret
def _module_to_base_modules(s: str) -> Generator[str, None, None]:
"""return all module names that would be imported due to this
import-import
"""
s = s.rpartition('.')[0]
while s:
yield s
s = s.rpartition('.')[0]
def remove_duplicated_imports(
    imports: list[tuple[str, Import | ImportFrom]],
    *,
    to_remove: set[tuple[str, ...]],
) -> list[tuple[str, Import | ImportFrom]]:
    """Drop exact duplicates, `to_remove` keys, and redundant base imports.

    An un-aliased `import a.b` is redundant when an un-aliased `import
    a.b.c` is also present, since importing the latter imports the former.
    """
    seen = set(to_remove)
    seen_module_names: set[str] = set()
    without_exact_duplicates = []
    for s, import_obj in imports:
        if import_obj.key not in seen:
            seen.add(import_obj.key)
            # record every parent package pulled in by an un-aliased import
            if (
                    isinstance(import_obj, Import) and
                    not import_obj.key.asname
            ):
                seen_module_names.update(
                    _module_to_base_modules(import_obj.module),
                )
            without_exact_duplicates.append((s, import_obj))
    ret = []
    for s, import_obj in without_exact_duplicates:
        if (
                isinstance(import_obj, Import) and
                not import_obj.key.asname and
                import_obj.key.module in seen_module_names
        ):
            continue
        ret.append((s, import_obj))
    return ret
def apply_import_sorting(
    imports: list[tuple[str, Import | ImportFrom]],
    settings: Settings = Settings(),
) -> list[str]:
    """Sort parsed imports and return their source lines, one blank line
    between each block produced by `sort`, none after the last."""
    import_obj_to_s = {v: s for s, v in imports}
    sorted_blocks = sort(import_obj_to_s, settings=settings)
    new_imports = []
    for block in sorted_blocks:
        for import_obj in block:
            new_imports.append(import_obj_to_s[import_obj])
        new_imports.append('\n')
    # XXX: I want something like [x].join(...) (like str join) but for now
    # this works
    if new_imports:
        new_imports.pop()
    return new_imports
def fix_file_contents(
    contents: str,
    *,
    to_add: tuple[str, ...] = (),
    to_remove: set[tuple[str, ...]],
    to_replace: Replacements,
    settings: Settings = Settings(),
) -> str:
    """Return `contents` with imports added/replaced/deduplicated and sorted."""
    # whitespace-only input renders as an empty file
    if not contents or contents.isspace():
        return ''
    # internally use `'\n'` as the newline and normalize at the very end
    before, imports, after, nl = partition_source(contents)
    parsed = parse_imports(imports, to_add=to_add)
    parsed = replace_imports(parsed, to_replace=to_replace)
    parsed = remove_duplicated_imports(parsed, to_remove=to_remove)
    imports = apply_import_sorting(parsed, settings=settings)
    return f'{before}{"".join(imports)}{after}'.replace('\n', nl)
def _fix_file(
    filename: str,
    args: argparse.Namespace,
    *,
    to_remove: set[tuple[str, ...]],
    to_replace: Replacements,
    settings: Settings = Settings(),
) -> int:
    """Reorder the imports of one file ('-' means stdin/stdout).

    Returns 1 for undecodable input or a changed file (0 with
    --exit-zero-even-if-changed), 0 when nothing changed.
    """
    if filename == '-':
        contents_bytes = sys.stdin.buffer.read()
    else:
        with open(filename, 'rb') as f:
            contents_bytes = f.read()
    try:
        contents = contents_bytes.decode()
    except UnicodeDecodeError:
        # fix: the f-string had no placeholder and always printed
        # "(unknown)" instead of naming the offending file
        print(
            f'{filename} is non-utf-8 (not supported)',
            file=sys.stderr,
        )
        return 1
    new_contents = fix_file_contents(
        contents,
        to_add=tuple(f'{s.strip()}\n' for s in args.add_import),
        to_remove=to_remove,
        to_replace=to_replace,
        settings=settings,
    )
    if filename == '-':
        print(new_contents, end='')
    elif contents != new_contents:
        # fix: report which file is being rewritten (placeholder was missing)
        print(f'Reordering imports in {filename}', file=sys.stderr)
        with open(filename, 'wb') as f:
            f.write(new_contents.encode())
    if args.exit_zero_even_if_changed:
        return 0
    else:
        return contents != new_contents
# Keyed by minimum (major[, minor]) Python version: REMOVALS holds import
# statements that can simply be deleted at that version; REPLACES holds
# "old_mod=new_mod[:attr]" rewrite specs fed to Replacements.make.
REMOVALS: dict[tuple[int, ...], set[str]] = collections.defaultdict(set)
REPLACES: dict[tuple[int, ...], set[str]] = collections.defaultdict(set)
REMOVALS[(3,)].add('from io import open')
# GENERATED VIA generate-future-info
REMOVALS[(2, 2)].add('from __future__ import nested_scopes')
REMOVALS[(2, 3)].add('from __future__ import generators')
REMOVALS[(2, 6)].add('from __future__ import with_statement')
REMOVALS[(3,)].update((
'from __future__ import division',
'from __future__ import absolute_import',
'from __future__ import print_function',
'from __future__ import unicode_literals',
))
REMOVALS[(3, 7)].add('from __future__ import generator_stop')
# END GENERATED
# GENERATED VIA generate-typing-rewrite-info
# Using:
# flake8-typing-imports==1.14.0
# mypy-extensions==0.4.3
# typing-extensions==4.4.0
REPLACES[(3, 6)].update((
'typing_extensions=typing:AsyncIterable',
'typing_extensions=typing:AsyncIterator',
'typing_extensions=typing:Awaitable',
'typing_extensions=typing:ClassVar',
'typing_extensions=typing:ContextManager',
'typing_extensions=typing:Coroutine',
'typing_extensions=typing:DefaultDict',
'typing_extensions=typing:NewType',
'typing_extensions=typing:TYPE_CHECKING',
'typing_extensions=typing:Text',
'typing_extensions=typing:Type',
))
REPLACES[(3, 7)].update((
'mypy_extensions=typing:NoReturn',
'typing_extensions=typing:AsyncContextManager',
'typing_extensions=typing:AsyncGenerator',
'typing_extensions=typing:ChainMap',
'typing_extensions=typing:Counter',
'typing_extensions=typing:Deque',
'typing_extensions=typing:NoReturn',
))
REPLACES[(3, 8)].update((
'mypy_extensions=typing:TypedDict',
'typing_extensions=typing:Final',
'typing_extensions=typing:Literal',
'typing_extensions=typing:OrderedDict',
'typing_extensions=typing:Protocol',
'typing_extensions=typing:SupportsIndex',
'typing_extensions=typing:runtime_checkable',
))
REPLACES[(3, 9)].update((
'typing_extensions=typing:Annotated',
'typing_extensions=typing:get_type_hints',
))
REPLACES[(3, 10)].update((
'typing_extensions=typing:Concatenate',
'typing_extensions=typing:ParamSpecArgs',
'typing_extensions=typing:ParamSpecKwargs',
'typing_extensions=typing:TypeAlias',
'typing_extensions=typing:TypeGuard',
'typing_extensions=typing:get_args',
'typing_extensions=typing:get_origin',
'typing_extensions=typing:is_typeddict',
))
REPLACES[(3, 11)].update((
'typing_extensions=typing:Any',
'typing_extensions=typing:LiteralString',
'typing_extensions=typing:NamedTuple',
'typing_extensions=typing:Never',
'typing_extensions=typing:NotRequired',
'typing_extensions=typing:Required',
'typing_extensions=typing:Self',
'typing_extensions=typing:TypedDict',
'typing_extensions=typing:Unpack',
'typing_extensions=typing:assert_never',
'typing_extensions=typing:assert_type',
'typing_extensions=typing:clear_overloads',
'typing_extensions=typing:dataclass_transform',
'typing_extensions=typing:final',
'typing_extensions=typing:get_overloads',
'typing_extensions=typing:overload',
'typing_extensions=typing:reveal_type',
))
# END GENERATED
# NOTE: every section between a "GENERATED VIA <tool>" marker and its
# "END GENERATED" marker is machine-generated.  To change the data, edit
# and re-run the named generator script -- do not hand-edit these lists.
# GENERATED VIA generate-typing-pep585-rewrites
REPLACES[(3, 9)].update((
    'typing=collections.abc:AsyncGenerator',
    'typing=collections.abc:AsyncIterable',
    'typing=collections.abc:AsyncIterator',
    'typing=collections.abc:Awaitable',
    'typing=collections.abc:ByteString',
    'typing=collections.abc:Collection',
    'typing=collections.abc:Container',
    'typing=collections.abc:Coroutine',
    'typing=collections.abc:Generator',
    'typing=collections.abc:Hashable',
    'typing=collections.abc:ItemsView',
    'typing=collections.abc:Iterable',
    'typing=collections.abc:Iterator',
    'typing=collections.abc:KeysView',
    'typing=collections.abc:Mapping',
    'typing=collections.abc:MappingView',
    'typing=collections.abc:MutableMapping',
    'typing=collections.abc:MutableSequence',
    'typing=collections.abc:MutableSet',
    'typing=collections.abc:Reversible',
    'typing=collections.abc:Sequence',
    'typing=collections.abc:Sized',
    'typing=collections.abc:ValuesView',
    'typing=collections:ChainMap',
    'typing=collections:Counter',
    'typing=collections:OrderedDict',
    'typing=re:Match',
    'typing=re:Pattern',
    'typing.re=re:Match',
    'typing.re=re:Pattern',
))
REPLACES[(3, 10)].add('typing=collections.abc:Callable')
# END GENERATED
# GENERATED VIA generate-python-future-info
# Using future==0.18.2
REMOVALS[(3,)].update((
    'from builtins import *',
    'from builtins import ascii',
    'from builtins import bytes',
    'from builtins import chr',
    'from builtins import dict',
    'from builtins import filter',
    'from builtins import hex',
    'from builtins import input',
    'from builtins import int',
    'from builtins import isinstance',
    'from builtins import list',
    'from builtins import map',
    'from builtins import max',
    'from builtins import min',
    'from builtins import next',
    'from builtins import object',
    'from builtins import oct',
    'from builtins import open',
    'from builtins import pow',
    'from builtins import range',
    'from builtins import round',
    'from builtins import str',
    'from builtins import super',
    'from builtins import zip',
))
# END GENERATED
# GENERATED VIA generate-six-info
# Using six==1.16.0
REMOVALS[(3,)].update((
    'from six import callable',
    'from six import next',
    'from six.moves import filter',
    'from six.moves import input',
    'from six.moves import map',
    'from six.moves import range',
    'from six.moves import zip',
))
REPLACES[(3,)].update((
    'six.moves.BaseHTTPServer=http.server',
    'six.moves.CGIHTTPServer=http.server',
    'six.moves.SimpleHTTPServer=http.server',
    'six.moves._dummy_thread=_dummy_thread',
    'six.moves._thread=_thread',
    'six.moves.builtins=builtins',
    'six.moves.cPickle=pickle',
    'six.moves.collections_abc=collections.abc',
    'six.moves.configparser=configparser',
    'six.moves.copyreg=copyreg',
    'six.moves.dbm_gnu=dbm.gnu',
    'six.moves.dbm_ndbm=dbm.ndbm',
    'six.moves.email_mime_base=email.mime.base',
    'six.moves.email_mime_image=email.mime.image',
    'six.moves.email_mime_multipart=email.mime.multipart',
    'six.moves.email_mime_nonmultipart=email.mime.nonmultipart',
    'six.moves.email_mime_text=email.mime.text',
    'six.moves.html_entities=html.entities',
    'six.moves.html_parser=html.parser',
    'six.moves.http_client=http.client',
    'six.moves.http_cookiejar=http.cookiejar',
    'six.moves.http_cookies=http.cookies',
    'six.moves.queue=queue',
    'six.moves.reprlib=reprlib',
    'six.moves.socketserver=socketserver',
    'six.moves.tkinter=tkinter',
    'six.moves.tkinter_colorchooser=tkinter.colorchooser',
    'six.moves.tkinter_commondialog=tkinter.commondialog',
    'six.moves.tkinter_constants=tkinter.constants',
    'six.moves.tkinter_dialog=tkinter.dialog',
    'six.moves.tkinter_dnd=tkinter.dnd',
    'six.moves.tkinter_filedialog=tkinter.filedialog',
    'six.moves.tkinter_font=tkinter.font',
    'six.moves.tkinter_messagebox=tkinter.messagebox',
    'six.moves.tkinter_scrolledtext=tkinter.scrolledtext',
    'six.moves.tkinter_simpledialog=tkinter.simpledialog',
    'six.moves.tkinter_tix=tkinter.tix',
    'six.moves.tkinter_tkfiledialog=tkinter.filedialog',
    'six.moves.tkinter_tksimpledialog=tkinter.simpledialog',
    'six.moves.tkinter_ttk=tkinter.ttk',
    'six.moves.urllib.error=urllib.error',
    'six.moves.urllib.parse=urllib.parse',
    'six.moves.urllib.request=urllib.request',
    'six.moves.urllib.response=urllib.response',
    'six.moves.urllib.robotparser=urllib.robotparser',
    'six.moves.urllib_error=urllib.error',
    'six.moves.urllib_parse=urllib.parse',
    'six.moves.urllib_robotparser=urllib.robotparser',
    'six.moves.xmlrpc_client=xmlrpc.client',
    'six.moves.xmlrpc_server=xmlrpc.server',
    'six.moves=collections:UserDict',
    'six.moves=collections:UserList',
    'six.moves=collections:UserString',
    'six.moves=functools:reduce',
    'six.moves=io:StringIO',
    'six.moves=itertools:filterfalse',
    'six.moves=itertools:zip_longest',
    'six.moves=os:getcwd',
    'six.moves=os:getcwdb',
    'six.moves=subprocess:getoutput',
    'six.moves=sys:intern',
    'six=functools:wraps',
    'six=io:BytesIO',
    'six=io:StringIO',
))
# END GENERATED
# GENERATED VIA generate-mock-info
# up until cpython 3.10.0
REPLACES[(3,)].update((
    'mock.mock=unittest.mock:ANY',
    'mock.mock=unittest.mock:DEFAULT',
    'mock.mock=unittest.mock:FILTER_DIR',
    'mock.mock=unittest.mock:MagicMock',
    'mock.mock=unittest.mock:Mock',
    'mock.mock=unittest.mock:NonCallableMagicMock',
    'mock.mock=unittest.mock:NonCallableMock',
    'mock.mock=unittest.mock:PropertyMock',
    'mock.mock=unittest.mock:call',
    'mock.mock=unittest.mock:create_autospec',
    'mock.mock=unittest.mock:mock_open',
    'mock.mock=unittest.mock:patch',
    'mock.mock=unittest.mock:sentinel',
    'mock=unittest.mock:ANY',
    'mock=unittest.mock:DEFAULT',
    'mock=unittest.mock:FILTER_DIR',
    'mock=unittest.mock:MagicMock',
    'mock=unittest.mock:Mock',
    'mock=unittest.mock:NonCallableMagicMock',
    'mock=unittest.mock:NonCallableMock',
    'mock=unittest.mock:PropertyMock',
    'mock=unittest.mock:call',
    'mock=unittest.mock:create_autospec',
    'mock=unittest.mock:mock_open',
    'mock=unittest.mock:patch',
    'mock=unittest.mock:sentinel',
))
REPLACES[(3, 7)].update((
    'mock.mock=unittest.mock:seal',
    'mock=unittest.mock:seal',
))
REPLACES[(3, 8)].update((
    'mock.mock=unittest.mock:AsyncMock',
    'mock=unittest.mock:AsyncMock',
))
# END GENERATED
# GENERATED VIA generate-deprecated
REPLACES[(3,)].update((
    'collections=collections.abc:AsyncGenerator',
    'collections=collections.abc:AsyncIterable',
    'collections=collections.abc:AsyncIterator',
    'collections=collections.abc:Awaitable',
    'collections=collections.abc:ByteString',
    'collections=collections.abc:Callable',
    'collections=collections.abc:Collection',
    'collections=collections.abc:Container',
    'collections=collections.abc:Coroutine',
    'collections=collections.abc:Generator',
    'collections=collections.abc:Hashable',
    'collections=collections.abc:ItemsView',
    'collections=collections.abc:Iterable',
    'collections=collections.abc:Iterator',
    'collections=collections.abc:KeysView',
    'collections=collections.abc:Mapping',
    'collections=collections.abc:MappingView',
    'collections=collections.abc:MutableMapping',
    'collections=collections.abc:MutableSequence',
    'collections=collections.abc:MutableSet',
    'collections=collections.abc:Reversible',
    'collections=collections.abc:Sequence',
    'collections=collections.abc:Set',
    'collections=collections.abc:Sized',
    'collections=collections.abc:ValuesView',
    'pipes=shlex:quote',
    'xml.etree.cElementTree=xml.etree.ElementTree',
))
# END GENERATED
def _add_version_options(parser: argparse.ArgumentParser) -> None:
    """Register one ``--pyXX-plus`` flag per version with known rewrites.

    The oldest version's flag also provides the ``(0,)`` default for
    ``min_version``; each flag stores its version tuple via store_const.
    """
    versions = sorted(REMOVALS.keys() | REPLACES.keys())
    msg = 'Removes/updates obsolete imports; implies all older versions.'
    oldest, *newer = versions
    parser.add_argument(
        f'--py{"".join(str(n) for n in oldest)}-plus', help=msg,
        action='store_const', dest='min_version', const=oldest,
        default=(0,),
    )
    for version in newer:
        parser.add_argument(
            f'--py{"".join(str(n) for n in version)}-plus', help=msg,
            action='store_const', dest='min_version', const=version,
        )
def _validate_import(s: str) -> str:
    """argparse ``type=`` callback: accept *s* only if it parses as an import."""
    try:
        import_obj_from_str(s)
        return s
    except (SyntaxError, KeyError):
        raise argparse.ArgumentTypeError(f'expected import: {s!r}')
def _validate_replace_import(s: str) -> tuple[str, str, str]:
    """argparse ``type=`` callback: parse ``orig.mod=new.mod[:attr]``.

    Returns ``(orig_mod, new_mod, attr)`` where *attr* is ``''`` when no
    ``:attr`` suffix was given.
    """
    mods, _, attr = s.partition(':')
    pieces = mods.split('=')
    try:
        orig_mod, new_mod = pieces
    except ValueError:
        raise argparse.ArgumentTypeError(
            f'expected `orig.mod=new.mod` or `orig.mod=new.mod:attr`: {s!r}',
        )
    return orig_mod, new_mod, attr
def main(argv: Sequence[str] | None = None) -> int:
    """Command-line entry point.

    Parses options, builds the removal/replacement tables (user options
    plus everything registered for versions <= the requested minimum),
    then runs ``_fix_file`` over each filename.  Returns a non-zero exit
    status when any file changed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'filenames', nargs='*',
        help='If `-` is given, reads from stdin and writes to stdout.',
    )
    parser.add_argument('--exit-zero-even-if-changed', action='store_true')
    parser.add_argument(
        '--add-import', action='append', default=[], type=_validate_import,
        help='Import to add to each file. Can be specified multiple times.',
    )
    parser.add_argument(
        '--remove-import', action='append', default=[], type=_validate_import,
        help=(
            'Import to remove from each file. '
            'Can be specified multiple times.'
        ),
    )
    parser.add_argument(
        '--replace-import', action='append', default=[],
        type=_validate_replace_import,
        help=(
            'Module pairs to replace imports. '
            'For example: `--replace-import orig.mod=new.mod`. '
            'For renames of a specific imported attribute, use the form '
            '`--replace-import orig.mod=new.mod:attr`. '
            'Can be specified multiple times.'
        ),
    )
    parser.add_argument(
        '--application-directories', default='.',
        help=(
            'Colon separated directories that are considered top-level '
            'application directories. Defaults to `%(default)s`'
        ),
    )
    parser.add_argument(
        '--unclassifiable-application-module', action='append', default=[],
        dest='unclassifiable',
        help=(
            '(may be specified multiple times) module names that are '
            'considered application modules. this setting is intended to be '
            'used for things like C modules which may not always appear on '
            'the filesystem'
        ),
    )
    _add_version_options(parser)
    args = parser.parse_args(argv)
    # Imports to delete: explicit --remove-import values plus every removal
    # registered for a version <= the requested --pyXX-plus minimum.
    to_remove = {
        obj.key
        for s in args.remove_import
        for obj in import_obj_from_str(s).split()
    } | {
        import_obj_from_str(s).key
        for k, v in REMOVALS.items()
        if args.min_version >= k
        for s in v
    }
    # Fold the built-in version-gated replacements into the user-supplied
    # --replace-import values before building the replacement table.
    for k, v in REPLACES.items():
        if args.min_version >= k:
            args.replace_import.extend(
                _validate_replace_import(replace_s) for replace_s in v
            )
    to_replace = Replacements.make(args.replace_import)
    if os.environ.get('PYTHONPATH'):
        # $PYTHONPATH can change how modules are classified, so warn.
        sys.stderr.write('$PYTHONPATH set, import order may be unexpected\n')
        sys.stderr.flush()
    settings = Settings(
        application_directories=tuple(args.application_directories.split(':')),
        unclassifiable_application_modules=frozenset(args.unclassifiable),
    )
    retv = 0
    for filename in args.filenames:
        retv |= _fix_file(
            filename,
            args,
            to_remove=to_remove,
            to_replace=to_replace,
            settings=settings,
        )
    return retv
# Script entry point: exit status propagates whether any file was changed.
if __name__ == '__main__':
    raise SystemExit(main())
| {
"content_hash": "00cb64a34100697428b4079a92661f26",
"timestamp": "",
"source": "github",
"line_count": 838,
"max_line_length": 79,
"avg_line_length": 33.24463007159905,
"alnum_prop": 0.5954987616210201,
"repo_name": "asottile/reorder_python_imports",
"id": "673ea246bd1d8393e030939bb586ace2c9821bfc",
"size": "27859",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "reorder_python_imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75855"
}
],
"symlink_target": ""
} |
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
    """
    A limited evaluator for Python expressions.

    Only the small expression subset needed for PEP 345 environment
    markers is supported (names, attributes, string literals, boolean
    operators and comparisons); anything else raises SyntaxError.
    """

    # Lower-cased AST operator class names (e.g. ast.NotEq -> 'noteq')
    # mapped to their implementations.
    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }

    # Names a marker expression may reference, bound (at class-creation
    # time) to values describing the running interpreter/platform.
    allowed_values = {
        'sys_platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.platform is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os_name': os.name,
        'platform_in_venv': str(in_venv()),
        'platform_release': platform.release(),
        'platform_version': platform.version(),
        'platform_machine': platform.machine(),
        'platform_python_implementation': python_implementation(),
    }

    def __init__(self, context=None):
        """
        Initialise an instance.

        :param context: If specified, names are looked up in this mapping
                        before falling back to ``allowed_values``.
        """
        self.context = context or {}
        self.source = None

    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.

        NOTE(review): callers pass ``SyntaxError.offset`` directly, which
        can be None on some parse failures -- confirm upstream behaviour.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s

    def get_handler(self, node_type):
        """
        Get a handler method (``do_<node_type>``) for an AST node type,
        or None when the node type is unsupported.
        """
        return getattr(self, 'do_%s' % node_type, None)

    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)

    def get_attr_key(self, node):
        """Return the dotted name (e.g. ``'os.name'``) of an Attribute node."""
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)

    def do_attribute(self, node):
        """Resolve a dotted name against the context, then allowed_values."""
        if not isinstance(node.value, ast.Name):
            # Bug fix: ``key`` was previously left unbound on this path,
            # so the ``raise`` below crashed with NameError instead of
            # reporting the invalid expression.
            key = None
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result

    def do_boolop(self, node):
        """Evaluate ``and``/``or`` chains with Python's short-circuiting."""
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        # Only keep evaluating while the result is still undecided.
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result

    def do_compare(self, node):
        """Evaluate a (possibly chained) comparison such as ``a < b <= c``."""
        def sanity_check(lhsnode, rhsnode):
            # Comparing two string literals is always a marker-authoring
            # mistake, so reject it outright.
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)

        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result

    def do_expression(self, node):
        """Unwrap the top-level Expression node produced by mode='eval'."""
        return self.evaluate(node.body)

    def do_name(self, node):
        """Look up a bare name in the context, then in allowed_values."""
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result

    def do_str(self, node):
        """Return the value of a string literal."""
        return node.s
def interpret(marker, execution_context=None):
    """Evaluate an environment-marker string for the running environment.

    :param marker: The marker expression to evaluate (str).
    :param execution_context: Optional mapping used for name lookup.
    :return: The result of evaluating the stripped marker.
    """
    evaluator = Evaluator(execution_context)
    return evaluator.evaluate(marker.strip())
| {
"content_hash": "0ba30f2bf9ad2d7e67002258c9dacfb3",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 77,
"avg_line_length": 33.15217391304348,
"alnum_prop": 0.5468852459016393,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "72a8540fcb4d2f177263a11493497fc65ea168a4",
"size": "6283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/site-packages/pip/_vendor/distlib/markers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
} |
import os
import sys
import socket
import urllib
import string
import random
import traceback
import time
class Signal:
    """A minimal signal/slot dispatcher.

    Callbacks are grouped under a string ``context``; ``emit`` invokes
    every registered callback, optionally filtered to given contexts.
    """

    def __init__(self):
        self.slots = {}  # context -> list of callbacks
        self.meta = {}   # context -> {'hidden': bool}

    def ensure(self, func, context=""):
        """Connect *func* under *context* unless already registered there."""
        if context not in self.slots or func not in self.slots[context]:
            self.connect(func, context)

    def connect(self, func, context="", hidden=False):
        """Register *func* under *context*, recording 'hidden' metadata."""
        if context:
            self.meta[context] = {
                'hidden': hidden
            }
        self.slots.setdefault(context, []).append(func)

    def clear(self):
        """Drop all slots (metadata is intentionally left untouched)."""
        self.slots = {}

    def disconnect(self, context):
        """Remove every callback registered under *context*."""
        del self.slots[context]

    def emit(self, *args, **kwargs):
        """Invoke registered callbacks with *args*.

        Keyword options:
          limit_context   -- only fire contexts contained in this collection
          include_context -- pass the context name as the first argument
          allow_break     -- stop after the first callback returning truthy
          force_break     -- stop after the first callback, unconditionally
        """
        limit_context = kwargs.get("limit_context", None)
        allow_break = kwargs.get("allow_break", False)
        force_break = kwargs.get("force_break", False)
        include_context = kwargs.get("include_context", False)
        for context, callbacks in self.slots.items():
            if limit_context and context not in limit_context:
                continue
            for callback in callbacks:
                if include_context:
                    outcome = callback(context, *args)
                else:
                    outcome = callback(*args)
                if (allow_break and outcome) or force_break:
                    return
class Server:
    """Thin wrapper around an IRC socket plus the bot's dispatch signals.

    Constructed with a falsy socket the instance is in "test" mode and
    echoes to stdout instead of sending over the wire.
    (Python 2 source: uses ``print`` statements.)
    """
    def __init__(self, sock):
        self.sock = sock
        self.on_msg = Signal()      # channel/private messages
        self.on_data = Signal()     # raw protocol lines
        self.on_command = Signal()  # %-prefixed bot commands
        self.on_enter = Signal()    # fired after sign-in
        self.on_quit = Signal()     # fired on disconnect
        self.test = not sock        # test mode when no socket given
    def send(self, msg):
        # Raw protocol send; in test mode just echo to stdout.
        if self.test:
            print msg
        else:
            self.sock.send(msg)
    def say(self, dest, msg):
        # PRIVMSG to one destination, or to each channel when given a list.
        # NOTE(review): this consults the module-level TEST flag while
        # send() consults self.test -- presumably equivalent; confirm.
        if type(dest) == type([]):
            if TEST:
                print "(all) %s" % msg
            else:
                for chan in dest:
                    self.sock.send("PRIVMSG %s :%s\n" % (chan, msg))
            return
        if TEST:
            print "(%s) %s" % (dest,msg)
        else:
            self.sock.send("PRIVMSG %s :%s\n" % (dest, msg))
    def broadcast(self, msg):
        # Send to every configured channel (CHANS comes from config.py).
        self.say(CHANS, msg)
    def about(self, cmd, nick, dest, msg, plugins):
        # Reply with bot info, the loaded plugins and non-hidden commands.
        self.say(dest, "I am litebot (github.com/flipcoder/litebot)")
        cmds = filter(lambda x: not self.on_command.meta[x]['hidden'], sorted(self.on_command.slots))
        self.say(dest, "Plugins: %s" % (", ".join(plugins)))
        self.say(dest, "Commands (prefix with %%): %s" % (", ".join(cmds)))
def handle_error(serv, e, errors, GODS=[], ERROR_SPAM=100, logged_in=False):
    # Count repeated errors and notify the GODS (admin nicks) about them.
    # (Mutable default GODS=[] is only read here, never mutated.)
    # NOTE(review): ``errors`` is keyed by the exception *instance*; every
    # raise creates a fresh object, so the KeyError branch presumably runs
    # each time -- confirm whether keying by str(e) was intended.
    try:
        ec = errors[e]
        if ec < ERROR_SPAM:
            # NOTE(review): the incremented count is never stored back into
            # ``errors``, so the ERROR_SPAM threshold can never be reached.
            ec += 1
        elif ec == ERROR_SPAM:
            if logged_in:
                for god in GODS:
                    serv.say(god, "Bot is error spamming: " + e)
    except KeyError:
        # First occurrence: log it and tell the admins.
        print e
        errors[e] = 1
        if logged_in:
            for god in GODS:
                serv.say(god, e)
# True when started with -t/--test: run locally without an IRC connection.
TEST = bool(set(["-t","--test"]) & set(sys.argv[1:]))
# Script entry point.  NOTE: Python 2 source (print statements, raw_input,
# ``except Exc, e`` syntax); must run under a Python 2 interpreter.
if __name__=='__main__':
    # readline provides history/editing for the local test-mode prompt.
    if os.name == "nt":
        import pyreadline as readline
    else:
        import readline
    # Defaults; config.py (exec'd below) is expected to override these and
    # to define HOST, PORT, NICK, IDENT, REALNAME, PASS, CHANS and GODS.
    PLUGINS = None
    RECONNECT = False
    ERROR_SPAM = 100
    IGNORE = []
    config_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "config.py"
    )
    with open(config_path) as source:
        eval(compile(source.read(), config_path, 'exec'))
    # NOTE(review): if config.py leaves RECONNECT False the loop below never
    # runs and the bot never connects -- confirm config always sets it.
    reconnect = RECONNECT
    while reconnect:
        reconnect = RECONNECT
        errors = {}
        buf = ""
        sock = None
        if not TEST:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serv = Server(sock)
        if not TEST:
            # IRC handshake: NICK then USER.
            sock.connect((HOST, PORT))
            sock.send("NICK %s\n" % NICK)
            sock.send("USER %s %s %s :%s\n" % (IDENT, NICK, HOST, REALNAME))
        plugins_fn = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "plugins"
        )
        # Plugins are exec'd in this scope; broken ones are dropped.
        for p in PLUGINS[:]:
            try:
                with open(os.path.join(plugins_fn, p+".py")) as source:
                    eval(compile(source.read(), "%s.py" % p, 'exec'))
            except Exception:
                print >>sys.stderr, "Exception in plugin \"%s\":" % p
                print >>sys.stderr, traceback.format_exc()
                PLUGINS.remove(p)
        logged_in = False or TEST
        test_nick = 'user'
        #if TEST:
        #    print 'Test mode on %s.' % CHANS[0]
        while True:
            try:
                if not TEST:
                    buf = sock.recv(4096)
                    if not buf:
                        raise EOFError
                    # print "buf (hex): " + str(buf).encode("hex")
                    if buf.find("PING") != -1:
                        sock.send("PONG %s\r\n" % buf.split()[1]+"\n")
                    # Identify with nickserv and join channels once the
                    # first server traffic has arrived.
                    if not logged_in:
                        time.sleep(1)
                        sock.send("PRIVMSG nickserv identify %s\n" % PASS)
                        for chan in CHANS:
                            sock.send("JOIN %s\n" % chan)
                        logged_in = True
                        #print "signed in"
                        serv.on_enter.emit(serv,
                            include_context=True, allow_break=True)
                        continue
                    serv.on_data.emit(serv, buf, include_context=True, allow_break=True)
                if buf.find("PRIVMSG") != -1 or TEST:
                    if not TEST:
                        # Parse ":nick!user@host PRIVMSG dest :msg".
                        bang_idx = buf.find('!')
                        if bang_idx == -1:
                            continue
                        tokens = []
                        # print "buf: " + str(buf)
                        tokens += [buf[1:bang_idx]]
                        tokens += buf[bang_idx+1:].split(":")
                        # print 'tokens: ' + str(tokens)
                        # print str(buf)
                        nick = tokens[0]
                        dest = tokens[1].split()[2]
                        msg = ':'.join(tokens[2:]).rstrip()
                        # print 'nick: %s, dest: %s, msg: %s' % (nick,dest,msg)
                    if TEST:
                        # Test mode reads lines from the local console.
                        dest = CHANS[0]
                        nick = test_nick
                        msg = raw_input('%s> ' % test_nick)
                    ignore = nick in IGNORE
                    if not TEST or msg:
                        if not ignore:
                            serv.on_msg.emit(serv, nick, dest, msg,
                                include_context=True, allow_break=True)
                    if TEST:
                        # Local-only /n and /nick commands change the fake nick.
                        if msg.startswith("/n ") or msg.startswith("/nick "):
                            test_nick = msg[msg.index(" ")+1:]
                            continue
                        elif msg.startswith("/n"):
                            test_nick = 'user'
                    if msg=="%" or msg=="%help":
                        if not ignore:
                            serv.about("about", nick, dest, msg, PLUGINS)
                        continue
                    # %-prefixed messages are bot commands: "%cmd args".
                    if msg and msg.startswith("%"):
                        if not ignore:
                            msg = msg[1:]
                            msg = msg.strip()
                            chop = msg.find(" ")
                            if chop != -1:
                                cmd = msg[:chop]
                                msg = msg[chop+1:]
                            else:
                                cmd = msg
                                msg = ""
                            serv.on_command.emit(serv, nick, dest, msg,
                                include_context=True, limit_context=[cmd], force_break=True
                            )
                        continue
            except EOFError, e:
                serv.on_quit.emit(serv, include_context=True)
                break # may reconnect depending on settings
            except Exception, e:
                print "Exception"
                handle_error(serv, e, errors, GODS, ERROR_SPAM, logged_in)
| {
"content_hash": "40f1e2d39e6cddeade7e5d9acf5b1849",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 101,
"avg_line_length": 34.39525691699605,
"alnum_prop": 0.41116984601241097,
"repo_name": "flipcoder/litebot",
"id": "333f65774c1fb5e1068632ac30de3d0952614899",
"size": "8726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "litebot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30634"
}
],
"symlink_target": ""
} |
"""
Created on Nov 30, 2021
@author: CyberiaResurrection
"""
import unittest
import re
import sys
sys.path.append('../PyRoute')
from Galaxy import Galaxy
from Galaxy import Sector
class testGalaxy(unittest.TestCase):
    """Barebones positional checks for Galaxy.set_bounding_sectors()."""
    # Fix: the per-test descriptions were previously stray class-level
    # string expressions (no-ops); they are now real method docstrings.

    def testVerticalOrdering(self):
        """
        A very simple, barebones test to check that Verge and Reft end up
        in their correct relative positions - Verge being immediately
        rimward of Reft.
        """
        galaxy = Galaxy(0)
        reft = Sector("Reft", "# -3, 0")
        self.assertEqual(-3, reft.x)
        self.assertEqual(0, reft.y)
        verge = Sector("Verge", "# -3, -1")
        self.assertEqual(-3, verge.x)
        self.assertEqual(-1, verge.y)
        galaxy.sectors[reft.name] = reft
        galaxy.sectors[verge.name] = verge
        # verify, before bounding sectors gets run, nothing is hooked up
        self.assertIsNone(galaxy.sectors[reft.name].coreward)
        self.assertIsNone(galaxy.sectors[reft.name].rimward)
        self.assertIsNone(galaxy.sectors[reft.name].spinward)
        self.assertIsNone(galaxy.sectors[reft.name].trailing)
        self.assertIsNone(galaxy.sectors[verge.name].coreward)
        self.assertIsNone(galaxy.sectors[verge.name].rimward)
        self.assertIsNone(galaxy.sectors[verge.name].spinward)
        self.assertIsNone(galaxy.sectors[verge.name].trailing)
        # set bounding sectors
        galaxy.set_bounding_sectors()
        # now assert that Reft is coreward from Verge, and (likewise), Verge is rimward from Reft, and nothing else
        # got set
        self.assertEqual(galaxy.sectors[reft.name], galaxy.sectors[verge.name].coreward, "Reft should be coreward of Verge")
        self.assertIsNone(galaxy.sectors[verge.name].rimward, "Nothing should be rimward of Verge")
        self.assertIsNone(galaxy.sectors[verge.name].spinward, "Nothing should be spinward of Verge")
        self.assertIsNone(galaxy.sectors[verge.name].trailing, "Nothing should be trailing of Verge")
        self.assertIsNone(galaxy.sectors[reft.name].coreward, "Nothing should be coreward of Reft")
        self.assertIsNone(galaxy.sectors[reft.name].trailing, "Nothing should be trailing of Reft")
        self.assertIsNone(galaxy.sectors[reft.name].spinward, "Nothing should be spinward of Reft")
        self.assertEqual(galaxy.sectors[verge.name], galaxy.sectors[reft.name].rimward, "Verge should be rimward of Reft")

    def testHorizontalOrdering(self):
        """
        A very simple, barebones test to check that Dagudashaag and Core
        end up in their correct relative positions - Dagudashaag being
        immediately spinward of Core.
        """
        galaxy = Galaxy(0)
        core = Sector("Core", "# 0, 0")
        self.assertEqual(0, core.x)
        self.assertEqual(0, core.y)
        dagudashaag = Sector("Dagudashaag", "# -1, 0")
        self.assertEqual(-1, dagudashaag.x)
        self.assertEqual(0, dagudashaag.y)
        galaxy.sectors[core.name] = core
        galaxy.sectors[dagudashaag.name] = dagudashaag
        # verify, before bounding sectors gets run, nothing is hooked up
        self.assertIsNone(galaxy.sectors[core.name].coreward)
        self.assertIsNone(galaxy.sectors[core.name].rimward)
        self.assertIsNone(galaxy.sectors[core.name].spinward)
        self.assertIsNone(galaxy.sectors[core.name].trailing)
        self.assertIsNone(galaxy.sectors[dagudashaag.name].coreward)
        self.assertIsNone(galaxy.sectors[dagudashaag.name].rimward)
        self.assertIsNone(galaxy.sectors[dagudashaag.name].spinward)
        self.assertIsNone(galaxy.sectors[dagudashaag.name].trailing)
        # set bounding sectors
        galaxy.set_bounding_sectors()
        # now assert that Dagudashaag is spinward from Core, Core is trailing of Dagudashaag, and nothing else
        # got set
        self.assertEqual(galaxy.sectors[dagudashaag.name], galaxy.sectors[core.name].spinward, "Dagudashaag should be spinward of core")
        self.assertIsNone(galaxy.sectors[core.name].coreward, "Nothing should be coreward of Core")
        self.assertIsNone(galaxy.sectors[core.name].rimward, "Nothing should be rimward of Core")
        self.assertIsNone(galaxy.sectors[core.name].trailing, "Nothing should be trailing of core")
        self.assertIsNone(galaxy.sectors[dagudashaag.name].coreward, "Nothing should be coreward of Dagudashaag")
        self.assertIsNone(galaxy.sectors[dagudashaag.name].rimward, "Nothing should be rimward of Dagudashaag")
        self.assertIsNone(galaxy.sectors[dagudashaag.name].spinward, "Nothing should be spinward of Dagudashaag")
        self.assertEqual(galaxy.sectors[core.name], galaxy.sectors[dagudashaag.name].trailing, "Core should be trailing of Dagudashaag")
| {
"content_hash": "9a1a726f1830bf4c50ee077596b854ef",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 136,
"avg_line_length": 46.663366336633665,
"alnum_prop": 0.7031614682792277,
"repo_name": "makhidkarun/traveller_pyroute",
"id": "a11c3d72105134f3cd78ad0e461a7ff2f92aa01d",
"size": "4713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/testGalaxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348119"
}
],
"symlink_target": ""
} |
from datetime import datetime
import os
import sys
from django.core.management.base import BaseCommand, CommandError
from workflows import module_importer
from workflows.management.commands import export_package_old as export_package
from workflows.management.commands import import_package_old as import_package
from optparse import make_option
class Command(BaseCommand):
    """Django management command: re-import DB entries of all workflows packages."""
    # Fix: corrected the "entires" -> "entries" typo in the user-facing help.
    help = 'Automatically iterates through all installed workflows sub-applications/projects/packages and imports their database entries. ' \
           'Note: Installed workflows packages are defined in mothra/settings.py via variable INSTALLED_APPS and begin with the string "workflows.". ' \
           'Auto import procedure does the following:\n' \
           ' - Creates database export of all definition objects using export_package command.\n'\
           ' - Export file goes to folder specified in mothra/settings.py/BACKUP_DIR and is timestamped\n'\
           ' For each installed package:\n' \
           ' - Loads package settings from "workflows/<package_name>/settings.py\n' \
           ' - If settings do not exist or settings.py/AUTO_IMPORT_DB == False then exit\n' \
           ' - Else tries to import all the files specified in settings.py/AUTO_IMPORT_DB_FILES list\n' \
           ' - If some files are missing skip them.\n' \
           ' - Imports are done using import_package command using -r option if settings.py/AUTO_IMPORT_DB_REPLACE_OPTION == True'

    # Legacy optparse-style option declarations (pre-Django-1.8 API).
    option_list = BaseCommand.option_list + (
        make_option('-n', '--nobackup',
                    action="store_true",
                    dest='nobackup',
                    default=False,
                    help='No backup is created prior starting the import process.'
                    ),
        make_option('-a', '--ask',
                    action="store_true",
                    dest='ask',
                    default=False,
                    help='Ask to import packages which are marked not to be imported.'
                    ),
    )

    def handle(self, *args, **options):
        """Entry point: delegate the real work to auto_import_all_packages."""
        auto_import_all_packages(self.stdout.write, options['nobackup'], options['ask'])
        self.stdout.write('Auto import procedure finished.\n')
def auto_import_all_packages(writeFunc, nobackup, ask):
    """Back up the DB (unless ``nobackup``) then import each package's data.

    :param writeFunc: callable used for progress output (e.g. stdout.write)
    :param nobackup: skip the pre-import database export when True
    :param ask: interactively offer to import packages marked as no-import
    """
    if ask:
        writeFunc('The procedure will interactively ask to import packages marked as not to be auto imported due to "--ask" option.\n')
    if nobackup:
        writeFunc('No backup will be created due to "--nobackup" option.\n')
    else:
        try:
            from mothra.settings import BACKUP_DIR
        except:
            raise CommandError('Do not know where to backup existing database: BACKUP_DIR variable not found in mothra/settings.py. Consider using "--nobackup" option.')
        if not os.path.exists(BACKUP_DIR): os.makedirs(BACKUP_DIR)
        # Timestamped export file, e.g. db_backup_20211231_235959.json
        timeStamp = datetime.now().strftime('_%Y%m%d_%H%M%S.json')
        backupDir = os.path.join(BACKUP_DIR,"db_backup"+timeStamp)
        writeFunc('Exporting to backup...\n')
        result = export_package.export_package_string(lambda text: writeFunc(' '+text), ('all',), False, False, True, 1)
        try:
            # NOTE(review): writing ``result.encode('utf-8')`` (bytes) into a
            # text-mode file only works on Python 2 -- confirm target version.
            f = open(backupDir, 'w')
            f.write(result.encode('utf-8'))
            f.close()
            writeFunc('Backup successfully written.\n')
        except Exception as e:
            raise CommandError('There was a problem with writing to the given backup file "%s". Problem: %s'%(backupDir, e))
        writeFunc('Export procedure successfully finished. Results written to the file "%s".\n' %backupDir)
    #get all relevant package settings:
    packageSetts = module_importer.import_all_packages_libs_as_dict("settings")
    for pckSett in packageSetts:
        writeFunc('--------------------------------------------------------------------------------\n')
        writeFunc('Auto importing package "%s":\n'%pckSett)
        sett = packageSetts[pckSett]
        if sett is None:
            writeFunc(' No settings found for this package.\n')
            continue
        try:
            imp = sett.AUTO_IMPORT_DB
            files = sett.AUTO_IMPORT_DB_FILES
        except:
            # Settings module lacks the auto-import configuration; skip it.
            writeFunc(' Either AUTO_IMPORT_DB or AUTO_IMPORT_DB_FILES not found in package\'s settings.\n')
            continue
        replace = False
        try:
            replace = sett.AUTO_IMPORT_DB_REPLACE_OPTION
        except:
            pass
        if not imp:
            writeFunc(' AUTO_IMPORT_DB set to false in package\'s settings.\n')
            # With --ask the user may still opt in to importing this package.
            if not ask or not query_yes_no(' Do you want to import this package anyway?\n'):
                continue
        for fileName in files:
            writeFunc(' Importing file "%s":\n' % fileName)
            try:
                fileContent = open(fileName, 'r').read()
            except:
                # Missing/unreadable data files are skipped, per the help text.
                writeFunc(' Cannot open or read given package data file.\n')
            else:
                import_package.import_package_string(lambda text: writeFunc(' '+text), fileContent, replace)
                writeFunc(' Done with file "%s":\n' % fileName)
    writeFunc('--------------------------------------------------------------------------------\n')
    return
def query_yes_no(question, default=None):
    """Ask a yes/no question via input() and return the answer as a bool.

    :param question: string presented to the user.
    :param default: presumed answer if the user just hits <Enter>.  Must be
        "yes", "no" or None (the default, meaning an explicit answer is
        required of the user).
    :return: True for a "yes" answer, False for a "no" answer.

    (Docstring fixed: it previously claimed raw_input() was used and that
    "yes" was the default; also ``default == None`` -> ``is None``.)
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
"content_hash": "106c7e69165807770d5d5640cef19569",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 169,
"avg_line_length": 45.34532374100719,
"alnum_prop": 0.5887672536887196,
"repo_name": "xflows/clowdflows-backend",
"id": "82051172fb1dd2390c823313dab3ca54b8ff8bb6",
"size": "6303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflows/management/commands/auto_import_packages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "74413"
},
{
"name": "JavaScript",
"bytes": "10945"
},
{
"name": "Python",
"bytes": "372594"
},
{
"name": "Shell",
"bytes": "453"
}
],
"symlink_target": ""
} |
"""Tests for annotation type hint casting."""
from __future__ import unicode_literals
from collections import (
deque,
Counter
)
import json
import pytest
import six
from typingplus import *
@pytest.mark.parametrize('type_, expected', [
    (AnyStr, six.string_types + (bytes, bytearray)),
    (str, str),
    (ByteString, (bytes, bytearray)),
    (bytes, bytes),
    (bytearray, bytearray)
])
def test_cast_string(type_, expected):
    """Casting native, byte and unicode strings yields the expected type(s)."""
    for sample in ('abc', b'abc', u'abc'):
        assert isinstance(cast(type_, sample), expected)
def test_cast_numeric():
    """Casting to a numeric type must produce an instance of that type."""
    cases = [
        (int, '4'),
        (float, '4.0'),
        (complex, '4+3j'),
        (int, 4.0),
        (float, 4),
        (complex, 4),
        (complex, 4.0),
    ]
    for target, value in cases:
        assert isinstance(cast(target, value), target)
@pytest.mark.parametrize('type_, expected_type, expected', [
    (tuple, tuple, ('1', '2', '3', '3', '4')),
    (Tuple, tuple, ('1', '2', '3', '3', '4')),
    (Tuple[int, int, int, int, int], tuple, (1, 2, 3, 3, 4)),
    (list, list, ['1', '2', '3', '3', '4']),
    (List, list, ['1', '2', '3', '3', '4']),
    (List[int], list, [1, 2, 3, 3, 4]),
    (MutableSequence, list, ['1', '2', '3', '3', '4']),
    (MutableSequence[int], list, [1, 2, 3, 3, 4]),
    (set, set, {'1', '2', '3', '4'}),
    (Set, set, {'1', '2', '3', '4'}),
    (Set[int], set, {1, 2, 3, 4}),
    (MutableSet, set, {'1', '2', '3', '4'}),
    (MutableSet[int], set, {1, 2, 3, 4}),
    (frozenset, frozenset, frozenset(['1', '2', '3', '4'])),
    (FrozenSet, frozenset, frozenset(['1', '2', '3', '4'])),
    (FrozenSet[int], frozenset, frozenset([1, 2, 3, 4])),
    (deque, deque, deque(['1', '2', '3', '3', '4'])),
    (Counter, Counter, Counter({'1': 1, '2': 1, '3': 2, '4': 1}))
])
def test_cast_iterables(type_, expected_type, expected):
    """Casting a list to each sequence/set type yields the right container."""
    source = ['1', '2', '3', '3', '4']
    result = cast(type_, source)
    assert isinstance(result, expected_type)
    assert result == expected
@pytest.mark.parametrize('type_, expected_type, expected', [
    (dict, dict, {'1': 1}),
    (Dict, dict, {'1': 1}),
    (Dict[str, int], dict, {'1': 1}),
    (Dict[str, str], dict, {'1': '1'}),
    (Dict[int, float], dict, {1: 1.0}),
    (Dict[complex, ByteString], dict, {complex(1): b'1'})
])
def test_cast_mappings(type_, expected_type, expected):
    """Casting a dict converts both keys and values per the annotation."""
    source = {'1': 1}
    result = cast(type_, source)
    assert isinstance(result, expected_type)
    assert result == expected
def test_cast_any():
    """Casting to Any must leave the value's type unchanged."""
    for value, expected in (('abc', six.string_types), (1, int), (1.0, float)):
        assert isinstance(cast(Any, value), expected)
def test_cast_typevar():
    """Test casting to a constrained TypeVar.

    Fix: the original docstring said "Test casting to Any" — a copy-paste
    from test_cast_any; this test exercises TypeVar constraints.
    """
    constrained_types = (int, str)
    T = TypeVar('T', *constrained_types)
    assert isinstance(cast(T, 1), constrained_types)
    assert isinstance(cast(T, b'1'), constrained_types)
def test_bad_cast():
    """An uncastable value must raise TypeError."""
    with pytest.raises(TypeError):
        cast(int, 'abc')
# NOTE(review): function name should be snake_case (test_bad_tuple_cast);
# left unchanged here because CI test selections may reference it by name.
def test_bad_tuple_Cast():
    """Test that casting a bad tuple raises a TypeError."""
    # Tuple[int] declares exactly one element; a 3-element list cannot match.
    with pytest.raises(TypeError):
        cast(Tuple[int], [1, 2, 3])
| {
"content_hash": "c9a608bc3ddcc659196b16ec819b251d",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 65,
"avg_line_length": 32.28703703703704,
"alnum_prop": 0.5815887582449096,
"repo_name": "contains-io/typingplus",
"id": "b5bcde43b6d77223ad68258ffc0919c0b7995804",
"size": "3511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_casting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32057"
}
],
"symlink_target": ""
} |
from subprocess import check_output
import re
import urllib2
import sys
# CLI arguments: the exclusive lower bound and inclusive upper bound of the
# commit range to report (git "oldest..last_hash" semantics).
oldest = sys.argv[1]
last_hash = sys.argv[2]
# Matches the "commit <sha1>" header lines of `git log` output.
RE_COMMIT_HASH = r"^commit.([0-9a-fA-F]+)$"
RE_BUG_NUMBERS = r"(?:cases?|Bug[sz] ?IDs?)[: ] *\d+(?:[,: ] *\d+)*"
# The above regex makes mistakes for messages like "bugzid 1: 42 lines added"
# Use the below line instead to require a colon after the bug numbers
# RE_BUG_NUMBERS = r"(?:cases?|Bug[sz] ?IDs?)[: ] *\d+(?:[,: ] *\d+):*"
# NOTE(review): BUGZ_URL ends with "/" and the format string adds another
# "/" — produces "...com//cvsSubmit.asp"; servers usually tolerate this,
# but confirm.
BUGZ_URL = "https://spiralwork.fogbugz.com/"
IXREPOSITORY="1"
FOGBUGZ_URL_FORMAT = BUGZ_URL + "/cvsSubmit.asp?ixBug={}&sFile={}&sPrev={}&sNew={}&ixRepository=" + IXREPOSITORY
def get_commit_hashes(oldest, last_hash):
    """Return commit hashes in (oldest, last_hash], ordered oldest-first.

    The boundary commit `oldest` is appended so callers can pair every
    commit with its parent.
    """
    log_text = check_output(['git', 'log', "{}..{}".format(oldest, last_hash)])
    found = re.findall(RE_COMMIT_HASH, log_text, flags=re.MULTILINE)
    found.append(oldest)
    # git log emits newest-first; reverse so the newest commit is last
    return found[::-1]
def get_bug_numbers(commit_hash):
    """Extract FogBugz case numbers from a commit message, or None if absent."""
    commit_text = check_output(['git', 'cat-file', 'commit', commit_hash])
    found = re.search(RE_BUG_NUMBERS, commit_text, flags=re.MULTILINE | re.IGNORECASE)
    if found is None:
        return None
    # pull the individual numbers out of the matched "case 1, 2, 3" span
    return re.findall(r'\d+', found.group(0))
def get_files_committed(commit_hash):
    """Return the list of file paths changed by *commit_hash*.

    Bug fix: the original queried the module-level ``last_hash`` instead of
    the ``commit_hash`` argument, so every commit in the range reported the
    files of the final commit.
    """
    output = check_output(['git', 'log', '-1', '--name-only', '--pretty=format:""', commit_hash])
    # strip the empty pretty-format line and split into individual paths
    files = output.strip('\n"').split('\n')
    return files
commits = get_commit_hashes(oldest, last_hash)
# commits[0] is the range boundary (parent); it was processed previously.
for i, rev in enumerate(commits[1:]):
    bug_numbers = get_bug_numbers(rev)
    if bug_numbers:  # don't bother if no bugs were committed against
        files = get_files_committed(rev)
        for bug_number in bug_numbers:
            for changed_file in files:
                # Bug fix: the parent of commits[1 + i] is commits[i]; the
                # original used commits[i - 1], which for i == 0 wrapped
                # around to the NEWEST commit instead of the boundary.
                urllib2.urlopen(FOGBUGZ_URL_FORMAT.format(bug_number, changed_file, commits[i], rev))
| {
"content_hash": "3f5b8c629ae4bf242d5ba64634a4053f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 112,
"avg_line_length": 35.888888888888886,
"alnum_prop": 0.6460268317853457,
"repo_name": "shiliang-spiralwks/Opened-Panda",
"id": "ef12e0337fe8d6a31aee712770714b8b9d1ae8a3",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fbNotify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1961"
},
{
"name": "Shell",
"bytes": "100"
}
],
"symlink_target": ""
} |
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
import scipy as sc
import sklearn
from sklearn import decomposition
from sklearn import cluster
import seaborn as sns
import math
## modes this script can be run in (second line of the settings file)
run_modes = ["find_day_correlated_pcs", "2d-pca-multiplot", "2d-pca-single", "3d-pca", "hierarchy", "pseudotime"]
## sets with parameter look like:
# operation set_name parameter
# for ex.: color day_4 clue
accepted_sets_with_parameter = ["color", "outline-color", "size", "name", "shape", "superimpose", "superimpose-for-spearman"]
## sets without parameter look like:
# operation set_name
# for ex.: remove low_read_count_cells
accepted_sets_without_parameter = ["remove"]
## scalar parameters that may be set once in the settings file
accepted_parameters = ["number_of_genes"]
## this class reads settings of the run and keeps them in its attributes
# if settings file in incorrect, the init method prints error and terminates application
class settings:
## read sets of cells later used to refer to them (to remove/color/superimpose etc...)
#
# format:
#
# cell_set_name <tab> cell <tab> cell ...
#
# cell_set_name <tab> cell <tab> cell ...
def read_cell_sets(self,cellset_file):
cell_sets = {}
with open(cellset_file) as f:
for line in f:
x = line.rstrip().split("\t")
cell_sets[x[0]] = x[1:]
return cell_sets
def __init__(self, settings_file, cellset_file):
self.cell_sets = self.read_cell_sets(cellset_file)
self.sets = {}
# inniciate all sets to empty
for i in accepted_sets_with_parameter+accepted_sets_without_parameter:
self.sets[i] = set()
# in some cases we'll want to keep information which PCs to plot
self.pcs = []
# how many genes per PC do we want to save (for gene onthology analysis)
self.parameters = {}
self.parameters["number_of_genes"] = "1000"
with open(settings_file) as f:
# first line defines name of the output
self.result_filename = f.readline().rstrip()
# second line defines analysis to run
mode_line = f.readline().rstrip()
self.run_mode = mode_line.split("\t")[0]
if self.run_mode not in run_modes:
print "Unkown run mode (line 2 in settings file): ",self.run_mode
exit(1)
# if we're plotting pca, we want list of PCs to use
if mode_line in ["2d-pca-single", "3d-pca"]:
self.pcs = map(int,type_line.split("\t")[1].split(","))
if not(
((mode_line == "2d-pca-single")and(len(self.pcs)==2))
or((mode_line == "3d-pca")and(len(self.pcs)==3))
):
print "Invalid number of PCs given! ",self.mode_line
exit(1)
# from third line onwards, the script reads different operations carried out on defined cell sets
for line in f:
if(line.startswith("#")):
continue
x = line.rstrip().split("\t")
if(x[0] in accepted_sets_without_parameter):
self.sets[x[0]].add(x[1])
elif(x[0] in accepted_sets_with_parameter):
self.sets[x[0]].add(tuple(x[1:3]))
elif(x[0] in accepted_parameters):
self.parameters[x[0]] = x[1]
else:
print "Unknown option:",line
exit(1)
## function takes expression file and settings object and returns:
# - pd.DataFrame with [log transformed] expression values [genes expressed over min_expression in at least min_cells]
# - pd.DataFrame with annotations for each cell. Expression table and annotation table have the same rows
def read_expression(expression_file, settings, min_expression = 0.1, min_cells = 10, log_transform = True):
    """Load a cells-x-genes expression table and build a per-cell annotation.

    Returns (expression_table, annotation): two DataFrames sharing the same
    row index, one row per cell.
    """
    # read expression; input CSV is genes x cells, so transpose to cells x genes
    # (presumably — TODO confirm input orientation against the data files)
    expression_table = pd.read_csv(expression_file, sep=",").transpose()
    print "Read expression table with shape:",expression_table.shape
    # remove unwanted cells (every cell set listed under the "remove" operation)
    for s in settings.sets["remove"]:
        print "Removed cells from set:",s,settings.cell_sets[s]
        expression_table.drop(settings.cell_sets[s], inplace=True, errors="ignore")
    # log transform: log2(x + 1) so zero counts stay finite
    if(log_transform):
        expression_table += 1
        expression_table = expression_table.apply(np.log2)
        print "Log transformed data"
    # keep only genes expressed above min_expression in more than min_cells cells
    expressed_genes = (expression_table > min_expression).sum() > min_cells
    expression_table = expression_table.loc[ : , expressed_genes]
    print "Removed genes that are not expressed >",min_expression," in at least",min_cells ,"cells"
    print "Expression table has now shape:",expression_table.shape
    # create annotation table and populate it with default values
    annotation = pd.DataFrame(index=expression_table.index)
    annotation["color"] = "black"
    annotation["superimpose"] = False
    annotation["superimpose-for-spearman"] = False
    annotation["size"] = 5.0
    annotation["name"] = ""
    # apply every "operation set_name value" directive from the settings file
    for s in accepted_sets_with_parameter: # iterating over dictionary operation->set
        for i in settings.sets[s]: # iterating over set; i = (set_name, value)
            subset = set(settings.cell_sets[i[0]]).intersection(annotation.index)
            # NOTE(review): .loc with a set-valued row indexer relies on older
            # pandas behavior; newer pandas requires a list — confirm pin.
            annotation.loc[subset,s] = i[1]
    annotation["size"] = pd.to_numeric(annotation["size"])
    # where outline color was not defined, set it to the color of the cell
    # (x != x is True only for NaN, i.e. rows never assigned an outline-color)
    annotation.loc[annotation["outline-color"]!=annotation["outline-color"], "outline-color"] = annotation["color"]
    # define day and treatment columns from the well-known cell set names
    day_labels = ["day_4","day_6","day_8","day_12"]
    treatment_labels = ["shCtrl","sh733", "sh737"]
    for i in day_labels:
        subset = set(settings.cell_sets[i]).intersection(annotation.index)
        annotation.loc[subset,"day"]=int(i.split("_")[1])
    for i in treatment_labels:
        subset = set(settings.cell_sets[i]).intersection(annotation.index)
        annotation.loc[subset,"treatment"]=i
    # crop annotation dataframe to only rows, that are in expression table
    annotation = annotation.loc[expression_table.index]
    return (expression_table, annotation)
## runs PCA and returns:
# - PCA transformed coordinates
# - sklearn.decomposition.pca object
def run_PCA(expression_table, annotation, n_components):
    """Fit PCA on non-superimposed cells, then project ALL cells onto it."""
    pca = decomposition.PCA(n_components=n_components, svd_solver="full")
    # fit only on cells not flagged "superimpose" so they don't shape the axes
    expression_table_for_PCA = expression_table.loc[annotation[annotation["superimpose"]==False].index]
    print "Calculating PCA on table of shape:",expression_table_for_PCA.shape
    pca.fit(expression_table_for_PCA)
    print "Explained variance: ", pca.explained_variance_
    print "Explained variance ratio: ", pca.explained_variance_ratio_
    # transform expression using PCA vectors; columns are 1-based PC numbers
    transformed_expression = pd.DataFrame(pca.transform(expression_table), index=expression_table.index, columns = range(1,n_components+1))
    return transformed_expression, pca
## save the n isoforms with the largest absolute loading on a PC to a file
# Bug fix: write to the `filename` argument. The original ignored it and
# rebuilt the name from the module-level `settings` — but at module level
# `settings` is the CLASS (no `result_filename` attribute), so any call
# raised AttributeError.
# NOTE(review): `pca` is still read from global scope — confirm callers
# define it before calling (the function is not invoked in this file).
def get_isoforms_correlated_with_pc(expression_table, pc, n, filename):
    pc = int(pc)
    # loadings of every isoform on the requested component
    df = pd.Series(pca.components_[pc], index=expression_table.columns)
    # keep the n entries with the largest |loading|
    df = df.reindex(df.abs().sort_values(inplace=False, ascending=False).index).iloc[0:n]
    df.to_csv(filename, sep="\t")
## create annotation label for a point on axis if it's far enough from other points
# used internally by plotting functions
def annotate_df(row,df,min_dist,ax):
    """Label row's point on ax if its nearest neighbour is beyond min_dist.

    Distance is Manhattan (sum of absolute coordinate differences).
    """
    # sort all distances; index [1] skips the zero distance of row to itself
    dist = (df - row).abs().sum(axis=1).sort_values()[1]
    if(dist > min_dist):
        ax.annotate(row.name, list(row.values),
            xytext=(5,-3),
            textcoords='offset points',
            size=10,
            color='darkslategrey')
## create plot of 6 PC combinations
# PC1 vs PC2, PC3 vs PC4 etc.
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - annotation pd.DataFrame
# - pca sklearn.decomposition object
# - settings object
def plot_2d_pca_multiplot(transformed_expression, annotation, pca, settings):
    """Scatter-plot PCs 1v2 ... 11v12 on a 2x3 grid and save to PNG."""
    fig, ax = plt.subplots(2,3, figsize=(15,10))
    # one scatter call per marker shape (matplotlib can't vary marker per point)
    markers = list(annotation["shape"].unique())
    for pc in range(0,12,2):
        for m in markers:
            cells_with_this_shape = annotation["shape"]==m
            ann = annotation.loc[cells_with_this_shape]
            #import pdb; pdb.set_trace()
            #import code; code.interact(local=locals())
            transformed_expression.loc[cells_with_this_shape].plot.scatter(
                x=pc+1,
                y=pc+2,
                # NOTE: pc/6 and (pc/2)%3 rely on Python 2 integer division
                # to index the 2x3 axes grid
                ax=ax[pc/6][(pc/2)%3],
                s=ann["size"].values,
                c=ann["color"].values,
                legend=True,
                alpha=0.8,
                #edgecolor="black",
                marker = m
            )
        # axis labels carry the explained-variance percentage for each PC
        explained_variance1 = "{0:.2f}".format(pca.explained_variance_ratio_[pc]*100)+"%"
        explained_variance2 = "{0:.2f}".format(pca.explained_variance_ratio_[pc+1]*100)+"%"
        ax[pc/6][(pc/2)%3].set_xlabel("PCA "+str(pc+1)+" ["+explained_variance1+" of variance]")
        ax[pc/6][(pc/2)%3].set_ylabel("PCA "+str(pc+2)+" ["+explained_variance2+" of variance]")
    plt.tight_layout()
    plt.subplots_adjust(hspace=0.15, wspace=0.15, left=0.05, bottom=0.05)
    plt.savefig(settings.result_filename+"-pca-multiplot.png", dpi=200)
    plt.show()
## plot cells of defined pair of PCs
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - annotation pd.DataFrame
# - pca sklearn.decomposition object
# - settings object
def plot_2d_pca_single_plot(transformed_expression, annotation, pca, settings):
    """Scatter-plot the two PCs chosen in settings.pcs and save to PNG."""
    fig,ax = plt.subplots(figsize=(5,5))
    # one scatter call per marker shape (matplotlib can't vary marker per point)
    markers = list(annotation["shape"].unique())
    for m in markers:
        cells_with_this_shape = annotation["shape"]==m
        ann = annotation.loc[cells_with_this_shape]
        transformed_expression.loc[cells_with_this_shape].plot.scatter(
            x=settings.pcs[0],
            y=settings.pcs[1],
            ax=ax,
            s=ann["size"].values,
            c=ann["color"].values,
            legend=True,
            alpha=0.8,
            edgecolor=ann["outline-color"].values,
            marker = m
        )
    # label isolated points (min. Manhattan distance 8.0 to the next point)
    for cell in transformed_expression.index:
        row = transformed_expression.loc[cell,[int(settings.pcs[0]),int(settings.pcs[1])]]
        df = transformed_expression.loc[ : ,[int(settings.pcs[0]),int(settings.pcs[1])]]
        annotate_df(row, df, 8.0, ax)
    #ax.set_xlim([-100,100])
    #ax.set_ylim([-100,100])
    plt.xlabel("PCA "+str(settings.pcs[0]))
    plt.ylabel("PCA "+str(settings.pcs[1]))
    plt.tight_layout()
    plt.subplots_adjust(right=0.94)
    plt.savefig(settings.result_filename+"PC-"+str(settings.pcs[0])+"-"+str(settings.pcs[1])+".png", dpi=200)
    plt.show()
    #plt.close()
## create 3d PCA plot using plotly library
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - annotation pd.DataFrame
# - settings object
def plot_3d_pca(transformed_expression, annotation, settings, height = 1080, width = 1600):
    """Upload an interactive 3D scatter of the three PCs in settings.pcs.

    NOTE(review): `plotly.plotly` is the legacy cloud-upload module (moved
    to the separate `chart_studio` package in plotly 4) — confirm pin.
    """
    import plotly.plotly as py
    import plotly.graph_objs as go
    # axis styling shared by all three axes; titles carry the PC numbers
    layout = dict(
        width=width,
        height=height,
        autosize=False,
        #title='Test',
        scene=dict(
            xaxis=dict(
                title="PC "+str(settings.pcs[0]),
                gridcolor='rgb(0, 0, 0)',
                zerolinecolor='rgb(255, 0, 0)',
                showbackground=True,
                backgroundcolor='#bababa'
            ),
            yaxis=dict(
                title="PC "+str(settings.pcs[1]),
                gridcolor='rgb(0, 0, 0)',
                zerolinecolor='rgb(255, 0, 0)',
                showbackground=True,
                backgroundcolor='#bababa'
            ),
            zaxis=dict(
                title="PC "+str(settings.pcs[2]),
                gridcolor='rgb(0, 0, 0)',
                zerolinecolor='rgb(255, 0, 0)',
                showbackground=True,
                backgroundcolor='#bababa'
            ),
            aspectmode = 'manual'
        ),
    )
    data = []
    # one 3D marker trace; hover text is the cell name
    # NOTE(review): settings.pcs entries are used directly as column keys —
    # they are ints when set from the settings file but strings when set by
    # find_pseudotime; confirm which callers reach this function.
    trace = dict(
        text = transformed_expression.index,# + " "+ transformed_expression["day"], #+ "\n" + transformed_expression["branch"],
        x = transformed_expression[settings.pcs[0]],
        y = transformed_expression[settings.pcs[1]],
        z = transformed_expression[settings.pcs[2]],
        type = "scatter3d",
        mode = 'markers',
        marker = dict(
            size=annotation["size"].values,
            color=annotation["color"].values,
            symbol=annotation["shape"].values,
            line=dict(width=1) )
    )
    data.append( trace )
    fig = dict(data=data, layout=layout)
    url = py.plot(fig, filename=settings.result_filename, validate=False)
## plot hierarchical clusterings with several scipy linkage methods
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - annotation pd.DataFrame
# - settings object
# Bug fix: the original clustered the module-level global `expression_table`
# and never used its `transformed_expression` parameter; the parameter is
# now used so the function clusters what the caller (main) passes in.
# NOTE(review): if clustering RAW expression was intended, the call site
# should be changed instead — confirm with the author.
def plot_hierarchycal_clusterings(transformed_expression, annotation, settings):
    link_color = {}
    def link_color_func(node):
        # scipy dendrogram callback: return the precomputed color per link
        return link_color[node]
    def colorize_links(linkage):
        # color a link when both children share one color, otherwise gray
        l_color = {}
        n = transformed_expression.shape[0]
        for i in range(0,n):
            # leaves 0..n-1 take the color of the corresponding cell
            l_color[i] = annotation.iloc[i,]["color"]
        for i in range(0,linkage.shape[0]):
            clust1 = int(linkage[i,0])
            clust2 = int(linkage[i,1])
            if(l_color[clust1] == l_color[clust2]):
                l_color[n+i] = l_color[clust1]
            else:
                l_color[n+i] = "gray"
        return l_color
    scipy_linkage_methods = [ "complete", "average", "single", "centroid", "median", "ward"] #"single",weighted
    # plot clusterings on one major figure, one panel per linkage method
    fig,ax = plt.subplots(nrows=2, ncols=3, figsize=(50, 30))
    i=0
    for method in scipy_linkage_methods:
        linkage = sc.cluster.hierarchy.linkage(transformed_expression, method=method)
        link_color = colorize_links(linkage)
        dendro = sc.cluster.hierarchy.dendrogram(
            linkage,
            # NOTE: i/3 relies on Python 2 integer division for the 2x3 grid
            ax=ax[i/3,i%3],
            labels = transformed_expression.index,
            link_color_func = link_color_func,
            #color_threshold = 0,
            #above_threshold_color = "black",
            count_sort = "ascending") #, title=method
        ax[i/3,i%3].set_title(method)
        # color the leaf labels by each cell's annotation color
        tick_labels = ax[i/3,i%3].get_xmajorticklabels()
        for lbl in tick_labels:
            lbl.set_color(annotation.loc[lbl.get_text()]["color"])
        i += 1
    plt.tight_layout()
    plt.savefig(settings.result_filename+"-clustering.png", dpi=200)
    plt.show()
## rotate transformed expression matrix by defined angle
# used internally in order to define pseudotime
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - x,y = PCs (column keys) to rotate
# - angle in degrees
# returns:
# pd.DataFrame copy with values in columns x,y rotated by angle
def rotate_expression(transformed_expression,x,y,angle):
    """Return a copy with columns x and y rotated by `angle` degrees."""
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    rotated = transformed_expression.copy()
    # standard 2D rotation; both outputs read from the ORIGINAL columns
    rotated[x] = transformed_expression[x] * cos_t - transformed_expression[y] * sin_t
    rotated[y] = transformed_expression[x] * sin_t + transformed_expression[y] * cos_t
    return rotated
## function
# - finds pair of 2 PCs that are most correlated with time labels (as defined by "day" column in annotation table) using spearman correlation
# - finds rotation of this PCs so X axis has best correlation with time
#
# returns: pseudotime for each cell, defined as linear combination of PCs, having best time correlation
#
# arguments are:
# - pd.DataFrame with PCA transformed gene expression
# - annotation pd.DataFrame
# - pca sklearn.decomposition object
# - settings object
def find_pseudotime(transformed_expression, annotation, pca, settings):
    """Derive a pseudotime coordinate; also saves a Spearman-vs-variance bar plot.

    Side effects: temporarily adds/removes a "day" column on the input frame,
    overwrites settings.pcs with the two best PCs (as strings).
    """
    n_pca = len(transformed_expression.columns)
    transformed_expression["day"] = annotation["day"]
    # cells flagged superimpose-for-spearman are excluded from the correlation
    transformed_expression_without_superimposed = transformed_expression.loc[annotation[annotation["superimpose-for-spearman"]==False].index]
    print "Finding best rotation for Spearman correlation. Shape of used table:",transformed_expression_without_superimposed.shape
    # |Spearman correlation| of every PC with the day label, best first
    spearman = transformed_expression_without_superimposed.corr(method="spearman").loc["day",range(1,n_pca+1)].abs().sort_values(ascending=False)
    #plot_spearman correlations and explained variation
    # NOTE(review): variable name has a typo ("searman"); kept as-is here.
    searman_filename = settings.result_filename.replace(".png", "_spearman.png")
    width=0.4
    fig,ax = plt.subplots(figsize=(8,5))
    ax2= ax.twinx()
    # blue bars: correlation; red bars (right axis): explained variance ratio
    spearman.plot.bar(ax=ax, width=width, position=1, color="blue")
    pd.Series(pca.explained_variance_ratio_, index=range(1,n_pca+1)).loc[spearman.index].plot.bar(ax=ax2, width=width, position=0, color="red")
    ax.set_xlabel("PC component")
    ax.set_ylabel("Spearman correlation\nto days [blue]")
    ax2.set_ylabel("% variance explained [red]")
    plt.tight_layout()
    low,high = plt.xlim()
    plt.xlim(low-0.5, high)
    plt.savefig(searman_filename, dpi=200)
    # keep the two most day-correlated PCs (stringified PC numbers)
    settings.pcs = map(str,spearman.iloc[0:2].index)
    # find best rotation: brute-force all whole-degree angles, keep the one
    # whose rotated X axis correlates best with day
    best_angle = 0
    best_spearman = 0
    for a in range(0,360):
        te = rotate_expression(transformed_expression_without_superimposed, int(settings.pcs[0]), int(settings.pcs[1]), a)
        spearman = te.corr(method="spearman").loc["day",int(settings.pcs[0])]
        #print "Trying angle: ",a," spearman: ",spearman
        if(spearman > best_spearman):
            best_angle = a
            best_spearman = spearman
    # remove the helper column added above before returning
    del(transformed_expression["day"])
    print settings.pcs
    print "Best rotation: ",best_angle
    rotated_expression = rotate_expression(transformed_expression, int(settings.pcs[0]), int(settings.pcs[1]), best_angle)
    #plot_2d_pca_single_plot(rotated_expression, filename = settings.result_filename.replace(".png","-rotated.png"))
    # pseudotime = the rotated coordinate along the best-correlated axis
    pt = rotated_expression[int(settings.pcs[0])]
    pt.name = "pseudotime"
    return pt
## plots gene expression over pseudotime
# arguments are:
# - pd.DataFrame with gene expression
# - pd.Series with pseudotime coordinates for each cell
# - Ensamble transcript ID
def plot_gene_with_pseudotime(exp, pseudotime, transcript_id):
    """Show a scatter of one transcript's expression against pseudotime."""
    # join pseudotime and the chosen transcript column on the shared cell index
    expression_over_pseudotime = pd.DataFrame(pseudotime)
    expression_over_pseudotime["expression"] = exp[transcript_id]
    expression_over_pseudotime.plot.scatter(x="pseudotime", y="expression")
    plt.show()
## main function
# when run separately, program expects following arguments:
# - argv[1] = comma separated file with expression values
# - argv[2] = file with cell sets (see settings.read_cell_sets())
# - argv[3] = settings file (see class settings)
def main():
    """Parse CLI args, load data, run PCA, and dispatch on the run mode."""
    # get parameters
    expression_file = sys.argv[1]
    cellset_file = sys.argv[2]
    settings_file = sys.argv[3]
    n_pca = 20  # number of principal components to compute
    # read settings and cell_set files
    sett = settings(settings_file, cellset_file)
    # read expression table
    expression_table, annotation = read_expression(expression_file, sett)
    # calculate PCA
    PC_expression,pca = run_PCA(expression_table, annotation, n_pca)
    #print "Running in mode:",sett.run_mode
    # dispatch to the analysis/plot requested by the settings file
    if(sett.run_mode=="2d-pca-multiplot"):
        plot_2d_pca_multiplot(PC_expression, annotation, pca, sett)
    elif(sett.run_mode=="2d-pca-single"):
        plot_2d_pca_single_plot(PC_expression, annotation, pca, sett)
    elif(sett.run_mode=="3d-pca"):
        plot_3d_pca(PC_expression, annotation, sett)
    elif(sett.run_mode=="hierarchy"):
        plot_hierarchycal_clusterings(PC_expression, annotation, sett)
    elif(sett.run_mode=="pseudotime"):
        pseudotime = find_pseudotime(PC_expression, annotation, pca, sett)
        #plot_gene_with_pseudotime(expression_table, pseudotime.copy(), "ENST00000611179")
if __name__ == "__main__":
    main()
| {
"content_hash": "1c71eedf14aa6a83b4a30dc9fab501e6",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 142,
"avg_line_length": 37.932914046121596,
"alnum_prop": 0.7071957554990604,
"repo_name": "MacanPN/single-cell-tools",
"id": "964c570143b0707867693bcac50cc531b2e72cac",
"size": "18240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc-pseudotime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18240"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from .views import ControllPoint
# URL routes for the seek app. NOTE(review): uses the legacy `patterns()`
# helper removed in Django 1.10 — migrate to a plain list when upgrading.
urlpatterns = patterns(
    'seek.views',
    # app root -> class-based ControllPoint view
    url(r'^$', ControllPoint.as_view(), name='controll'),
) | {
"content_hash": "fd98024c11032c5ea207002b2ca1187e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 22.75,
"alnum_prop": 0.7307692307692307,
"repo_name": "HenriqueLR/watchful",
"id": "b805991c69334813df43dd3a3ca6473cc8df0bd3",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/seek/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1704"
},
{
"name": "HTML",
"bytes": "2953"
},
{
"name": "Perl",
"bytes": "132"
},
{
"name": "Python",
"bytes": "14228"
},
{
"name": "Shell",
"bytes": "328"
}
],
"symlink_target": ""
} |
# Python 2 string-method practice script (print statements throughout).
catname = "Andromeda"
print len(catname)  # length of the string
dloc = catname.index("m")  # index of the first "m"
print dloc
print catname.count("d")  # occurrences of "d"
print catname[0:5]  # slice: first five characters
# Andromeda is a cutie!
message = "Andromeda is not a cutie!"
print message[0:13] + message[17:25]  # splice out the word "not"
# NOTE: parses as print (("%s %s") % (...)) — %-formatting applies to the
# parenthesized string before printing.
print("%s %s") % (message[0:12], message[17:25])
print message[::-1]  # reversed string via negative-step slice
# I'm so happy we have her!
newmsg = "I'm so sad we have such a bad cat"
# adjacent string literals concatenate into one format string
print("%s" "happy" "%s" "her!") % (newmsg[0:7], newmsg[10:19])
print catname.upper()
print "Rachel".upper()
if message.startswith("Andromeda"):
    print newmsg
# split into words and rejoin — round-trips the original message
splitmsg = message.split(" ")
print " ".join(splitmsg)
| {
"content_hash": "e4eba454dedf913a297a952b4fd195ad",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 19.566666666666666,
"alnum_prop": 0.6643952299829642,
"repo_name": "TruthLacksLyricism/learning-python",
"id": "26476f964c16d4fec3e1bf251cb8516979225ad6",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic_string_ops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3230"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import ugettext_lazy as _
from feincms.admin.editor import ItemEditorForm
from feincms_markup.widgets import MarkItUpWidget
class MarkupContentAdminForm(ItemEditorForm):
    """
    FeinCMS item-editor form for markup content.

    The MarkItUp widget variants below are kept commented out for
    reference; a plain Textarea is currently active.
    """
    #markup = forms.CharField(widget=MarkItUpWidget(), required=True,
    # label=_('markup'))
    markup = forms.CharField(widget=forms.Textarea, required=True,
        label=_('markup'))
    #feincms_item_editor_classes = {
    # 'markup': 'markItUp',
    #}
    def __init__(self, *args, **kwargs):
        """Tag the markup textarea with the 'multiSet' CSS class."""
        super(MarkupContentAdminForm, self).__init__(*args, **kwargs)
        self.fields['markup'].widget.attrs.update({'class': 'multiSet'})
        #for field in self.feincms_item_editor_classes.keys():
        # self.fields[field].widget.attrs.update({'id': '%s' %
        # self.feincms_item_editor_classes[field]})
    #def save(self, *args, **kwargs):
    # super(MarkupContentAdminForm, self).save(*args, **kwargs)
    class Meta:
        # markup_html is generated from `markup`, so it is not edited directly
        exclude = ('markup_html',)
| {
"content_hash": "767db033c44f8153994f03ed9ea54119",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 35.96666666666667,
"alnum_prop": 0.6478220574606117,
"repo_name": "indexofire/feincms-markup",
"id": "32512b1942e09864c6ca5832fd75a9ef01ba3b2b",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincms_markup/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "58257"
},
{
"name": "Python",
"bytes": "12215"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from authtools.admin import NamedUserAdmin
from .models import Profile
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
# Resolve the active user model (supports a custom AUTH_USER_MODEL).
User = get_user_model()
class UserProfileInline(admin.StackedInline):
    """Edit the related Profile inline on the user admin page."""
    model = Profile
class NewUserAdmin(NamedUserAdmin):
    """User admin with an inline profile and a profile-permalink column."""
    inlines = [UserProfileInline]
    list_display = ('is_active', 'email', 'name', 'permalink',
        'is_superuser', 'is_staff',)
    # 'View on site' didn't work since the original User model needs to
    # have get_absolute_url defined. So showing on the list display
    # was a workaround.
    def permalink(self, obj):
        """Render an HTML link to the user's public profile page."""
        url = reverse("profiles:show",
            kwargs={"slug": obj.profile.slug})
        # Unicode hex b6 is the Pilcrow sign
        return '<a href="{}">{}</a>'.format(url, '\xb6')
    # NOTE(review): allow_tags was deprecated in Django 1.9 and removed in
    # 2.0 — switch to django.utils.html.format_html when upgrading.
    permalink.allow_tags = True
# Swap the stock User admin for the profile-aware one defined above.
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
| {
"content_hash": "cbdf336ac4edb9068ae9b4b9b7b63dc0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 33.483870967741936,
"alnum_prop": 0.6608863198458574,
"repo_name": "The-Akatsuki/thirdp",
"id": "886e996e5c14d5b1c4baf056f58f4a0d366f9c58",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/profiles/admin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "196798"
},
{
"name": "HTML",
"bytes": "97337"
},
{
"name": "JavaScript",
"bytes": "11808"
},
{
"name": "Python",
"bytes": "48364"
},
{
"name": "Shell",
"bytes": "549"
}
],
"symlink_target": ""
} |
import requests
import json
jsonFile=open('LocalConfiguration/accountCredentials.json')
accountCredentials=json.load(jsonFile)
jsonFile.close()
clientID=accountCredentials['Client ID']
redirectURI=accountCredentials['Redirect URI']
scopes=accountCredentials['Scopes']
state=accountCredentials['State']
clientIdString='client_id='+clientID
payload={'client_id':clientID,'redirect_uri':redirectURI,'scope':scopes,'state':state}
url='https://github.com/login/oauth/authorize'
r = requests.get(url,data=json.dumps(payload))
print r
print r.status_code
| {
"content_hash": "e03415bf53a7a25f1b77e60cd62d9dc7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 86,
"avg_line_length": 24.17391304347826,
"alnum_prop": 0.7985611510791367,
"repo_name": "luisibanez/github-api-whisperer",
"id": "e2150cf5b62d8e289509490e90167d9dfe573315",
"size": "1522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getAuthenticationCode.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2208"
},
{
"name": "Shell",
"bytes": "1084"
}
],
"symlink_target": ""
} |
import os
import sys
import warnings
from itertools import takewhile
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError, no_translations
from django.core.management.utils import run_formatters
from django.db import DEFAULT_DB_ALIAS, OperationalError, connections, router
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner,
MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.utils import get_migration_name_timestamp
from django.db.migrations.writer import MigrationWriter
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
    def add_arguments(self, parser):
        """Register makemigrations' command-line options on the argparse parser."""
        # zero or more positional app labels; empty means "all apps"
        parser.add_argument(
            "args",
            metavar="app_label",
            nargs="*",
            help="Specify the app label(s) to create migrations for.",
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Just show what migrations would be made; don't actually write them.",
        )
        parser.add_argument(
            "--merge",
            action="store_true",
            help="Enable fixing of migration conflicts.",
        )
        parser.add_argument(
            "--empty",
            action="store_true",
            help="Create an empty migration.",
        )
        # both spellings set options["interactive"] = False
        parser.add_argument(
            "--noinput",
            "--no-input",
            action="store_false",
            dest="interactive",
            help="Tells Django to NOT prompt the user for input of any kind.",
        )
        parser.add_argument(
            "-n",
            "--name",
            help="Use this name for migration file(s).",
        )
        parser.add_argument(
            "--no-header",
            action="store_false",
            dest="include_header",
            help="Do not add header comments to new migration file(s).",
        )
        parser.add_argument(
            "--check",
            action="store_true",
            dest="check_changes",
            help="Exit with a non-zero status if model changes are missing migrations.",
        )
        parser.add_argument(
            "--scriptable",
            action="store_true",
            dest="scriptable",
            help=(
                "Divert log output and input prompts to stderr, writing only "
                "paths of generated migration files to stdout."
            ),
        )
    @property
    def log_output(self):
        """Stream for log messages: stderr under --scriptable, else stdout."""
        return self.stderr if self.scriptable else self.stdout
    def log(self, msg):
        """Write a log message to the scriptable-aware output stream."""
        self.log_output.write(msg)
@no_translations
def handle(self, *app_labels, **options):
self.written_files = []
self.verbosity = options["verbosity"]
self.interactive = options["interactive"]
self.dry_run = options["dry_run"]
self.merge = options["merge"]
self.empty = options["empty"]
self.migration_name = options["name"]
if self.migration_name and not self.migration_name.isidentifier():
raise CommandError("The migration name must be a valid Python identifier.")
self.include_header = options["include_header"]
check_changes = options["check_changes"]
self.scriptable = options["scriptable"]
# If logs and prompts are diverted to stderr, remove the ERROR style.
if self.scriptable:
self.stderr.style_func = None
# Make sure the app they asked for exists
app_labels = set(app_labels)
has_bad_labels = False
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError as err:
self.stderr.write(str(err))
has_bad_labels = True
if has_bad_labels:
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Raise an error if any migrations are applied before their dependencies.
consistency_check_labels = {config.label for config in apps.get_app_configs()}
# Non-default databases are only checked if database routers used.
aliases_to_check = (
connections if settings.DATABASE_ROUTERS else [DEFAULT_DB_ALIAS]
)
for alias in sorted(aliases_to_check):
connection = connections[alias]
if connection.settings_dict["ENGINE"] != "django.db.backends.dummy" and any(
# At least one model must be migrated to the database.
router.allow_migrate(
connection.alias, app_label, model_name=model._meta.object_name
)
for app_label in consistency_check_labels
for model in apps.get_app_config(app_label).get_models()
):
try:
loader.check_consistent_history(connection)
except OperationalError as error:
warnings.warn(
"Got an error checking a consistent migration history "
"performed for database connection '%s': %s" % (alias, error),
RuntimeWarning,
)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for
# unspecified apps.
if app_labels:
conflicts = {
app_label: conflict
for app_label, conflict in conflicts.items()
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app) for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.log("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(
specified_apps=app_labels,
dry_run=self.dry_run,
prompt_output=self.log_output,
)
else:
questioner = NonInteractiveMigrationQuestioner(
specified_apps=app_labels,
dry_run=self.dry_run,
verbosity=self.verbosity,
log=self.log,
)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError(
"You must supply at least one app label when using --empty."
)
# Make a fake changes() result we can pass to arrange_for_graph
changes = {app: [Migration("custom", app)] for app in app_labels}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if app_labels:
if len(app_labels) == 1:
self.log("No changes detected in app '%s'" % app_labels.pop())
else:
self.log(
"No changes detected in apps '%s'"
% ("', '".join(app_labels))
)
else:
self.log("No changes detected")
else:
self.write_migration_files(changes)
if check_changes:
sys.exit(1)
def write_migration_files(self, changes):
"""
Take a changes dict and write them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.log(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label))
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration, self.include_header)
if self.verbosity >= 1:
# Display a relative path if it's below the current working
# directory, or an absolute path otherwise.
try:
migration_string = os.path.relpath(writer.path)
except ValueError:
migration_string = writer.path
if migration_string.startswith(".."):
migration_string = writer.path
self.log(" %s\n" % self.style.MIGRATE_LABEL(migration_string))
for operation in migration.operations:
self.log(" - %s" % operation.describe())
if self.scriptable:
self.stdout.write(migration_string)
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
os.makedirs(migrations_directory, exist_ok=True)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "w", encoding="utf-8") as fh:
fh.write(migration_string)
self.written_files.append(writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will log the migrations rather than saving the file to
# the disk.
self.log(
self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename
)
)
self.log(writer.as_string())
run_formatters(self.written_files)
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner(prompt_output=self.log_output)
else:
questioner = MigrationQuestioner(defaults={"ask_merge": True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig
for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
def all_items_equal(seq):
return all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*(m.ancestry for m in merge_migrations))
common_ancestor_count = sum(
1
for common_ancestor_generation in takewhile(
all_items_equal, merge_migrations_generations
)
)
if not common_ancestor_count:
raise ValueError(
"Could not find common ancestor of %s" % migration_names
)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (
loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch
)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.log(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.log(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.log(" - %s" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type(
"Migration",
(Migration,),
{
"dependencies": [
(app_label, migration.name)
for migration in merge_migrations
],
},
)
parts = ["%04i" % (biggest_number + 1)]
if self.migration_name:
parts.append(self.migration_name)
else:
parts.append("merge")
leaf_names = "_".join(
sorted(migration.name for migration in merge_migrations)
)
if len(leaf_names) > 47:
parts.append(get_migration_name_timestamp())
else:
parts.append(leaf_names)
migration_name = "_".join(parts)
new_migration = subclass(migration_name, app_label)
writer = MigrationWriter(new_migration, self.include_header)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "w", encoding="utf-8") as fh:
fh.write(writer.as_string())
run_formatters([writer.path])
if self.verbosity > 0:
self.log("\nCreated new merge migration %s" % writer.path)
if self.scriptable:
self.stdout.write(writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will log the merge migrations rather than saving the file
# to the disk.
self.log(
self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename
)
)
self.log(writer.as_string())
| {
"content_hash": "8fd7eb924b3c5e0d0c9624110bf6c268",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 88,
"avg_line_length": 42.535980148883375,
"alnum_prop": 0.5301015050752538,
"repo_name": "solarissmoke/django",
"id": "8938fb6309ca58ac85eb983a589565f83e0c36b1",
"size": "17142",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/core/management/commands/makemigrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87587"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146495"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "15962069"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
import numpy as np
from sklearn.utils.extmath import _safe_accumulator_op
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
    """Incrementally update per-feature mean and variance with a new batch.

    Vendored from scikit-learn's Youngs-and-Cramer update (Chan, Golub and
    LeVeque), modified in one place: the final combination uses ``np.where``
    instead of item assignment so it also works on dask arrays, which do not
    support item assignment.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New batch of data. NaNs are ignored.
    last_mean : array-like, shape (n_features,)
        Mean computed over all previously seen samples. Must be 0.0 initially.
    last_variance : array-like, shape (n_features,) or None
        Variance over previously seen samples; pass None to skip the variance
        update entirely.
    last_sample_count : array-like, shape (n_features,)
        Per-feature count of non-NaN samples seen so far.

    Returns
    -------
    updated_mean : array, shape (n_features,)
    updated_variance : array or None, shape (n_features,)
        None when ``last_variance`` is None.
    updated_sample_count : array, shape (n_features,)
    """
    # "prev_*" = statistics accumulated so far, "batch_*" = this increment,
    # "total_*" = the combined statistics that are returned.
    prev_sum = last_mean * last_sample_count
    batch_sum = _safe_accumulator_op(np.nansum, X, axis=0)
    batch_count = np.sum(~np.isnan(X), axis=0)
    total_count = last_sample_count + batch_count
    total_mean = (prev_sum + batch_sum) / total_count
    if last_variance is None:
        total_variance = None
    else:
        batch_unnormalized_variance = (
            _safe_accumulator_op(np.nanvar, X, axis=0) * batch_count
        )
        prev_unnormalized_variance = last_variance * last_sample_count
        # batch_count may contain zeros; silence the resulting division
        # warnings and repair those entries below.
        with np.errstate(divide="ignore", invalid="ignore"):
            count_ratio = last_sample_count / batch_count
            total_unnormalized_variance = (
                prev_unnormalized_variance
                + batch_unnormalized_variance
                + count_ratio
                / total_count
                * (prev_sum / count_ratio - batch_sum) ** 2
            )
        never_seen = last_sample_count == 0
        # Original scikit-learn code assigns in place:
        #     updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
        # np.where is used instead because dask arrays do not support
        # item assignment.
        total_unnormalized_variance = np.where(
            never_seen, batch_unnormalized_variance, total_unnormalized_variance
        )
        total_variance = total_unnormalized_variance / total_count
    return total_mean, total_variance, total_count
| {
"content_hash": "4ceca9069a07627ae741b9ba89fece2c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 83,
"avg_line_length": 33.89719626168224,
"alnum_prop": 0.6476426799007444,
"repo_name": "dask/dask-ml",
"id": "623659e85580ca7536a39b901f5e15441a7f60c7",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dask_ml/decomposition/extmath.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "798280"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
from pants.testutil.test_base import TestBase
from pants_test.contrib.go.targets.go_local_source_test_base import GoLocalSourceTestBase
from pants.contrib.go.targets.go_binary import GoBinary
class GoBinaryTest(GoLocalSourceTestBase, TestBase):
    # Runs the shared Go local-source test suite against the GoBinary target
    # by supplying it as ``target_type``.
    @property
    def target_type(self):
        """The target class under test, read by the shared base-class tests."""
        return GoBinary
| {
"content_hash": "75d123df3540418472cc33203f1a1ccf",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 89,
"avg_line_length": 31.3,
"alnum_prop": 0.792332268370607,
"repo_name": "tdyas/pants",
"id": "151a392076dce91bd2ae5ae7ccb68e7effcc82d3",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/go/tests/python/pants_test/contrib/go/targets/test_go_binary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
from futurist._futures import Future # noqa
from futurist._futures import GreenFuture # noqa
from futurist._futures import CancelledError # noqa
from futurist._futures import TimeoutError # noqa
from futurist._futures import GreenThreadPoolExecutor # noqa
from futurist._futures import ProcessPoolExecutor # noqa
from futurist._futures import SynchronousExecutor # noqa
from futurist._futures import ThreadPoolExecutor # noqa
from futurist._futures import RejectedSubmission # noqa
from futurist._futures import ExecutorStatistics # noqa
| {
"content_hash": "e54a530266393d348b8bb8dc26a3ed48",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 39.42857142857143,
"alnum_prop": 0.8115942028985508,
"repo_name": "openstack/futurist",
"id": "43231eb697af18fc820e388bcf8e60fd30598e2f",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "futurist/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "118189"
}
],
"symlink_target": ""
} |
import salt.client
def test_salt_states():
    """Apply the minion's full highstate and fail on any unsuccessful state."""
    local = salt.client.Caller()
    results = local.sminion.functions['state.highstate']()
    for outcome in results.values():
        assert outcome['result'], outcome
def test_locale_state():
    """Apply the 'locale' state file and fail on any unsuccessful state."""
    local = salt.client.Caller()
    results = local.sminion.functions['state.sls']('locale')
    for outcome in results.values():
        assert outcome['result'], outcome
def test_python_states():
    """Apply the 'python' state file and fail on any unsuccessful state."""
    local = salt.client.Caller()
    results = local.sminion.functions['state.sls']('python')
    for outcome in results.values():
        assert outcome['result'], outcome
def test_virtualenv_base_states():
    """Apply the 'virtualenv.base' state file and fail on any unsuccessful state."""
    local = salt.client.Caller()
    results = local.sminion.functions['state.sls']('virtualenv.base')
    for outcome in results.values():
        assert outcome['result'], outcome
def test_virtualenv_scilibs_states():
    """Apply the 'virtualenv.scilibs' state file and fail on any unsuccessful state."""
    local = salt.client.Caller()
    results = local.sminion.functions['state.sls']('virtualenv.scilibs')
    for outcome in results.values():
        assert outcome['result'], outcome
| {
"content_hash": "ed11185e5aa9baa5d0e6e52a0362852c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 27.25,
"alnum_prop": 0.6381243628950051,
"repo_name": "sciboxes/numpybox",
"id": "6074b751a4705a4e504fb11e4d4ce9124ae36db0",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "981"
},
{
"name": "Scheme",
"bytes": "106"
}
],
"symlink_target": ""
} |
from bigdl.dllib.utils.file_utils import callZooFunc
from pyspark.sql.types import IntegerType, ShortType, LongType, FloatType, DecimalType, \
DoubleType, BooleanType
from pyspark.sql.functions import broadcast, udf
from bigdl.dllib.utils.log4Error import *
import warnings
from bigdl.dllib.utils.log4Error import *
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
if TYPE_CHECKING:
from bigdl.friesian.feature.table import FeatureTable, TargetCode
from pyspark.sql.dataframe import DataFrame as SparkDataFrame
def compute(df: "SparkDataFrame"):
    """Invoke the JVM backend's "compute" op on *df* and return its result
    (presumably forces evaluation/caching — semantics live on the Scala side)."""
    computed = callZooFunc("float", "compute", df)
    return computed
def log_with_clip(df: "SparkDataFrame", columns: List[str], clip: bool=True) -> "SparkDataFrame":
    """Apply the backend "log" op to *columns*; *clip* is forwarded unchanged
    to the JVM side."""
    logged = callZooFunc("float", "log", df, columns, clip)
    return logged
def generate_string_idx(df: "SparkDataFrame",
                        columns: List[str],
                        freq_limit: Optional[str],
                        order_by_freq: bool) -> List["SparkDataFrame"]:
    """Build string-index mapping DataFrames for *columns* via the JVM
    "generateStringIdx" op; all arguments are forwarded as-is."""
    index_dfs = callZooFunc("float", "generateStringIdx", df, columns,
                            freq_limit, order_by_freq)
    return index_dfs
def fill_na(df: "SparkDataFrame",
            fill_val: Union[int, str, float],
            columns: List[str]) -> "SparkDataFrame":
    """Fill missing values in *columns* with *fill_val* via the JVM
    "fillNa" op."""
    filled = callZooFunc("float", "fillNa", df, fill_val, columns)
    return filled
def fill_na_int(df: "SparkDataFrame",
                fill_val: int,
                columns: Optional[List[str]]) -> "SparkDataFrame":
    """Fill missing integer values with *fill_val* via the JVM "fillNaInt"
    op; columns=None is forwarded to the backend unchanged."""
    filled = callZooFunc("float", "fillNaInt", df, fill_val, columns)
    return filled
def clip(df: "SparkDataFrame",
         columns: List[str],
         min: Optional[int],
         max: Optional[int]) -> "SparkDataFrame":
    """Clip values of *columns* to [min, max] via the JVM "clip" op.

    NOTE(review): the parameter names shadow the ``min``/``max`` builtins,
    but they are part of the public keyword interface, so they are kept.
    """
    clipped = callZooFunc("float", "clip", df, columns, min, max)
    return clipped
def fill_median(df: "SparkDataFrame", columns: List[str]) -> "SparkDataFrame":
    """Fill missing values in *columns* with their median via the JVM
    "fillMedian" op."""
    filled = callZooFunc("float", "fillMedian", df, columns)
    return filled
def median(df: "SparkDataFrame",
           columns: List[str],
           relative_error: float=0.001) -> "SparkDataFrame":
    """Compute (approximate) medians of *columns* via the JVM "median" op;
    *relative_error* is forwarded to the backend."""
    medians = callZooFunc("float", "median", df, columns, relative_error)
    return medians
# TODO: ADD UTS
def cross_columns(df,
                  cross_column_list: Union[List[str], List[List[str]], str],
                  bucket_sizes: int):
    """Create crossed (hashed-combination) columns via the JVM
    "crossColumns" op; arguments are forwarded as-is."""
    crossed = callZooFunc("float", "crossColumns", df, cross_column_list, bucket_sizes)
    return crossed
def check_col_exists(df: "SparkDataFrame", columns: List[str]) -> None:
    """Report (via invalidInputError) any name in *columns* that is missing
    from df's columns; a no-op when all names exist."""
    existing = df.columns
    missing = [name for name in columns if name not in existing]
    if missing:
        invalidInputError(False,
                          str(missing) + " do not exist in this Table")
def add_negative_samples(df: "SparkDataFrame",
                         item_size: int,
                         item_col: str,
                         label_col: str,
                         neg_num: int) -> "SparkDataFrame":
    """Add *neg_num* negative samples per row via the JVM "addNegSamples"
    op; all arguments are forwarded as-is."""
    augmented = callZooFunc("float", "addNegSamples", df, item_size, item_col,
                            label_col, neg_num)
    return augmented
def add_hist_seq(df: "SparkDataFrame",
                 cols: List[str],
                 user_col: str,
                 sort_col: str,
                 min_len: int,
                 max_len: int,
                 num_seqs: int) -> "SparkDataFrame":
    """Build per-user history sequences for *cols* via the JVM "addHistSeq"
    op (ordering by *sort_col*, sequence length bounded by min/max_len)."""
    with_history = callZooFunc("float", "addHistSeq", df, cols, user_col,
                               sort_col, min_len, max_len, num_seqs)
    return with_history
def add_neg_hist_seq(df: "SparkDataFrame",
                     item_size: int,
                     item_history_col: str,
                     neg_num: int) -> "SparkDataFrame":
    """Add negative history sequences via the JVM "addNegHisSeq" op; all
    arguments are forwarded as-is."""
    augmented = callZooFunc("float", "addNegHisSeq", df, item_size,
                            item_history_col, neg_num)
    return augmented
def add_value_features(df: "SparkDataFrame",
                       cols: List[str],
                       map_df: "SparkDataFrame",
                       key: str,
                       value: str) -> "SparkDataFrame":
    """Map *cols* through the key->value lookup in *map_df* via the JVM
    "addValueFeatures" op."""
    enriched = callZooFunc("float", "addValueFeatures", df, cols, map_df, key, value)
    return enriched
def mask(df: "SparkDataFrame", mask_cols: Optional[Union[str, List[str]]], seq_len: int):
    """Add mask columns of length *seq_len* for *mask_cols* via the JVM
    "mask" op."""
    masked = callZooFunc("float", "mask", df, mask_cols, seq_len)
    return masked
def pad(df: "SparkDataFrame",
        cols: List[str],
        seq_len: int,
        mask_cols: Optional[Union[str, List[str]]],
        mask_token: Union[int, str]) -> "SparkDataFrame":
    """Post-pad sequence columns *cols* to *seq_len* with *mask_token*,
    first adding mask columns when *mask_cols* is given (both steps run on
    the JVM backend)."""
    masked = callZooFunc("float", "mask", df, mask_cols, seq_len) if mask_cols else df
    return callZooFunc("float", "postPad", masked, cols, seq_len, mask_token)
def check_column_numeric(df: "SparkDataFrame", column: str) -> bool:
    """Return True when the Spark SQL type of *column* is one of the
    numeric types (integer, short, long, float, decimal, double)."""
    numeric_types = (IntegerType(), ShortType(), LongType(),
                     FloatType(), DecimalType(), DoubleType())
    return df.schema[column].dataType in numeric_types
def ordinal_shuffle_partition(df: "SparkDataFrame") -> "SparkDataFrame":
    """Shuffle rows within each partition via the JVM
    "ordinalShufflePartition" op."""
    shuffled = callZooFunc("float", "ordinalShufflePartition", df)
    return shuffled
def write_parquet(df: "SparkDataFrame", path: str, mode: str) -> None:
    """Write *df* as parquet to *path* with the given save *mode* via the
    JVM "dfWriteParquet" op. Side effect only; returns None."""
    callZooFunc("float", "dfWriteParquet", df, path, mode)
def check_col_str_list_exists(df: "SparkDataFrame",
                              column: Union[List[str], str],
                              arg_name: str) -> None:
    """Validate that *column* (a column name, or a list of names) exists in
    df's columns, reporting failures via invalidInputError with *arg_name*
    in the message."""
    if isinstance(column, str):
        invalidInputError(column in df.columns,
                          column + " in " + arg_name + " does not exist in Table")
    elif isinstance(column, list):
        for name in column:
            invalidInputError(name in df.columns,
                              "{} in {} does not exist in Table".format(name, arg_name))
    else:
        invalidInputError(False,
                          "elements in cat_cols should be str or list of str but"
                          " get " + str(column))
def get_nonnumeric_col_type(df: "SparkDataFrame",
                            columns: Optional[Union[str, List[str]]]) \
        -> List[Union[Tuple[str, str], Any]]:
    """Return the (name, dtype) pairs from ``df.dtypes`` whose name is in
    *columns* and whose Spark SQL dtype string is not numeric."""
    numeric_dtypes = ("smallint", "int", "bigint", "float", "double")
    return [entry for entry in df.dtypes
            if entry[0] in columns and entry[1] not in numeric_dtypes]
def gen_cols_name(columns: Union[List[str], str], name_sep: str="_") -> str:
    """Return a single column name: a plain string is returned unchanged and
    a list of names is joined with *name_sep*; anything else is reported as
    invalid input."""
    if isinstance(columns, list):
        return name_sep.join(columns)
    if isinstance(columns, str):
        return columns
    invalidInputError(False,
                      "item should be either str or list of str")
def encode_target_(tbl: "FeatureTable",
                   targets: List["TargetCode"],
                   target_cols: Optional[List[str]]=None,
                   drop_cat: bool=True,
                   drop_fold: bool=True,
                   fold_col: Optional[str]=None) -> "FeatureTable":
    """Join precomputed target-encoding statistics onto ``tbl``.

    For each TargetCode in *targets*, validates its statistics DataFrame,
    left-joins its encoded columns onto ``tbl`` (broadcasting when small
    enough), and fills rows without a match with the stored overall target
    mean. Optionally drops the original categorical columns (*drop_cat*)
    and the fold column (*drop_fold*). Returns a new FeatureTable; ``tbl``
    itself is not mutated.
    """
    for target_code in targets:
        cat_col = target_code.cat_col
        out_target_mean = target_code.out_target_mean
        join_tbl = tbl._clone(target_code.df)
        invalidInputError("target_encode_count" in join_tbl.df.columns,
                          "target_encode_count should be in target_code")
        # (keys of out_target_mean) should include (output columns)
        output_columns = list(filter(lambda x:
                                     ((isinstance(cat_col, str) and x != cat_col) or
                                      (isinstance(cat_col, list) and x not in cat_col)) and
                                     (fold_col is not None and x != fold_col) and
                                     (x != "target_encode_count"),
                                     join_tbl.df.columns))
        for column in output_columns:
            invalidInputError(column in out_target_mean, column + " should be in out_target_mean")
            column_mean = out_target_mean[column][1]
            invalidInputError(isinstance(column_mean, int) or isinstance(column_mean, float),
                              "mean in target_mean should be numeric but get {} of type"
                              " {} in {}".format(column_mean, type(column_mean), out_target_mean))
        # select target_cols to join
        if target_cols is not None:
            new_out_target_mean = {}
            for out_col, target_mean in out_target_mean.items():
                if target_mean[0] not in target_cols:
                    join_tbl = join_tbl.drop(out_col)
                else:
                    new_out_target_mean[out_col] = target_mean
            out_target_mean = new_out_target_mean
        all_size = join_tbl.size()
        # Only broadcast up to limit_size rows of the (frequency-sorted)
        # statistics table; larger tables fall back to a split join below.
        limit_size = 100000000
        t_df = join_tbl.df
        top_df = t_df if all_size <= limit_size \
            else t_df.sort(t_df.target_encode_count.desc()).limit(limit_size)
        br_df = broadcast(top_df.drop("target_encode_count"))
        if fold_col is None:
            join_key = cat_col
        else:
            join_key = [cat_col, fold_col] if isinstance(cat_col, str) else cat_col + [fold_col]
        if all_size <= limit_size:
            joined = tbl.df.join(br_df, on=join_key, how="left")
        else:
            # Split rows into those whose key is covered by the broadcast
            # subset (broadcast join) and the rest (regular left join on the
            # remaining statistics), then union the two halves.
            keyset = set(top_df.select(cat_col).rdd.map(lambda r: r[0]).collect())
            filter_udf = udf(lambda key: key in keyset, BooleanType())
            df1 = tbl.df.filter(filter_udf(cat_col))
            df2 = tbl.df.subtract(df1)
            joined1 = df1.join(br_df, on=join_key)
            joined2 = df2.join(t_df.drop("target_encode_count").subtract(br_df),
                               on=join_key, how="left")
            joined = joined1.union(joined2)
        tbl = tbl._clone(joined)
        # for new columns, fill na with mean
        for out_col, target_mean in out_target_mean.items():
            if out_col in tbl.df.columns:
                tbl = tbl.fillna(target_mean[1], out_col)
    if drop_cat:
        for target_code in targets:
            if isinstance(target_code.cat_col, str):
                tbl = tbl.drop(target_code.cat_col)
            else:
                tbl = tbl.drop(*target_code.cat_col)
    if drop_fold:
        if fold_col is not None:
            tbl = tbl.drop(fold_col)
    return tbl
def str_to_list(arg: Union[List[str], str], arg_name: str) -> List[str]:
    """Normalize *arg* to a list of strings: a plain string is wrapped in a
    one-element list; a list is returned unchanged; anything else is
    reported via invalidInputError (using *arg_name* in the message)."""
    if isinstance(arg, str):
        return [arg]
    invalidInputError(isinstance(arg, list), arg_name + " should be str or a list of str")
    return arg
def featuretable_to_xshards(tbl: "SparkDataFrame",
                            convert_cols: Optional[Union[List[str], str]]=None):
    """Convert *tbl* into XShards of feature dictionaries.

    NOTE(review): the annotation says SparkDataFrame, but the body reads
    ``tbl.columns`` and ``tbl.df`` like a FeatureTable — confirm the intended
    type. With convert_cols=None, all of the table's columns are converted;
    a single column name is wrapped in a list.
    """
    from bigdl.orca.learn.utils import dataframe_to_xshards_of_feature_dict
    # TODO: partition < node num
    cols = tbl.columns if convert_cols is None else convert_cols
    if cols and not isinstance(cols, list):
        cols = [cols]
    return dataframe_to_xshards_of_feature_dict(tbl.df, cols, accept_str_col=True)
| {
"content_hash": "b194b46ca1b8b41e8640c7690abeda92",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 100,
"avg_line_length": 40.262172284644194,
"alnum_prop": 0.5689302325581396,
"repo_name": "yangw1234/BigDL",
"id": "1bb9db6cec4c034e78eb77a9c0620da7f8d8b7ce",
"size": "11337",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/friesian/src/bigdl/friesian/feature/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
} |
from pyBN.learning.structure.constraint.fast_iamb import *
from pyBN.learning.structure.constraint.grow_shrink import *
from pyBN.learning.structure.constraint.iamb import *
from pyBN.learning.structure.constraint.lambda_iamb import *
from pyBN.learning.structure.constraint.path_condition import * | {
"content_hash": "14aa20ee65a24e2ea1b19c6027a667eb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 63,
"avg_line_length": 59.6,
"alnum_prop": 0.8389261744966443,
"repo_name": "ncullen93/pyBN",
"id": "e6339908f746d7019ee114d60142b86e58c106b2",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyBN/learning/structure/constraint/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222493"
}
],
"symlink_target": ""
} |
"""
mfupw module. Contains the ModflowUpw class. Note that the user can access
the ModflowUpw class as `flopy.modflow.ModflowUpw`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/upw_upstream_weighting_package.htm>`_.
"""
import sys
import numpy as np
from .mfpar import ModflowPar as mfpar
from ..pakbase import Package
from ..utils import Util2d, Util3d
class ModflowUpw(Package):
"""
Upstream weighting package class
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 53)
hdry : float
Is the head that is assigned to cells that are converted to dry during
a simulation. Although this value plays no role in the model
calculations, it is useful as an indicator when looking at the
resulting heads that are output from the model. HDRY is thus similar
to HNOFLO in the Basic Package, which is the value assigned to cells
that are no-flow cells at the start of a model simulation. (default
is -1.e30).
iphdry : int
iphdry is a flag that indicates whether groundwater head will be set to
hdry when the groundwater head is less than 0.0001 above the cell bottom
(units defined by lenuni in the discretization package). If iphdry=0,
then head will not be set to hdry. If iphdry>0, then head will be set to
hdry. If the head solution from one simulation will be used as starting
heads for a subsequent simulation, or if the Observation Process is used
(Harbaugh and others, 2000), then hdry should not be printed to the output
file for dry cells (that is, the upw package input variable should be set
as iphdry=0). (default is 0)
noparcheck : bool
noparcheck turns off the checking that a value is defined for all cells
when parameters are used to define layer data.
laytyp : int or array of ints (nlay)
Layer type (default is 0).
layavg : int or array of ints (nlay)
Layer average (default is 0).
0 is harmonic mean
1 is logarithmic mean
2 is arithmetic mean of saturated thickness and logarithmic mean of
of hydraulic conductivity
chani : float or array of floats (nlay)
contains a value for each layer that is a flag or the horizontal
anisotropy. If CHANI is less than or equal to 0, then variable HANI
defines horizontal anisotropy. If CHANI is greater than 0, then CHANI
is the horizontal anisotropy for the entire layer, and HANI is not
read. If any HANI parameters are used, CHANI for all layers must be
less than or equal to 0. Use as many records as needed to enter a
value of CHANI for each layer. The horizontal anisotropy is the ratio
of the hydraulic conductivity along columns (the Y direction) to the
hydraulic conductivity along rows (the X direction).
layvka : float or array of floats (nlay)
a flag for each layer that indicates whether variable VKA is vertical
hydraulic conductivity or the ratio of horizontal to vertical
hydraulic conductivity.
laywet : float or array of floats (nlay)
contains a flag for each layer that indicates if wetting is active.
laywet should always be zero for the UPW Package because all cells
initially active are wettable.
hk : float or array of floats (nlay, nrow, ncol)
is the hydraulic conductivity along rows. HK is multiplied by
horizontal anisotropy (see CHANI and HANI) to obtain hydraulic
conductivity along columns. (default is 1.0).
hani : float or array of floats (nlay, nrow, ncol)
is the ratio of hydraulic conductivity along columns to hydraulic
conductivity along rows, where HK of item 10 specifies the hydraulic
conductivity along rows. Thus, the hydraulic conductivity along
columns is the product of the values in HK and HANI.
(default is 1.0).
vka : float or array of floats (nlay, nrow, ncol)
is either vertical hydraulic conductivity or the ratio of horizontal
to vertical hydraulic conductivity depending on the value of LAYVKA.
(default is 1.0).
ss : float or array of floats (nlay, nrow, ncol)
is specific storage unless the STORAGECOEFFICIENT option is used.
When STORAGECOEFFICIENT is used, Ss is confined storage coefficient.
(default is 1.e-5).
sy : float or array of floats (nlay, nrow, ncol)
is specific yield. (default is 0.15).
vkcb : float or array of floats (nlay, nrow, ncol)
is the vertical hydraulic conductivity of a Quasi-three-dimensional
confining bed below a layer. (default is 0.0).
extension : string
Filename extension (default is 'upw')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output name will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf(m)
"""
def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0,
laywet=0, ipakcb=None, hdry=-1E+30, iphdry=0,
hk=1.0, hani=1.0, vka=1.0, ss=1e-5, sy=0.15, vkcb=0.0,
noparcheck=False,
extension='upw', unitnumber=None, filenames=None):
if model.version != 'mfnwt':
err = 'Error: model version must be mfnwt to use ' + \
'{} package'.format(ModflowUpw.ftype())
raise Exception(err)
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowUpw.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowUpw.ftype())
else:
ipakcb = 0
# Fill namefile items
name = [ModflowUpw.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'upw_upstream_weighting_package.htm'
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
self.ipakcb = ipakcb
self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.npupw = 0 # number of UPW parameters
self.iphdry = iphdry
self.laytyp = Util2d(model, (nlay,), np.int, laytyp, name='laytyp')
self.layavg = Util2d(model, (nlay,), np.int, layavg, name='layavg')
self.chani = Util2d(model, (nlay,), np.int, chani, name='chani')
self.layvka = Util2d(model, (nlay,), np.int, layvka, name='vka')
self.laywet = Util2d(model, (nlay,), np.int, laywet, name='laywet')
self.options = ' '
if noparcheck: self.options = self.options + 'NOPARCHECK '
self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk',
locat=self.unit_number[0])
self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani,
name='hani', locat=self.unit_number[0])
keys = []
for k in range(nlay):
key = 'vka'
if self.layvka[k] != 0:
key = 'vani'
keys.append(key)
self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka,
name=keys, locat=self.unit_number[0])
self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name='ss',
locat=self.unit_number[0])
self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy',
locat=self.unit_number[0])
self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb,
name='vkcb', locat=self.unit_number[0])
self.parent.add_package(self)
def write_file(self, check=True):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]),
verbose=self.parent.verbose, level=1)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper # Open file for writing
f_upw = open(self.fn_path, 'w')
# Item 0: text
f_upw.write('{}\n'.format(self.heading))
# Item 1: IBCFCB, HDRY, NPLPF
f_upw.write('{0:10d}{1:10.3G}{2:10d}{3:10d}{4:s}\n'.format(self.ipakcb,
self.hdry,
self.npupw,
self.iphdry,
self.options))
# LAYTYP array
f_upw.write(self.laytyp.string);
# LAYAVG array
f_upw.write(self.layavg.string);
# CHANI array
f_upw.write(self.chani.string);
# LAYVKA array
f_upw.write(self.layvka.string)
# LAYWET array
f_upw.write(self.laywet.string);
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
raise Exception('LAYWET should be 0 for UPW')
transient = not self.parent.get_package('DIS').steady.all()
for k in range(nlay):
f_upw.write(self.hk[k].get_file_entry())
if self.chani[k] < 1:
f_upw.write(self.hani[k].get_file_entry())
f_upw.write(self.vka[k].get_file_entry())
if transient == True:
f_upw.write(self.ss[k].get_file_entry())
if self.laytyp[k] != 0:
f_upw.write(self.sy[k].get_file_entry())
if self.parent.get_package('DIS').laycbd[k] > 0:
f_upw.write(self.vkcb[k].get_file_entry())
if (self.laywet[k] != 0 and self.laytyp[k] != 0):
f_upw.write(self.laywet[k].get_file_entry())
f_upw.close()
@staticmethod
def load(f, model, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
check : boolean
Check package data for common errors. (default True)
Returns
-------
dis : ModflowUPW object
ModflowLpf object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> upw = flopy.modflow.ModflowUpw.load('test.upw', m)
"""
if model.verbose:
sys.stdout.write('loading upw package file...\n')
if model.version != 'mfnwt':
msg = "Warning: model version was reset from " + \
"'{}' to 'mfnwt' in order to load a UPW file".format(
model.version)
print(msg)
model.version = 'mfnwt'
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# Item 1: IBCFCB, HDRY, NPLPF - line already read above
if model.verbose:
print(' loading ipakcb, HDRY, NPUPW, IPHDRY...')
t = line.strip().split()
ipakcb, hdry, npupw, iphdry = int(t[0]), \
float(t[1]), \
int(t[2]), \
int(t[3])
# if ipakcb != 0:
# model.add_pop_key_list(ipakcb)
# ipakcb = 53
# options
noparcheck = False
if len(t) > 3:
for k in range(3, len(t)):
if 'NOPARCHECK' in t[k].upper():
noparcheck = True
# LAYTYP array
if model.verbose:
print(' loading LAYTYP...')
line = f.readline()
t = line.strip().split()
laytyp = np.array((t[0:nlay]), dtype=np.int)
# LAYAVG array
if model.verbose:
print(' loading LAYAVG...')
line = f.readline()
t = line.strip().split()
layavg = np.array((t[0:nlay]), dtype=np.int)
# CHANI array
if model.verbose:
print(' loading CHANI...')
line = f.readline()
t = line.strip().split()
chani = np.array((t[0:nlay]), dtype=np.float32)
# LAYVKA array
if model.verbose:
print(' loading LAYVKA...')
line = f.readline()
t = line.strip().split()
layvka = np.array((t[0:nlay]), dtype=np.int)
# LAYWET array
if model.verbose:
print(' loading LAYWET...')
line = f.readline()
t = line.strip().split()
laywet = np.array((t[0:nlay]), dtype=np.int)
# Item 7: WETFCT, IWETIT, IHDWET
wetfct, iwetit, ihdwet = None, None, None
iwetdry = laywet.sum()
if iwetdry > 0:
raise Exception('LAYWET should be 0 for UPW')
# get parameters
par_types = []
if npupw > 0:
par_types, parm_dict = mfpar.load(f, npupw, model.verbose)
# get arrays
transient = not model.get_package('DIS').steady.all()
hk = [0] * nlay
hani = [0] * nlay
vka = [0] * nlay
ss = [0] * nlay
sy = [0] * nlay
vkcb = [0] * nlay
for k in range(nlay):
if model.verbose:
print(' loading hk layer {0:3d}...'.format(k + 1))
if 'hk' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict,
findlayer=k)
hk[k] = t
if chani[k] < 1:
if model.verbose:
print(' loading hani layer {0:3d}...'.format(k + 1))
if 'hani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hani',
parm_dict, findlayer=k)
hani[k] = t
if model.verbose:
print(' loading vka layer {0:3d}...'.format(k + 1))
if 'vk' not in par_types and 'vani' not in par_types:
key = 'vka'
if layvka[k] != 0:
key = 'vani'
t = Util2d.load(f, model, (nrow, ncol), np.float32, key,
ext_unit_dict)
else:
line = f.readline()
key = 'vka'
if 'vani' in par_types:
key = 'vani'
t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict,
findlayer=k)
vka[k] = t
if transient:
if model.verbose:
print(' loading ss layer {0:3d}...'.format(k + 1))
if 'ss' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'ss',
parm_dict, findlayer=k)
ss[k] = t
if laytyp[k] != 0:
if model.verbose:
print(' loading sy layer {0:3d}...'.format(k + 1))
if 'sy' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sy',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'sy',
parm_dict, findlayer=k)
sy[k] = t
if model.get_package('DIS').laycbd[k] > 0:
if model.verbose:
print(' loading vkcb layer {0:3d}...'.format(k + 1))
if 'vkcb' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb',
parm_dict, findlayer=k)
vkcb[k] = t
# determine specified unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowUpw.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create upw object
upw = ModflowUpw(model, ipakcb=ipakcb, iphdry=iphdry, hdry=hdry,
noparcheck=noparcheck,
laytyp=laytyp, layavg=layavg, chani=chani,
layvka=layvka, laywet=laywet,
hk=hk, hani=hani, vka=vka, ss=ss, sy=sy, vkcb=vkcb,
unitnumber=unitnumber, filenames=filenames)
if check:
upw.check(f='{}.chk'.format(upw.name[0]),
verbose=upw.parent.verbose, level=0)
# return upw object
return upw
    @staticmethod
    def ftype():
        """Return the package file-type string used in the name file ('UPW')."""
        return 'UPW'
    @staticmethod
    def defaultunit():
        """Return the default file unit number for the UPW package (31)."""
        return 31
| {
"content_hash": "07ad055252f69d758ca6a9551618e9aa",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 95,
"avg_line_length": 42.21456692913386,
"alnum_prop": 0.5193751457216135,
"repo_name": "bdestombe/flopy-1",
"id": "be283a93075d09155ce1a1ae2b4fc7bcdb06d93a",
"size": "21445",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/modflow/mfupw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "2372593"
}
],
"symlink_target": ""
} |
""" This module contains ShowBase, an application framework responsible
for opening a graphical display, setting up input devices and creating
the scene graph. """
__all__ = ['ShowBase', 'WindowControls']
# This module redefines the builtin import function with one
# that prints out every import it does in a hierarchical form
# Annoying and very noisy, but sometimes useful
#import VerboseImport
from panda3d.core import *
from panda3d.direct import throw_new_frame, init_app_for_gui
from panda3d.direct import storeAccessibilityShortcutKeys, allowAccessibilityShortcutKeys
from . import DConfig
# Register the extension methods for NodePath.
from direct.extensions_native import NodePath_extensions
# This needs to be available early for DirectGUI imports
import sys
if sys.version_info >= (3, 0):
import builtins
else:
import __builtin__ as builtins
builtins.config = DConfig
from direct.directnotify.DirectNotifyGlobal import directNotify, giveNotify
from .MessengerGlobal import messenger
from .BulletinBoardGlobal import bulletinBoard
from direct.task.TaskManagerGlobal import taskMgr
from .JobManagerGlobal import jobMgr
from .EventManagerGlobal import eventMgr
#from PythonUtil import *
from direct.interval import IntervalManager
from direct.showbase.BufferViewer import BufferViewer
from direct.task import Task
from . import Loader
import time
import atexit
import importlib
from direct.showbase import ExceptionVarDump
from . import DirectObject
from . import SfxPlayer
if __debug__:
from direct.showbase import GarbageReport
from direct.directutil import DeltaProfiler
from . import OnScreenDebug
from . import AppRunnerGlobal
@atexit.register
def exitfunc():
    """Tear down the global ShowBase instance (if any) at interpreter exit."""
    base = getattr(builtins, 'base', None)
    if base is not None:
        base.destroy()
# Now ShowBase is a DirectObject. We need this so ShowBase can hang
# hooks on messages, particularly on window-event. This doesn't
# *seem* to cause anyone any problems.
class ShowBase(DirectObject.DirectObject):
config = DConfig
notify = directNotify.newCategory("ShowBase")
    def __init__(self, fStartDirect = True, windowType = None):
        """Set up the application framework: read config, open the default
        window (unless windowType is 'none'), build the 2-D/3-D scene graphs,
        wire up the global managers (task, event, audio, ...), and publish
        the usual builtins (base, render, loader, ...).

        fStartDirect -- if False, the DIRECT session is not started
            automatically here; the caller may start it later.
        windowType -- overrides the window-type config variable; one of
            'onscreen', 'offscreen' or 'none'.
        """
        self.__dev__ = self.config.GetBool('want-dev', __debug__)
        builtins.__dev__ = self.__dev__
        logStackDump = (self.config.GetBool('log-stack-dump', False) or
                        self.config.GetBool('client-log-stack-dump', False))
        uploadStackDump = self.config.GetBool('upload-stack-dump', False)
        if logStackDump or uploadStackDump:
            ExceptionVarDump.install(logStackDump, uploadStackDump)
        if __debug__:
            self.__autoGarbageLogging = self.__dev__ and self.config.GetBool('auto-garbage-logging', False)
        ## The directory containing the main Python file of this application.
        self.mainDir = ExecutionEnvironment.getEnvironmentVariable("MAIN_DIR")
        self.main_dir = self.mainDir
        ## This contains the global appRunner instance, as imported from
        ## AppRunnerGlobal. This will be None if we are not running in the
        ## runtime environment (ie. from a .p3d file).
        self.appRunner = AppRunnerGlobal.appRunner
        self.app_runner = self.appRunner
        # debug running multiplier
        self.debugRunningMultiplier = 4
        # [gjeon] to disable sticky keys
        if self.config.GetBool('disable-sticky-keys', 0):
            storeAccessibilityShortcutKeys()
            allowAccessibilityShortcutKeys(False)
        self.printEnvDebugInfo()
        vfs = VirtualFileSystem.getGlobalPtr()
        self.nextWindowIndex = 1
        self.__directStarted = False
        self.__deadInputs = 0
        # Store dconfig variables
        self.sfxActive = self.config.GetBool('audio-sfx-active', 1)
        self.musicActive = self.config.GetBool('audio-music-active', 1)
        self.wantFog = self.config.GetBool('want-fog', 1)
        self.wantRender2dp = self.config.GetBool('want-render2dp', 1)
        self.screenshotExtension = self.config.GetString('screenshot-extension', 'jpg')
        self.musicManager = None
        self.musicManagerIsValid = None
        self.sfxManagerList = []
        self.sfxManagerIsValidList = []
        self.wantStats = self.config.GetBool('want-pstats', 0)
        self.wantTk = False
        self.wantWx = False
        ## Fill this in with a function to invoke when the user "exits"
        ## the program by closing the main window.
        self.exitFunc = None
        ## Add final-exit callbacks to this list.  These will be called
        ## when sys.exit() is called, after Panda has unloaded, and
        ## just before Python is about to shut down.
        self.finalExitCallbacks = []
        # Set up the TaskManager to reset the PStats clock back
        # whenever we resume from a pause.  This callback function is
        # a little hacky, but we can't call it directly from within
        # the TaskManager because he doesn't know about PStats (and
        # has to run before libpanda is even loaded).
        taskMgr.resumeFunc = PStatClient.resumeAfterPause
        if self.__dev__:
            self.__setupProfile()
        # If the aspect ratio is 0 or None, it means to infer the
        # aspect ratio from the window size.
        # If you need to know the actual aspect ratio call base.getAspectRatio()
        self.__configAspectRatio = ConfigVariableDouble('aspect-ratio', 0).getValue()
        # This variable is used to see if the aspect ratio has changed when
        # we get a window-event.
        self.__oldAspectRatio = None
        ## This is set to the value of the window-type config variable, but may
        ## optionally be overridden in the Showbase constructor.  Should either be
        ## 'onscreen' (the default), 'offscreen' or 'none'.
        self.windowType = windowType
        if self.windowType is None:
            self.windowType = self.config.GetString('window-type', 'onscreen')
        self.requireWindow = self.config.GetBool('require-window', 1)
        ## This is the main, or only window; see winList for a list of *all* windows.
        self.win = None
        self.frameRateMeter = None
        self.sceneGraphAnalyzerMeter = None
        self.winList = []
        self.winControls = []
        self.mainWinMinimized = 0
        self.mainWinForeground = 0
        self.pipe = None
        self.pipeList = []
        self.mouse2cam = None
        self.buttonThrowers = None
        self.mouseWatcher = None
        self.mouseWatcherNode = None
        self.pointerWatcherNodes = None
        self.mouseInterface = None
        self.drive = None
        self.trackball = None
        self.texmem = None
        self.showVertices = None
        self.deviceButtonThrowers = []
        ## This is a NodePath pointing to the Camera object set up for the 3D scene.
        ## This is usually a child of self.camera.
        self.cam = None
        self.cam2d = None
        self.cam2dp = None
        ## This is the NodePath that should be used to manipulate the camera.  This
        ## is the node to which the default camera is attached.
        self.camera = None
        self.camera2d = None
        self.camera2dp = None
        ## This is a list of all cameras created with makeCamera, including base.cam.
        self.camList = []
        ## Convenience accessor for base.cam.node()
        self.camNode = None
        ## Convenience accessor for base.camNode.get_lens()
        self.camLens = None
        self.camFrustumVis = None
        self.direct = None
        ## This is used to store the wx.Application object used when want-wx is
        ## set or base.startWx() is called.
        self.wxApp = None
        self.wxAppCreated = False
        self.tkRoot = None
        self.tkRootCreated = False
        # This is used for syncing multiple PCs in a distributed cluster
        try:
            # Has the cluster sync variable been set externally?
            self.clusterSyncFlag = clusterSyncFlag
        except NameError:
            # Has the clusterSyncFlag been set via a config variable
            self.clusterSyncFlag = self.config.GetBool('cluster-sync', 0)
        self.hidden = NodePath('hidden')
        ## The global graphics engine, ie. GraphicsEngine.getGlobalPtr()
        self.graphicsEngine = GraphicsEngine.getGlobalPtr()
        self.graphics_engine = self.graphicsEngine
        self.setupRender()
        self.setupRender2d()
        self.setupDataGraph()
        if self.wantRender2dp:
            self.setupRender2dp()
        ## This is a placeholder for a CollisionTraverser.  If someone
        ## stores a CollisionTraverser pointer here, we'll traverse it
        ## in the collisionLoop task.
        self.cTrav = 0
        self.shadowTrav = 0
        self.cTravStack = Stack()
        # Ditto for an AppTraverser.
        self.appTrav = 0
        # This is the DataGraph traverser, which we might as well
        # create now.
        self.dgTrav = DataGraphTraverser()
        # Maybe create a RecorderController to record and/or play back
        # the user session.
        self.recorder = None
        playbackSession = self.config.GetString('playback-session', '')
        recordSession = self.config.GetString('record-session', '')
        if playbackSession:
            self.recorder = RecorderController()
            self.recorder.beginPlayback(Filename.fromOsSpecific(playbackSession))
        elif recordSession:
            self.recorder = RecorderController()
            self.recorder.beginRecord(Filename.fromOsSpecific(recordSession))
        if self.recorder:
            # If we're either playing back or recording, pass the
            # random seed into the system so each session will have
            # the same random seed.
            import random #, whrandom
            seed = self.recorder.getRandomSeed()
            random.seed(seed)
            #whrandom.seed(seed & 0xff, (seed >> 8) & 0xff, (seed >> 16) & 0xff)
        # For some reason, wx needs to be initialized before the graphics window
        if sys.platform == "darwin":
            if self.config.GetBool("want-wx", 0):
                wx = importlib.import_module('wx')
                self.wxApp = wx.App()
            # Same goes for Tk, which uses a conflicting NSApplication
            if self.config.GetBool("want-tk", 0):
                Pmw = importlib.import_module('Pmw')
                self.tkRoot = Pmw.initialise()
        # Open the default rendering window.
        if self.windowType != 'none':
            props = WindowProperties.getDefault()
            if (self.config.GetBool('read-raw-mice', 0)):
                props.setRawMice(1)
            self.openDefaultWindow(startDirect = False, props=props)
        # The default is trackball mode, which is more convenient for
        # ad-hoc development in Python using ShowBase.  Applications
        # can explicitly call base.useDrive() if they prefer a drive
        # interface.
        self.mouseInterface = self.trackball
        self.useTrackball()
        self.loader = Loader.Loader(self)
        self.graphicsEngine.setDefaultLoader(self.loader.loader)
        ## The global event manager, as imported from EventManagerGlobal.
        self.eventMgr = eventMgr
        ## The global messenger, as imported from MessengerGlobal.
        self.messenger = messenger
        ## The global bulletin board, as imported from BulletinBoardGlobal.
        self.bboard = bulletinBoard
        ## The global task manager, as imported from TaskManagerGlobal.
        self.taskMgr = taskMgr
        self.task_mgr = taskMgr
        ## The global job manager, as imported from JobManagerGlobal.
        self.jobMgr = jobMgr
        ## Particle manager
        self.particleMgr = None
        self.particleMgrEnabled = 0
        ## Physics manager
        self.physicsMgr = None
        self.physicsMgrEnabled = 0
        self.physicsMgrAngular = 0
        ## This is the global input device manager, which keeps track of
        ## connected input devices.
        self.devices = InputDeviceManager.getGlobalPtr()
        self.__inputDeviceNodes = {}
        self.createStats()
        self.AppHasAudioFocus = 1
        # Get a pointer to Panda's global ClockObject, used for
        # synchronizing events between Python and C.
        globalClock = ClockObject.getGlobalClock()
        # Since we have already started up a TaskManager, and probably
        # a number of tasks; and since the TaskManager had to use the
        # TrueClock to tell time until this moment, make sure the
        # globalClock object is exactly in sync with the TrueClock.
        trueClock = TrueClock.getGlobalPtr()
        globalClock.setRealTime(trueClock.getShortTime())
        globalClock.tick()
        # Now we can make the TaskManager start using the new globalClock.
        taskMgr.globalClock = globalClock
        # client CPU affinity is determined by, in order:
        # - client-cpu-affinity-mask config
        # - pcalt-# (# is CPU number, 0-based)
        # - client-cpu-affinity config
        # - auto-single-cpu-affinity config
        affinityMask = self.config.GetInt('client-cpu-affinity-mask', -1)
        if affinityMask != -1:
            TrueClock.getGlobalPtr().setCpuAffinity(affinityMask)
        else:
            # this is useful on machines that perform better with each process
            # assigned to a single CPU
            autoAffinity = self.config.GetBool('auto-single-cpu-affinity', 0)
            affinity = None
            if autoAffinity and hasattr(builtins, 'clientIndex'):
                affinity = abs(int(builtins.clientIndex))
            else:
                affinity = self.config.GetInt('client-cpu-affinity', -1)
            if (affinity in (None, -1)) and autoAffinity:
                affinity = 0
            if affinity not in (None, -1):
                # Windows XP supports a 32-bit affinity mask
                TrueClock.getGlobalPtr().setCpuAffinity(1 << (affinity % 32))
        # Make sure we're not making more than one ShowBase.
        if hasattr(builtins, 'base'):
            raise Exception("Attempt to spawn multiple ShowBase instances!")
        # DO NOT ADD TO THIS LIST.  We're trying to phase out the use of
        # built-in variables by ShowBase.  Use a Global module if necessary.
        builtins.base = self
        builtins.render2d = self.render2d
        builtins.aspect2d = self.aspect2d
        builtins.pixel2d = self.pixel2d
        builtins.render = self.render
        builtins.hidden = self.hidden
        builtins.camera = self.camera
        builtins.loader = self.loader
        builtins.taskMgr = self.taskMgr
        builtins.jobMgr = self.jobMgr
        builtins.eventMgr = self.eventMgr
        builtins.messenger = self.messenger
        builtins.bboard = self.bboard
        # Config needs to be defined before ShowBase is constructed
        #builtins.config = self.config
        builtins.ostream = Notify.out()
        builtins.directNotify = directNotify
        builtins.giveNotify = giveNotify
        builtins.globalClock = globalClock
        builtins.vfs = vfs
        builtins.cpMgr = ConfigPageManager.getGlobalPtr()
        builtins.cvMgr = ConfigVariableManager.getGlobalPtr()
        builtins.pandaSystem = PandaSystem.getGlobalPtr()
        builtins.wantUberdog = self.config.GetBool('want-uberdog', 1)
        if __debug__:
            builtins.deltaProfiler = DeltaProfiler.DeltaProfiler("ShowBase")
            self.onScreenDebug = OnScreenDebug.OnScreenDebug()
            builtins.onScreenDebug = self.onScreenDebug
        if self.wantRender2dp:
            builtins.render2dp = self.render2dp
            builtins.aspect2dp = self.aspect2dp
            builtins.pixel2dp = self.pixel2dp
        # Now add this instance to the ShowBaseGlobal module scope.
        from . import ShowBaseGlobal
        builtins.run = ShowBaseGlobal.run
        ShowBaseGlobal.base = self
        ShowBaseGlobal.__dev__ = self.__dev__
        if self.__dev__:
            ShowBase.notify.debug('__dev__ == %s' % self.__dev__)
        else:
            ShowBase.notify.info('__dev__ == %s' % self.__dev__)
        self.createBaseAudioManagers()
        if self.__dev__ and self.config.GetBool('track-gui-items', False):
            # dict of guiId to gui item, for tracking down leaks
            if not hasattr(ShowBase, 'guiItems'):
                ShowBase.guiItems = {}
        # optionally restore the default gui sounds from 1.7.2 and earlier
        if ConfigVariableBool('orig-gui-sounds', False).getValue():
            from direct.gui import DirectGuiGlobals as DGG
            DGG.setDefaultClickSound(self.loader.loadSfx("audio/sfx/GUI_click.wav"))
            DGG.setDefaultRolloverSound(self.loader.loadSfx("audio/sfx/GUI_rollover.wav"))
        # Now hang a hook on the window-event from Panda.  This allows
        # us to detect when the user resizes, minimizes, or closes the
        # main window.
        self.__prevWindowProperties = None
        self.accept('window-event', self.windowEvent)
        # Transition effects (fade, iris, etc)
        from . import Transitions
        self.transitions = Transitions.Transitions(self.loader)
        if self.win:
            # Setup the window controls - handy for multiwindow applications
            self.setupWindowControls()
        # Client sleep
        sleepTime = self.config.GetFloat('client-sleep', 0.0)
        self.clientSleep = 0.0
        self.setSleep(sleepTime)
        # Extra sleep for running 4+ clients on a single machine
        # adds a sleep right after the main render in igloop
        # tends to even out the frame rate and keeps it from going
        # to zero in the out of focus windows
        if self.config.GetBool('multi-sleep', 0):
            self.multiClientSleep = 1
        else:
            self.multiClientSleep = 0
        # Offscreen buffer viewing utility.
        # This needs to be allocated even if the viewer is off.
        if self.wantRender2dp:
            self.bufferViewer = BufferViewer(self.win, self.render2dp)
        else:
            self.bufferViewer = BufferViewer(self.win, self.render2d)
        if self.windowType != 'none':
            if fStartDirect: # [gjeon] if this is False let them start direct manually
                self.__doStartDirect()
            if self.config.GetBool('show-tex-mem', False):
                if not self.texmem or self.texmem.cleanedUp:
                    self.toggleTexMem()
        taskMgr.finalInit()
        # Start IGLOOP
        self.restart()
# add a collision traverser via pushCTrav and remove it via popCTrav
# that way the owner of the new cTrav doesn't need to hold onto the
# previous one in order to put it back
def pushCTrav(self, cTrav):
self.cTravStack.push(self.cTrav)
self.cTrav = cTrav
def popCTrav(self):
self.cTrav = self.cTravStack.pop()
def __setupProfile(self):
""" Sets up the Python profiler, if available, according to
some Panda config settings. """
try:
profile = importlib.import_module('profile')
pstats = importlib.import_module('pstats')
except ImportError:
return
profile.Profile.bias = float(self.config.GetString("profile-bias","0"))
def f8(x):
return ("%" + "8.%df" % self.config.GetInt("profile-decimals", 3)) % x
pstats.f8 = f8
    # temp; see ToonBase.py
    def getExitErrorCode(self):
        """Return the process exit code used when the application quits.

        Always 0 here; the comment above suggests subclasses (e.g. ToonBase)
        override this — confirm before relying on it.
        """
        return 0
def printEnvDebugInfo(self):
"""Print some information about the environment that we are running
in. Stuff like the model paths and other paths. Feel free to
add stuff to this.
"""
if self.config.GetBool('want-env-debug-info', 0):
print("\n\nEnvironment Debug Info {")
print("* model path:")
print(getModelPath())
#print "* dna path:"
#print getDnaPath()
print("}")
    def destroy(self):
        """ Call this function to destroy the ShowBase and stop all
        its tasks, freeing all of the Panda resources.  Normally, you
        should not need to call it explicitly, as it is bound to the
        exitfunc and will be called at application exit time
        automatically.
        This function is designed to be safe to call multiple times."""
        # Snapshot the callback list so callbacks may mutate it safely.
        for cb in self.finalExitCallbacks[:]:
            cb()
        # Remove the built-in base reference (only if it still points at us)
        if getattr(builtins, 'base', None) is self:
            del builtins.run
            del builtins.base
            del builtins.loader
            del builtins.taskMgr
            ShowBaseGlobal = sys.modules.get('direct.showbase.ShowBaseGlobal', None)
            if ShowBaseGlobal:
                del ShowBaseGlobal.base
        self.aspect2d.node().removeAllChildren()
        # [gjeon] restore sticky key settings
        if self.config.GetBool('disable-sticky-keys', 0):
            allowAccessibilityShortcutKeys(True)
        self.ignoreAll()
        self.shutdown()
        # getattr guards below make repeated destroy() calls harmless.
        if getattr(self, 'musicManager', None):
            self.musicManager.shutdown()
            self.musicManager = None
            for sfxManager in self.sfxManagerList:
                sfxManager.shutdown()
            self.sfxManagerList = []
        if getattr(self, 'loader', None):
            self.loader.destroy()
            self.loader = None
        if getattr(self, 'graphicsEngine', None):
            self.graphicsEngine.removeAllWindows()
        # Best-effort: the DIRECT session panel may not exist at all.
        try:
            self.direct.panel.destroy()
        except:
            pass
        if hasattr(self, 'win'):
            del self.win
            del self.winList
            del self.pipe
def makeDefaultPipe(self, printPipeTypes = None):
"""
Creates the default GraphicsPipe, which will be used to make
windows unless otherwise specified.
"""
assert self.pipe == None
if printPipeTypes is None:
# When the user didn't specify an explicit setting, take the value
# from the config variable. We could just omit the parameter, however
# this way we can keep backward compatibility.
printPipeTypes = ConfigVariableBool("print-pipe-types", True)
selection = GraphicsPipeSelection.getGlobalPtr()
if printPipeTypes:
selection.printPipeTypes()
self.pipe = selection.makeDefaultPipe()
if not self.pipe:
self.notify.error(
"No graphics pipe is available!\n"
"Your Config.prc file must name at least one valid panda display\n"
"library via load-display or aux-display.")
self.notify.info("Default graphics pipe is %s (%s)." % (
self.pipe.getType().getName(), self.pipe.getInterfaceName()))
self.pipeList.append(self.pipe)
def makeModulePipe(self, moduleName):
"""
Returns a GraphicsPipe from the indicated module,
e.g. 'pandagl' or 'pandadx9'. Does not affect base.pipe or
base.pipeList.
"""
selection = GraphicsPipeSelection.getGlobalPtr()
return selection.makeModulePipe(moduleName)
def makeAllPipes(self):
"""
Creates all GraphicsPipes that the system knows about and fill up
self.pipeList with them.
"""
selection = GraphicsPipeSelection.getGlobalPtr()
selection.loadAuxModules()
# First, we should make sure the default pipe exists.
if self.pipe == None:
self.makeDefaultPipe()
# Now go through the list of known pipes, and make each one if
# we don't have one already.
numPipeTypes = selection.getNumPipeTypes()
for i in range(numPipeTypes):
pipeType = selection.getPipeType(i)
# Do we already have a pipe of this type on the list?
# This operation is n-squared, but presumably there won't
# be more than a handful of pipe types, so who cares.
already = 0
for pipe in self.pipeList:
if pipe.getType() == pipeType:
already = 1
if not already:
pipe = selection.makePipe(pipeType)
if pipe:
self.notify.info("Got aux graphics pipe %s (%s)." % (
pipe.getType().getName(), pipe.getInterfaceName()))
self.pipeList.append(pipe)
else:
self.notify.info("Could not make graphics pipe %s." % (
pipeType.getName()))
def openWindow(self, props = None, fbprops = None, pipe = None, gsg = None,
               host = None, type = None, name = None, size = None,
               aspectRatio = None, makeCamera = True, keepCamera = False,
               scene = None, stereo = None, unexposedDraw = None,
               callbackWindowDict = None, requireWindow = None):
    """
    Creates a window and adds it to the list of windows that are
    to be updated every frame.

    props is the WindowProperties that describes the window.

    type is either 'onscreen', 'offscreen', or 'none'.

    If keepCamera is true, the existing base.cam is set up to
    render into the new window.

    If keepCamera is false but makeCamera is true, a new camera is
    set up to render into the new window.

    If unexposedDraw is not None, it specifies the initial value
    of GraphicsWindow.setUnexposedDraw().

    If callbackWindowDict is not None, a CallbackGraphicWindow is
    created instead, which allows the caller to create the actual
    window with its own OpenGL context, and direct Panda's
    rendering into that window.

    If requireWindow is true, it means that the function should
    raise an exception if the window fails to open correctly.

    Returns the new window object, or None if it could not be
    opened (and requireWindow is false).
    """
    # Save this lambda here for convenience; we'll use it to call
    # down to the underlying _doOpenWindow() with all of the above
    # parameters.
    func = lambda : self._doOpenWindow(
        props = props, fbprops = fbprops, pipe = pipe, gsg = gsg,
        host = host, type = type, name = name, size = size,
        aspectRatio = aspectRatio, makeCamera = makeCamera,
        keepCamera = keepCamera, scene = scene, stereo = stereo,
        unexposedDraw = unexposedDraw,
        callbackWindowDict = callbackWindowDict)

    if self.win:
        # If we've already opened a window before, this is just a
        # pass-through to _doOpenWindow().
        win = func()
        self.graphicsEngine.openWindows()
        return win

    # First window: fall back on the configured defaults for any
    # unspecified type / requireWindow values.
    if type is None:
        type = self.windowType
    if requireWindow is None:
        requireWindow = self.requireWindow

    win = func()

    # Give the window a chance to truly open.
    self.graphicsEngine.openWindows()
    if win != None and not win.isValid():
        self.notify.info("Window did not open, removing.")
        self.closeWindow(win)
        win = None

    if win == None and pipe == None:
        # Try a little harder if the window wouldn't open.  Walk
        # through every available pipe type (other than the one that
        # just failed) until one of them produces a valid window.
        self.makeAllPipes()
        try:
            self.pipeList.remove(self.pipe)
        except ValueError:
            pass
        # NOTE(review): the loop tests self.win (assigned by
        # _doOpenWindow() on the first successful open), not the
        # local win, so it stops as soon as a main window exists.
        while self.win == None and self.pipeList:
            self.pipe = self.pipeList[0]
            self.notify.info("Trying pipe type %s (%s)" % (
                self.pipe.getType(), self.pipe.getInterfaceName()))
            win = func()

            self.graphicsEngine.openWindows()
            if win != None and not win.isValid():
                self.notify.info("Window did not open, removing.")
                self.closeWindow(win)
                win = None
            if win == None:
                # This pipe type doesn't work either; discard it.
                self.pipeList.remove(self.pipe)

    if win == None:
        self.notify.warning("Unable to open '%s' window." % (type))
        if requireWindow:
            # Unless require-window is set to false, it is an
            # error not to open a window.
            raise Exception('Could not open window.')
    else:
        self.notify.info("Successfully opened window of type %s (%s)" % (
            win.getType(), win.getPipe().getInterfaceName()))
    return win
def _doOpenWindow(self, props = None, fbprops = None, pipe = None,
                  gsg = None, host = None, type = None, name = None,
                  size = None, aspectRatio = None,
                  makeCamera = True, keepCamera = False,
                  scene = None, stereo = None, unexposedDraw = None,
                  callbackWindowDict = None):
    """
    The implementation of openWindow(): makes a single attempt to
    create a window (or offscreen buffer) on the indicated pipe,
    assigns it as the main window if there isn't one yet, and
    optionally sets up a camera for it.

    Returns the new window, or None if it could not be created.
    """
    # Resolve the pipe: fall back on the default pipe, creating it
    # if necessary.
    if pipe == None:
        pipe = self.pipe

        if pipe == None:
            self.makeDefaultPipe()
            pipe = self.pipe

        if pipe == None:
            # We couldn't get a pipe.
            return None

    if isinstance(gsg, GraphicsOutput):
        # If the gsg is a window or buffer, it means to use the
        # GSG from that buffer.
        host = gsg
        gsg = gsg.getGsg()

    # If we are using DirectX, force a new GSG to be created,
    # since at the moment DirectX seems to misbehave if we do
    # not do this.  This will cause a delay while all textures
    # etc. are reloaded, so we should revisit this later if we
    # can fix the underlying bug in our DirectX support.
    if pipe.getType().getName().startswith('wdx'):
        gsg = None

    # Fill in defaults for any unspecified parameters.
    if type == None:
        type = self.windowType

    if props == None:
        props = WindowProperties.getDefault()

    if fbprops == None:
        fbprops = FrameBufferProperties.getDefault()

    if size != None:
        # If we were given an explicit size, use it; otherwise,
        # the size from the properties is used.
        props = WindowProperties(props)
        props.setSize(size[0], size[1])

    if name == None:
        # Generate a unique name per window.
        name = 'window%s' % (self.nextWindowIndex)
        self.nextWindowIndex += 1

    win = None

    # Translate the requested window type into makeOutput() flags.
    flags = GraphicsPipe.BFFbPropsOptional
    if type == 'onscreen':
        flags = flags | GraphicsPipe.BFRequireWindow
    elif type == 'offscreen':
        flags = flags | GraphicsPipe.BFRefuseWindow

    if callbackWindowDict:
        flags = flags | GraphicsPipe.BFRequireCallbackWindow

    if host:
        # Share the host output's GSG and make the new output a
        # child of the host.
        assert host.isValid()
        win = self.graphicsEngine.makeOutput(pipe, name, 0, fbprops,
                                             props, flags, host.getGsg(), host)
    elif gsg:
        win = self.graphicsEngine.makeOutput(pipe, name, 0, fbprops,
                                             props, flags, gsg)
    else:
        win = self.graphicsEngine.makeOutput(pipe, name, 0, fbprops,
                                             props, flags)

    if win == None:
        # Couldn't create a window!
        return None

    if unexposedDraw is not None and hasattr(win, 'setUnexposedDraw'):
        win.setUnexposedDraw(unexposedDraw)

    if callbackWindowDict:
        # If we asked for (and received) a CallbackGraphicsWindow,
        # we now have to assign the callbacks, before we start
        # trying to do anything with the window.
        for callbackName in ['Events', 'Properties', 'Render']:
            func = callbackWindowDict.get(callbackName, None)
            if not func:
                continue
            # e.g. 'Events' -> win.setEventsCallback(...)
            setCallbackName = 'set%sCallback' % (callbackName)
            setCallback = getattr(win, setCallbackName)
            setCallback(PythonCallbackObject(func))

        # We also need to set up the mouse/keyboard objects.
        for inputName in callbackWindowDict.get('inputDevices', ['mouse']):
            win.createInputDevice(inputName)

    if hasattr(win, "requestProperties"):
        win.requestProperties(props)

    # The first window opened becomes the main window (self.win).
    mainWindow = False
    if self.win == None:
        mainWindow = True
        self.win = win

    self.winList.append(win)

    # Set up a 3-d camera for the window by default.
    if keepCamera:
        self.makeCamera(win, scene = scene, aspectRatio = aspectRatio,
                        stereo = stereo, useCamera = self.cam)
    elif makeCamera:
        self.makeCamera(win, scene = scene, aspectRatio = aspectRatio,
                        stereo = stereo)

    # Announce the new window to any listeners.
    messenger.send('open_window', [win, mainWindow])
    if mainWindow:
        messenger.send('open_main_window')

    return win
def closeWindow(self, win, keepCamera = False, removeWindow = True):
    """
    Closes the indicated window and removes it from the list of
    windows.  If it is the main window, clears the main window
    pointer to None.

    If keepCamera is true, the cameras rendering into the window
    are preserved; otherwise, cameras no longer used by any
    DisplayRegion are removed.  If removeWindow is false, the
    window is detached from ShowBase's bookkeeping but not
    actually removed from the GraphicsEngine.
    """
    win.setActive(False)

    # First, remove all of the cameras associated with display
    # regions on the window.
    numRegions = win.getNumDisplayRegions()
    for i in range(numRegions):
        dr = win.getDisplayRegion(i)

        # [gjeon] remove drc in base.direct.drList
        if self.direct is not None:
            for drc in self.direct.drList:
                if drc.cam == dr.getCamera():
                    self.direct.drList.displayRegionList.remove(drc)
                    break

        # Detach the camera from the display region.
        cam = NodePath(dr.getCamera())
        dr.setCamera(NodePath())

        if not cam.isEmpty() and \
           cam.node().getNumDisplayRegions() == 0 and \
           not keepCamera:
            # If the camera is used by no other DisplayRegions,
            # remove it.
            if self.camList.count(cam) != 0:
                self.camList.remove(cam)

            # Don't throw away self.camera; we want to
            # preserve it for reopening the window.
            if cam == self.cam:
                self.cam = None
            if cam == self.cam2d:
                self.cam2d = None
            if cam == self.cam2dp:
                self.cam2dp = None
            cam.removeNode()

    # [gjeon] remove winControl
    for winCtrl in self.winControls:
        if winCtrl.win == win:
            self.winControls.remove(winCtrl)
            break

    # Now we can actually close the window.
    if removeWindow:
        self.graphicsEngine.removeWindow(win)
    self.winList.remove(win)

    mainWindow = False
    if win == self.win:
        mainWindow = True
        self.win = None
        # The on-screen meters were attached to the main window;
        # tear them down along with it.
        if self.frameRateMeter:
            self.frameRateMeter.clearWindow()
            self.frameRateMeter = None
        if self.sceneGraphAnalyzerMeter:
            self.sceneGraphAnalyzerMeter.clearWindow()
            self.sceneGraphAnalyzerMeter = None

    # Announce the closure to any listeners.
    messenger.send('close_window', [win, mainWindow])
    if mainWindow:
        messenger.send('close_main_window')

    if not self.winList:
        # Give the window(s) a chance to actually close before we
        # continue.
        self.graphicsEngine.renderFrame()
def openDefaultWindow(self, *args, **kw):
    # Creates the main window for the first time, without being
    # too particular about the kind of graphics API that is
    # chosen.  The suggested window type from the load-display
    # config variable is tried first; if that fails, the first
    # window type that can be successfully opened at all is
    # accepted.  Returns true on success, false otherwise.
    #
    # This is intended to be called only once, at application
    # startup.  It is normally called automatically unless
    # window-type is configured to 'none'.

    # Extract our own keyword argument before forwarding the rest.
    startDirect = kw.pop('startDirect', True)

    self.openMainWindow(*args, **kw)
    if startDirect:
        self.__doStartDirect()
    return self.win != None
def openMainWindow(self, *args, **kw):
    """
    Creates the initial, main window for the application, and sets
    up the mouse and render2d structures appropriately for it.  If
    this method is called a second time, it will close the
    previous main window and open a new one, preserving the lens
    properties in base.camLens.

    The return value is true on success, or false on failure (in
    which case base.win may be either None, or the previous,
    closed window).
    """
    keepCamera = kw.get('keepCamera', False)

    success = 1
    oldWin = self.win
    oldLens = self.camLens
    oldClearColorActive = None
    if self.win != None:
        # Close the previous window, remembering its clear
        # properties so they can be restored on the new window.
        oldClearColorActive = self.win.getClearColorActive()
        oldClearColor = VBase4(self.win.getClearColor())
        oldClearDepthActive = self.win.getClearDepthActive()
        oldClearDepth = self.win.getClearDepth()
        oldClearStencilActive = self.win.getClearStencilActive()
        oldClearStencil = self.win.getClearStencil()
        self.closeWindow(self.win, keepCamera = keepCamera)

    # Open a new window.
    self.openWindow(*args, **kw)
    if self.win == None:
        # The open failed; put the old (closed) window back so the
        # caller can see what we had.
        self.win = oldWin
        self.winList.append(oldWin)
        success = 0

    if self.win != None:
        if isinstance(self.win, GraphicsWindow):
            # Only a true window (not an offscreen buffer) has
            # mouse/keyboard input to hook up.
            self.setupMouse(self.win)
        self.makeCamera2d(self.win)
        if self.wantRender2dp:
            self.makeCamera2dp(self.win)

        if oldLens != None:
            # Restore the previous lens properties.
            self.camNode.setLens(oldLens)
            self.camLens = oldLens

        if oldClearColorActive != None:
            # Restore the previous clear properties.
            self.win.setClearColorActive(oldClearColorActive)
            self.win.setClearColor(oldClearColor)
            self.win.setClearDepthActive(oldClearDepthActive)
            self.win.setClearDepth(oldClearDepth)
            self.win.setClearStencilActive(oldClearStencilActive)
            self.win.setClearStencil(oldClearStencil)

        flag = self.config.GetBool('show-frame-rate-meter', False)
        if self.appRunner is not None and self.appRunner.allowPythonDev:
            # In an allow_python_dev p3d application, we always
            # start up with the frame rate meter enabled, to
            # provide a visual reminder that this flag has been
            # set.
            flag = True
        self.setFrameRateMeter(flag)
        flag = self.config.GetBool('show-scene-graph-analyzer-meter', False)
        self.setSceneGraphAnalyzerMeter(flag)

    return success
def setSleep(self, amount):
    """
    Sets up a task that calls python 'sleep' every frame.  This is a simple
    way to reduce the CPU usage (and frame rate) of a panda program.
    """
    # Nothing to do if the sleep amount is unchanged.
    if self.clientSleep == amount:
        return
    self.clientSleep = amount

    # In either case, drop any previously scheduled sleep task.
    self.taskMgr.remove('clientSleep')
    if amount != 0.0:
        # Spawn it after igloop (at the end of each frame)
        self.taskMgr.add(self.__sleepCycleTask, 'clientSleep', sort = 55)
def __sleepCycleTask(self, task):
    """Per-frame task that sleeps for self.clientSleep seconds,
    throttling the frame rate (see setSleep)."""
    # Thread.sleep is Panda's sleep, which cooperates with Panda's
    # threading model.
    Thread.sleep(self.clientSleep)
    #time.sleep(self.clientSleep)
    return Task.cont
def setFrameRateMeter(self, flag):
    """
    Turns on or off (according to flag) a standard frame rate
    meter in the upper-right corner of the main window.
    """
    if flag and not self.frameRateMeter:
        # Create the meter and attach it to the main window.
        self.frameRateMeter = FrameRateMeter('frameRateMeter')
        self.frameRateMeter.setupWindow(self.win)
    elif not flag and self.frameRateMeter:
        # Tear it down again.
        self.frameRateMeter.clearWindow()
        self.frameRateMeter = None
def setSceneGraphAnalyzerMeter(self, flag):
    """
    Turns on or off (according to flag) a standard scene graph
    analyzer meter in the upper-right corner of the main window.
    """
    # NOTE: the previous docstring was copy-pasted from
    # setFrameRateMeter and wrongly described a frame rate meter.
    if flag:
        if not self.sceneGraphAnalyzerMeter:
            # Analyze the primary 3-d scene graph, self.render.
            self.sceneGraphAnalyzerMeter = SceneGraphAnalyzerMeter('sceneGraphAnalyzerMeter', self.render.node())
            self.sceneGraphAnalyzerMeter.setupWindow(self.win)
    else:
        if self.sceneGraphAnalyzerMeter:
            self.sceneGraphAnalyzerMeter.clearWindow()
            self.sceneGraphAnalyzerMeter = None
# [gjeon] now you can add more winControls after creating a showbase instance
def setupWindowControls(self, winCtrl=None):
    """Registers a WindowControls object for a window, so that
    multiple windows can each carry their own camera/mouse setup.
    If winCtrl is None, a default one is built for the main window
    from the current camera and mouse structures."""
    if winCtrl is None:
        winCtrl = WindowControls(
            self.win, mouseWatcher=self.mouseWatcher,
            cam=self.camera, camNode = self.camNode, cam2d=self.camera2d,
            mouseKeyboard = self.dataRoot.find("**/*"))
    self.winControls.append(winCtrl)
def setupRender(self):
    """
    Creates the render scene graph, the primary scene graph for
    rendering 3-d geometry.
    """
    ## This is the root of the 3-D scene graph.
    self.render = NodePath('render')
    # Renormalize normals after scales by default; cull backfaces.
    self.render.setAttrib(RescaleNormalAttrib.makeDefault())
    self.render.setTwoSided(0)

    # Flags tracked by the toggle methods (toggleBackface etc.).
    self.backfaceCullingEnabled = 1
    self.textureEnabled = 1
    self.wireframeEnabled = 0
def setupRender2d(self):
    """
    Creates the render2d scene graph, the primary scene graph for
    2-d objects and gui elements that are superimposed over the
    3-d geometry in the window.
    """
    ## This is the root of the 2-D scene graph.
    self.render2d = NodePath('render2d')

    # Set up some overrides to turn off certain properties which
    # we probably won't need for 2-d objects.

    # It's probably important to turn off the depth test, since
    # many 2-d objects will be drawn over each other without
    # regard to depth position.

    # We used to avoid clearing the depth buffer before drawing
    # render2d, but nowadays we clear it anyway, since we
    # occasionally want to put 3-d geometry under render2d, and
    # it's simplest (and seems to be easier on graphics drivers)
    # if the 2-d scene has been cleared first.

    self.render2d.setDepthTest(0)
    self.render2d.setDepthWrite(0)

    self.render2d.setMaterialOff(1)
    self.render2d.setTwoSided(1)

    # We've already created aspect2d in ShowBaseGlobal, for the
    # benefit of creating DirectGui elements before ShowBase.
    from . import ShowBaseGlobal

    ## The normal 2-d DisplayRegion has an aspect ratio that
    ## matches the window, but its coordinate system is square.
    ## This means anything we parent to render2d gets stretched.
    ## For things where that makes a difference, we set up
    ## aspect2d, which scales things back to the right aspect
    ## ratio along the X axis (Z is still from -1 to 1)
    self.aspect2d = ShowBaseGlobal.aspect2d
    self.aspect2d.reparentTo(self.render2d)

    aspectRatio = self.getAspectRatio()
    self.aspect2d.setScale(1.0 / aspectRatio, 1.0, 1.0)

    self.a2dBackground = self.aspect2d.attachNewNode("a2dBackground")

    ## The Z position of the top border of the aspect2d screen.
    self.a2dTop = 1.0
    ## The Z position of the bottom border of the aspect2d screen.
    self.a2dBottom = -1.0
    ## The X position of the left border of the aspect2d screen.
    self.a2dLeft = -aspectRatio
    ## The X position of the right border of the aspect2d screen.
    self.a2dRight = aspectRatio

    # Convenience anchor nodes at the edges and corners of the
    # aspect2d screen; the "Ns" variants are separate nodes for
    # DirectGui elements that should not be sorted with the rest.
    self.a2dTopCenter = self.aspect2d.attachNewNode("a2dTopCenter")
    self.a2dTopCenterNs = self.aspect2d.attachNewNode("a2dTopCenterNS")
    self.a2dBottomCenter = self.aspect2d.attachNewNode("a2dBottomCenter")
    self.a2dBottomCenterNs = self.aspect2d.attachNewNode("a2dBottomCenterNS")
    self.a2dLeftCenter = self.aspect2d.attachNewNode("a2dLeftCenter")
    self.a2dLeftCenterNs = self.aspect2d.attachNewNode("a2dLeftCenterNS")
    self.a2dRightCenter = self.aspect2d.attachNewNode("a2dRightCenter")
    self.a2dRightCenterNs = self.aspect2d.attachNewNode("a2dRightCenterNS")

    self.a2dTopLeft = self.aspect2d.attachNewNode("a2dTopLeft")
    self.a2dTopLeftNs = self.aspect2d.attachNewNode("a2dTopLeftNS")
    self.a2dTopRight = self.aspect2d.attachNewNode("a2dTopRight")
    self.a2dTopRightNs = self.aspect2d.attachNewNode("a2dTopRightNS")
    self.a2dBottomLeft = self.aspect2d.attachNewNode("a2dBottomLeft")
    self.a2dBottomLeftNs = self.aspect2d.attachNewNode("a2dBottomLeftNS")
    self.a2dBottomRight = self.aspect2d.attachNewNode("a2dBottomRight")
    self.a2dBottomRightNs = self.aspect2d.attachNewNode("a2dBottomRightNS")

    # Put the nodes in their places
    self.a2dTopCenter.setPos(0, 0, self.a2dTop)
    self.a2dTopCenterNs.setPos(0, 0, self.a2dTop)
    self.a2dBottomCenter.setPos(0, 0, self.a2dBottom)
    self.a2dBottomCenterNs.setPos(0, 0, self.a2dBottom)
    self.a2dLeftCenter.setPos(self.a2dLeft, 0, 0)
    self.a2dLeftCenterNs.setPos(self.a2dLeft, 0, 0)
    self.a2dRightCenter.setPos(self.a2dRight, 0, 0)
    self.a2dRightCenterNs.setPos(self.a2dRight, 0, 0)

    self.a2dTopLeft.setPos(self.a2dLeft, 0, self.a2dTop)
    self.a2dTopLeftNs.setPos(self.a2dLeft, 0, self.a2dTop)
    self.a2dTopRight.setPos(self.a2dRight, 0, self.a2dTop)
    self.a2dTopRightNs.setPos(self.a2dRight, 0, self.a2dTop)
    self.a2dBottomLeft.setPos(self.a2dLeft, 0, self.a2dBottom)
    self.a2dBottomLeftNs.setPos(self.a2dLeft, 0, self.a2dBottom)
    self.a2dBottomRight.setPos(self.a2dRight, 0, self.a2dBottom)
    self.a2dBottomRightNs.setPos(self.a2dRight, 0, self.a2dBottom)

    ## This special root, pixel2d, uses units in pixels that are relative
    ## to the window. The upperleft corner of the window is (0, 0),
    ## the lowerleft corner is (xsize, -ysize), in this coordinate system.
    self.pixel2d = self.render2d.attachNewNode(PGTop("pixel2d"))
    self.pixel2d.setPos(-1, 0, 1)
    xsize, ysize = self.getSize()
    if xsize > 0 and ysize > 0:
        self.pixel2d.setScale(2.0 / xsize, 1.0, 2.0 / ysize)
def setupRender2dp(self):
    """
    Creates a render2d scene graph, the secondary scene graph for
    2-d objects and gui elements that are superimposed over the
    2-d and 3-d geometry in the window.
    """
    self.render2dp = NodePath('render2dp')

    # Set up some overrides to turn off certain properties which
    # we probably won't need for 2-d objects.

    # It's probably important to turn off the depth test, since
    # many 2-d objects will be drawn over each other without
    # regard to depth position.

    # (The unused DepthTestAttrib/DepthWriteAttrib locals that used
    # to be built here were dead code and have been removed; the
    # set* calls below are what actually apply the state.)
    self.render2dp.setDepthTest(0)
    self.render2dp.setDepthWrite(0)

    self.render2dp.setMaterialOff(1)
    self.render2dp.setTwoSided(1)

    ## The normal 2-d DisplayRegion has an aspect ratio that
    ## matches the window, but its coordinate system is square.
    ## This means anything we parent to render2dp gets stretched.
    ## For things where that makes a difference, we set up
    ## aspect2dp, which scales things back to the right aspect
    ## ratio along the X axis (Z is still from -1 to 1)
    self.aspect2dp = self.render2dp.attachNewNode(PGTop("aspect2dp"))
    # Sort above the regular 2-d gui layer.
    self.aspect2dp.node().setStartSort(16384)

    aspectRatio = self.getAspectRatio()
    self.aspect2dp.setScale(1.0 / aspectRatio, 1.0, 1.0)

    ## The Z position of the top border of the aspect2dp screen.
    self.a2dpTop = 1.0
    ## The Z position of the bottom border of the aspect2dp screen.
    self.a2dpBottom = -1.0
    ## The X position of the left border of the aspect2dp screen.
    self.a2dpLeft = -aspectRatio
    ## The X position of the right border of the aspect2dp screen.
    self.a2dpRight = aspectRatio

    # Convenience anchor nodes at the edges and corners.
    self.a2dpTopCenter = self.aspect2dp.attachNewNode("a2dpTopCenter")
    self.a2dpBottomCenter = self.aspect2dp.attachNewNode("a2dpBottomCenter")
    self.a2dpLeftCenter = self.aspect2dp.attachNewNode("a2dpLeftCenter")
    self.a2dpRightCenter = self.aspect2dp.attachNewNode("a2dpRightCenter")

    self.a2dpTopLeft = self.aspect2dp.attachNewNode("a2dpTopLeft")
    self.a2dpTopRight = self.aspect2dp.attachNewNode("a2dpTopRight")
    self.a2dpBottomLeft = self.aspect2dp.attachNewNode("a2dpBottomLeft")
    self.a2dpBottomRight = self.aspect2dp.attachNewNode("a2dpBottomRight")

    # Put the nodes in their places
    self.a2dpTopCenter.setPos(0, 0, self.a2dpTop)
    self.a2dpBottomCenter.setPos(0, 0, self.a2dpBottom)
    self.a2dpLeftCenter.setPos(self.a2dpLeft, 0, 0)
    self.a2dpRightCenter.setPos(self.a2dpRight, 0, 0)

    self.a2dpTopLeft.setPos(self.a2dpLeft, 0, self.a2dpTop)
    self.a2dpTopRight.setPos(self.a2dpRight, 0, self.a2dpTop)
    self.a2dpBottomLeft.setPos(self.a2dpLeft, 0, self.a2dpBottom)
    self.a2dpBottomRight.setPos(self.a2dpRight, 0, self.a2dpBottom)

    ## This special root, pixel2d, uses units in pixels that are relative
    ## to the window. The upperleft corner of the window is (0, 0),
    ## the lowerleft corner is (xsize, -ysize), in this coordinate system.
    self.pixel2dp = self.render2dp.attachNewNode(PGTop("pixel2dp"))
    self.pixel2dp.node().setStartSort(16384)
    self.pixel2dp.setPos(-1, 0, 1)
    xsize, ysize = self.getSize()
    if xsize > 0 and ysize > 0:
        self.pixel2dp.setScale(2.0 / xsize, 1.0, 2.0 / ysize)
def setAspectRatio(self, aspectRatio):
    """ Sets the global aspect ratio of the main window.  Set it
    to None to restore automatic scaling. """
    self.__configAspectRatio = aspectRatio
    # Re-apply immediately so aspect2d and the lenses pick it up.
    self.adjustWindowAspectRatio(self.getAspectRatio())
def getAspectRatio(self, win = None):
    """Returns the actual aspect ratio of the indicated (or main)
    window, or the default aspect ratio if there is not yet a
    main window.  Never returns 0; falls back on 1."""
    # If the config is set, we return that
    if self.__configAspectRatio:
        return self.__configAspectRatio

    aspectRatio = 1

    if win == None:
        win = self.win

    if win != None and win.hasSize() and win.getSbsLeftYSize() != 0:
        # The window is open and has a real size; use it (the
        # SbsLeft size so side-by-side stereo reports one eye).
        aspectRatio = float(win.getSbsLeftXSize()) / float(win.getSbsLeftYSize())
    else:
        # No usable window size yet; fall back on the requested or
        # default WindowProperties.
        if win == None or not hasattr(win, "getRequestedProperties"):
            props = WindowProperties.getDefault()
        else:
            props = win.getRequestedProperties()
            if not props.hasSize():
                props = WindowProperties.getDefault()

        if props.hasSize() and props.getYSize() != 0:
            aspectRatio = float(props.getXSize()) / float(props.getYSize())

    if aspectRatio == 0:
        return 1

    return aspectRatio
def getSize(self, win = None):
    """Returns the actual size of the indicated (or main) window,
    or the default size if there is not yet a main window."""
    if win == None:
        win = self.win

    # An open window with a real size wins outright.
    if win != None and win.hasSize():
        return win.getXSize(), win.getYSize()

    # Otherwise consult the requested properties, falling back on
    # the configured defaults when they are absent or sizeless.
    props = None
    if win != None and hasattr(win, "getRequestedProperties"):
        props = win.getRequestedProperties()
        if not props.hasSize():
            props = None
    if props is None:
        props = WindowProperties.getDefault()
    return props.getXSize(), props.getYSize()
def makeCamera(self, win, sort = 0, scene = None,
               displayRegion = (0, 1, 0, 1), stereo = None,
               aspectRatio = None, clearDepth = 0, clearColor = None,
               lens = None, camName = 'cam', mask = None,
               useCamera = None):
    """
    Makes a new 3-d camera associated with the indicated window,
    and creates a display region in the indicated subrectangle.

    If stereo is True, then a stereo camera is created, with a
    pair of DisplayRegions.  If stereo is False, then a standard
    camera is created.  If stereo is None or omitted, a stereo
    camera is created if the window says it can render in stereo.

    If useCamera is not None, it is a NodePath to be used as the
    camera to apply to the window, rather than creating a new
    camera.

    Returns the NodePath of the camera that was set up.
    """
    # self.camera is the parent node of all cameras: a node that
    # we can move around to move all cameras as a group.
    if self.camera == None:
        # We make it a ModelNode with the PTLocal flag, so that
        # a wayward flatten operations won't attempt to mangle the
        # camera.
        self.camera = self.render.attachNewNode(ModelNode('camera'))
        self.camera.node().setPreserveTransform(ModelNode.PTLocal)
        builtins.camera = self.camera

        # Hook the trackball/drive interface up to the camera group.
        self.mouse2cam.node().setNode(self.camera.node())

    if useCamera:
        # Use the existing camera node.
        cam = useCamera
        camNode = useCamera.node()
        assert(isinstance(camNode, Camera))
        lens = camNode.getLens()
        cam.reparentTo(self.camera)
    else:
        # Make a new Camera node.
        camNode = Camera(camName)
        if lens == None:
            lens = PerspectiveLens()

            if aspectRatio == None:
                aspectRatio = self.getAspectRatio(win)
            lens.setAspectRatio(aspectRatio)

        cam = self.camera.attachNewNode(camNode)

    if lens != None:
        camNode.setLens(lens)

    if scene != None:
        camNode.setScene(scene)

    if mask != None:
        # Accept a raw int for convenience.
        if (isinstance(mask, int)):
            mask = BitMask32(mask)
        camNode.setCameraMask(mask)

    # The first camera created becomes the default camera.
    if self.cam == None:
        self.cam = cam
        self.camNode = camNode
        self.camLens = lens

    self.camList.append(cam)

    # Now, make a DisplayRegion for the camera.
    if stereo is not None:
        if stereo:
            dr = win.makeStereoDisplayRegion(*displayRegion)
        else:
            dr = win.makeMonoDisplayRegion(*displayRegion)
    else:
        # Let the window decide whether to render in stereo.
        dr = win.makeDisplayRegion(*displayRegion)
    dr.setSort(sort)

    # By default, we do not clear 3-d display regions (the entire
    # window will be cleared, which is normally sufficient).  But
    # we will if clearDepth is specified.
    if clearDepth:
        dr.setClearDepthActive(1)
    if clearColor:
        dr.setClearColorActive(1)
        dr.setClearColor(clearColor)

    dr.setCamera(cam)

    return cam
def makeCamera2d(self, win, sort = 10,
                 displayRegion = (0, 1, 0, 1), coords = (-1, 1, -1, 1),
                 lens = None, cameraName = None):
    """
    Makes a new camera2d associated with the indicated window, and
    assigns it to render the indicated subrectangle of render2d.
    Returns the NodePath of the new camera.
    """
    dr = win.makeMonoDisplayRegion(*displayRegion)
    dr.setSort(sort)

    # Enable clearing of the depth buffer on this new display
    # region (see the comment in setupRender2d, above).
    dr.setClearDepthActive(1)

    # Make any texture reloads on the gui come up immediately.
    dr.setIncompleteRender(False)

    left, right, bottom, top = coords

    # Now make a new Camera node.
    if (cameraName):
        cam2dNode = Camera('cam2d_' + cameraName)
    else:
        cam2dNode = Camera('cam2d')

    if lens == None:
        # Default orthographic lens mapping the coords rectangle.
        lens = OrthographicLens()
        lens.setFilmSize(right - left, top - bottom)
        lens.setFilmOffset((right + left) * 0.5, (top + bottom) * 0.5)
        lens.setNearFar(-1000, 1000)
    cam2dNode.setLens(lens)

    # self.camera2d is the analog of self.camera, although it's
    # not as clear how useful it is.
    if self.camera2d == None:
        self.camera2d = self.render2d.attachNewNode('camera2d')

    camera2d = self.camera2d.attachNewNode(cam2dNode)
    dr.setCamera(camera2d)

    if self.cam2d == None:
        self.cam2d = camera2d

    return camera2d
def makeCamera2dp(self, win, sort = 20,
                  displayRegion = (0, 1, 0, 1), coords = (-1, 1, -1, 1),
                  lens = None, cameraName = None):
    """
    Makes a new camera2dp associated with the indicated window, and
    assigns it to render the indicated subrectangle of render2dp.
    Returns the NodePath of the new camera.
    """
    dr = win.makeMonoDisplayRegion(*displayRegion)
    dr.setSort(sort)

    # Unlike render2d, we don't clear the depth buffer for
    # render2dp.  Caveat emptor.

    if hasattr(dr, 'setIncompleteRender'):
        # Make any texture reloads on the gui come up immediately.
        dr.setIncompleteRender(False)

    left, right, bottom, top = coords

    # Now make a new Camera node.
    if (cameraName):
        cam2dNode = Camera('cam2dp_' + cameraName)
    else:
        cam2dNode = Camera('cam2dp')

    if lens == None:
        # Default orthographic lens mapping the coords rectangle.
        lens = OrthographicLens()
        lens.setFilmSize(right - left, top - bottom)
        lens.setFilmOffset((right + left) * 0.5, (top + bottom) * 0.5)
        lens.setNearFar(-1000, 1000)
    cam2dNode.setLens(lens)

    # self.camera2dp is the analog of self.camera, although it's
    # not as clear how useful it is.
    if self.camera2dp == None:
        self.camera2dp = self.render2dp.attachNewNode('camera2dp')

    camera2dp = self.camera2dp.attachNewNode(cam2dNode)
    dr.setCamera(camera2dp)

    if self.cam2dp == None:
        self.cam2dp = camera2dp

    return camera2dp
def setupDataGraph(self):
    """
    Creates the data graph and populates it with the basic input
    devices.
    """
    self.dataRoot = NodePath('dataRoot')
    # Cache the node so we do not ask for it every frame
    self.dataRootNode = self.dataRoot.node()

    # Now we have the main trackball & drive interfaces.
    # useTrackball() and useDrive() switch these in and out; only
    # one is in use at a given time.
    self.trackball = NodePath(Trackball('trackball'))
    self.drive = NodePath(DriveInterface('drive'))
    # mouse2cam transfers the trackball/drive transform onto the
    # camera group node.
    self.mouse2cam = NodePath(Transform2SG('mouse2cam'))
# [gjeon] now you can create multiple mouse watchers to support multiple windows
def setupMouse(self, win, fMultiWin=False):
    """
    Creates the structures necessary to monitor the mouse input,
    using the indicated window.  If the mouse has already been set
    up for a different window, those structures are deleted first.

    The return value is the ButtonThrower NodePath created for
    this window.

    If fMultiWin is true, then the previous mouse structures are
    not deleted; instead, multiple windows are allowed to monitor
    the mouse input.  However, in this case, the trackball
    controls are not set up, and must be set up by hand if
    desired.
    """
    if not fMultiWin and self.buttonThrowers != None:
        # Tear down the previous MouseAndKeyboard / MouseWatcher /
        # ButtonThrower chains (each throw's two ancestors).
        for bt in self.buttonThrowers:
            mw = bt.getParent()
            mk = mw.getParent()
            bt.removeNode()
            mw.removeNode()
            mk.removeNode()

    bts, pws = self.setupMouseCB(win)

    if fMultiWin:
        # In multi-window mode, just hand back the new thrower;
        # don't disturb the global mouse structures below.
        return bts[0]

    self.buttonThrowers = bts[:]
    self.pointerWatcherNodes = pws[:]

    self.mouseWatcher = self.buttonThrowers[0].getParent()
    self.mouseWatcherNode = self.mouseWatcher.node()

    if self.mouseInterface:
        self.mouseInterface.reparentTo(self.mouseWatcher)

    if self.recorder:
        # If we have a recorder, the mouseWatcher belongs under a
        # special MouseRecorder node, which may intercept the
        # mouse activity.
        mw = self.buttonThrowers[0].getParent()
        mouseRecorder = MouseRecorder('mouse')
        self.recorder.addRecorder('mouse', mouseRecorder)
        np = mw.getParent().attachNewNode(mouseRecorder)
        mw.reparentTo(np)

    mw = self.buttonThrowers[0].getParent()

    ## A special ButtonThrower to generate keyboard events and
    ## include the time from the OS.  This is separate only to
    ## support legacy code that did not expect a time parameter; it
    ## will eventually be folded into the normal ButtonThrower,
    ## above.
    self.timeButtonThrower = mw.attachNewNode(ButtonThrower('timeButtons'))
    self.timeButtonThrower.node().setPrefix('time-')
    self.timeButtonThrower.node().setTimeFlag(1)

    # Tell the gui system about our new mouse watcher.
    self.aspect2d.node().setMouseWatcher(mw.node())
    self.pixel2d.node().setMouseWatcher(mw.node())
    if self.wantRender2dp:
        self.aspect2dp.node().setMouseWatcher(mw.node())
        self.pixel2dp.node().setMouseWatcher(mw.node())
    mw.node().addRegion(PGMouseWatcherBackground())

    return self.buttonThrowers[0]
# [gjeon] this function is separated from setupMouse to allow multiple mouse watchers
def setupMouseCB(self, win):
    """Builds the per-device input chains for the indicated window
    and returns (buttonThrowers, pointerWatcherNodes).  Separated
    from setupMouse() to allow multiple mouse watchers."""
    # For each mouse/keyboard device, we create
    #  - MouseAndKeyboard
    #  - MouseWatcher
    #  - ButtonThrower
    # The ButtonThrowers are stored in a list, self.buttonThrowers.
    # Given a ButtonThrower, one can access the MouseWatcher and
    # MouseAndKeyboard using getParent.
    #
    # The MouseAndKeyboard generates mouse events and mouse
    # button/keyboard events; the MouseWatcher passes them through
    # unchanged when the mouse is not over a 2-d button, and passes
    # nothing through when the mouse *is* over a 2-d button.  Therefore,
    # objects that don't want to get events when the mouse is over a
    # button, like the driveInterface, should be parented to
    # MouseWatcher, while objects that want events in all cases, like the
    # chat interface, should be parented to the MouseAndKeyboard.
    buttonThrowers = []
    pointerWatcherNodes = []
    for i in range(win.getNumInputDevices()):
        name = win.getInputDeviceName(i)
        mk = self.dataRoot.attachNewNode(MouseAndKeyboard(win, i, name))
        mw = mk.attachNewNode(MouseWatcher("watcher%s" % (i)))

        if win.getSideBySideStereo():
            # If the window has side-by-side stereo enabled, then
            # we should constrain the MouseWatcher to the window's
            # DisplayRegion.  This will enable the MouseWatcher to
            # track the left and right halves of the screen
            # individually.
            mw.node().setDisplayRegion(win.getOverlayDisplayRegion())

        # Track the modifier keys on the watcher ...
        mb = mw.node().getModifierButtons()
        mb.addButton(KeyboardButton.shift())
        mb.addButton(KeyboardButton.control())
        mb.addButton(KeyboardButton.alt())
        mb.addButton(KeyboardButton.meta())
        mw.node().setModifierButtons(mb)
        bt = mw.attachNewNode(ButtonThrower("buttons%s" % (i)))
        if (i != 0):
            # Secondary devices get a distinguishing event prefix.
            bt.node().setPrefix('mousedev%s-' % (i))
        # ... and on the thrower, so events carry shift-/alt- etc.
        mods = ModifierButtons()
        mods.addButton(KeyboardButton.shift())
        mods.addButton(KeyboardButton.control())
        mods.addButton(KeyboardButton.alt())
        mods.addButton(KeyboardButton.meta())
        bt.node().setModifierButtons(mods)
        buttonThrowers.append(bt)
        if (win.hasPointer(i)):
            pointerWatcherNodes.append(mw.node())

    return buttonThrowers, pointerWatcherNodes
def enableSoftwareMousePointer(self):
    """
    Creates some geometry and parents it to render2d to show
    the currently-known mouse position.  Useful if the mouse
    pointer is invisible for some reason.
    """
    mouseViz = render2d.attachNewNode('mouseViz')
    lilsmiley = loader.loadModel('lilsmiley')
    lilsmiley.reparentTo(mouseViz)

    aspectRatio = self.getAspectRatio()
    # Scale the smiley face to 32x32 pixels.
    height = self.win.getSbsLeftYSize()
    lilsmiley.setScale(
        32.0 / height / aspectRatio,
        1.0, 32.0 / height)
    # The MouseWatcher will position this geometry at the pointer.
    self.mouseWatcherNode.setGeometry(mouseViz.node())
def getAlt(self):
    """Returns true if the alt key is currently held down."""
    mods = self.mouseWatcherNode.getModifierButtons()
    return mods.isDown(KeyboardButton.alt())
def getShift(self):
    """Returns true if the shift key is currently held down."""
    mods = self.mouseWatcherNode.getModifierButtons()
    return mods.isDown(KeyboardButton.shift())
def getControl(self):
    """Returns true if the control key is currently held down."""
    mods = self.mouseWatcherNode.getModifierButtons()
    return mods.isDown(KeyboardButton.control())
def getMeta(self):
    """Returns true if the meta key is currently held down."""
    mods = self.mouseWatcherNode.getModifierButtons()
    return mods.isDown(KeyboardButton.meta())
def attachInputDevice(self, device, prefix=None):
    """
    This function attaches an input device to the data graph, which will
    cause the device to be polled and generate events.  If a prefix is
    given and not None, it is used to prefix events generated by this
    device, separated by a hyphen.

    If you call this, you should consider calling detachInputDevice when
    you are done with the device or when it is disconnected.
    """
    # Protect against the same device being attached multiple times.
    assert device not in self.__inputDeviceNodes

    idn = self.dataRoot.attachNewNode(InputDeviceNode(device, device.name))

    # Setup the button thrower to generate events for the device.
    bt = idn.attachNewNode(ButtonThrower(device.name))
    if prefix is not None:
        bt.node().setPrefix(prefix + '-')

    # The assert wrapper makes this debug call vanish entirely
    # under python -O (notify.debug returns a true value).
    assert self.notify.debug("Attached input device {0} with prefix {1}".format(device, prefix))
    self.__inputDeviceNodes[device] = idn
    self.deviceButtonThrowers.append(bt)
    def detachInputDevice(self, device):
        """
        This should be called after attaching an input device using
        attachInputDevice and the device is disconnected or you no longer wish
        to keep polling this device for events.

        You do not strictly need to call this if you expect the device to be
        reconnected (but be careful that you don't reattach it).
        """
        if device not in self.__inputDeviceNodes:
            # Development builds trip this assert; optimized (-O) builds
            # silently ignore the unknown device and return.
            assert device in self.__inputDeviceNodes
            return
        # assert wrapper strips this debug call under -O.
        assert self.notify.debug("Detached device {0}".format(device.name))

        # Remove the ButtonThrower from the deviceButtonThrowers list.
        # Removing while iterating is safe here because we break
        # immediately after the single removal.
        idn = self.__inputDeviceNodes[device]
        for bt in self.deviceButtonThrowers:
            if idn.isAncestorOf(bt):
                self.deviceButtonThrowers.remove(bt)
                break

        idn.removeNode()
        del self.__inputDeviceNodes[device]
def addAngularIntegrator(self):
if not self.physicsMgrAngular:
physics = importlib.import_module('panda3d.physics')
self.physicsMgrAngular = 1
integrator = physics.AngularEulerIntegrator()
self.physicsMgr.attachAngularIntegrator(integrator)
def enableParticles(self):
if not self.particleMgrEnabled:
# Use importlib to prevent this import from being picked up
# by modulefinder when packaging an application.
if not self.particleMgr:
PMG = importlib.import_module('direct.particles.ParticleManagerGlobal')
self.particleMgr = PMG.particleMgr
self.particleMgr.setFrameStepping(1)
if not self.physicsMgr:
PMG = importlib.import_module('direct.showbase.PhysicsManagerGlobal')
physics = importlib.import_module('panda3d.physics')
self.physicsMgr = PMG.physicsMgr
integrator = physics.LinearEulerIntegrator()
self.physicsMgr.attachLinearIntegrator(integrator)
self.particleMgrEnabled = 1
self.physicsMgrEnabled = 1
self.taskMgr.remove('manager-update')
self.taskMgr.add(self.updateManagers, 'manager-update')
def disableParticles(self):
if self.particleMgrEnabled:
self.particleMgrEnabled = 0
self.physicsMgrEnabled = 0
self.taskMgr.remove('manager-update')
def toggleParticles(self):
if self.particleMgrEnabled == 0:
self.enableParticles()
else:
self.disableParticles()
def isParticleMgrEnabled(self):
return self.particleMgrEnabled
def isPhysicsMgrEnabled(self):
return self.physicsMgrEnabled
def updateManagers(self, state):
dt = globalClock.getDt()
if (self.particleMgrEnabled == 1):
self.particleMgr.doParticles(dt)
if (self.physicsMgrEnabled == 1):
self.physicsMgr.doPhysics(dt)
return Task.cont
def createStats(self, hostname=None, port=None):
# You can specify pstats-host in your Config.prc or use ~pstats/~aipstats
# The default is localhost
if not self.wantStats:
return False
if PStatClient.isConnected():
PStatClient.disconnect()
# these default values match the C++ default values
if hostname is None:
hostname = ''
if port is None:
port = -1
PStatClient.connect(hostname, port)
return PStatClient.isConnected()
def addSfxManager(self, extraSfxManager):
# keep a list of sfx manager objects to apply settings to,
# since there may be others in addition to the one we create here
self.sfxManagerList.append(extraSfxManager)
newSfxManagerIsValid = (extraSfxManager!=None) and extraSfxManager.isValid()
self.sfxManagerIsValidList.append(newSfxManagerIsValid)
if newSfxManagerIsValid:
extraSfxManager.setActive(self.sfxActive)
def createBaseAudioManagers(self):
self.sfxPlayer = SfxPlayer.SfxPlayer()
sfxManager = AudioManager.createAudioManager()
self.addSfxManager(sfxManager)
self.musicManager = AudioManager.createAudioManager()
self.musicManagerIsValid=self.musicManager!=None \
and self.musicManager.isValid()
if self.musicManagerIsValid:
# ensure only 1 midi song is playing at a time:
self.musicManager.setConcurrentSoundLimit(1)
self.musicManager.setActive(self.musicActive)
# enableMusic/enableSoundEffects are meant to be called in response
# to a user request so sfxActive/musicActive represent how things
# *should* be, regardless of App/OS/HW state
def enableMusic(self, bEnableMusic):
# don't setActive(1) if no audiofocus
if self.AppHasAudioFocus and self.musicManagerIsValid:
self.musicManager.setActive(bEnableMusic)
self.musicActive = bEnableMusic
if bEnableMusic:
# This is useful when we want to play different music
# from what the manager has queued
messenger.send("MusicEnabled")
self.notify.debug("Enabling music")
else:
self.notify.debug("Disabling music")
def SetAllSfxEnables(self, bEnabled):
for i in range(len(self.sfxManagerList)):
if (self.sfxManagerIsValidList[i]):
self.sfxManagerList[i].setActive(bEnabled)
def enableSoundEffects(self, bEnableSoundEffects):
# don't setActive(1) if no audiofocus
if self.AppHasAudioFocus or (bEnableSoundEffects==0):
self.SetAllSfxEnables(bEnableSoundEffects)
self.sfxActive=bEnableSoundEffects
if bEnableSoundEffects:
self.notify.debug("Enabling sound effects")
else:
self.notify.debug("Disabling sound effects")
# enable/disableAllAudio allow a programmable global override-off
# for current audio settings. they're meant to be called when app
# loses audio focus (switched out), so we can turn off sound without
# affecting internal sfxActive/musicActive sound settings, so things
# come back ok when the app is switched back to
def disableAllAudio(self):
self.AppHasAudioFocus = 0
self.SetAllSfxEnables(0)
if self.musicManagerIsValid:
self.musicManager.setActive(0)
self.notify.debug("Disabling audio")
def enableAllAudio(self):
self.AppHasAudioFocus = 1
self.SetAllSfxEnables(self.sfxActive)
if self.musicManagerIsValid:
self.musicManager.setActive(self.musicActive)
self.notify.debug("Enabling audio")
# This function should only be in the loader but is here for
# backwards compatibility. Please do not add code here, add
# it to the loader.
    def loadSfx(self, name):
        """Deprecated; use base.loader.loadSfx instead.  Kept only for
        backwards compatibility.

        The assert wrapper means the deprecation warning is evaluated
        only in dev builds (notify.warning returns a true value) and is
        stripped entirely under -O.
        """
        assert self.notify.warning("base.loadSfx is deprecated, use base.loader.loadSfx instead.")
        return self.loader.loadSfx(name)
# This function should only be in the loader but is here for
# backwards compatibility. Please do not add code here, add
# it to the loader.
    def loadMusic(self, name):
        """Deprecated; use base.loader.loadMusic instead.  Kept only for
        backwards compatibility.

        The assert wrapper means the deprecation warning is evaluated
        only in dev builds (notify.warning returns a true value) and is
        stripped entirely under -O.
        """
        assert self.notify.warning("base.loadMusic is deprecated, use base.loader.loadMusic instead.")
        return self.loader.loadMusic(name)
    def playSfx(
            self, sfx, looping = 0, interrupt = 1, volume = None,
            time = 0.0, node = None, listener = None, cutoff = None):
        """Plays the given sound effect through the SfxPlayer, which
        may apply localization/positional adjustments; all parameters
        are forwarded unchanged."""
        # This goes through a special player for potential localization
        return self.sfxPlayer.playSfx(sfx, looping, interrupt, volume, time, node, listener, cutoff)
def playMusic(self, music, looping = 0, interrupt = 1, volume = None, time = 0.0):
if music:
if volume != None:
music.setVolume(volume)
# if interrupt was set to 0, start over even if it's
# already playing
if interrupt or (music.status() != AudioSound.PLAYING):
music.setTime(time)
music.setLoop(looping)
music.play()
    def __resetPrevTransform(self, state):
        """Task (sort -51): clears the accumulated "previous transform"
        deltas on all PandaNodes at the very top of the frame."""
        # Clear out the previous velocity deltas now, after we have
        # rendered (the previous frame). We do this after the render,
        # so that we have a chance to draw a representation of spheres
        # along with their velocities. At the beginning of the frame
        # really means after the command prompt, which allows the user
        # to interactively query these deltas meaningfully.
        PandaNode.resetAllPrevTransform()
        return Task.cont
    def __dataLoop(self, state):
        """Task (sort -50): polls devices and traverses the data graph
        so control inputs are read and acted on each frame."""
        # Check if there were newly connected devices.
        self.devices.update()

        # traverse the data graph.  This reads all the control
        # inputs (from the mouse and keyboard, for instance) and also
        # directly acts upon them (for instance, to move the avatar).
        self.dgTrav.traverse(self.dataRootNode)
        return Task.cont
    def __ivalLoop(self, state):
        """Task (sort 20): steps every interval registered with the
        global ivalMgr."""
        # Execute all intervals in the global ivalMgr.
        IntervalManager.ivalMgr.step()
        return Task.cont
def initShadowTrav(self):
if not self.shadowTrav:
# set up the shadow collision traverser
self.shadowTrav = CollisionTraverser("base.shadowTrav")
self.shadowTrav.setRespectPrevTransform(False)
def __shadowCollisionLoop(self, state):
# run the collision traversal if we have a
# CollisionTraverser set.
if self.shadowTrav:
self.shadowTrav.traverse(self.render)
return Task.cont
def __collisionLoop(self, state):
# run the collision traversal if we have a
# CollisionTraverser set.
if self.cTrav:
self.cTrav.traverse(self.render)
if self.appTrav:
self.appTrav.traverse(self.render)
if self.shadowTrav:
self.shadowTrav.traverse(self.render)
messenger.send("collisionLoopFinished")
return Task.cont
def __audioLoop(self, state):
if (self.musicManager != None):
self.musicManager.update()
for x in self.sfxManagerList:
x.update()
return Task.cont
    def __garbageCollectStates(self, state):
        """ This task is started only when we have
        garbage-collect-states set in the Config.prc file, in which
        case we're responsible for taking out Panda's garbage from
        time to time.  This is not to be confused with Python's
        garbage collection. """
        # Releases TransformState and RenderState objects whose
        # reference counts have dropped, on Panda's C++ side.
        TransformState.garbageCollect()
        RenderState.garbageCollect()
        return Task.cont
    def __igLoop(self, state):
        """Task (sort 50): renders the frame.  The statement order here
        is the frame contract: debug overlay, recorder record, render,
        optional cluster sync, recorder playback, then the NewFrame
        event."""
        if __debug__:
            # We render the watch variables for the onScreenDebug as soon
            # as we reasonably can before the renderFrame().
            self.onScreenDebug.render()

        if self.recorder:
            self.recorder.recordFrame()

        # Finally, render the frame.
        self.graphicsEngine.renderFrame()
        if self.clusterSyncFlag:
            self.graphicsEngine.syncFrame()
        if self.multiClientSleep:
            time.sleep(0)

        if __debug__:
            # We clear the text buffer for the onScreenDebug as soon
            # as we reasonably can after the renderFrame().
            self.onScreenDebug.clear()

        if self.recorder:
            self.recorder.playFrame()

        if self.mainWinMinimized:
            # If the main window is minimized, slow down the app a bit
            # by sleeping here in igLoop so we don't use all available
            # CPU needlessly.

            # Note: this isn't quite right if multiple windows are
            # open.  We should base this on whether *all* windows are
            # minimized, not just the main window.  But it will do for
            # now until someone complains.
            time.sleep(0.1)

        # Lerp stuff needs this event, and it must be generated in
        # C++, not in Python.
        throw_new_frame()
        return Task.cont
    def __igLoopSync(self, state):
        """Task (sort 50): cluster-synchronized variant of __igLoop.
        Additionally collects cluster data before rendering and holds
        the flip until the cluster's flip command arrives."""
        if __debug__:
            # We render the watch variables for the onScreenDebug as soon
            # as we reasonably can before the renderFrame().
            self.onScreenDebug.render()

        if self.recorder:
            self.recorder.recordFrame()

        self.cluster.collectData()

        # Finally, render the frame.
        self.graphicsEngine.renderFrame()
        if self.clusterSyncFlag:
            self.graphicsEngine.syncFrame()
        if self.multiClientSleep:
            time.sleep(0)

        if __debug__:
            # We clear the text buffer for the onScreenDebug as soon
            # as we reasonably can after the renderFrame().
            self.onScreenDebug.clear()

        if self.recorder:
            self.recorder.playFrame()

        if self.mainWinMinimized:
            # If the main window is minimized, slow down the app a bit
            # by sleeping here in igLoop so we don't use all available
            # CPU needlessly.

            # Note: this isn't quite right if multiple windows are
            # open.  We should base this on whether *all* windows are
            # minimized, not just the main window.  But it will do for
            # now until someone complains.
            time.sleep(0.1)

        # Keep the flip back until every cluster client is ready, so
        # all displays present the same frame together.
        self.graphicsEngine.readyFlip()
        self.cluster.waitForFlipCommand()
        self.graphicsEngine.flipFrame()

        # Lerp stuff needs this event, and it must be generated in
        # C++, not in Python.
        throw_new_frame()
        return Task.cont
    def restart(self, clusterSync=False, cluster=None):
        """(Re)spawns the standard per-frame tasks after a shutdown().
        The sort values fix the frame order: resetPrevTransform (-51),
        dataLoop (-50), ivalLoop (20), collisionLoop (30), optional
        garbageCollectStates (46), igLoop (50), audioLoop (60)."""
        self.shutdown()
        # __resetPrevTransform goes at the very beginning of the frame.
        self.taskMgr.add(
            self.__resetPrevTransform, 'resetPrevTransform', sort = -51)
        # give the dataLoop task a reasonably "early" sort,
        # so that it will get run before most tasks
        self.taskMgr.add(self.__dataLoop, 'dataLoop', sort = -50)
        self.__deadInputs = 0
        # spawn the ivalLoop with a later sort, so that it will
        # run after most tasks, but before igLoop.
        self.taskMgr.add(self.__ivalLoop, 'ivalLoop', sort = 20)
        # make the collisionLoop task run before igLoop,
        # but leave enough room for the app to insert tasks
        # between collisionLoop and igLoop
        self.taskMgr.add(self.__collisionLoop, 'collisionLoop', sort = 30)
        if ConfigVariableBool('garbage-collect-states').getValue():
            self.taskMgr.add(self.__garbageCollectStates, 'garbageCollectStates', sort = 46)
        # give the igLoop task a reasonably "late" sort,
        # so that it will get run after most tasks
        self.cluster = cluster
        # Use the cluster-synchronized render loop only when both
        # requested and a cluster object was supplied.
        if (not clusterSync or (cluster == None)):
            self.taskMgr.add(self.__igLoop, 'igLoop', sort = 50)
        else:
            self.taskMgr.add(self.__igLoopSync, 'igLoop', sort = 50)
        # the audioLoop updates the positions of 3D sounds.
        # as such, it needs to run after the cull traversal in the igLoop.
        self.taskMgr.add(self.__audioLoop, 'audioLoop', sort = 60)
        self.eventMgr.restart()
def shutdown(self):
self.taskMgr.remove('audioLoop')
self.taskMgr.remove('igLoop')
self.taskMgr.remove('shadowCollisionLoop')
self.taskMgr.remove('collisionLoop')
self.taskMgr.remove('dataLoop')
self.taskMgr.remove('resetPrevTransform')
self.taskMgr.remove('ivalLoop')
self.taskMgr.remove('garbageCollectStates')
self.eventMgr.shutdown()
def getBackgroundColor(self, win = None):
"""
Returns the current window background color. This assumes
the window is set up to clear the color each frame (this is
the normal setting).
"""
if win == None:
win = self.win
return VBase4(win.getClearColor())
def setBackgroundColor(self, r = None, g = None, b = None, a = 0.0, win = None):
"""
Sets the window background color to the indicated value.
This assumes the window is set up to clear the color each
frame (this is the normal setting).
The color may be either a VBase3 or a VBase4, or a 3-component
tuple, or the individual r, g, b parameters.
"""
if g != None:
color = VBase4(r, g, b, a)
else:
arg = r
if isinstance(arg, VBase4):
color = arg
else:
color = VBase4(arg[0], arg[1], arg[2], a)
if win == None:
win = self.win
if win:
win.setClearColor(color)
def toggleBackface(self):
if self.backfaceCullingEnabled:
self.backfaceCullingOff()
else:
self.backfaceCullingOn()
def backfaceCullingOn(self):
if not self.backfaceCullingEnabled:
self.render.setTwoSided(0)
self.backfaceCullingEnabled = 1
def backfaceCullingOff(self):
if self.backfaceCullingEnabled:
self.render.setTwoSided(1)
self.backfaceCullingEnabled = 0
def toggleTexture(self):
if self.textureEnabled:
self.textureOff()
else:
self.textureOn()
def textureOn(self):
self.render.clearTexture()
self.textureEnabled = 1
def textureOff(self):
self.render.setTextureOff(100)
self.textureEnabled = 0
def toggleWireframe(self):
if self.wireframeEnabled:
self.wireframeOff()
else:
self.wireframeOn()
def wireframeOn(self):
self.render.setRenderModeWireframe(100)
self.render.setTwoSided(1)
self.wireframeEnabled = 1
def wireframeOff(self):
self.render.clearRenderMode()
render.setTwoSided(not self.backfaceCullingEnabled)
self.wireframeEnabled = 0
def disableMouse(self):
"""
Temporarily disable the mouse control of the camera, either
via the drive interface or the trackball, whichever is
currently in use.
"""
# We don't reparent the drive interface or the trackball;
# whichever one was there before will remain in the data graph
# and active. This way they won't lose button events while
# the mouse is disabled. However, we do move the mouse2cam
# object out of there, so we won't be updating the camera any
# more.
if self.mouse2cam:
self.mouse2cam.detachNode()
def enableMouse(self):
"""
Reverse the effect of a previous call to disableMouse().
useDrive() also implicitly enables the mouse.
"""
if self.mouse2cam:
self.mouse2cam.reparentTo(self.mouseInterface)
def silenceInput(self):
"""
This is a heavy-handed way of temporarily turning off
all inputs. Bring them back with reviveInput().
"""
if not self.__deadInputs:
self.__deadInputs = taskMgr.remove('dataLoop')
def reviveInput(self):
"""
Restores inputs after a previous call to silenceInput.
"""
if self.__deadInputs:
self.eventMgr.doEvents()
self.dgTrav.traverse(self.dataRootNode)
self.eventMgr.eventQueue.clear()
self.taskMgr.add(self.__dataLoop, 'dataLoop', sort = -50)
self.__deadInputs = 0
def setMouseOnNode(self, newNode):
if self.mouse2cam:
self.mouse2cam.node().setNode(newNode)
def changeMouseInterface(self, changeTo):
"""
Switch mouse action
"""
# Get rid of the prior interface:
self.mouseInterface.detachNode()
# Update the mouseInterface to point to the drive
self.mouseInterface = changeTo
self.mouseInterfaceNode = self.mouseInterface.node()
# Hookup the drive to the camera.
if self.mouseWatcher:
self.mouseInterface.reparentTo(self.mouseWatcher)
if self.mouse2cam:
self.mouse2cam.reparentTo(self.mouseInterface)
def useDrive(self):
"""
Switch mouse action to drive mode
"""
if self.drive:
self.changeMouseInterface(self.drive)
# Set the height to a good eyeheight
self.mouseInterfaceNode.reset()
self.mouseInterfaceNode.setZ(4.0)
def useTrackball(self):
"""
Switch mouse action to trackball mode
"""
if self.trackball:
self.changeMouseInterface(self.trackball)
def toggleTexMem(self):
""" Toggles a handy texture memory watcher. See TexMemWatcher
for more information. """
if self.texmem and not self.texmem.cleanedUp:
self.texmem.cleanup()
self.texmem = None
return
# Use importlib to prevent this import from being picked up
# by modulefinder when packaging an application.
TMW = importlib.import_module('direct.showutil.TexMemWatcher')
self.texmem = TMW.TexMemWatcher()
    def toggleShowVertices(self):
        """ Toggles a mode that visualizes vertex density per screen
        area.

        Implemented as an extra DisplayRegion whose camera shares the
        main lens but renders everything as translucent additive
        points, so dense geometry shows up brighter. """
        if self.showVertices:
            # Clean up the old mode.
            self.showVertices.node().setActive(0)
            dr = self.showVertices.node().getDisplayRegion(0)
            self.win.removeDisplayRegion(dr)
            self.showVertices.removeNode()
            self.showVertices = None
            return

        # Sort 1000 draws this region after the normal scene.
        dr = self.win.makeDisplayRegion()
        dr.setSort(1000)

        cam = Camera('showVertices')
        cam.setLens(self.camLens)

        # Set up a funny state to render only vertices.  Everything is
        # forced with a very high override so scene state cannot win.
        override = 100000
        t = NodePath('t')
        t.setColor(1, 0, 1, 0.02, override)
        t.setColorScale(1, 1, 1, 1, override)
        t.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOneMinusIncomingAlpha), override)
        t.setAttrib(RenderModeAttrib.make(RenderModeAttrib.MPoint, 10), override)
        t.setTwoSided(True, override)
        t.setBin('fixed', 0, override)
        t.setDepthTest(False, override)
        t.setDepthWrite(False, override)
        t.setLightOff(override)
        t.setShaderOff(override)
        t.setFogOff(override)
        t.setAttrib(AntialiasAttrib.make(AntialiasAttrib.MNone), override)
        t.setAttrib(RescaleNormalAttrib.make(RescaleNormalAttrib.MNone), override)
        t.setTextureOff(override)

        # Make the spots round, so there's less static in the display.
        # This forces software point generation on many drivers, so
        # it's not on by default.
        if self.config.GetBool('round-show-vertices', False):
            spot = PNMImage(256, 256, 1)
            spot.renderSpot((1, 1, 1, 1), (0, 0, 0, 0), 0.8, 1)
            tex = Texture('spot')
            tex.load(spot)
            tex.setFormat(tex.FAlpha)
            t.setTexture(tex, override)
            t.setAttrib(TexGenAttrib.make(TextureStage.getDefault(), TexGenAttrib.MPointSprite), override)

        cam.setInitialState(t.getState())
        # Render all geometry regardless of the normal camera mask.
        cam.setCameraMask(~PandaNode.getOverallBit())
        self.showVertices = self.cam.attachNewNode(cam)
        dr.setCamera(self.showVertices)
def oobe(self, cam = None):
"""
Enable a special "out-of-body experience" mouse-interface
mode. This can be used when a "god" camera is needed; it
moves the camera node out from under its normal node and sets
the world up in trackball state. Button events are still sent
to the normal mouse action node (e.g. the DriveInterface), and
mouse events, if needed, may be sent to the normal node by
holding down the Control key.
This is different than useTrackball(), which simply changes
the existing mouse action to a trackball interface. In fact,
OOBE mode doesn't care whether useDrive() or useTrackball() is
in effect; it just temporarily layers a new trackball
interface on top of whatever the basic interface is. You can
even switch between useDrive() and useTrackball() while OOBE
mode is in effect.
This is a toggle; the second time this function is called, it
disables the mode.
"""
if cam is None:
cam = self.cam
# If oobeMode was never set, set it to false and create the
# structures we need to implement OOBE.
if not hasattr(self, 'oobeMode'):
self.oobeMode = 0
self.oobeCamera = self.hidden.attachNewNode('oobeCamera')
self.oobeCameraTrackball = self.oobeCamera.attachNewNode('oobeCameraTrackball')
self.oobeLens = PerspectiveLens()
self.oobeLens.setAspectRatio(self.getAspectRatio())
self.oobeLens.setNearFar(0.1, 10000.0)
self.oobeLens.setMinFov(40)
self.oobeTrackball = NodePath(Trackball('oobeTrackball'))
self.oobe2cam = self.oobeTrackball.attachNewNode(Transform2SG('oobe2cam'))
self.oobe2cam.node().setNode(self.oobeCameraTrackball.node())
self.oobeVis = loader.loadModel('models/misc/camera', okMissing = True)
if not self.oobeVis:
# Sometimes we have default-model-extension set to
# egg, but the file might be a bam file.
self.oobeVis = loader.loadModel('models/misc/camera.bam', okMissing = True)
if not self.oobeVis:
self.oobeVis = NodePath('oobeVis')
self.oobeVis.node().setFinal(1)
self.oobeVis.setLightOff(1)
self.oobeCullFrustum = None
self.accept('oobe-down', self.__oobeButton, extraArgs = [''])
self.accept('oobe-repeat', self.__oobeButton, extraArgs = ['-repeat'])
self.accept('oobe-up', self.__oobeButton, extraArgs = ['-up'])
if self.oobeMode:
# Disable OOBE mode.
if self.oobeCullFrustum != None:
# First, disable OOBE cull mode.
self.oobeCull(cam = cam)
if self.oobeVis:
self.oobeVis.reparentTo(self.hidden)
# Restore the mouse interface node, and remove the oobe
# trackball from the data path.
self.mouseInterfaceNode.clearButton(KeyboardButton.shift())
self.oobeTrackball.detachNode()
bt = self.buttonThrowers[0].node()
bt.setSpecificFlag(1)
bt.setButtonDownEvent('')
bt.setButtonRepeatEvent('')
bt.setButtonUpEvent('')
cam.reparentTo(self.camera)
#if cam == self.cam:
# self.camNode.setLens(self.camLens)
self.oobeCamera.reparentTo(self.hidden)
self.oobeMode = 0
bboard.post('oobeEnabled', False)
else:
bboard.post('oobeEnabled', True)
try:
cameraParent = localAvatar
except:
# Make oobeCamera be a sibling of wherever camera is now.
cameraParent = self.camera.getParent()
self.oobeCamera.reparentTo(cameraParent)
self.oobeCamera.clearMat()
# Make the regular MouseInterface node respond only when
# the shift button is pressed. And the oobe node will
# respond only when shift is *not* pressed.
self.mouseInterfaceNode.requireButton(KeyboardButton.shift(), True)
self.oobeTrackball.node().requireButton(KeyboardButton.shift(), False)
self.oobeTrackball.reparentTo(self.mouseWatcher)
# Set our initial OOB position to be just behind the camera.
mat = Mat4.translateMat(0, -10, 3) * self.camera.getMat(cameraParent)
mat.invertInPlace()
self.oobeTrackball.node().setMat(mat)
cam.reparentTo(self.oobeCameraTrackball)
# Temporarily disable button events by routing them
# through the oobe filters.
bt = self.buttonThrowers[0].node()
bt.setSpecificFlag(0)
bt.setButtonDownEvent('oobe-down')
bt.setButtonRepeatEvent('oobe-repeat')
bt.setButtonUpEvent('oobe-up')
# Don't change the camera lens--keep it with the original lens.
#if cam == self.cam:
# self.camNode.setLens(self.oobeLens)
if self.oobeVis:
self.oobeVis.reparentTo(self.camera)
self.oobeMode = 1
def __oobeButton(self, suffix, button):
if button.startswith('mouse'):
# Eat mouse buttons.
return
# Transmit other buttons.
messenger.send(button + suffix)
def oobeCull(self, cam = None):
"""
While in OOBE mode (see above), cull the viewing frustum as if
it were still attached to our original camera. This allows us
to visualize the effectiveness of our bounding volumes.
"""
if cam is None:
cam = self.cam
# First, make sure OOBE mode is enabled.
if not getattr(self, 'oobeMode', False):
self.oobe(cam = cam)
if self.oobeCullFrustum == None:
# Enable OOBE culling.
pnode = LensNode('oobeCull')
pnode.setLens(self.camLens)
pnode.showFrustum()
self.oobeCullFrustum = self.camera.attachNewNode(pnode)
# Tell the camera to cull from here instead of its own
# origin.
for c in self.camList:
c.node().setCullCenter(self.oobeCullFrustum)
if cam.node().isOfType(Camera):
cam.node().setCullCenter(self.oobeCullFrustum)
for c in cam.findAllMatches('**/+Camera'):
c.node().setCullCenter(self.oobeCullFrustum)
else:
# Disable OOBE culling.
for c in self.camList:
c.node().setCullCenter(NodePath())
if cam.node().isOfType(Camera):
cam.node().setCullCenter(self.oobeCullFrustum)
for c in cam.findAllMatches('**/+Camera'):
c.node().setCullCenter(NodePath())
self.oobeCullFrustum.removeNode()
self.oobeCullFrustum = None
def showCameraFrustum(self):
# Create a visible representation of the frustum.
self.removeCameraFrustum()
geom = self.camLens.makeGeometry()
if geom != None:
gn = GeomNode('frustum')
gn.addGeom(geom)
self.camFrustumVis = self.camera.attachNewNode(gn)
def removeCameraFrustum(self):
if self.camFrustumVis:
self.camFrustumVis.removeNode()
def screenshot(self, namePrefix = 'screenshot',
defaultFilename = 1, source = None,
imageComment=""):
""" Captures a screenshot from the main window or from the
specified window or Texture and writes it to a filename in the
current directory (or to a specified directory).
If defaultFilename is True, the filename is synthesized by
appending namePrefix to a default filename suffix (including
the filename extension) specified in the Config variable
screenshot-filename. Otherwise, if defaultFilename is False,
the entire namePrefix is taken to be the filename to write,
and this string should include a suitable filename extension
that will be used to determine the type of image file to
write.
Normally, the source is a GraphicsWindow, GraphicsBuffer or
DisplayRegion. If a Texture is supplied instead, it must have
a ram image (that is, if it was generated by
makeTextureBuffer() or makeCubeMap(), the parameter toRam
should have been set true). If it is a cube map texture as
generated by makeCubeMap(), namePrefix should contain the hash
mark ('#') character.
The return value is the filename if successful, or None if
there is a problem.
"""
if source == None:
source = self.win
if defaultFilename:
filename = GraphicsOutput.makeScreenshotFilename(namePrefix)
else:
filename = Filename(namePrefix)
if isinstance(source, Texture):
if source.getZSize() > 1:
saved = source.write(filename, 0, 0, 1, 0)
else:
saved = source.write(filename)
else:
saved = source.saveScreenshot(filename, imageComment)
if saved:
# Announce to anybody that a screenshot has been taken
messenger.send('screenshot', [filename])
return filename
return None
    def saveCubeMap(self, namePrefix = 'cube_map_#.png',
                    defaultFilename = 0, source = None,
                    camera = None, size = 128,
                    cameraMask = PandaNode.getAllCameraMask(),
                    sourceLens = None):
        """
        Similar to screenshot(), this sets up a temporary cube map
        Texture which it uses to take a series of six snapshots of the
        current scene, one in each of the six cube map directions.
        This requires rendering a new frame.

        Unlike screenshot(), source may only be a GraphicsWindow,
        GraphicsBuffer, or DisplayRegion; it may not be a Texture.

        camera should be the node to which the cubemap cameras will be
        parented.  The default is the camera associated with source,
        if source is a DisplayRegion, or base.camera otherwise.

        The return value is the filename if successful, or None if
        there is a problem.
        """
        if source == None:
            source = self.win

        if camera == None:
            if hasattr(source, "getCamera"):
                camera = source.getCamera()
            if camera == None:
                camera = self.camera

        if sourceLens == None:
            sourceLens = self.camLens

        # A DisplayRegion reports its parent window here.
        if hasattr(source, "getWindow"):
            source = source.getWindow()

        # makeCubeMap builds an offscreen buffer plus a rig of six
        # cameras (toRam=1 so the result can be written to disk).
        rig = NodePath(namePrefix)
        buffer = source.makeCubeMap(namePrefix, size, rig, cameraMask, 1)
        if buffer == None:
            raise Exception("Could not make cube map.")

        # Set the near and far planes from the default lens.
        lens = rig.find('**/+Camera').node().getLens()
        lens.setNearFar(sourceLens.getNear(), sourceLens.getFar())

        # Now render a frame to fill up the texture.
        rig.reparentTo(camera)
        self.graphicsEngine.openWindows()
        self.graphicsEngine.renderFrame()
        self.graphicsEngine.renderFrame()
        self.graphicsEngine.syncFrame()

        # screenshot() handles writing the six faces ('#' in the
        # prefix expands to the face index).
        tex = buffer.getTexture()
        saved = self.screenshot(namePrefix = namePrefix,
                                defaultFilename = defaultFilename,
                                source = tex)

        self.graphicsEngine.removeWindow(buffer)
        rig.removeNode()

        return saved
    def saveSphereMap(self, namePrefix = 'spheremap.png',
                      defaultFilename = 0, source = None,
                      camera = None, size = 256,
                      cameraMask = PandaNode.getAllCameraMask(),
                      numVertices = 1000, sourceLens = None):
        """
        This works much like saveCubeMap(), and uses the graphics
        API's hardware cube-mapping ability to get a 360-degree view
        of the world.  But then it converts the six cube map faces
        into a single fisheye texture, suitable for applying as a
        static environment map (sphere map).

        For eye-relative static environment maps, sphere maps are
        often preferable to cube maps because they require only a
        single texture and because they are supported on a broader
        range of hardware.

        The return value is the filename if successful, or None if
        there is a problem.
        """
        if source == None:
            source = self.win

        if camera == None:
            if hasattr(source, "getCamera"):
                camera = source.getCamera()
            if camera == None:
                camera = self.camera

        if sourceLens == None:
            sourceLens = self.camLens

        # A DisplayRegion reports its parent window here.
        if hasattr(source, "getWindow"):
            source = source.getWindow()

        # First, make an offscreen buffer to convert the cube map to a
        # sphere map.  We make it first so we can guarantee the
        # rendering order for the cube map.
        toSphere = source.makeTextureBuffer(namePrefix, size, size,
                                            Texture(), 1)

        # Now make the cube map buffer.
        rig = NodePath(namePrefix)
        buffer = toSphere.makeCubeMap(namePrefix, size, rig, cameraMask, 0)
        if buffer == None:
            self.graphicsEngine.removeWindow(toSphere)
            raise Exception("Could not make cube map.")

        # Set the near and far planes from the default lens.
        lens = rig.find('**/+Camera').node().getLens()
        lens.setNearFar(sourceLens.getNear(), sourceLens.getFar())

        # Set up the scene to convert the cube map.  It's just a
        # simple scene, with only the FisheyeMaker object in it.
        dr = toSphere.makeMonoDisplayRegion()
        camNode = Camera('camNode')
        lens = OrthographicLens()
        lens.setFilmSize(2, 2)
        lens.setNearFar(-1000, 1000)
        camNode.setLens(lens)
        root = NodePath('buffer')
        cam = root.attachNewNode(camNode)
        dr.setCamera(cam)

        # The FisheyeMaker card remaps the cube map onto a fisheye
        # projection; setReflection(1) mirrors it for environment use.
        fm = FisheyeMaker('card')
        fm.setNumVertices(numVertices)
        fm.setSquareInscribed(1, 1.1)
        fm.setReflection(1)
        card = root.attachNewNode(fm.generate())
        card.setTexture(buffer.getTexture())

        # Now render a frame.  This will render out the cube map and
        # then apply it to the card in the toSphere buffer.
        rig.reparentTo(camera)
        self.graphicsEngine.openWindows()
        self.graphicsEngine.renderFrame()
        # One more frame for luck.
        self.graphicsEngine.renderFrame()
        self.graphicsEngine.syncFrame()

        saved = self.screenshot(namePrefix = namePrefix,
                                defaultFilename = defaultFilename,
                                source = toSphere.getTexture())

        self.graphicsEngine.removeWindow(buffer)
        self.graphicsEngine.removeWindow(toSphere)
        rig.removeNode()

        return saved
def movie(self, namePrefix = 'movie', duration = 1.0, fps = 30,
format = 'png', sd = 4, source = None):
"""
Spawn a task to capture a movie using the screenshot function.
- namePrefix will be used to form output file names (can include
path information (e.g. '/i/beta/frames/myMovie')
- duration is the length of the movie in seconds
- fps is the frame rate of the resulting movie
- format specifies output file format (e.g. png, bmp)
- sd specifies number of significant digits for frame count in the
output file name (e.g. if sd = 4, movie_0001.png)
- source is the Window, Buffer, DisplayRegion, or Texture from which
to save the resulting images. The default is the main window.
The task is returned, so that it can be awaited.
"""
globalClock.setMode(ClockObject.MNonRealTime)
globalClock.setDt(1.0/float(fps))
t = self.taskMgr.add(self._movieTask, namePrefix + '_task')
t.frameIndex = 0 # Frame 0 is not captured.
t.numFrames = int(duration * fps)
t.source = source
t.outputString = namePrefix + '_%0' + repr(sd) + 'd.' + format
t.setUponDeath(lambda state: globalClock.setMode(ClockObject.MNormal))
return t
def _movieTask(self, state):
if state.frameIndex != 0:
frameName = state.outputString % state.frameIndex
self.notify.info("Capturing frame: " + frameName)
self.screenshot(namePrefix = frameName, defaultFilename = 0,
source = state.source)
state.frameIndex += 1
if state.frameIndex > state.numFrames:
return Task.done
else:
return Task.cont
    def windowEvent(self, win):
        """Default handler for events on the main window.

        Reacts to property changes on self.win: exits when the window is
        closed, tracks foreground and minimized state (sending the
        'PandaPaused'/'PandaRestarted' messenger events), refreshes the
        aspect ratio, and rescales pixel2d/pixel2dp to the new window size.
        Events for other windows are ignored.
        """
        if win != self.win:
            # This event isn't about our window.
            return
        properties = win.getProperties()
        if properties != self.__prevWindowProperties:
            self.__prevWindowProperties = properties
            self.notify.debug("Got window event: %s" % (repr(properties)))
            if not properties.getOpen():
                # If the user closes the main window, we should exit.
                self.notify.info("User closed main window.")
                if __debug__:
                    if self.__autoGarbageLogging:
                        GarbageReport.b_checkForGarbageLeaks()
                self.userExit()
            if properties.getForeground() and not self.mainWinForeground:
                self.mainWinForeground = 1
            elif not properties.getForeground() and self.mainWinForeground:
                self.mainWinForeground = 0
                if __debug__:
                    # Window just lost focus; optionally log garbage leaks.
                    if self.__autoGarbageLogging:
                        GarbageReport.b_checkForGarbageLeaks()
            if properties.getMinimized() and not self.mainWinMinimized:
                # If the main window is minimized, throw an event to
                # stop the music.
                self.mainWinMinimized = 1
                messenger.send('PandaPaused')
            elif not properties.getMinimized() and self.mainWinMinimized:
                # If the main window is restored, throw an event to
                # restart the music.
                self.mainWinMinimized = 0
                messenger.send('PandaRestarted')
            # If we have not forced the aspect ratio, let's see if it has
            # changed and update the camera lenses and aspect2d parameters
            self.adjustWindowAspectRatio(self.getAspectRatio())
            # Rescale pixel2d so one unit maps to one pixel.  The Sbs sizes
            # presumably refer to the side-by-side-stereo left-eye region —
            # TODO confirm against GraphicsWindow docs.
            if win.hasSize() and win.getSbsLeftYSize() != 0:
                self.pixel2d.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())
                if self.wantRender2dp:
                    self.pixel2dp.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())
            else:
                xsize, ysize = self.getSize()
                if xsize > 0 and ysize > 0:
                    self.pixel2d.setScale(2.0 / xsize, 1.0, 2.0 / ysize)
                    if self.wantRender2dp:
                        self.pixel2dp.setScale(2.0 / xsize, 1.0, 2.0 / ysize)
    def adjustWindowAspectRatio(self, aspectRatio):
        """ This function is normally called internally by
        windowEvent(), but it may also be called to explicitly adjust
        the aspect ratio of the render/render2d DisplayRegion, by a
        class that has redefined these. """
        if self.__configAspectRatio:
            # A config-forced aspect ratio always wins over the computed one.
            aspectRatio = self.__configAspectRatio
        if aspectRatio != self.__oldAspectRatio:
            self.__oldAspectRatio = aspectRatio
            # Fix up anything that depends on the aspectRatio
            if self.camLens:
                self.camLens.setAspectRatio(aspectRatio)
            if aspectRatio < 1:
                # If the window is TALL, lets expand the top and bottom
                self.aspect2d.setScale(1.0, aspectRatio, aspectRatio)
                self.a2dTop = 1.0 / aspectRatio
                self.a2dBottom = - 1.0 / aspectRatio
                self.a2dLeft = -1
                self.a2dRight = 1.0
                # Don't forget 2dp
                if self.wantRender2dp:
                    self.aspect2dp.setScale(1.0, aspectRatio, aspectRatio)
                    self.a2dpTop = 1.0 / aspectRatio
                    self.a2dpBottom = - 1.0 / aspectRatio
                    self.a2dpLeft = -1
                    self.a2dpRight = 1.0
            else:
                # If the window is WIDE, lets expand the left and right
                self.aspect2d.setScale(1.0 / aspectRatio, 1.0, 1.0)
                self.a2dTop = 1.0
                self.a2dBottom = -1.0
                self.a2dLeft = -aspectRatio
                self.a2dRight = aspectRatio
                # Don't forget 2dp
                if self.wantRender2dp:
                    self.aspect2dp.setScale(1.0 / aspectRatio, 1.0, 1.0)
                    self.a2dpTop = 1.0
                    self.a2dpBottom = -1.0
                    self.a2dpLeft = -aspectRatio
                    self.a2dpRight = aspectRatio
            # Reposition the aspect2d marker nodes so they stay glued to the
            # (possibly new) edges and corners of the visible frame.
            self.a2dTopCenter.setPos(0, 0, self.a2dTop)
            self.a2dTopCenterNs.setPos(0, 0, self.a2dTop)
            self.a2dBottomCenter.setPos(0, 0, self.a2dBottom)
            self.a2dBottomCenterNs.setPos(0, 0, self.a2dBottom)
            self.a2dLeftCenter.setPos(self.a2dLeft, 0, 0)
            self.a2dLeftCenterNs.setPos(self.a2dLeft, 0, 0)
            self.a2dRightCenter.setPos(self.a2dRight, 0, 0)
            self.a2dRightCenterNs.setPos(self.a2dRight, 0, 0)
            self.a2dTopLeft.setPos(self.a2dLeft, 0, self.a2dTop)
            self.a2dTopLeftNs.setPos(self.a2dLeft, 0, self.a2dTop)
            self.a2dTopRight.setPos(self.a2dRight, 0, self.a2dTop)
            self.a2dTopRightNs.setPos(self.a2dRight, 0, self.a2dTop)
            self.a2dBottomLeft.setPos(self.a2dLeft, 0, self.a2dBottom)
            self.a2dBottomLeftNs.setPos(self.a2dLeft, 0, self.a2dBottom)
            self.a2dBottomRight.setPos(self.a2dRight, 0, self.a2dBottom)
            self.a2dBottomRightNs.setPos(self.a2dRight, 0, self.a2dBottom)
            # Reposition the aspect2dp marker nodes
            if self.wantRender2dp:
                self.a2dpTopCenter.setPos(0, 0, self.a2dpTop)
                self.a2dpBottomCenter.setPos(0, 0, self.a2dpBottom)
                self.a2dpLeftCenter.setPos(self.a2dpLeft, 0, 0)
                self.a2dpRightCenter.setPos(self.a2dpRight, 0, 0)
                self.a2dpTopLeft.setPos(self.a2dpLeft, 0, self.a2dpTop)
                self.a2dpTopRight.setPos(self.a2dpRight, 0, self.a2dpTop)
                self.a2dpBottomLeft.setPos(self.a2dpLeft, 0, self.a2dpBottom)
                self.a2dpBottomRight.setPos(self.a2dpRight, 0, self.a2dpBottom)
            # If anybody needs to update their GUI, put a callback on this event
            messenger.send("aspectRatioChanged")
def userExit(self):
# The user has requested we exit the program. Deal with this.
if self.exitFunc:
self.exitFunc()
self.notify.info("Exiting ShowBase.")
self.finalizeExit()
    def finalizeExit(self):
        """Terminate the process by raising SystemExit via sys.exit()."""
        sys.exit()
# [gjeon] start wxPython
def startWx(self, fWantWx = True):
fWantWx = bool(fWantWx)
if self.wantWx != fWantWx:
self.wantWx = fWantWx
if self.wantWx:
self.spawnWxLoop()
    def spawnWxLoop(self):
        """ Call this method to hand the main loop over to wxPython.
        This sets up a wxTimer callback so that Panda still gets
        updated, but wxPython owns the main loop (which seems to make
        it happier than the other way around). """
        if self.wxAppCreated:
            # Don't do this twice.
            return
        init_app_for_gui()
        # Use importlib to prevent this import from being picked up
        # by modulefinder when packaging an application.
        wx = importlib.import_module('wx')
        if not self.wxApp:
            # Create a new base.wxApp.
            # NOTE(review): wx.PySimpleApp was removed in wxPython Phoenix;
            # presumably this path targets classic wxPython -- confirm.
            self.wxApp = wx.PySimpleApp(redirect = False)
        if ConfigVariableBool('wx-main-loop', True):
            # Put wxPython in charge of the main loop. It really
            # seems to like this better; some features of wx don't
            # work properly unless this is true.
            # Set a timer to run the Panda frame 60 times per second.
            wxFrameRate = ConfigVariableDouble('wx-frame-rate', 60.0)
            self.wxTimer = wx.Timer(self.wxApp)
            self.wxTimer.Start(1000.0 / wxFrameRate.getValue())
            self.wxApp.Bind(wx.EVT_TIMER, self.__wxTimerCallback)
            # wx is now the main loop, not us any more.  Redirect every
            # public entry point that used to start Panda's loop.
            self.run = self.wxRun
            self.taskMgr.run = self.wxRun
            builtins.run = self.wxRun
            if self.appRunner:
                self.appRunner.run = self.wxRun
        else:
            # Leave Panda in charge of the main loop. This is
            # friendlier for IDE's and interactive editing in general.
            def wxLoop(task):
                # First we need to ensure that the OS message queue is
                # processed.
                self.wxApp.Yield()
                # Now do all the wxPython events waiting on this frame.
                while self.wxApp.Pending():
                    self.wxApp.Dispatch()
                return task.again
            self.taskMgr.add(wxLoop, 'wxLoop')
        self.wxAppCreated = True
def __wxTimerCallback(self, event):
if Thread.getCurrentThread().getCurrentTask():
# This happens when the wxTimer expires while igLoop is
# rendering. Ignore it.
return
self.taskMgr.step()
def wxRun(self):
""" This method replaces base.run() after we have called
spawnWxLoop(). Since at this point wxPython now owns the main
loop, this method is a call to wxApp.MainLoop(). """
if Thread.getCurrentThread().getCurrentTask():
# This happens in the p3d environment during startup.
# Ignore it.
return
self.wxApp.MainLoop()
def startTk(self, fWantTk = True):
fWantTk = bool(fWantTk)
if self.wantTk != fWantTk:
self.wantTk = fWantTk
if self.wantTk:
self.spawnTkLoop()
    def spawnTkLoop(self):
        """ Call this method to hand the main loop over to Tkinter.
        This sets up a timer callback so that Panda still gets
        updated, but Tkinter owns the main loop (which seems to make
        it happier than the other way around). """
        if self.tkRootCreated:
            # Don't do this twice.
            return
        # Use importlib to prevent this import from being picked up
        # by modulefinder when packaging an application.
        tkinter = importlib.import_module('_tkinter')
        Pmw = importlib.import_module('Pmw')
        # Create a new Tk root.
        if not self.tkRoot:
            self.tkRoot = Pmw.initialise()
            builtins.tkroot = self.tkRoot
        init_app_for_gui()
        if ConfigVariableBool('tk-main-loop', True):
            # Put Tkinter in charge of the main loop. It really
            # seems to like this better; the GUI otherwise becomes
            # largely unresponsive on Mac OS X unless this is true.
            # Set a timer to run the Panda frame 60 times per second.
            tkFrameRate = ConfigVariableDouble('tk-frame-rate', 60.0)
            self.tkDelay = int(1000.0 / tkFrameRate.getValue())
            self.tkRoot.after(self.tkDelay, self.__tkTimerCallback)
            # Tkinter is now the main loop, not us any more.  Redirect
            # every public entry point that used to start Panda's loop.
            self.run = self.tkRun
            self.taskMgr.run = self.tkRun
            builtins.run = self.tkRun
            if self.appRunner:
                self.appRunner.run = self.tkRun
        else:
            # Leave Panda in charge of the main loop. This is
            # friendlier for IDE's and interactive editing in general.
            def tkLoop(task):
                # Do all the tkinter events waiting on this frame
                # dooneevent will return 0 if there are no more events
                # waiting or 1 if there are still more.
                # DONT_WAIT tells tkinter not to block waiting for events
                while self.tkRoot.dooneevent(tkinter.ALL_EVENTS | tkinter.DONT_WAIT):
                    pass
                return task.again
            self.taskMgr.add(tkLoop, 'tkLoop')
        self.tkRootCreated = True
def __tkTimerCallback(self):
if not Thread.getCurrentThread().getCurrentTask():
self.taskMgr.step()
self.tkRoot.after(self.tkDelay, self.__tkTimerCallback)
def tkRun(self):
""" This method replaces base.run() after we have called
spawnTkLoop(). Since at this point Tkinter now owns the main
loop, this method is a call to tkRoot.mainloop(). """
if Thread.getCurrentThread().getCurrentTask():
# This happens in the p3d environment during startup.
# Ignore it.
return
self.tkRoot.mainloop()
def startDirect(self, fWantDirect = 1, fWantTk = 1, fWantWx = 0):
self.startTk(fWantTk)
self.startWx(fWantWx)
self.wantDirect = fWantDirect
if self.wantDirect:
# Use importlib to prevent this import from being picked up
# by modulefinder when packaging an application.
DirectSession = importlib.import_module('direct.directtools.DirectSession')
self.direct = DirectSession.DirectSession()
self.direct.enable()
builtins.direct = self.direct
else:
builtins.direct = self.direct = None
    def getRepository(self):
        """Placeholder accessor; this base implementation always returns
        None.  Subclasses may override it to supply a repository."""
        return None
    def getAxes(self):
        """Load and return the xyz-axis model."""
        return loader.loadModel("models/misc/xyzAxis.bam")
def __doStartDirect(self):
if self.__directStarted:
return
self.__directStarted = False
# Start Tk, Wx and DIRECT if specified by Config.prc
fTk = self.config.GetBool('want-tk', 0)
fWx = self.config.GetBool('want-wx', 0)
# Start DIRECT if specified in Config.prc or in cluster mode
fDirect = (self.config.GetBool('want-directtools', 0) or
(self.config.GetString("cluster-mode", '') != ''))
# Set fWantTk to 0 to avoid starting Tk with this call
self.startDirect(fWantDirect = fDirect, fWantTk = fTk, fWantWx = fWx)
def run(self):
""" This method runs the TaskManager when self.appRunner is
None, which is to say, when we are not running from within a
p3d file. When we *are* within a p3d file, the Panda
runtime has to be responsible for running the main loop, so
we can't allow the application to do it. """
if self.appRunner is None or self.appRunner.dummy or \
(self.appRunner.interactiveConsole and not self.appRunner.initialAppImport):
self.taskMgr.run()
# Snake-case aliases, for people who prefer these. We're in the process
# of migrating everyone to use the snake-case alternatives.
make_default_pipe = makeDefaultPipe
make_module_pipe = makeModulePipe
make_all_pipes = makeAllPipes
open_window = openWindow
close_window = closeWindow
open_default_window = openDefaultWindow
open_main_window = openMainWindow
set_sleep = setSleep
set_frame_rate_meter = setFrameRateMeter
set_scene_graph_analyzer_meter = setSceneGraphAnalyzerMeter
setup_window_controls = setupWindowControls
setup_render = setupRender
setup_render2d = setupRender2d
setup_render2dp = setupRender2dp
set_aspect_ratio = setAspectRatio
get_aspect_ratio = getAspectRatio
get_size = getSize
make_camera = makeCamera
make_camera2d = makeCamera2d
make_camera2dp = makeCamera2dp
setup_data_graph = setupDataGraph
setup_mouse = setupMouse
setup_mouse_cb = setupMouseCB
enable_software_mouse_pointer = enableSoftwareMousePointer
detach_input_device = detachInputDevice
attach_input_device = attachInputDevice
add_angular_integrator = addAngularIntegrator
enable_particles = enableParticles
disable_particles = disableParticles
toggle_particles = toggleParticles
create_stats = createStats
add_sfx_manager = addSfxManager
enable_music = enableMusic
enable_sound_effects = enableSoundEffects
disable_all_audio = disableAllAudio
enable_all_audio = enableAllAudio
init_shadow_trav = initShadowTrav
get_background_color = getBackgroundColor
set_background_color = setBackgroundColor
toggle_backface = toggleBackface
backface_culling_on = backfaceCullingOn
backface_culling_off = backfaceCullingOff
toggle_texture = toggleTexture
texture_on = textureOn
texture_off = textureOff
toggle_wireframe = toggleWireframe
wireframe_on = wireframeOn
wireframe_off = wireframeOff
disable_mouse = disableMouse
enable_mouse = enableMouse
silence_input = silenceInput
revive_input = reviveInput
set_mouse_on_node = setMouseOnNode
change_mouse_interface = changeMouseInterface
use_drive = useDrive
use_trackball = useTrackball
toggle_tex_mem = toggleTexMem
toggle_show_vertices = toggleShowVertices
oobe_cull = oobeCull
show_camera_frustum = showCameraFrustum
remove_camera_frustum = removeCameraFrustum
save_cube_map = saveCubeMap
save_sphere_map = saveSphereMap
start_wx = startWx
start_tk = startTk
start_direct = startDirect
# A class to encapsulate information necessary for multiwindow support.
class WindowControls:
    """Bundles the per-window objects needed for multiwindow support:
    the window itself, its 3-D and 2-D cameras, mouse watcher/keyboard
    nodes, a close callback, and an optional grid."""

    def __init__(
            self, win, cam=None, camNode=None, cam2d=None, mouseWatcher=None,
            mouseKeyboard=None, closeCmd=lambda: 0, grid=None):
        self.win = win
        self.camera = cam
        # Derive the camera node from the camera NodePath unless an
        # explicit node was supplied.
        if camNode is None and cam is not None:
            camNode = cam.node()
        self.camNode = camNode
        self.camera2d = cam2d
        self.mouseWatcher = mouseWatcher
        self.mouseKeyboard = mouseKeyboard
        self.closeCommand = closeCmd
        self.grid = grid

    def __str__(self):
        # One "name = value" line per field, each newline-terminated.
        parts = [
            "window = " + str(self.win),
            "camera = " + str(self.camera),
            "camNode = " + str(self.camNode),
            "camera2d = " + str(self.camera2d),
            "mouseWatcher = " + str(self.mouseWatcher),
            "mouseAndKeyboard = " + str(self.mouseKeyboard),
        ]
        return "\n".join(parts) + "\n"
| {
"content_hash": "d48ac051e4089c1af73842e98390e8c3",
"timestamp": "",
"source": "github",
"line_count": 3205,
"max_line_length": 149,
"avg_line_length": 39.3984399375975,
"alnum_prop": 0.6127011530663964,
"repo_name": "chandler14362/panda3d",
"id": "bff3b4652c4a7c3654a53e60efa5ca4b5b798e61",
"size": "126272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "direct/src/showbase/ShowBase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "5288285"
},
{
"name": "C++",
"bytes": "27114399"
},
{
"name": "Emacs Lisp",
"bytes": "229264"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3113"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "61448"
},
{
"name": "Nemerle",
"bytes": "3001"
},
{
"name": "Objective-C",
"bytes": "27625"
},
{
"name": "Objective-C++",
"bytes": "258129"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl 6",
"bytes": "27055"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5568942"
},
{
"name": "R",
"bytes": "421"
},
{
"name": "Roff",
"bytes": "3432"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""
Exceptions for pyproj
"""
class ProjError(RuntimeError):
    """Raised when a Proj error occurs."""

    # Most recent error message reported by the PROJ library, if any.
    internal_proj_error = None

    def __init__(self, error_message: str) -> None:
        internal = self.internal_proj_error
        if internal is not None:
            # Append the library-level detail, then reset it so it is not
            # attached to unrelated future errors.
            error_message = f"{error_message}: (Internal Proj Error: {internal})"
            ProjError.clear()
        super().__init__(error_message)

    @staticmethod
    def clear() -> None:
        """
        This will clear the internal PROJ error message.
        """
        ProjError.internal_proj_error = None
class CRSError(ProjError):
    """Raised when a CRS (coordinate reference system) error occurs."""
class GeodError(RuntimeError):
    """Raised when a Geod (geodesic computation) error occurs."""
class DataDirError(RuntimeError):
    """Raised when the data directory was not found."""
| {
"content_hash": "4294080b464c57f69b901fbbc8704092",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 23.61111111111111,
"alnum_prop": 0.6011764705882353,
"repo_name": "ocefpaf/pyproj",
"id": "906435407b4ff5476f02a5aa4e931e95c1310758",
"size": "850",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pyproj/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "194556"
},
{
"name": "Makefile",
"bytes": "2671"
},
{
"name": "Python",
"bytes": "649387"
},
{
"name": "Shell",
"bytes": "10347"
}
],
"symlink_target": ""
} |
'''Michael Lange <klappnase (at) freakmail (dot) de>
The ToolTip class provides a flexible tooltip widget for Tkinter; it is based on IDLE's ToolTip
module which unfortunately seems to be broken (at least the version I saw).
INITIALIZATION OPTIONS:
anchor :         where the text should be positioned inside the widget, must be one of "n", "s", "e", "w", "nw" and so on;
                 default is "center"
bd : borderwidth of the widget; default is 1 (NOTE: don't use "borderwidth" here)
bg : background color to use for the widget; default is "lightyellow" (NOTE: don't use "background")
delay : time in ms that it takes for the widget to appear on the screen when the mouse pointer has
entered the parent widget; default is 1500 (XXX: changed to 800)
fg : foreground (i.e. text) color to use; default is "black" (NOTE: don't use "foreground")
follow_mouse : if set to 1 the tooltip will follow the mouse pointer instead of being displayed
outside of the parent widget; this may be useful if you want to use tooltips for
large widgets like listboxes or canvases; default is 0
font : font to use for the widget; default is system specific
justify : how multiple lines of text will be aligned, must be "left", "right" or "center"; default is "left"
padx : extra space added to the left and right within the widget; default is 4
pady : extra space above and below the text; default is 2
relief : one of "flat", "ridge", "groove", "raised", "sunken" or "solid"; default is "solid"
state : must be "normal" or "disabled"; if set to "disabled" the tooltip will not appear; default is "normal"
text : the text that is displayed inside the widget
textvariable : if set to an instance of Tkinter.StringVar() the variable's value will be used as text for the widget
width : width of the widget; the default is 0, which means that "wraplength" will be used to limit the widgets width
wraplength : limits the number of characters in each line; default is 150 (XXX: changed to 200)
WIDGET METHODS:
configure(**opts) : change one or more of the widget's options as described above; the changes will take effect the
next time the tooltip shows up; NOTE: follow_mouse cannot be changed after widget initialization
Other widget methods that might be useful if you want to subclass ToolTip:
enter() : callback when the mouse pointer enters the parent widget
leave() : called when the mouse pointer leaves the parent widget
motion() : is called when the mouse pointer moves inside the parent widget if follow_mouse is set to 1 and the
tooltip has shown up to continually update the coordinates of the tooltip window
coords() : calculates the screen coordinates of the tooltip window
create_contents() : creates the contents of the tooltip window (by default a Tkinter.Label)
'''
# Ideas gleaned from PySol
import Tkinter
class ToolTip:
    """A flexible tooltip (balloon help) widget for Tkinter.

    Binds to the master widget's <Enter>, <Leave> and <ButtonPress> events
    and pops up a borderless Toplevel with the configured text after
    'delay' milliseconds.  See the module docstring for all options.
    """
    # XXX Changed the default delay from 1500 to 800
    # XXX Changed the default wraplength from 150 to 200
    def __init__(self, master, text='Your text here', delay=800, **opts):
        self.master = master
        # Default option values; configure() validates and applies overrides.
        self._opts = {'anchor': 'center', 'bd': 1, 'bg': 'lightyellow',
                      'delay': delay, 'fg': 'black', 'follow_mouse': 0,
                      'font': None, 'justify': 'left', 'padx': 4, 'pady': 2,
                      'relief': 'solid', 'state': 'normal', 'text': text,
                      'textvariable': None, 'width': 0, 'wraplength': 200}
        self.configure(**opts)
        self._tipwindow = None
        self._id = None
        self._id1 = self.master.bind("<Enter>", self.enter, '+')
        self._id2 = self.master.bind("<Leave>", self.leave, '+')
        self._id3 = self.master.bind("<ButtonPress>", self.leave, '+')
        self._follow_mouse = 0
        if self._opts['follow_mouse']:
            self._id4 = self.master.bind("<Motion>", self.motion, '+')
            self._follow_mouse = 1
    def configure(self, **opts):
        """Change one or more options; raises KeyError for unknown names.

        NOTE: follow_mouse cannot be changed after widget initialization."""
        for key in opts:
            if key in self._opts:  # was dict.has_key(), removed in Python 3
                self._opts[key] = opts[key]
            else:
                # Bug fix: the original rebound the builtin name KeyError to
                # a string and raised the string; string exceptions raise
                # TypeError since Python 2.6.  Raise a real KeyError.
                raise KeyError('Unknown option: "%s"' % key)
    ##----these methods handle the callbacks on "<Enter>", "<Leave>" and "<Motion>"---------------##
    ##----events on the parent widget; override them if you want to change the widget's behavior--##
    def enter(self, event=None):
        # Mouse entered the parent widget: arm the popup timer.
        self._schedule()
    def leave(self, event=None):
        # Mouse left the parent (or a button was pressed): cancel and hide.
        self._unschedule()
        self._hide()
    def motion(self, event=None):
        # In follow_mouse mode, keep a visible tooltip tracking the pointer.
        if self._tipwindow and self._follow_mouse:
            x, y = self.coords()
            self._tipwindow.wm_geometry("+%d+%d" % (x, y))
    ##------the methods that do the work:---------------------------------------------------------##
    def _schedule(self):
        """Arm the delayed popup timer (no-op when state is 'disabled')."""
        self._unschedule()
        if self._opts['state'] == 'disabled':
            return
        self._id = self.master.after(self._opts['delay'], self._show)
    def _unschedule(self):
        """Cancel a pending popup timer, if any."""
        id = self._id
        self._id = None
        if id:
            self.master.after_cancel(id)
    def _show(self):
        """Create (if necessary), position and display the tooltip window."""
        if self._opts['state'] == 'disabled':
            self._unschedule()
            return
        if not self._tipwindow:
            self._tipwindow = tw = Tkinter.Toplevel(self.master)
            # hide the window until we know the geometry
            tw.withdraw()
            tw.wm_overrideredirect(1)
            if tw.tk.call("tk", "windowingsystem") == 'aqua':
                tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "none")
            self.create_contents()
            tw.update_idletasks()
            x, y = self.coords()
            tw.wm_geometry("+%d+%d" % (x, y))
            tw.deiconify()
    def _hide(self):
        """Destroy the tooltip window, if one is showing."""
        tw = self._tipwindow
        self._tipwindow = None
        if tw:
            tw.destroy()
    ##----these methods might be overridden in derived classes:----------------------------------##
    def coords(self):
        """Return the (x, y) screen position for the tooltip window."""
        # The tip window must be completely outside the master widget;
        # otherwise when the mouse enters the tip window we get
        # a leave event and it disappears, and then we get an enter
        # event and it reappears, and so on forever :-(
        # or we take care that the mouse pointer is always outside the tipwindow :-)
        tw = self._tipwindow
        twx, twy = tw.winfo_reqwidth(), tw.winfo_reqheight()
        w, h = tw.winfo_screenwidth(), tw.winfo_screenheight()
        # calculate the y coordinate:
        if self._follow_mouse:
            y = tw.winfo_pointery() + 20
            # make sure the tipwindow is never outside the screen:
            if y + twy > h:
                y = y - twy - 30
        else:
            y = self.master.winfo_rooty() + self.master.winfo_height() + 3
            if y + twy > h:
                y = self.master.winfo_rooty() - twy - 3
        # we can use the same x coord in both cases
        # (// keeps the Python 2 integer-division semantics on Python 3):
        x = tw.winfo_pointerx() - twx // 2
        if x < 0:
            x = 0
        elif x + twx > w:
            x = w - twx
        return x, y
    def create_contents(self):
        """Fill the tooltip window; by default a single Tkinter.Label."""
        opts = self._opts.copy()
        # Strip the options that are not valid Label configuration options.
        for opt in ('delay', 'follow_mouse', 'state'):
            del opts[opt]
        label = Tkinter.Label(self._tipwindow, **opts)
        label.pack()
##---------demo code-----------------------------------##
def demo():
    """Small interactive demonstration: a listbox with a follow-mouse
    tooltip and a Quit button with a static tooltip."""
    root = Tkinter.Tk(className='ToolTip-demo')
    l = Tkinter.Listbox(root)
    l.insert('end', "I'm a listbox")
    l.pack(side='top')
    t1 = ToolTip(l, follow_mouse=1, text="I'm a tooltip with follow_mouse set to 1, so I won't be placed outside my parent")
    b = Tkinter.Button(root, text='Quit', command=root.quit)
    b.pack(side='bottom')
    t2 = ToolTip(b, text='Enough of this')
    root.mainloop()
if __name__ == '__main__':
    demo()
| {
"content_hash": "3ab07c9e6bdb1b31ae1254ad2aea15fd",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 124,
"avg_line_length": 46.21264367816092,
"alnum_prop": 0.5886083820420346,
"repo_name": "TomT0m/Boolean-c10t",
"id": "afaae0a99d98ce5d77d0fdf69c9723334850abf1",
"size": "8149",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui/c10t-tk/tooltip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7824"
},
{
"name": "C++",
"bytes": "327074"
},
{
"name": "JavaScript",
"bytes": "7619"
},
{
"name": "PHP",
"bytes": "11767"
},
{
"name": "PowerShell",
"bytes": "6884"
},
{
"name": "Python",
"bytes": "43517"
},
{
"name": "Shell",
"bytes": "8152"
}
],
"symlink_target": ""
} |
from google.cloud import dialogflow_v2
async def sample_list_versions():
    """List all versions of a Dialogflow agent and print each one.

    Generated-sample shape; replace "parent_value" with a real agent
    resource name before running.
    """
    # Create a client
    client = dialogflow_v2.VersionsAsyncClient()
    # Initialize request argument(s)
    request = dialogflow_v2.ListVersionsRequest(
        parent="parent_value",
    )
    # Make the request.  Bug fix: on the async client, list_versions() is a
    # coroutine that resolves to an async pager, so it must be awaited
    # before it can be iterated with `async for`.
    page_result = await client.list_versions(request=request)
    # Handle the response
    async for response in page_result:
        print(response)
# [END dialogflow_v2_generated_Versions_ListVersions_async]
| {
"content_hash": "12be40bfc06960c13d57443d44b9fe0a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 25.05,
"alnum_prop": 0.7045908183632734,
"repo_name": "googleapis/python-dialogflow",
"id": "2ad8a6ad3c58caa5f11f5e6e8779b458cbb5adcf",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2_generated_versions_list_versions_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
"""Classes corresponding to W3C XML Schema components.
Class names and behavior should conform to the schema components described in
U{XML Schema Part 1: Structures<http://www.w3.org/TR/xmlschema-1/>}.
References to sections in the documentation of this module generally refers to
that document.
Each class has a C{CreateFromDOM} class method that creates an instance and
initializes it from a DOM node. Only the L{Wildcard}, L{Particle}, and
L{ModelGroup} components are created from non-DOM sources. However, the
requirements on DOM interface are restricted to attributes, child nodes, and
basic fields, though all these must support namespaces.
@group Mixins: *_mixin
@group Ur Type Specializations: *UrType*
@group Utilities: _ImportElementInformationItem
"""
import pyxb
import pyxb.xmlschema
from xml.dom import Node
import types
import re
import logging
import pyxb.namespace.archive
import pyxb.namespace.resolution
from pyxb.binding import basis
from pyxb.binding import datatypes
from pyxb.binding import facets
from pyxb.utils import domutils
import pyxb.utils.utility
import copy
import urlparse
import os.path
_log = logging.getLogger(__name__)
# Flag indicating that the built in types have been registered
_PastAddBuiltInTypes = False
# Make it easier to check node names in the XMLSchema namespace
from pyxb.namespace import XMLSchema as xsd
class _SchemaComponent_mixin (pyxb.namespace._ComponentDependency_mixin,
pyxb.namespace.archive._ArchivableObject_mixin,
pyxb.utils.utility.PrivateTransient_mixin,
pyxb.utils.utility.Locatable_mixin):
"""A mix-in that marks the class as representing a schema component.
This exists so that we can determine the owning schema for any
component we encounter. This is normally done at construction
time by passing a C{schema=val} parameter to the constructor.
"""
    # This class supports transient instance variables. These variables are
    # added to the set of transients at the point of declaration in the class.
__PrivateTransient = set()
def _namespaceContext (self):
"""The namespace context for this schema.
This defines where it looks things up, where it puts things it
creates, the in-scope namespace declarations, etc. Must be defined
for anything that does any sort of QName interpretation. The value is
generally a reference to a namespace context associated with the DOM
element node corresponding to this component."""
if self.__namespaceContext is None:
raise pyxb.LogicError('Attempt to access missing namespace context for %s' % (self,))
return self.__namespaceContext
    def _clearNamespaceContext (self):
        """Drop the reference to the namespace context; returns C{self}."""
        # Calculate the binding sort key for any archive before we discard the
        # namespace context, which we might need.
        self.schemaOrderSortKey()
        self.__namespaceContext = None
        return self
    # Backing store for _namespaceContext(); not persisted in archives.
    __namespaceContext = None
    __PrivateTransient.add('namespaceContext')
# The name by which this component is known within the binding module.
# This is in component rather than _NamedComponent_mixin because some
# unnamed components (like ModelGroup and Wildcard) have Python objects to
# represent them, so need a binding-level name.
__nameInBinding = None
# The schema component that owns this. If C{None}, the component is owned
# directly by the schema.
__owner = None
__PrivateTransient.add('owner')
# The schema components owned by this component.
__ownedComponents = None
__PrivateTransient.add('ownedComponent')
    def _scope (self):
        """The context into which declarations in or subordinate to this node are placed."""
        return self.__scope
    __scope = None
    def _scopeIsIndeterminate (self):
        """Return C{True} iff nobody has defined a scope for this node."""
        return _ScopedDeclaration_mixin.ScopeIsIndeterminate(self._scope())
    def _scopeIsGlobal (self):
        """Return C{True} iff this component has global scope."""
        return _ScopedDeclaration_mixin.ScopeIsGlobal(self._scope())
    def _setScope (self, ctd):
        """Set the scope of this instance after construction.

        This should only be invoked on cloned declarations being incorporated
        into a complex type definition.  Note that the source of the clone may
        be any scope: indeterminate if from a model (attribute) group
        definition; global if a reference to a global component; or ctd if
        inherited from a complex base type."""
        # Only cloned scoped declarations may be re-scoped, and only into a
        # complex type definition.
        assert self.__cloneSource is not None
        assert isinstance(self, _ScopedDeclaration_mixin)
        assert isinstance(ctd, ComplexTypeDefinition)
        self.__scope = ctd
        return self
    def __init__ (self, *args, **kw):
        """Initialize portions of a component.

        @keyword scope: The scope in which the component is defined
        @keyword namespace_context: The NamespaceContext to use within this component
        @keyword node: If no C{namespace_context} is provided, a DOM node must
        be provided from which a namespace context can be identified.
        @keyword owner: Reference to the component that owns this one (the
        immediately enclosing component).  Is C{None} in the case of top-level
        components.
        @keyword schema: Reference to the L{Schema} component to which the
        component belongs.  Required for every component except L{Schema},
        L{Annotation}, and L{Wildcard}.
        """
        self.__ownedComponents = set()
        self.__scope = kw.get('scope')
        self.__namespaceContext = kw.get('namespace_context')
        node = kw.get('node')
        owner = kw.get('owner')
        if self.__namespaceContext is None:
            # No explicit context given: derive one from the DOM node.
            if node is None:
                raise pyxb.LogicError('Schema component constructor must be given namespace_context or node')
            self.__namespaceContext = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node)
        if self.__namespaceContext is None:
            raise pyxb.LogicError('No namespace_context for schema component')
        super(_SchemaComponent_mixin, self).__init__(*args, **kw)
        # Register this component with its target namespace and its owner.
        self._namespaceContext().targetNamespace()._associateComponent(self)
        self._setOwner(owner)
        # Prefer the node's source location; fall back to the owner's.
        if isinstance(node, pyxb.utils.utility.Locatable_mixin):
            self._setLocation(node._location())
        elif isinstance(owner, pyxb.utils.utility.Locatable_mixin):
            self._setLocation(owner._location())
        schema = kw.get('schema')
        if schema is not None:
            self._setObjectOrigin(schema.originRecord())
        else:
            # Only these component kinds may legitimately lack a schema.
            assert isinstance(self, (Schema, Annotation, Wildcard)), 'No origin available for type %s' % (type(self),)
        if isinstance(self, ComplexTypeDefinition):
            assert 1 < len(self.__namespaceContext.inScopeNamespaces())
    def _dissociateFromNamespace (self):
        """Dissociate this component from its owning namespace.

        This should only be done when there are no other references to the
        component, and you want to ensure it does not appear in the model."""
        self._namespaceContext().targetNamespace()._replaceComponent(self, None)
        return self
def _setOwner (self, owner):
    """Record C{owner} as the component that owns this one.

    A C{None} owner is silently ignored.  Otherwise the current owner must
    be unset or identical to the given C{owner}.  Returns C{self}."""
    if owner is None:
        return self
    assert (self.__owner is None) or (self.__owner == owner), 'Owner was %s set to %s' % (self.__owner, owner)
    self.__owner = owner
    owner.__ownedComponents.add(self)
    return self
def owner (self):
    """Return the component that owns this one (set via L{_setOwner}), or C{None} for top-level components."""
    return self.__owner
# A reference to the instance from which this instance was cloned.
# C{None} when this instance is not a clone; set by L{_clone}.
__cloneSource = None
__PrivateTransient.add('cloneSource')
def _cloneSource (self):
    """The source component from which this is a clone.
    Returns C{None} if this is not a clone."""
    return self.__cloneSource
# A set of references to all instances that are clones of this one.
# Lazily created by L{_clone}; C{None} until the first clone is made.
__clones = None
__PrivateTransient.add('clones')
def _clones (self):
    """The set of instances cloned from this component.
    Returns C{None} if no instances have been cloned from this."""
    return self.__clones
def _resetClone_csc (self, **kw):
    """Virtual method to clear whatever attributes should be reset in a
    cloned component.
    This instance should be an instance created by copy.copy().
    The implementation in this class clears the owner and dependency
    relations.
    @keyword owner: (required) the component adopting the clone.
    Returns C{self}.
    """
    # Only valid on instances produced by L{_clone}, which sets the source.
    assert self.__cloneSource is not None
    owner = kw['owner']
    self.__nameInBinding = None
    self.__owner = owner
    assert not (isinstance(self, ComplexTypeDefinition) and isinstance(owner, Schema))
    self.__ownedComponents = set()
    self.__clones = None
    owner._namespaceContext().targetNamespace()._associateComponent(self)
    if self.__namespaceContext is None:
        # When cloning imported components, loan them the owner's
        # namespace context, only so that their cloned children can be
        # associated with the same namespace.
        self.__namespaceContext = owner._namespaceContext()
    # Chain to the next _resetClone_csc in the MRO, falling back to an
    # identity function when no superclass provides one.
    self_fn = lambda *_args, **_kw: self
    return getattr(super(_SchemaComponent_mixin, self), '_resetClone_csc', self_fn)(**kw)
def _clone (self, owner, origin):
    """Create a copy of this instance suitable for adoption by some other
    component.
    This is used for creating a locally-scoped declaration from a
    declaration in a named model or attribute group.
    @param owner: the component that will own the clone.
    @param origin: the object origin to record on the clone."""
    # We only care about cloning declarations, and they should
    # have an unassigned scope.  However, we do clone
    # non-declarations that contain cloned declarations.
    #assert (not isinstance(self, _ScopedDeclaration_mixin)) or self._scopeIsIndeterminate()
    if isinstance(self, pyxb.namespace.resolution._Resolvable_mixin):
        assert self.isResolved()
    assert owner is not None
    # Shallow copy; per-instance state is re-initialized by _resetClone_csc.
    that = copy.copy(self)
    that.__cloneSource = self
    # Track every clone made from this instance.
    if self.__clones is None:
        self.__clones = set()
    self.__clones.add(that)
    that._resetClone_csc(owner=owner, origin=origin)
    if isinstance(that, pyxb.namespace.resolution._Resolvable_mixin):
        assert that.isResolved()
    return that
def isTypeDefinition (self):
    """C{True} iff this component is a simple or complex type definition."""
    return isinstance(self, SimpleTypeDefinition) or isinstance(self, ComplexTypeDefinition)
def isUrTypeDefinition (self):
    """Return True iff this component is the simple or complex ur-type
    definition (the singleton roots of the type hierarchy)."""
    return isinstance(self, (_SimpleUrTypeDefinition, _UrTypeDefinition))
def bestNCName (self):
    """Return the best available name for this component.

    Named components report their own name; a L{ModelGroup} is named by its
    L{ModelGroupDefinition}, when one exists.  Returns C{None} if no name
    can be inferred."""
    if isinstance(self, _NamedComponent_mixin):
        return self.name()
    if isinstance(self, ModelGroup):
        mgd = self.modelGroupDefinition()
        if mgd is not None:
            return mgd.name()
    return None
def nameInBinding (self):
    """Return the name by which this component is known in the generated
    binding, or C{None} if it has not been assigned.
    @note: To support builtin datatypes, type definitions with an
    associated L{pythonSupport<SimpleTypeDefinition.pythonSupport>} class
    initialize their binding name from the class name when the support
    association is created.  As long as no built-in datatype conflicts
    with a language keyword, this should be fine."""
    return self.__nameInBinding
def hasBinding (self):
    """Whether this component has a user-visible Python binding construct.

    Type definitions are bound to classes; global element declarations are
    bound to instances of L{pyxb.binding.basis.element}."""
    if self.isTypeDefinition():
        return True
    return isinstance(self, ElementDeclaration) and self._scopeIsGlobal()
def setNameInBinding (self, name_in_binding):
    """Set the name by which this component shall be known in the XSD binding.
    @param name_in_binding: identifier to use for this component in generated code.
    Returns C{self}."""
    self.__nameInBinding = name_in_binding
    return self
def _updateFromOther_csc (self, other):
    """Override fields in this instance with those from the other.
    Post-extended; description in leaf implementation in
    ComplexTypeDefinition and SimpleTypeDefinition.
    Returns C{self}."""
    assert self != other
    # Chain to the next _updateFromOther_csc in the MRO, falling back to an
    # identity function when no superclass provides one.
    self_fn = lambda *_args, **_kw: self
    getattr(super(_SchemaComponent_mixin, self), '_updateFromOther_csc', self_fn)(other)
    # The only thing we update is the binding name, and that only if it's new.
    if self.__nameInBinding is None:
        self.__nameInBinding = other.__nameInBinding
    return self
def schemaOrderSortKey (self):
    """A key to be used when sorting components for binding generation.

    The key is a cached tuple C{(uri, locbase, locline, loccol)} identifying
    where the component was defined within its schema, so binding components
    appear in a consistent order in generated code and source maintenance is
    simplified.  To support Python 3 comparisons, C{None} values are replaced
    with the type-appropriate default for the corresponding position:
    C{(uri:str, locbase:str, locline:int, loccol:int)}."""
    if self.__schemaOrderSortKey is None:
        ns = None
        if isinstance(self, _NamedComponent_mixin):
            ns = self.bindingNamespace()
            if ns is None:
                ns = self._namespaceContext().targetNamespace()
        elif isinstance(self, _ParticleTree_mixin):
            ns = self._namespaceContext().targetNamespace()
        ns_uri = ''
        if (ns is not None) and (ns.uri() is not None):
            ns_uri = ns.uri()
        loc = self._location()
        locbase = ''
        if (loc is not None) and (loc.locationBase is not None):
            locbase = loc.locationBase
        locline = 0
        if (loc is not None) and (loc.lineNumber is not None):
            locline = loc.lineNumber
        loccol = 0
        if (loc is not None) and (loc.columnNumber is not None):
            loccol = loc.columnNumber
        self.__schemaOrderSortKey = (ns_uri, locbase, locline, loccol)
    return self.__schemaOrderSortKey
__schemaOrderSortKey = None
def facStateSortKey (self):
    """A sort key matching preferred content order.
    This is an ordinal (integer) used to control which candidate
    transitions are attempted first when testing symbols against the
    content automaton state.
    @note: The value associated with a node (especially a L{ModelGroup} or
    L{Particle}) will be different for different complex types, and is
    valid only during generation of the automata code for a given type."""
    # Must have been assigned via _setFacStateSortKey before use.
    assert self.__facStateSortKey is not None
    return self.__facStateSortKey
def _setFacStateSortKey (self, key):
    """Set the automata state sort key.
    @param key: the ordinal used for sorting."""
    self.__facStateSortKey = key
# Transient: valid only while generating automata code for one type.
__facStateSortKey = None
__PrivateTransient.add('facStateSortKey')
class _ParticleTree_mixin (pyxb.cscRoot):
    def _walkParticleTree (self, visit, arg):
        """Mix-in supporting walks of L{Particle} trees.
        This invokes a provided function on each node in a tree defining the
        content model of a particle, both on the way down the tree and on the
        way back up.  A standard implementation would be::
          def _walkParticleTree (self, visit, arg):
            visit(self, True, arg)
            self.__term.walkParticleTree(visit, arg)
            visit(self, False, arg)
        @param visit: A callable with parameters C{node, entering, arg} where
        C{node} is an instance of a class inheriting L{_ParticleTree_mixin},
        C{entering} indicates tree transition status, and C{arg} is a
        caller-provided state parameter.  C{entering} is C{True} if C{node}
        has particle children and the call is before they are visited;
        C{None} if the C{node} has no particle children; and C{False} if
        C{node} has particle children and they have been visited.
        @param arg: The caller-provided state parameter to be passed along
        with the node and entry/exit status in the invocation of C{visit}.
        """
        # Abstract: every concrete particle-tree node must override this.
        raise NotImplementedError('%s._walkParticleTree' % (self.__class__.__name__,))
class _Singleton_mixin (pyxb.cscRoot):
    """Mix-in guaranteeing that only one instance of the class is created.

    Used to ensure the ur-type instances stay pointer-equivalent even when
    unpickling.  See ComplexTypeDefinition.UrTypeDefinition()."""
    def __new__ (cls, *args, **kw):
        # The singleton is cached on the concrete class itself, under a
        # per-class attribute name so subclasses get their own instance.
        cache_attr = '_%s__singleton' % (cls.__name__,)
        if cache_attr not in cls.__dict__:
            # NOTE(review): args/kw are forwarded to the superclass __new__;
            # kept as-is for compatibility, though object.__new__ ignores
            # extra arguments only in some Python versions — confirm before
            # changing.
            instance = super(_Singleton_mixin, cls).__new__(cls, *args, **kw)
            setattr(cls, cache_attr, instance)
        return cls.__dict__[cache_attr]
class _Annotated_mixin (pyxb.cscRoot):
    """Mix-in providing an optional single annotation describing the component.

    Most schema components carry annotations.  The ones that don't are
    L{AttributeUse}, L{Particle}, and L{Annotation}.  L{ComplexTypeDefinition}
    and L{Schema} support multiple annotations, so they do not use this
    mix-in."""

    # Optional Annotation instance
    __annotation = None

    def __init__ (self, *args, **kw):
        super(_Annotated_mixin, self).__init__(*args, **kw)
        self.__annotation = kw.get('annotation')

    def _annotationFromDOM (self, node):
        """Capture the unique C{annotation} child of C{node}, if present."""
        child = domutils.LocateUniqueChild(node, 'annotation')
        if child is None:
            return
        create_kw = { }
        if isinstance(self, _SchemaComponent_mixin):
            create_kw['owner'] = self
        self.__annotation = Annotation.CreateFromDOM(child, **create_kw)

    def _updateFromOther_csc (self, other):
        """Override fields in this instance with those from the other.
        Post-extended; description in leaf implementation in
        ComplexTypeDefinition and SimpleTypeDefinition."""
        assert self != other
        self_fn = lambda *_args, **_kw: self
        getattr(super(_Annotated_mixin, self), '_updateFromOther_csc', self_fn)(other)
        # @todo: make this a copy?
        self.__annotation = other.__annotation
        return self

    def annotation (self):
        """Return the associated L{Annotation}, or C{None}."""
        return self.__annotation
class _PickledAnonymousReference (object):
    """Pickleable reference to an anonymous type in a different namespace.

    Components in other namespaces can normally be referenced by name.  That
    fails when a namespace derives from a base type in another namespace and
    must refer to attribute or element declarations held in that type: if
    those declarations are local to the base complex type, they have no name
    to reference.  Instances of this class provide a pickleable stand-in that
    behaves rather like a L{pyxb.namespace.ExpandedName} in that it can be
    used to dereference various component types."""

    __AnonymousCategory = pyxb.namespace.archive.NamespaceArchive._AnonymousCategory()

    __namespace = None
    __anonymousName = None

    def __init__ (self, namespace, anonymous_name):
        """Create a new anonymous reference.
        @param namespace: The namespace in which the component is declared.
        @type namespace: L{pyxb.namespace.Namespace}
        @param anonymous_name: A generated name guaranteed to be unique within
        the namespace.  See L{_NamedComponent_mixin._anonymousName}.
        @type anonymous_name: C{basestring}.
        """
        self.__namespace = namespace
        self.__anonymousName = anonymous_name
        assert self.__anonymousName is not None

    @classmethod
    def FromPickled (cls, object_reference):
        """Return the reference object for a pickled reference, whether it
        was a normal (tuple) or anonymous reference."""
        if isinstance(object_reference, _PickledAnonymousReference):
            return object_reference
        # Normal references unpickle as (uri, localName) tuples.
        assert isinstance(object_reference, tuple)
        return pyxb.namespace.ExpandedName(object_reference)

    def namespace (self):
        return self.__namespace

    def anonymousName (self):
        return self.__anonymousName

    def validateComponentModel (self):
        """Forward to the associated namespace."""
        return self.__namespace.validateComponentModel()

    def __lookupObject (self):
        # All anonymous components live in one category map keyed by their
        # generated unique name, regardless of component kind.
        return self.__namespace.categoryMap(self.__AnonymousCategory).get(self.__anonymousName)

    # The ExpandedName-style dereference methods all resolve through the
    # same anonymous-category lookup.
    typeDefinition = __lookupObject
    attributeGroupDefinition = __lookupObject
    modelGroupDefinition = __lookupObject
    attributeDeclaration = __lookupObject
    elementDeclaration = __lookupObject
    identityConstraintDefinition = __lookupObject
    notationDeclaration = __lookupObject

    def __str__ (self):
        """Represent the anonymous reference in a form recognizable by a developer."""
        return 'ANONYMOUS:%s' % (pyxb.namespace.ExpandedName(self.__namespace, self.__anonymousName),)
class _NamedComponent_mixin (pyxb.utils.utility.PrivateTransient_mixin, pyxb.cscRoot):
    """Mix-in to hold the name and targetNamespace of a component.
    The name may be None, indicating an anonymous component.  The
    targetNamespace is never None, though it could be an empty namespace.  The
    name and targetNamespace values are immutable after creation.
    This class overrides the pickling behavior: when pickling a Namespace,
    objects that do not belong to that namespace are pickled as references,
    not as values.  This ensures the uniqueness of objects when multiple
    namespace definitions are pre-loaded.
    This class must follow L{_SchemaComponent_mixin} in the MRO.
    """
    __PrivateTransient = set()
    def name (self):
        """Name of the component within its scope or namespace.
        This is an NCName.  The value is None if the component is
        anonymous.  The attribute is immutable after the component is
        created."""
        return self.__name
    __name = None
    def isAnonymous (self):
        """Return true iff this instance is locally scoped (has no name)."""
        return self.__name is None
    def _setAnonymousName (self, namespace, unique_id=None, anon_name=None):
        """Assign (once) the unique anonymous name used to identify this
        component in the given namespace's anonymous category map.
        @param namespace: the namespace the anonymous name is registered in.
        @keyword unique_id: optional disambiguator appended to a derived name.
        @keyword anon_name: optional caller-supplied name used verbatim."""
        # If this already has a name, keep using it.
        if self.__anonymousName is not None:
            return
        assert self.__needAnonymousSupport()
        assert namespace is not None
        if self.bindingNamespace() is not None:
            assert self.bindingNamespace() == namespace
        if self.targetNamespace() is not None:
            assert self.targetNamespace() == namespace
        if anon_name is None:
            # Derive a candidate name, then uniquify it within the
            # namespace's anonymous category map.
            anon_name = self.nameInBinding()
            if anon_name is None:
                anon_name = self.name()
            if anon_name is None:
                anon_name = 'ANON_IN_GROUP'
            if unique_id is not None:
                anon_name = '%s_%s' % (anon_name, unique_id)
            # Iterate the category map directly: dict.iterkeys() does not
            # exist on Python 3, and iterating a dict yields its keys on
            # both major versions.
            anon_name = pyxb.utils.utility.MakeUnique(anon_name, set(namespace.categoryMap(self.__AnonymousCategory)))
        self.__anonymousName = anon_name
        namespace.addCategoryObject(self.__AnonymousCategory, anon_name, self)
    def _anonymousName (self, namespace=None):
        assert self.__anonymousName is not None, '%x %s %s in %s missing anonymous name' % (id(self), type(self), self.name(), self.targetNamespace())
        return self.__anonymousName
    __anonymousName = None
    def targetNamespace (self):
        """The targetNamespace of a component.
        This is None, or a reference to a Namespace in which the
        component is declared (either as a global or local to one of
        the namespace's complex type definitions).  This is immutable
        after creation.
        """
        return self.__targetNamespace
    __targetNamespace = None
    def bindingNamespace (self):
        """The namespace in which this component's binding is placed."""
        return self.__bindingNamespace
    def _setBindingNamespace (self, namespace):
        self.__bindingNamespace = namespace
    __bindingNamespace = None
    def _templateMap (self):
        """A map from template keys to component-specific values.
        This is used in code generation to maintain unique names for accessor
        methods, identifiers, keys, and other characteristics associated with
        the code generated in support of the binding for this component."""
        return self.__templateMap
    __templateMap = None
    __AnonymousCategory = pyxb.namespace.archive.NamespaceArchive._AnonymousCategory()
    def __needAnonymousSupport (self):
        # If this component doesn't have a name, or if it's in some scope in
        # which it cannot be located in a category map, we'll need a unique
        # name for it.
        return self.isAnonymous() or (self._scopeIsIndeterminate() and not isinstance(self, (AttributeGroupDefinition, ModelGroupDefinition)))
    def _schema (self):
        """Return the schema component from which this component was defined.
        Needed so we can distinguish components that came from different
        locations, since that imposes an external order dependency on them and
        on cross-namespace inclusions.
        @note: This characteristic is removed when the component is stored in
        a namespace archive."""
        return self.__schema
    __schema = None
    __PrivateTransient.add('schema')
    def _prepareForArchive_csc (self, module_record):
        # Ensure anonymous components have a registered unique name before
        # they are written to the archive.
        if self.__needAnonymousSupport():
            self._setAnonymousName(module_record.namespace(), unique_id=module_record.generationUID())
        self_fn = lambda *_args, **_kw: self
        return getattr(super(_NamedComponent_mixin, self), '_prepareForArchive_csc', self_fn)(module_record)
    def _picklesInArchive (self, archive):
        """Return C{True} if this component should be pickled by value in the
        given namespace.
        When pickling, a declaration component is considered to belong to the
        namespace if it has a local scope which belongs to the namespace.  In
        that case, the declaration is a clone of something that does not
        belong to the namespace; but the clone does.
        @see: L{_bindsInNamespace}
        @return: C{False} if the component should be pickled by reference.
        """
        if isinstance(self._scope(), ComplexTypeDefinition):
            return self._scope()._picklesInArchive(archive)
        assert not (self.targetNamespace() is None), '%s has no tns, scope %s, location %s, schema %s' % (self, self._scope(), self._location(), self._schema().targetNamespace())
        assert not (self._objectOrigin() is None)
        new_flag = (self._objectOrigin().generationUID() == archive.generationUID())
        return new_flag
    def _bindsInNamespace (self, ns):
        """Return C{True} if the binding for this component should be
        generated in the given namespace.
        This is the case when the component is in the given namespace.  It's
        also the case when the component has no associated namespace (but not
        an absent namespace).  Be aware that cross-namespace inheritance means
        you will get references to elements in another namespace when
        generating code for a subclass; that's fine, and those references
        should not be generated locally.
        """
        return self.targetNamespace() in (ns, None)
    def expandedName (self):
        """Return the L{pyxb.namespace.ExpandedName} of this object."""
        if self.name() is None:
            return None
        return pyxb.namespace.ExpandedName(self.targetNamespace(), self.name())
    def __new__ (cls, *args, **kw):
        """Pickling support.
        Normally, we just create a new instance of this class.
        However, if we're unpickling a reference in a loadable schema,
        we need to return the existing component instance by looking
        up the name in the component map of the desired namespace.  We
        can tell the difference because no normal constructors that
        inherit from this have positional arguments; only invocations
        by unpickling with a value returned in __getnewargs__ do.
        This does require that the dependent namespace already have
        been validated (or that it be validated here).  That shouldn't
        be a problem, except for the dependency loop resulting from
        use of xml:lang in the XMLSchema namespace.  For that issue,
        see pyxb.namespace._XMLSchema.
        """
        if 0 == len(args):
            rv = super(_NamedComponent_mixin, cls).__new__(cls)
            return rv
        ( object_reference, scope, icls ) = args
        object_reference = _PickledAnonymousReference.FromPickled(object_reference)
        # Explicitly validate here: the lookup operations won't do so,
        # but will abort if the namespace hasn't been validated yet.
        object_reference.validateComponentModel()
        rv = None
        if isinstance(scope, (tuple, _PickledAnonymousReference)):
            # Scope is the expanded name of the complex type in which the
            # named value can be located.
            scope_ref = _PickledAnonymousReference.FromPickled(scope)
            if object_reference.namespace() != scope_ref.namespace():
                scope_ref.validateComponentModel()
                assert 'typeDefinition' in scope_ref.namespace().categories()
            scope_ctd = scope_ref.typeDefinition()
            if scope_ctd is None:
                raise pyxb.SchemaValidationError('Unable to resolve local scope %s' % (scope_ref,))
            if issubclass(icls, AttributeDeclaration):
                rv = scope_ctd.lookupScopedAttributeDeclaration(object_reference)
            elif issubclass(icls, ElementDeclaration):
                rv = scope_ctd.lookupScopedElementDeclaration(object_reference)
            if rv is None:
                raise pyxb.SchemaValidationError('Unable to resolve %s as %s in scope %s' % (object_reference, icls, scope_ref))
        elif _ScopedDeclaration_mixin.ScopeIsGlobal(scope) or _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope):
            if (issubclass(icls, SimpleTypeDefinition) or issubclass(icls, ComplexTypeDefinition)):
                rv = object_reference.typeDefinition()
            elif issubclass(icls, AttributeGroupDefinition):
                rv = object_reference.attributeGroupDefinition()
            elif issubclass(icls, ModelGroupDefinition):
                rv = object_reference.modelGroupDefinition()
            elif issubclass(icls, AttributeDeclaration):
                rv = object_reference.attributeDeclaration()
            elif issubclass(icls, ElementDeclaration):
                rv = object_reference.elementDeclaration()
            elif issubclass(icls, IdentityConstraintDefinition):
                rv = object_reference.identityConstraintDefinition()
            if rv is None:
                raise pyxb.SchemaValidationError('Unable to resolve %s as %s' % (object_reference, icls))
        if rv is None:
            raise pyxb.SchemaValidationError('Unable to resolve reference %s, scope %s ns %s type %s, class %s' % (object_reference, scope, (scope is None and "<unknown>" or scope.targetNamespace()), type(scope), icls))
        return rv
    def __init__ (self, *args, **kw):
        assert 0 == len(args)
        name = kw.get('name')
        # Must be None or a valid NCName
        assert (name is None) or (0 > name.find(':')), 'name %s' % (name,)
        self.__name = name
        # Target namespace is taken from the context, unless somebody
        # overrides it (as is done for local declarations if the form is
        # unqualified).
        self.__targetNamespace = kw.get('target_namespace', self._namespaceContext().targetNamespace())
        self.__bindingNamespace = kw.get('binding_namespace')
        self.__templateMap = {}
        self.__schema = kw.get('schema')
        assert self._schema() is not None
        # Do parent invocations after we've set the name: they might need it.
        super(_NamedComponent_mixin, self).__init__(*args, **kw)
    def isNameEquivalent (self, other):
        """Return true iff this and the other component share the same name and target namespace.
        Anonymous components are inherently name inequivalent, except to
        themselves.  This relies on equivalence as defined for
        pyxb.namespace.ExpandedName, for which None is not equivalent to any
        non-anonymous name."""
        # The identity comparison covers the case where a reference
        # unpickled to the very same instance.
        return (self == other) or ((not self.isAnonymous()) and (self.expandedName() == other.expandedName()))
    def isTypeEquivalent (self, other):
        """Return True iff this and the other component have matching types.
        It appears that name equivalence is used; two complex type definitions
        with identical structures are not considered equivalent (at least, per
        XMLSpy).
        """
        return (type(self) == type(other)) and self.isNameEquivalent(other)
    def isDerivationConsistent (self, other):
        """Return True iff this type can serve as a restriction of the other
        type for the purposes of U{element consistency<http://www.w3.org/TR/xmlschema-1/#cos-element-consistent>}.
        It appears that name equivalence is normally used; two complex type
        definitions with identical structures are not considered equivalent
        (at least, per XMLSpy).  However, some OpenGIS standards demonstrate
        that derivation by restriction from the other type is also acceptable.
        That opens a whole can of worms; see
        L{ElementDeclaration.isAdaptable}.
        """
        this = self
        # can this succeed if component types are not equivalent?
        while this is not None:
            if this.isTypeEquivalent(other):
                return True
            # Assumption from ElementDeclaration.isAdaptable
            assert this.isResolved() and other.isResolved()
            if isinstance(self, ComplexTypeDefinition):
                if self.DM_restriction != this.derivationMethod():
                    return False
            else:
                assert isinstance(self, SimpleTypeDefinition)
                if self._DA_restriction != this._derivationAlternative():
                    return False
            if not this.baseTypeDefinition().isDerivationConsistent(other):
                return False
            this = this.baseTypeDefinition()
        return False
    def _picklingReference (self):
        """Return the value used to encode a by-reference pickle of this
        component: an anonymous reference when no usable name exists,
        otherwise the (uri, localName) tuple of its expanded name."""
        if self.__needAnonymousSupport():
            assert self._anonymousName() is not None
            return _PickledAnonymousReference(self.targetNamespace(), self._anonymousName())
        return self.expandedName().uriTuple()
    def __pickleAsReference (self):
        # Whether this component must be pickled as a reference rather than
        # by value.
        if self.targetNamespace() is None:
            return False
        # Get the namespace we're pickling.  If the namespace is None,
        # we're not pickling; we're probably cloning, and in that case
        # we don't want to use the reference state encoding.
        pickling_archive = pyxb.namespace.archive.NamespaceArchive.PicklingArchive()
        if pickling_archive is None:
            return False
        # If this thing is scoped in a complex type that belongs to the
        # namespace being pickled, then it gets pickled as an object even if
        # its target namespace isn't this one.
        assert self._objectOrigin() is not None
        if self._picklesInArchive(pickling_archive):
            return False
        # Note that anonymous objects must use their fallback
        return True
    def __getstate__ (self):
        if self.__pickleAsReference():
            # NB: This instance may be a scoped declaration, but in
            # this case (unlike getnewargs) we don't care about trying
            # to look up a previous instance, so we don't need to
            # encode the scope in the reference tuple.
            return self._picklingReference()
        if self.targetNamespace() is None:
            # The only internal named objects that should exist are
            # ones that have a non-global scope (including those with
            # absent scope).
            # @todo: this is wrong for schema that are not bound to a
            # namespace, unless we use an unbound Namespace instance
            #assert isinstance(self, _ScopedDeclaration_mixin)
            #assert self.SCOPE_global != self.scope()
            # NOTE: The name of the scope may be None.  This is not a
            # problem unless somebody tries to extend or restrict the
            # scope type, which at the moment I'm thinking is
            # impossible for anonymous types.  If it isn't, we're
            # gonna need some other sort of ID, like a UUID associated
            # with the anonymous class at the time it's written to the
            # preprocessed schema file.
            pass
        return super(_NamedComponent_mixin, self).__getstate__()
    def __getnewargs__ (self):
        """Pickling support.
        If this instance is being pickled as a reference, provide the
        arguments that are necessary so that the unpickler can locate
        the appropriate component rather than create a duplicate
        instance."""
        if self.__pickleAsReference():
            scope = self._scope()
            if isinstance(self, _ScopedDeclaration_mixin):
                # If scope is global, we can look it up in the namespace.
                # If scope is indeterminate, this must be within a group in
                # another namespace.  Why are we serializing it?
                # If scope is local, provide the namespace and name of
                # the type that holds it
                if self.SCOPE_global == self.scope():
                    pass
                elif isinstance(self.scope(), ComplexTypeDefinition):
                    scope = self.scope()._picklingReference()
                    assert isinstance(scope, (tuple, _PickledAnonymousReference)), self
                else:
                    assert self._scopeIsIndeterminate()
                    # This is actually OK: we made sure both the scope and
                    # this instance can be looked up by a unique identifier.
            else:
                assert isinstance(self, _NamedComponent_mixin), 'Pickling unnamed component %s in indeterminate scope by reference' % (self,)
                assert not isinstance(scope, ComplexTypeDefinition), '%s %s %s %s' % (self, self.name(), scope, self._objectOrigin())
            rv = ( self._picklingReference(), scope, self.__class__ )
            return rv
        return ()
    def __setstate__ (self, state):
        if isinstance(state, tuple):
            # We don't actually have to set any state here; we just
            # make sure that we resolved to an already-configured
            # instance.
            assert self.targetNamespace() is not None
            assert self.targetNamespace().uri() == state[0]
            assert self.name() == state[1]
            return
        if isinstance(state, _PickledAnonymousReference):
            assert self.targetNamespace() is not None
            assert self.targetNamespace() == state.namespace()
            assert self.__needAnonymousSupport()
            assert self._anonymousName() == state.anonymousName()
            return
        self.__dict__.update(state)
    def _resetClone_csc (self, **kw):
        # Clones get fresh per-instance bookkeeping; the schema reference,
        # template map, and anonymous name belong to the source instance.
        self.__schema = None
        self_fn = lambda *_args, **_kw: self
        rv = getattr(super(_NamedComponent_mixin, self), '_resetClone_csc', self_fn)(**kw)
        self.__templateMap = { }
        origin = kw.get('origin')
        self.__anonymousName = None
        self._setObjectOrigin(origin, override=True)
        return rv
class _ValueConstraint_mixin (pyxb.cscRoot):
    """Mix-in indicating that the component contains a simple-type
    value that may be constrained."""

    VC_na = 0 #<<< No value constraint applies
    VC_default = 1 #<<< Provided value constraint is default value
    VC_fixed = 2 #<<< Provided value constraint is fixed value

    # None, or a tuple of (lexical value, VC_default or VC_fixed).
    __valueConstraint = None

    def valueConstraint (self):
        """The constraint on the value of the attribute or element.

        Either C{None}, or a pair consisting of a string in the lexical
        space of the typeDefinition and one of VC_default and VC_fixed."""
        return self.__valueConstraint

    def default (self):
        """The default value this instance constrains, or C{None} if the
        constraint is absent or not a default."""
        vc = self.__valueConstraint
        if isinstance(vc, tuple) and (self.VC_default == vc[1]):
            return vc[0]
        return None

    def fixed (self):
        """The fixed value this instance constrains, or C{None} if the
        constraint is absent or not fixed."""
        vc = self.__valueConstraint
        if isinstance(vc, tuple) and (self.VC_fixed == vc[1]):
            return vc[0]
        return None

    def _valueConstraintFromDOM (self, node):
        """Initialize the value constraint from the C{default}, C{fixed},
        and C{use} attributes of a DOM element.  Returns C{self}."""
        adefault = domutils.NodeAttribute(node, 'default')
        afixed = domutils.NodeAttribute(node, 'fixed')
        ause = domutils.NodeAttribute(node, 'use')
        if (adefault is not None) and (afixed is not None):
            raise pyxb.SchemaValidationError('Attributes default and fixed may not both appear (3.2.3r1)')
        if adefault is not None:
            if (ause is not None) and ('optional' != ause):
                raise pyxb.SchemaValidationError('Attribute use must be optional when default present (3.2.3r2)')
            self.__valueConstraint = (adefault, self.VC_default)
        elif afixed is not None:
            self.__valueConstraint = (afixed, self.VC_fixed)
        else:
            self.__valueConstraint = None
        return self
class _ScopedDeclaration_mixin (pyxb.cscRoot):
    """Mix-in class for named components that have a scope.

    Scope matters for cross-namespace inheritance, e.g. extending or
    restricting a complex type definition from a different namespace: a
    reference to the external component must be retained when the schema is
    serialized.  The pickling code does this by including the scope when a
    component is pickled as a reference.  The scope is SCOPE_global if
    global; otherwise it is a tuple holding the external namespace URI and
    the NCName of the complex type definition in that namespace.  We assume
    that complex type definition has global scope; otherwise it could not
    have been extended or restricted.  (Should this be untrue, there are
    comments in the code about a possible solution.)
    @warning: This mix-in must follow L{_NamedComponent_mixin} in the C{mro}.
    """

    SCOPE_global = 'global' #<<< Marker to indicate global scope
    XSCOPE_indeterminate = 'indeterminate' #<<< Marker to indicate scope has not been assigned

    @classmethod
    def IsValidScope (cls, value):
        """C{True} iff C{value} is the global marker or a complex type definition."""
        if cls.SCOPE_global == value:
            return True
        return isinstance(value, ComplexTypeDefinition)

    @classmethod
    def ScopeIsIndeterminate (cls, value):
        """C{True} iff C{value} is the indeterminate-scope marker."""
        return cls.XSCOPE_indeterminate == value

    @classmethod
    def ScopeIsGlobal (cls, value):
        """C{True} iff C{value} is the global-scope marker."""
        return cls.SCOPE_global == value

    def _scopeIsCompatible (self, scope):
        """Return True if the scope currently assigned to this instance is
        compatible with the given scope.

        If either scope is indeterminate, presume they will ultimately be
        compatible.  Equal scopes are compatible, as is a local scope when
        this instance already has global scope."""
        if self.ScopeIsIndeterminate(scope):
            return True
        if self.ScopeIsIndeterminate(self.scope()):
            return True
        if self.scope() == scope:
            return True
        return (self.SCOPE_global == self.scope()) and isinstance(scope, ComplexTypeDefinition)

    # The scope for the element: SCOPE_global or a complex type definition.
    # None is an invalid value, but may appear if scope is determined by an
    # ancestor component.
    def scope (self):
        """The scope for the declaration.

        Valid values are SCOPE_global, or a complex type definition.  A
        value of C{None} means a non-global declaration that is not owned by
        a complex type definition; these can only appear in attribute group
        definitions or model group definitions.
        @todo: For declarations in named model groups (viz., local elements
        that aren't references), the scope needs to be set by the owning
        complex type."""
        return self._scope()

    # The original _ScopedDeclaration_mixin that introduced the element into
    # its scope; retained so a defining declaration survives while each
    # extension type gets its own clone adapted for its scope.
    __baseDeclaration = None

    def baseDeclaration (self):
        """The defining declaration for this one, or C{self} if this is it."""
        return self.__baseDeclaration or self

    def _baseDeclaration (self, referenced_declaration):
        self.__baseDeclaration = referenced_declaration.baseDeclaration()
        return self.__baseDeclaration
class _AttributeWildcard_mixin (pyxb.cscRoot):
    """Support for components that accept attribute wildcards.

    That is L{AttributeGroupDefinition} and L{ComplexTypeDefinition}.  The
    calculations of the appropriate wildcard are sufficiently complex that
    they need to be abstracted out to a mix-in class."""

    # Optional wildcard that constrains attributes
    __attributeWildcard = None

    def attributeWildcard (self):
        """Return the L{Wildcard} component associated with attributes of this
        instance, or C{None} if attribute wildcards are not present in the
        instance."""
        return self.__attributeWildcard

    def _setAttributeWildcard (self, attribute_wildcard):
        """Set the attribute wildcard property for this instance.

        @param attribute_wildcard: a L{Wildcard} instance, or C{None}
        @return: C{self}
        """
        assert (attribute_wildcard is None) or isinstance(attribute_wildcard, Wildcard)
        self.__attributeWildcard = attribute_wildcard
        return self

    def _attributeRelevantChildren (self, node_list):
        """Return the nodes that are relevant for attribute processing.

        @param node_list: A sequence of nodes found in a definition content
        information item.
        @return: A tuple C{( attributes, attributeGroups, attributeWildcard)}
        where C{attributes} is the subsequence of C{node_list} that are
        XMLSchema C{attribute} nodes; C{attributeGroups} is analogous; and
        C{attributeWildcard} is a single DOM node with XMLSchema name
        C{anyAttribute} (or C{None}, if no such node is present in the list).

        @raise pyxb.SchemaValidationError: An C{attributeGroup} node is
        present but does not have the required C{ref} attribute.
        @raise pyxb.SchemaValidationError: Multiple C{anyAttribute} nodes are
        identified.
        """
        attributes = []
        attribute_groups = []
        any_attribute = None
        # Handle clauses 1 and 2 (common between simple and complex types)
        for node in node_list:
            if Node.ELEMENT_NODE != node.nodeType:
                continue
            if xsd.nodeIsNamed(node, 'attribute'):
                # Note: This attribute use instance may have use=prohibited
                attributes.append(node)
            elif xsd.nodeIsNamed(node, 'attributeGroup'):
                # This must be an attributeGroupRef; record its expanded name
                # for later resolution rather than the node itself.
                agd_en = domutils.NodeAttributeQName(node, 'ref')
                if agd_en is None:
                    raise pyxb.SchemaValidationError('Require ref attribute on internal attributeGroup elements')
                attribute_groups.append(agd_en)
            elif xsd.nodeIsNamed(node, 'anyAttribute'):
                if any_attribute is not None:
                    raise pyxb.SchemaValidationError('Multiple anyAttribute children are not allowed')
                any_attribute = node
        return (attributes, attribute_groups, any_attribute)

    @classmethod
    def CompleteWildcard (cls, namespace_context, attribute_groups, local_wildcard):
        """Implement the algorithm as described the
        U{specification<http://www.w3.org/TR/xmlschema-1/#declare-type>}.

        @param namespace_context: The L{pyxb.namespace.NamespaceContext} to be
        associated with any created L{Wildcard} instance
        @param attribute_groups: A list of L{AttributeGroupDefinition} instances
        @param local_wildcard: A L{Wildcard} instance computed from a relevant
        XMLSchema C{anyAttribute} element, or C{None} if no attribute wildcard
        is relevant
        """
        # Non-absent wildcard properties of attribute groups
        agd_wildcards = []
        for agd in attribute_groups:
            assert isinstance(agd, AttributeGroupDefinition)
            if agd.attributeWildcard() is not None:
                agd_wildcards.append(agd.attributeWildcard())
        agd_constraints = [ _agd.namespaceConstraint() for _agd in agd_wildcards ]
        # Clause 2.1: no attribute-group wildcards, so the local wildcard
        # (possibly None) is the complete wildcard.
        if 0 == len(agd_wildcards):
            return local_wildcard
        if local_wildcard is not None:
            # Clause 2.2.1: intersect the local constraint with all
            # attribute-group constraints, keeping the local wildcard's
            # process-contents and annotation.
            # Fixed: accessor was misspelled "namespaecConstraint", which
            # raised AttributeError whenever this clause was reached.
            return Wildcard(process_contents=local_wildcard.processContents(),
                            namespace_constraint=Wildcard.IntensionalIntersection(agd_constraints + [local_wildcard.namespaceConstraint()]),
                            annotation=local_wildcard.annotation(),
                            namespace_context=namespace_context)
        # Clause 2.2.2: no local wildcard; intersect the group constraints
        # and take process-contents from the first group wildcard.
        return Wildcard(process_contents=agd_wildcards[0].processContents(),
                        namespace_constraint=Wildcard.IntensionalIntersection(agd_constraints),
                        namespace_context=namespace_context)
class AttributeDeclaration (_SchemaComponent_mixin, _NamedComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _Annotated_mixin, _ValueConstraint_mixin, _ScopedDeclaration_mixin):
    """An XMLSchema U{Attribute Declaration<http://www.w3.org/TR/xmlschema-1/#cAttribute_Declarations>} component.
    """

    # The STD to which attribute values must conform
    __typeDefinition = None
    def typeDefinition (self):
        """The simple type definition to which an attribute value must
        conform."""
        return self.__typeDefinition

    # The expanded name content of the XSD type attribute
    __typeExpandedName = None

    def __init__ (self, *args, **kw):
        # A scope keyword is mandatory for attribute declarations; it is
        # consumed by a superclass constructor in the MRO.
        super(AttributeDeclaration, self).__init__(*args, **kw)
        assert 'scope' in kw

    def __str__ (self):
        # Show the resolved type when available; a '?' marks an
        # as-yet-unresolved type reference.
        if self.typeDefinition():
            return 'AD[%s:%s]' % (self.name(), self.typeDefinition().expandedName())
        return 'AD[%s:?]' % (self.expandedName(),)

    @classmethod
    def CreateBaseInstance (cls, name, schema, std=None):
        """Create an attribute declaration component for a specified namespace.

        @param name: the NCName of the declaration
        @param schema: the L{Schema} whose target namespace receives the
        declaration; must not be C{None}
        @keyword std: optional simple type definition; providing it makes
        the new instance immediately resolved
        """
        kw = { 'name' : name,
               'schema' : schema,
               'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
               'scope' : _ScopedDeclaration_mixin.SCOPE_global }
        assert schema is not None
        bi = cls(**kw)
        if std is not None:
            bi.__typeDefinition = std
        # Built-in instances never carry a pending type reference.
        bi.__typeExpandedName = None
        return bi

    # CFD:AD CFD:AttributeDeclaration
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an attribute declaration from the given DOM node.

        wxs is a Schema instance within which the attribute is being
        declared.

        node is a DOM element.  The name must be one of ( 'all',
        'choice', 'sequence' ), and the node must be in the XMLSchema
        namespace.

        scope is the _ScopeDeclaration_mxin context into which the
        attribute declaration is placed.  It can be SCOPE_global, a
        complex type definition, or XSCOPE_indeterminate if this is an
        anonymous declaration within an attribute group.  It is a
        required parameter for this function.
        """
        scope = kw['scope']
        assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or _ScopedDeclaration_mixin.IsValidScope(scope)
        # Node should be an XMLSchema attribute node
        assert xsd.nodeIsNamed(node, 'attribute')
        name = domutils.NodeAttribute(node, 'name')
        # Implement per section 3.2.2
        if xsd.nodeIsNamed(node.parentNode, 'schema'):
            # Top-level declarations must have been given global scope.
            assert cls.SCOPE_global == scope
        elif domutils.NodeAttribute(node, 'ref') is None:
            # This is an anonymous declaration within an attribute use
            assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or isinstance(scope, ComplexTypeDefinition)
        else:
            raise pyxb.SchemaValidationError('Internal attribute declaration by reference')
        rv = cls(name=name, node=node, **kw)
        rv._annotationFromDOM(node)
        rv._valueConstraintFromDOM(node)
        rv.__typeExpandedName = domutils.NodeAttributeQName(node, 'type')
        # Child components are owned by this declaration; do not pass the
        # attribute node itself down to them.
        kw.pop('node', None)
        kw['owner'] = rv
        st_node = domutils.LocateUniqueChild(node, 'simpleType')
        if st_node is not None:
            # Inline anonymous simple type takes precedence.
            rv.__typeDefinition = SimpleTypeDefinition.CreateFromDOM(st_node, **kw)
        elif rv.__typeExpandedName is None:
            # Neither an inline simpleType nor a type attribute: default to
            # the simple ur-type (anySimpleType).
            rv.__typeDefinition = SimpleTypeDefinition.SimpleUrTypeDefinition()
        if rv.__typeDefinition is None:
            # Named type reference must be looked up later.
            rv._queueForResolution('creation')
        return rv

    def isResolved (self):
        # Resolution is complete exactly when the type definition is known.
        return self.__typeDefinition is not None

    # res:AD res:AttributeDeclaration
    def _resolve (self):
        if self.isResolved():
            return self
        # Although the type definition may not be resolved, *this* component
        # is resolved, since we don't look into the type definition for anything.
        assert self.__typeExpandedName is not None, 'AD %s is unresolved but had no type attribute field' % (self.expandedName(),)
        self.__typeDefinition = self.__typeExpandedName.typeDefinition()
        if self.__typeDefinition is None:
            raise pyxb.SchemaValidationError('Type reference %s cannot be found' % (self.__typeExpandedName,))
        if not isinstance(self.__typeDefinition, SimpleTypeDefinition):
            # Attributes may only have simple types (XSD 1.0 3.2.1).
            raise pyxb.SchemaValidationError('Need %s to be a simple type' % (self.__typeExpandedName,))
        return self

    def _updateFromOther_csc (self, other):
        """Override fields in this instance with those from the other.

        This method is invoked only by Schema._addNamedComponent, and
        then only when a built-in type collides with a schema-defined
        type.  Material like facets is not (currently) held in the
        built-in copy, so the DOM information is copied over to the
        built-in STD, which is subsequently re-resolved.

        Returns self.
        """
        assert self != other
        assert self.name() is not None
        assert self.isNameEquivalent(other)
        super(AttributeDeclaration, self)._updateFromOther_csc(other)
        # The other STD should be an unresolved schema-defined type.
        # Mark this instance as unresolved so it is re-examined
        if not other.isResolved():
            if pyxb.namespace.BuiltInObjectUID == self._objectOrigin().generationUID():
                #assert self.isResolved(), 'Built-in %s is not resolved' % (self.expandedName(),)
                _log.warning('Not destroying builtin %s: %s', self.expandedName(), self.__typeDefinition)
            else:
                self.__typeDefinition = None
        return self

    # bR:AD
    def _bindingRequires_vx (self, include_lax):
        """Attribute declarations require their type."""
        return frozenset([ self.__typeDefinition ])
class AttributeUse (_SchemaComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _ValueConstraint_mixin):
    """An XMLSchema U{Attribute Use<http://www.w3.org/TR/xmlschema-1/#cAttribute_Use>} component."""

    # How this attribute can be used.  The component property
    # "required" is true iff the value is USE_required.
    __use = None

    USE_required = 0x01 #<<< The attribute is required
    USE_optional = 0x02 #<<< The attribute may or may not appear
    USE_prohibited = 0x04 #<<< The attribute must not appear

    def required (self):
        # True iff use="required" was specified.
        return self.USE_required == self.__use

    def prohibited (self):
        # True iff use="prohibited" was specified.
        return self.USE_prohibited == self.__use

    # The expanded name value of the XSD ref attribute
    __refExpandedName = None

    # The AttributeUse in a base type that this use restricts, if any.
    __restrictionOf = None
    def restrictionOf (self):
        return self.__restrictionOf
    def _setRestrictionOf (self, au):
        assert isinstance(au, AttributeUse)
        # Might re-assign if had to suspend resolution
        assert (self.__restrictionOf is None) or (self.__restrictionOf == au)
        self.__restrictionOf = au

    # A reference to an AttributeDeclaration
    def attributeDeclaration (self):
        """The attribute declaration for this use.

        When the use scope is assigned, the declaration is cloned (if
        necessary) so that each declaration corresponds to only one use.  We
        rely on this in code generation, because the template map for the use
        is stored in its declaration."""
        return self.__attributeDeclaration
    __attributeDeclaration = None

    # Define so superclasses can take keywords
    def __init__ (self, **kw):
        super(AttributeUse, self).__init__(**kw)

    def matchingQNameMembers (self, au_set):
        """Return the subset of au_set for which the use names match this use.

        Returns C{None} if this use, or any member of C{au_set}, is not yet
        resolved (names cannot be compared until declarations are known)."""
        if not self.isResolved():
            return None
        this_ad = self.attributeDeclaration()
        rv = set()
        for au in au_set:
            if not au.isResolved():
                return None
            that_ad = au.attributeDeclaration()
            if this_ad.isNameEquivalent(that_ad):
                rv.add(au)
        return rv

    @classmethod
    def CreateBaseInstance (cls, schema, attribute_declaration, use=USE_optional):
        # Construct a pre-resolved use wrapping an existing declaration;
        # used for built-in components.
        kw = { 'schema' : schema,
               'namespace_context' : schema.targetNamespace().initialNamespaceContext() }
        bi = cls(**kw)
        assert isinstance(attribute_declaration, AttributeDeclaration)
        bi.__attributeDeclaration = attribute_declaration
        bi.__use = use
        return bi

    # CFD:AU CFD:AttributeUse
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an Attribute Use from the given DOM node.

        wxs is a Schema instance within which the attribute use is
        being defined.

        node is a DOM element.  The name must be 'attribute', and the
        node must be in the XMLSchema namespace.

        scope is the _ScopeDeclaration_mixin context into which any
        required anonymous attribute declaration is put.  This must be
        a complex type definition, or None if this use is in an
        attribute group.
        """
        scope = kw['scope']
        assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or isinstance(scope, ComplexTypeDefinition)
        assert xsd.nodeIsNamed(node, 'attribute')
        schema = kw['schema']
        rv = cls(node=node, **kw)
        # Default per XSD: use="optional" when the attribute is absent.
        rv.__use = cls.USE_optional
        use = domutils.NodeAttribute(node, 'use')
        if use is not None:
            if 'required' == use:
                rv.__use = cls.USE_required
            elif 'optional' == use:
                rv.__use = cls.USE_optional
            elif 'prohibited' == use:
                rv.__use = cls.USE_prohibited
            else:
                raise pyxb.SchemaValidationError('Unexpected value %s for attribute use attribute' % (use,))
        rv._valueConstraintFromDOM(node)
        rv.__refExpandedName = domutils.NodeAttributeQName(node, 'ref')
        if rv.__refExpandedName is None:
            # Create an anonymous declaration
            kw.pop('node', None)
            kw['owner'] = rv
            kw['target_namespace'] = schema.targetNamespaceForNode(node, AttributeDeclaration)
            rv.__attributeDeclaration = AttributeDeclaration.CreateFromDOM(node, **kw)
        if not rv.isResolved():
            # A ref must be looked up once the declaration is available.
            rv._queueForResolution('creation')
        return rv

    def isResolved (self):
        # Resolved once the referenced/anonymous declaration is known.
        return self.__attributeDeclaration is not None

    def _resolve (self):
        if self.isResolved():
            return self
        # Look up the declaration named by the ref attribute.
        self.__attributeDeclaration = self.__refExpandedName.attributeDeclaration()
        if self.__attributeDeclaration is None:
            raise pyxb.SchemaValidationError('Attribute declaration %s cannot be found' % (self.__refExpandedName,))
        assert isinstance(self.__attributeDeclaration, AttributeDeclaration)
        return self

    # bR:AU
    def _bindingRequires_vx (self, include_lax):
        """Attribute uses require their declarations, but only if lax."""
        if not include_lax:
            return frozenset()
        return frozenset([ self.attributeDeclaration() ])

    # aFS:AU
    def _adaptForScope (self, ctd):
        """Adapt this instance for the given complex type.

        If the attribute declaration for this use is not associated with a
        complex type definition, then associate a clone of it with this CTD,
        and clone a new attribute use that uses the associated declaration.
        This attribute use is then inherited by extensions and restrictions,
        while retaining its original scope."""
        rv = self
        assert self.isResolved()
        ad = self.__attributeDeclaration
        assert ad.scope() is not None
        assert isinstance(ctd, ComplexTypeDefinition)
        if not isinstance(ad.scope(), ComplexTypeDefinition):
            # Clone both the use and its declaration so each CTD owns a
            # distinct declaration (needed for per-scope template maps).
            rv = self._clone(ctd, ctd._objectOrigin())
            rv.__attributeDeclaration = ad._clone(rv, ctd._objectOrigin())
            rv.__attributeDeclaration._setScope(ctd)
        ctd._recordLocalDeclaration(rv.__attributeDeclaration)
        return rv

    def __str__ (self):
        return 'AU[%s]' % (self.attributeDeclaration(),)
class ElementDeclaration (_ParticleTree_mixin, _SchemaComponent_mixin, _NamedComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _Annotated_mixin, _ValueConstraint_mixin, _ScopedDeclaration_mixin):
    """An XMLSchema U{Element Declaration<http://www.w3.org/TR/xmlschema-1/#cElement_Declarations>} component."""

    # Simple or complex type definition
    __typeDefinition = None
    def typeDefinition (self):
        """The simple or complex type to which the element value conforms."""
        return self.__typeDefinition

    def _typeDefinition (self, type_definition):
        # Set the type, validating that any value constraint is only used
        # with (potentially) simple content.
        self.__typeDefinition = type_definition
        if (type_definition is not None) and (self.valueConstraint() is not None):
            failed = True
            if isinstance(self.__typeDefinition, SimpleTypeDefinition):
                failed = False
            elif isinstance(self.__typeDefinition, ComplexTypeDefinition):
                # The corresponding type may not be resolved so we can't check
                # its contentType, but we should know whether it could be
                # complex.
                ct = type_definition.contentType()
                if ct is None:
                    if False == self.__typeDefinition._isComplexContent():
                        failed = False
                    else:
                        _log.error('Unable to check value constraint on %s due to incomplete resolution of type', self.expandedName())
                else:
                    # Only CT_SIMPLE content admits a value constraint.
                    failed = not (isinstance(ct, tuple) and (ComplexTypeDefinition.CT_SIMPLE == ct[0]))
            if failed:
                if self.__typeExpandedName is None:
                    raise pyxb.SchemaValidationError('Value constraint on element %s with non-simple content' % (self.expandedName(),))
                raise pyxb.SchemaValidationError('Value constraint on element %s with non-simple type %s' % (self.expandedName(), self.__typeExpandedName))
        return self

    # Expanded name from the substitutionGroup attribute, pending resolution.
    __substitutionGroupExpandedName = None

    # Expanded name from the type attribute, pending resolution.
    __typeExpandedName = None

    # Derived from the nillable attribute (default False).
    __nillable = False
    def nillable (self):
        return self.__nillable

    # IdentityConstraintDefinition instances from key/unique/keyref children.
    __identityConstraintDefinitions = None
    def identityConstraintDefinitions (self):
        """A list of IdentityConstraintDefinition instances."""
        return self.__identityConstraintDefinitions

    # Resolved head of this element's substitution group, if any.
    __substitutionGroupAffiliation = None
    def substitutionGroupAffiliation (self):
        """None, or a reference to an ElementDeclaration."""
        return self.__substitutionGroupAffiliation

    SGE_none = 0 #<<< No substitution group exclusion specified
    SGE_extension = 0x01 #<<< Substitution by an extension of the base type
    SGE_restriction = 0x02 #<<< Substitution by a restriction of the base type
    SGE_substitution = 0x04 #<<< Substitution by replacement (?)

    # Maps from XSD attribute tokens to SGE bitmask values; the disallowed
    # substitutions map additionally accepts 'substitution'.
    _SGE_Map = { 'extension' : SGE_extension
               , 'restriction' : SGE_restriction }
    _DS_Map = _SGE_Map.copy()
    _DS_Map.update( { 'substitution' : SGE_substitution } )

    # Subset of SGE marks formed by bitmask.  SGE_substitution is disallowed.
    __substitutionGroupExclusions = SGE_none

    # Subset of SGE marks formed by bitmask
    __disallowedSubstitutions = SGE_none

    # Derived from the abstract attribute (default False).
    __abstract = False
    def abstract (self):
        return self.__abstract

    def hasWildcardElement (self):
        """Return False, since element declarations are not wildcards."""
        return False

    # bR:ED
    def _bindingRequires_vx (self, include_lax):
        """Element declarations depend on the type definition of their
        content."""
        return frozenset([self.__typeDefinition])

    def __init__ (self, *args, **kw):
        super(ElementDeclaration, self).__init__(*args, **kw)

    # CFD:ED CFD:ElementDeclaration
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an element declaration from the given DOM node.

        wxs is a Schema instance within which the element is being
        declared.

        scope is the _ScopeDeclaration_mixin context into which the
        element declaration is recorded.  It can be SCOPE_global, a
        complex type definition, or None in the case of elements
        declared in a named model group.

        node is a DOM element.  The name must be 'element', and the
        node must be in the XMLSchema namespace."""
        scope = kw['scope']
        assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or _ScopedDeclaration_mixin.IsValidScope(scope)
        # Node should be an XMLSchema element node
        assert xsd.nodeIsNamed(node, 'element')
        # Might be top-level, might be local
        name = domutils.NodeAttribute(node, 'name')
        if xsd.nodeIsNamed(node.parentNode, 'schema'):
            assert _ScopedDeclaration_mixin.SCOPE_global == scope
        elif domutils.NodeAttribute(node, 'ref') is None:
            # Scope may be None or a CTD.
            assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or isinstance(scope, ComplexTypeDefinition)
        else:
            raise pyxb.SchemaValidationError('Created reference as element declaration')
        rv = cls(name=name, node=node, **kw)
        rv._annotationFromDOM(node)
        rv._valueConstraintFromDOM(node)
        rv.__substitutionGroupExpandedName = domutils.NodeAttributeQName(node, 'substitutionGroup')
        # Child components are owned by this declaration.
        kw.pop('node', None)
        kw['owner'] = rv
        identity_constraints = []
        for cn in node.childNodes:
            if (Node.ELEMENT_NODE == cn.nodeType) and xsd.nodeIsNamed(cn, 'key', 'unique', 'keyref'):
                identity_constraints.append(IdentityConstraintDefinition.CreateFromDOM(cn, **kw))
        rv.__identityConstraintDefinitions = identity_constraints
        rv.__typeDefinition = None
        rv.__typeExpandedName = domutils.NodeAttributeQName(node, 'type')
        simpleType_node = domutils.LocateUniqueChild(node, 'simpleType')
        complexType_node = domutils.LocateUniqueChild(node, 'complexType')
        if rv.__typeExpandedName is not None:
            if (simpleType_node is not None) and (complexType_node is not None):
                raise pyxb.SchemaValidationError('Cannot combine type attribute with simpleType or complexType child')
        # Inline anonymous type definitions take effect when present.
        if (rv.__typeDefinition is None) and (simpleType_node is not None):
            rv._typeDefinition(SimpleTypeDefinition.CreateFromDOM(simpleType_node, **kw))
        if (rv.__typeDefinition is None) and (complexType_node is not None):
            rv._typeDefinition(ComplexTypeDefinition.CreateFromDOM(complexType_node, **kw))
        if rv.__typeDefinition is None:
            if rv.__typeExpandedName is None:
                # Scan for particle types which were supposed to be enclosed in a complexType
                for cn in node.childNodes:
                    if Particle.IsParticleNode(cn):
                        raise pyxb.SchemaValidationError('Node %s in element must be wrapped by complexType.' % (cn.localName,))
                # No type information at all: default to the ur-type (anyType).
                rv._typeDefinition(ComplexTypeDefinition.UrTypeDefinition())
        # Resolution also requires the substitution group head, if named.
        rv.__isResolved = (rv.__typeDefinition is not None) and (rv.__substitutionGroupExpandedName is None)
        if not rv.__isResolved:
            rv._queueForResolution('creation')
        attr_val = domutils.NodeAttribute(node, 'nillable')
        if attr_val is not None:
            rv.__nillable = datatypes.boolean(attr_val)
        attr_val = domutils.NodeAttribute(node, 'abstract')
        if attr_val is not None:
            rv.__abstract = datatypes.boolean(attr_val)
        schema = kw['schema']
        rv.__disallowedSubstitutions = schema.blockForNode(node, cls._DS_Map)
        rv.__substitutionGroupExclusions = schema.finalForNode(node, cls._SGE_Map)
        return rv

    def isAdaptable (self, ctd):
        """Determine whether this element declaration is adaptable.

        OK, this gets ugly.  First, if this declaration isn't resolved, it's
        clearly not adaptable.

        Now: For it to be adaptable, we must know enough about its type to
        verify that it is derivation-consistent with any other uses of the
        same name in the same complex type.  If the element's type is
        resolved, that's good enough.

        If the element's type isn't resolved, we're golden as long as
        type-equivalent types were used.  But it's also allowed for the
        derived ctd to use the element name constraining it to a derivation of
        the element base type.  (Go see namespace
        http://www.opengis.net/ows/1.1 types PositionType, PositionType2D,
        BoundingBox, and WGS84BoundingBox for an example).  So, we really do
        have to have the element's type resolved.

        Except that if a CTD's content incorporates an element with the same
        type as the CTD (i.e., nested), this will never happen, because the
        CTD can't get resolved until after it has been resolved.
        (Go see {http://www.opengis.net/ows/1.1}ContentsBaseType and
        {http://www.opengis.net/ows/1.1}DatasetDescriptionSummaryBaseType for
        an example).

        So, we give the world a break and assume that if the type we're trying
        to resolve is the same as the type of an element in that type, then
        the element type will be resolved by the point it's needed.  In point
        of fact, it won't, but we'll only notice that if a CTD contains an
        element whose type is a restriction of the CTD.  In that case,
        isDerivationConsistent will blow chunks and somebody'll have to come
        back and finish up this mess.
        """
        if not self.isResolved():
            return False
        if self.typeDefinition().isResolved():
            return True
        # Aw, dammit.  See if we're gonna need the type resolved before we can
        # adapt this thing.
        existing_decl = ctd.lookupScopedElementDeclaration(self.expandedName())
        if existing_decl is None:
            # Nobody else has this name, so we don't have to check for
            # consistency.
            return True
        # OK, we've got a name clash.  Are the two types trivially equivalent?
        if self.typeDefinition().isTypeEquivalent(existing_decl.typeDefinition()):
            # Yes!  Go for it.
            return True
        # No.  Can't proceed until the type definition is resolved.  Hope it
        # can be....
        _log.warning('Require %s to be resolved; might be a loop.', self.typeDefinition())
        return False

    # aFS:ED
    def _adaptForScope (self, owner, ctd):
        # Clone this declaration into the CTD's scope unless it is already
        # scoped to a complex type; record the (possibly cloned) declaration
        # locally so cos-element-consistent checks can run.
        rv = self
        assert isinstance(ctd, ComplexTypeDefinition), '%s is not a CTD' % (ctd,)
        if not isinstance(self.scope(), ComplexTypeDefinition):
            assert owner is not None
            rv = self._clone(owner, ctd._objectOrigin())
            rv._setScope(ctd)
        ctd._recordLocalDeclaration(rv)
        return rv

    # Tracks whether type and substitution-group affiliation are known.
    __isResolved = False
    def isResolved (self):
        return self.__isResolved

    # res:ED res:ElementDeclaration
    def _resolve (self):
        if self.isResolved():
            return self
        #if self._scopeIsIndeterminate():
        #    _log.debug('WARNING: Resolving ED %s with indeterminate scope (is this a problem?)', self.expandedName())
        if self.__substitutionGroupExpandedName is not None:
            # Look up the substitution group head by expanded name.
            sga = self.__substitutionGroupExpandedName.elementDeclaration()
            if sga is None:
                raise pyxb.SchemaValidationError('Element declaration refers to unrecognized substitution group %s' % (self.__substitutionGroupExpandedName,))
            self.__substitutionGroupAffiliation = sga
        if self.__typeDefinition is None:
            assert self.__typeExpandedName is not None
            td = self.__typeExpandedName.typeDefinition()
            if td is None:
                raise pyxb.SchemaValidationError('Type declaration %s cannot be found' % (self.__typeExpandedName,))
            self._typeDefinition(td)
        self.__isResolved = True
        return self

    def _walkParticleTree (self, visit, arg):
        # Element declarations are leaves of the particle tree.
        visit(self, None, arg)

    def __str__ (self):
        if self.typeDefinition() is not None:
            return 'ED[%s:%s]' % (self.name(), self.typeDefinition().name())
        return 'ED[%s:?]' % (self.name(),)
class ComplexTypeDefinition (_SchemaComponent_mixin, _NamedComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _Annotated_mixin, _AttributeWildcard_mixin):
__PrivateTransient = set()
# The type resolved from the base attribute.
__baseTypeDefinition = None
def baseTypeDefinition (self):
"The type resolved from the base attribute."""
return self.__baseTypeDefinition
DM_empty = 0 #<<< No derivation method specified
DM_extension = 0x01 #<<< Derivation by extension
DM_restriction = 0x02 #<<< Derivation by restriction
_DM_Map = { 'extension' : DM_extension
, 'restriction' : DM_restriction }
# How the type was derived (a DM_* value)
# (This field is used to identify unresolved definitions.)
__derivationMethod = None
def derivationMethod (self):
"""How the type was derived."""
return self.__derivationMethod
# Derived from the final and finalDefault attributes
__final = DM_empty
# Derived from the abstract attribute
__abstract = False
def abstract (self):
return self.__abstract
# A frozenset() of AttributeUse instances.
__attributeUses = None
def attributeUses (self):
"""A frozenset() of AttributeUse instances."""
return self.__attributeUses
# A map from NCNames to AttributeDeclaration instances that are
# local to this type.
__scopedAttributeDeclarations = None
def lookupScopedAttributeDeclaration (self, expanded_name):
"""Find an attribute declaration with the given name that is local to this type.
Returns None if there is no such local attribute declaration."""
if self.__scopedAttributeDeclarations is None:
return None
return self.__scopedAttributeDeclarations.get(expanded_name)
# A map from NCNames to ElementDeclaration instances that are
# local to this type.
__scopedElementDeclarations = None
def lookupScopedElementDeclaration (self, expanded_name):
"""Find an element declaration with the given name that is local to this type.
Returns None if there is no such local element declaration."""
if self.__scopedElementDeclarations is None:
return None
return self.__scopedElementDeclarations.get(expanded_name)
__localScopedDeclarations = None
def localScopedDeclarations (self, reset=False):
"""Return a list of element and attribute declarations that were
introduced in this definition (i.e., their scope is this CTD).
@note: This specifically returns a list, with element declarations
first, because name binding should privilege the elements over the
attributes. Within elements and attributes, the components are sorted
by expanded name, to ensure consistency across a series of binding
generations.
@keyword reset: If C{False} (default), a cached previous value (if it
exists) will be returned.
"""
if reset or (self.__localScopedDeclarations is None):
rve = [ _ed for _ed in self.__scopedElementDeclarations.itervalues() if (self == _ed.scope()) ]
rve.sort(key=lambda _x: _x.expandedName())
rva = [ _ad for _ad in self.__scopedAttributeDeclarations.itervalues() if (self == _ad.scope()) ]
rva.sort(key=lambda _x: _x.expandedName())
self.__localScopedDeclarations = rve
self.__localScopedDeclarations.extend(rva)
return self.__localScopedDeclarations
def _recordLocalDeclaration (self, decl):
"""Record the given declaration as being locally scoped in
this type."""
assert isinstance(decl, _ScopedDeclaration_mixin)
if isinstance(decl, ElementDeclaration):
scope_map = self.__scopedElementDeclarations
elif isinstance(decl, AttributeDeclaration):
scope_map = self.__scopedAttributeDeclarations
else:
raise pyxb.LogicError('Unexpected instance of %s recording as local declaration' % (type(decl),))
decl_en = decl.expandedName()
existing_decl = scope_map.setdefault(decl_en, decl)
if decl != existing_decl:
if isinstance(decl, ElementDeclaration):
# Test cos-element-consistent
existing_type = existing_decl.typeDefinition()
pending_type = decl.typeDefinition()
if not pending_type.isDerivationConsistent(existing_type):
raise pyxb.SchemaValidationError('Conflicting element declarations for %s: existing %s versus new %s' % (decl.expandedName(), existing_type, pending_type))
elif isinstance(decl, AttributeDeclaration):
raise pyxb.SchemaValidationError('Multiple attribute declarations for %s' % (decl.expandedName(),))
else:
assert False, 'Unrecognized type %s' % (type(decl),)
decl._baseDeclaration(existing_decl)
return self
def _isHierarchyRoot (self):
"""Return C{True} iff this is the root of a complex type definition hierarchy.
"""
base = self.__baseTypeDefinition
return isinstance(base, SimpleTypeDefinition) or base.isUrTypeDefinition()
CT_EMPTY = 'EMPTY' #<<< No content
CT_SIMPLE = 'SIMPLE' #<<< Simple (character) content
CT_MIXED = 'MIXED' #<<< Children may be elements or other (e.g., character) content
CT_ELEMENT_ONLY = 'ELEMENT_ONLY' #<<< Expect only element content.
def _contentTypeTag (self):
"""Return the value of the content type identifier, i.e. one of the
CT_ constants. Return value is None if no content type has been
defined."""
if isinstance(self.__contentType, tuple):
return self.__contentType[0]
return self.__contentType
def _contentTypeComponent (self):
if isinstance(self.__contentType, tuple):
return self.__contentType[1]
return None
# Identify the sort of content in this type.
__contentType = None
def contentType (self):
"""Identify the sort of content in this type.
Valid values are:
- C{CT_EMPTY}
- ( C{CT_SIMPLE}, a L{SimpleTypeDefinition} instance )
- ( C{CT_MIXED}, a L{Particle} instance )
- ( C{CT_ELEMENT_ONLY}, a L{Particle} instance )
"""
return self.__contentType
def contentTypeAsString (self):
if self.CT_EMPTY == self.contentType():
return 'EMPTY'
( tag, particle ) = self.contentType()
if self.CT_SIMPLE == tag:
return 'Simple [%s]' % (particle,)
if self.CT_MIXED == tag:
return 'Mixed [%s]' % (particle,)
if self.CT_ELEMENT_ONLY == tag:
return 'Element [%s]' % (particle,)
raise pyxb.LogicError('Unhandled content type')
# Derived from the block and blockDefault attributes
__prohibitedSubstitutions = DM_empty
# @todo: Extracted from children of various types
__annotations = None
def __init__ (self, *args, **kw):
super(ComplexTypeDefinition, self).__init__(*args, **kw)
self.__derivationMethod = kw.get('derivation_method')
self.__scopedElementDeclarations = { }
self.__scopedAttributeDeclarations = { }
def hasWildcardElement (self):
"""Return True iff this type includes a wildcard element in
its content model."""
if self.CT_EMPTY == self.contentType():
return False
( tag, particle ) = self.contentType()
if self.CT_SIMPLE == tag:
return False
return particle.hasWildcardElement()
def _updateFromOther_csc (self, other):
    """Override fields in this instance with those from the other.

    This method is invoked only by Schema._addNamedComponent, and
    then only when a built-in type collides with a schema-defined
    type.  Material like facets is not (currently) held in the
    built-in copy, so the DOM information is copied over to the
    built-in STD, which is subsequently re-resolved.

    Returns self.
    """
    assert self != other
    assert self.isNameEquivalent(other)
    super(ComplexTypeDefinition, self)._updateFromOther_csc(other)
    if not other.isResolved():
        # Force re-resolution of this instance unless it originates from
        # the built-in object generation (which must stay resolved).
        if pyxb.namespace.BuiltInObjectUID != self._objectOrigin().generationUID():
            self.__derivationMethod = None
    return self
# Singleton cache for the ur-type (anyType) instance; populated on the
# first call to UrTypeDefinition().
__UrTypeDefinition = None
@classmethod
def UrTypeDefinition (cls, schema=None, in_builtin_definition=False):
    """Create the ComplexTypeDefinition instance that approximates
    the ur-type.

    See section 3.4.7.

    On first invocation a schema (the XMLSchema namespace's schema)
    must be provided; subsequent calls return the cached singleton.
    """
    # The first time, and only the first time, this is called, a
    # namespace should be provided which is the XMLSchema
    # namespace for this run of the system.  Please, do not try to
    # allow this by clearing the type definition.
    #if in_builtin_definition and (cls.__UrTypeDefinition is not None):
    #    raise pyxb.LogicError('Multiple definitions of UrType')
    if cls.__UrTypeDefinition is None:
        # NOTE: We use a singleton subclass of this class
        assert schema is not None
        ns_ctx = schema.targetNamespace().initialNamespaceContext()
        kw = { 'name' : 'anyType',
               'schema' : schema,
               'namespace_context' : ns_ctx,
               'binding_namespace' : schema.targetNamespace(),
               'derivation_method' : cls.DM_restriction,
               'scope' : _ScopedDeclaration_mixin.SCOPE_global }
        bi = _UrTypeDefinition(**kw)
        # The ur-type is its own baseTypeDefinition
        bi.__baseTypeDefinition = bi
        # No constraints on attributes
        bi._setAttributeWildcard(Wildcard(namespace_constraint=Wildcard.NC_any, process_contents=Wildcard.PC_lax, **kw))
        # There isn't anything to look up, but context is still global.
        # No declarations will be created, so use indeterminate scope to
        # be consistent with validity checks in Particle constructor.
        # Content is mixed, with elements completely unconstrained.  @todo:
        # not associated with a schema (it should be)
        kw = { 'namespace_context' : ns_ctx
             , 'schema' : schema
             , 'scope': _ScopedDeclaration_mixin.XSCOPE_indeterminate }
        w = Wildcard(namespace_constraint=Wildcard.NC_any, process_contents=Wildcard.PC_lax, **kw)
        p = Particle(w, min_occurs=0, max_occurs=None, **kw)
        m = ModelGroup(compositor=ModelGroup.C_SEQUENCE, particles=[ p ], **kw)
        bi.__contentType = ( cls.CT_MIXED, Particle(m, **kw) )
        # No attribute uses
        bi.__attributeUses = set()
        # No constraints on extension or substitution
        bi.__final = cls.DM_empty
        bi.__prohibitedSubstitutions = cls.DM_empty
        bi.__abstract = False
        # Refer to it by name
        bi.setNameInBinding(bi.name())
        # The ur-type is always resolved
        bi.__derivationMethod = cls.DM_restriction
        cls.__UrTypeDefinition = bi
    return cls.__UrTypeDefinition
def isBuiltin (self):
    """Indicate whether this complex type is a built-in type.

    The only built-in complex type is the ur-type (anyType)."""
    return (self.UrTypeDefinition() == self)
# bR:CTD
def _bindingRequires_vx (self, include_lax):
    """Complex type definitions depend on their base type definition and
    the type definitions of any local attribute declarations; when lax,
    also the type definitions of local element declarations and the
    content-type component."""
    assert self.__baseTypeDefinition is not None
    rv = { self.__baseTypeDefinition }
    rv.update(decl.typeDefinition()
              for decl in self.localScopedDeclarations()
              if include_lax or isinstance(decl, AttributeDeclaration))
    if include_lax:
        ct = self._contentTypeComponent()
        if ct is not None:
            rv.add(ct)
    return frozenset(rv)
# CFD:CTD CFD:ComplexTypeDefinition
@classmethod
def CreateFromDOM (cls, node, **kw):
    """Create a complex type definition from an XMLSchema complexType
    DOM node, then populate its content from the node's children."""
    # Node should be an XMLSchema complexType node
    assert xsd.nodeIsNamed(node, 'complexType')
    name = domutils.NodeAttribute(node, 'name')
    rv = cls(name=name, node=node, derivation_method=None, **kw)
    if name is None:
        assert not isinstance(rv.owner(), Schema)
    # Most of the time, the scope will be global.  It can be something
    # else only if this is an anonymous CTD (created within an element
    # declaration which itself may be global, in a containing CTD, or in a
    # model group).
    if not (rv._scopeIsGlobal() or rv.isAnonymous()):
        raise pyxb.LogicError('Attempt to create non-global complex type definition')
    # Declarations created below this point are owned by, and scoped to,
    # the new complex type.
    kw.pop('node', None)
    kw['owner'] = rv
    kw['scope'] = rv
    return rv.__setContentFromDOM(node, **kw)
# Expanded name of the base type, recorded until the base resolves.
__baseExpandedName = None
# Cached keyword arguments for components created during content processing.
__ckw = None
# DOM node for any anyAttribute child.
__anyAttribute = None
# Expanded names of referenced attribute groups.
__attributeGroupNames = None
# Attribute uses from attribute children (clause 1).
__usesC1 = None
# Combined clause 1 + clause 2 attribute uses, adapted for scope.
__usesC1C2 = None
# Resolved AttributeGroupDefinition instances.
__attributeGroups = None
__PrivateTransient.update(['ckw', 'anyAttribute', 'attributeGroupNames', 'usesC1', 'usesC1C2', 'attributeGroups' ])
# Handle attributeUses, attributeWildcard, contentType
def __completeProcessing (self, method, content_style):
    """Finish resolution: compute attribute uses and the attribute
    wildcard per the spec clauses, then record the derivation method
    (which marks this component resolved).

    method is DM_restriction or DM_extension; content_style is
    'simple' or 'complex' (see the clause-3 note below on why it is
    not otherwise consulted).  May queue this component for another
    resolution pass and return early when a dependency is not yet
    resolved.  Returns self.
    """
    if self.__usesC1C2 is None:
        # Handle clauses 1 and 2 (common between simple and complex types)
        uses_c1 = self.__usesC1 # attribute children
        uses_c2 = set() # attribute group children
        self.__attributeGroups = []
        for ag_en in self.__attributeGroupNames:
            agd = ag_en.attributeGroupDefinition()
            if agd is None:
                raise pyxb.SchemaValidationError('Attribute group %s cannot be found' % (ag_en,))
            if not agd.isResolved():
                self._queueForResolution('unresolved attribute group', depends_on=agd)
                return self
            self.__attributeGroups.append(agd)
            uses_c2.update(agd.attributeUses())
        uses_c1c2 = uses_c1.union(uses_c2)
        for au in uses_c1c2:
            if not au.isResolved():
                self._queueForResolution('attribute use not resolved')
                return self
            ad = au.attributeDeclaration()
            if not ad.isResolved():
                ad_en = ad.expandedName()
                self._queueForResolution('unresolved attribute declaration %s from base type' % (ad_en,), depends_on=ad)
                return self
        self.__usesC1C2 = frozenset([ _u._adaptForScope(self) for _u in uses_c1c2 ])
    # Handle clause 3.  Note the slight difference in description between
    # simple and complex content is just that the complex content doesn't
    # bother to check that the base type definition is a complex type
    # definition.  So the same code should work for both, and we don't
    # bother to check content_style.
    uses_c3 = set() # base attributes
    if isinstance(self.__baseTypeDefinition, ComplexTypeDefinition):
        # NB: The base type definition should be resolved, which means
        # that all its attribute uses have been adapted for scope already
        uses_c3 = set(self.__baseTypeDefinition.__attributeUses)
        assert self.__baseTypeDefinition.isResolved()
        for au in uses_c3:
            if not au.isResolved():
                self._queueForResolution('unresolved attribute use from base type', depends_on=au)
                return self
            ad = au.attributeDeclaration()
            if not ad.isResolved():
                ad_en = ad.expandedName()
                self._queueForResolution('unresolved attribute declaration %s from base type' % (ad_en,), depends_on=ad)
                return self
            assert not au.attributeDeclaration()._scopeIsIndeterminate()
    if self.DM_restriction == method:
        # Exclude attributes per clause 3.  Note that this process
        # handles both 3.1 and 3.2, since we have not yet filtered
        # uses_c1 for prohibited attributes.
        for au in self.__usesC1C2:
            matching_uses = au.matchingQNameMembers(uses_c3)
            assert matching_uses is not None
            # FIX: the original assertion message contained a bare %s
            # placeholder that was never interpolated; fill in the
            # attribute name so failures are diagnosable.
            assert 1 >= len(matching_uses), 'Multiple inherited attribute uses with name %s' % (au.attributeDeclaration().expandedName(),)
            for au2 in matching_uses:
                assert au2.isResolved()
                uses_c3.remove(au2)
                au._setRestrictionOf(au2)
    else:
        # In theory, the same attribute name can't appear in the base
        # and sub types because that would violate the local
        # declaration constraint.
        assert self.DM_extension == method
    use_map = { }
    for au in self.__usesC1C2.union(uses_c3):
        assert au.isResolved()
        ad_en = au.attributeDeclaration().expandedName()
        if ad_en in use_map:
            raise pyxb.SchemaValidationError('Multiple definitions for %s in CTD %s' % (ad_en, self.expandedName()))
        use_map[ad_en] = au
    # Past the last point where we might not resolve this instance.  Store
    # the attribute uses, also recording local attribute declarations.
    # (itervalues: this module is Python 2 code.)
    self.__attributeUses = frozenset(use_map.itervalues())
    if not self._scopeIsIndeterminate():
        for au in self.__attributeUses:
            assert not au.attributeDeclaration()._scopeIsIndeterminate(), 'indeterminate scope for %s' % (au,)
    # @todo: Handle attributeWildcard
    # Clause 1
    local_wildcard = None
    if self.__anyAttribute is not None:
        local_wildcard = Wildcard.CreateFromDOM(self.__anyAttribute)
    # Clause 2
    complete_wildcard = _AttributeWildcard_mixin.CompleteWildcard(self._namespaceContext(), self.__attributeGroups, local_wildcard)
    # Clause 3
    if self.DM_restriction == method:
        # Clause 3.1
        self._setAttributeWildcard(complete_wildcard)
    else:
        assert (self.DM_extension == method)
        assert self.baseTypeDefinition().isResolved()
        # 3.2.1
        base_wildcard = None
        if isinstance(self.baseTypeDefinition(), ComplexTypeDefinition):
            base_wildcard = self.baseTypeDefinition().attributeWildcard()
        # 3.2.2
        if base_wildcard is not None:
            if complete_wildcard is None:
                # 3.2.2.1.1
                self._setAttributeWildcard(base_wildcard)
            else:
                # 3.2.2.1.2
                self._setAttributeWildcard(Wildcard (process_contents=complete_wildcard.processContents(),
                                                     namespace_constraint = Wildcard.IntensionalUnion([complete_wildcard.namespaceConstraint(),
                                                                                                       base_wildcard.namespaceConstraint()]),
                                                     annotation=complete_wildcard.annotation(),
                                                     namespace_context=self._namespaceContext()))
        else:
            # 3.2.2.2
            self._setAttributeWildcard(complete_wildcard)
    # @todo: Make sure we didn't miss any child nodes
    # Remove local attributes we will never use again
    del self.__usesC1
    del self.__usesC1C2
    del self.__attributeGroups
    self.__ckw = None
    # Only now that we've succeeded do we store the method, which
    # marks this component resolved.
    self.__derivationMethod = method
    return self
def __simpleContent (self, method, **kw):
    """Determine the content type for a complexType with simpleContent,
    per the four clauses of Structures section 3.4.2.

    Returns a ( CT_SIMPLE, SimpleTypeDefinition ) pair or the parent's
    content type, or None if a required simple type is not yet resolved
    (caller re-queues resolution in that case)."""
    # Do content type
    if isinstance(self.__baseTypeDefinition, ComplexTypeDefinition):
        # Clauses 1, 2, and 3 might apply
        parent_content_type = self.__baseTypeDefinition.__contentType
        if ((type(parent_content_type) == tuple) \
            and (self.CT_SIMPLE == parent_content_type[0]) \
            and (self.DM_restriction == method)):
            # Clause 1: restriction of a simple-content complex type
            assert self.__ctscRestrictionNode is not None
            std = self.__ctscClause2STD
            if std is None:
                std = parent_content_type[1]
            assert isinstance(std, SimpleTypeDefinition)
            if not std.isResolved():
                return None
            restriction_node = self.__ctscRestrictionNode
            # Drop the cached state now that it has been consumed.
            self.__ctscClause2STD = None
            self.__ctscRestrictionNode = None
            return ( self.CT_SIMPLE, std._createRestriction(self, restriction_node) )
        if ((type(parent_content_type) == tuple) \
            and (self.CT_MIXED == parent_content_type[0]) \
            and parent_content_type[1].isEmptiable()):
            # Clause 2: restriction of emptiable mixed content
            assert isinstance(self.__ctscClause2STD, SimpleTypeDefinition)
            return ( self.CT_SIMPLE, self.__ctscClause2STD )
        # Clause 3
        return parent_content_type
    # Clause 4
    return ( self.CT_SIMPLE, self.__baseTypeDefinition )
# Simple type definition for clause 2 of complexType-with-simpleContent.
__ctscClause2STD = None
# DOM node of the restriction element for simple-content restriction.
__ctscRestrictionNode = None
__PrivateTransient.update(['ctscRestrictionNode' ])
# "Effective mixed" value (Structures section 3.4.2 definition 1).
__effectiveMixed = None
# "Effective content" value (Structures section 3.4.2 definition 2).
__effectiveContent = None
# Derivation method recorded at DOM-processing time; promoted to
# __derivationMethod only when resolution completes.
__pendingDerivationMethod = None
# True for complexContent (explicit or implicit), False for simpleContent.
__isComplexContent = None
def _isComplexContent (self):
    """Return whether this type uses complex (vs simple) content."""
    return self.__isComplexContent
__ctscRestrictionMode = None
# 'simple' or 'complex', set during _resolve.
__contentStyle = None
def __setComplexContentFromDOM (self, type_node, content_node, definition_node_list, method, **kw):
    """Compute "effective mixed" and "effective content" for complex
    content, per Structures section 3.4.2, and cache them for use by
    __complexContent during resolution."""
    # Do content type.  Cache the keywords that need to be used
    # for newly created schema components.
    ckw = kw.copy()
    ckw['namespace_context'] = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(type_node)
    # Definition 1: effective mixed.  A mixed attribute on the
    # complexContent wrapper takes precedence over one on complexType.
    mixed_attr = None
    if content_node is not None:
        mixed_attr = domutils.NodeAttribute(content_node, 'mixed')
    if mixed_attr is None:
        mixed_attr = domutils.NodeAttribute(type_node, 'mixed')
    if mixed_attr is not None:
        effective_mixed = datatypes.boolean(mixed_attr)
    else:
        effective_mixed = False
    # Definition 2: effective content.  The three 2.1.x predicates below
    # detect "no usable particle" situations.
    test_2_1_1 = True
    test_2_1_2 = False
    test_2_1_3 = False
    typedef_node = None
    for cn in definition_node_list:
        if Node.ELEMENT_NODE != cn.nodeType:
            continue
        if xsd.nodeIsNamed(cn, 'simpleContent', 'complexContent'):
            # Should have found the content node earlier.
            raise pyxb.LogicError('Missed explicit wrapper in complexType content')
        if Particle.IsTypedefNode(cn):
            typedef_node = cn
            test_2_1_1 = False
        if xsd.nodeIsNamed(cn, 'all', 'sequence') \
                and (not domutils.HasNonAnnotationChild(cn)):
            test_2_1_2 = True
        if xsd.nodeIsNamed(cn, 'choice') \
                and (not domutils.HasNonAnnotationChild(cn)):
            mo_attr = domutils.NodeAttribute(cn, 'minOccurs')
            if ((mo_attr is not None) \
                and (0 == datatypes.integer(mo_attr))):
                test_2_1_3 = True
    satisfied_predicates = 0
    if test_2_1_1:
        satisfied_predicates += 1
    if test_2_1_2:
        satisfied_predicates += 1
    if test_2_1_3:
        satisfied_predicates += 1
    if 1 == satisfied_predicates:
        if effective_mixed:
            # Clause 2.1.4: mixed with no particle gets an empty sequence
            assert (typedef_node is None) or test_2_1_2
            m = ModelGroup(compositor=ModelGroup.C_SEQUENCE, particles=[], **ckw)
            effective_content = Particle(m, **ckw)
        else:
            # Clause 2.1.5
            effective_content = self.CT_EMPTY
    else:
        # Clause 2.2
        assert typedef_node is not None
        effective_content = Particle.CreateFromDOM(typedef_node, **kw)
    # For issues related to soapenc:Array and the fact that PyXB
    # determines the content of types derived from it is empty, see
    # http://tech.groups.yahoo.com/group/soapbuilders/message/5879 and
    # lament the fact that the WSDL spec is not compatible with XSD.  It
    # is *not* an error in PyXB.
    self.__effectiveMixed = effective_mixed
    self.__effectiveContent = effective_content
    self.__ckw = ckw
def __complexContent (self, method):
    """Compute the content type for complex content, per clause 3 of
    Structures section 3.4.2.

    method is DM_restriction or DM_extension.  Returns CT_EMPTY or a
    ( CT_MIXED/CT_ELEMENT_ONLY, Particle ) pair."""
    ckw = self.__ckw
    # Shared from clause 3.1.2
    if self.__effectiveMixed:
        ct = self.CT_MIXED
    else:
        ct = self.CT_ELEMENT_ONLY
    # Clause 3
    if self.DM_restriction == method:
        # Clause 3.1
        if self.CT_EMPTY == self.__effectiveContent:
            # Clause 3.1.1
            content_type = self.CT_EMPTY # ASSIGN CT_EMPTY
        else:
            # Clause 3.1.2(.2)
            content_type = ( ct, self.__effectiveContent ) # ASSIGN RESTRICTION
            assert 0 == len(self.__scopedElementDeclarations)
            # Reference the parent element declarations; normally this
            # would happen naturally as a consequence of appending this
            # type's content model to the parent's, but with restriction
            # there is no such re-use unless we do this.
            self.__scopedElementDeclarations.update(self.__baseTypeDefinition.__scopedElementDeclarations)
    else:
        # Clause 3.2
        assert self.DM_extension == method
        assert self.__baseTypeDefinition.isResolved()
        parent_content_type = self.__baseTypeDefinition.contentType()
        if self.CT_EMPTY == self.__effectiveContent:
            content_type = parent_content_type # ASSIGN EXTENSION PARENT ONLY
        elif self.CT_EMPTY == parent_content_type:
            # Clause 3.2.2
            content_type = ( ct, self.__effectiveContent ) # ASSIGN EXTENSION LOCAL ONLY
        else:
            # Clause 3.2.3: parent content followed by local content
            assert type(parent_content_type) == tuple
            m = ModelGroup(compositor=ModelGroup.C_SEQUENCE, particles=[ parent_content_type[1], self.__effectiveContent ], **ckw)
            content_type = ( ct, Particle(m, **ckw) ) # ASSIGN EXTENSION PARENT AND LOCAL
    assert (self.CT_EMPTY == content_type) or ((type(content_type) == tuple) and (content_type[1] is not None))
    return content_type
def isResolved (self):
    """Indicate whether this complex type is fully defined.

    Built-in type definitions are resolved on creation.  A
    schema-defined type definition is held unresolved until the whole
    schema has been read, so that forward references to later
    type definitions can be satisfied; resolution happens once
    instances exist for every topLevel{Simple,Complex}Type.

    When a built-in type is also defined in a schema (as it should
    be), the built-in definition is kept and the schema-related
    information is copied over from the matching schema-defined
    definition, which the built-in one then replaces in the
    resolution list.  See Schema._addNamedComponent.
    """
    # Resolution is complete exactly when the derivation method is known.
    return self.__derivationMethod is not None
# Back door to allow the ur-type to re-resolve itself.  Only needed when
# we're generating bindings for XMLSchema itself.
def _setDerivationMethod (self, derivation_method):
    """Set the derivation method directly, which marks this component
    resolved (see isResolved).  Returns self."""
    self.__derivationMethod = derivation_method
    return self
def __setContentFromDOM (self, node, **kw):
    """Populate this complex type from the children of its complexType
    DOM node: block/final/abstract attributes, base type reference,
    derivation method, attribute children, and (for complex content)
    the effective content model.  Queues the instance for resolution
    and returns self."""
    schema = kw.get('schema')
    assert schema is not None
    self.__prohibitedSubstitutions = schema.blockForNode(node, self._DM_Map)
    self.__final = schema.finalForNode(node, self._DM_Map)
    attr_val = domutils.NodeAttribute(node, 'abstract')
    if attr_val is not None:
        self.__abstract = datatypes.boolean(attr_val)
    # Assume we're in the short-hand case: the entire content is
    # implicitly wrapped in a complex restriction of the ur-type.
    definition_node_list = node.childNodes
    is_complex_content = True
    self.__baseTypeDefinition = ComplexTypeDefinition.UrTypeDefinition()
    method = self.DM_restriction
    # Determine whether above assumption is correct by looking for
    # element content and seeing if it's one of the wrapper
    # elements.
    first_elt = domutils.LocateFirstChildElement(node)
    content_node = None
    clause2_std = None
    ctsc_restriction_node = None
    if first_elt:
        have_content = False
        if xsd.nodeIsNamed(first_elt, 'simpleContent'):
            have_content = True
            is_complex_content = False
        elif xsd.nodeIsNamed(first_elt, 'complexContent'):
            have_content = True
        else:
            # Not one of the wrappers; use implicit wrapper around
            # the children
            if not Particle.IsParticleNode(first_elt, 'attributeGroup', 'attribute', 'anyAttribute'):
                raise pyxb.SchemaValidationError('Unexpected element %s at root of complexType' % (first_elt.nodeName,))
        if have_content:
            # Repeat the search to verify that only the one child is present.
            content_node = domutils.LocateFirstChildElement(node, require_unique=True)
            assert content_node == first_elt
            # Identify the contained restriction or extension
            # element, and extract the base type.
            ions = domutils.LocateFirstChildElement(content_node, absent_ok=False)
            if xsd.nodeIsNamed(ions, 'restriction'):
                method = self.DM_restriction
                if not is_complex_content:
                    # Clause 2 of complex type with simple content
                    ctsc_restriction_node = ions
                    ions_st = domutils.LocateUniqueChild(ions,'simpleType')
                    if ions_st is not None:
                        clause2_std = SimpleTypeDefinition.CreateFromDOM(ions_st, **kw)
            elif xsd.nodeIsNamed(ions, 'extension'):
                method = self.DM_extension
            else:
                raise pyxb.SchemaValidationError('Expected restriction or extension as sole child of %s in %s' % (content_node.nodeName, self.name()))
            self.__baseExpandedName = domutils.NodeAttributeQName(ions, 'base')
            if self.__baseExpandedName is None:
                raise pyxb.SchemaValidationError('Element %s missing base attribute' % (ions.nodeName,))
            # Clear the assumed ur-type base; _resolve will look up the
            # named base type.
            self.__baseTypeDefinition = None
            # The content is defined by the restriction/extension element
            definition_node_list = ions.childNodes
    # deriviationMethod is assigned after resolution completes
    self.__pendingDerivationMethod = method
    self.__isComplexContent = is_complex_content
    self.__ctscRestrictionNode = ctsc_restriction_node
    self.__ctscClause2STD = clause2_std
    (attributes, attribute_group_names, any_attribute) = self._attributeRelevantChildren(definition_node_list)
    self.__usesC1 = set()
    for cn in attributes:
        au = AttributeUse.CreateFromDOM(cn, **kw)
        self.__usesC1.add(au)
    self.__attributeGroupNames = attribute_group_names
    self.__anyAttribute = any_attribute
    if self.__isComplexContent:
        self.__setComplexContentFromDOM(node, content_node, definition_node_list, self.__pendingDerivationMethod, **kw)
    # Creation does not attempt to do resolution.  Queue up the newly created
    # whatsis so we can resolve it after everything's been read in.
    self._annotationFromDOM(node)
    if not self.isResolved():
        self._queueForResolution('creation')
    return self
# Resolution of a CTD can be delayed for the following reasons:
#
# * It extends or restricts a base type that has not been resolved
#   [_resolve]
#
# * It refers to an attribute or attribute group that has not been
#   resolved [__completeProcessing]
#
# * It includes an attribute that matches in NCName and namespace
#   an unresolved attribute from the base type
#   [__completeProcessing]
#
# * The content model includes a particle which cannot be resolved
#   (so has not contributed any local element declarations).
# res:CTD
def _resolve (self):
    """Attempt to resolve this complex type: locate the base type,
    compute the content type, adapt the content particle for scope,
    and finish attribute processing.  Re-queues itself and returns
    early when any dependency is not yet resolved.  Returns self."""
    if self.isResolved():
        return self
    # @todo: implement prohibitedSubstitutions, final, annotations
    # See whether we've resolved through to the base type
    if self.__baseTypeDefinition is None:
        base_type = self.__baseExpandedName.typeDefinition()
        if base_type is None:
            raise pyxb.SchemaValidationError('Cannot locate %s: need import?' % (self.__baseExpandedName,))
        if not base_type.isResolved():
            # Have to delay resolution until the type this
            # depends on is available.
            self._queueForResolution('unresolved base type %s' % (self.__baseExpandedName,), depends_on=base_type)
            return self
        self.__baseTypeDefinition = base_type
    # Only build the content once.  This will not complete if the content
    # is a restriction of an unresolved simple type; otherwise, it only
    # depends on the base type which we know is good.
    if self.__contentType is None:
        if self.__isComplexContent:
            content_type = self.__complexContent(self.__pendingDerivationMethod)
            self.__contentStyle = 'complex'
        else:
            # The definition node list is not relevant to simple content
            content_type = self.__simpleContent(self.__pendingDerivationMethod)
            if content_type is None:
                self._queueForResolution('restriction of unresolved simple type')
                return self
            self.__contentStyle = 'simple'
        assert content_type is not None
        self.__contentType = content_type
    # Last chance for failure is if we haven't been able to
    # extract all the element declarations that might appear in
    # this complex type.  That technically wouldn't stop this from
    # being resolved, but it does prevent us from using it as a
    # context.
    if isinstance(self.__contentType, tuple) and isinstance(self.__contentType[1], Particle):
        prt = self.__contentType[1]
        if not prt.isAdaptable(self):
            self._queueForResolution('content particle %s is not deep-resolved' % (prt,))
            return self
        self.__contentType = (self.__contentType[0], prt._adaptForScope(self, self))
    return self.__completeProcessing(self.__pendingDerivationMethod, self.__contentStyle)
def pythonSupport (self):
    """Complex type definitions have no built-in type support.

    (The ur-type overrides this; see _UrTypeDefinition.)"""
    return None
def __str__ (self):
    """Render as CTD[name], or CTD{Anonymous}[hex id] for anonymous types."""
    if not self.isAnonymous():
        return 'CTD[%s]' % (self.expandedName(),)
    return 'CTD{Anonymous}[%x]' % (id(self),)
class _UrTypeDefinition (ComplexTypeDefinition, _Singleton_mixin):
    """Subclass ensures there is only one ur-type."""
    def _resolve (self):
        # The ur type is always resolved, except when it gets unresolved
        # through being updated from an instance read from the schema.
        return self._setDerivationMethod(self.DM_restriction)
    def pythonSupport (self):
        """The ur-type does have a Python class backing it up."""
        return datatypes.anyType
class AttributeGroupDefinition (_SchemaComponent_mixin, _NamedComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _Annotated_mixin, _AttributeWildcard_mixin):
    """An XMLSchema U{Attribute Group Definition<http://www.w3.org/TR/xmlschema-1/#cAttribute_Group_Definitions>} component."""
    __PrivateTransient = set()
    # A frozenset of AttributeUse instances
    __attributeUses = None
    def __init__ (self, *args, **kw):
        """Initialize through the mixin hierarchy."""
        super(AttributeGroupDefinition, self).__init__(*args, **kw)
        #assert 'scope' in kw
        #assert self._scopeIsIndeterminate()
    def __str__ (self):
        """Render as AGD[expanded name]."""
        return 'AGD[%s]' % (self.expandedName(),)
    @classmethod
    def CreateBaseInstance (cls, name, schema, attribute_uses):
        """Create an attribute declaration component for a specified namespace.

        The instance is created pre-resolved with the given attribute uses."""
        kw = { 'name' : name,
               'schema' : schema,
               'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
               'scope' : _ScopedDeclaration_mixin.SCOPE_global }
        bi = cls(**kw)
        bi.__attributeUses = frozenset(attribute_uses)
        bi.__isResolved = True
        return bi
    # DOM node for any anyAttribute child, and expanded names of
    # referenced attribute groups; consumed during _resolve.
    __anyAttribute = None
    __attributeGroupNames = None
    __PrivateTransient.update(['anyAttribute', 'attributeGroupNames'])
    # CFD:AGD CFD:AttributeGroupDefinition
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an attribute group definition from the given DOM node.
        """
        assert xsd.nodeIsNamed(node, 'attributeGroup')
        name = domutils.NodeAttribute(node, 'name')
        # Attribute group definitions can only appear at the top level of the
        # schema, and any definitions in them are scope indeterminate until
        # they're referenced in a complex type.
        kw.update({ 'scope' : _ScopedDeclaration_mixin.XSCOPE_indeterminate })
        rv = cls(name=name, node=node, **kw)
        rv._annotationFromDOM(node)
        # Attribute group definitions must not be references
        if domutils.NodeAttribute(node, 'ref'):
            raise pyxb.SchemaValidationError('Attribute reference at top level')
        kw.pop('node', None)
        kw['owner'] = rv
        (attributes, attribute_group_names, any_attribute) = rv._attributeRelevantChildren(node.childNodes)
        rv.__attributeUses = set()
        for cn in attributes:
            rv.__attributeUses.add(AttributeUse.CreateFromDOM(cn, **kw))
        rv.__attributeGroupNames = attribute_group_names
        rv.__anyAttribute = any_attribute
        # Unconditionally queue for resolution, to avoid repeating the
        # wildcard code.
        rv._queueForResolution('creation')
        return rv
    # Indicates whether we have resolved any references
    __isResolved = False
    def isResolved (self):
        """Return whether referenced attribute groups and the wildcard
        have been processed."""
        return self.__isResolved
    def _resolve (self):
        """Fold in attribute uses from referenced attribute groups and
        compute the complete attribute wildcard.  Returns self."""
        if self.__isResolved:
            return self
        uses = self.__attributeUses
        attribute_groups = []
        for ag_en in self.__attributeGroupNames:
            agd = ag_en.attributeGroupDefinition()
            if agd is None:
                raise pyxb.SchemaValidationError('Attribute group %s cannot be found' % (ag_en,))
            attribute_groups.append(agd)
            uses = uses.union(agd.attributeUses())
        self.__attributeUses = frozenset(uses)
        # "Complete wildcard" per CTD
        local_wildcard = None
        if self.__anyAttribute is not None:
            local_wildcard = Wildcard.CreateFromDOM(self.__anyAttribute)
        self._setAttributeWildcard(_AttributeWildcard_mixin.CompleteWildcard(self._namespaceContext(), attribute_groups, local_wildcard))
        self.__isResolved = True
        return self
    # bR:AGD
    def _bindingRequires_vx (self, include_lax):
        """Attribute group declarations require their uses, but only if lax."""
        if not include_lax:
            return frozenset()
        return frozenset(self.attributeUses())
    def attributeUses (self):
        """The frozenset of AttributeUse instances in this group."""
        return self.__attributeUses
class ModelGroupDefinition (_SchemaComponent_mixin, _NamedComponent_mixin, _Annotated_mixin):
    """An XMLSchema U{Model Group Definition<http://www.w3.org/TR/xmlschema-1/#cModel_Group_Definitions>} component."""
    # Reference to a _ModelGroup
    __modelGroup = None
    def modelGroup (self):
        """The model group for which this definition provides a name."""
        return self.__modelGroup
    # CFD:MGD CFD:ModelGroupDefinition
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create a Model Group Definition from a DOM element node.

        wxs is a Schema instance within which the model group is being
        defined.

        node is a DOM element.  The name must be 'group', and the node
        must be in the XMLSchema namespace.  The node must have a
        'name' attribute, and must not have a 'ref' attribute.
        """
        assert xsd.nodeIsNamed(node, 'group')
        assert domutils.NodeAttribute(node, 'ref') is None
        name = domutils.NodeAttribute(node, 'name')
        kw['scope'] = _ScopedDeclaration_mixin.XSCOPE_indeterminate
        rv = cls(name=name, node=node, **kw)
        rv._annotationFromDOM(node)
        kw.pop('node', None)
        kw['owner'] = rv
        for cn in node.childNodes:
            if Node.ELEMENT_NODE != cn.nodeType:
                continue
            if ModelGroup.IsGroupMemberNode(cn):
                # Only one all/choice/sequence child is permitted.
                assert not rv.__modelGroup
                # Model group definitions always occur at the top level of the
                # schema, so the elements declared in them are not bound to a
                # scope until they are referenced in a complex type.
                rv.__modelGroup = ModelGroup.CreateFromDOM(cn, model_group_definition=rv, **kw)
        assert rv.__modelGroup is not None
        return rv
    # bR:MGD
    def _bindingRequires_vx (self, include_lax):
        """Model group definitions depend on the contained model group."""
        if not include_lax:
            return frozenset()
        return frozenset([self.__modelGroup])
    def __str__ (self):
        """Render as MGD[name: model group]."""
        return 'MGD[%s: %s]' % (self.name(), self.modelGroup())
class ModelGroup (_ParticleTree_mixin, _SchemaComponent_mixin, _Annotated_mixin):
    """An XMLSchema U{Model Group<http://www.w3.org/TR/xmlschema-1/#cModel_Group>} component."""
    # Compositor constants corresponding to the xs:all, xs:choice, and
    # xs:sequence model group variants.
    C_INVALID = 0
    C_ALL = 0x01
    C_CHOICE = 0x02
    C_SEQUENCE = 0x03
    # One of the C_* values above.  Set at construction time from the
    # keyword parameter "compositor".
    __compositor = C_INVALID
    def compositor (self):
        """The C_* compositor constant for this group."""
        return self.__compositor
@classmethod
def CompositorToString (cls, compositor):
    """Map a compositor value to its XSD element name ('all', 'choice',
    or 'sequence'); any unrecognized value maps to 'invalid'."""
    names = { cls.C_ALL : 'all',
              cls.C_CHOICE : 'choice',
              cls.C_SEQUENCE : 'sequence' }
    return names.get(compositor, 'invalid')
def compositorToString (self):
    """Return a string representing the compositor value."""
    return self.CompositorToString(self.__compositor)
# A list of Particle instances.  Set at construction time from
# the keyword parameter "particles".
__particles = None
def particles (self):
    """The list of Particle instances in this group, in document order."""
    return self.__particles
def isAdaptable (self, ctd):
    """A model group has an unresolvable particle if any of its
    particles is unresolvable.  Duh."""
    return all(prt.isAdaptable(ctd) for prt in self.particles())
def effectiveTotalRange (self, particle):
    """Return the minimum and maximum of the number of elements that can
    appear in a sequence matched by this particle.

    particle is the Particle wrapping this group; its minOccurs and
    maxOccurs scale the group's own range.  A maximum of None denotes
    "unbounded".

    See U{http://www.w3.org/TR/xmlschema-1/#cos-seq-range}
    """
    if self.__compositor in (self.C_ALL, self.C_SEQUENCE):
        # all/sequence: ranges add across the particles.
        sum_minoccurs = 0
        sum_maxoccurs = 0
        for prt in self.__particles:
            (prt_min, prt_max) = prt.effectiveTotalRange()
            sum_minoccurs += prt_min
            if sum_maxoccurs is not None:
                if prt_max is None:
                    sum_maxoccurs = None
                else:
                    sum_maxoccurs += prt_max
        prod_maxoccurs = particle.maxOccurs()
        if prod_maxoccurs is not None:
            if sum_maxoccurs is None:
                prod_maxoccurs = None
            else:
                prod_maxoccurs *= sum_maxoccurs
        return (sum_minoccurs * particle.minOccurs(), prod_maxoccurs)
    # choice: range is the min of the minimums and max of the maximums.
    assert self.__compositor == self.C_CHOICE
    if 0 == len(self.__particles):
        min_minoccurs = 0
        max_maxoccurs = 0
    else:
        (min_minoccurs, max_maxoccurs) = self.__particles[0].effectiveTotalRange()
        for prt in self.__particles[1:]:
            (prt_min, prt_max) = prt.effectiveTotalRange()
            if prt_min < min_minoccurs:
                min_minoccurs = prt_min
            if prt_max is None:
                max_maxoccurs = None
            elif (max_maxoccurs is not None) and (prt_max > max_maxoccurs):
                max_maxoccurs = prt_max
    min_minoccurs *= particle.minOccurs()
    # NOTE(review): unlike the all/sequence branch, an unbounded
    # particle.maxOccurs() with a finite non-zero inner maximum leaves
    # max_maxoccurs finite here; confirm against cos-choice-range.
    if (max_maxoccurs is not None) and (particle.maxOccurs() is not None):
        max_maxoccurs *= particle.maxOccurs()
    return (min_minoccurs, max_maxoccurs)
# The ModelGroupDefinition that names this ModelGroup, or None if
# the ModelGroup is anonymous.  This is set at construction time
# from the keyword parameter "model_group_definition".
__modelGroupDefinition = None
def modelGroupDefinition (self):
    """The ModelGroupDefinition that names this group, or None if it is unnamed."""
    return self.__modelGroupDefinition
def __init__ (self, compositor, particles, *args, **kw):
    """Create a new model group.

    compositor must be a legal compositor value (one of C_ALL, C_CHOICE, C_SEQUENCE).

    particles must be a list of zero or more Particle instances.

    scope is the _ScopedDeclaration_mixin context into which new
    declarations are recorded.  It can be SCOPE_global, a complex
    type definition, or None if this is (or is within) a named
    model group.

    model_group_definition is an instance of ModelGroupDefinition
    if this is a named model group.  It defaults to None
    indicating a local group.
    """
    super(ModelGroup, self).__init__(*args, **kw)
    assert 'scope' in kw
    self.__compositor = compositor
    self.__particles = particles
    self.__modelGroupDefinition = kw.get('model_group_definition')
def hasWildcardElement (self):
    """Return True if the model includes a wildcard amongst its particles."""
    return any(prt.hasWildcardElement() for prt in self.particles())
# bR:MG
def _bindingRequires_vx (self, include_lax):
    """Model groups require their particles, but only when lax."""
    return frozenset(self.__particles) if include_lax else frozenset()
# CFD:MG CFD:ModelGroup
@classmethod
def CreateFromDOM (cls, node, **kw):
    """Create a model group from the given DOM node.

    wxs is a Schema instance within which the model group is being
    defined.

    node is a DOM element.  The name must be one of ( 'all',
    'choice', 'sequence' ), and the node must be in the XMLSchema
    namespace.

    scope is the _ScopeDeclaration_mxin context that is assigned
    to declarations that appear within the model group.  It can be
    None, indicating no scope defined, or a complex type
    definition.
    """
    scope = kw['scope']
    assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or isinstance(scope, ComplexTypeDefinition)
    if xsd.nodeIsNamed(node, 'all'):
        compositor = cls.C_ALL
    elif xsd.nodeIsNamed(node, 'choice'):
        compositor = cls.C_CHOICE
    else:
        assert xsd.nodeIsNamed(node, 'sequence')
        compositor = cls.C_SEQUENCE
    particles = []
    # Remove the owner from particle constructor arguments: we need to set it later
    kw.pop('owner', None)
    for cn in node.childNodes:
        if Node.ELEMENT_NODE != cn.nodeType:
            continue
        if Particle.IsParticleNode(cn):
            # NB: Ancestor of particle is set in the ModelGroup constructor
            particles.append(Particle.CreateFromDOM(node=cn, **kw))
        elif not xsd.nodeIsNamed(cn, 'annotation'):
            raise pyxb.SchemaValidationError('Unexpected element %s in model group' % (cn.nodeName,))
    rv = cls(compositor, particles, node=node, **kw)
    # Now that the group exists, claim ownership of its particles.
    for p in particles:
        p._setOwner(rv)
    rv._annotationFromDOM(node)
    return rv
    @classmethod
    def IsGroupMemberNode (cls, node):
        """Return True iff node is an XMLSchema element that introduces a model group (all/choice/sequence)."""
        return xsd.nodeIsNamed(node, 'all', 'choice', 'sequence')
    # aFS:MG
    def _adaptForScope (self, owner, ctd):
        """Return a version of this group adapted to the scope of complex type ctd.

        A clone is produced (and returned) only when the scope actually
        changes or when any contained particle had to be adapted; otherwise
        the original instance is returned unchanged.
        """
        rv = self
        assert isinstance(ctd, ComplexTypeDefinition)
        # Clone speculatively: the particles must be adapted against the
        # prospective new owner even if we end up discarding the clone.
        maybe_rv = self._clone(owner, ctd._objectOrigin())
        scoped_particles = [ _p._adaptForScope(maybe_rv, ctd) for _p in self.particles() ]
        do_clone = (self._scope() != ctd) or (self.particles() != scoped_particles)
        if do_clone:
            rv = maybe_rv
            rv.__particles = scoped_particles
        return rv
def _walkParticleTree (self, visit, arg):
visit(self, True, arg)
for p in self.particles():
p._walkParticleTree(visit, arg)
visit(self, False, arg)
def __str__ (self):
comp = None
if self.C_ALL == self.compositor():
comp = 'ALL'
elif self.C_CHOICE == self.compositor():
comp = 'CHOICE'
elif self.C_SEQUENCE == self.compositor():
comp = 'SEQUENCE'
return '%s:(%s)' % (comp, u",".join( [ unicode(_p) for _p in self.particles() ] ) )
class Particle (_ParticleTree_mixin, _SchemaComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin):
    """An XMLSchema U{Particle<http://www.w3.org/TR/xmlschema-1/#cParticle>} component."""
    # The minimum number of times the term may appear.
    __minOccurs = 1
    def minOccurs (self):
        """The minimum number of times the term may appear.

        Defaults to 1."""
        return self.__minOccurs
    # Upper limit on number of times the term may appear.
    __maxOccurs = 1
    def maxOccurs (self):
        """Upper limit on number of times the term may appear.

        If None, the term may appear any number of times; otherwise,
        this is an integral value indicating the maximum number of times
        the term may appear.  The default value is 1; the value, unless
        None, must always be at least minOccurs().
        """
        return self.__maxOccurs
    # A reference to a ModelGroup, WildCard, or ElementDeclaration
    __term = None
    def term (self):
        """A reference to a ModelGroup, Wildcard, or ElementDeclaration."""
        return self.__term
    # Resolution bookkeeping: the component a "ref" will resolve to,
    # the expanded name from the ref attribute, and the class of
    # component (ModelGroup or ElementDeclaration) the ref must name.
    __pendingTerm = None
    __refExpandedName = None
    __resolvableType = None
    def effectiveTotalRange (self):
        """Extend the concept of effective total range to all particles.

        See U{http://www.w3.org/TR/xmlschema-1/#cos-seq-range} and
        U{http://www.w3.org/TR/xmlschema-1/#cos-choice-range}
        """
        # Model groups compute the range from their particles scaled by
        # this particle's occurrence bounds; other terms use the bounds
        # directly.
        if isinstance(self.__term, ModelGroup):
            return self.__term.effectiveTotalRange(self)
        return (self.minOccurs(), self.maxOccurs())
    def isEmptiable (self):
        """Return C{True} iff this particle can legitimately match an empty
        sequence (no content).

        See U{http://www.w3.org/TR/xmlschema-1/#cos-group-emptiable}
        """
        return 0 == self.effectiveTotalRange()[0]
    def hasWildcardElement (self):
        """Return True iff this particle has a wildcard in its term.

        Note that the wildcard may be in a nested model group."""
        return self.term().hasWildcardElement()
    def __init__ (self, term, *args, **kw):
        """Create a particle from the given DOM node.

        term is a XML Schema Component: one of ModelGroup,
        ElementDeclaration, and Wildcard.

        The following keyword arguments are processed:

        min_occurs is a non-negative integer value with default 1,
        denoting the minimum number of terms required by the content
        model.

        max_occurs is a positive integer value with default 1, or None
        indicating unbounded, denoting the maximum number of terms
        allowed by the content model.

        scope is the _ScopeDeclaration_mxin context that is assigned
        to declarations that appear within the particle.  It can be
        None, indicating no scope defined, or a complex type
        definition.

        @raise pyxb.LogicError: min_occurs exceeds a non-None max_occurs
        """
        super(Particle, self).__init__(*args, **kw)
        min_occurs = kw.get('min_occurs', 1)
        max_occurs = kw.get('max_occurs', 1)
        assert 'scope' in kw
        assert (self._scopeIsIndeterminate()) or isinstance(self._scope(), ComplexTypeDefinition)
        # term may be None when created via CreateFromDOM with a "ref"
        # that must be resolved later; leave __term unset in that case.
        if term is not None:
            self.__term = term
        assert isinstance(min_occurs, (types.IntType, types.LongType))
        self.__minOccurs = min_occurs
        assert (max_occurs is None) or isinstance(max_occurs, (types.IntType, types.LongType))
        self.__maxOccurs = max_occurs
        if self.__maxOccurs is not None:
            if self.__minOccurs > self.__maxOccurs:
                raise pyxb.LogicError('Particle minOccurs %s is greater than maxOccurs %s on creation' % (min_occurs, max_occurs))
    # res:Particle
    def _resolve (self):
        """Resolve a deferred "ref" into the actual term component.

        Looks the reference up in the appropriate namespace category
        (model group definitions or element declarations) depending on
        __resolvableType, which CreateFromDOM recorded."""
        if self.isResolved():
            return self
        # @RESOLUTION@
        if ModelGroup == self.__resolvableType:
            group_decl = self.__refExpandedName.modelGroupDefinition()
            if group_decl is None:
                raise pyxb.SchemaValidationError('Model group reference %s cannot be found' % (self.__refExpandedName,))
            self.__pendingTerm = group_decl.modelGroup()
            assert self.__pendingTerm is not None
        elif ElementDeclaration == self.__resolvableType:
            # 3.9.2 says use 3.3.2, which is Element.  The element inside a
            # particle is a localElement, so we either get the one it refers
            # to (which is top-level), or create a local one here.
            if self.__refExpandedName is not None:
                assert self.__pendingTerm is None
                self.__pendingTerm = self.__refExpandedName.elementDeclaration()
                if self.__pendingTerm is None:
                    raise pyxb.SchemaValidationError('Unable to locate element referenced by %s' % (self.__refExpandedName,))
            assert self.__pendingTerm is not None
            # Whether this is a local declaration or one pulled in from the
            # global type definition symbol space, its name is now reserved in
            # this type.
            assert self.__pendingTerm is not None
        else:
            assert False
        self.__term = self.__pendingTerm
        assert self.__term is not None
        return self
    def isResolved (self):
        """A particle is resolved once its term has been determined."""
        return self.__term is not None
    # CFD:Particle
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create a particle from the given DOM node.

        wxs is a Schema instance within which the model group is being
        defined.

        node is a DOM element.  The name must be one of ( 'group',
        'element', 'any', 'all', 'choice', 'sequence' ), and the node
        must be in the XMLSchema namespace.

        scope is the _ScopeDeclaration_mxin context that is assigned
        to declarations that appear within the model group.  It can be
        None, indicating no scope defined, or a complex type
        definition.
        """
        scope = kw['scope']
        assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or isinstance(scope, ComplexTypeDefinition)
        # Defaults per the schema spec; overridden below if the DOM node
        # carries explicit minOccurs/maxOccurs attributes.
        kw.update({ 'min_occurs' : 1
                  , 'max_occurs' : 1
                  , 'node' : node })
        if not Particle.IsParticleNode(node):
            raise pyxb.LogicError('Attempted to create particle from illegal element %s' % (node.nodeName,))
        attr_val = domutils.NodeAttribute(node, 'minOccurs')
        if attr_val is not None:
            kw['min_occurs'] = datatypes.nonNegativeInteger(attr_val)
        attr_val = domutils.NodeAttribute(node, 'maxOccurs')
        if attr_val is not None:
            if 'unbounded' == attr_val:
                kw['max_occurs'] = None
            else:
                kw['max_occurs'] = datatypes.nonNegativeInteger(attr_val)
        # Create the particle with term=None; the term is attached (or
        # deferred for resolution) below based on the node's tag.
        rv = cls(None, **kw)
        kw.pop('node', None)
        kw['owner'] = rv
        rv.__refExpandedName = domutils.NodeAttributeQName(node, 'ref')
        rv.__pendingTerm = None
        rv.__resolvableType = None
        if xsd.nodeIsNamed(node, 'group'):
            # 3.9.2 says use 3.8.2, which is ModelGroup.  The group
            # inside a particle is a groupRef.  If there is no group
            # with that name, this throws an exception as expected.
            if rv.__refExpandedName is None:
                raise pyxb.SchemaValidationError('group particle without reference')
            rv.__resolvableType = ModelGroup
        elif xsd.nodeIsNamed(node, 'element'):
            if rv.__refExpandedName is None:
                # Local element declaration: create it here, in the target
                # namespace determined for this node.
                schema = kw.get('schema')
                assert schema is not None
                target_namespace = schema.targetNamespaceForNode(node, ElementDeclaration)
                incoming_tns = kw.get('target_namespace')
                if incoming_tns is not None:
                    assert incoming_tns == target_namespace
                else:
                    kw['target_namespace'] = target_namespace
                rv.__term = ElementDeclaration.CreateFromDOM(node=node, **kw)
            else:
                # NOTE: 3.3.3 clause 2.2 specifies that if ref is used, all
                # the other configuration attributes like nillable and default
                # must be absent.
                for tag in ('nillable', 'default', 'fixed', 'form', 'block', 'type'):
                    av = domutils.NodeAttribute(node, tag)
                    if av is not None:
                        raise pyxb.SchemaValidationError('element with "ref" cannot have "%s"' % (tag,))
                rv.__resolvableType = ElementDeclaration
                assert not xsd.nodeIsNamed(node.parentNode, 'schema')
        elif xsd.nodeIsNamed(node, 'any'):
            # 3.9.2 says use 3.10.2, which is Wildcard.
            rv.__term = Wildcard.CreateFromDOM(node=node)
        elif ModelGroup.IsGroupMemberNode(node):
            # Choice, sequence, and all inside a particle are explicit
            # groups (or a restriction of explicit group, in the case
            # of all)
            rv.__term = ModelGroup.CreateFromDOM(node, **kw)
        else:
            raise pyxb.LogicError('Unhandled node in Particle.CreateFromDOM: %s' % (node.toxml("utf-8"),))
        if not rv.isResolved():
            rv._queueForResolution('creation')
        return rv
    # bR:PRT
    def _bindingRequires_vx (self, include_lax):
        """Binding dependencies for a particle: its term, but only under lax inclusion."""
        if not include_lax:
            return frozenset()
        return frozenset([ self.__term ])
    # aFS:PRT
    def _adaptForScope (self, owner, ctd):
        """Return a version of this particle adapted to the scope of ctd.

        Clones only when the scope changes or the term itself had to be
        adapted; otherwise returns self unchanged."""
        rv = self
        assert isinstance(ctd, ComplexTypeDefinition)
        maybe_rv = self._clone(owner, ctd._objectOrigin())
        term = rv.__term._adaptForScope(maybe_rv, ctd)
        do_clone = (self._scope() != ctd) or (rv.__term != term)
        if do_clone:
            rv = maybe_rv
            rv.__term = term
        return rv
    def isAdaptable (self, ctd):
        """A particle has an unresolvable particle if it cannot be
        resolved, or if it has resolved to a term which is a model
        group that has an unresolvable particle.

        Returns True iff this particle is resolved and its term is
        adaptable for ctd (i.e., no unresolvable particle remains).
        """
        if not self.isResolved():
            return False
        return self.term().isAdaptable(ctd)
    def walkParticleTree (self, visit, arg):
        """The entry-point to walk a particle tree defining a content model.

        See L{_ParticleTree_mixin._walkParticleTree}."""
        self._walkParticleTree(visit, arg)
    def _walkParticleTree (self, visit, arg):
        # Entry visit, descend into the term, exit visit.
        visit(self, True, arg)
        self.__term._walkParticleTree(visit, arg)
        visit(self, False, arg)
    @classmethod
    def IsTypedefNode (cls, node):
        """Return True iff node is a group-defining schema element."""
        return xsd.nodeIsNamed(node, 'group', 'all', 'choice', 'sequence')
    @classmethod
    def IsParticleNode (cls, node, *others):
        """Return True iff node is a schema element that produces a particle (optionally extended by others)."""
        return xsd.nodeIsNamed(node, 'group', 'all', 'choice', 'sequence', 'element', 'any', *others)
    def __str__ (self):
        # NOTE(review): the term is deliberately rendered as the literal
        # 'TERM' (see the commented-out variant) — presumably to avoid
        # verbose or recursive output; confirm before "fixing".
        #return 'PART{%s:%d,%s}' % (self.term(), self.minOccurs(), self.maxOccurs())
        return 'PART{%s:%d,%s}[%x]' % ('TERM', self.minOccurs(), self.maxOccurs(), id(self))
# 3.10.1
class Wildcard (_ParticleTree_mixin, _SchemaComponent_mixin, _Annotated_mixin):
    """An XMLSchema U{Wildcard<http://www.w3.org/TR/xmlschema-1/#cParticle>} component."""
    NC_any = '##any'            #<<< The namespace constraint "##any"
    NC_not = '##other'          #<<< A flag indicating constraint "##other"
    NC_targetNamespace = '##targetNamespace'
    NC_local = '##local'
    __namespaceConstraint = None
    def namespaceConstraint (self):
        """A constraint on the namespace for the wildcard.

        Valid values are:

         - L{Wildcard.NC_any}
         - A tuple ( L{Wildcard.NC_not}, a_namespace )
         - set(of_namespaces)

        Note that namespace are represented by
        L{Namespace<pyxb.namespace.Namespace>} instances, not the URIs that
        actually define a namespace.  Absence of a namespace is represented by
        C{None}, both in the "not" pair and in the set.
        """
        return self.__namespaceConstraint
    @classmethod
    def IntensionalUnion (cls, constraints):
        """http://www.w3.org/TR/xmlschema-1/#cos-aw-union

        Fold the list of constraints into a single constraint following the
        spec's numbered clauses.  NOTE: destructively pops elements off the
        caller's list.
        """
        assert 0 < len(constraints)
        o1 = constraints.pop(0)
        while 0 < len(constraints):
            o2 = constraints.pop(0)
            # 1
            if (o1 == o2):
                continue
            # 2
            if (cls.NC_any == o1) or (cls.NC_any == o2):
                o1 = cls.NC_any
                continue
            # 3
            if isinstance(o1, set) and isinstance(o2, set):
                o1 = o1.union(o2)
                continue
            # 4
            if (isinstance(o1, tuple) and isinstance(o2, tuple)) and (o1[1] != o2[1]):
                o1 = ( cls.NC_not, None )
                continue
            # At this point, one must be a negated namespace and the
            # other a set.  Identify them.
            c_tuple = None
            c_set = None
            if isinstance(o1, tuple):
                assert isinstance(o2, set)
                c_tuple = o1
                c_set = o2
            else:
                assert isinstance(o1, set)
                assert isinstance(o2, tuple)
                c_tuple = o2
                c_set = o1
            negated_ns = c_tuple[1]
            if negated_ns is not None:
                # 5.1
                if (negated_ns in c_set) and (None in c_set):
                    o1 = cls.NC_any
                    continue
                # 5.2
                if negated_ns in c_set:
                    o1 = ( cls.NC_not, None )
                    continue
                # 5.3
                if None in c_set:
                    raise pyxb.SchemaValidationError('Union of wildcard namespace constraints not expressible')
                o1 = c_tuple
                continue
            # 6
            if None in c_set:
                o1 = cls.NC_any
            else:
                o1 = ( cls.NC_not, None )
        return o1
    @classmethod
    def IntensionalIntersection (cls, constraints):
        """http://www.w3.org/TR/xmlschema-1/#cos-aw-intersect

        Fold the list of constraints into a single constraint following the
        spec's numbered clauses.  NOTE: destructively pops elements off the
        caller's list, and may mutate set members of it.
        """
        assert 0 < len(constraints)
        o1 = constraints.pop(0)
        while 0 < len(constraints):
            o2 = constraints.pop(0)
            # 1
            if (o1 == o2):
                continue
            # 2
            if (cls.NC_any == o1) or (cls.NC_any == o2):
                # Keep whichever operand is not ##any.
                if cls.NC_any == o1:
                    o1 = o2
                continue
            # 4
            if isinstance(o1, set) and isinstance(o2, set):
                o1 = o1.intersection(o2)
                continue
            if isinstance(o1, tuple) and isinstance(o2, tuple):
                ns1 = o1[1]
                ns2 = o2[1]
                # 5
                if (ns1 is not None) and (ns2 is not None) and (ns1 != ns2):
                    raise pyxb.SchemaValidationError('Intersection of wildcard namespace constraints not expressible')
                # 6
                assert (ns1 is None) or (ns2 is None)
                if ns1 is None:
                    assert ns2 is not None
                    o1 = ( cls.NC_not, ns2 )
                else:
                    assert ns1 is not None
                    o1 = ( cls.NC_not, ns1 )
                continue
            # 3
            # At this point, one must be a negated namespace and the
            # other a set.  Identify them.
            c_tuple = None
            c_set = None
            if isinstance(o1, tuple):
                assert isinstance(o2, set)
                c_tuple = o1
                c_set = o2
            else:
                assert isinstance(o1, set)
                assert isinstance(o2, tuple)
                c_tuple = o2
                c_set = o1
            negated_ns = c_tuple[1]
            # Remove the negated namespace (and the absent namespace) from
            # the set; what remains is the intersection.
            if negated_ns in c_set:
                c_set.remove(negated_ns)
            if None in c_set:
                c_set.remove(None)
            o1 = c_set
        return o1
    PC_skip = 'skip'            #<<< No constraint is applied
    PC_lax = 'lax'              #<<< Validate against available uniquely determined declaration
    PC_strict = 'strict'        #<<< Validate against declaration or xsi:type which must be available
    # One of PC_*
    __processContents = None
    def processContents (self):
        """The processContents value for this wildcard: one of PC_skip, PC_lax, PC_strict."""
        return self.__processContents
    def hasWildcardElement (self):
        """Return True, since Wildcard components are wildcards."""
        return True
    def __init__ (self, *args, **kw):
        """Create a wildcard.

        Required keywords: namespace_constraint (see namespaceConstraint())
        and process_contents (one of the PC_* values)."""
        assert 0 == len(args)
        super(Wildcard, self).__init__(*args, **kw)
        self.__namespaceConstraint = kw['namespace_constraint']
        self.__processContents = kw['process_contents']
    def isAdaptable (self, ctd):
        """Wildcards are always adaptable."""
        return True
    def _walkParticleTree (self, visit, arg):
        # A wildcard is a leaf; a single visit with None (neither entry
        # nor exit) is issued.
        visit(self, None, arg)
    # aFS:WC
    def _adaptForScope (self, owner, ctd):
        """Wildcards are scope-independent; return self"""
        return self
    # CFD:Wildcard
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create a wildcard from an xs:any or xs:anyAttribute DOM node.

        The namespace attribute is translated into one of the constraint
        forms documented on namespaceConstraint(); processContents defaults
        to 'strict' per the schema specification."""
        namespace_context = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node)
        assert xsd.nodeIsNamed(node, 'any', 'anyAttribute')
        nc = domutils.NodeAttribute(node, 'namespace')
        if nc is None:
            namespace_constraint = cls.NC_any
        else:
            if cls.NC_any == nc:
                namespace_constraint = cls.NC_any
            elif cls.NC_not == nc:
                namespace_constraint = ( cls.NC_not, namespace_context.targetNamespace() )
            else:
                # A whitespace-separated list of namespace tokens.
                ncs = set()
                for ns_uri in nc.split():
                    if cls.NC_local == ns_uri:
                        ncs.add(None)
                    elif cls.NC_targetNamespace == ns_uri:
                        ncs.add(namespace_context.targetNamespace())
                    else:
                        ncs.add(pyxb.namespace.NamespaceForURI(ns_uri, create_if_missing=True))
                namespace_constraint = frozenset(ncs)
        pc = domutils.NodeAttribute(node, 'processContents')
        if pc is None:
            process_contents = cls.PC_strict
        else:
            if pc in [ cls.PC_skip, cls.PC_lax, cls.PC_strict ]:
                process_contents = pc
            else:
                raise pyxb.SchemaValidationError('illegal value "%s" for any processContents attribute' % (pc,))
        rv = cls(node=node, namespace_constraint=namespace_constraint, process_contents=process_contents, **kw)
        rv._annotationFromDOM(node)
        return rv
# 3.11.1
class IdentityConstraintDefinition (_SchemaComponent_mixin, _NamedComponent_mixin, _Annotated_mixin, pyxb.namespace.resolution._Resolvable_mixin):
    """An XMLSchema U{Identity Constraint Definition<http://www.w3.org/TR/xmlschema-1/#cIdentity-constraint_Definitions>} component."""
    # Category constants for the constraint kind.
    ICC_KEY = 0x01
    ICC_KEYREF = 0x02
    ICC_UNIQUE = 0x04
    __identityConstraintCategory = None
    def identityConstraintCategory (self):
        """One of ICC_KEY, ICC_KEYREF, ICC_UNIQUE."""
        return self.__identityConstraintCategory
    # The xpath expression from the selector child element.
    __selector = None
    def selector (self):
        return self.__selector
    # List of xpath expressions, one per field child element.
    __fields = None
    def fields (self):
        return self.__fields
    # For keyref: the referenced key constraint (set at resolution) and
    # the raw "refer" attribute value it is looked up from.
    __referencedKey = None
    __referAttribute = None
    __icc = None
    __annotations = None
    def annotations (self):
        return self.__annotations
    # CFD:ICD CFD:IdentityConstraintDefinition
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an identity constraint from a key/keyref/unique DOM node."""
        name = domutils.NodeAttribute(node, 'name')
        scope = kw['scope']
        assert _ScopedDeclaration_mixin.ScopeIsIndeterminate(scope) or _ScopedDeclaration_mixin.IsValidScope(scope)
        rv = cls(name=name, node=node, **kw)
        kw.pop('node', None)
        kw['owner'] = rv
        #self._annotationFromDOM(node)
        # key and unique are resolved immediately; keyref must wait until
        # the referenced key can be located.
        rv.__isResolved = True
        icc = None
        if xsd.nodeIsNamed(node, 'key'):
            icc = rv.ICC_KEY
        elif xsd.nodeIsNamed(node, 'keyref'):
            icc = rv.ICC_KEYREF
            rv.__referAttribute = domutils.NodeAttribute(node, 'refer')
            if rv.__referAttribute is None:
                raise pyxb.SchemaValidationError('Require refer attribute on keyref elements')
            rv.__isResolved = False
        elif xsd.nodeIsNamed(node, 'unique'):
            icc = rv.ICC_UNIQUE
        else:
            raise pyxb.LogicError('Unexpected identity constraint node %s' % (node.toxml("utf-8"),))
        rv.__icc = icc
        # NOTE(review): if the selector child is absent, cn is None here and
        # NodeAttribute would likely fail before the validation error below
        # can be raised — confirm LocateUniqueChild/NodeAttribute behavior.
        cn = domutils.LocateUniqueChild(node, 'selector')
        rv.__selector = domutils.NodeAttribute(cn, 'xpath')
        if rv.__selector is None:
            raise pyxb.SchemaValidationError('selector element missing xpath attribute')
        rv.__fields = []
        for cn in domutils.LocateMatchingChildren(node, 'field'):
            xp_attr = domutils.NodeAttribute(cn, 'xpath')
            if xp_attr is None:
                raise pyxb.SchemaValidationError('field element missing xpath attribute')
            rv.__fields.append(xp_attr)
        rv._annotationFromDOM(node)
        rv.__annotations = []
        if rv.annotation() is not None:
            # NOTE(review): this appends the constraint itself rather than
            # rv.annotation(); later entries are Annotation instances, so the
            # list holds mixed types — looks deliberate in upstream PyXB but
            # verify before relying on the element types.
            rv.__annotations.append(rv)
        for cn in node.childNodes:
            if (Node.ELEMENT_NODE != cn.nodeType):
                continue
            an = None
            if xsd.nodeIsNamed(cn, 'selector', 'field'):
                # Annotations nested beneath selector/field elements.
                an = domutils.LocateUniqueChild(cn, 'annotation')
            elif xsd.nodeIsNamed(cn, 'annotation'):
                an = cn
            if an is not None:
                rv.__annotations.append(Annotation.CreateFromDOM(an, **kw))
        rv.__identityConstraintCategory = icc
        if rv.ICC_KEYREF != rv.__identityConstraintCategory:
            # key and unique constraints are registered so keyrefs can
            # locate them by name.
            rv._namespaceContext().targetNamespace().addCategoryObject('identityConstraintDefinition', rv.name(), rv)
        if not rv.isResolved():
            rv._queueForResolution('creation')
        return rv
    __isResolved = False
    def isResolved (self):
        return self.__isResolved
    # res:ICD res:IdentityConstraintDefinition
    def _resolve (self):
        """Resolve a keyref constraint by locating its referenced key.

        Re-queues itself if the referenced constraint is not yet available."""
        if self.isResolved():
            return self
        icc = self.__icc
        if self.ICC_KEYREF == icc:
            refer_en = self._namespaceContext().interpretQName(self.__referAttribute)
            refer = refer_en.identityConstraintDefinition()
            if refer is None:
                self._queueForResolution('Identity constraint definition %s cannot be found' % (refer_en,), depends_on=refer)
                return self
            self.__referencedKey = refer
        self.__isResolved = True
        return self
    # bR:ICD
    def _bindingRequires_vx (self, include_lax):
        """Constraint definitions that are by reference require the referenced constraint."""
        rv = set()
        if include_lax and (self.__referencedKey is not None):
            rv.add(self.__referencedKey)
        return frozenset(rv)
# 3.12.1
class NotationDeclaration (_SchemaComponent_mixin, _NamedComponent_mixin, _Annotated_mixin):
    """An XMLSchema U{Notation Declaration<http://www.w3.org/TR/xmlschema-1/#cNotation_Declarations>} component."""
    # Value of the xs:notation "system" attribute, or None
    __systemIdentifier = None
    def systemIdentifier (self):
        """The system identifier of the notation, or None if absent."""
        return self.__systemIdentifier
    # Value of the xs:notation "public" attribute, or None
    __publicIdentifier = None
    def publicIdentifier (self):
        """The public identifier of the notation, or None if absent."""
        return self.__publicIdentifier
    # CFD:ND CFD:NotationDeclaration
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Build a notation declaration from an xs:notation DOM element."""
        rv = cls(name=domutils.NodeAttribute(node, 'name'), node=node, **kw)
        rv.__systemIdentifier = domutils.NodeAttribute(node, 'system')
        rv.__publicIdentifier = domutils.NodeAttribute(node, 'public')
        rv._annotationFromDOM(node)
        return rv
    # bR:ND
    def _bindingRequires_vx (self, include_lax):
        """Notation declarations introduce no binding dependencies."""
        return frozenset()
# 3.13.1
class Annotation (_SchemaComponent_mixin):
    """An XMLSchema U{Annotation<http://www.w3.org/TR/xmlschema-1/#cAnnotation>} component."""
    # The "appinfo" material for this annotation, or None
    __applicationInformation = None
    def applicationInformation (self):
        return self.__applicationInformation
    # The "documentation" material for this annotation, or None
    __userInformation = None
    def userInformation (self):
        return self.__userInformation
    # Define so superclasses can take keywords
    def __init__ (self, **kw):
        application_information = kw.pop('application_information', None)
        user_information = kw.pop('user_information', None)
        super(Annotation, self).__init__(**kw)
        # A bare (non-list) value is normalized to a singleton list of
        # its unicode form.
        if not ((user_information is None) or isinstance(user_information, list)):
            user_information = [ unicode(user_information) ]
        if not ((application_information is None) or isinstance(application_information, list)):
            application_information = [ unicode(application_information) ]
        self.__userInformation = user_information
        self.__applicationInformation = application_information
    # Per 3.13.2 this is apparently where attribute information items
    # from the annotation element (those outside the XMLSchema
    # namespace) would be recorded.  Not currently populated.
    __attributes = None
    # CFD:Annotation
    @classmethod
    def CreateFromDOM (cls, node, **kw):
        """Create an annotation component from an xs:annotation DOM node."""
        rv = cls(node=node, **kw)
        # @todo:: Scan for attributes in the node itself that do not
        # belong to the XMLSchema namespace.
        # Node should be an XMLSchema annotation node
        assert xsd.nodeIsNamed(node, 'annotation')
        # Partition the relevant children; anything else is ignored.
        app_info = []
        user_info = []
        for cn in node.childNodes:
            if xsd.nodeIsNamed(cn, 'appinfo'):
                app_info.append(cn)
            elif xsd.nodeIsNamed(cn, 'documentation'):
                user_info.append(cn)
        if app_info:
            rv.__applicationInformation = app_info
        if user_info:
            rv.__userInformation = user_info
        return rv
    # Matches runs of two or more double-quote characters.
    __RemoveMultiQuote_re = re.compile('""+')
    def asDocString (self):
        """Return the text in a form suitable for embedding in a
        triple-double-quoted docstring.

        Any sequence of two or more double quotes is replaced by a sequence of
        single quotes that is the same length.  Following this, spaces are
        added at the start and the end as necessary to ensure a double quote
        does not appear in those positions."""
        rv = self.text()
        # Each matched run of quotes becomes an equal-length run of
        # apostrophes.
        rv = self.__RemoveMultiQuote_re.sub(lambda _mo: "'" * len(_mo.group(0)), rv)
        if rv.startswith('"'):
            rv = ' ' + rv
        if rv.endswith('"'):
            rv = rv + ' '
        return rv
    def text (self):
        """Return the combined character data of all documentation elements."""
        if self.__userInformation is None:
            return ''
        # Values in userInformation are DOM "documentation" elements;
        # collect the text nodes directly beneath each.
        return ''.join([ cn.data
                         for dn in self.__userInformation
                         for cn in dn.childNodes
                         if Node.TEXT_NODE == cn.nodeType ])
    def __str__ (self):
        """Return the catenation of all user information elements in the
        annotation as a single unicode string.  Returns the empty string if
        there are no user information elements."""
        return self.text()
# Section 3.14.
class SimpleTypeDefinition (_SchemaComponent_mixin, _NamedComponent_mixin, pyxb.namespace.resolution._Resolvable_mixin, _Annotated_mixin):
"""An XMLSchema U{Simple Type Definition<http://www.w3.org/TR/xmlschema-1/#Simple_Type_Definitions>} component."""
    # Reference to the SimpleTypeDefinition on which this is based.
    # The value must be non-None except for the simple ur-type
    # definition.
    __baseTypeDefinition = None
    def baseTypeDefinition (self):
        """The SimpleTypeDefinition from which this type derives (None only for the simple ur-type)."""
        return self.__baseTypeDefinition
__memberTypes = None
__itemTypeExpandedName = None
__baseExpandedName = None
__memberTypesExpandedNames = None
__localFacets = None
# A map from a subclass of facets.Facet to an instance of that class.
# Presence of a facet class as a key in this map is the indicator that the
# type definition and its subtypes are permitted to use the corresponding
# facet. All facets in force for this type are present in the map,
# including those constraints inherited parent types.
__facets = None
def facets (self):
assert (self.__facets is None) or (type(self.__facets) == types.DictType)
return self.__facets
    # The facets.FundamentalFacet instances that describe this type
    __fundamentalFacets = None
    def fundamentalFacets (self):
        """A frozenset of instances of facets.FundamentalFacet."""
        return self.__fundamentalFacets
STD_empty = 0 #<<< Marker indicating an empty set of STD forms
STD_extension = 0x01 #<<< Representation for extension in a set of STD forms
STD_list = 0x02 #<<< Representation for list in a set of STD forms
STD_restriction = 0x04 #<<< Representation of restriction in a set of STD forms
STD_union = 0x08 #<<< Representation of union in a set of STD forms
_STD_Map = { 'extension' : STD_extension
, 'list' : STD_list
, 'restriction' : STD_restriction
, 'union' : STD_union }
# Bitmask defining the subset that comprises the final property
__final = STD_empty
@classmethod
def _FinalToString (cls, final_value):
"""Convert a final value to a string."""
tags = []
if final_value & cls.STD_extension:
tags.append('extension')
if final_value & cls.STD_list:
tags.append('list')
if final_value & cls.STD_restriction:
tags.append('restriction')
if final_value & cls.STD_union:
tags.append('union')
return ' '.join(tags)
    VARIETY_absent = 0x01  #<<< Only used for the ur-type
    VARIETY_atomic = 0x02  #<<< Use for types based on a primitive type
    VARIETY_list = 0x03    #<<< Use for lists of atomic-variety types
    VARIETY_union = 0x04   #<<< Use for types that aggregate other types
    # Derivation alternative
    _DA_empty = 'none specified'
    _DA_restriction = 'restriction'
    _DA_list = 'list'
    _DA_union = 'union'
    def _derivationAlternative (self):
        """How this type was derived: one of the _DA_* values."""
        return self.__derivationAlternative
    __derivationAlternative = None
    # Identify the sort of value collection this holds.  This field is
    # used to identify unresolved definitions.
    __variety = None
    def variety (self):
        """One of the VARIETY_* values, or None if the definition is unresolved."""
        return self.__variety
@classmethod
def VarietyToString (cls, variety):
"""Convert a variety value to a string."""
if cls.VARIETY_absent == variety:
return 'absent'
if cls.VARIETY_atomic == variety:
return 'atomic'
if cls.VARIETY_list == variety:
return 'list'
if cls.VARIETY_union == variety:
return 'union'
return '?NoVariety?'
# For atomic variety only, the root (excepting ur-type) type.
__primitiveTypeDefinition = None
def primitiveTypeDefinition (self, throw_if_absent=True):
if throw_if_absent:
assert self.VARIETY_atomic == self.variety()
if self.__primitiveTypeDefinition is None:
raise pyxb.LogicError('Expected primitive type for %s in %s', self, self.targetNamespace())
return self.__primitiveTypeDefinition
    # For list variety only, the type of items in the list
    __itemTypeDefinition = None
    def itemTypeDefinition (self):
        """The type of the items in this list type.

        Valid only for types with list variety; raises pyxb.LogicError if
        the item type has not been established."""
        assert self.VARIETY_list == self.variety()
        if self.__itemTypeDefinition is None:
            raise pyxb.LogicError('Expected item type')
        return self.__itemTypeDefinition
    # For union variety only, the sequence of candidate members
    __memberTypeDefinitions = None
    def memberTypeDefinitions (self):
        """The candidate member types of this union type.

        Valid only for types with union variety; raises pyxb.LogicError if
        the member types have not been established."""
        assert self.VARIETY_union == self.variety()
        if self.__memberTypeDefinitions is None:
            raise pyxb.LogicError('Expected member types')
        return self.__memberTypeDefinitions
    # bR:STD
    def _bindingRequires_vx (self, include_lax):
        """Implement base class method.

        This STD depends on its baseTypeDefinition, unless its variety
        is absent.  Other dependencies are on item, primitive, or
        member type definitions."""
        type_definitions = set()
        if self != self.baseTypeDefinition():
            type_definitions.add(self.baseTypeDefinition())
        if self.VARIETY_absent == self.variety():
            # The ur-type depends on nothing; discard any base added above.
            type_definitions = set()
        elif self.VARIETY_atomic == self.variety():
            if self != self.primitiveTypeDefinition():
                type_definitions.add(self.primitiveTypeDefinition())
        elif self.VARIETY_list == self.variety():
            assert self != self.itemTypeDefinition()
            type_definitions.add(self.itemTypeDefinition())
        else:
            assert self.VARIETY_union == self.variety()
            assert self not in self.memberTypeDefinitions()
            type_definitions.update(self.memberTypeDefinitions())
        # NB: This type also depends on the value type definitions for
        # any facets that apply to it.  This fact only matters when
        # generating the datatypes_facets source.  That, and the fact
        # that there are dependency loops (e.g., integer requires a
        # nonNegativeInteger for its length facet) means we don't
        # bother adding in those.
        return frozenset(type_definitions)
    # A non-property field that holds a reference to the DOM node from
    # which the type is defined.  The value is held only between the
    # point where the simple type definition instance is created until
    # the point it is resolved.
    __domNode = None
    # Indicate that this instance was defined as a built-in rather
    # than from a DOM instance.
    __isBuiltin = False
    # Allocate one of these.  Users should use one of the Create*
    # factory methods instead.
    def __init__ (self, *args, **kw):
        """Create a simple type definition.

        The keyword 'variety' is required (one of the VARIETY_* values, or
        None for an as-yet-unresolved definition)."""
        super(SimpleTypeDefinition, self).__init__(*args, **kw)
        self.__variety = kw['variety']
    def __setstate__ (self, state):
        """Extend base class unpickle support to retain link between
        this instance and the Python class that it describes.

        This is because the pythonSupport value is a class reference,
        not an instance reference, so it wasn't deserialized, and its
        class member link was never set.
        """
        # Fall back to plain __dict__ update when no superclass defines
        # __setstate__.
        super_fn = getattr(super(SimpleTypeDefinition, self), '__setstate__', lambda _state: self.__dict__.update(_state))
        super_fn(state)
        if self.__pythonSupport is not None:
            # Re-establish the binding-class back-link lost in pickling.
            self.__pythonSupport._SimpleTypeDefinition(self)
    def __str__ (self):
        """Render the type name, its derivation summary, and any facets."""
        if self.name() is not None:
            elts = [ self.name(), ':' ]
        else:
            elts = [ '<anonymous>:' ]
        if self.VARIETY_absent == self.variety():
            elts.append('the ur-type')
        elif self.VARIETY_atomic == self.variety():
            elts.append('restriction of %s' % (self.baseTypeDefinition().name(),))
        elif self.VARIETY_list == self.variety():
            elts.append('list of %s' % (self.itemTypeDefinition().name(),))
        elif self.VARIETY_union == self.variety():
            elts.append('union of %s' % (u" ".join([unicode(_mtd.name()) for _mtd in self.memberTypeDefinitions()],)))
        else:
            # Gets here if the type has not been resolved.
            elts.append('?')
            #raise pyxb.LogicError('Unexpected variety %s' % (self.variety(),))
        if self.__facets:
            felts = []
            for (k, v) in self.__facets.iteritems():
                if v is not None:
                    felts.append(unicode(v))
            elts.append(u"\n %s" % (','.join(felts),))
        if self.__fundamentalFacets:
            elts.append("\n ")
            elts.append(u','.join( [unicode(_f) for _f in self.__fundamentalFacets ]))
        return 'STD[%s]' % (''.join(elts),)
    def _updateFromOther_csc (self, other):
        """Override fields in this instance with those from the other.

        This method is invoked only by Schema._addNamedComponent, and
        then only when a built-in type collides with a schema-defined
        type.  Material like facets is not (currently) held in the
        built-in copy, so the DOM information is copied over to the
        built-in STD, which is subsequently re-resolved.

        Returns self.
        """
        assert self != other
        assert self.isNameEquivalent(other)
        super(SimpleTypeDefinition, self)._updateFromOther_csc(other)
        # The other STD should be an unresolved schema-defined type.
        assert other.__baseTypeDefinition is None, 'Update from resolved STD %s' % (other,)
        assert other.__domNode is not None
        self.__domNode = other.__domNode
        # Preserve the python support
        if other.__pythonSupport is not None:
            # @todo: ERROR multiple references
            self.__pythonSupport = other.__pythonSupport
        # Mark this instance as unresolved so it is re-examined
        self.__variety = None
        return self
def isBuiltin (self):
    """Indicate whether this simple type is a built-in type.

    Set by __resolveBuiltin for types constructed through the
    Create*Instance class methods; False for schema-defined types."""
    return self.__isBuiltin
# Cached singleton instance of the simple ur-type (xs:anySimpleType).
__SimpleUrTypeDefinition = None
@classmethod
def SimpleUrTypeDefinition (cls, schema=None, in_builtin_definition=False):
    """Create the SimpleTypeDefinition instance that approximates the simple ur-type.

    See section 3.14.7.  The instance is created at most once; on the
    first call a non-None schema is required so the type can be placed
    in the correct namespace."""
    #if in_builtin_definition and (cls.__SimpleUrTypeDefinition is not None):
    #    raise pyxb.LogicError('Multiple definitions of SimpleUrType')
    if cls.__SimpleUrTypeDefinition is None:
        # Note: We use a singleton subclass
        assert schema is not None
        ns_ctx = schema.targetNamespace().initialNamespaceContext()
        kw = { 'name' : 'anySimpleType',
               'schema' : schema,
               'namespace_context' : ns_ctx,
               'binding_namespace' : schema.targetNamespace(),
               'variety' : cls.VARIETY_absent,
               'scope' : _ScopedDeclaration_mixin.SCOPE_global }
        bi = _SimpleUrTypeDefinition(**kw)
        bi._setPythonSupport(datatypes.anySimpleType)
        # The baseTypeDefinition is the ur-type.
        bi.__baseTypeDefinition = ComplexTypeDefinition.UrTypeDefinition()
        bi.__derivationAlternative = cls._DA_restriction
        # The simple ur-type has an absent variety, not an atomic
        # variety, so does not have a primitiveTypeDefinition
        # No facets on the ur type
        bi.__facets = {}
        bi.__fundamentalFacets = frozenset()
        bi.__resolveBuiltin()
        cls.__SimpleUrTypeDefinition = bi
    return cls.__SimpleUrTypeDefinition
@classmethod
def _CreateXMLInstance (cls, name, schema):
    """Create STD instances for built-in types in the XML namespace.

    For example, xml:space is a restriction of NCName; xml:lang is a union.

    @param name: the local name of the XML-namespace attribute type
        (only C{'space'} and C{'lang'} are implemented)
    @param schema: the schema in which the instance is created
    @raise pyxb.IncompleteImplementationError: C{name} is not supported
    """
    from pyxb.binding import xml_
    kw = { 'schema' : schema,
           'binding_namespace' : schema.targetNamespace(),
           'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
           'scope' : _ScopedDeclaration_mixin.SCOPE_global,
           'variety' : cls.VARIETY_atomic }
    if 'space' == name:
        bi = cls(**kw)
        bi.__derivationAlternative = cls._DA_restriction
        bi.__baseTypeDefinition = datatypes.NCName.SimpleTypeDefinition()
        # Restriction of an atomic type re-uses the base's primitive type.
        bi.__primitiveTypeDefinition = bi.__baseTypeDefinition.__primitiveTypeDefinition
        bi._setPythonSupport(xml_.STD_ANON_space)
        bi.setNameInBinding('STD_ANON_space')
    elif 'lang' == name:
        bi = cls(**kw)
        bi.__baseTypeDefinition = cls.SimpleUrTypeDefinition()
        bi.__memberTypes = [ datatypes.language.SimpleTypeDefinition() ]
        bi.__derivationAlternative = cls._DA_union
        # NOTE(review): self-referential primitive type for a union looks
        # unusual — presumably intentional here; confirm against pyxb history.
        bi.__primitiveTypeDefinition = bi
        bi._setPythonSupport(xml_.STD_ANON_lang)
        bi.setNameInBinding('STD_ANON_lang')
    else:
        raise pyxb.IncompleteImplementationError('No implementation for xml:%s' % (name,))
    # Collect the constraining facets declared on the binding class.
    bi.__facets = { }
    for v in bi.pythonSupport().__dict__.itervalues():
        if isinstance(v, facets.ConstrainingFacet):
            bi.__facets[v.__class__] = v
    return bi
@classmethod
def CreatePrimitiveInstance (cls, name, schema, python_support):
    """Create a primitive simple type in the target namespace.

    This is mainly used to pre-load standard built-in primitive
    types, such as those defined by XMLSchema Datatypes.  You can
    use it for your own schemas as well, if you have special types
    that require explicit support for Pythonic conversion.

    All parameters are required and must be non-None.
    """
    kw = { 'name' : name,
           'schema' : schema,
           'binding_namespace' : schema.targetNamespace(),
           'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
           'scope' : _ScopedDeclaration_mixin.SCOPE_global,
           'variety' : cls.VARIETY_atomic }
    bi = cls(**kw)
    bi._setPythonSupport(python_support)
    # Primitive types are based on the ur-type, and have
    # themselves as their primitive type definition.
    bi.__derivationAlternative = cls._DA_restriction
    bi.__baseTypeDefinition = cls.SimpleUrTypeDefinition()
    bi.__primitiveTypeDefinition = bi
    # Primitive types are built-in
    bi.__resolveBuiltin()
    assert bi.isResolved()
    return bi
@classmethod
def CreateDerivedInstance (cls, name, schema, parent_std, python_support):
    """Create a derived simple type in the target namespace.

    This is used to pre-load standard built-in derived types.  You
    can use it for your own schemas as well, if you have special
    types that require explicit support for Pythonic conversion.
    """
    assert parent_std
    # Only restrictions of absent/atomic-variety types are supported here.
    assert parent_std.__variety in (cls.VARIETY_absent, cls.VARIETY_atomic)
    kw = { 'name' : name,
           'schema' : schema,
           'binding_namespace' : schema.targetNamespace(),
           'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
           'scope' : _ScopedDeclaration_mixin.SCOPE_global,
           'variety' : parent_std.__variety }
    bi = cls(**kw)
    bi._setPythonSupport(python_support)
    # We were told the base type.  If this is atomic, we re-use
    # its primitive type.  Note that these all may be in different
    # namespaces.
    bi.__baseTypeDefinition = parent_std
    bi.__derivationAlternative = cls._DA_restriction
    if cls.VARIETY_atomic == bi.__variety:
        bi.__primitiveTypeDefinition = bi.__baseTypeDefinition.__primitiveTypeDefinition
    # Derived types are built-in
    bi.__resolveBuiltin()
    return bi
@classmethod
def CreateListInstance (cls, name, schema, item_std, python_support):
    """Create a list simple type in the target namespace.

    This is used to preload standard built-in list types.  You can
    use it for your own schemas as well, if you have special types
    that require explicit support for Pythonic conversion; but
    note that such support is identified by the item_std.
    """
    kw = { 'name' : name,
           'schema' : schema,
           'binding_namespace' : schema.targetNamespace(),
           'namespace_context' : schema.targetNamespace().initialNamespaceContext(),
           'scope' : _ScopedDeclaration_mixin.SCOPE_global,
           'variety' : cls.VARIETY_list }
    bi = cls(**kw)
    bi._setPythonSupport(python_support)
    # The base type is the ur-type.  We were given the item type.
    bi.__baseTypeDefinition = cls.SimpleUrTypeDefinition()
    assert item_std
    bi.__itemTypeDefinition = item_std
    # List types are built-in
    bi.__resolveBuiltin()
    return bi
@classmethod
def CreateUnionInstance (cls, name, schema, member_stds):
    """(Placeholder) Create a union simple type in the target namespace.

    This function has not been implemented; built-in union types are
    not supported.

    @raise pyxb.IncompleteImplementationError: always
    """
    raise pyxb.IncompleteImplementationError('No support for built-in union types')
def __singleSimpleTypeChild (self, body, other_elts_ok=False):
    """Locate the unique xs:simpleType element child of C{body}.

    @param body: DOM element whose children are scanned
    @param other_elts_ok: when C{True}, non-simpleType element children
        are silently skipped; otherwise they are a validation error
    @return: the single xs:simpleType child element
    @raise pyxb.SchemaValidationError: a disallowed element was found,
        or no xs:simpleType child is present
    """
    found = None
    for child in body.childNodes:
        # Ignore text, comment, and other non-element nodes.
        if Node.ELEMENT_NODE != child.nodeType:
            continue
        if xsd.nodeIsNamed(child, 'simpleType'):
            assert not found
            found = child
        elif not other_elts_ok:
            raise pyxb.SchemaValidationError('Context requires element to be xs:simpleType')
    if found is None:
        raise pyxb.SchemaValidationError('Content requires an xs:simpleType member (or a base attribute)')
    return found
# The __initializeFrom* methods are responsible for identifying
# the variety and the baseTypeDefinition.  The remainder of the
# resolution is performed by the __completeResolution method.
# Note that in some cases resolution might yet be premature, so
# variety is not saved until it is complete.  All this stuff is
# from section 3.14.2.
def __initializeFromList (self, body, **kw):
    """Begin resolution of an xs:list simple type.

    The base type of a list is always the simple ur-type; the item
    type comes either from the C{itemType} attribute or from a nested
    anonymous xs:simpleType child."""
    self.__baseTypeDefinition = self.SimpleUrTypeDefinition()
    self.__itemTypeExpandedName = domutils.NodeAttributeQName(body, 'itemType')
    if self.__itemTypeExpandedName is None:
        # NOTE: The newly created anonymous item type will
        # not be resolved; the caller needs to handle
        # that.
        self.__itemTypeDefinition = self.CreateFromDOM(self.__singleSimpleTypeChild(body), **kw)
    return self.__completeResolution(body, self.VARIETY_list, self._DA_list)
def __initializeFromRestriction (self, body, **kw):
    """Begin resolution of an xs:restriction simple type.

    The base comes either from the C{base} attribute (recorded by
    expanded name and looked up during completion) or from a nested
    anonymous xs:simpleType child.  The variety is inherited from the
    base, so None is passed to __completeResolution."""
    if self.__baseTypeDefinition is None:
        self.__baseExpandedName = domutils.NodeAttributeQName(body, 'base')
        if self.__baseExpandedName is None:
            # Facet elements may appear alongside the nested simpleType.
            self.__baseTypeDefinition = self.CreateFromDOM(self.__singleSimpleTypeChild(body, other_elts_ok=True), **kw)
    return self.__completeResolution(body, None, self._DA_restriction)
# Anonymous member types declared as xs:simpleType children of the union.
__localMemberTypes = None
def __initializeFromUnion (self, body, **kw):
    """Begin resolution of an xs:union simple type.

    Member types come from the C{memberTypes} attribute (recorded as
    expanded names for later lookup) and/or nested anonymous
    xs:simpleType children.  Local member types are created only once
    so repeated resolution attempts do not duplicate them."""
    self.__baseTypeDefinition = self.SimpleUrTypeDefinition()
    mta = domutils.NodeAttribute(body, 'memberTypes')
    self.__memberTypesExpandedNames = None
    if mta is not None:
        nsc = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(body)
        self.__memberTypesExpandedNames = [ nsc.interpretQName(_mten) for _mten in mta.split() ]
    if self.__localMemberTypes is None:
        self.__localMemberTypes = []
        for cn in body.childNodes:
            if (Node.ELEMENT_NODE == cn.nodeType) and xsd.nodeIsNamed(cn, 'simpleType'):
                self.__localMemberTypes.append(self.CreateFromDOM(cn, **kw))
    return self.__completeResolution(body, self.VARIETY_union, self._DA_union)
def __resolveBuiltin (self):
    """Finish construction of a built-in type instance.

    Collects the constraining facets declared on the Python support
    class (binding its owner if not already bound), and marks the
    type as built-in.  Returns self."""
    if self.hasPythonSupport():
        self.__facets = { }
        for v in self.pythonSupport().__dict__.itervalues():
            if isinstance(v, facets.ConstrainingFacet):
                self.__facets[v.__class__] = v
                # Bind the facet to this type if the binding class did not.
                if v.ownerTypeDefinition() is None:
                    v.setFromKeywords(_constructor=True, owner_type_definition=self)
    self.__isBuiltin = True
    return self
def __defineDefaultFacets (self, variety):
    """Create facets for varieties that can take facets that are undeclared.

    This means unions, which per section 4.1.2.3 of
    http://www.w3.org/TR/xmlschema-2/ can have enumeration or
    pattern restrictions.  Returns self."""
    if variety == self.VARIETY_union:
        # Register the implicitly-permitted facet classes as
        # unconstrained (mapped to None) without clobbering any
        # existing entries.
        for facet_class in (facets.CF_pattern, facets.CF_enumeration):
            self.__facets.setdefault(facet_class)
    return self
def __processHasFacetAndProperty (self, variety):
    """Identify the facets and properties for this stype.

    This method simply identifies the facets that apply to this
    specific type, and records property values.  Only
    explicitly-associated facets and properties are stored; others
    from base types will also affect this type.  The information
    is taken from the applicationInformation children of the
    definition's annotation node, if any.  If there is no support
    for the XMLSchema_hasFacetAndProperty namespace, this is a
    no-op.

    Upon return, self.__facets is a map from the class for an
    associated fact to None, and self.__fundamentalFacets is a
    frozenset of instances of FundamentalFacet.

    The return value is self.
    """
    self.__facets = { }
    self.__fundamentalFacets = frozenset()
    # Without an annotation there is nothing to scan; fall back to the
    # variety-based defaults.
    if self.annotation() is None:
        return self.__defineDefaultFacets(variety)
    app_info = self.annotation().applicationInformation()
    if app_info is None:
        return self.__defineDefaultFacets(variety)
    facet_map = { }
    fundamental_facets = set()
    seen_facets = set()
    for ai in app_info:
        for cn in ai.childNodes:
            if Node.ELEMENT_NODE != cn.nodeType:
                continue
            if pyxb.namespace.XMLSchema_hfp.nodeIsNamed(cn, 'hasFacet'):
                facet_name = domutils.NodeAttribute(cn, 'name')# , pyxb.namespace.XMLSchema_hfp)
                if facet_name is None:
                    raise pyxb.SchemaValidationError('hasFacet missing name attribute in %s' % (cn,))
                if facet_name in seen_facets:
                    raise pyxb.SchemaValidationError('Multiple hasFacet specifications for %s' % (facet_name,))
                seen_facets.add(facet_name)
                facet_class = facets.ConstrainingFacet.ClassForFacet(facet_name)
                #facet_map[facet_class] = facet_class(base_type_definition=self)
                # The facet is applicable but unconstrained at this point.
                facet_map[facet_class] = None
            if pyxb.namespace.XMLSchema_hfp.nodeIsNamed(cn, 'hasProperty'):
                fundamental_facets.add(facets.FundamentalFacet.CreateFromDOM(cn, self))
    if 0 < len(facet_map):
        # hfp annotations only appear on primitive types, whose base is
        # the simple ur-type.
        assert self.__baseTypeDefinition == self.SimpleUrTypeDefinition()
        self.__facets = facet_map
        assert type(self.__facets) == types.DictType
    if 0 < len(fundamental_facets):
        self.__fundamentalFacets = frozenset(fundamental_facets)
    return self
# NB: Must be done after resolution of the base type
def __updateFacets (self, body):
    """Populate self.__localFacets and self.__facets from the DOM body.

    Facet elements found among the children of C{body} become locally
    constrained facet instances; the final facet map merges these with
    the (already resolved) base type's facets."""
    # Create local list consisting of facet classes matched in children
    # and the map of keywords used to initialize the local instance.
    local_facets = {}
    for fc in facets.Facet.Facets:
        children = domutils.LocateMatchingChildren(body, fc.Name())
        if 0 < len(children):
            fi = fc(base_type_definition=self.__baseTypeDefinition,
                    owner_type_definition=self)
            if isinstance(fi, facets._LateDatatype_mixin):
                fi.bindValueDatatype(self)
            for cn in children:
                kw = { 'annotation': domutils.LocateUniqueChild(cn, 'annotation') }
                for ai in range(0, cn.attributes.length):
                    attr = cn.attributes.item(ai)
                    # Convert name from unicode to string
                    kw[unicode(attr.localName)] = attr.value
                try:
                    fi.setFromKeywords(**kw)
                except pyxb.PyXBException as e:
                    raise pyxb.SchemaValidationError('Error assigning facet %s in %s: %s' % (fc.Name(), self.expandedName(), e))
            local_facets[fc] = fi
    self.__localFacets = local_facets
    # We want a map from the union of the facet classes from this STD up
    # through its baseTypeDefinition (if present).  Map elements should be
    # to None if the facet has not been constrained, or to the nearest
    # ConstrainingFacet instance if it is.  ConstrainingFacet instances
    # created for local constraints also need a pointer to the
    # corresponding facet from the ancestor type definition, because those
    # constraints also affect this type.
    base_facets = {}
    # Built-ins didn't get their facets() setting configured, so use the
    # _FacetMap() instead.
    if self.__baseTypeDefinition.isBuiltin():
        pstd = self.__baseTypeDefinition.pythonSupport()
        if pstd != datatypes.anySimpleType:
            base_facets.update(pstd._FacetMap())
    elif self.__baseTypeDefinition.facets():
        assert type(self.__baseTypeDefinition.facets()) == types.DictType
        base_facets.update(self.__baseTypeDefinition.facets())
    base_facets.update(self.facets())
    # NB: __facets aliases __localFacets here; the setdefault calls below
    # intentionally fill the local map with inherited entries.
    self.__facets = self.__localFacets
    for fc in base_facets.iterkeys():
        self.__facets.setdefault(fc, base_facets[fc])
    assert type(self.__facets) == types.DictType
def _createRestriction (self, owner, body):
    """Create a new simple type with this as its base.

    The type is owned by the provided owner, and may have facet
    restrictions defined by the body.

    @param owner: the owner for the newly created type
    @type owner: L{ComplexTypeDefinition}
    @param body: the DOM node from which facet information will be extracted
    @type body: C{xml.dom.Node}
    @rtype: L{SimpleTypeDefinition}
    """
    std = SimpleTypeDefinition(owner=owner, namespace_context=owner._namespaceContext(), variety=None, scope=self._scope(), schema=owner._schema())
    std.__baseTypeDefinition = self
    # Variety None: inherit the variety from this (the base) type.
    return std.__completeResolution(body, None, self._DA_restriction)
# Complete the resolution of some variety of STD.  Note that the
# variety is compounded by an alternative, since there is no
# 'restriction' variety.
def __completeResolution (self, body, variety, alternative):
    """Finish resolving this simple type.

    Looks up the base type if only its expanded name was recorded,
    fills in the variety-specific fields (primitive type, item type,
    or member types), then applies facets.  If a type this one depends
    on is not yet resolved, the type is re-queued and self is returned
    with __variety still None (i.e. still unresolved).

    @param variety: the variety to assign, or None to inherit the
        base type's variety (restriction case)
    @param alternative: one of the _DA_* derivation alternatives
    """
    assert self.__variety is None
    if self.__baseTypeDefinition is None:
        assert self.__baseExpandedName is not None
        base_type = self.__baseExpandedName.typeDefinition()
        if not isinstance(base_type, SimpleTypeDefinition):
            raise pyxb.SchemaValidationError('Unable to locate base type %s' % (self.__baseExpandedName,))
        self.__baseTypeDefinition = base_type
    # If the base type exists but has not yet been resolved,
    # delay processing this type until the one it depends on
    # has been completed.
    assert self.__baseTypeDefinition != self
    if not self.__baseTypeDefinition.isResolved():
        self._queueForResolution('base type %s is not resolved' % (self.__baseTypeDefinition,), depends_on=self.__baseTypeDefinition)
        return self
    if variety is None:
        # 3.14.1 specifies that the variety is the variety of the base
        # type definition which, by the way, can't be the ur type.
        variety = self.__baseTypeDefinition.__variety
    assert variety is not None
    if self.VARIETY_absent == variety:
        # The ur-type is always resolved.  So are restrictions of it,
        # which is how we might get here.
        pass
    elif self.VARIETY_atomic == variety:
        # Atomic types (and their restrictions) use the primitive
        # type, which is the highest type that is below the
        # ur-type (which is not atomic).
        ptd = self
        while isinstance(ptd, SimpleTypeDefinition) and (self.VARIETY_atomic == ptd.__baseTypeDefinition.variety()):
            ptd = ptd.__baseTypeDefinition
        self.__primitiveTypeDefinition = ptd
    elif self.VARIETY_list == variety:
        if self._DA_list == alternative:
            if self.__itemTypeExpandedName is not None:
                self.__itemTypeDefinition = self.__itemTypeExpandedName.typeDefinition()
                if not isinstance(self.__itemTypeDefinition, SimpleTypeDefinition):
                    raise pyxb.SchemaValidationError('Unable to locate STD %s for items' % (self.__itemTypeExpandedName,))
        elif self._DA_restriction == alternative:
            # Restriction of a list inherits its item type.
            self.__itemTypeDefinition = self.__baseTypeDefinition.__itemTypeDefinition
        else:
            raise pyxb.LogicError('completeResolution list variety with alternative %s' % (alternative,))
    elif self.VARIETY_union == variety:
        if self._DA_union == alternative:
            # First time we try to resolve, create the member type
            # definitions.  If something later prevents us from resolving
            # this type, we don't want to create them again, because we
            # might already have references to them.
            if self.__memberTypeDefinitions is None:
                mtd = []
                # If present, first extract names from memberTypes,
                # and add each one to the list
                if self.__memberTypesExpandedNames is not None:
                    for mn_en in self.__memberTypesExpandedNames:
                        # THROW if type has not been defined
                        std = mn_en.typeDefinition()
                        if std is None:
                            raise pyxb.SchemaValidationError('Unable to locate member type %s' % (mn_en,))
                        # Note: We do not need these to be resolved (here)
                        assert isinstance(std, SimpleTypeDefinition)
                        mtd.append(std)
                # Now look for local type definitions
                mtd.extend(self.__localMemberTypes)
                self.__memberTypeDefinitions = mtd
                assert None not in self.__memberTypeDefinitions
            # Replace any member types that are themselves unions with the
            # members of those unions, in order.  Note that doing this
            # might indicate we can't resolve this type yet, which is why
            # we separated the member list creation and the substitution
            # phases
            mtd = []
            for mt in self.__memberTypeDefinitions:
                assert isinstance(mt, SimpleTypeDefinition)
                if not mt.isResolved():
                    self._queueForResolution('member type not resolved', depends_on=mt)
                    return self
                if self.VARIETY_union == mt.variety():
                    mtd.extend(mt.memberTypeDefinitions())
                else:
                    mtd.append(mt)
        elif self._DA_restriction == alternative:
            assert self.__baseTypeDefinition
            # Base type should have been resolved before we got here
            assert self.__baseTypeDefinition.isResolved()
            mtd = self.__baseTypeDefinition.__memberTypeDefinitions
            assert mtd is not None
        else:
            raise pyxb.LogicError('completeResolution union variety with alternative %s' % (alternative,))
        # Save a unique copy
        self.__memberTypeDefinitions = mtd[:]
    else:
        raise pyxb.LogicError('completeResolution with variety 0x%02x' % (variety,))
    # Determine what facets, if any, apply to this type.  This
    # should only do something if this is a primitive type.
    self.__processHasFacetAndProperty(variety)
    self.__updateFacets(body)
    # Setting __variety last marks the type as resolved.
    self.__derivationAlternative = alternative
    self.__variety = variety
    self.__domNode = None
    return self
def isResolved (self):
    """Indicate whether this simple type is fully defined.

    Type resolution for simple types means that the corresponding
    schema component fields have been set.  Specifically, that
    means variety, baseTypeDefinition, and the appropriate
    additional fields depending on variety.  See _resolve() for
    more information.
    """
    # A type is unresolved exactly when its variety has not been set.
    return self.__variety is not None
# STD:res
def _resolve (self):
    """Attempt to resolve the type.

    Type resolution for simple types means that the corresponding
    schema component fields have been set.  Specifically, that
    means variety, baseTypeDefinition, and the appropriate
    additional fields depending on variety.

    All built-in STDs are resolved upon creation.  Schema-defined
    STDs are held unresolved until the schema has been completely
    read, so that references to later schema-defined STDs can be
    resolved.  Resolution is performed after the entire schema has
    been scanned and STD instances created for all
    topLevelSimpleTypes.

    If a built-in STD is also defined in a schema (which it should
    be for XMLSchema), the built-in STD is kept, with the
    schema-related information copied over from the matching
    schema-defined STD.  The former then replaces the latter in
    the list of STDs to be resolved.

    Types defined by restriction have the same variety as the type
    they restrict.  If a simple type restriction depends on an
    unresolved type, this method simply queues it for resolution
    in a later pass and returns.
    """
    # Non-None variety means already resolved.
    if self.__variety is not None:
        return self
    assert self.__domNode
    node = self.__domNode
    kw = { 'owner' : self
           , 'schema' : self._schema() }
    bad_instance = False
    # The guts of the node should be exactly one instance of
    # exactly one of these three types.
    candidate = domutils.LocateUniqueChild(node, 'list')
    if candidate:
        self.__initializeFromList(candidate, **kw)
    candidate = domutils.LocateUniqueChild(node, 'restriction')
    if candidate:
        if self.__variety is None:
            self.__initializeFromRestriction(candidate, **kw)
        else:
            # A list was already processed; two alternatives is an error.
            bad_instance = True
    candidate = domutils.LocateUniqueChild(node, 'union')
    if candidate:
        if self.__variety is None:
            self.__initializeFromUnion(candidate, **kw)
        else:
            bad_instance = True
    if self.__baseTypeDefinition is None:
        raise pyxb.SchemaValidationError('xs:simpleType must have list, union, or restriction as child')
    if self._schema() is not None:
        self.__final = self._schema().finalForNode(node, self._STD_Map)
    # It is NOT an error to fail to resolve the type.
    if bad_instance:
        raise pyxb.SchemaValidationError('Expected exactly one of list, restriction, union as child of simpleType')
    return self
# CFD:STD CFD:SimpleTypeDefinition
@classmethod
def CreateFromDOM (cls, node, **kw):
    """Create an unresolved simple type from an xs:simpleType DOM node.

    The new instance records the node and is queued for resolution;
    resolution itself happens only after the whole schema is read."""
    # Node should be an XMLSchema simpleType node
    assert xsd.nodeIsNamed(node, 'simpleType')
    instance = cls(name=domutils.NodeAttribute(node, 'name'), node=node, variety=None, **kw)
    instance._annotationFromDOM(node)
    # Creation does not attempt to do resolution.  Remember the DOM node
    # and queue up the newly created whatsis so we can resolve it after
    # everything's been read in.
    instance.__domNode = node
    instance._queueForResolution('creation')
    return instance
# pythonSupport is None, or a subclass of datatypes.simpleTypeDefinition.
# When set, this simple type definition instance must be uniquely
# associated with the python support type.
__pythonSupport = None
def _setPythonSupport (self, python_support):
    """Associate a Python binding class with this simple type.

    Also registers this component on the binding class and, if no
    binding name is set yet, adopts the class's name.  Returns the
    binding class."""
    # Includes check that python_support is not None
    assert issubclass(python_support, basis.simpleTypeDefinition)
    # Can't share support instances
    self.__pythonSupport = python_support
    self.__pythonSupport._SimpleTypeDefinition(self)
    if self.nameInBinding() is None:
        self.setNameInBinding(self.__pythonSupport.__name__)
    return self.__pythonSupport
def hasPythonSupport (self):
    """True iff a Python binding class has been associated with this type."""
    return self.__pythonSupport is not None
def pythonSupport (self):
    """Return the associated Python binding class.

    @raise pyxb.LogicError: no binding class has been set"""
    support = self.__pythonSupport
    if support is None:
        raise pyxb.LogicError('%s: No support defined' % (self.name(),))
    return support
def stringToPython (self, string):
    """Convert a lexical value to a Python value via the binding class."""
    return self.pythonSupport().stringToPython(string)
def pythonToString (self, value):
    """Convert a Python value to its lexical form via the binding class."""
    return self.pythonSupport().pythonToString(value)
class _SimpleUrTypeDefinition (SimpleTypeDefinition, _Singleton_mixin):
    """Subclass ensures there is only one simple ur-type."""
    pass
class _ImportElementInformationItem (_Annotated_mixin):
    """Data associated with an
    U{import<http://www.w3.org/TR/xmlschema-1/#composition-schemaImport>}
    statement within a schema."""

    def id (self):
        """The value of the C{id} attribute from the import statement."""
        return self.__id
    __id = None

    def namespace (self):
        """The L{pyxb.namespace.Namespace} instance corresponding to the value
        of the C{namespace} attribute from the import statement."""
        return self.__namespace
    __namespace = None

    def schemaLocation (self):
        """The value of the C{schemaLocation} attribute from the import
        statement, normalized relative to the location of the importing
        schema."""
        return self.__schemaLocation
    __schemaLocation = None

    def prefix (self):
        """The prefix from a namespace declaration for L{namespace} that was
        active in the context of the import element, or C{None} if there was
        no relevant namespace declaration in scope at that point.

        This is propagated to be used as the default prefix for the
        corresponding namespace if no prefix had been assigned.
        """
        return self.__prefix
    __prefix = None

    def schema (self):
        """The L{Schema} instance corresponding to the imported schema, if
        available.

        Normally C{import} statements will be fulfilled by loading components
        from a L{namespace archive<pyxb.namespace.NamespaceArchive>} in which
        the corresponding namespace is marked as public.  Where there are
        cycles in the namespace dependency graph, or the schema for a
        namespace are associated with a restricted profile of another
        namespace, there may be no such archive and instead the components are
        obtained using this schema."""
        return self.__schema
    __schema = None

    def __init__ (self, importing_schema, node, **kw):
        """Gather the information relative to an C{import} statement.

        If the imported namespace can be loaded from an archive, the
        C{schemaLocation} attribute is ignored.  Otherwise, it attempts to
        retrieve and parse the corresponding schema (if this has not already
        been done).

        @param importing_schema: The L{Schema} instance in which the import
        was found.
        @param node: The C{xml.dom.DOM} node incorporating the schema
        information.

        @raise Exception: Any exception raised when attempting to retrieve and
        parse data from the schema location.
        """
        super(_ImportElementInformationItem, self).__init__(**kw)
        uri = domutils.NodeAttribute(node, 'namespace')
        if uri is None:
            raise pyxb.IncompleteImplementationError('import statements without namespace not supported')
        schema_location = pyxb.utils.utility.NormalizeLocation(domutils.NodeAttribute(node, 'schemaLocation'), importing_schema.location())
        self.__schemaLocation = schema_location
        ns = self.__namespace = pyxb.namespace.NamespaceForURI(uri, create_if_missing=True)
        need_schema = ns.isImportAugmentable()
        if not need_schema:
            # Discard location if we expect to be able to learn about this
            # namespace from an archive or a built-in description
            self.__schemaLocation = None
        ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node)
        if self.schemaLocation() is not None:
            # @todo: NOTICE
            (has_schema, schema_instance) = self.__namespace.lookupSchemaByLocation(schema_location)
            if not has_schema:
                ckw = { 'absolute_schema_location' : schema_location,
                        'generation_uid' : importing_schema.generationUID(),
                        'uri_content_archive_directory' : importing_schema._uriContentArchiveDirectory(),
                        }
                try:
                    schema_instance = Schema.CreateFromLocation(**ckw)
                except Exception:
                    # Log with traceback, then propagate to the caller.
                    _log.exception('Import %s cannot read schema location %s (%s)', ns, self.__schemaLocation, schema_location)
                    raise
            self.__schema = schema_instance
        elif need_schema:
            _log.warning('No information available on imported namespace %s', uri)
        # If we think we found a schema, make sure it's in the right
        # namespace.
        if self.__schema is not None:
            if ns != self.__schema.targetNamespace():
                raise pyxb.SchemaValidationError('Import expected namespace %s but got %s' % (ns, self.__schema.targetNamespace()))
        self.__prefix = ns_ctx.prefixForNamespace(self.namespace())
        self._annotationFromDOM(node)
class Schema (_SchemaComponent_mixin):
"""An XMLSchema U{Schema<http://www.w3.org/TR/xmlschema-1/#Schemas>}."""
def __getstate__ (self):
    """Prevent pickling: Schema instances are never meant to be serialized."""
    raise pyxb.LogicError('Attempt to serialize Schema instance')
# List of annotations
__annotations = None
# True when we have started seeing elements, attributes, or
# notations.
__pastProlog = False
def location (self):
    """URI or path to where the schema can be found.

    For schema created by a user, the location should be provided to the
    constructor using the C{schema_location} keyword.  In the case of
    imported or included schema, the including schema's location is used
    as the base URI for determining the absolute URI of the included
    schema from its (possibly relative) location value.  For files,
    the scheme and authority portions are generally absent, as is often
    the abs_path part."""
    return self.__location
__location = None
def locationTag (self):
    """Basename (without extension) of the schema location, set in __init__."""
    return self.__locationTag
__locationTag = None
def signature (self):
    """The C{schema_signature} keyword value provided at construction."""
    return self.__signature
__signature = None
def generationUID (self):
    """Unique identifier for the generation run this schema belongs to."""
    return self.__generationUID
__generationUID = None
def originRecord (self):
    """Record returned when this schema was added to its target namespace."""
    return self.__originRecord
__originRecord = None
def targetNamespace (self):
    """The targetNamespace of a component.

    This is None, or a reference to a Namespace in which the
    component is declared (either as a global or local to one of
    the namespace's complex type definitions).  This is immutable
    after creation.
    """
    return self.__targetNamespace
__targetNamespace = None
def defaultNamespace (self):
    """Default namespace of the schema.

    Will be None unless the schema has an 'xmlns' attribute.  The
    value must currently be provided as a keyword parameter to the
    constructor. """
    return self.__defaultNamespace
__defaultNamespace = None
def referencedNamespaces (self):
    """Namespaces in scope at the schema element, captured in __init__."""
    return self.__referencedNamespaces
__referencedNamespaces = None
__namespaceData = None
def importEIIs (self):
    """Set of _ImportElementInformationItem instances for this schema."""
    return self.__importEIIs
__importEIIs = None
def importedSchema (self):
    """Set of Schema instances imported by this schema."""
    return self.__importedSchema
__importedSchema = None
def includedSchema (self):
    """Set of Schema instances included by this schema."""
    return self.__includedSchema
__includedSchema = None
# Values for the *FormDefault schema attributes.
_QUALIFIED = "qualified"
_UNQUALIFIED = "unqualified"
# Default values for standard recognized schema attributes
__attributeMap = { pyxb.namespace.ExpandedName(None, 'attributeFormDefault') : _UNQUALIFIED
                   , pyxb.namespace.ExpandedName(None, 'elementFormDefault') : _UNQUALIFIED
                   , pyxb.namespace.ExpandedName(None, 'blockDefault') : ''
                   , pyxb.namespace.ExpandedName(None, 'finalDefault') : ''
                   , pyxb.namespace.ExpandedName(None, 'id') : None
                   , pyxb.namespace.ExpandedName(None, 'targetNamespace') : None
                   , pyxb.namespace.ExpandedName(None, 'version') : None
                   , pyxb.namespace.XML.createExpandedName('lang') : None
                   }
def _setAttributeFromDOM (self, attr):
    """Override the schema attribute with the given DOM value.

    Returns self."""
    self.__attributeMap[pyxb.namespace.ExpandedName(attr.name)] = attr.nodeValue
    return self
def _setAttributesFromMap (self, attr_map):
    """Override the schema attributes with values from the given map.

    Returns self."""
    self.__attributeMap.update(attr_map)
    return self
def schemaHasAttribute (self, attr_name):
    """Return True iff the schema has an attribute with the given (nc)name."""
    key = attr_name
    # A plain string names an attribute with no namespace.
    if isinstance(key, basestring):
        key = pyxb.namespace.ExpandedName(None, key)
    return key in self.__attributeMap
def schemaAttribute (self, attr_name):
    """Return the schema attribute value associated with the given (nc)name.

    @param attr_name: local name for the attribute in the schema element.
    @return: the value of the corresponding attribute, or C{None} if it
    has not been defined and has no default.
    @raise KeyError: C{attr_name} is not a valid attribute for a C{schema} element.
    """
    if isinstance(attr_name, basestring):
        attr_name = pyxb.namespace.ExpandedName(None, attr_name)
    return self.__attributeMap[attr_name]
# Named-component categories that a target namespace must be configured
# to hold before this schema's components are added to it.
__SchemaCategories = ( 'typeDefinition', 'attributeGroupDefinition', 'modelGroupDefinition',
                       'attributeDeclaration', 'elementDeclaration', 'notationDeclaration',
                       'identityConstraintDefinition' )
def _uriContentArchiveDirectory (self):
    """Directory used to archive content retrieved from the schema URI."""
    return self.__uriContentArchiveDirectory
__uriContentArchiveDirectory = None
def __init__ (self, *args, **kw):
    """Create a Schema instance and register it with its target namespace.

    Recognized keywords include C{schema_location}, C{generation_uid},
    C{schema_signature}, C{uri_content_archive_directory}, and
    C{_bypass_preload}; remaining arguments are passed to the superclass.

    @raise pyxb.LogicError: the namespace context does not provide a
        valid target namespace.
    """
    # Force resolution of available namespaces if not already done
    if not kw.get('_bypass_preload', False):
        pyxb.namespace.archive.NamespaceArchive.PreLoadArchives()
    assert 'schema' not in kw
    self.__uriContentArchiveDirectory = kw.get('uri_content_archive_directory')
    self.__location = kw.get('schema_location')
    if self.__location is not None:
        schema_path = self.__location
        # Strip any URI scheme/authority, keeping only the path portion.
        if 0 <= schema_path.find(':'):
            schema_path = urlparse.urlparse(schema_path)[2] # .path
        self.__locationTag = os.path.split(schema_path)[1].split('.')[0]
    self.__generationUID = kw.get('generation_uid')
    if self.__generationUID is None:
        _log.warning('No generationUID provided')
        self.__generationUID = pyxb.utils.utility.UniqueIdentifier()
    self.__signature = kw.get('schema_signature')
    super(Schema, self).__init__(*args, **kw)
    self.__importEIIs = set()
    self.__includedSchema = set()
    self.__importedSchema = set()
    self.__targetNamespace = self._namespaceContext().targetNamespace()
    if not isinstance(self.__targetNamespace, pyxb.namespace.Namespace):
        raise pyxb.LogicError('Schema constructor requires valid Namespace instance as target_namespace')
    # NB: This will raise pyxb.SchemaUniquenessError if it appears this
    # schema has already been incorporated into the target namespace.
    self.__originRecord = self.__targetNamespace.addSchema(self)
    self.__targetNamespace.configureCategories(self.__SchemaCategories)
    # NOTE(review): __defaultNamespace is only the class default (None)
    # at this point unless set by other machinery — confirm intent.
    if self.__defaultNamespace is not None:
        self.__defaultNamespace.configureCategories(self.__SchemaCategories)
    # Copy so per-instance attribute overrides don't mutate the class map.
    self.__attributeMap = self.__attributeMap.copy()
    self.__annotations = []
    # @todo: This isn't right if namespaces are introduced deeper in the document
    self.__referencedNamespaces = self._namespaceContext().inScopeNamespaces().values()
# Map from a top-level schema element's local name to the component
# class used to represent it.
__TopLevelComponentMap = {
    'element' : ElementDeclaration,
    'attribute' : AttributeDeclaration,
    'notation' : NotationDeclaration,
    'simpleType' : SimpleTypeDefinition,
    'complexType' : ComplexTypeDefinition,
    'group' : ModelGroupDefinition,
    'attributeGroup' : AttributeGroupDefinition
    }
@classmethod
def CreateFromDocument (cls, xmls, **kw):
    """Create a schema from XML schema text.

    Records a signature for *xmls* unless the caller already supplied
    one, parses the text into a DOM tree, and delegates to
    L{CreateFromDOM} with all keyword parameters.
    """
    if 'schema_signature' not in kw:
        kw['schema_signature'] = pyxb.utils.utility.HashForText(xmls)
    dom = domutils.StringToDOM(xmls, **kw)
    return cls.CreateFromDOM(dom, **kw)
@classmethod
def CreateFromLocation (cls, **kw):
    """Create a schema from a schema location.

    Reads an XML document from the schema location and creates a schema
    using it.  All keyword parameters are passed to L{CreateFromDOM}.

    @keyword schema_location: A file path or a URI.  If this is a relative
    URI and C{parent_uri} is present, the actual location will be
    L{normalized<pyxb.utils.utility.NormalizeLocation>}.
    @keyword parent_uri: The context within which schema_location will be
    normalized, if necessary.
    @keyword absolute_schema_location: A file path or URI.  This value is
    not normalized, and supersedes C{schema_location}.
    """
    # NB: NormalizeLocation is evaluated unconditionally (it is the pop
    # default expression), even when absolute_schema_location is given.
    schema_location = kw.pop('absolute_schema_location', pyxb.utils.utility.NormalizeLocation(kw.get('schema_location'), kw.get('parent_uri'), kw.get('prefix_map')))
    # The resolved location serves both as this schema's location and as
    # the base for resolving relative references it contains.
    kw['location_base'] = kw['schema_location'] = schema_location
    assert isinstance(schema_location, basestring), 'Unexpected value %s type %s for schema_location' % (schema_location, type(schema_location))
    uri_content_archive_directory = kw.get('uri_content_archive_directory')
    return cls.CreateFromDocument(pyxb.utils.utility.DataFromURI(schema_location, archive_directory=uri_content_archive_directory), **kw)
@classmethod
def CreateFromStream (cls, stream, **kw):
    """Read the entire stream and build a schema from its content via
    L{CreateFromDocument}."""
    content = stream.read()
    return cls.CreateFromDocument(content, **kw)
@classmethod
def CreateFromDOM (cls, node, namespace_context=None, schema_location=None, schema_signature=None, generation_uid=None, **kw):
    """Take the root element of the document, and scan its attributes under
    the assumption it is an XMLSchema schema element.  That means
    recognize namespace declarations and process them.  Also look for
    and set the default namespace.  All other attributes are passed up
    to the parent class for storage."""
    # Get the context of any schema that is including (not importing) this
    # one.
    including_context = kw.get('including_context')
    # Accept either a document node or its root element directly.
    root_node = node
    if Node.DOCUMENT_NODE == node.nodeType:
        root_node = root_node.documentElement
    if Node.ELEMENT_NODE != root_node.nodeType:
        raise pyxb.LogicError('Must be given a DOM node of type ELEMENT')
    assert (namespace_context is None) or isinstance(namespace_context, pyxb.namespace.resolution.NamespaceContext)
    # Build the namespace context for the root element, layered on any
    # parent and including contexts.
    ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(root_node,
                                                                       parent_context=namespace_context,
                                                                       including_context=including_context)
    tns = ns_ctx.targetNamespace()
    if tns is None:
        raise pyxb.SchemaValidationError('No targetNamespace associated with content (not a schema?)')
    schema = cls(namespace_context=ns_ctx, schema_location=schema_location, schema_signature=schema_signature, generation_uid=generation_uid, **kw)
    schema.__namespaceData = ns_ctx
    if schema.targetNamespace() != ns_ctx.targetNamespace():
        raise pyxb.SchemaValidationError('targetNamespace %s conflicts with %s' % (schema.targetNamespace(), ns_ctx.targetNamespace()))
    # Update the attribute map
    for ai in range(root_node.attributes.length):
        schema._setAttributeFromDOM(root_node.attributes.item(ai))
    # Verify that the root node is an XML schema element
    if not xsd.nodeIsNamed(root_node, 'schema'):
        raise pyxb.SchemaValidationError('Root node %s of document is not an XML schema element' % (root_node.nodeName,))
    # Process every child of the schema element; only elements contribute
    # components, everything else is logged or ignored.
    for cn in root_node.childNodes:
        if Node.ELEMENT_NODE == cn.nodeType:
            rv = schema.__processTopLevelNode(cn)
            if rv is None:
                _log.info('Unrecognized: %s %s', cn.nodeName, cn.toxml("utf-8"))
        elif Node.TEXT_NODE == cn.nodeType:
            # Non-element content really should just be whitespace.
            # If something else is seen, print it for inspection.
            text = cn.data.strip()
            if text:
                _log.info('Ignored text: %s', text)
        elif Node.COMMENT_NODE == cn.nodeType:
            pass
        else:
            # ATTRIBUTE_NODE
            # CDATA_SECTION_NODE
            # ENTITY_NODE
            # PROCESSING_INSTRUCTION
            # DOCUMENT_NODE
            # DOCUMENT_TYPE_NODE
            # NOTATION_NODE
            _log.info('Ignoring non-element: %s', cn)
    # Do not perform resolution yet: we may be done with this schema, but
    # the namespace may incorporate additional ones, and we can't resolve
    # until everything's present.
    return schema
_SA_All = '#all'
def __ebvForNode (self, attr, dom_node, candidate_map):
    """Compute a bit mask from a block/final style attribute.

    Reads *attr* from *dom_node*, falling back to the schema-level
    '<attr>Default' attribute when the node does not carry it.  A value
    equal to _SA_All selects every option in *candidate_map*; otherwise
    the whitespace-separated tokens are summed via *candidate_map*
    (unrecognized tokens contribute 0).
    """
    ebv = domutils.NodeAttribute(dom_node, attr)
    if ebv is None:
        ebv = self.schemaAttribute('%sDefault' % (attr,))
    rv = 0
    if ebv == self._SA_All:
        for v in candidate_map.itervalues():
            rv += v
    else:
        # NOTE(review): assumes the schema-level default is never None
        # here (ebv.split() would raise otherwise) -- confirm upstream.
        for candidate in ebv.split():
            rv += candidate_map.get(candidate, 0)
    return rv
def blockForNode (self, dom_node, candidate_map):
    """Return a bit mask indicating a set of options read from the node's "block" attribute or the schema's "blockDefault" attribute.

    A value of '#all' means enable every option; otherwise, the attribute
    value should be a list of tokens, for which the corresponding value
    will be added to the return value.

    @param dom_node: the node from which the "block" attribute will be retrieved
    @type dom_node: C{xml.dom.Node}
    @param candidate_map: map from strings to bitmask values
    """
    return self.__ebvForNode('block', dom_node, candidate_map)
def finalForNode (self, dom_node, candidate_map):
    """Return a bit mask indicating a set of options read from the node's
    "final" attribute or the schema's "finalDefault" attribute.

    A value of '#all' means enable every option; otherwise, the attribute
    value should be a list of tokens, for which the corresponding value
    will be added to the return value.

    @param dom_node: the node from which the "final" attribute will be retrieved
    @type dom_node: C{xml.dom.Node}
    @param candidate_map: map from strings to bitmask values
    """
    return self.__ebvForNode('final', dom_node, candidate_map)
def targetNamespaceForNode (self, dom_node, declaration_type):
    """Determine the target namespace for a local attribute or element declaration.

    Look at the node's C{form} attribute, or if none the schema's
    C{attributeFormDefault} or C{elementFormDefault} value.  If the
    resulting value is C{"qualified"} and the parent schema has a
    non-absent target namespace, return it to use as the declaration
    target namespace.  Otherwise, return None to indicate that the
    declaration has no namespace.

    @param dom_node: The node defining an element or attribute declaration
    @param declaration_type: Either L{AttributeDeclaration} or L{ElementDeclaration}
    @return: L{pyxb.namespace.Namespace} or None
    """
    form = domutils.NodeAttribute(dom_node, 'form')
    if form is None:
        # Fall back to the schema-wide default for this declaration kind.
        if declaration_type == ElementDeclaration:
            form = self.schemaAttribute('elementFormDefault')
        elif declaration_type == AttributeDeclaration:
            form = self.schemaAttribute('attributeFormDefault')
        else:
            raise pyxb.LogicError('Expected ElementDeclaration or AttributeDeclaration: got %s' % (declaration_type,))
    if self._QUALIFIED == form:
        # Qualified declarations take the schema's namespace, unless it
        # is the absent namespace.
        target = self.targetNamespace()
        if not target.isAbsentNamespace():
            return target
        return None
    if self._UNQUALIFIED != form:
        raise pyxb.SchemaValidationError('Form type neither %s nor %s' % (self._QUALIFIED, self._UNQUALIFIED))
    return None
def __requireInProlog (self, node_name):
    """Raise a SchemaValidationError referencing the given
    node if we have passed the sequence point representing the end
    of prolog elements."""
    # __pastProlog becomes True once the first named component is seen
    # (see __processTopLevelNode).
    if self.__pastProlog:
        raise pyxb.SchemaValidationError('Unexpected node %s after prolog' % (node_name,))
def __processInclude (self, node):
    """Process an xs:include directive by loading the referenced schema.

    The included schema must share this schema's target namespace, and a
    given resolved location is loaded at most once.
    """
    self.__requireInProlog(node.nodeName)
    # See section 4.2.1 of Structures.
    abs_uri = pyxb.utils.utility.NormalizeLocation(domutils.NodeAttribute(node, 'schemaLocation'), self.__location)
    (has_schema, schema_instance) = self.targetNamespace().lookupSchemaByLocation(abs_uri)
    if not has_schema:
        kw = { 'absolute_schema_location': abs_uri,
               'including_context': self.__namespaceData,
               'generation_uid': self.generationUID(),
               'uri_content_archive_directory': self._uriContentArchiveDirectory(),
               }
        try:
            schema_instance = self.CreateFromLocation(**kw)
        except pyxb.SchemaUniquenessError as e:
            # Already incorporated into the namespace elsewhere; continue
            # without re-adding it.
            _log.warning('Skipping apparent redundant inclusion of %s defining %s (hash matches %s)', e.schemaLocation(), e.namespace(), e.existingSchema().location())
        except Exception as e:
            # Log location context for diagnosis, then propagate.
            _log.exception('INCLUDE %s caught', abs_uri)
            raise
    if schema_instance:
        if self.targetNamespace() != schema_instance.targetNamespace():
            raise pyxb.SchemaValidationError('Included namespace %s not consistent with including namespace %s' % (schema_instance.targetNamespace(), self.targetNamespace()))
        self.__includedSchema.add(schema_instance)
    return node
def __processImport (self, node):
    """Process an import directive.

    This attempts to locate schema (named entity) information for
    a namespace that is referenced by this schema.
    """
    self.__requireInProlog(node.nodeName)
    import_eii = _ImportElementInformationItem(self, node)
    if import_eii.schema() is not None:
        self.__importedSchema.add(import_eii.schema())
    self.targetNamespace().importNamespace(import_eii.namespace())
    # Give the imported namespace a prefix if it does not already have one.
    ins = import_eii.namespace()
    if ins.prefix() is None:
        ins.setPrefix(import_eii.prefix())
    self.__importEIIs.add(import_eii)
    return node
def __processRedefine (self, node):
    """Reject xs:redefine: it is only valid in the prolog, and is not
    implemented by this parser."""
    self.__requireInProlog(node.nodeName)
    raise pyxb.IncompleteImplementationError('redefine not implemented')
def __processAnnotation (self, node):
    """Record a top-level xs:annotation; returns self to signal the node
    was recognized."""
    annotation = Annotation.CreateFromDOM(node)
    self._addAnnotation(annotation)
    return self
def __processTopLevelNode (self, node):
    """Process a DOM node from the top level of the schema.

    This should return a non-None value if the node was
    successfully recognized."""
    # Prolog directives (include/import/redefine) must precede all
    # component definitions; annotation is allowed anywhere.
    if xsd.nodeIsNamed(node, 'include'):
        return self.__processInclude(node)
    if xsd.nodeIsNamed(node, 'import'):
        return self.__processImport(node)
    if xsd.nodeIsNamed(node, 'redefine'):
        return self.__processRedefine(node)
    if xsd.nodeIsNamed(node, 'annotation'):
        return self.__processAnnotation(node)
    # Anything else should be a named top-level component.
    component = self.__TopLevelComponentMap.get(node.localName)
    if component is not None:
        # Once a component is seen, no further prolog directives are allowed.
        self.__pastProlog = True
        kw = { 'scope' : _ScopedDeclaration_mixin.SCOPE_global,
               'schema' : self,
               'owner' : self }
        return self._addNamedComponent(component.CreateFromDOM(node, **kw))
    raise pyxb.SchemaValidationError('Unexpected top-level element %s' % (node.nodeName,))
def _addAnnotation (self, annotation):
    """Record a schema-level annotation and return it."""
    self.__annotations.append(annotation)
    return annotation
def _addNamedComponent (self, nc):
    """Record a named top-level component in the target namespace's category maps.

    Type, attribute, and attribute-group components are routed through
    helpers that merge permissible re-definitions; everything else is
    registered directly under its category.

    @raise pyxb.LogicError: if the component is unnamed or anonymous.
    @return: the component instance retained by the namespace.
    """
    tns = self.targetNamespace()
    assert tns is not None
    if not isinstance(nc, _NamedComponent_mixin):
        raise pyxb.LogicError('Attempt to add unnamed %s instance to dictionary' % (nc.__class__,))
    if nc.isAnonymous():
        # BUG FIX: the message was previously built with a comma instead
        # of the % operator, so the class was passed as a stray
        # constructor argument and never interpolated into the message.
        raise pyxb.LogicError('Attempt to add anonymous component to dictionary: %s' % (nc.__class__,))
    if isinstance(nc, _ScopedDeclaration_mixin):
        assert _ScopedDeclaration_mixin.SCOPE_global == nc.scope()
    if isinstance(nc, (SimpleTypeDefinition, ComplexTypeDefinition)):
        return self.__addTypeDefinition(nc)
    if isinstance(nc, AttributeDeclaration):
        return self.__addAttributeDeclaration(nc)
    if isinstance(nc, AttributeGroupDefinition):
        return self.__addAttributeGroupDefinition(nc)
    if isinstance(nc, ModelGroupDefinition):
        return tns.addCategoryObject('modelGroupDefinition', nc.name(), nc)
    if isinstance(nc, ElementDeclaration):
        return tns.addCategoryObject('elementDeclaration', nc.name(), nc)
    if isinstance(nc, NotationDeclaration):
        return tns.addCategoryObject('notationDeclaration', nc.name(), nc)
    if isinstance(nc, IdentityConstraintDefinition):
        return tns.addCategoryObject('identityConstraintDefinition', nc.name(), nc)
    assert False, 'No support to record named component of type %s' % (nc.__class__,)
def __addTypeDefinition (self, td):
    """Record a top-level type definition, merging with a prior definition when permitted.

    Re-definition is only allowed when the existing definition accepts
    updates (builtin definitions); a simple/complex mismatch or a
    non-builtin clash raises SchemaValidationError.  Returns the retained
    definition.
    """
    local_name = td.name()
    assert self.__targetNamespace
    tns = self.targetNamespace()
    old_td = tns.typeDefinitions().get(local_name)
    if (old_td is not None) and (old_td != td):
        if isinstance(td, ComplexTypeDefinition) != isinstance(old_td, ComplexTypeDefinition):
            raise pyxb.SchemaValidationError('Name %s used for both simple and complex types' % (td.name(),))
        if not old_td._allowUpdateFromOther(td):
            raise pyxb.SchemaValidationError('Attempt to re-define non-builtin type definition %s' % (tns.createExpandedName(local_name),))
        # Copy schema-related information from the new definition
        # into the old one, and continue to use the old one.
        td = tns._replaceComponent(td, old_td._updateFromOther(td))
    else:
        tns.addCategoryObject('typeDefinition', td.name(), td)
    assert td is not None
    return td
def __addAttributeDeclaration (self, ad):
    """Record a top-level attribute declaration, merging with a prior
    definition when permitted (mirrors __addTypeDefinition).

    Returns the retained declaration.
    """
    local_name = ad.name()
    assert self.__targetNamespace
    tns = self.targetNamespace()
    old_ad = tns.attributeDeclarations().get(local_name)
    if (old_ad is not None) and (old_ad != ad):
        if not old_ad._allowUpdateFromOther(ad):
            raise pyxb.SchemaValidationError('Attempt to re-define non-builtin attribute declaration %s' % (tns.createExpandedName(local_name),))
        # Copy schema-related information from the new definition
        # into the old one, and continue to use the old one.
        ad = tns._replaceComponent(ad, old_ad._updateFromOther(ad))
    else:
        tns.addCategoryObject('attributeDeclaration', ad.name(), ad)
    assert ad is not None
    return ad
def __addAttributeGroupDefinition (self, agd):
    """Record a top-level attribute group definition, merging with a
    prior definition when permitted (mirrors __addTypeDefinition).

    @raise pyxb.SchemaValidationError: on an attempt to re-define a
    non-builtin attribute group.
    @return: the retained definition.
    """
    local_name = agd.name()
    assert self.__targetNamespace
    tns = self.targetNamespace()
    old_agd = tns.attributeGroupDefinitions().get(local_name)
    if (old_agd is not None) and (old_agd != agd):
        if not old_agd._allowUpdateFromOther(agd):
            raise pyxb.SchemaValidationError('Attempt to re-define non-builtin attribute group definition %s' % (tns.createExpandedName(local_name),))
        # Copy schema-related information from the new definition
        # into the old one, and continue to use the old one.
        # BUG FIX: the result of _replaceComponent was previously dropped,
        # so this method returned the discarded new definition.  The
        # sibling __addTypeDefinition/__addAttributeDeclaration helpers
        # rebind and return the retained component; do the same here.
        agd = tns._replaceComponent(agd, old_agd._updateFromOther(agd))
    else:
        tns.addCategoryObject('attributeGroupDefinition', agd.name(), agd)
    assert agd is not None
    return agd
def __str__ (self):
    """Short display form identifying the schema by its location."""
    return 'SCH[{0}]'.format(self.location())
def _AddSimpleTypes (namespace):
    """Add to the schema the definitions of the built-in types of XMLSchema.

    This should only be invoked by L{pyxb.namespace} when the built-in
    namespaces are initialized.

    NOTE(review): the *namespace* parameter is not referenced in the
    body; the schema is built against pyxb.namespace.XMLSchema directly.
    Confirm whether the parameter is vestigial.
    """
    # Add the ur type
    #schema = namespace.schema()
    schema = Schema(namespace_context=pyxb.namespace.XMLSchema.initialNamespaceContext(), schema_location='URN:noLocation:PyXB:XMLSchema', generation_uid=pyxb.namespace.BuiltInObjectUID, _bypass_preload=True)
    td = schema._addNamedComponent(ComplexTypeDefinition.UrTypeDefinition(schema, in_builtin_definition=True))
    assert td.isResolved()
    # Add the simple ur type
    td = schema._addNamedComponent(SimpleTypeDefinition.SimpleUrTypeDefinition(schema, in_builtin_definition=True))
    assert td.isResolved()
    # Add definitions for all primitive and derived simple types
    pts_std_map = {}
    for dtc in datatypes._PrimitiveDatatypes:
        name = dtc.__name__.rstrip('_')
        td = schema._addNamedComponent(SimpleTypeDefinition.CreatePrimitiveInstance(name, schema, dtc))
        assert td.isResolved()
        assert dtc.SimpleTypeDefinition() == td
        pts_std_map.setdefault(dtc, td)
    # Derived types look up their base in the map built above.
    for dtc in datatypes._DerivedDatatypes:
        name = dtc.__name__.rstrip('_')
        parent_std = pts_std_map[dtc.XsdSuperType()]
        td = schema._addNamedComponent(SimpleTypeDefinition.CreateDerivedInstance(name, schema, parent_std, dtc))
        assert td.isResolved()
        assert dtc.SimpleTypeDefinition() == td
        pts_std_map.setdefault(dtc, td)
    # List types reference the already-registered definition of their
    # item type.
    for dtc in datatypes._ListDatatypes:
        list_name = dtc.__name__.rstrip('_')
        element_name = dtc._ItemType.__name__.rstrip('_')
        element_std = schema.targetNamespace().typeDefinitions().get(element_name)
        assert element_std is not None
        td = schema._addNamedComponent(SimpleTypeDefinition.CreateListInstance(list_name, schema, element_std, dtc))
        assert td.isResolved()
    global _PastAddBuiltInTypes
    _PastAddBuiltInTypes = True
    return schema
import sys
import pyxb.namespace.builtin
# Register this module with the built-in namespaces so their schema
# components can be populated (module-import side effect).
pyxb.namespace.builtin._InitializeBuiltinNamespaces(sys.modules[__name__])
## Local Variables:
## fill-column:78
## End:
| {
"content_hash": "c483a17c441525116ced97aa2e4d5624",
"timestamp": "",
"source": "github",
"line_count": 5158,
"max_line_length": 219,
"avg_line_length": 43.59557968204731,
"alnum_prop": 0.6254035736838829,
"repo_name": "balanced/PyXB",
"id": "36f1d261217f413bdc5aad8c2eccd9570df3a301",
"size": "225483",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyxb/xmlschema/structures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6286"
},
{
"name": "Python",
"bytes": "1854695"
},
{
"name": "Shell",
"bytes": "37524"
}
],
"symlink_target": ""
} |
import argparse
import numpy as np
import data_parser as dp
FLAG = None
def get_mean_var(feats):
    """Return the per-dimension mean and variance of a 2-D feature list."""
    matrix = np.array(feats)
    return np.mean(matrix, 0), np.var(matrix, 0)
def normalize(feats, mean, var):
    """Mean-center *feats* and return the result as nested lists.

    NOTE(review): *var* is accepted but never used -- the data is only
    centered, not scaled to unit variance.  Confirm this is intended.
    """
    centered = np.array(feats) - mean
    return centered.tolist()
def parse_opt():
    """Build the command-line parser for the normalization script."""
    arg_parser = argparse.ArgumentParser(
        description='To do the normalize of query and corpus files')
    # Positional arguments: query file first, then the corpus file.
    arg_parser.add_argument(
        'query_fn', metavar='<query filename>',
        help='The filename of query file with feat and label')
    arg_parser.add_argument(
        'corpus_fn', metavar='<the database filename>',
        help='The database filename with feat and label')
    arg_parser.add_argument(
        '--test_num', type=int, default=100, metavar='--test number',
        help='The testing number for MAP')
    return arg_parser
def main():
    """Normalize the query and corpus feature files and write *_normed copies.

    Both files are centered using statistics computed from the corpus
    (training) file only.  Raises NameError when the two files disagree
    on feature dimensionality.
    """
    test_feats, test_labs = dp.read_csv_file(FLAG.query_fn)
    train_feats, train_labs = dp.read_csv_file(FLAG.corpus_fn)
    if len(test_feats[0]) != len(train_feats[0]):
        print (len(test_feats[0]), len(train_feats[0]))
        raise NameError('The dimension between two files are not the same')
    # NOTE(review): unused except by the commented-out MAP call below.
    feat_dim = len(test_feats[0])
    mean, var = get_mean_var(train_feats)
    train_normed_feats = normalize(train_feats, mean, var)
    test_normed_feats = normalize(test_feats, mean, var)
    dp.write_feat_lab(FLAG.query_fn +'_normed', test_normed_feats, test_labs)
    dp.write_feat_lab(FLAG.corpus_fn +'_normed', train_normed_feats, train_labs)
    #print (MAP(test_list[:FLAG.test_num],train_list, feat_dim=feat_dim))
    return
if __name__ == '__main__':
    # Parse CLI arguments into the module-level FLAG before running.
    parser = parse_opt()
    FLAG = parser.parse_args()
    main()
| {
"content_hash": "36393b117c84bc9fab534b1be8a7da92",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 31.105263157894736,
"alnum_prop": 0.6446700507614214,
"repo_name": "hoaaoh/Audio2Vec",
"id": "e5ac4596fae230a876392ccba4a99c424dac11fc",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/norm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114034"
},
{
"name": "Shell",
"bytes": "6932"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ftools.parser.Fortran03Lexer import Fortran03Lexer as Lexer
from antlr4.InputStream import InputStream
def test_empty():
    """Smoke test: constructing the lexer on empty input must not raise."""
    lex = Lexer(InputStream(""))
def test_program():
    """The PROGRAM keyword lexes to a single PROGRAM token."""
    lexer = Lexer(InputStream("PROGRAM"))
    token = lexer.nextToken()
    assert token.type == Lexer.PROGRAM
    assert token.text == "PROGRAM"
def test_special():
    """An unrecognized character lexes to a SpecialCharacter token."""
    lexer = Lexer(InputStream("@"))
    token = lexer.nextToken()
    assert token.type == Lexer.SpecialCharacter
    assert token.text == "@"
| {
"content_hash": "2a5df7c812bbcb40c5c11b0955e85406",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 30.657894736842106,
"alnum_prop": 0.7304721030042919,
"repo_name": "ScottWales/ftools",
"id": "d10fb97cca5e68a5719ee33b277409a26ba042f7",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/parser/test_antlr_lexer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "51021"
},
{
"name": "Fortran",
"bytes": "2136"
},
{
"name": "Makefile",
"bytes": "495"
},
{
"name": "Python",
"bytes": "31737"
}
],
"symlink_target": ""
} |
from pandas.util.decorators import cache_readonly
import pandas.util.testing as tm
import pandas as pd
_ts = tm.makeTimeSeries()
class TestData(object):
    """Lazily-built, cached fixture data shared by series tests."""

    @cache_readonly
    def ts(self):
        """Copy of the module-level time series, named 'ts'."""
        result = _ts.copy()
        result.name = 'ts'
        return result

    @cache_readonly
    def series(self):
        """Generated string-indexed series named 'series'."""
        result = tm.makeStringSeries()
        result.name = 'series'
        return result

    @cache_readonly
    def objSeries(self):
        """Generated object-dtype series named 'objects'."""
        result = tm.makeObjectSeries()
        result.name = 'objects'
        return result

    @cache_readonly
    def empty(self):
        """An empty Series with an empty index."""
        return pd.Series([], index=[])
| {
"content_hash": "373c7c10698e02eb0aca96818db04143",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 49,
"avg_line_length": 20.8,
"alnum_prop": 0.6121794871794872,
"repo_name": "boomsbloom/dtm-fmri",
"id": "613961e1c670f0a5561da19b3c756995c6dc86c8",
"size": "624",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/series/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "413670"
},
{
"name": "C++",
"bytes": "262666"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "14725"
},
{
"name": "HTML",
"bytes": "555708"
},
{
"name": "JavaScript",
"bytes": "23921"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Matlab",
"bytes": "36260"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "46698963"
},
{
"name": "R",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "11728"
},
{
"name": "TeX",
"bytes": "18567"
}
],
"symlink_target": ""
} |
from setuptools import setup
import multiprocessing # nopep8
VERSION = '0.0.5-1'

# Write the version string into the package so runtime code can report it.
# BUG FIX: both files below were previously opened without context
# managers, leaking the handles.
with open('./orloui/_version.py', 'w') as version_file:
    version_file.write("__version__ = '{}'".format(VERSION))

# Read the long description up front instead of passing an open file's
# read() result inline to setup().
with open('README.md') as readme_file:
    long_description = readme_file.read()

setup(
    name='orloui',
    version=VERSION,
    description='User interface to Orlo',
    author='Alex Forbes',
    author_email='alforbes@ebay.com',
    license='GPL',
    long_description=long_description,
    url='https://github.com/eBayClassifiedsGroup/orloui',
    packages=[
        'orloui',
    ],
    include_package_data=True,
    install_requires=[
        'Flask',
        'arrow',
        'gunicorn',
        'orloclient>=0.0.5',
        'pytz',
    ],
    tests_require=[
        'Flask-Testing',
    ],
    test_suite='tests',
)
| {
"content_hash": "d72f29db1041a4d7451c29e8c4ef1124",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 57,
"avg_line_length": 23.21212121212121,
"alnum_prop": 0.5939947780678851,
"repo_name": "al4/orloui",
"id": "7021ff477ce8538bfa35de40d73aa027b70de86f",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2009"
},
{
"name": "HTML",
"bytes": "29306"
},
{
"name": "Makefile",
"bytes": "306"
},
{
"name": "Nginx",
"bytes": "497"
},
{
"name": "Python",
"bytes": "32814"
}
],
"symlink_target": ""
} |
from autosar.writer.writer_base import ElementWriter
import autosar.behavior
class XMLModeWriter(ElementWriter):
    """Serializes ModeDeclarationGroup elements to ARXML lines."""

    def __init__(self, version, patch):
        super().__init__(version, patch)

    def getSupportedXML(self):
        """Element type this writer can serialize to XML."""
        return 'ModeDeclarationGroup'

    def getSupportedCode(self):
        """No code generation is supported by this writer."""
        return []

    def writeElementXML(self, elem):
        """Serialize *elem* to a list of XML lines, or None if unsupported."""
        if type(elem).__name__ == 'ModeDeclarationGroup':
            return self.writeModeDeclarationGroupXML(elem)
        else:
            return None

    def writeElementCode(self, elem, localvars):
        raise NotImplementedError('writeElementCode')

    def writeModeDeclarationGroupXML(self, modeDeclGroup):
        """Render a ModeDeclarationGroup as a list of ARXML lines.

        @raise ValueError: when the group's initial-mode reference cannot
        be resolved in the workspace.
        """
        assert(isinstance(modeDeclGroup, autosar.mode.ModeDeclarationGroup))
        lines = []
        ws = modeDeclGroup.rootWS()
        assert(ws is not None)
        lines.append('<%s>' % modeDeclGroup.tag(self.version))
        lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>' % modeDeclGroup.name, 1))
        if modeDeclGroup.category is not None:
            lines.append(self.indent('<CATEGORY>%s</CATEGORY>' % modeDeclGroup.category, 1))
        if modeDeclGroup.adminData is not None:
            lines.extend(self.indent(self.writeAdminDataXML(modeDeclGroup.adminData), 1))
        if modeDeclGroup.initialModeRef is not None:
            modeElem = ws.find(modeDeclGroup.initialModeRef)
            if modeElem is None:
                # BUG FIX: previously formatted modeDeclGroup.typeRef, an
                # attribute that is not used anywhere else on the group
                # (the looked-up reference is initialModeRef), which would
                # raise AttributeError instead of this ValueError.
                raise ValueError("invalid mode reference: '%s'" % modeDeclGroup.initialModeRef)
            else:
                lines.append(self.indent('<INITIAL-MODE-REF DEST="%s">%s</INITIAL-MODE-REF>' % (modeElem.tag(self.version), modeElem.ref), 1))
        if len(modeDeclGroup.modeDeclarations) > 0:
            lines.append(self.indent('<MODE-DECLARATIONS>', 1))
            for elem in modeDeclGroup.modeDeclarations:
                lines.append(self.indent('<%s>' % elem.tag(self.version), 2))
                lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>' % elem.name, 3))
                if elem.value is not None:
                    lines.append(self.indent('<VALUE>{:d}</VALUE>'.format(elem.value), 3))
                lines.append(self.indent('</%s>' % elem.tag(self.version), 2))
            lines.append(self.indent('</MODE-DECLARATIONS>', 1))
        lines.append('</%s>' % modeDeclGroup.tag(self.version))
        return lines
| {
"content_hash": "d15d00183f00c9e93218882991217283",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 138,
"avg_line_length": 46.745098039215684,
"alnum_prop": 0.6254194630872483,
"repo_name": "cogu/autosar",
"id": "17412ea7cafcc656f10d90ee4f3809a99f8885fb",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autosar/writer/mode_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "1039000"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean 'archive' flag to the ProjectAirFiles model."""

    dependencies = [
        ("products", "0007_projectairfiles"),
    ]

    operations = [
        migrations.AddField(
            model_name="projectairfiles",
            name="archive",
            # Existing and new rows default to not archived.
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "d0ea74ae96257332b852dbf61b543300",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 21.625,
"alnum_prop": 0.5895953757225434,
"repo_name": "comic/comic-django",
"id": "77c7f12615055973d1439124dede5adb19c7367d",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/products/migrations/0008_projectairfiles_archive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
from openerp import fields, models
class Partner(models.Model):
    """Extend res.partner with instructor status and attended sessions."""
    _inherit = 'res.partner'

    # Whether this partner may act as a session instructor.
    instructor = fields.Boolean("Instructor", default=False)
    # NOTE(review): read-only here, so presumably maintained as the
    # inverse of a relation on openacademy.session -- confirm there.
    session_ids = fields.Many2many('openacademy.session',
                                   string="Attended Sessions",
                                   readonly=True)
| {
"content_hash": "ab281d521d6a9b870dc3348b207167f3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 30,
"alnum_prop": 0.5787878787878787,
"repo_name": "JesusZapata/openacademy-project",
"id": "4a414e051d3f2be0b4052b9ad0fd344b39347e7a",
"size": "354",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openacademy2/model/partner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27780"
}
],
"symlink_target": ""
} |
"""
File contains necessary utils for module
"""
from math import exp
def normalize(value, k=6):
    """Squash *value* into (-1, 1) with a sign-preserving exponential curve.

    The magnitude is 1 - exp(-|value/k|); the sign of *value* is kept
    (values >= 0 map to non-negative results).
    """
    magnitude = 1.0 - exp(-abs(value / k))
    return magnitude if value >= 0 else -magnitude
def to_unicode(s):
    """Coerce a byte string to unicode, assuming UTF-8 encoding.

    NOTE(review): Python 2 only -- relies on the `unicode` builtin.
    Inputs that are neither str nor unicode fall through and return None;
    confirm callers never pass other types.
    """
    if isinstance(s, str):
        return unicode(s, 'utf-8')
    elif isinstance(s, unicode):
        return s
| {
"content_hash": "0b44ad75c98bfc25862afc5d7b997586",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 43,
"avg_line_length": 18.31578947368421,
"alnum_prop": 0.5718390804597702,
"repo_name": "nicolay-r/tone-classifier",
"id": "c31b711b66926b1f7dc681e51c147c0b4d5c8605",
"size": "373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/core/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "33817"
},
{
"name": "Python",
"bytes": "140319"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: drop the 'unread_count' column from feeds_feed."""
    # Deleting field 'Feed.unread_count'
    db.delete_column(u'feeds_feed', 'unread_count')
def backwards(self, orm):
# Adding field 'Feed.unread_count'
db.add_column(u'feeds_feed', 'unread_count',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feeds.category': {
'Meta': {'ordering': "('order', 'name', 'id')", 'unique_together': "(('user', 'slug'), ('user', 'name'))", 'object_name': 'Category'},
'color': ('django.db.models.fields.CharField', [], {'default': "'gray'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['profiles.User']"})
},
u'feeds.entry': {
'Meta': {'ordering': "('-date', '-id')", 'object_name': 'Entry', 'index_together': "(('user', 'date'), ('user', 'read'), ('user', 'starred'), ('user', 'broadcast'))"},
'author': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'blank': 'True'}),
'broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entries'", 'null': 'True', 'to': u"orm['feeds.Feed']"}),
'guid': ('feedhq.feeds.fields.URLField', [], {'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('feedhq.feeds.fields.URLField', [], {'db_index': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'read_later_url': ('feedhq.feeds.fields.URLField', [], {'blank': 'True'}),
'starred': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['profiles.User']"})
},
u'feeds.favicon': {
'Meta': {'object_name': 'Favicon'},
'favicon': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('feedhq.feeds.fields.URLField', [], {'unique': 'True', 'db_index': 'True'})
},
u'feeds.feed': {
'Meta': {'ordering': "('name',)", 'object_name': 'Feed'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['feeds.Category']"}),
'favicon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_safe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1023'}),
'url': ('feedhq.feeds.fields.URLField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feeds'", 'to': u"orm['profiles.User']"})
},
u'feeds.uniquefeed': {
'Meta': {'object_name': 'UniqueFeed'},
'error': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_column': "'muted_reason'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'muted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('feedhq.feeds.fields.URLField', [], {'unique': 'True'})
},
u'profiles.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'allow_media': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'endless_pages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'entries_per_page': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'font': ('django.db.models.fields.CharField', [], {'default': "'pt-serif'", 'max_length': '75'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'oldest_first': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'read_later': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'read_later_credentials': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sharing_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sharing_gplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sharing_twitter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'UTC'", 'max_length': '75'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '30'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
}
}
complete_apps = ['feeds'] | {
"content_hash": "b5e7a76ed022fdd2aa6eb887a89c0e3e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 195,
"avg_line_length": 76.02521008403362,
"alnum_prop": 0.5463689620868797,
"repo_name": "vincentbernat/feedhq",
"id": "eeca8a0e3e347d9a29df17cd31bf6a3b1b58d200",
"size": "9071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedhq/feeds/migrations/0017_auto__del_field_feed_unread_count.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48652"
},
{
"name": "HTML",
"bytes": "49008"
},
{
"name": "JavaScript",
"bytes": "8882"
},
{
"name": "Makefile",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "639116"
},
{
"name": "Ruby",
"bytes": "656"
},
{
"name": "Shell",
"bytes": "278"
},
{
"name": "XML",
"bytes": "157996"
}
],
"symlink_target": ""
} |
"""
This file is covered by the LICENSING file in the root of this project.
"""
from hackathon.hack.hackathon_manager import HackathonManager
from hackathon.hack.admin_manager import AdminManager
from hackathon.hack.team_manager import TeamManager
from hackathon.hack.host_server_manager import DockerHostManager
from hackathon.hack.register_manager import RegisterManager
from hackathon.hack.hackathon_template_manager import HackathonTemplateManager
from hackathon.hack.cryptor import Cryptor
| {
"content_hash": "b5f3c6b64a247791b8eaa8ef5b4fb8cf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 45.09090909090909,
"alnum_prop": 0.8508064516129032,
"repo_name": "juniwang/open-hackathon",
"id": "ac9ef9229a8e0f98293f7065089137c76f4e5a57",
"size": "520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "open-hackathon-server/src/hackathon/hack/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "180309"
},
{
"name": "Dockerfile",
"bytes": "12333"
},
{
"name": "HTML",
"bytes": "468243"
},
{
"name": "Java",
"bytes": "7224"
},
{
"name": "JavaScript",
"bytes": "570371"
},
{
"name": "Python",
"bytes": "463614"
},
{
"name": "Ruby",
"bytes": "330"
},
{
"name": "Shell",
"bytes": "19148"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.resources import HdfsResource
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
# server configurations
# Ambari injects the command JSON; Script.get_config() exposes it as a dict.
config = Script.get_config()
stack_root = Script.get_stack_root()

# Slider lives at a fixed path, not under the stack root.
slider_home_dir = format('/usr/lib/slider')

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)

# hadoop params
# Default (pre-rolling-upgrade) location of the slider launcher scripts.
slider_bin_dir = "/usr/lib/slider/bin"
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
    # Rolling-upgrade-capable stacks resolve binaries under slider_home_dir.
    slider_bin_dir = format('{slider_home_dir}/bin')

slider_conf_dir = format("{slider_home_dir}/conf")
storm_slider_conf_dir = format('{stack_root}/current/storm-slider-client/conf')
slider_lib_dir = format('{slider_home_dir}/lib')
slider_tar_gz = format('{slider_lib_dir}/slider.tar.gz')

user_group = config['configurations']['cluster-env']['user_group']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

# Kerberos-related settings used by the HdfsResource partial below.
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
security_enabled = config['configurations']['cluster-env']['security_enabled']
# NOTE(review): duplicate of the hdfs_user assignment above; harmless, could be removed.
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']

hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
HdfsResource = functools.partial(
    HdfsResource,
    user=hdfs_user,
    hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
    security_enabled = security_enabled,
    keytab = hdfs_user_keytab,
    kinit_path_local = kinit_path_local,
    hadoop_bin_dir = hadoop_bin_dir,
    hadoop_conf_dir = hadoop_conf_dir,
    principal_name = hdfs_principal_name,
    hdfs_site = hdfs_site,
    default_fs = default_fs,
    immutable_paths = get_not_managed_resources()
)
| {
"content_hash": "1bccddc5cc4d7095bbdda4d8c5d4fa30",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 106,
"avg_line_length": 45.5,
"alnum_prop": 0.7837041007772715,
"repo_name": "arenadata/ambari",
"id": "35288c99860ce853de1d763824f182996eb1de3d",
"size": "3731",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/ADH/1.5/services/SLIDER/package/scripts/params_linux.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
from .button import Button
from .checkbox import CheckBox
from .combobox import ComboBox
from .edit import Edit
from .frame import Frame
from .link import Link
from .listitem import ListItem
from .radiogroup import RadioGroup
from .table import Table
from .text import Text
| {
"content_hash": "461b26a2b53e3551f97ad6eef5b23792",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 34,
"avg_line_length": 27.4,
"alnum_prop": 0.8175182481751825,
"repo_name": "WesleyPeng/uiXautomation",
"id": "3ac6749ae17d81b75c4f026fe9aa1d6f9c33f574",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/taf/foundation/api/ui/controls/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "140"
},
{
"name": "Gherkin",
"bytes": "1235"
},
{
"name": "Python",
"bytes": "182375"
},
{
"name": "RobotFramework",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "1672"
}
],
"symlink_target": ""
} |
import argparse
import os
import re
import subprocess
import sys
def _filter_ibtool_output(output):
    """Print ibtool's output, dropping noisy 'is clipping its content' notes.

    ibtool groups messages under section headers that look like comments
    (/* ... */); a header is held back and only printed when at least one
    line in its section survives the filtering.
    """
    ibtool_section_re = re.compile(r'/\*.*\*/')
    ibtool_re = re.compile(r'.*note:.*is clipping its content')
    current_section_header = None
    for line in output.splitlines():
        if ibtool_section_re.match(line):
            # Defer printing until the section produces a real message.
            current_section_header = line
        elif not ibtool_re.match(line):
            if current_section_header:
                print(current_section_header)
                current_section_header = None
            print(line)


def main():
    """Compile a .xib/.storyboard with ibtool, printing filtered output.

    Returns 0 on success; a CalledProcessError propagates (after echoing
    ibtool's output) if the compile fails.
    """
    parser = argparse.ArgumentParser(
        description='A script to compile xib and storyboard.',
        fromfile_prefix_chars='@')
    parser.add_argument('-o', '--output', required=True,
                        help='Path to output bundle.')
    parser.add_argument('-i', '--input', required=True,
                        help='Path to input xib or storyboard.')
    parser.add_argument('--developer_dir', required=False,
                        help='Path to Xcode.')
    args, unknown_args = parser.parse_known_args()
    if args.developer_dir:
        os.environ['DEVELOPER_DIR'] = args.developer_dir

    ibtool_args = [
        'xcrun', 'ibtool',
        '--errors', '--warnings', '--notices',
        '--output-format', 'human-readable-text'
    ]
    # Any flags this script does not recognize are forwarded to ibtool.
    ibtool_args += unknown_args
    ibtool_args += [
        '--compile',
        os.path.abspath(args.output),
        os.path.abspath(args.input)
    ]

    try:
        # check_output returns bytes on Python 3; decode so the str-pattern
        # regex filtering below works on both Python 2 and 3 (the original
        # raised TypeError on Python 3).
        stdout = subprocess.check_output(ibtool_args).decode('utf-8')
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise
    _filter_ibtool_output(stdout)
    return 0


if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "0617ffa1c9896016f6e35d6528993d4d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 62,
"avg_line_length": 28.181818181818183,
"alnum_prop": 0.6245161290322581,
"repo_name": "youtube/cobalt_sandbox",
"id": "d53fa1579092b7bdc78a5c8a39c4b2838ba4e346",
"size": "1714",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "third_party/skia/gn/compile_ib_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
tests.utils.py
~~~~~~
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
import unittest
from flask import g
# Imports inside Bombolone
import core.utils
# Fake text to use in tests
TEXT = "This is a test"
class TestUtils(unittest.TestCase):
    """Unit tests for the helpers in core.utils."""

    def test_get_message(self):
        """should handle valid requests"""
        # Prime the flask request globals that get_message reads.
        g.lang = 'en'
        g.test = {'fake_entry': TEXT}
        g.test_msg = core.utils.decorators.GetValue(g.test).check_key if False else None  # NOTE(review): see below
        get_value = core.utils.decorators.GetValue(g.test)
        g.test_msg = get_value.check_key
        # message is a (g attribute name, key) pair resolved by get_message.
        message = ('test_msg', 'fake_entry')
        data = dict(success=True, message=message)
        result = core.utils.get_message(data)
        self.assertEqual(True, result[0])
        self.assertEqual(TEXT, result[1])
        # Failure payloads carry the message inside an errors list instead.
        errors = [{'code': message}]
        data = dict(success=False, errors=errors)
        result = core.utils.get_message(data)
        self.assertEqual(False, result[0])
        self.assertEqual(TEXT, result[1])

    def test_get_message_invalid(self):
        """should return an empty string on invalid request"""
        message = ()
        data = dict(success=True, message=message)
        result = core.utils.get_message(data)
        self.assertEqual(True, result[0])
        self.assertEqual("", result[1])
        data = dict(success=False, message=message)
        result = core.utils.get_message(data)
        self.assertEqual(False, result[0])
        self.assertEqual("", result[1])

    def test_set_message(self):
        # Smoke test only: verifies set_message does not raise.
        data = {
            "success": ""
        }
        result = core.utils.set_message(data)

    def test_msg_status(self):
        # Smoke test only: verifies msg_status does not raise.
        success = ""
        result = core.utils.msg_status(success)

    def test_linkify(self):
        # Test with some random characters
        string = u"fòò bàr !£##“`ñµ@ł€¶ŧ←↓→øæßðđŋħ«»¢“”ñµ"
        result = core.utils.linkify(string)
        self.assertEqual("foo-bar-nn", result)
        # Test with a real string
        string = u"Foo bar... O fuì bar lol Mare!"
        result = core.utils.linkify(string)
        self.assertEqual("foo-bar-o-fui-bar-lol-mare", result)

    def test_ensure_objectid(self):
        # Smoke test only: verifies ensure_objectid accepts an empty id.
        item_id = ""
        result = core.utils.ensure_objectid(item_id)
| {
"content_hash": "fdc5074564df57ea68a5daba37aec974",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 62,
"avg_line_length": 27.962025316455698,
"alnum_prop": 0.6011770031688547,
"repo_name": "katiecheng/Bombolone",
"id": "c07a88a3ed14770ddb9ccc0202e8fc9faf90f4b2",
"size": "2269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "107071"
},
{
"name": "JavaScript",
"bytes": "38358"
},
{
"name": "Python",
"bytes": "7381383"
},
{
"name": "Ruby",
"bytes": "1502"
},
{
"name": "Shell",
"bytes": "3839"
}
],
"symlink_target": ""
} |
'''
Created on Feb 28, 2013
@author: czue
'''
from copy import copy
from datetime import datetime
from casexml.apps.case.mock import CaseBlock
from corehq.apps.commtrack.const import RequisitionActions, RequisitionStatus, UserRequisitionRoles, notification_template
import uuid
from corehq.apps.commtrack.models import RequisitionCase
from corehq.apps.users.cases import get_owning_users, get_owner_id
from corehq.apps.users.models import CouchUser
from casexml.apps.case.xml import V2
from corehq.apps.commtrack import const
from xml.etree import ElementTree
from corehq.apps.hqcase.utils import submit_case_blocks
from dimagi.utils.parsing import json_format_datetime
class RequisitionState(object):
    """
    Intermediate representation of a requisition.

    Accumulates the case properties for a requisition case update and can
    render itself as a CaseBlock XML string for submission.
    """

    def __init__(self, domain, id, user_id, username, product_stock_case,
                 create=False, owner_id=None, close=False, **custom_fields):
        self.domain = domain
        self.id = id
        self.user_id = user_id
        self.owner_id = owner_id
        self.username = username
        self.product_stock_case = product_stock_case
        self.create = create
        self.close = close
        # Arbitrary extra case properties (amount_requested, statuses, ...).
        self.custom_fields = custom_fields or {}

    def to_xml(self):
        """Render this state as a casexml CaseBlock XML string."""
        extras = {}
        if self.owner_id:
            extras['owner_id'] = self.owner_id
        if self.create:
            # New cases are named after, and indexed against, the parent
            # product-stock case.
            extras['case_name'] = self.product_stock_case.name
            extras['index'] = {
                const.PARENT_CASE_REF: (const.SUPPLY_POINT_PRODUCT_CASE_TYPE,
                                        self.product_stock_case._id),
            }
        caseblock = CaseBlock(
            case_id=self.id,
            create=self.create,
            version=V2,
            user_id=self.user_id,
            case_type=const.REQUISITION_CASE_TYPE,
            update = copy(self.custom_fields),
            close=self.close,
            **extras
        )
        return ElementTree.tostring(caseblock.as_xml(format_datetime=json_format_datetime))

    @classmethod
    def from_transactions(cls, user_id, product_stock_case, transactions):
        """Build a RequisitionState by merging a list of transactions.

        All transactions must target the same requisition case, and their
        field updates must be disjoint.
        """
        assert transactions, "can't make a requisition state from an empty transaction list"

        def _to_fields(transaction):
            # Map one transaction's action type onto the case properties it
            # sets; REQUEST additionally triggers case creation.
            ret = {'requisition_status': RequisitionStatus.by_action_type(transaction.action_config.action_type)}
            if transaction.action_config.action_type == RequisitionActions.REQUEST:
                ret.update({
                    'create': True,
                    'owner_id': get_owner_id(product_stock_case) or user_id,
                    'amount_requested': transaction.value,
                    'product_id': product_stock_case.product,
                    'requested_by': user_id,
                    'requested_on': datetime.utcnow(),
                })
            elif transaction.action_config.action_type == RequisitionActions.APPROVAL:
                ret.update({
                    'amount_approved': transaction.value,
                    'approved_by': user_id,
                    'approved_on': datetime.utcnow(),
                })
            elif transaction.action_config.action_type == RequisitionActions.PACK:
                ret.update({
                    'amount_packed': transaction.value,
                    'packed_by': user_id,
                    'packed_on': datetime.utcnow(),
                })
            elif transaction.action_config.action_type == RequisitionActions.RECEIPTS:
                # Receipt is the terminal step; it closes the case.
                ret.update({
                    'amount_received': transaction.value,
                    'received_by': user_id,
                    'received_on': datetime.utcnow(),
                    'close': True,
                })
            else:
                raise ValueError("the type %s isn't yet supported." % transaction.action_config.action_type)
            return ret

        def _get_case_id(transactions):
            # Use the (single) case id the transactions reference, or mint a
            # new uuid when none of them carries one (fresh requisition).
            req_case_id = None
            for tx in transactions:
                if tx.requisition_case_id:
                    if req_case_id:
                        assert tx.requisition_case_id == req_case_id, 'tried to update multiple cases with one set of transactions'
                    req_case_id = tx.requisition_case_id
            return req_case_id or uuid.uuid4().hex

        kwargs = {}
        for tx in transactions:
            fields = _to_fields(tx)
            for field in fields:
                assert field not in kwargs, 'transaction updates should be disjoint but found %s twice' % field
            kwargs.update(fields)
        username = CouchUser.get(user_id).username
        return cls(
            domain=product_stock_case.domain,
            id=_get_case_id(transactions),
            user_id=user_id,
            username=username,
            product_stock_case=product_stock_case,
            **kwargs
        )
def create_requisition(user_id, product_stock_case, transaction):
    """Create and submit a requisition case from a single transaction.

    Returns the freshly fetched RequisitionCase, annotated with the
    product stock case's location.
    """
    state = RequisitionState.from_transactions(user_id, product_stock_case,
                                               [transaction])
    submit_case_blocks(state.to_xml(), state.domain, state.username,
                       state.user_id)
    requisition = RequisitionCase.get(state.id)
    requisition.location_ = product_stock_case.location_
    return requisition
def should_notify_user(user, next_action_type):
    """True if the user's data flags the role matching this action type."""
    role_flag = UserRequisitionRoles.get_user_role(next_action_type)
    return user.user_data.get(role_flag, False)
def get_notification_recipients(next_action, requisition):
    """Exhaustive list of users to notify when the requisition enters the
    status implied by next_action."""
    owner_users = get_owning_users(get_owner_id(requisition))
    # A sole owner is always notified, regardless of role flags.
    if len(owner_users) == 1:
        return owner_users
    action_type = next_action.action_type
    return [user for user in owner_users
            if should_notify_user(user, action_type)]
def get_notification_message(next_action, requisitions):
    """Render the SMS notification text for requisitions entering the
    status implied by next_action."""
    # NOTE: it'd be weird if this was None but for now we won't fail hard
    guessed_location = requisitions[0].get_location()
    requester = requisitions[0].get_requester()
    summary = ', '.join(req.sms_format() for req in requisitions)
    template = notification_template(next_action.action)
    return template.format(
        name=requester.full_name if requester else "Unknown",
        summary=summary,
        loc=guessed_location.site_code if guessed_location else "<loc code>",
        keyword=next_action.keyword,
    )
| {
"content_hash": "6c849965fe08959fa50c00e3ec6ec809",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 131,
"avg_line_length": 40.29299363057325,
"alnum_prop": 0.6062282643060386,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "ee474f01cdb702ff2e64f08f2d1e66c9e7311747",
"size": "6326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/commtrack/requisitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import argparse
import os
import time
import jnius_config
import tensorflow as tf
from pycarbon.reader import make_reader
from pycarbon.tests.mnist.dataset_with_unischema import DEFAULT_MNIST_DATA_PATH
from pycarbon.tests import DEFAULT_CARBONSDK_PATH
from pycarbon.core.carbon_tf_utils import tf_tensors
def train_and_test(dataset_url, training_iterations, batch_size, evaluation_interval, start):
    """
    Train a model for training iterations with a batch size batch_size, printing accuracy every log_interval.
    :param dataset_url: The MNIST dataset url.
    :param training_iterations: The training iterations to train for.
    :param batch_size: The batch size for training.
    :param evaluation_interval: The interval used to print the accuracy.
    :param start: Wall-clock start time (time.time()) used only for the
        "before train time" log line.
    :return:
    """
    # num_epochs=None makes both readers loop forever; the iteration count
    # below bounds training instead.
    with make_reader(os.path.join(dataset_url, 'train'), num_epochs=None, is_batch=False) as train_reader:
        with make_reader(os.path.join(dataset_url, 'test'), num_epochs=None, is_batch=False) as test_reader:
            # Bridge the carbon reader into TF1 graph tensors.
            train_readout = tf_tensors(train_reader)
            # 28x28 images flattened to 784-float vectors.
            train_image = tf.cast(tf.reshape(train_readout.image, [784]), tf.float32)
            train_label = train_readout.digit
            batch_image, batch_label = tf.train.batch(
                [train_image, train_label], batch_size=batch_size
            )

            # Single-layer softmax regression: y = xW + b.
            W = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            y = tf.matmul(batch_image, W) + b

            # The raw formulation of cross-entropy,
            #
            #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
            #                                 reduction_indices=[1]))
            #
            # can be numerically unstable.
            #
            # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
            # outputs of 'y', and then average across the batch.
            cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=batch_label, logits=y)
            train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

            correct_prediction = tf.equal(tf.argmax(y, 1), batch_label)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            # Mirror the input pipeline for the evaluation split.
            test_readout = tf_tensors(test_reader)
            test_image = tf.cast(tf.reshape(test_readout.image, [784]), tf.float32)
            test_label = test_readout.digit
            test_batch_image, test_batch_label = tf.train.batch(
                [test_image, test_label], batch_size=batch_size
            )

            end = time.time()
            print("before train time: " + str(end - start))

            # Train
            print('Training model for {0} training iterations with batch size {1} and evaluation interval {2}'.format(
                training_iterations, batch_size, evaluation_interval
            ))
            with tf.Session() as sess:
                sess.run([
                    tf.local_variables_initializer(),
                    tf.global_variables_initializer(),
                ])
                # Queue runners feed the tf.train.batch pipelines above.
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                try:
                    for i in range(training_iterations):
                        if coord.should_stop():
                            break
                        sess.run(train_step)
                        # Evaluate periodically and always on the last step.
                        if (i % evaluation_interval) == 0 or i == (training_iterations - 1):
                            feed_batch_image, feed_batch_label = sess.run([test_batch_image, test_batch_label])
                            print('After {0} training iterations, the accuracy of the model is: {1:.2f}'.format(
                                i,
                                sess.run(accuracy, feed_dict={
                                    batch_image: feed_batch_image, batch_label: feed_batch_label
                                })))
                finally:
                    coord.request_stop()
                    coord.join(threads)
def _build_parser():
    """Construct the command-line parser for this example."""
    parser = argparse.ArgumentParser(description='Pycarbon Tensorflow MNIST Example')
    default_dataset_url = 'file://{}'.format(DEFAULT_MNIST_DATA_PATH)
    parser.add_argument('--dataset-url', type=str,
                        default=default_dataset_url, metavar='S',
                        help='hdfs:// or file:/// URL to the MNIST pycarbon dataset'
                             '(default: %s)' % default_dataset_url)
    parser.add_argument('--training-iterations', type=int, default=100, metavar='N',
                        help='number of training iterations to train (default: 100)')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--evaluation-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before evaluating the model accuracy (default: 10)')
    parser.add_argument('--carbon-sdk-path', type=str, default=DEFAULT_CARBONSDK_PATH,
                        help='carbon sdk path')
    return parser


def main():
    """Parse arguments, wire up the carbon SDK classpath, and run training."""
    print("Start")
    start = time.time()

    args = _build_parser().parse_args()
    # The carbon reader goes through pyjnius, which needs the SDK jar on
    # the JVM classpath before any reader is created.
    jnius_config.set_classpath(args.carbon_sdk_path)

    train_and_test(
        dataset_url=args.dataset_url,
        training_iterations=args.training_iterations,
        batch_size=args.batch_size,
        evaluation_interval=args.evaluation_interval,
        start=start
    )

    print("all time: " + str(time.time() - start))
    print("Finish")


if __name__ == '__main__':
    main()
| {
"content_hash": "ee1230ac329e73aa6d9fd71f5d162e64",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 112,
"avg_line_length": 39.87596899224806,
"alnum_prop": 0.6401632970451011,
"repo_name": "jackylk/incubator-carbondata",
"id": "853af762249a414d4886da0268ecf8168f081742",
"size": "6102",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pycarbon/tests/mnist/dataset_with_unischema/tf_example_carbon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1639"
},
{
"name": "C++",
"bytes": "110888"
},
{
"name": "CMake",
"bytes": "1555"
},
{
"name": "Java",
"bytes": "7172203"
},
{
"name": "Python",
"bytes": "365111"
},
{
"name": "Scala",
"bytes": "10579226"
},
{
"name": "Shell",
"bytes": "7259"
},
{
"name": "Smalltalk",
"bytes": "86"
},
{
"name": "Thrift",
"bytes": "24309"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from designate.objects.adapters.api_v2 import base
from designate import objects
from designate import exceptions
LOG = logging.getLogger(__name__)
class RecordSetAPIv2Adapter(base.APIv2Adapter):
    """API v2 adapter translating between RecordSet objects and API dicts."""

    ADAPTER_OBJECT = objects.RecordSet

    MODIFICATIONS = {
        'fields': {
            "id": {},
            "zone_id": {
                'rename': 'domain_id'
            },
            "name": {
                'immutable': True
            },
            "type": {
                'rename': 'type',
                'immutable': True
            },
            "records": {
                'read_only': False
            },
            "description": {
                'read_only': False
            },
            "ttl": {
                'read_only': False
            },
            "status": {},
            "action": {},
            "version": {},
            "created_at": {},
            "updated_at": {},
        },
        'options': {
            'links': True,
            'resource_name': 'recordset',
            'collection_name': 'recordsets',
        }
    }

    @classmethod
    def _parse_object(cls, new_recordset, recordset, *args, **kwargs):
        # TODO(Graham): Remove this when
        # https://bugs.launchpad.net/designate/+bug/1432842 is fixed
        try:
            recordset.records
        except exceptions.RelationNotLoaded:
            recordset.records = objects.RecordList()

        # Snapshot the record data currently attached to the recordset.
        existing = {record.data for record in recordset.records}

        # Collect the record data supplied by the caller, validating that
        # it arrived as a list.
        incoming = set()
        records_supplied = 'records' in new_recordset
        if records_supplied:
            supplied = new_recordset['records']
            if not isinstance(supplied, list):
                errors = objects.ValidationErrorList()
                e = objects.ValidationError()
                e.path = ['records']
                e.validator = 'type'
                e.validator_value = ["list"]
                e.message = ("'%(data)s' is not a valid list of records"
                             % {'data': supplied})
                # Add it to the list for later
                errors.append(e)
                raise exceptions.InvalidObject(
                    "Provided object does not match "
                    "schema", errors=errors, object=cls.ADAPTER_OBJECT())
            incoming.update(supplied)
            # Records are merged by hand below; keep them out of the
            # generic field parsing done by the base class.
            del new_recordset['records']

        # Work out which record values appeared and which disappeared.
        to_add = incoming.difference(existing)
        to_remove = existing.difference(incoming)

        # Drop deleted records only when the caller actually provided a
        # records array.
        if records_supplied:
            recordset.records[:] = [record for record in recordset.records
                                    if record.data not in to_remove]

        # Attach the brand-new records.
        for data in to_add:
            recordset.records.append(objects.Record(data=data))

        return super(RecordSetAPIv2Adapter, cls)._parse_object(
            new_recordset, recordset, *args, **kwargs)
class RecordSetListAPIv2Adapter(base.APIv2Adapter):
    # Attribute-only adapter for lists of RecordSets; all rendering logic
    # lives in base.APIv2Adapter and is driven by MODIFICATIONS.
    ADAPTER_OBJECT = objects.RecordSetList
    MODIFICATIONS = {
        'options': {
            # NOTE(review): presumably enables collection/self links in the
            # rendered output — verify against base.APIv2Adapter.
            'links': True,
            'resource_name': 'recordset',
            'collection_name': 'recordsets',
        }
    }
| {
"content_hash": "5f98ec1ef803d9ad817200f5341d419f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 74,
"avg_line_length": 31.831858407079647,
"alnum_prop": 0.5159855435084792,
"repo_name": "ionrock/designate",
"id": "33d4d055ca5d0ca32c4945dc372ec7c8f42522ea",
"size": "4230",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "designate/objects/adapters/api_v2/recordset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2169940"
},
{
"name": "Ruby",
"bytes": "4560"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
} |
import datetime
from django.utils.encoding import smart_str
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from django.conf import settings
from django.core.management.base import BaseCommand
from awx.conf.models import Setting
class Command(BaseCommand):
    """Generate and store a randomized RSA key for SSH traffic to isolated instances"""

    help = 'Generates and stores a randomized RSA key for SSH traffic to isolated instances'

    def handle(self, *args, **kwargs):
        # A key already exists: just echo the public half and stop.
        if getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', False):
            print(settings.AWX_ISOLATED_PUBLIC_KEY)
            return

        # Generate a fresh 4096-bit RSA keypair.
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=4096,
            backend=default_backend()
        )

        # Persist the private half as PEM.
        # NOTE(review): create() already saves; the extra .save() issues a
        # second write — confirm whether that is intentional (e.g. to
        # trigger field encryption) before removing it.
        pem_private = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )
        Setting.objects.create(
            key='AWX_ISOLATED_PRIVATE_KEY',
            value=pem_private
        ).save()

        # Persist the public half in OpenSSH format with a comment tag.
        public_openssh = smart_str(private_key.public_key().public_bytes(
            encoding=serialization.Encoding.OpenSSH,
            format=serialization.PublicFormat.OpenSSH
        ))
        comment = " generated-by-awx@%s" % datetime.datetime.utcnow().isoformat()
        pemfile = Setting.objects.create(
            key='AWX_ISOLATED_PUBLIC_KEY',
            value=public_openssh + comment
        )
        pemfile.save()
        print(pemfile.value)
| {
"content_hash": "4f363f275eec24fd6d34dc639b737143",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 92,
"avg_line_length": 37.883720930232556,
"alnum_prop": 0.6586863106200123,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "e2b06bd21e5b986be2b2fdc056063746f5d1bc48",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/management/commands/generate_isolated_key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from chainer.functions.connection import dilated_convolution_2d
from chainer import initializers
from chainer import link
from chainer import variable
class DilatedConvolution2D(link.Link):
    """Two-dimensional dilated convolutional layer.
    This link wraps the :func:`~chainer.functions.dilated_convolution_2d`
    function and holds the filter weight and bias vector as parameters.
    Args:
        in_channels (int or None): Number of channels of input arrays.
            If ``None``, parameter initialization will be deferred until the
            first forward data pass at which time the size will be determined.
        out_channels (int): Number of channels of output arrays.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        dilate (int or pair of ints): Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
        nobias (bool): If ``True``, then this link does not use the bias term.
        initialW (4-D array): Initial weight value. If ``None``, the default
            initializer is used. May also be a callable that takes
            ``numpy.ndarray`` or ``cupy.ndarray`` and edits its value.
        initial_bias (1-D array): Initial bias value. If ``None``, the default
            initializer is used. May also be a callable that takes
            ``numpy.ndarray`` or ``cupy.ndarray`` and edits its value.
    .. seealso::
        See :func:`chainer.functions.dilated_convolution_2d`
        for the definition of two-dimensional dilated convolution.
    Attributes:
        W (~chainer.Variable): Weight parameter.
        b (~chainer.Variable): Bias parameter.
    .. admonition:: Example
        There are several ways to make a DilatedConvolution2D link.
        Let an input vector ``x`` be:
        >>> x = np.arange(1 * 3 * 10 * 10, dtype='f').reshape(1, 3, 10, 10)
        1. Give the first three arguments explicitly:
        >>> l = L.DilatedConvolution2D(3, 7, 5)
        >>> y = l(x)
        >>> y.shape
        (1, 7, 6, 6)
        2. Omit ``in_channels`` or fill it with ``None``:
        The below two cases are the same.
        >>> l = L.DilatedConvolution2D(7, 5)
        >>> y = l(x)
        >>> y.shape
        (1, 7, 6, 6)
        >>> l = L.DilatedConvolution2D(None, 7, 5)
        >>> y = l(x)
        >>> y.shape
        (1, 7, 6, 6)
        When you omit the first argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword arguments. So the
        below two cases are the same.
        >>> l = L.DilatedConvolution2D(None, 7, 5, 1, 0, 2)
        >>> y = l(x)
        >>> y.shape
        (1, 7, 2, 2)
        >>> l = L.DilatedConvolution2D(7, 5, stride=1, pad=0, dilate=2)
        >>> y = l(x)
        >>> y.shape
        (1, 7, 2, 2)
    """
    def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
                 dilate=1, nobias=False, initialW=None, initial_bias=None):
        super(DilatedConvolution2D, self).__init__()
        # When ksize is omitted, the caller actually passed
        # (out_channels, ksize) in the first two positions: shift them and
        # defer in_channels until the first forward pass.
        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None
        self.ksize = ksize
        self.stride = _pair(stride)
        self.pad = _pair(pad)
        self.dilate = _pair(dilate)
        self.out_channels = out_channels
        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            # W is left unshaped here when in_channels is unknown; it is
            # shaped in _initialize_params.
            self.W = variable.Parameter(W_initializer)
            if in_channels is not None:
                self._initialize_params(in_channels)
            if nobias:
                self.b = None
            else:
                # A scalar 0 initializer yields a zero bias by default.
                if initial_bias is None:
                    initial_bias = 0
                initial_bias = initializers._get_initializer(initial_bias)
                self.b = variable.Parameter(initial_bias, out_channels)
    def _initialize_params(self, in_channels):
        # Shape the weight now that the input channel count is known.
        kh, kw = _pair(self.ksize)
        W_shape = (self.out_channels, in_channels, kh, kw)
        self.W.initialize(W_shape)
    def __call__(self, x):
        """Applies the convolution layer.
        Args:
            x (~chainer.Variable): Input image.
        Returns:
            ~chainer.Variable: Output of the convolution.
        """
        # Deferred initialization: infer in_channels from the input batch.
        if self.W.data is None:
            self._initialize_params(x.shape[1])
        return dilated_convolution_2d.dilated_convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.dilate)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
| {
"content_hash": "6a99ce3468ca2b7e35aaaf008b880f5c",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 35.78417266187051,
"alnum_prop": 0.5701648572577402,
"repo_name": "ysekky/chainer",
"id": "812368302affca0cebb055e26ed0aafba4b6e93f",
"size": "4974",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chainer/links/connection/dilated_convolution_2d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2598837"
}
],
"symlink_target": ""
} |
import configparser
import os
class Secrets:
    """Persist secret user information in an ini file."""

    def __init__(self, file_path):
        # Parsed ConfigParser instance; stays None until load_file() runs.
        self.secrets = None
        self.file_path = file_path

    def create_file_if_not_exists(self):
        """Create an empty secrets file when none exists yet."""
        if not os.path.isfile(self.file_path):
            open(self.file_path, 'w').close()

    def load_file(self):
        """Parse the secrets file once; later calls are no-ops."""
        if self.secrets is not None:
            return
        # Make sure there is a file to parse, then load it.
        self.create_file_if_not_exists()
        parser = configparser.ConfigParser()
        parser.read(self.file_path)
        self.secrets = parser

    def save_file(self):
        """Write the in-memory secrets back to disk."""
        self.load_file()
        with open(self.file_path, 'w') as secrets_file:
            self.secrets.write(secrets_file)

    def get(self, section, key):
        """Return the secret stored under *key* in *section*.

        :param section: section containing the secret
        :param key: secret identifier
        :return: the secret value, or None when section or key is absent
        """
        self.load_file()
        if not self.has_section(section):
            return None
        return self.secrets[section].get(key)

    def put(self, section, key, value):
        """Store *value* under *key* in *section* (in memory only).

        Call :meth:`save_file` to persist the change to disk.
        """
        self.load_file()
        if section not in self.secrets:
            self.secrets[section] = {}
        self.secrets[section][key] = value

    def has_section(self, section):
        """Return True when the loaded file contains *section*."""
        self.load_file()
        return section in self.secrets
| {
"content_hash": "e2ca2f8f22af756bcf277d352e949382",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 67,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.5684713375796179,
"repo_name": "jameslafa/500px-fav-downloader",
"id": "fa5dce20bd6f9fc1c9f8601b769464d06136f3fd",
"size": "2512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/secrets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "537"
},
{
"name": "JavaScript",
"bytes": "110"
},
{
"name": "Python",
"bytes": "11808"
}
],
"symlink_target": ""
} |
"""
A stack is the classic abstract data type container implementing a LIFO interface.
But we're also interested in how deep our stack gets over its lifetime and, more
specifically over the last 'n' operations (set in Stack.DEPTH_HISTORY).
"""
class KStack:
    """A LIFO stack implemented with the State pattern.

    The stack delegates to an internal state object (``Empty`` or
    ``NonEmpty``); the class object ``KStack.Empty`` doubles as the
    sentinel returned by ``tos``/``pop`` on an empty stack.
    """

    class Empty:
        """State for a stack holding no items."""

        def tos(self, stack):
            # Nothing on the stack: return the sentinel.
            return KStack.Empty

        def pop(self, stack):
            # Nothing to remove: return the sentinel.
            return KStack.Empty

        def push(self, stack, value):
            # First item arrives: transition the owner to NonEmpty.
            stack._stack = KStack.NonEmpty(value)
            return KStack.NonEmpty

    class NonEmpty:
        """State for a stack holding at least one item."""

        def __init__(self, value):
            self._data = [value]

        def tos(self, stack):
            return self._data[-1]

        def pop(self, stack):
            if len(self._data):
                value = self._data.pop()
                # Bug fix: transition back to Empty as soon as the last
                # item is removed.  Previously the stack stayed NonEmpty
                # with an empty list, so a subsequent tos() raised
                # IndexError instead of returning the Empty sentinel.
                if not self._data:
                    stack._stack = KStack.Empty()
                return value
            # Defensive: unreachable now that we transition eagerly above.
            stack._stack = KStack.Empty()
            return KStack.Empty

        def push(self, stack, value):
            self._data.append(value)
            return KStack.NonEmpty

    def __init__(self):
        self._stack = KStack.Empty()

    def tos(self):
        """Return the top value, or ``KStack.Empty`` when empty."""
        return self._stack.tos(self)

    def pop(self):
        """Remove and return the top value, or ``KStack.Empty`` when empty."""
        return self._stack.pop(self)

    def push(self, value):
        """Push *value*; returns the resulting state class."""
        return self._stack.push(self, value)
class Stack(KStack):
    """A LIFO stack that also tracks how deep it gets over time.

    Depth statistics cover the most recent ``Stack.DEPTH_HISTORY``
    push/pop operations.
    """

    # How many of the most recent operations keep a depth sample.
    DEPTH_HISTORY = 1000

    def __init__(self):
        self.reset()
        super(Stack, self).__init__()

    def reset(self):
        """Clear the stack and all depth-tracking state."""
        self._depth_history_count = {0: 1}
        self._depth_history = []
        self._push_count = 0
        self._pop_count = 0
        self._stack = KStack.Empty()

    def _update_depth_history(self):
        """Record the current depth in the counters and rolling history."""
        stack_depth = self.depth()
        # Bug fix: the format arguments were swapped relative to the
        # message placeholders, so the push count printed as "pops" and
        # vice versa.
        assert stack_depth >= 0, "Error - somehow we got more pops(%s) than pushes(%s)!" % (self._pop_count, self._push_count)
        self._depth_history_count[stack_depth] = self._depth_history_count.get(stack_depth, 0) + 1
        self._depth_history.append(stack_depth)
        # Keep only the most recent DEPTH_HISTORY samples.
        if len(self._depth_history) > Stack.DEPTH_HISTORY:
            self._depth_history.pop(0)

    def history_depth_count(self, depth=None):
        """Return depth frequencies.

        With ``depth`` given, returns how many times that exact depth was
        observed (0 if never).  Without it, returns a list of
        ``(depth, count)`` pairs for every depth seen so far.
        """
        if depth is not None:
            return self._depth_history_count.get(depth, 0)
        return [(k, self._depth_history_count.get(k, 0)) for k in self._depth_history_count.keys()]

    def depth_history(self, count_limit=None):
        """Return the most recent depth samples, oldest first, capped at
        ``count_limit`` (itself capped at ``Stack.DEPTH_HISTORY``)."""
        if count_limit is None:
            count_limit = Stack.DEPTH_HISTORY
        else:
            if count_limit > Stack.DEPTH_HISTORY:
                count_limit = Stack.DEPTH_HISTORY
        count_limit = count_limit * -1  # So we can return in reverse (most recent) order.
        return [x for x in self._depth_history[count_limit::]]

    def max_depth(self, history_limit=None):
        """
        Returns the deepest depth of our stack over its known history
        up to 'history_limit' (if specified) or 'Stack.DEPTH_HISTORY'
        operations whichever is smaller.
        NOTE: raises ValueError when no operations have been recorded yet
        (``max()`` of an empty history).
        """
        return max(self.depth_history(history_limit))

    def depth(self):
        """Current number of items on the stack."""
        return self._push_count - self._pop_count

    def total_operations(self):
        """
        Returns the total number of time push() or pop() have been called
        against this stack.
        """
        return self._push_count + self._pop_count

    def is_empty(self):
        return self.depth() == 0

    def content(self):
        """
        Returns the contents of the stack. Leftmost is oldest. Rightmost is top of stack.
        """
        if isinstance(self._stack, KStack.NonEmpty):
            return [x for x in self._stack._data]
        return []

    def push(self, item):
        """
        Adds item to top of stack.
        Updates our stack_history.
        """
        self._push_count += 1
        self._update_depth_history()
        return super(Stack, self).push(item)

    def pop(self):
        """
        If the stack is not empty, update the depth_history
        and return the top item on the stack.
        """
        result = super(Stack, self).pop()
        # Only count pops that actually removed something.
        if result is not KStack.Empty:
            self._pop_count += 1
            self._update_depth_history()
        return result

    def tos(self):
        """
        Returns the value on the top of stack.
        Does not remove from stack.
        """
        return super(Stack, self).tos()
| {
"content_hash": "3f6c66bfdd85f154bce97633a419b10e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 126,
"avg_line_length": 28.61437908496732,
"alnum_prop": 0.5612151667428049,
"repo_name": "scherrey/ADnD",
"id": "ce88ef9ad83fd5b27079f26b8a24d4f7a78aa9a5",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adndpy/stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Elixir",
"bytes": "4800"
},
{
"name": "Python",
"bytes": "23188"
},
{
"name": "Shell",
"bytes": "92"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Directory containing this setup.py; used to locate bundled files.
here = os.path.abspath(os.path.dirname(__file__))
# Use the README as the long description when it is present; fall back to
# an empty string so builds from an sdist without the file still work.
try:
    readme = open(os.path.join(here, 'README.md')).read()
except IOError:
    readme = ''
# Packages needed only at setup/test time (test runner).
setup_requires = [
    'nose>=1.3.0',
]
setup(
    name='detach',
    version='1.0',
    description="Fork and detach the current process.",
    long_description=readme,
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: BSD License",
    ],
    keywords='fork daemon detach',
    author='Ryan Bourgeois',
    author_email='bluedragonx@gmail.com',
    url='https://github.com/bluedragonx/detach',
    license='BSD-derived',
    # Single-module distribution: ships detach.py only.
    py_modules=['detach'],
    setup_requires=setup_requires,
    test_suite = 'nose.collector',
)
| {
"content_hash": "9b590274d1d70a828e2437955df8cead",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 57,
"avg_line_length": 26,
"alnum_prop": 0.6296703296703297,
"repo_name": "BlueDragonX/detach",
"id": "e25a9e0c49f967e504c570e4842a6197ac07717a",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9209"
}
],
"symlink_target": ""
} |
from urllib.parse import urljoin

# Base path for the OAuth web services.
OAUTH_BASE_ENDPOINT = "account/api/oauth/"

# OAuth endpoints resolved relative to the base path.
AUTH_ENDPOINT = urljoin(OAUTH_BASE_ENDPOINT, "authorize.htm")
USER_ENDPOINT = urljoin(OAUTH_BASE_ENDPOINT, "user.htm")

# Email-validation endpoints.
EMAIL_VALIDATION_ENDPOINT = "/account/validateEmail.htm"
EMAIL_VALIDATION_STATUS_ENDPOINT = "/account/api/isEmailValidated.htm"

# Terms-of-use endpoints.
TOU_ENDPOINT = "/account/user/termsOfUse.htm"
TOU_STATUS_ENDPOINT = "/account/api/isTermsOfUseCurrent.htm"

# Enrollment endpoints.
ENROLLMENT_ENDPOINT = "/account/api/enrollment.htm"
ENROLLMENT_STATUS_ENDPOINT = "/account/api/getEnrollment.htm"

# User-search endpoints.
USER_SEARCH_ENDPOINT = "/account/api/user.htm"
USERS_SEARCH_ENDPOINT = "/account/api/getUsers.htm"
| {
"content_hash": "d501c175d19306901e95429f6173f18b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 36.05555555555556,
"alnum_prop": 0.7781201848998459,
"repo_name": "CityOfNewYork/NYCOpenRecords",
"id": "c4af7d52cdb73dcb35c726e2aa79c469bf78fa18",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/constants/web_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40622"
},
{
"name": "HCL",
"bytes": "270"
},
{
"name": "HTML",
"bytes": "560649"
},
{
"name": "JavaScript",
"bytes": "219182"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "998502"
},
{
"name": "Ruby",
"bytes": "4498"
},
{
"name": "Shell",
"bytes": "52597"
},
{
"name": "TeX",
"bytes": "2379"
}
],
"symlink_target": ""
} |
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.iam_v2beta.types import policy
from google.cloud.iam_v2beta.types import policy as gi_policy
from .base import DEFAULT_CLIENT_INFO, PoliciesTransport
from .grpc import PoliciesGrpcTransport
class PoliciesGrpcAsyncIOTransport(PoliciesTransport):
    """gRPC AsyncIO backend transport for Policies.
    An interface for managing Identity and Access Management
    (IAM) policies.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # The live asyncio channel; created lazily in __init__ when one is
    # not passed in explicitly.
    _grpc_channel: aio.Channel
    # Cache of per-RPC stub callables, keyed by method name.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "iam.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "iam.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )
        # Create the channel lazily only when one was not supplied above.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def list_policies(
        self,
    ) -> Callable[[policy.ListPoliciesRequest], Awaitable[policy.ListPoliciesResponse]]:
        r"""Return a callable for the list policies method over gRPC.
        Retrieves the policies of the specified kind that are
        attached to a resource.
        The response lists only policy metadata. In particular,
        policy rules are omitted.
        Returns:
            Callable[[~.ListPoliciesRequest],
                    Awaitable[~.ListPoliciesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_policies" not in self._stubs:
            self._stubs["list_policies"] = self.grpc_channel.unary_unary(
                "/google.iam.v2beta.Policies/ListPolicies",
                request_serializer=policy.ListPoliciesRequest.serialize,
                response_deserializer=policy.ListPoliciesResponse.deserialize,
            )
        return self._stubs["list_policies"]
    @property
    def get_policy(
        self,
    ) -> Callable[[policy.GetPolicyRequest], Awaitable[policy.Policy]]:
        r"""Return a callable for the get policy method over gRPC.
        Gets a policy.
        Returns:
            Callable[[~.GetPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_policy" not in self._stubs:
            self._stubs["get_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v2beta.Policies/GetPolicy",
                request_serializer=policy.GetPolicyRequest.serialize,
                response_deserializer=policy.Policy.deserialize,
            )
        return self._stubs["get_policy"]
    @property
    def create_policy(
        self,
    ) -> Callable[[gi_policy.CreatePolicyRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the create policy method over gRPC.
        Creates a policy.
        Returns:
            Callable[[~.CreatePolicyRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_policy" not in self._stubs:
            self._stubs["create_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v2beta.Policies/CreatePolicy",
                request_serializer=gi_policy.CreatePolicyRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["create_policy"]
    @property
    def update_policy(
        self,
    ) -> Callable[[policy.UpdatePolicyRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the update policy method over gRPC.
        Updates the specified policy.
        You can update only the rules and the display name for the
        policy.
        To update a policy, you should use a read-modify-write loop:
        1. Use [GetPolicy][google.iam.v2beta.Policies.GetPolicy] to read
           the current version of the policy.
        2. Modify the policy as needed.
        3. Use ``UpdatePolicy`` to write the updated policy.
        This pattern helps prevent conflicts between concurrent updates.
        Returns:
            Callable[[~.UpdatePolicyRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_policy" not in self._stubs:
            self._stubs["update_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v2beta.Policies/UpdatePolicy",
                request_serializer=policy.UpdatePolicyRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["update_policy"]
    @property
    def delete_policy(
        self,
    ) -> Callable[[policy.DeletePolicyRequest], Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the delete policy method over gRPC.
        Deletes a policy. This action is permanent.
        Returns:
            Callable[[~.DeletePolicyRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_policy" not in self._stubs:
            self._stubs["delete_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v2beta.Policies/DeletePolicy",
                request_serializer=policy.DeletePolicyRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["delete_policy"]
    def close(self):
        # Delegates to the underlying gRPC AsyncIO channel's close().
        return self.grpc_channel.close()
    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]
__all__ = ("PoliciesGrpcAsyncIOTransport",)
| {
"content_hash": "0f1497a217a1d8d27808c62401178796",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 88,
"avg_line_length": 43.52463054187192,
"alnum_prop": 0.6137739799671779,
"repo_name": "googleapis/python-iam",
"id": "85ebabb33f8daa1b0a834a3abf5fcb9851d27504",
"size": "18271",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/iam_v2beta/services/policies/transports/grpc_asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "778265"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared imperial-defector creature template object."""
    mobile = Creature()
    mobile.template = "object/mobile/shared_dressed_imperial_defector.iff"
    mobile.attribute_template_id = 9
    mobile.stfName("npc_name","human_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return mobile
"content_hash": "7cb84d3503394fba8297ae4119468aa3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6983606557377049,
"repo_name": "obi-two/Rebelion",
"id": "9e045194a1564b1749faf649ad609a9894cb1090",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_imperial_defector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from morphforge.morphology.conversion.region_to_int_bimap import AutoRegionToIntMapTable
from morphforge.morphology.conversion.region_to_int_bimap import RegionToIntMapBiMap
from morphforge.morphology.conversion.internal_representation_converter import MorphologyConverter
| {
"content_hash": "820663847a348793d0b4baf6f32b6999",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 98,
"avg_line_length": 68.5,
"alnum_prop": 0.8905109489051095,
"repo_name": "mikehulluk/morphforge",
"id": "17597cc3fc064c6d2a8ef35d207691e9dce692ff",
"size": "1813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/morphology/conversion/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from .models import Medicamento, Insumo, Laboratorio
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from .forms import MedicamentoForm, InsumoForm, LaboratorioForm
from django.db.models import Q
import json
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
def index_medicamento(request):
    """Render the landing page of the medicamento app."""
    return render(request, 'medicamento/index_medicamento.html')
# asi se llama paciente/paciente_autocomplete/?term=11
@login_required(login_url = 'accounts:login')
def MedicamentoAutoComplete(request):
    """Return jQuery-UI style autocomplete JSON for medicamentos.
    Called as .../medicamento_autocomplete/?term=<query>; matches the query
    against codigo (prefix), nombre and presentacion (substring).
    """
    term = request.GET.get('term', '')
    if term:
        matches = Medicamento.objects.filter(
            Q(codigo__istartswith=term) |
            Q(nombre__icontains=term) |
            Q(presentacion__icontains=term)
        ).distinct()
    else:
        matches = []
    payload = []
    for med in matches:
        label = '(cod:%s) %s, %s' % (med.codigo, med.nombre, med.presentacion)
        payload.append({'id': med.id, 'label': label, 'value': label})
    return HttpResponse(json.dumps(payload))
# asi se llama paciente/paciente_autocomplete/?term=11
@login_required(login_url = 'accounts:login')
def InsumoAutoComplete(request):
    """Return jQuery-UI style autocomplete JSON for insumos.
    Called as .../insumo_autocomplete/?term=<query>; matches the query
    against codigo (prefix) and nombre (substring).
    """
    term = request.GET.get('term', '')
    if term:
        matches = Insumo.objects.filter(
            Q(codigo__istartswith=term) | Q(nombre__icontains=term)
        ).distinct()
    else:
        matches = []
    payload = []
    for item in matches:
        label = '(cod:%s) %s' % (item.codigo, item.nombre)
        payload.append({'id': item.id, 'label': label, 'value': label})
    return HttpResponse(json.dumps(payload))
class MedicamentoList(LoginRequiredMixin, ListView):
    """Login-protected listing of all Medicamento records."""
    template_name = 'medicamento/medicamento_listar.html'
    model = Medicamento
class MedicamentoCreate(LoginRequiredMixin, CreateView):
    """Login-protected creation form for Medicamento."""
    model = Medicamento
    form_class = MedicamentoForm
    template_name = 'medicamento/medicamento_form.html'
    success_url = reverse_lazy('medicamento:listar_medicamento')
class MedicamentoUpdate(LoginRequiredMixin, UpdateView):
    """Login-protected edit form for Medicamento."""
    model = Medicamento
    form_class = MedicamentoForm
    template_name = 'medicamento/medicamento_form.html'
    success_url = reverse_lazy('medicamento:listar_medicamento')
class MedicamentoDelete(LoginRequiredMixin, DeleteView):
    """Login-protected delete confirmation for Medicamento."""
    model = Medicamento
    template_name = 'medicamento/medicamento_delete.html'
    success_url = reverse_lazy('medicamento:listar_medicamento')
class InsumoList(LoginRequiredMixin, ListView):
    """Login-protected listing of all Insumo records."""
    template_name = 'medicamento/insumo_listar.html'
    model = Insumo
class InsumoCreate(LoginRequiredMixin, CreateView):
    """Login-protected creation form for Insumo."""
    model = Insumo
    form_class = InsumoForm
    template_name = 'medicamento/insumo_form.html'
    success_url = reverse_lazy('medicamento:listar_insumo')
class InsumoUpdate(LoginRequiredMixin, UpdateView):
    """Login-protected edit form for Insumo."""
    model = Insumo
    form_class = InsumoForm
    template_name = 'medicamento/insumo_form.html'
    success_url = reverse_lazy('medicamento:listar_insumo')
class InsumoDelete(LoginRequiredMixin, DeleteView):
    """Login-protected delete confirmation for Insumo."""
    model = Insumo
    template_name = 'medicamento/insumo_delete.html'
    success_url = reverse_lazy('medicamento:listar_insumo')
class LaboratorioList(LoginRequiredMixin, ListView):
    """Login-protected listing of all Laboratorio records."""
    template_name = 'medicamento/laboratorio_listar.html'
    model = Laboratorio
class LaboratorioCreate(LoginRequiredMixin, CreateView):
    """Login-protected creation form for Laboratorio."""
    model = Laboratorio
    form_class = LaboratorioForm
    template_name = 'medicamento/laboratorio_form.html'
    success_url = reverse_lazy('medicamento:listar_laboratorio')
class LaboratorioUpdate(LoginRequiredMixin, UpdateView):
    """Login-protected edit form for Laboratorio."""
    model = Laboratorio
    form_class = LaboratorioForm
    template_name = 'medicamento/laboratorio_form.html'
    success_url = reverse_lazy('medicamento:listar_laboratorio')
class LaboratorioDelete(LoginRequiredMixin, DeleteView):
    """Login-protected delete confirmation for Laboratorio.
    Bug fix: success_url previously pointed at 'medicamento:listar_insumo'
    (a copy/paste slip from InsumoDelete), so deleting a laboratorio
    redirected the user to the insumo list. It now redirects to the
    laboratorio list, consistent with LaboratorioCreate/LaboratorioUpdate.
    """
    model = Laboratorio
    template_name = 'medicamento/laboratorio_delete.html'
    success_url = reverse_lazy('medicamento:listar_laboratorio')
# class MedicamentoCreate(ListView):
# model = Medicamento # le paso el modelo al atributo
# form_class = MedicamentoForm # le paso el formulrio
# template_name = 'medicamento/medicamento_form.html'
# success_url = reverse_lazy('medicamento:listar_medicamento')
# # se reescribe el metodo de la clase ListView de django
# def get_context_data(self, **kwargs):
# context = super(MedicamentoCreate,self).get_context_data(**kwargs)
# if 'form' not in context:
# context['form'] = self.form_class(self.request.GET)
# return context
# def post(self, request, *args, **kwargs):
# self.object = self.get_object
# form = self.form_class(request.POST)
# if form.is_valid() :
# medicamento = form.save(commit=False)
# medicamento.save()
# return HttpResponseRedirect(self.get_success_url())
# else:
# self.render_to_response(self.get_context_data(form=form)) | {
"content_hash": "60196a6575b515c3cf196bd497680108",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 77,
"avg_line_length": 32.85064935064935,
"alnum_prop": 0.7497529155959676,
"repo_name": "andresmauro17/mediapp",
"id": "0de84e6b7c98e7f5584793107c9d10c13c39b8d1",
"size": "5059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/medicamento/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "605428"
},
{
"name": "HTML",
"bytes": "357312"
},
{
"name": "JavaScript",
"bytes": "575186"
},
{
"name": "Python",
"bytes": "335982"
}
],
"symlink_target": ""
} |
import unittest
from backend.device.thermometer import Thermometer
from backend.device.mock import MockMAX31855
import math
class ThermometerTests(unittest.TestCase):
    """Exercise Thermometer against the mocked MAX31855 sensor."""
    def setUp(self):
        self.thermometer = Thermometer(MockMAX31855())
    def test_read_c(self):
        """Repeated reads must never produce NaN."""
        for _ in range(10000):
            reading = self.thermometer.current_temperature
            self.assertFalse(math.isnan(reading))
| {
"content_hash": "26a532ebca598bb98f7ba2e1e9509dcd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.7268408551068883,
"repo_name": "jimrybarski/piwarmer",
"id": "85030edb71e0773efc1a02ca37f5d695eb4aac8b",
"size": "421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/tests/thermometer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1807"
},
{
"name": "HTML",
"bytes": "12200"
},
{
"name": "JavaScript",
"bytes": "22527"
},
{
"name": "Nginx",
"bytes": "900"
},
{
"name": "Python",
"bytes": "48151"
}
],
"symlink_target": ""
} |
from dataclasses import dataclass
from typing import Any
import pytest
from _py2tmp.compiler.output_files import load_object_files, ObjectFileContent
def pytest_addoption(parser: Any):
    """Register the --tmppyc_files CLI option and the matching ini setting.
    Both carry the comma-separated list of *.tmppyc object files the test
    suite needs; the CLI option takes precedence in the tmppy fixture.
    """
    # The help text is identical for the option and the ini entry.
    help_text = '*.tmppyc files used by tests (comma-separated list).'
    tmppy_group = parser.getgroup("tmppy")
    tmppy_group.addoption(
        "--tmppyc_files",
        action="store",
        dest="tmppyc_files",
        default="",
        help=help_text,
    )
    parser.addini(
        name='tmppyc_files',
        help=help_text,
        type='pathlist'
    )
@dataclass(frozen=True)
class TmppyFixture:
    # Precompiled *.tmppyc object files shared across the test session.
    tmppyc_files: ObjectFileContent
@pytest.fixture()
def tmppy(request) -> TmppyFixture:
    """Expose the precompiled *.tmppyc object files to tests.
    The file list is taken from the --tmppyc_files CLI option when given,
    otherwise from the tmppyc_files ini setting; a missing configuration
    is a hard error.
    """
    cli_value = request.config.getoption('tmppyc_files')
    if cli_value:
        file_names = cli_value.split(',')
    else:
        file_names = request.config.getini('tmppyc_files')
        if not file_names:
            raise ValueError('You must specify the *.tmppyc files needed by tests in tmppyc_files.')
    return TmppyFixture(load_object_files(tuple(file_names)))
| {
"content_hash": "a7fe0d8390ae129bd33f78c19319a3d8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 96,
"avg_line_length": 30.77777777777778,
"alnum_prop": 0.6759927797833934,
"repo_name": "google/tmppy",
"id": "f13fc9e694c9e1de47c64958fff8bfad3f0e305b",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py2tmp/testing/pytest_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2835"
},
{
"name": "CMake",
"bytes": "6123"
},
{
"name": "Python",
"bytes": "1243209"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
} |
from click.testing import CliRunner
from clifunzone.cli import main
def test_main():
    """A bare CLI invocation prints an empty tuple and exits cleanly."""
    outcome = CliRunner().invoke(main, [])
    assert outcome.output == '()\n'
    assert outcome.exit_code == 0
| {
"content_hash": "d0860011541b6cc1913770488b874052",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 36,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.6666666666666666,
"repo_name": "Justin-W/clifunland",
"id": "0025921cdb6612de5bee351a14056e464f7fb2a4",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_clifunzone.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1598"
},
{
"name": "Gherkin",
"bytes": "348"
},
{
"name": "Python",
"bytes": "252891"
},
{
"name": "Shell",
"bytes": "8436"
}
],
"symlink_target": ""
} |
import os
# Resolve all project paths relative to this settings module so the project
# works regardless of the current working directory.
SETTINGS_DIR = os.path.dirname(__file__)
PROJECT_PATH = os.path.abspath(os.path.join(SETTINGS_DIR, os.pardir))
# Bug fix: the original assigned os.path.abspath(...) to the misspelled name
# PROEJCT_PATH, leaving PROJECT_PATH (and every path derived from it)
# relative. The typo'd name is kept as an alias in case anything imported it.
PROEJCT_PATH = PROJECT_PATH
DATABASE_PATH = os.path.join(PROJECT_PATH, 'grabber.db')
TEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')
STATIC_PATH = os.path.join(PROJECT_PATH, 'static')
# NOTE(review): DEBUG is enabled; it must be turned off for any
# production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': DATABASE_PATH,                      # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        #'USER': '',
        #'PASSWORD': '',
        #'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        #'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    STATIC_PATH,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this secret key is committed to source control.
# It should be rotated and loaded from an environment variable instead.
SECRET_KEY = '2zh672rt=q3yyn^szs8m9#8b+sc_p+%&v0l7_@=cv*2_nls(4s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tacograbber.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tacograbber.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    TEMPLATE_PATH,
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    #'django.contrib.admindocs',
    'grabber',
    'twython_django_oauth',
    #'django_facebook'
)
#twitter specific
# SECURITY NOTE(review): Twitter API credentials are committed to source
# control; they should be revoked and moved to environment variables.
API = 'MeDbWwKCGN2wuh0Z8LrIvhHtj'
API_SECRET = 'ECgRA6bqI3NeveZrexwxpuB7IzWmPeInAzF7Ib5jZRt9wIHbRm'
LOGIN_URL='/twython/login'
LOGOUT_URL='/twython/logout'
LOGIN_REDIRECT_URL='grabber/'
LOGOUT_REDIRECT_URL='grabber/'
#end twitter specific
#FACEBOOK_APP_ID = '1515258648693553'
#FACEBOOK_APP_SECRET = '5c3be66cee30556d8c825c480b9ba6c4'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| {
"content_hash": "a628ce07c1ce979cdec81195ddb4e3eb",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 128,
"avg_line_length": 33.45945945945946,
"alnum_prop": 0.6964458804523425,
"repo_name": "benkul/twitterproject",
"id": "4a2695927822f25c320b1eb7eae575c93a2309e7",
"size": "6233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacograbber/tacograbber/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "41612"
},
{
"name": "JavaScript",
"bytes": "80698"
},
{
"name": "Python",
"bytes": "26114"
}
],
"symlink_target": ""
} |
"""
Provides base classes for working with drivers
"""
from __future__ import with_statement
import sys
import time
import hashlib
import os
import socket
import random
import binascii
from libcloud.utils.py3 import b
import libcloud.compute.ssh
from libcloud.pricing import get_size_price
from libcloud.compute.types import NodeState, StorageVolumeState,\
DeploymentError
from libcloud.compute.ssh import SSHClient
from libcloud.common.base import ConnectionKey
from libcloud.common.base import BaseDriver
from libcloud.common.types import LibcloudError
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.networking import is_private_subnet
from libcloud.utils.networking import is_valid_ip_address
# Exception types treated as transient SSH connection failures; the
# paramiko-specific exceptions are included only when paramiko is installed.
if have_paramiko:
    from paramiko.ssh_exception import SSHException
    from paramiko.ssh_exception import AuthenticationException
    SSH_TIMEOUT_EXCEPTION_CLASSES = (AuthenticationException, SSHException,
                                     IOError, socket.gaierror, socket.error)
else:
    SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error)
# How long to wait for the node to come online after creating it
NODE_ONLINE_WAIT_TIMEOUT = 10 * 60
# How long to try connecting to a remote SSH server when running a deployment
# script.
SSH_CONNECT_TIMEOUT = 5 * 60
# Public API of this module.
__all__ = [
    'Node',
    'NodeState',
    'NodeSize',
    'NodeImage',
    'NodeLocation',
    'NodeAuthSSHKey',
    'NodeAuthPassword',
    'NodeDriver',
    'StorageVolume',
    'StorageVolumeState',
    'VolumeSnapshot',
    # Deprecated, moved to libcloud.utils.networking
    'is_private_subnet',
    'is_valid_ip_address'
]
class UuidMixin(object):
    """
    Mixin class for get_uuid function.
    """
    def __init__(self):
        # Lazily-computed cache for get_uuid().
        self._uuid = None
    def get_uuid(self):
        """
        Unique hash for a node, node image, or node size
        The hash is a function of an SHA1 hash of the node, node image,
        or node size's ID and its driver which means that it should be
        unique between all objects of its type.
        In some subclasses (e.g. GoGridNode) there is no ID
        available so the public IP address is used. This means that,
        unlike a properly done system UUID, the same UUID may mean a
        different system install at a different time
        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.get_uuid()
        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
        Note, for example, that this example will always produce the
        same UUID!
        :rtype: ``str``
        """
        # Hash "<id>:<driver type>" so objects of the same type but
        # different drivers get distinct UUIDs; result is cached.
        if not self._uuid:
            self._uuid = hashlib.sha1(b('%s:%s' %
                                        (self.id, self.driver.type))).hexdigest()
        return self._uuid
    @property
    def uuid(self):
        # Convenience property wrapper around get_uuid().
        return self.get_uuid()
class Node(UuidMixin):
    """
    Provide a common interface for handling nodes of all types.
    The Node object provides the interface in libcloud through which
    we can manipulate nodes in different cloud providers in the same
    way. Node objects don't actually do much directly themselves,
    instead the node driver handles the connection to the node.
    You don't normally create a node object yourself; instead you use
    a driver and then have that create the node for you.
    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> node = driver.create_node()
    >>> node.public_ips[0]
    '127.0.0.3'
    >>> node.name
    'dummy-3'
    You can also get nodes from the driver's list_node function.
    >>> node = driver.list_nodes()[0]
    >>> node.name
    'dummy-1'
    The node keeps a reference to its own driver which means that we
    can work on nodes from different providers without having to know
    which is which.
    >>> driver = DummyNodeDriver(72)
    >>> node2 = driver.create_node()
    >>> node.driver.creds
    0
    >>> node2.driver.creds
    72
    Although Node objects can be subclassed, this isn't normally
    done. Instead, any driver specific information is stored in the
    "extra" attribute of the node.
    >>> node.extra
    {'foo': 'bar'}
    """
    def __init__(self, id, name, state, public_ips, private_ips,
                 driver, size=None, image=None, extra=None):
        """
        :param id: Node ID.
        :type id: ``str``
        :param name: Node name.
        :type name: ``str``
        :param state: Node state.
        :type state: :class:`libcloud.compute.types.NodeState`
        :param public_ips: Public IP addresses associated with this node.
        :type public_ips: ``list``
        :param private_ips: Private IP addresses associated with this node.
        :type private_ips: ``list``
        :param driver: Driver this node belongs to.
        :type driver: :class:`.NodeDriver`
        :param size: Size of this node. (optional)
        :type size: :class:`.NodeSize`
        :param image: Image of this node. (optional)
        :type image: :class:`.NodeImage`
        :param extra: Optional provider specific attributes associated with
                      this node.
        :type extra: ``dict``
        """
        # ID is normalized to ``str``; a falsy ID becomes ``None``.
        self.id = str(id) if id else None
        self.name = name
        self.state = state
        # Falsy IP lists are replaced with fresh empty lists so instances
        # never share a mutable default.
        self.public_ips = public_ips if public_ips else []
        self.private_ips = private_ips if private_ips else []
        self.driver = driver
        self.size = size
        self.image = image
        self.extra = extra or {}
        UuidMixin.__init__(self)
    def reboot(self):
        """
        Reboot this node
        :return: ``bool``
        This calls the node's driver and reboots the node
        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.state == NodeState.REBOOTING
        False
        >>> node.reboot()
        True
        >>> node.state == NodeState.REBOOTING
        True
        """
        # Delegates to the driver; the node itself holds no connection.
        return self.driver.reboot_node(self)
    def destroy(self):
        """
        Destroy this node
        :return: ``bool``
        This calls the node's driver and destroys the node
        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> from libcloud.compute.types import NodeState
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.destroy()
        True
        >>> node.state == NodeState.RUNNING
        False
        """
        # Delegates to the driver; the node itself holds no connection.
        return self.driver.destroy_node(self)
    def __repr__(self):
        state = NodeState.tostring(self.state)
        return (('<Node: uuid=%s, name=%s, state=%s, public_ips=%s, '
                 'private_ips=%s, provider=%s ...>')
                % (self.uuid, self.name, state, self.public_ips,
                   self.private_ips, self.driver.name))
class NodeSize(UuidMixin):
    """
    A Base NodeSize class to derive from.
    NodeSizes are objects which are typically returned a driver's
    list_sizes function. They contain a number of different
    parameters which define how big an image is.
    The exact parameters available depends on the provider.
    N.B. Where a parameter is "unlimited" (for example bandwidth in
    Amazon) this will be given as 0.
    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> size = driver.list_sizes()[0]
    >>> size.ram
    128
    >>> size.bandwidth
    500
    >>> size.price
    4
    """
    def __init__(self, id, name, ram, disk, bandwidth, price,
                 driver, extra=None):
        """
        :param id: Size ID.
        :type id: ``str``
        :param name: Size name.
        :type name: ``str``
        :param ram: Amount of memory (in MB) provided by this size.
        :type ram: ``int``
        :param disk: Amount of disk storage (in GB) provided by this image.
        :type disk: ``int``
        :param bandwidth: Amount of bandwidth included with this size.
        :type bandwidth: ``int``
        :param price: Price (in US dollars) of running this node for an hour.
        :type price: ``float``
        :param driver: Driver this size belongs to.
        :type driver: :class:`.NodeDriver`
        :param extra: Optional provider specific attributes associated with
                      this size.
        :type extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)
    def __repr__(self):
        return (('<NodeSize: id=%s, name=%s, ram=%s disk=%s bandwidth=%s '
                 'price=%s driver=%s ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name))
class NodeImage(UuidMixin):
    """
    An operating system image.
    NodeImage objects are typically returned by the driver for the
    cloud provider in response to the list_images function
    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> image = driver.list_images()[0]
    >>> image.name
    'Ubuntu 9.10'
    Apart from name and id, there is no further standard information;
    other parameters are stored in a driver specific "extra" variable
    When creating a node, a node image should be given as an argument
    to the create_node function to decide which OS image to use.
    >>> node = driver.create_node(image=image)
    """
    def __init__(self, id, name, driver, extra=None):
        """
        :param id: Image ID.
        :type id: ``str``
        :param name: Image name.
        :type name: ``str``
        :param driver: Driver this image belongs to.
        :type driver: :class:`.NodeDriver`
        :param extra: Optional provider specific attributes associated with
                      this image.
        :type extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)
    def __repr__(self):
        return (('<NodeImage: id=%s, name=%s, driver=%s  ...>')
                % (self.id, self.name, self.driver.name))
class NodeLocation(object):
    """
    A physical location where nodes can be.
    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> location = driver.list_locations()[0]
    >>> location.country
    'US'
    """
    def __init__(self, id, name, country, driver):
        """
        :param id: Location ID.
        :type id: ``str``
        :param name: Location name.
        :type name: ``str``
        :param country: Location country.
        :type country: ``str``
        :param driver: Driver this location belongs to.
        :type driver: :class:`.NodeDriver`
        """
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver
    def __repr__(self):
        return (('<NodeLocation: id=%s, name=%s, country=%s, driver=%s>')
                % (self.id, self.name, self.country, self.driver.name))
class NodeAuthSSHKey(object):
    """
    An SSH key to be installed for authentication to a node.
    This is the actual contents of the users ssh public key which will
    normally be installed as root's public key on the node.
    >>> pubkey = '...' # read from file
    >>> from libcloud.compute.base import NodeAuthSSHKey
    >>> k = NodeAuthSSHKey(pubkey)
    >>> k
    <NodeAuthSSHKey>
    """
    def __init__(self, pubkey):
        """
        :param pubkey: Public key material.
        :type pubkey: ``str``
        """
        self.pubkey = pubkey
    def __repr__(self):
        # The key material is deliberately not echoed in the repr.
        return '<NodeAuthSSHKey>'
class NodeAuthPassword(object):
    """
    A password to be used for authentication to a node.
    """
    def __init__(self, password, generated=False):
        """
        :param password: Password.
        :type password: ``str``
        :param generated: ``True`` if this password was automatically
                          generated, ``False`` otherwise.
        :type generated: ``bool``
        """
        self.generated = generated
        self.password = password
    def __repr__(self):
        # The password is deliberately not echoed in the repr.
        return '<NodeAuthPassword>'
class StorageVolume(UuidMixin):
    """
    A base StorageVolume class to derive from.
    All operations delegate to the owning driver; the volume object itself
    only carries state.
    """
    def __init__(self, id, name, size, driver,
                 state=None, extra=None):
        """
        :param id: Storage volume ID.
        :type id: ``str``
        :param name: Storage volume name.
        :type name: ``str``
        :param size: Size of this volume (in GB).
        :type size: ``int``
        :param driver: Driver this volume belongs to.
        :type driver: :class:`.NodeDriver`
        :param state: Optional state of the StorageVolume. If not
                      provided, will default to UNKNOWN.
        :type state: :class:`.StorageVolumeState`
        :param extra: Optional provider specific attributes.
        :type extra: ``dict``
        """
        self.id = id
        self.name = name
        self.size = size
        self.state = state
        self.extra = extra
        self.driver = driver
        UuidMixin.__init__(self)
    def list_snapshots(self):
        """
        List all snapshots that belong to this volume.
        :rtype: ``list`` of ``VolumeSnapshot``
        """
        return self.driver.list_volume_snapshots(volume=self)
    def attach(self, node, device=None):
        """
        Attach this volume to a node.
        :param node: Node to attach volume to
        :type node: :class:`.Node`
        :param device: Where the device is exposed,
                       e.g. '/dev/sdb (optional)
        :type device: ``str``
        :return: ``True`` if attach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.attach_volume(node=node, volume=self,
                                         device=device)
    def detach(self):
        """
        Detach this volume from the node it is attached to.
        :return: ``True`` if detach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.detach_volume(volume=self)
    def snapshot(self, name):
        """
        Create a named snapshot of this volume.
        :return: Created snapshot.
        :rtype: ``VolumeSnapshot``
        """
        return self.driver.create_volume_snapshot(volume=self, name=name)
    def destroy(self):
        """
        Destroy this storage volume.
        :return: ``True`` if destroy was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        return self.driver.destroy_volume(volume=self)
    def __repr__(self):
        return '<StorageVolume id=%s size=%s driver=%s>' % (
            self.id, self.size, self.driver.name)
class VolumeSnapshot(object):
    """
    A base VolumeSnapshot class to derive from.
    """
    def __init__(self, id, driver, size=None, extra=None, created=None,
                 state=None):
        """
        VolumeSnapshot constructor.
        :param id: Snapshot ID.
        :type id: ``str``
        :param driver: The driver that represents a connection to the
                       provider
        :type driver: `NodeDriver`
        :param size: A snapshot size in GB.
        :type size: ``int``
        :param extra: Provider dependent parameters for the snapshot.
        :type extra: ``dict``
        :param created: A datetime object that represents when the
                        snapshot was created
        :type created: ``datetime.datetime``
        :param state: A string representing the state the snapshot is
                      in. See `libcloud.compute.types.StorageVolumeState`.
        :type state: ``str``
        """
        self.id = id
        self.driver = driver
        self.size = size
        self.created = created
        self.state = state
        # A falsy extra collapses to a fresh empty dict.
        self.extra = extra if extra else {}
    def destroy(self):
        """
        Destroys this snapshot via the owning driver.
        :rtype: ``bool``
        """
        return self.driver.destroy_volume_snapshot(snapshot=self)
    def __repr__(self):
        return ('<VolumeSnapshot id=%s size=%s driver=%s state=%s>' %
                (self.id, self.size, self.driver.name, self.state))
class KeyPair(object):
    """
    Represents a SSH key pair.
    """

    def __init__(self, name, public_key, fingerprint, driver, private_key=None,
                 extra=None):
        """
        Build a key pair descriptor.

        :keyword name: Name of the key pair object.
        :type name: ``str``
        :keyword public_key: Public key in OpenSSH format.
        :type public_key: ``str``
        :keyword fingerprint: Key fingerprint.
        :type fingerprint: ``str``
        :keyword private_key: Private key in PEM format.
        :type private_key: ``str``
        :keyword extra: Provider specific attributes associated with this
                        key pair. (optional)
        :type extra: ``dict``
        """
        # Key material and identity.
        self.name = name
        self.public_key = public_key
        self.private_key = private_key
        self.fingerprint = fingerprint
        # Provider connection and provider-specific attributes.
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        template = '<KeyPair name=%s fingerprint=%s driver=%s>'
        return template % (self.name, self.fingerprint, self.driver.name)
class NodeDriver(BaseDriver):
    """
    A base NodeDriver class to derive from
    This class is always subclassed by a specific driver. For
    examples of base behavior of most functions (except deploy node)
    see the dummy driver.
    """
    # Connection class used to talk to the provider's API.
    connectionCls = ConnectionKey
    # Human-readable driver name, provider type and API port; set by
    # subclasses.
    name = None
    type = None
    port = None
    features = {'create_node': []}
    """
    List of available features for a driver.
    - :meth:`libcloud.compute.base.NodeDriver.create_node`
    - ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication
    method for nodes.
    - password: Supports :class:`.NodeAuthPassword` as an
    authentication
    method for nodes.
    - generates_password: Returns a password attribute on the Node
    object returned from creation.
    """
    # Mapping of provider-specific node states to libcloud NodeState
    # values; populated by subclasses.
    NODE_STATE_MAP = {}
    def list_nodes(self):
        """
        List all nodes.

        :return: list of node objects
        :rtype: ``list`` of :class:`.Node`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_nodes not implemented for this driver')
    def list_sizes(self, location=None):
        """
        List sizes on a provider.

        :param location: The location at which to list sizes
        :type location: :class:`.NodeLocation`

        :return: list of node size objects
        :rtype: ``list`` of :class:`.NodeSize`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_sizes not implemented for this driver')
    def list_locations(self):
        """
        List data centers for a provider.

        :return: list of node location objects
        :rtype: ``list`` of :class:`.NodeLocation`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_locations not implemented for this driver')
    def create_node(self, **kwargs):
        """
        Create a new node instance. This instance will be started
        automatically.

        Not all hosting API's are created equal and to allow libcloud to
        support as many as possible there are some standard supported
        variations of ``create_node``. These are declared using a
        ``features`` API.
        You can inspect ``driver.features['create_node']`` to see what
        variation of the API you are dealing with:

        ``ssh_key``
        You can inject a public key into a new node allows key based SSH
        authentication.

        ``password``
        You can inject a password into a new node for SSH authentication.
        If no password is provided libcloud will generate a password.
        The password will be available as
        ``return_value.extra['password']``.

        ``generates_password``
        The hosting provider will generate a password. It will be returned
        to you via ``return_value.extra['password']``.

        Some drivers allow you to set how you will authenticate with the
        instance that is created. You can inject this initial authentication
        information via the ``auth`` parameter.

        If a driver supports the ``ssh_key`` feature flag for ``create_node``
        you can upload a public key into the new instance::
        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> auth = NodeAuthSSHKey('pubkey data here')
        >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature flag for ``create_node``
        you can set a password::
        >>> driver = DummyNodeDriver(0)
        >>> auth = NodeAuthPassword('mysecretpassword')
        >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature and you don't provide the
        ``auth`` argument libcloud will assign a password::
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node("test_node")
        >>> password = node.extra['password']

        A password will also be returned in this way for drivers that declare
        the ``generates_password`` feature, though in that case the password is
        actually provided to the driver API by the hosting provider rather than
        generated by libcloud.

        You can only pass a :class:`.NodeAuthPassword` or
        :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if
        it has the corresponding feature flag.

        :param name: String with a name for this new node (required)
        :type name: ``str``

        :param size: The size of resources allocated to this node.
                     (required)
        :type size: :class:`.NodeSize`

        :param image: OS Image to boot on node. (required)
        :type image: :class:`.NodeImage`

        :param location: Which data center to create a node in. If empty,
                         undefined behavior will be selected. (optional)
        :type location: :class:`.NodeLocation`

        :param auth: Initial authentication information for the node
                     (optional)
        :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :return: The newly created node.
        :rtype: :class:`.Node`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'create_node not implemented for this driver')
def deploy_node(self, **kwargs):
"""
Create a new node, and start deployment.
In order to be able to SSH into a created node access credentials are
required.
A user can pass either a :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey` to the ``auth`` argument. If the
``create_node`` implementation supports that kind if credential (as
declared in ``self.features['create_node']``) then it is passed on to
``create_node``. Otherwise it is not passed on to ``create_node`` and
it is only used for authentication.
If the ``auth`` parameter is not supplied but the driver declares it
supports ``generates_password`` then the password returned by
``create_node`` will be used to SSH into the server.
Finally, if the ``ssh_key_file`` is supplied that key will be used to
SSH into the server.
This function may raise a :class:`DeploymentException`, if a
create_node call was successful, but there is a later error (like SSH
failing or timing out). This exception includes a Node object which
you may want to destroy if incomplete deployments are not desirable.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> from libcloud.compute.deployment import ScriptDeployment
>>> from libcloud.compute.deployment import MultiStepDeployment
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> driver = DummyNodeDriver(0)
>>> key = NodeAuthSSHKey('...') # read from file
>>> script = ScriptDeployment("yum -y install emacs strace tcpdump")
>>> msd = MultiStepDeployment([key, script])
>>> def d():
... try:
... driver.deploy_node(deploy=msd)
... except NotImplementedError:
... print ("not implemented for dummy driver")
>>> d()
not implemented for dummy driver
Deploy node is typically not overridden in subclasses. The
existing implementation should be able to handle most such.
:param deploy: Deployment to run once machine is online and
available to SSH.
:type deploy: :class:`Deployment`
:param ssh_username: Optional name of the account which is used
when connecting to
SSH server (default is root)
:type ssh_username: ``str``
:param ssh_alternate_usernames: Optional list of ssh usernames to
try to connect with if using the
default one fails
:type ssh_alternate_usernames: ``list``
:param ssh_port: Optional SSH server port (default is 22)
:type ssh_port: ``int``
:param ssh_timeout: Optional SSH connection timeout in seconds
(default is 10)
:type ssh_timeout: ``float``
:param auth: Initial authentication information for the node
(optional)
:type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`
:param ssh_key: A path (or paths) to an SSH private key with which
to attempt to authenticate. (optional)
:type ssh_key: ``str`` or ``list`` of ``str``
:param timeout: How many seconds to wait before timing out.
(default is 600)
:type timeout: ``int``
:param max_tries: How many times to retry if a deployment fails
before giving up (default is 3)
:type max_tries: ``int``
:param ssh_interface: The interface to wait for. Default is
'public_ips', other option is 'private_ips'.
:type ssh_interface: ``str``
"""
if not libcloud.compute.ssh.have_paramiko:
raise RuntimeError('paramiko is not installed. You can install ' +
'it using pip: pip install paramiko')
if 'auth' in kwargs:
auth = kwargs['auth']
if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)):
raise NotImplementedError(
'If providing auth, only NodeAuthSSHKey or'
'NodeAuthPassword is supported')
elif 'ssh_key' in kwargs:
# If an ssh_key is provided we can try deploy_node
pass
elif 'create_node' in self.features:
f = self.features['create_node']
if 'generates_password' not in f and "password" not in f:
raise NotImplementedError(
'deploy_node not implemented for this driver')
else:
raise NotImplementedError(
'deploy_node not implemented for this driver')
node = self.create_node(**kwargs)
max_tries = kwargs.get('max_tries', 3)
password = None
if 'auth' in kwargs:
if isinstance(kwargs['auth'], NodeAuthPassword):
password = kwargs['auth'].password
elif 'password' in node.extra:
password = node.extra['password']
ssh_interface = kwargs.get('ssh_interface', 'public_ips')
# Wait until node is up and running and has IP assigned
try:
node, ip_addresses = self.wait_until_running(
nodes=[node],
wait_period=3,
timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT),
ssh_interface=ssh_interface)[0]
except Exception:
e = sys.exc_info()[1]
raise DeploymentError(node=node, original_exception=e, driver=self)
ssh_username = kwargs.get('ssh_username', 'root')
ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', [])
ssh_port = kwargs.get('ssh_port', 22)
ssh_timeout = kwargs.get('ssh_timeout', 10)
ssh_key_file = kwargs.get('ssh_key', None)
timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT)
deploy_error = None
for username in ([ssh_username] + ssh_alternate_usernames):
try:
self._connect_and_run_deployment_script(
task=kwargs['deploy'], node=node,
ssh_hostname=ip_addresses[0], ssh_port=ssh_port,
ssh_username=username, ssh_password=password,
ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout,
timeout=timeout, max_tries=max_tries)
except Exception:
# Try alternate username
# Todo: Need to fix paramiko so we can catch a more specific
# exception
e = sys.exc_info()[1]
deploy_error = e
else:
# Script successfully executed, don't try alternate username
deploy_error = None
break
if deploy_error is not None:
raise DeploymentError(node=node, original_exception=deploy_error,
driver=self)
return node
    def reboot_node(self, node):
        """
        Reboot a node.

        :param node: The node to be rebooted
        :type node: :class:`.Node`

        :return: True if the reboot was successful, otherwise False
        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'reboot_node not implemented for this driver')
    def destroy_node(self, node):
        """
        Destroy a node.

        Depending upon the provider, this may destroy all data associated with
        the node, including backups.

        :param node: The node to be destroyed
        :type node: :class:`.Node`

        :return: True if the destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'destroy_node not implemented for this driver')
##
# Volume and snapshot management methods
##
    def list_volumes(self):
        """
        List storage volumes.

        :rtype: ``list`` of :class:`.StorageVolume`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_volumes not implemented for this driver')
    def list_volume_snapshots(self, volume):
        """
        List snapshots for a storage volume.

        :param volume: Volume to list snapshots for.
        :type volume: :class:`.StorageVolume`

        :rtype: ``list`` of :class:`VolumeSnapshot`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_volume_snapshots not implemented for this driver')
    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new volume.

        :param size: Size of volume in gigabytes (required)
        :type size: ``int``

        :param name: Name of the volume to be created
        :type name: ``str``

        :param location: Which data center to create a volume in. If
                         empty, undefined behavior will be selected.
                         (optional)
        :type location: :class:`.NodeLocation`

        :param snapshot: Snapshot from which to create the new
                         volume. (optional)
        :type snapshot: :class:`.VolumeSnapshot`

        :return: The newly created volume.
        :rtype: :class:`StorageVolume`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'create_volume not implemented for this driver')
    def create_volume_snapshot(self, volume, name=None):
        """
        Creates a snapshot of the storage volume.

        :param volume: The StorageVolume to create a VolumeSnapshot from
        :type volume: :class:`.StorageVolume`

        :param name: Name of created snapshot (optional)
        :type name: ``str``

        :rtype: :class:`VolumeSnapshot`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'create_volume_snapshot not implemented for this driver')
    def attach_volume(self, node, volume, device=None):
        """
        Attaches volume to node.

        :param node: Node to attach volume to.
        :type node: :class:`.Node`

        :param volume: Volume to attach.
        :type volume: :class:`.StorageVolume`

        :param device: Where the device is exposed, e.g. '/dev/sdb'
        :type device: ``str``

        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError('attach not implemented for this driver')
    def detach_volume(self, volume):
        """
        Detaches a volume from a node.

        :param volume: Volume to be detached
        :type volume: :class:`.StorageVolume`

        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError('detach not implemented for this driver')
    def destroy_volume(self, volume):
        """
        Destroys a storage volume.

        :param volume: Volume to be destroyed
        :type volume: :class:`StorageVolume`

        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'destroy_volume not implemented for this driver')
    def destroy_volume_snapshot(self, snapshot):
        """
        Destroys a snapshot.

        :param snapshot: The snapshot to delete
        :type snapshot: :class:`VolumeSnapshot`

        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'destroy_volume_snapshot not implemented for this driver')
##
# Image management methods
##
    def list_images(self, location=None):
        """
        List images on a provider.

        :param location: The location at which to list images.
        :type location: :class:`.NodeLocation`

        :return: list of node image objects.
        :rtype: ``list`` of :class:`.NodeImage`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_images not implemented for this driver')
    def create_image(self, node, name, description=None):
        """
        Creates an image from a node object.

        :param node: Node to run the task on.
        :type node: :class:`.Node`

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'create_image not implemented for this driver')
    def delete_image(self, node_image):
        """
        Deletes a node image from a provider.

        :param node_image: Node image object.
        :type node_image: :class:`.NodeImage`

        :return: ``True`` if delete_image was successful, ``False`` otherwise.
        :rtype: ``bool``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'delete_image not implemented for this driver')
    def get_image(self, image_id):
        """
        Returns a single node image from a provider.

        :param image_id: ID of the node image to retrieve.
        :type image_id: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'get_image not implemented for this driver')
    def copy_image(self, source_region, node_image, name, description=None):
        """
        Copies an image from a source region to the current region.

        :param source_region: Region to copy the node from.
        :type source_region: ``str``

        :param node_image: NodeImage to copy.
        :type node_image: :class:`.NodeImage`:

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'copy_image not implemented for this driver')
##
# SSH key pair management methods
##
    def list_key_pairs(self):
        """
        List all the available key pair objects.

        :rtype: ``list`` of :class:`.KeyPair` objects
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'list_key_pairs not implemented for this driver')
    def get_key_pair(self, name):
        """
        Retrieve a single key pair.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'get_key_pair not implemented for this driver')
    def create_key_pair(self, name):
        """
        Create a new key pair object.

        :param name: Key pair name.
        :type name: ``str``
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'create_key_pair not implemented for this driver')
    def import_key_pair_from_string(self, name, key_material):
        """
        Import a new public key from string.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :rtype: :class:`.KeyPair` object
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'import_key_pair_from_string not implemented for this driver')
    def import_key_pair_from_file(self, name, key_file_path):
        """
        Import a new public key from a file.

        :param name: Key pair name.
        :type name: ``str``

        :param key_file_path: Path to the public key file.
        :type key_file_path: ``str``

        :rtype: :class:`.KeyPair` object
        """
        # Expand '~' so users can pass e.g. '~/.ssh/id_rsa.pub'.
        key_file_path = os.path.expanduser(key_file_path)
        with open(key_file_path, 'r') as fp:
            key_material = fp.read()
        # Delegate to the string variant which drivers actually implement.
        return self.import_key_pair_from_string(name=name,
                                                key_material=key_material)
    def delete_key_pair(self, key_pair):
        """
        Delete an existing key pair.

        :param key_pair: Key pair object.
        :type key_pair: :class:`.KeyPair`
        """
        # Subclasses must override this method.
        raise NotImplementedError(
            'delete_key_pair not implemented for this driver')
def wait_until_running(self, nodes, wait_period=3,
timeout=600, ssh_interface='public_ips',
force_ipv4=True, ex_list_nodes_kwargs=None):
"""
Block until the provided nodes are considered running.
Node is considered running when it's state is "running" and when it has
at least one IP address assigned.
:param nodes: List of nodes to wait for.
:type nodes: ``list`` of :class:`.Node`
:param wait_period: How many seconds to wait between each loop
iteration. (default is 3)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 600)
:type timeout: ``int``
:param ssh_interface: Which attribute on the node to use to obtain
an IP address. Valid options: public_ips,
private_ips. Default is public_ips.
:type ssh_interface: ``str``
:param force_ipv4: Ignore IPv6 addresses (default is True).
:type force_ipv4: ``bool``
:param ex_list_nodes_kwargs: Optional driver-specific keyword arguments
which are passed to the ``list_nodes``
method.
:type ex_list_nodes_kwargs: ``dict``
:return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
list of ip_address on success.
:rtype: ``list`` of ``tuple``
"""
ex_list_nodes_kwargs = ex_list_nodes_kwargs or {}
def is_supported(address):
"""
Return True for supported address.
"""
if force_ipv4 and not is_valid_ip_address(address=address,
family=socket.AF_INET):
return False
return True
def filter_addresses(addresses):
"""
Return list of supported addresses.
"""
return [address for address in addresses if is_supported(address)]
if ssh_interface not in ['public_ips', 'private_ips']:
raise ValueError('ssh_interface argument must either be' +
'public_ips or private_ips')
start = time.time()
end = start + timeout
uuids = set([node.uuid for node in nodes])
while time.time() < end:
all_nodes = self.list_nodes(**ex_list_nodes_kwargs)
matching_nodes = list([node for node in all_nodes
if node.uuid in uuids])
if len(matching_nodes) > len(uuids):
found_uuids = [node.uuid for node in matching_nodes]
msg = ('Unable to match specified uuids ' +
'(%s) with existing nodes. Found ' % (uuids) +
'multiple nodes with same uuid: (%s)' % (found_uuids))
raise LibcloudError(value=msg, driver=self)
running_nodes = [node for node in matching_nodes
if node.state == NodeState.RUNNING]
addresses = [filter_addresses(getattr(node, ssh_interface))
for node in running_nodes]
if len(running_nodes) == len(uuids) == len(addresses):
return list(zip(running_nodes, addresses))
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self)
def _get_and_check_auth(self, auth):
"""
Helper function for providers supporting :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey`
Validates that only a supported object type is passed to the auth
parameter and raises an exception if it is not.
If no :class:`.NodeAuthPassword` object is provided but one is expected
then a password is automatically generated.
"""
if isinstance(auth, NodeAuthPassword):
if 'password' in self.features['create_node']:
return auth
raise LibcloudError(
'Password provided as authentication information, but password'
'not supported', driver=self)
if isinstance(auth, NodeAuthSSHKey):
if 'ssh_key' in self.features['create_node']:
return auth
raise LibcloudError(
'SSH Key provided as authentication information, but SSH Key'
'not supported', driver=self)
if 'password' in self.features['create_node']:
value = os.urandom(16)
value = binascii.hexlify(value).decode('ascii')
# Some providers require password to also include uppercase
# characters so convert some characters to uppercase
password = ''
for char in value:
if not char.isdigit() and char.islower():
if random.randint(0, 1) == 1:
char = char.upper()
password += char
return NodeAuthPassword(password, generated=True)
if auth:
raise LibcloudError(
'"auth" argument provided, but it was not a NodeAuthPassword'
'or NodeAuthSSHKey object', driver=self)
    def _wait_until_running(self, node, wait_period=3, timeout=600,
                            ssh_interface='public_ips', force_ipv4=True):
        """
        Deprecated single-node wrapper around :meth:`wait_until_running`.
        """
        # This is here for backward compatibility and will be removed in the
        # next major release
        return self.wait_until_running(nodes=[node], wait_period=wait_period,
                                       timeout=timeout,
                                       ssh_interface=ssh_interface,
                                       force_ipv4=force_ipv4)
    def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300):
        """
        Try to connect to the remote SSH server. If a connection times out or
        is refused it is retried up to timeout number of seconds.

        :param ssh_client: A configured SSHClient instance
        :type ssh_client: ``SSHClient``

        :param wait_period: How many seconds to wait between each loop
                            iteration. (default is 1.5)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before giving up.
                        (default is 300)
        :type timeout: ``int``

        :return: ``SSHClient`` on success
        """
        start = time.time()
        end = start + timeout
        while time.time() < end:
            try:
                ssh_client.connect()
            except SSH_TIMEOUT_EXCEPTION_CLASSES:
                e = sys.exc_info()[1]
                message = str(e).lower()
                expected_msg = 'no such file or directory'
                if isinstance(e, IOError) and expected_msg in message:
                    # Propagate (key) file doesn't exist errors
                    raise e
                # Retry if a connection is refused, timeout occurred,
                # or the connection fails due to failed authentication.
                ssh_client.close()
                time.sleep(wait_period)
                continue
            else:
                # Connection established; hand the live client back.
                return ssh_client
        raise LibcloudError(value='Could not connect to the remote SSH ' +
                            'server. Giving up.', driver=self)
def _connect_and_run_deployment_script(self, task, node, ssh_hostname,
ssh_port, ssh_username,
ssh_password, ssh_key_file,
ssh_timeout, timeout, max_tries):
"""
Establish an SSH connection to the node and run the provided deployment
task.
:rtype: :class:`.Node`:
:return: Node instance on success.
"""
ssh_client = SSHClient(hostname=ssh_hostname,
port=ssh_port, username=ssh_username,
password=ssh_password,
key_files=ssh_key_file,
timeout=ssh_timeout)
ssh_client = self._ssh_client_connect(ssh_client=ssh_client,
timeout=timeout)
# Execute the deployment task
node = self._run_deployment_script(task=task, node=node,
ssh_client=ssh_client,
max_tries=max_tries)
return node
    def _run_deployment_script(self, task, node, ssh_client, max_tries=3):
        """
        Run the deployment script on the provided node. At this point it is
        assumed that SSH connection has already been established.

        :param task: Deployment task to run.
        :type task: :class:`Deployment`

        :param node: Node to run the task on.
        :type node: ``Node``

        :param ssh_client: A configured and connected SSHClient instance.
        :type ssh_client: :class:`SSHClient`

        :param max_tries: How many times to retry if a deployment fails
                          before giving up. (default is 3)
        :type max_tries: ``int``

        :rtype: :class:`.Node`
        :return: ``Node`` Node instance on success.
        """
        tries = 0
        while tries < max_tries:
            try:
                node = task.run(node, ssh_client)
            except Exception:
                tries += 1
                if tries >= max_tries:
                    # Out of retries: surface the last failure.
                    e = sys.exc_info()[1]
                    raise LibcloudError(value='Failed after %d tries: %s'
                                        % (max_tries, str(e)), driver=self)
            else:
                # Deployment succeeded, close the connection and stop retrying
                ssh_client.close()
                return node
    def _get_size_price(self, size_id):
        """
        Return pricing information for the provided size id.

        :param size_id: Provider-specific size identifier.
        :type size_id: ``str``
        """
        # Look up the price in the shared pricing table for this driver.
        return get_size_price(driver_type='compute',
                              driver_name=self.api_name,
                              size_id=size_id)
# Run the doctest examples embedded in the docstrings above when this
# module is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| {
"content_hash": "35fe329bcb1e457a26ed8cceab20ab85",
"timestamp": "",
"source": "github",
"line_count": 1512,
"max_line_length": 79,
"avg_line_length": 32.89021164021164,
"alnum_prop": 0.5692941886185401,
"repo_name": "jimbobhickville/libcloud",
"id": "2b9b18a307e5f7ada53bc85ed26e197526e52c0d",
"size": "50512",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/compute/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "Python",
"bytes": "4397714"
},
{
"name": "Shell",
"bytes": "13868"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
from optparse import make_option
import urlparse
from jinja2 import BaseLoader, Environment, TemplateNotFound
import paramiko
from paver.easy import *
import yaml
def host_task(f):
    """Wrap *f* as a paver task that accepts a ``--host`` option."""
    add_host_option = cmdopts([("host=", None, "The host for which to build")])
    return task(add_host_option(f))
@host_task
def upgrade(options):
    """
    Full pipeline for a host: generate its config, build the image and
    flash it onto the machine.
    """
    call_task('generate_config', options={
        'host': options.host})
    call_task('generate_image', options={
        'host': options.host})
    call_task('flash', options={
        'host': options.host})
@host_task
def generate_config(options):
    """
    Read the template files and write out the custom config of a host
    """
    # template name -> os.stat_result of the template file (only st_mode
    # is used below, to chmod the rendered file)
    config_file_templates = {}
    config = get_config(options.host)
    target_dir = path('target')
    # Collect template files from every target in the inheritance chain.
    # NOTE(review): config['parents'] is iterated root-first and the first
    # stat seen wins — confirm this matches the child-first resolution
    # order used by ConfigdichLoader for the template *contents*.
    for parent_files_dir in filter(lambda p: p.exists(),
            [target_dir.joinpath(p, 'files') for p in config['parents']]):
        for config_file in parent_files_dir.walkfiles():
            template_name = parent_files_dir.relpathto(config_file)
            if template_name not in config_file_templates:
                config_file_templates[template_name] = config_file.lstat()
    # Load Jinja Environment
    template_env = Environment(loader=ConfigdichLoader(target_dir))
    build_dir = path('config').joinpath(options.host)
    # Clean out the build dir
    build_dir.rmtree()
    build_dir.makedirs_p()
    for config_file_name, template_file_stat in config_file_templates.items():
        rendered_config_file_path = build_dir.joinpath(config_file_name)
        # Create the directory that the config file will be rendered to in if needed
        rendered_config_file_path.dirname().makedirs_p()
        # Render the template to the file
        t = template_env.get_template(options.host + "/" + str(config_file_name))
        rendered_config_file_path.write_text(t.render(config))
        # Preserve the mode bits of the original template file
        rendered_config_file_path.chmod(template_file_stat.st_mode)
@host_task
def generate_image(options):
    """
    Generate the image for installation on a machine.
    """
    call_task('generate_config', options={
        'host': options.host})
    config = get_config(options.host)
    build_dir = path('build')
    # Create the build directory if needed
    build_dir.makedirs_p()
    host_config_files_path = path('config').joinpath(options.host).abspath()
    host_image_path = path('images').joinpath(options.host).abspath()
    # Create images directory if needed
    host_image_path.makedirs_p()
    # Build the custom image for this machine including the custom package
    # list and configfiles.
    #image_builder_tar_path = get_image_builder(config)
    with pushd(build_dir):
        if not path('openwrt').exists():
            sh('git clone git://git.openwrt.org/openwrt.git')
        with pushd('openwrt'):
            try:
                sh('./scripts/feeds update packages')
                sh('./scripts/feeds install -a -p packages')
                # Stage the rendered config files into the OpenWrt tree.
                path.copytree(host_config_files_path, 'files')
                path('files/buildroot-config').move('.config')
                sh('make defconfig')
                sh('make prereq')
                sh('make -j5 # V=s')
                # Copy image to target location
                # NOTE(review): .next() is Python 2 only — consistent with
                # the py2-era imports (urlparse) in this file.
                built_image_path = path('bin').walkfiles(config['openwrt_image_builder_image_filename']).next()
                built_image_path.copy(host_image_path)
            finally:
                # Always remove the staged files so a failed build does not
                # leave stale config in the openwrt checkout.
                path('files').rmtree()
                path('.config').remove()
@host_task
def flash(options):
    """
    Flash a bundled image to a machine and install it via sysupgrade.
    """
    config = get_config(options.host)
    local_host_image_path = path('images').joinpath(options.host, config['openwrt_image_builder_image_filename'])
    remote_host_image_path = path('/tmp').joinpath(config['openwrt_image_builder_image_filename'])
    # Use SCP because paramiko SFTP does not seem to play nice with dropbear
    sh("scp " + local_host_image_path + " root@" + options.host + ":" + remote_host_image_path)
    # Connect to machine and install image.
    # Wait until the system says it is rebooting, then disconnect. The
    # connection will not be closed by remote so we have to take care of
    # it or we will run into a timeout.
    info("Performing system upgrade...")
    ssh = paramiko.SSHClient()
    # TODO: Add a list of trusted hostkeys
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(options.host, username='root')
    stdin, stdout, stderr = ssh.exec_command("sysupgrade -n -v " + remote_host_image_path)
    # Stream sysupgrade output until the reboot marker appears.
    while True:
        read_line = stdout.readline()
        print(read_line, end="")
        if "Rebooting system..." in read_line:
            ssh.close()
            break
@host_task
def update_config(options):
    """
    Render the config for a host, copy it onto the machine and reboot.
    """
    call_task('generate_config', options={
        'host': options.host})
    host_config_files_path = path('config').joinpath(options.host).abspath()
    # Copy the rendered tree into the root filesystem of the host.
    sh("scp -r {} root@{}:/".format(
        host_config_files_path.joinpath("*"),
        options.host))
    sh("ssh root@{} reboot".format(options.host))
def get_config(target):
    """
    Load the configuration for a specific target and include all parent
    configuration values.

    :param target: name of the target whose ``target/<target>/config.yml``
                   should be loaded
    :return: dict of configuration values; child keys override parent
             keys, and the ``parents`` list accumulates the inheritance
             chain ending with *target*.
    """
    # safe_load avoids arbitrary object construction; plain config files
    # do not need full yaml.load. Also use open() via a context manager
    # instead of the Python-2-only file() builtin, so the handle is closed.
    with open("target/{}/config.yml".format(target)) as config_file:
        config = yaml.safe_load(config_file)
    if 'parent' in config:
        # Merge with the parent config; keys from the child win. This is
        # equivalent to the old dict(parent.items() + config.items()) but
        # works on both Python 2 and 3.
        merged = get_config(config['parent'])
        merged.update(config)
        config = merged
    if 'parents' not in config:
        config['parents'] = []
    config['parents'].append(target)
    return config
def get_image_builder(config):
    """
    Get the path to the tar file containing the image builder.

    This function will download and unpack the image builder tarball if it
    is not yet available locally.

    :param config: target configuration dict providing
                   ``openwrt_image_builder_uri``
    :return: path to the unpacked image builder tar file
    """
    builder_uri = config['openwrt_image_builder_uri']
    # Use the last path component of the URI as the filename
    builder_filename = path(urlparse.urlparse(builder_uri).path).name
    # Strip off the .bz2 extension to get the unpacked tar filename
    builder_tar_filename = builder_filename.namebase
    dl_dir = path('/tmp/configdich/downloads')
    builder_tar_path = dl_dir.joinpath(builder_tar_filename)
    dl_path = dl_dir.joinpath(builder_filename)
    if builder_tar_path.exists():
        # Already downloaded and unpacked
        pass
    elif dl_path.exists():
        # Image downloaded but not unpacked
        sh("bunzip2 {0}".format(dl_path))
    else:
        dl_dir.makedirs_p()
        # Download image
        sh('wget -c -O {0} {1}'.format(dl_path, builder_uri))
        # Unpack image (bunzip2 replaces the .bz2 file in place).
        # Fixed: removed a stray second format() argument that was never
        # used by the single-placeholder format string.
        sh("bunzip2 {0}".format(dl_path))
    return builder_tar_path
class ConfigdichLoader(BaseLoader):
    """
    Template loader to support loading configuration file templates from
    other targets using the pseudo-path description
    [target]/[config_file_path], falling back to the parent target when a
    template is missing.
    """
    def __init__(self, target_path):
        # Root directory containing one subdirectory per target.
        self.target_path = target_path

    def get_source(self, environment, template):
        # split the template identifier into target an template path at
        # the first /
        target, relative_template_path = template.split("/", 1)
        target_template_path = self.target_path.joinpath(
            target, 'files', relative_template_path)
        # Check if the template file exists for the specified target
        if target_template_path.exists():
            old_mtime = target_template_path.mtime
            # Return source, filename and Jinja's up-to-date callback.
            return (target_template_path.text(), target_template_path,
                    lambda: old_mtime == target_template_path.mtime)
        # The template file does not exist; fall back to the parent target.
        # Fixed: use .get() so a target without a 'parent' key raises
        # TemplateNotFound instead of an unexpected KeyError.
        parent_target = get_config(target).get('parent')
        if parent_target is not None:
            return self.get_source(
                environment, parent_target + "/" + relative_template_path)
        raise TemplateNotFound(template)
| {
"content_hash": "a234eb7304d4f41373d519b4e7e03ff5",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 120,
"avg_line_length": 37.509433962264154,
"alnum_prop": 0.6427313883299799,
"repo_name": "leonhandreke/configdich",
"id": "5c8702ebf3c9031d2a78305e3b625f1090a136e3",
"size": "7952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pavement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7952"
}
],
"symlink_target": ""
} |
"""
Test loading cifar100.
"""
import os
from dbcollection.utils.test import TestBaseDB
# setup
name = 'cifar100'
task = 'classification'
data_dir = os.path.join(os.path.expanduser("~"), 'tmp', 'download_data')
verbose = True
# Run tester
tester = TestBaseDB(name, task, data_dir, verbose)
tester.run('download') | {
"content_hash": "36a5812cec1073aa817364896bef8c75",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 18.58823529411765,
"alnum_prop": 0.7151898734177216,
"repo_name": "farrajota/dbcollection",
"id": "6d01205e020ed35447405e55d8aa1ac124b8499c",
"size": "340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/functional/download/cifar100.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Jupyter Notebook",
"bytes": "21468"
},
{
"name": "Makefile",
"bytes": "1692"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "1620582"
}
],
"symlink_target": ""
} |
__author__ = 'Nils Schmidt'
class ShellCmdWrapper:
    """
    Base class for objects that wrap an external shell command.

    Attributes
    ----------
    shell_prefix : str, optional ( default is the class name )
        Name of the shell command to be wrapped.
    process : subprocess32.Popen
    """

    def __init__(self):
        # TODO: #2: DOC
        super(ShellCmdWrapper, self).__init__()
        # The original assigned ``self.shell_prefix = None``, which collides
        # with the read-only property below: AttributeError on Python 3 and
        # silent shadowing of the documented default on Python 2. Keep the
        # override in a private slot instead.
        self._shell_prefix = None
        self.process = None

    def start(self, *args, **kwargs):
        # Subclasses must implement the actual command launch.
        raise NotImplementedError

    @property
    def shell_prefix(self):
        # Documented default: the concrete class name, unless a subclass
        # set ``_shell_prefix`` explicitly.
        return self._shell_prefix or str(self.__class__.__name__)
| {
"content_hash": "dc7e3fbb62c386e724c90c28d3336861",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 22.52,
"alnum_prop": 0.5701598579040853,
"repo_name": "miniworld-project/miniworld_core",
"id": "90614cb3d95b43ce330b40eb44df877772ff0050",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniworld/model/ShellCmdWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696934"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
import math
from metwit import Metwit
from flask import Flask, request, jsonify, render_template
app = Flask(__name__)
# Debug mode is enabled unconditionally — fine for this hobby server,
# but should not be used behind a public endpoint.
app.debug = True
# Error codes returned to the client in bundle slot '0'.
error_codes = {
    'NO_RESULTS': 1
}
# Mapping from Metwit icon names to the integer status IDs understood by
# the watch app. NOTE(review): 'twitter' and 'instagram' both map to 15 —
# presumably intentional (shared icon); verify against the watch resources.
weather_statuses = {
    'unknown': 0,
    'sunny': 1,
    'rainy': 2,
    'stormy': 3,
    'snowy': 4,
    'partly_cloudy': 5,
    'cloudy': 6,
    'hailing': 7,
    'heavy_seas': 8,
    'calm_seas': 9,
    'foggy': 10,
    'snow_flurries': 11,
    'windy': 12,
    'clear_moon': 13,
    'partly_moon': 14,
    'twitter': 15,
    'instagram': 15,
}
def get_result_with_temperature(results):
    """
    Return the first result in ``results`` whose weather data contains a
    measured temperature, or None if no such result exists.
    """
    return next(
        (candidate for candidate in results
         if 'measured' in candidate['weather']
         and 'temperature' in candidate['weather']['measured']),
        None,
    )
@app.route('/weather', methods=['POST'])
def weather():
    """
    Look up the current weather via the Metwit API.

    INPUT BUNDLE: {'0': latitude, '1': longitude, '2': unit ("F" or "C")}
    E.G. {'0': 439596, '1': 109551, '2': 'F'}
    OUTPUT BUNDLE: {'0': error code, '1': weather condition, '2': temperature}
    E.G. {'0': 0, '1': 1, '2': 78}
    """
    # Coordinates arrive as fixed-point integers scaled by 10000.
    latitude = float(request.json['0']) / 10000
    longitude = float(request.json['1']) / 10000
    # Renamed from ``format`` so the builtin is not shadowed.
    unit = request.json.get('2', 'C')
    results = Metwit.weather.get(location_lat=latitude, location_lng=longitude)
    result = get_result_with_temperature(results)
    if result is None:
        # No result carried a measured temperature.
        return jsonify({'0': error_codes['NO_RESULTS']})
    weather_data = result['weather']
    temperature = weather_data['measured']['temperature']
    # The icon URL's last path component names the condition.
    icon_name = result['icon'].split('/')[-1]
    status = weather_statuses.get(icon_name, weather_statuses['unknown'])
    # The API reports temperature in Kelvin; convert to the requested unit.
    if unit == 'C':
        temperature = temperature - 273.15
    else:
        temperature = temperature * 9 / 5.0 - 459.67
    return jsonify({
        '0': 0,
        '1': status,
        '2': int(math.ceil(temperature))
    })
@app.route('/')
def home():
    """Serve the static landing page."""
    return render_template('index.html')
if __name__ == "__main__":
    # Bind to all interfaces so the companion phone app can reach the server.
    app.run(host='0.0.0.0')
| {
"content_hash": "23954ac637841d1f13339712472acebc",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 22.423076923076923,
"alnum_prop": 0.5604631217838765,
"repo_name": "patrick91/MetwitPebble",
"id": "451e58094e2cc27341d5a929ddb85255626e5b7d",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4519"
},
{
"name": "CSS",
"bytes": "12338"
},
{
"name": "JavaScript",
"bytes": "2485"
},
{
"name": "Python",
"bytes": "5251"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
} |
from queue import Queue
from microrpc import Server
# Mapping of queue name -> Queue instance, shared by all RPC handlers below.
queues = {}
server = Server()
@server.rpc
def create(name):
    """Create a named queue. Return True if created, False if it existed."""
    if name not in queues:
        queues[name] = Queue()
        return True
    return False
@server.rpc
def delete(name):
    """Remove a named queue. Return True if it existed, False otherwise."""
    # pop() yields the Queue (truthy) when present, the False default when not.
    if not queues.pop(name, False):
        return False
    return True
@server.rpc
def put(name, payload):
    # Enqueue a payload; raises KeyError if the queue does not exist.
    queues[name].put(payload)
@server.rpc
def get(name):
    # Blocks until an item is available; raises KeyError if the queue
    # does not exist.
    return queues[name].get()
server.run()
| {
"content_hash": "50f2b8c5f72be67a52bb6e3ba2da1a03",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 35,
"avg_line_length": 15.620689655172415,
"alnum_prop": 0.6534216335540839,
"repo_name": "jasonjohnson/microq",
"id": "2f01368269b9a6a6646084ad88b1861e5dd93688",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microq/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1154"
}
],
"symlink_target": ""
} |
"""ImportLite setup module.
Based on the example at https://github.com/pypa/sampleproject.
"""
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Load the long description from the adjacent README so PyPI can render it.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='importlite',
    version='0.1.0',
    description='Import CSV files into SQLite databases',
    # Fix: the README text was read above but never handed to setuptools.
    long_description=long_description,
    url='https://github.com/KatrinaE/importlite',
    author='Katrina Ellison Geltman',
    author_email='katrina.m.ellison@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Database',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ],
    keywords='csv sqlite',
    packages=['importlite'],
    entry_points={
        'console_scripts': [
            'importlite=importlite.__main__:main',
        ]
    }
)
| {
"content_hash": "bf2957f7938b67a8e6c50401b3609903",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 63,
"avg_line_length": 32.175,
"alnum_prop": 0.5656565656565656,
"repo_name": "KatrinaE/importlite",
"id": "793b19497df9f81dd9891d06fe355e034f9745de",
"size": "1287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26398"
}
],
"symlink_target": ""
} |
import sqlite3
from utils import error_printer
class DBHandler:
    """
    A class which will handle any and all database operations.

    Wraps a single sqlite3 connection to ``db_name`` (default ``blobs.db``)
    and provides insert/iterate/delete/backup helpers for the ``blobs``
    table. Python 2 code (uses ``iteritems`` and ``print`` statements).
    """
    def __init__(self, db_name='blobs.db'):
        # Target database file; the connection is opened eagerly.
        self.db = db_name
        self.open = False
        self.conn = self.open_db_conn()
        # Return bytestrings rather than unicode for TEXT columns.
        self.conn.text_factory = str
    @staticmethod
    def construct_insert_dict(from_values):
        # Zip positional values onto the blobs-table column names.
        # Extra values beyond the five names are silently dropped by zip().
        db_schema_names = ['id', 'owner', 'repo', 'sha', 'file_name']
        return dict(zip(db_schema_names, from_values))
    @staticmethod
    def _format_string(string, cols, table):
        # Build a SQL statement from a verb prefix plus column/criteria info:
        #   list  -> "<verb> col1,col2 FROM <table>"        (SELECT-style)
        #   dict  -> WHERE clause, or "(?,..) VALUES (..)" if 'insert' is
        #            present in the verb
        #   other -> verb plus the raw value appended.
        # WARNING(review): values are interpolated directly into the SQL
        # text — vulnerable to SQL injection if cols/table come from
        # untrusted input. Parameterized queries would be safer.
        if isinstance(cols, list):
            cols_str = ''
            for i in cols:
                cols_str += '{},'.format(i)
            string = '{} {} FROM {}'.format(string, cols_str[:-1], table)
        elif isinstance(cols, dict):
            if 'insert' not in string.lower():
                where_str = 'WHERE '
                for key, val in cols.iteritems():
                    where_str += '{}=\'{}\' AND '.format(key, val)
                # [:-4] trims the dangling " AND" suffix.
                string = '{} {} {}'.format(string, table, where_str)[:-4]
            else:
                questions = ''
                values = ''
                for _, val in cols.iteritems():
                    questions += '?,'
                    values += '{},'.format(val)
                where_str = '({}) VALUES'.format(questions[:-1])
                string = '{} {} {} ({})'.format(string,
                                                table,
                                                where_str,
                                                values[:-1])
        else:
            string = '{} {}'.format(string, cols)
        return string
    def open_db_conn(self, db_name=None):
        # Open (or reopen) a connection; falls back to self.db when no
        # name is given. Returns None if sqlite3 raises.
        if db_name is None:
            db_name = self.db
        try:
            conn = sqlite3.connect(db_name)
            if conn:
                self.open = True
                return conn
        except sqlite3.Error as e:
            error_printer(e, self.open_db_conn)
    def _close_conn(self):
        # Close the handle and mark the wrapper as closed.
        self.conn.close()
        self.open = False
    def insert_blob(self, repo_dict):
        # Insert one row into ``blobs``; missing keys or DB errors are
        # reported via error_printer rather than raised.
        try:
            owner = repo_dict['owner']
            repo = repo_dict['repo']
            sha = repo_dict['sha']
            file_name = repo_dict['file_name']
            blob = repo_dict['blob']
            cursor = self.conn.cursor()
            cursor.execute('INSERT INTO blobs(owner, repo, sha, file_name,'
                           'blob) VALUES (?,?,?,?,?)',
                           [owner, repo, sha, file_name, blob])
            self.conn.commit()
        except sqlite3.Error as e:
            error_printer(e, self.insert_blob)
        except KeyError as e:
            error_printer(e, self.insert_blob)
    def iter_database(self, cols='blob', table='blobs'):
        # Generator over rows of ``table`` restricted to ``cols``.
        string = self._format_string('SELECT', cols, table)
        for row in self.conn.execute(string):
            yield row
    def delete_row(self, where):
        # Delete rows from ``blobs`` matching the ``where`` criteria dict.
        string = self._format_string('DELETE FROM', where, 'blobs')
        try:
            cursor = self.conn.cursor()
            cursor.execute(string)
            self.conn.commit()
        except sqlite3.Error as e:
            print e
    def get_row(self, db_conn, string=None, where=None, table=None):
        # Fetch a single row, either from a prebuilt ``string`` or built
        # from ``where``. NOTE(review): ``db_conn`` and ``table`` are
        # accepted but never used; self.conn is always queried.
        if string is None:
            string = self._format_string('SELECT * FROM', where, '')
        try:
            cursor = self.conn.cursor()
            result = cursor.execute(string)
            return result.fetchone()
        except sqlite3.Error as e:
            print e
    def move_row_to_backup(self, where):
        # Copy the matching row into processed_blobs, then remove it from
        # blobs. Same string-interpolation injection caveat as above.
        to_string = "INSERT INTO processed_blobs SELECT"
        to_string += " * FROM blobs WHERE sha='{}' AND file_name='{}'".format(
            where['sha'], where['file_name'])
        try:
            cursor = self.conn.cursor()
            cursor.execute(to_string)
            self.conn.commit()
            self.delete_row(where)
        except sqlite3.Error as e:
            print e
    def export_to_mysql(self):
        # Placeholder — not implemented.
        pass
| {
"content_hash": "25c855f3bb7030e2ab24bc1fcd627eb4",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 32.75,
"alnum_prop": 0.48584092588032507,
"repo_name": "GrappigPanda/GithubTODOScraper",
"id": "9ebd5cd2ccedec09b6ca8db024209c926306c848",
"size": "4061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper/DBHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18406"
}
],
"symlink_target": ""
} |
"""Common tests for metaestimators"""
import pytest
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData(object):
    """Description of one delegating meta-estimator under test: its name,
    a constructor taking the sub-estimator, methods it is known not to
    delegate, and the data used to fit it."""

    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        # NOTE: the default fit_args dataset is generated once at class
        # definition time and deliberately shared across instances.
        self.name = name
        self.construct = construct
        self.skip_methods = skip_methods
        self.fit_args = fit_args
# Meta-estimators that delegate attribute access to a wrapped sub-estimator,
# each paired with the methods it is known NOT to delegate (skip_methods).
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba',
                                'predict'])
]
@pytest.mark.filterwarnings('ignore: You should specify a value')  # 0.22
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        # Wrap ``method`` in a property that raises AttributeError when the
        # instance's ``hidden_method`` names it — simulating a sub-estimator
        # that lacks that method entirely (hasattr then returns False).
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper
    class SubEstimator(BaseEstimator):
        # Minimal estimator whose methods can be selectively hidden via
        # the ``hidden_method`` constructor argument.
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method
        def fit(self, X, y=None, *args, **kwargs):
            # ``coef_`` marks the estimator as fitted for check_is_fitted.
            self.coef_ = np.arange(X.shape[1])
            return True
        def _check_fit(self):
            check_is_fitted(self, 'coef_')
        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0
    # All public, non-fit methods of SubEstimator are candidates for
    # delegation checks.
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()
    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises a NotFittedError
            assert_raises(NotFittedError, getattr(delegator, method),
                          delegator_data.fit_args[0])
        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])
        # Conversely: hiding a method on the delegate must hide it on the
        # delegator too.
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
| {
"content_hash": "04832c56ec6e4c1b6cc92dc256ba0231",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 36.361702127659576,
"alnum_prop": 0.5835771406280476,
"repo_name": "vortex-ape/scikit-learn",
"id": "1c2d5a0873cd985a2c79501f64291544bf1fd85a",
"size": "5127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/tests/test_metaestimators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import AccountTypes, CashInflow, CashOutflow, ChartOfAccounts
# Expose the finance models in the Django admin with the default ModelAdmin.
admin.site.register(CashInflow)
admin.site.register(CashOutflow)
admin.site.register(ChartOfAccounts)
admin.site.register(AccountTypes)
| {
"content_hash": "4e72b0481deee3135b78d227a5c0ef1b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.8421052631578947,
"repo_name": "jrmanrique/ekalay-finance",
"id": "a01999b64a2c15a87afbae15cc3d59206eb554aa",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finance/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "300"
},
{
"name": "HTML",
"bytes": "541124"
},
{
"name": "Python",
"bytes": "63703"
}
],
"symlink_target": ""
} |
import warnings
from functools import partial
import numpy as np
from astropy import units as u
from astropy.modeling.convolution import Convolution
from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel
from astropy.nddata import support_nddata
from astropy.utils.console import human_file_size
from astropy.utils.exceptions import AstropyUserWarning
from ._convolve import _convolveNd_c
from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D
from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception
# Precomputed "fast" FFT sizes used as padding targets when
# scipy.fft.next_fast_len is not importable; generated via:
# np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)])
# fmt: off
_good_sizes = np.array(
    [
        0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
        15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40,
        45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90,
        96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162,
        180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288,
        300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480,
        486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720,
        729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024,
        1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458,
        1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025,
        2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700,
        2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645,
        3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800,
        4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144,
        6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776,
        8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000,
    ]
)
# fmt: on
# Largest power of ten covered by the table above; larger requests are
# handled by scaling table entries (see _next_fast_lengths).
_good_range = int(np.log10(_good_sizes[-1]))
# Disabling doctests when scipy isn't present.
__doctest_requires__ = {("convolve_fft",): ["scipy.fft"]}
# Valid values for the ``boundary`` keyword of convolve / convolve_fft.
BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"]
def _next_fast_lengths(shape):
"""
Find optimal or good sizes to pad an array of ``shape`` to for better
performance with `numpy.fft.*fft` and `scipy.fft.*fft`.
Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise
looked up from list and scaled by powers of 10, if necessary.
"""
try:
import scipy.fft
return np.array([scipy.fft.next_fast_len(j) for j in shape])
except ImportError:
pass
newshape = np.empty(len(np.atleast_1d(shape)), dtype=int)
for i, j in enumerate(shape):
scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0)
for n in _good_sizes:
if n * scale >= j:
newshape[i] = n * scale
break
else:
raise ValueError(
f"No next fast length for {j} found in list of _good_sizes "
f"<= {_good_sizes[-1] * scale}."
)
return newshape
def _copy_input_if_needed(
    input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None
):
    """
    Return ``input`` as a plain ndarray of the requested ``dtype``/``order``,
    copying only when required (masking, NaN filling, or conversion).

    Kernel inputs are unwrapped to their ``.array``; inputs with a ``unit``
    attribute are stripped to their ``.value``. Masked elements (from
    ``np.ma`` or an explicit ``mask``) are replaced with ``fill_value``.
    """
    # Alias input
    input = input.array if isinstance(input, Kernel) else input
    # strip quantity attributes
    if hasattr(input, "unit"):
        input = input.value
    output = input
    # Copy input
    try:
        # Anything that's masked must be turned into NaNs for the interpolation.
        # This requires copying. A copy is also needed for nan_treatment == 'fill'
        # A copy prevents possible function side-effects of the input array.
        if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None:
            if np.ma.is_masked(input):
                # ``np.ma.maskedarray.filled()`` returns a copy, however there
                # is no way to specify the return type or order etc. In addition
                # ``np.nan`` is a ``float`` and there is no conversion to an
                # ``int`` type. Therefore, a pre-fill copy is needed for non
                # ``float`` masked arrays. ``subok=True`` is needed to retain
                # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill
                # to act as the copy if type and order are already correct.
                output = np.array(
                    input, dtype=dtype, copy=False, order=order, subok=True
                )
                output = output.filled(fill_value)
            else:
                # Since we're making a copy, we might as well use `subok=False` to save,
                # what is probably, a negligible amount of memory.
                output = np.array(
                    input, dtype=dtype, copy=True, order=order, subok=False
                )
            if mask is not None:
                # mask != 0 yields a bool mask for all ints/floats/bool
                output[mask != 0] = fill_value
        else:
            # The call below is synonymous with np.asanyarray(array, ftype=float, order='C')
            # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass.
            # If it is and `subok=False` (default), then it will copy even if `copy=False`. This
            # uses less memory when ndarray subclasses are passed in.
            output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
    except (TypeError, ValueError) as e:
        raise TypeError(
            "input should be a Numpy array or something convertible into a float array",
            e,
        )
    return output
@support_nddata(data="array")
def convolve(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
mask=None,
preserve_nan=False,
normalization_zero_tol=1e-8,
):
"""
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
Periodic boundary that wrap to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``.
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked of it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
Returns
-------
result : `numpy.ndarray`
An array with the same dimensions and as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
"""
if boundary not in BOUNDARY_OPTIONS:
raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# OpenMP support is disabled at the C src code level, changing this will have
# no effect.
n_threads = 1
# Keep refs to originals
passed_kernel = kernel
passed_array = array
# The C routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Convert kernel to ndarray if not already
# Copy or alias array to array_internal
array_internal = _copy_input_if_needed(
passed_array,
dtype=float,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
array_dtype = getattr(passed_array, "dtype", array_internal.dtype)
# Copy or alias kernel to kernel_internal
kernel_internal = _copy_input_if_needed(
passed_kernel,
dtype=float,
order="C",
nan_treatment=None,
mask=None,
fill_value=fill_value,
)
# Make sure kernel has all odd axes
if has_even_axis(kernel_internal):
raise_even_kernel_exception()
# If both image array and kernel are Kernel instances
# constrain convolution method
# This must occur before the main alias/copy of ``passed_kernel`` to
# ``kernel_internal`` as it is used for filling masked kernels.
if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
warnings.warn(
"Both array and kernel are Kernel instances, hardwiring "
"the following parameters: boundary='fill', fill_value=0,"
" normalize_Kernel=True, nan_treatment='interpolate'",
AstropyUserWarning,
)
boundary = "fill"
fill_value = 0
normalize_kernel = True
nan_treatment = "interpolate"
# -----------------------------------------------------------------------
# From this point onwards refer only to ``array_internal`` and
# ``kernel_internal``.
# Assume both are base np.ndarrays and NOT subclasses e.g. NOT
# ``Kernel`` nor ``np.ma.maskedarray`` classes.
# -----------------------------------------------------------------------
# Check dimensionality
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim > 3:
raise NotImplementedError(
"convolve only supports 1, 2, and 3-dimensional arrays at this time"
)
elif array_internal.ndim != kernel_internal.ndim:
raise Exception("array and kernel have differing number of dimensions.")
array_shape = np.array(array_internal.shape)
kernel_shape = np.array(kernel_internal.shape)
pad_width = kernel_shape // 2
# For boundary=None only the center space is convolved. All array indices within a
# distance kernel.shape//2 from the edge are completely ignored (zeroed).
# E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
# are convolved. It is therefore not possible to use this method to convolve an
# array by a kernel that is larger (see note below) than the array - as ALL pixels
# would be ignored leaving an array of only zeros.
# Note: For even kernels the correctness condition is array_shape > kernel_shape.
# For odd kernels it is:
# array_shape >= kernel_shape OR
# array_shape > kernel_shape-1 OR
# array_shape > 2*(kernel_shape//2).
# Since the latter is equal to the former two for even lengths, the latter condition is
# complete.
if boundary is None and not np.all(array_shape > 2 * pad_width):
raise KernelSizeError(
"for boundary=None all kernel axes must be smaller than array's - "
"use boundary in ['fill', 'extend', 'wrap'] instead."
)
# NaN interpolation significantly slows down the C convolution
# computation. Since nan_treatment = 'interpolate', is the default
# check whether it is even needed, if not, don't interpolate.
# NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
nan_interpolate = (nan_treatment == "interpolate") and np.isnan(
array_internal.sum()
)
# Check if kernel is normalizable
if normalize_kernel or nan_interpolate:
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero:
if nan_interpolate:
raise ValueError(
"Setting nan_treatment='interpolate' "
"requires the kernel to be normalized, "
"but the input kernel has a sum close "
"to zero. For a zero-sum kernel and "
"data with NaNs, set nan_treatment='fill'."
)
else:
raise ValueError(
"The kernel can't be normalized, because "
"its sum is close to zero. The sum of the "
f"given kernel is < {1.0 / MAX_NORMALIZATION}"
)
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan or nan_treatment == "fill":
initially_nan = np.isnan(array_internal)
if nan_treatment == "fill":
array_internal[initially_nan] = fill_value
# Avoid any memory allocation within the C code. Allocate output array
# here and pass through instead.
result = np.zeros(array_internal.shape, dtype=float, order="C")
embed_result_within_padded_region = True
array_to_convolve = array_internal
if boundary in ("fill", "extend", "wrap"):
embed_result_within_padded_region = False
if boundary == "fill":
# This method is faster than using numpy.pad(..., mode='constant')
array_to_convolve = np.full(
array_shape + 2 * pad_width,
fill_value=fill_value,
dtype=float,
order="C",
)
# Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of
# [pad_width[0]:-pad_width[0]]
# to account for when the kernel has size of 1 making pad_width = 0.
if array_internal.ndim == 1:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0]
] = array_internal
elif array_internal.ndim == 2:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
] = array_internal
else:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
pad_width[2] : array_shape[2] + pad_width[2],
] = array_internal
else:
np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"}
np_pad_mode = np_pad_mode_dict[boundary]
pad_width = kernel_shape // 2
if array_internal.ndim == 1:
np_pad_width = (pad_width[0],)
elif array_internal.ndim == 2:
np_pad_width = ((pad_width[0],), (pad_width[1],))
else:
np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))
array_to_convolve = np.pad(
array_internal, pad_width=np_pad_width, mode=np_pad_mode
)
_convolveNd_c(
result,
array_to_convolve,
kernel_internal,
nan_interpolate,
embed_result_within_padded_region,
n_threads,
)
# So far, normalization has only occurred for nan_treatment == 'interpolate'
# because this had to happen within the C extension so as to ignore
# any NaNs
if normalize_kernel:
if not nan_interpolate:
result /= kernel_sum
elif nan_interpolate:
result *= kernel_sum
if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
warnings.warn(
"nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, are present in the input array. "
"Increase the kernel size to avoid this.",
AstropyUserWarning,
)
if preserve_nan:
result[initially_nan] = np.nan
# Convert result to original data type
array_unit = getattr(passed_array, "unit", None)
if array_unit is not None:
result <<= array_unit
if isinstance(passed_array, Kernel):
if isinstance(passed_array, Kernel1D):
new_result = Kernel1D(array=result)
elif isinstance(passed_array, Kernel2D):
new_result = Kernel2D(array=result)
else:
raise TypeError("Only 1D and 2D Kernels are supported.")
new_result._is_bool = False
new_result._separable = passed_array._separable
if isinstance(passed_kernel, Kernel):
new_result._separable = new_result._separable and passed_kernel._separable
return new_result
elif array_dtype.kind == "f":
# Try to preserve the input type if it's a floating point type
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@support_nddata(data="array")
def convolve_fft(
    array,
    kernel,
    boundary="fill",
    fill_value=0.0,
    nan_treatment="interpolate",
    normalize_kernel=True,
    normalization_zero_tol=1e-8,
    preserve_nan=False,
    mask=None,
    crop=True,
    return_fft=False,
    fft_pad=None,
    psf_pad=None,
    min_wt=0.0,
    allow_huge=False,
    fftn=np.fft.fftn,
    ifftn=np.fft.ifftn,
    complex_dtype=complex,
    dealias=False,
):
    """
    Convolve an ndarray with an nd-kernel. Returns a convolved image with
    ``shape = array.shape``. Assumes kernel is centered.

    `convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
    values in the original image with interpolated values using the kernel as
    an interpolation function. However, it also includes many additional
    options specific to the implementation.

    `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:

    * It can treat ``NaN`` values as zeros or interpolate over them.
    * ``inf`` values are treated as ``NaN``
    * It optionally pads to the nearest faster sizes to improve FFT speed.
      These sizes are optimized for the numpy and scipy implementations, and
      ``fftconvolve`` uses them by default as well; when using other external
      functions (see below), results may vary.
    * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
    * It lets you use your own fft, e.g.,
      `pyFFTW <https://pypi.org/project/pyFFTW/>`_ or
      `pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to
      performance improvements, depending on your system configuration. pyFFTW3
      is threaded, and therefore may yield significant performance benefits on
      multi-core machines at the cost of greater memory requirements. Specify
      the ``fftn`` and ``ifftn`` keywords to override the default, which is
      `numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also
      offer somewhat better performance and a multi-threaded option.

    Parameters
    ----------
    array : `numpy.ndarray`
        Array to be convolved with ``kernel``. It can be of any
        dimensionality, though only 1, 2, and 3d arrays have been tested.
    kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
        The convolution kernel. The number of dimensions should match those
        for the array. The dimensions *do not* have to be odd in all directions,
        unlike in the non-fft `convolve` function. The kernel will be
        normalized if ``normalize_kernel`` is set. It is assumed to be centered
        (i.e., shifts may result if your kernel is asymmetric)
    boundary : {'fill', 'wrap'}, optional
        A flag indicating how to handle boundaries:

        * 'fill': set values outside the array boundary to fill_value
          (default)
        * 'wrap': periodic boundary

        The `None` and 'extend' parameters are not supported for FFT-based
        convolution.
    fill_value : float, optional
        The value to use outside the array when using boundary='fill'.
    nan_treatment : {'interpolate', 'fill'}, optional
        The method used to handle NaNs in the input ``array``:

        * ``'interpolate'``: ``NaN`` values are replaced with
          interpolated values using the kernel as an interpolation
          function. Note that if the kernel has a sum equal to
          zero, NaN interpolation is not possible and will raise an
          exception.
        * ``'fill'``: ``NaN`` values are replaced by ``fill_value``
          prior to convolution.
    normalize_kernel : callable or boolean, optional
        If specified, this is the function to divide kernel by to normalize it.
        e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
        ``kernel = kernel / np.sum(kernel)``. If True, defaults to
        ``normalize_kernel = np.sum``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different than zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is "1e-8".
    preserve_nan : bool, optional
        After performing convolution, should pixels that were originally NaN
        again become NaN?
    mask : None or ndarray, optional
        A "mask" array. Shape must match ``array``, and anything that is masked
        (i.e., not 0/`False`) will be set to NaN for the convolution. If
        `None`, no masking will be performed unless ``array`` is a masked array.
        If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
    crop : bool, optional
        Default on. Return an image of the size of the larger of the input
        image and the kernel.
        If the image and kernel are asymmetric in opposite directions, will
        return the largest image in both directions.
        For example, if an input image has shape [100,3] but a kernel with shape
        [6,6] is used, the output will be [100,6].
    return_fft : bool, optional
        Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
        ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
    fft_pad : bool, optional
        Default on. Zero-pad image to the nearest size supporting more efficient
        execution of the FFT, generally values factorizable into the first 3-5
        prime numbers. With ``boundary='wrap'``, this will be disabled.
    psf_pad : bool, optional
        Zero-pad image to be at least the sum of the image sizes to avoid
        edge-wrapping when smoothing. This is enabled by default with
        ``boundary='fill'``, but it can be overridden with a boolean option.
        ``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
    min_wt : float, optional
        If ignoring ``NaN`` / zeros, force all grid points with a weight less than
        this value to ``NaN`` (the weight of a grid point with *no* ignored
        neighbors is 1.0).
        If ``min_wt`` is zero, then all zero-weight points will be set to zero
        instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
        See the examples below.
    allow_huge : bool, optional
        Allow huge arrays in the FFT? If False, will raise an exception if the
        array or kernel size is >1 GB.
    fftn : callable, optional
        The fft function. Can be overridden to use your own ffts,
        e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``.
    ifftn : callable, optional
        The inverse fft function. Can be overridden the same way ``fftn``.
    complex_dtype : complex type, optional
        Which complex dtype to use. `numpy` has a range of options, from 64 to
        256.
    dealias: bool, optional
        Default off. Zero-pad image to enable explicit dealiasing
        of convolution. With ``boundary='wrap'``, this will be disabled.
        Note that for an input of nd dimensions this will increase
        the size of the temporary arrays by at least ``1.5**nd``.
        This may result in significantly more memory usage.

    Returns
    -------
    default : ndarray
        ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
        ``fft(array) * fft(kernel)``. If crop is not set, returns the
        image, but with the fft-padded size instead of the input size.

    Raises
    ------
    `ValueError`
        If the array is bigger than 1 GB after padding, will raise this
        exception unless ``allow_huge`` is True.

    See Also
    --------
    convolve:
        Convolve is a non-fft version of this code. It is more memory
        efficient and for small kernels can be faster.

    Notes
    -----
    With ``psf_pad=True`` and a large PSF, the resulting data
    can become large and consume a lot of memory. See Issue
    https://github.com/astropy/astropy/pull/4366 and the update in
    https://github.com/astropy/astropy/pull/11533 for further details.

    Dealiasing of pseudospectral convolutions is necessary for
    numerical stability of the underlying algorithms. A common
    method for handling this is to zero pad the image by at least
    1/2 to eliminate the wavenumbers which have been aliased
    by convolution. This is so that the aliased 1/3 of the
    results of the convolution computation can be thrown out. See
    https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2
    https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037

    Note that if dealiasing is necessary to your application, but your
    process is memory constrained, you may want to consider using
    FFTW++: https://github.com/dealias/fftwpp. It includes python
    wrappers for a pseudospectral convolution which will implicitly
    dealias your convolution without the need for additional padding.
    Note that one cannot use FFTW++'s convolution directly in this
    method as it handles the entire convolution process internally.
    Additionally, FFTW++ includes other useful pseudospectral methods to
    consider.

    Examples
    --------
    >>> convolve_fft([1, 0, 3], [1, 1, 1])
    array([0.33333333, 1.33333333, 1.        ])
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1])
    array([0.5, 2. , 1.5])
    >>> convolve_fft([1, 0, 3], [0, 1, 0])  # doctest: +FLOAT_CMP
    array([ 1.00000000e+00, -3.70074342e-17,  3.00000000e+00])
    >>> convolve_fft([1, 2, 3], [1])
    array([1., 2., 3.])
    >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
    array([1., 0., 3.])
    >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
    ...              min_wt=1e-8)
    array([ 1., nan,  3.])
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
    array([0.5, 2. , 1.5])
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
    ...               normalize_kernel=True)
    array([0.5, 2. , 1.5])
    >>> import scipy.fft  # optional - requires scipy
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
    ...               normalize_kernel=True,
    ...               fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn)
    array([0.5, 2. , 1.5])
    >>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1)  # use all available cores
    >>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1)
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
    ...               normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp)
    array([0.5, 2. , 1.5])
    """
    # Checking copied from convolve.py - however, since FFTs have real &
    # complex components, we change the types. Only the real part will be
    # returned! Note that this always makes a copy.
    # Check kernel is kernel instance
    if isinstance(kernel, Kernel):
        kernel = kernel.array
        if isinstance(array, Kernel):
            raise TypeError(
                "Can't convolve two kernels with convolve_fft. Use convolve instead."
            )
    if nan_treatment not in ("interpolate", "fill"):
        raise ValueError("nan_treatment must be one of 'interpolate','fill'")
    # Get array quantity if it exists
    array_unit = getattr(array, "unit", None)
    # Convert array dtype to complex
    # and ensure that list inputs become arrays
    array = _copy_input_if_needed(
        array,
        dtype=complex,
        order="C",
        nan_treatment=nan_treatment,
        mask=mask,
        fill_value=np.nan,
    )
    kernel = _copy_input_if_needed(
        kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0
    )
    # Check that the number of dimensions is compatible
    if array.ndim != kernel.ndim:
        raise ValueError("Image and kernel must have same number of dimensions")
    arrayshape = array.shape
    kernshape = kernel.shape
    # NOTE: np.product was deprecated in NumPy 1.25 and removed in NumPy 2.0;
    # np.prod is the canonical spelling and behaves identically here.
    array_size_B = (
        np.prod(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
    ) * u.byte
    if array_size_B > 1 * u.GB and not allow_huge:
        raise ValueError(
            f"Size Error: Arrays will be {human_file_size(array_size_B)}. "
            "Use allow_huge=True to override this exception."
        )
    # NaN and inf catching
    nanmaskarray = np.isnan(array) | np.isinf(array)
    if nan_treatment == "fill":
        array[nanmaskarray] = fill_value
    else:
        array[nanmaskarray] = 0
    nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
    kernel[nanmaskkernel] = 0
    if normalize_kernel is True:
        if kernel.sum() < 1.0 / MAX_NORMALIZATION:
            raise Exception(
                "The kernel can't be normalized, because its sum is close to zero. The"
                f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}"
            )
        kernel_scale = kernel.sum()
        normalized_kernel = kernel / kernel_scale
        kernel_scale = 1  # if we want to normalize it, leave it normed!
    elif normalize_kernel:
        # try this. If a function is not passed, the code will just crash... I
        # think type checking would be better but PEPs say otherwise...
        kernel_scale = normalize_kernel(kernel)
        normalized_kernel = kernel / kernel_scale
    else:
        kernel_scale = kernel.sum()
        if np.abs(kernel_scale) < normalization_zero_tol:
            if nan_treatment == "interpolate":
                raise ValueError(
                    "Cannot interpolate NaNs with an unnormalizable kernel"
                )
            else:
                # the kernel's sum is near-zero, so it can't be scaled
                kernel_scale = 1
                normalized_kernel = kernel
        else:
            # the kernel is normalizable; we'll temporarily normalize it
            # now and undo the normalization later.
            normalized_kernel = kernel / kernel_scale
    if boundary is None:
        warnings.warn(
            "The convolve_fft version of boundary=None is "
            "equivalent to the convolve boundary='fill'. There is "
            "no FFT equivalent to convolve's "
            "zero-if-kernel-leaves-boundary",
            AstropyUserWarning,
        )
        if psf_pad is None:
            psf_pad = True
        if fft_pad is None:
            fft_pad = True
    elif boundary == "fill":
        # create a boundary region at least as large as the kernel
        if psf_pad is False:
            warnings.warn(
                f"psf_pad was set to {psf_pad}, which overrides the "
                "boundary='fill' setting.",
                AstropyUserWarning,
            )
        else:
            psf_pad = True
        if fft_pad is None:
            # default is 'True' according to the docstring
            fft_pad = True
    elif boundary == "wrap":
        if psf_pad:
            raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
        psf_pad = False
        if fft_pad:
            raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
        fft_pad = False
        if dealias:
            raise ValueError("With boundary='wrap', dealias cannot be enabled.")
        fill_value = 0  # force zero; it should not be used
    elif boundary == "extend":
        raise NotImplementedError(
            "The 'extend' option is not implemented for fft-based convolution"
        )
    # Add shapes elementwise for psf_pad.
    if psf_pad:  # default=False
        # add the sizes along each dimension (bigger)
        newshape = np.array(arrayshape) + np.array(kernshape)
    else:
        # take the larger shape in each dimension (smaller)
        newshape = np.maximum(arrayshape, kernshape)
    if dealias:
        # Extend shape by 1/2 for dealiasing
        newshape += np.ceil(newshape / 2).astype(int)
    # Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5).
    if fft_pad:  # default=True
        # Get optimized sizes from scipy.
        newshape = _next_fast_lengths(newshape)
    # perform a second check after padding
    array_size_C = (
        np.prod(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
    ) * u.byte
    if array_size_C > 1 * u.GB and not allow_huge:
        raise ValueError(
            f"Size Error: Arrays will be {human_file_size(array_size_C)}. "
            "Use allow_huge=True to override this exception."
        )
    # For future reference, this can be used to predict "almost exactly"
    # how much *additional* memory will be used.
    # size * (array + kernel + kernelfft + arrayfft +
    #         (kernel*array)fft +
    #         optional(weight image + weight_fft + weight_ifft) +
    #         optional(returned_fft))
    # total_memory_used_GB = (np.prod(newshape)*np.dtype(complex_dtype).itemsize
    #                         * (5 + 3*(interpolate_nan and kernel_is_normalized))
    #                         + (1 + (not return_fft)) *
    #                         np.prod(arrayshape)*np.dtype(complex_dtype).itemsize
    #                         + np.prod(arrayshape)*np.dtype(bool).itemsize
    #                         + np.prod(kernshape)*np.dtype(bool).itemsize)
    #                        ) / 1024.**3
    # separate each dimension by the padding size...  this is to determine the
    # appropriate slice size to get back to the input dimensions
    arrayslices = []
    kernslices = []
    for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape):
        center = newdimsize - (newdimsize + 1) // 2
        arrayslices += [
            slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2)
        ]
        kernslices += [
            slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2)
        ]
    arrayslices = tuple(arrayslices)
    kernslices = tuple(kernslices)
    if not np.all(newshape == arrayshape):
        if np.isfinite(fill_value):
            bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
        else:
            bigarray = np.zeros(newshape, dtype=complex_dtype)
        bigarray[arrayslices] = array
    else:
        bigarray = array
    if not np.all(newshape == kernshape):
        bigkernel = np.zeros(newshape, dtype=complex_dtype)
        bigkernel[kernslices] = normalized_kernel
    else:
        bigkernel = normalized_kernel
    arrayfft = fftn(bigarray)
    # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
    kernfft = fftn(np.fft.ifftshift(bigkernel))
    fftmult = arrayfft * kernfft
    interpolate_nan = nan_treatment == "interpolate"
    if interpolate_nan:
        if not np.isfinite(fill_value):
            bigimwt = np.zeros(newshape, dtype=complex_dtype)
        else:
            bigimwt = np.ones(newshape, dtype=complex_dtype)
        bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
        wtfft = fftn(bigimwt)
        # You can only get to this point if kernel_is_normalized
        wtfftmult = wtfft * kernfft
        wtsm = ifftn(wtfftmult)
        # need to re-zero weights outside of the image (if it is padded, we
        # still don't weight those regions)
        bigimwt[arrayslices] = wtsm.real[arrayslices]
    else:
        bigimwt = 1
    if np.isnan(fftmult).any():
        # this check should be unnecessary; call it an insanity check
        raise ValueError("Encountered NaNs in convolve. This is disallowed.")
    fftmult *= kernel_scale
    if array_unit is not None:
        fftmult <<= array_unit
    if return_fft:
        return fftmult
    if interpolate_nan:
        with np.errstate(divide="ignore", invalid="ignore"):
            # divide by zeros are expected here; if the weight is zero, we want
            # the output to be nan or inf
            rifft = (ifftn(fftmult)) / bigimwt
        if not np.isscalar(bigimwt):
            if min_wt > 0.0:
                rifft[bigimwt < min_wt] = np.nan
            else:
                # Set anything with no weight to zero (taking into account
                # slight offsets due to floating-point errors).
                rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
    else:
        rifft = ifftn(fftmult)
    if preserve_nan:
        rifft[arrayslices][nanmaskarray] = np.nan
    if crop:
        result = rifft[arrayslices].real
        return result
    else:
        return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
    """
    Given a data set containing NaNs, replace the NaNs by interpolating from
    neighboring data points with a given kernel.

    Parameters
    ----------
    array : `numpy.ndarray`
        Array whose NaN values are to be replaced. It can be of any
        dimensionality, though only 1, 2, and 3d arrays have been tested.
    kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
        The convolution kernel. The number of dimensions should match those
        for the array. The dimensions *do not* have to be odd in all directions,
        unlike in the non-fft `convolve` function. The kernel will be
        normalized if ``normalize_kernel`` is set. It is assumed to be centered
        (i.e., shifts may result if your kernel is asymmetric). The kernel
        *must be normalizable* (i.e., its sum cannot be zero).
    convolve : `convolve` or `convolve_fft`
        One of the two convolution functions defined in this package.

    Returns
    -------
    newarray : `numpy.ndarray`
        A copy of the original array with NaN pixels replaced with their
        interpolated counterparts
    """
    nan_mask = np.isnan(array)
    # Fast path: no NaNs anywhere, so just hand back an untouched copy.
    if not nan_mask.any():
        return array.copy()
    # Interpolate over the NaN pixels with the kernel; preserve_nan=False so
    # the smoothed output actually carries the interpolated values.
    smoothed = convolve(
        array,
        kernel,
        nan_treatment="interpolate",
        normalize_kernel=True,
        preserve_nan=False,
        **kwargs,
    )
    # Patch only the originally-NaN pixels; all valid data stays untouched.
    result = array.copy()
    result[nan_mask] = smoothed[nan_mask]
    return result
def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
    """
    Convolve two models using `~astropy.convolution.convolve_fft`.

    Parameters
    ----------
    model : `~astropy.modeling.core.Model`
        Functional model
    kernel : `~astropy.modeling.core.Model`
        Convolution kernel
    mode : str
        Keyword representing which function to use for convolution.
            * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
            * 'convolve' : use `~astropy.convolution.convolve`.
    **kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
        or `~astropy.convolution.convolve_fft` depending on ``mode``.

    Returns
    -------
    default : `~astropy.modeling.core.CompoundModel`
        Convolved model
    """
    # Map each supported mode onto the convolution function it selects.
    convolution_funcs = {"convolve_fft": convolve_fft, "convolve": convolve}
    if mode not in convolution_funcs:
        raise ValueError(f"Mode {mode} is not supported.")
    # Register the (possibly parameterized) convolution as a special binary
    # operator so it can participate in a compound-model expression tree.
    operator = SPECIAL_OPERATORS.add(mode, partial(convolution_funcs[mode], **kwargs))
    return CompoundModel(operator, model, kernel)
def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs):
    """
    Convolve two models using `~astropy.convolution.convolve_fft`.

    Parameters
    ----------
    model : `~astropy.modeling.core.Model`
        Functional model
    kernel : `~astropy.modeling.core.Model`
        Convolution kernel
    bounding_box : tuple
        The bounding box which encompasses enough of the support of both
        the ``model`` and ``kernel`` so that an accurate convolution can be
        computed.
    resolution : float
        The resolution that one wishes to approximate the convolution
        integral at.
    cache : optional, bool
        Default value True. Allow for the storage of the convolution
        computation for later reuse.
    **kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
        or `~astropy.convolution.convolve_fft` depending on ``mode``.

    Returns
    -------
    default : `~astropy.modeling.core.CompoundModel`
        Convolved model
    """
    # Register the parameterized FFT convolution as a special binary operator.
    fft_operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs))
    # Convolution evaluates the discrete convolution over ``bounding_box`` at
    # the requested ``resolution``, optionally caching the computation.
    return Convolution(fft_operator, model, kernel, bounding_box, resolution, cache)
| {
"content_hash": "42625dc33aae1612838a793afe41f504",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 100,
"avg_line_length": 41.57717492984097,
"alnum_prop": 0.6124735634252801,
"repo_name": "astropy/astropy",
"id": "9ae234f59819da1fb9649b8d02b8cf86d3397cf5",
"size": "44511",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/convolution/convolve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11039709"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "79917"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12402561"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from openerp.osv import fields, osv
class mrp_workcenter_load(osv.osv_memory):
    """Transient wizard that collects the reporting options (period type and
    measuring unit) for the Work Center Load report and launches it."""
    _name = 'mrp.workcenter.load'
    _description = 'Work Center Load'
    _columns = {
        'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True),
        'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True),
    }
    def print_report(self, cr, uid, ids, context=None):
        """Build the action dictionary that triggers the Work Center Load report.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: IDs of the wizard records holding the chosen options
        @param context: A standard dictionary
        @return: an ``ir.actions.report.xml`` action description
        """
        if context is None:
            context = {}
        # Read the options chosen on the wizard form; at most one wizard
        # record exists per invocation, so take the first result (if any).
        records = self.read(cr, uid, ids, ['time_unit', 'measure_unit'])
        form_values = records[0] if records else {}
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'mrp.workcenter.load',
            'datas': {
                'ids': context.get('active_ids', []),
                'form': form_values,
            },
        }
mrp_workcenter_load()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "e4e501cd83c45ad0737919a8b082fa74",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 143,
"avg_line_length": 36.142857142857146,
"alnum_prop": 0.583399209486166,
"repo_name": "ntiufalara/openerp7",
"id": "265c6ddcea04f4e588fe95b9d1e79e17eb187de6",
"size": "2244",
"binary": false,
"copies": "55",
"ref": "refs/heads/master",
"path": "openerp/addons/mrp/wizard/mrp_workcenter_load.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
"""
Django settings for esite project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = '3fm991d7&g*!j%9kgzq!-#%ioo&iw$%9#+o4z23q@n9km=cg$p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps: CAPTCHA widgets, crispy form rendering, MPTT trees.
    'captcha',
    'crispy_forms',
    'mptt',
    # Local project app.
    'node',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'esite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'esite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# MySQL on localhost; no PORT key, so Django uses the default MySQL port.
# NOTE(review): credentials are hard-coded here; consider environment
# variables or a local settings override.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'fm',
        'USER': 'root',
        'PASSWORD': 'justdoit',
        'HOST': '127.0.0.1',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Define resource root: filesystem location for logs, static files and media.
# The Linux path is kept commented out; the Windows path is currently active.
#RESOURCE_ROOT='/opt/www/fm/'
RESOURCE_ROOT='c:/opt/var/www/fm/'
"""
DEBUG: Low level system information for debugging purposes
INFO: General system information
WARNING: Information describing a minor problem that has occurred.
ERROR: Information describing a major problem that has occurred.
CRITICAL: Information describing a critical problem that has occurred.
"""
# Logging: console plus two file handlers under RESOURCE_ROOT/logs; the
# 'image' logger and Django's request logger are routed to separate files.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '[ %(levelname)s ] %(asctime)s - %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
        'images.logfile': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': os.path.join(RESOURCE_ROOT,'logs/debug.log'),
            'formatter': 'simple',
        },
        'requests.logfile': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': os.path.join(RESOURCE_ROOT,'logs/requests.log'),
            'formatter': 'simple',
        },
    },
    'loggers': {
        'image': {
            'handlers': ['images.logfile', 'console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['requests.logfile'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Variable for production: target of `collectstatic`.
STATIC_ROOT= os.path.join(RESOURCE_ROOT,'static')
# Variables for development: extra locations searched by staticfiles.
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = (
    STATIC_PATH,
)
# Legacy TEMPLATE_DIRS setting (pre-Django-1.8 style); the TEMPLATES list
# above is what Django 1.8 actually reads.
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = (
    TEMPLATE_PATH,
)
# User-uploaded media.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(RESOURCE_ROOT,'media')
CRISPY_TEMPLATE_PACK = 'bootstrap3' | {
"content_hash": "510d7749ce8a33729ee4be17e075ae73",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 95,
"avg_line_length": 26.69832402234637,
"alnum_prop": 0.6258631512868801,
"repo_name": "vollov/filemanager",
"id": "fae0a030b82e5d8d15a3963d609df9f0a1783676",
"size": "4779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/esite/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17213"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
from sentry.utils.cache import memoize
from sentry.utils.hashlib import md5
class EventUser(Model):
    """An end user of a customer's application, as reported with event data.

    Identity is established from the strongest available identifier and
    deduplicated per project through an md5 ``hash`` of that value (see
    ``get_hash``).
    """
    __core__ = False
    # Users are scoped to the project in which they were observed.
    project = FlexibleForeignKey('sentry.Project')
    # md5 hexdigest of the primary identifying value (see get_hash()).
    hash = models.CharField(max_length=32)
    # Application-provided stable user id, if any; the other identity fields
    # are all optional fallbacks.
    ident = models.CharField(max_length=64, null=True)
    email = models.EmailField(null=True)
    username = models.CharField(max_length=64, null=True)
    ip_address = models.GenericIPAddressField(null=True)
    date_added = models.DateTimeField(default=timezone.now, db_index=True)
    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_eventuser'
        # Unique per project by both the raw ident and the derived hash.
        unique_together = (('project', 'ident'), ('project', 'hash'))
        # Supports the common per-project lookups by email/username/IP.
        index_together = (
            ('project', 'email'),
            ('project', 'username'),
            ('project', 'ip_address'),
        )
    __repr__ = sane_repr('project_id', 'ident', 'email', 'username', 'ip_address')
    def save(self, *args, **kwargs):
        """Persist the row, deriving ``hash`` on first save.

        At least one identifying attribute must be set.
        """
        assert self.ident or self.username or self.email or self.ip_address, \
            'No identifying value found for user'
        if not self.hash:
            self.hash = self.get_hash()
        super(EventUser, self).save(*args, **kwargs)
    def get_hash(self):
        # Hash the strongest identifier available, in priority order
        # ident > username > email > ip_address.
        value = self.ident or self.username or self.email or self.ip_address
        return md5(value).hexdigest()
    @memoize
    def tag_value(self):
        """
        Return the identifier used with tags to link this user.

        NOTE(review): the priority here (ident > email > username) differs
        from get_hash's ordering (ident > username > email) — confirm this
        asymmetry is intentional.
        """
        assert self.ident or self.username or self.email or self.ip_address, \
            'No identifying value found for user'
        if self.ident:
            return u'id:{}'.format(self.ident)
        if self.email:
            return u'email:{}'.format(self.email)
        if self.username:
            return u'username:{}'.format(self.username)
        if self.ip_address:
            return u'ip:{}'.format(self.ip_address)
    def get_label(self):
        # Human-readable label, preferring contactable identifiers first.
        return self.email or self.username or self.ident or self.ip_address
| {
"content_hash": "afbcbbd43fc54d1e6c8b8f52a275bc63",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.6240111679851094,
"repo_name": "Natim/sentry",
"id": "18d46930063a99f87c38dc47cad7df74f8ec26e2",
"size": "2149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/models/eventuser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "160813"
},
{
"name": "HTML",
"bytes": "193981"
},
{
"name": "JavaScript",
"bytes": "417570"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6877708"
}
],
"symlink_target": ""
} |
import Adafruit_BBIO.GPIO as GPIO
import time
import subprocess
# Define program constants
BUTTON_PIN = 'P9_11'
LED_PIN = 'P9_12'
# Configure the GPIO pins and set the initial state of the LED indicator.
GPIO.setup(BUTTON_PIN, GPIO.IN)
GPIO.setup(LED_PIN, GPIO.OUT)
GPIO.output(LED_PIN, GPIO.LOW)
# Print out a nice message to let the user know how to quit.
print('Starting, press <control>-c to quit.\n')
# Execute until a keyboard interrupt.
try:
    while True:
        # Block until BUTTON_PIN sees a rising edge, indicating the button
        # has been pressed. (The previous comment said "falling edge", but
        # the code waits on GPIO.RISING.)
        GPIO.wait_for_edge(BUTTON_PIN, GPIO.RISING)
        # Button has been pressed, so turn on the LED and run the program.
        GPIO.output(LED_PIN, GPIO.HIGH)
        subprocess.call(['/path/to/the/program', '-argument'])
        # Program is done, turn off the LED and start waiting again.
        GPIO.output(LED_PIN, GPIO.LOW)
except KeyboardInterrupt:
    # User requested shutdown with <control>-c; fall through to cleanup.
    pass
finally:
    # Release the GPIO pins no matter how the loop exits (Ctrl-C or an
    # unexpected error), so they are not left configured on crash.
    GPIO.cleanup()
| {
"content_hash": "3a6bef5c8a369c7e9f7b738fc9dda366",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.7166666666666667,
"repo_name": "SpinStabilized/bbb-primer",
"id": "4d46c491f9cf3f662ac22651210c7f219262f897",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter09/not_as_simple_button.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1229"
},
{
"name": "JavaScript",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "40014"
},
{
"name": "Shell",
"bytes": "2061"
}
],
"symlink_target": ""
} |
# XML namespace URIs for the YANG modules bundled with this (auto-generated)
# package — one module-level constant per YANG module, used for qualified-name
# construction when encoding/decoding NETCONF payloads.
_global_iana_crypt_hash_nsp = 'urn:ietf:params:xml:ns:yang:iana-crypt-hash'
_global_iana_if_type_nsp = 'urn:ietf:params:xml:ns:yang:iana-if-type'
_global_ietf_diffserv_classifier_nsp = 'urn:ietf:params:xml:ns:yang:ietf-diffserv-classifier'
_global_ietf_diffserv_policy_nsp = 'urn:ietf:params:xml:ns:yang:ietf-diffserv-policy'
_global_ietf_diffserv_target_nsp = 'urn:ietf:params:xml:ns:yang:ietf-diffserv-target'
_global_ietf_inet_types_nsp = 'urn:ietf:params:xml:ns:yang:ietf-inet-types'
_global_ietf_interfaces_nsp = 'urn:ietf:params:xml:ns:yang:ietf-interfaces'
_global_ietf_interfaces_ext_nsp = 'urn:ietf:params:xml:ns:yang:ietf-interfaces-ext'
_global_ietf_ip_nsp = 'urn:ietf:params:xml:ns:yang:ietf-ip'
_global_ietf_ipv4_unicast_routing_nsp = 'urn:ietf:params:xml:ns:yang:ietf-ipv4-unicast-routing'
_global_ietf_ipv6_unicast_routing_nsp = 'urn:ietf:params:xml:ns:yang:ietf-ipv6-unicast-routing'
_global_ietf_key_chain_nsp = 'urn:ietf:params:xml:ns:yang:ietf-key-chain'
# ietf-netconf is the lone module under the netconf:base URN rather than yang:.
_global_ietf_netconf_nsp = 'urn:ietf:params:xml:ns:netconf:base:1.0'
_global_ietf_netconf_acm_nsp = 'urn:ietf:params:xml:ns:yang:ietf-netconf-acm'
_global_ietf_netconf_monitoring_nsp = 'urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring'
_global_ietf_netconf_notifications_nsp = 'urn:ietf:params:xml:ns:yang:ietf-netconf-notifications'
_global_ietf_netconf_with_defaults_nsp = 'urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults'
_global_ietf_ospf_nsp = 'urn:ietf:params:xml:ns:yang:ietf-ospf'
_global_ietf_restconf_monitoring_nsp = 'urn:ietf:params:xml:ns:yang:ietf-restconf-monitoring'
_global_ietf_routing_nsp = 'urn:ietf:params:xml:ns:yang:ietf-routing'
_global_ietf_syslog_types_nsp = 'urn:ietf:params:xml:ns:yang:ietf-syslog-types'
_global_ietf_system_nsp = 'urn:ietf:params:xml:ns:yang:ietf-system'
_global_ietf_yang_library_nsp = 'urn:ietf:params:xml:ns:yang:ietf-yang-library'
_global_ietf_yang_smiv2_nsp = 'urn:ietf:params:xml:ns:yang:ietf-yang-smiv2'
_global_ietf_yang_types_nsp = 'urn:ietf:params:xml:ns:yang:ietf-yang-types'
_namespaces = { \
'iana-crypt-hash' : 'urn:ietf:params:xml:ns:yang:iana-crypt-hash',
'iana-if-type' : 'urn:ietf:params:xml:ns:yang:iana-if-type',
'ietf-diffserv-classifier' : 'urn:ietf:params:xml:ns:yang:ietf-diffserv-classifier',
'ietf-diffserv-policy' : 'urn:ietf:params:xml:ns:yang:ietf-diffserv-policy',
'ietf-diffserv-target' : 'urn:ietf:params:xml:ns:yang:ietf-diffserv-target',
'ietf-inet-types' : 'urn:ietf:params:xml:ns:yang:ietf-inet-types',
'ietf-interfaces' : 'urn:ietf:params:xml:ns:yang:ietf-interfaces',
'ietf-interfaces-ext' : 'urn:ietf:params:xml:ns:yang:ietf-interfaces-ext',
'ietf-ip' : 'urn:ietf:params:xml:ns:yang:ietf-ip',
'ietf-ipv4-unicast-routing' : 'urn:ietf:params:xml:ns:yang:ietf-ipv4-unicast-routing',
'ietf-ipv6-unicast-routing' : 'urn:ietf:params:xml:ns:yang:ietf-ipv6-unicast-routing',
'ietf-key-chain' : 'urn:ietf:params:xml:ns:yang:ietf-key-chain',
'ietf-netconf' : 'urn:ietf:params:xml:ns:netconf:base:1.0',
'ietf-netconf-acm' : 'urn:ietf:params:xml:ns:yang:ietf-netconf-acm',
'ietf-netconf-monitoring' : 'urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring',
'ietf-netconf-notifications' : 'urn:ietf:params:xml:ns:yang:ietf-netconf-notifications',
'ietf-netconf-with-defaults' : 'urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults',
'ietf-ospf' : 'urn:ietf:params:xml:ns:yang:ietf-ospf',
'ietf-restconf-monitoring' : 'urn:ietf:params:xml:ns:yang:ietf-restconf-monitoring',
'ietf-routing' : 'urn:ietf:params:xml:ns:yang:ietf-routing',
'ietf-syslog-types' : 'urn:ietf:params:xml:ns:yang:ietf-syslog-types',
'ietf-system' : 'urn:ietf:params:xml:ns:yang:ietf-system',
'ietf-yang-library' : 'urn:ietf:params:xml:ns:yang:ietf-yang-library',
'ietf-yang-smiv2' : 'urn:ietf:params:xml:ns:yang:ietf-yang-smiv2',
'ietf-yang-types' : 'urn:ietf:params:xml:ns:yang:ietf-yang-types',
}
_identity_map = { \
('iana-if-type', 'a12MppSwitch'):('ydk.models.ietf.iana_if_type', 'A12MppswitchIdentity'),
('iana-if-type', 'aal2'):('ydk.models.ietf.iana_if_type', 'Aal2Identity'),
('iana-if-type', 'aal5'):('ydk.models.ietf.iana_if_type', 'Aal5Identity'),
('iana-if-type', 'actelisMetaLOOP'):('ydk.models.ietf.iana_if_type', 'ActelismetaloopIdentity'),
('iana-if-type', 'adsl2'):('ydk.models.ietf.iana_if_type', 'Adsl2Identity'),
('iana-if-type', 'adsl2plus'):('ydk.models.ietf.iana_if_type', 'Adsl2PlusIdentity'),
('iana-if-type', 'adsl'):('ydk.models.ietf.iana_if_type', 'AdslIdentity'),
('iana-if-type', 'aflane8023'):('ydk.models.ietf.iana_if_type', 'Aflane8023Identity'),
('iana-if-type', 'aflane8025'):('ydk.models.ietf.iana_if_type', 'Aflane8025Identity'),
('iana-if-type', 'aluELP'):('ydk.models.ietf.iana_if_type', 'AluelpIdentity'),
('iana-if-type', 'aluEpon'):('ydk.models.ietf.iana_if_type', 'AlueponIdentity'),
('iana-if-type', 'aluEponLogicalLink'):('ydk.models.ietf.iana_if_type', 'AlueponlogicallinkIdentity'),
('iana-if-type', 'aluEponOnu'):('ydk.models.ietf.iana_if_type', 'AluepononuIdentity'),
('iana-if-type', 'aluEponPhysicalUni'):('ydk.models.ietf.iana_if_type', 'AlueponphysicaluniIdentity'),
('iana-if-type', 'aluGponOnu'):('ydk.models.ietf.iana_if_type', 'AlugpononuIdentity'),
('iana-if-type', 'aluGponPhysicalUni'):('ydk.models.ietf.iana_if_type', 'AlugponphysicaluniIdentity'),
('iana-if-type', 'arap'):('ydk.models.ietf.iana_if_type', 'ArapIdentity'),
('iana-if-type', 'arcnet'):('ydk.models.ietf.iana_if_type', 'ArcnetIdentity'),
('iana-if-type', 'arcnetPlus'):('ydk.models.ietf.iana_if_type', 'ArcnetplusIdentity'),
('iana-if-type', 'async'):('ydk.models.ietf.iana_if_type', 'AsyncIdentity'),
('iana-if-type', 'atm'):('ydk.models.ietf.iana_if_type', 'AtmIdentity'),
('iana-if-type', 'atmbond'):('ydk.models.ietf.iana_if_type', 'AtmbondIdentity'),
('iana-if-type', 'atmDxi'):('ydk.models.ietf.iana_if_type', 'AtmdxiIdentity'),
('iana-if-type', 'atmFuni'):('ydk.models.ietf.iana_if_type', 'AtmfuniIdentity'),
('iana-if-type', 'atmIma'):('ydk.models.ietf.iana_if_type', 'AtmimaIdentity'),
('iana-if-type', 'atmLogical'):('ydk.models.ietf.iana_if_type', 'AtmlogicalIdentity'),
('iana-if-type', 'atmRadio'):('ydk.models.ietf.iana_if_type', 'AtmradioIdentity'),
('iana-if-type', 'atmSubInterface'):('ydk.models.ietf.iana_if_type', 'AtmsubinterfaceIdentity'),
('iana-if-type', 'atmVciEndPt'):('ydk.models.ietf.iana_if_type', 'AtmvciendptIdentity'),
('iana-if-type', 'atmVirtual'):('ydk.models.ietf.iana_if_type', 'AtmvirtualIdentity'),
('iana-if-type', 'aviciOpticalEther'):('ydk.models.ietf.iana_if_type', 'AviciopticaletherIdentity'),
('iana-if-type', 'basicISDN'):('ydk.models.ietf.iana_if_type', 'BasicisdnIdentity'),
('iana-if-type', 'bgppolicyaccounting'):('ydk.models.ietf.iana_if_type', 'BgppolicyaccountingIdentity'),
('iana-if-type', 'bits'):('ydk.models.ietf.iana_if_type', 'BitsIdentity'),
('iana-if-type', 'bridge'):('ydk.models.ietf.iana_if_type', 'BridgeIdentity'),
('iana-if-type', 'bsc'):('ydk.models.ietf.iana_if_type', 'BscIdentity'),
('iana-if-type', 'cableDownstreamRfPort'):('ydk.models.ietf.iana_if_type', 'CabledownstreamrfportIdentity'),
('iana-if-type', 'capwapDot11Bss'):('ydk.models.ietf.iana_if_type', 'Capwapdot11BssIdentity'),
('iana-if-type', 'capwapDot11Profile'):('ydk.models.ietf.iana_if_type', 'Capwapdot11ProfileIdentity'),
('iana-if-type', 'capwapWtpVirtualRadio'):('ydk.models.ietf.iana_if_type', 'CapwapwtpvirtualradioIdentity'),
('iana-if-type', 'cblVectaStar'):('ydk.models.ietf.iana_if_type', 'CblvectastarIdentity'),
('iana-if-type', 'cctEmul'):('ydk.models.ietf.iana_if_type', 'CctemulIdentity'),
('iana-if-type', 'ces'):('ydk.models.ietf.iana_if_type', 'CesIdentity'),
('iana-if-type', 'channel'):('ydk.models.ietf.iana_if_type', 'ChannelIdentity'),
('iana-if-type', 'ciscoISLvlan'):('ydk.models.ietf.iana_if_type', 'CiscoislvlanIdentity'),
('iana-if-type', 'cnr'):('ydk.models.ietf.iana_if_type', 'CnrIdentity'),
('iana-if-type', 'coffee'):('ydk.models.ietf.iana_if_type', 'CoffeeIdentity'),
('iana-if-type', 'compositeLink'):('ydk.models.ietf.iana_if_type', 'CompositelinkIdentity'),
('iana-if-type', 'dcn'):('ydk.models.ietf.iana_if_type', 'DcnIdentity'),
('iana-if-type', 'ddnX25'):('ydk.models.ietf.iana_if_type', 'Ddnx25Identity'),
('iana-if-type', 'digitalPowerline'):('ydk.models.ietf.iana_if_type', 'DigitalpowerlineIdentity'),
('iana-if-type', 'digitalWrapperOverheadChannel'):('ydk.models.ietf.iana_if_type', 'DigitalwrapperoverheadchannelIdentity'),
('iana-if-type', 'dlsw'):('ydk.models.ietf.iana_if_type', 'DlswIdentity'),
('iana-if-type', 'docsCableDownstream'):('ydk.models.ietf.iana_if_type', 'DocscabledownstreamIdentity'),
('iana-if-type', 'docsCableMaclayer'):('ydk.models.ietf.iana_if_type', 'DocscablemaclayerIdentity'),
('iana-if-type', 'docsCableMCmtsDownstream'):('ydk.models.ietf.iana_if_type', 'DocscablemcmtsdownstreamIdentity'),
('iana-if-type', 'docsCableUpstream'):('ydk.models.ietf.iana_if_type', 'DocscableupstreamIdentity'),
('iana-if-type', 'docsCableUpstreamChannel'):('ydk.models.ietf.iana_if_type', 'DocscableupstreamchannelIdentity'),
('iana-if-type', 'docsCableUpstreamRfPort'):('ydk.models.ietf.iana_if_type', 'DocscableupstreamrfportIdentity'),
('iana-if-type', 'ds0Bundle'):('ydk.models.ietf.iana_if_type', 'Ds0BundleIdentity'),
('iana-if-type', 'ds0'):('ydk.models.ietf.iana_if_type', 'Ds0Identity'),
('iana-if-type', 'ds1FDL'):('ydk.models.ietf.iana_if_type', 'Ds1FdlIdentity'),
('iana-if-type', 'ds1'):('ydk.models.ietf.iana_if_type', 'Ds1Identity'),
('iana-if-type', 'ds3'):('ydk.models.ietf.iana_if_type', 'Ds3Identity'),
('iana-if-type', 'dtm'):('ydk.models.ietf.iana_if_type', 'DtmIdentity'),
('iana-if-type', 'dvbAsiIn'):('ydk.models.ietf.iana_if_type', 'DvbasiinIdentity'),
('iana-if-type', 'dvbAsiOut'):('ydk.models.ietf.iana_if_type', 'DvbasioutIdentity'),
('iana-if-type', 'dvbRccDownstream'):('ydk.models.ietf.iana_if_type', 'DvbrccdownstreamIdentity'),
('iana-if-type', 'dvbRccMacLayer'):('ydk.models.ietf.iana_if_type', 'DvbrccmaclayerIdentity'),
('iana-if-type', 'dvbRccUpstream'):('ydk.models.ietf.iana_if_type', 'DvbrccupstreamIdentity'),
('iana-if-type', 'dvbRcsMacLayer'):('ydk.models.ietf.iana_if_type', 'DvbrcsmaclayerIdentity'),
('iana-if-type', 'dvbRcsTdma'):('ydk.models.ietf.iana_if_type', 'DvbrcstdmaIdentity'),
('iana-if-type', 'dvbTdm'):('ydk.models.ietf.iana_if_type', 'DvbtdmIdentity'),
('iana-if-type', 'e1'):('ydk.models.ietf.iana_if_type', 'E1Identity'),
('iana-if-type', 'econet'):('ydk.models.ietf.iana_if_type', 'EconetIdentity'),
('iana-if-type', 'eon'):('ydk.models.ietf.iana_if_type', 'EonIdentity'),
('iana-if-type', 'eplrs'):('ydk.models.ietf.iana_if_type', 'EplrsIdentity'),
('iana-if-type', 'escon'):('ydk.models.ietf.iana_if_type', 'EsconIdentity'),
('iana-if-type', 'ethernet3Mbit'):('ydk.models.ietf.iana_if_type', 'Ethernet3MbitIdentity'),
('iana-if-type', 'ethernetCsmacd'):('ydk.models.ietf.iana_if_type', 'EthernetcsmacdIdentity'),
('iana-if-type', 'fast'):('ydk.models.ietf.iana_if_type', 'FastIdentity'),
('iana-if-type', 'fastEther'):('ydk.models.ietf.iana_if_type', 'FastetherIdentity'),
('iana-if-type', 'fastEtherFX'):('ydk.models.ietf.iana_if_type', 'FastetherfxIdentity'),
('iana-if-type', 'fcipLink'):('ydk.models.ietf.iana_if_type', 'FciplinkIdentity'),
('iana-if-type', 'fddi'):('ydk.models.ietf.iana_if_type', 'FddiIdentity'),
('iana-if-type', 'fibreChannel'):('ydk.models.ietf.iana_if_type', 'FibrechannelIdentity'),
('iana-if-type', 'frameRelay'):('ydk.models.ietf.iana_if_type', 'FramerelayIdentity'),
('iana-if-type', 'frameRelayInterconnect'):('ydk.models.ietf.iana_if_type', 'FramerelayinterconnectIdentity'),
('iana-if-type', 'frameRelayMPI'):('ydk.models.ietf.iana_if_type', 'FramerelaympiIdentity'),
('iana-if-type', 'frameRelayService'):('ydk.models.ietf.iana_if_type', 'FramerelayserviceIdentity'),
('iana-if-type', 'frDlciEndPt'):('ydk.models.ietf.iana_if_type', 'FrdlciendptIdentity'),
('iana-if-type', 'frf16MfrBundle'):('ydk.models.ietf.iana_if_type', 'Frf16MfrbundleIdentity'),
('iana-if-type', 'frForward'):('ydk.models.ietf.iana_if_type', 'FrforwardIdentity'),
('iana-if-type', 'g703at2mb'):('ydk.models.ietf.iana_if_type', 'G703At2MbIdentity'),
('iana-if-type', 'g703at64k'):('ydk.models.ietf.iana_if_type', 'G703At64KIdentity'),
('iana-if-type', 'g9981'):('ydk.models.ietf.iana_if_type', 'G9981Identity'),
('iana-if-type', 'g9982'):('ydk.models.ietf.iana_if_type', 'G9982Identity'),
('iana-if-type', 'g9983'):('ydk.models.ietf.iana_if_type', 'G9983Identity'),
('iana-if-type', 'gfp'):('ydk.models.ietf.iana_if_type', 'GfpIdentity'),
('iana-if-type', 'gigabitEthernet'):('ydk.models.ietf.iana_if_type', 'GigabitethernetIdentity'),
('iana-if-type', 'gpon'):('ydk.models.ietf.iana_if_type', 'GponIdentity'),
('iana-if-type', 'gr303IDT'):('ydk.models.ietf.iana_if_type', 'Gr303IdtIdentity'),
('iana-if-type', 'gr303RDT'):('ydk.models.ietf.iana_if_type', 'Gr303RdtIdentity'),
('iana-if-type', 'gtp'):('ydk.models.ietf.iana_if_type', 'GtpIdentity'),
('iana-if-type', 'h323Gatekeeper'):('ydk.models.ietf.iana_if_type', 'H323GatekeeperIdentity'),
('iana-if-type', 'h323Proxy'):('ydk.models.ietf.iana_if_type', 'H323ProxyIdentity'),
('iana-if-type', 'hdh1822'):('ydk.models.ietf.iana_if_type', 'Hdh1822Identity'),
('iana-if-type', 'hdlc'):('ydk.models.ietf.iana_if_type', 'HdlcIdentity'),
('iana-if-type', 'hdsl2'):('ydk.models.ietf.iana_if_type', 'Hdsl2Identity'),
('iana-if-type', 'hiperlan2'):('ydk.models.ietf.iana_if_type', 'Hiperlan2Identity'),
('iana-if-type', 'hippi'):('ydk.models.ietf.iana_if_type', 'HippiIdentity'),
('iana-if-type', 'hippiInterface'):('ydk.models.ietf.iana_if_type', 'HippiinterfaceIdentity'),
('iana-if-type', 'homepna'):('ydk.models.ietf.iana_if_type', 'HomepnaIdentity'),
('iana-if-type', 'hostPad'):('ydk.models.ietf.iana_if_type', 'HostpadIdentity'),
('iana-if-type', 'hssi'):('ydk.models.ietf.iana_if_type', 'HssiIdentity'),
('iana-if-type', 'hyperchannel'):('ydk.models.ietf.iana_if_type', 'HyperchannelIdentity'),
('iana-if-type', 'iana-interface-type'):('ydk.models.ietf.iana_if_type', 'IanaInterfaceTypeIdentity'),
('iana-if-type', 'ibm370parChan'):('ydk.models.ietf.iana_if_type', 'Ibm370ParchanIdentity'),
('iana-if-type', 'idsl'):('ydk.models.ietf.iana_if_type', 'IdslIdentity'),
('iana-if-type', 'ieee1394'):('ydk.models.ietf.iana_if_type', 'Ieee1394Identity'),
('iana-if-type', 'ieee80211'):('ydk.models.ietf.iana_if_type', 'Ieee80211Identity'),
('iana-if-type', 'ieee80212'):('ydk.models.ietf.iana_if_type', 'Ieee80212Identity'),
('iana-if-type', 'ieee802154'):('ydk.models.ietf.iana_if_type', 'Ieee802154Identity'),
('iana-if-type', 'ieee80216WMAN'):('ydk.models.ietf.iana_if_type', 'Ieee80216WmanIdentity'),
('iana-if-type', 'ieee8023adLag'):('ydk.models.ietf.iana_if_type', 'Ieee8023AdlagIdentity'),
('iana-if-type', 'if-gsn'):('ydk.models.ietf.iana_if_type', 'IfGsnIdentity'),
('iana-if-type', 'ifPwType'):('ydk.models.ietf.iana_if_type', 'IfpwtypeIdentity'),
('iana-if-type', 'ifVfiType'):('ydk.models.ietf.iana_if_type', 'IfvfitypeIdentity'),
('iana-if-type', 'ilan'):('ydk.models.ietf.iana_if_type', 'IlanIdentity'),
('iana-if-type', 'imt'):('ydk.models.ietf.iana_if_type', 'ImtIdentity'),
('iana-if-type', 'infiniband'):('ydk.models.ietf.iana_if_type', 'InfinibandIdentity'),
('iana-if-type', 'interleave'):('ydk.models.ietf.iana_if_type', 'InterleaveIdentity'),
('iana-if-type', 'ip'):('ydk.models.ietf.iana_if_type', 'IpIdentity'),
('iana-if-type', 'ipForward'):('ydk.models.ietf.iana_if_type', 'IpforwardIdentity'),
('iana-if-type', 'ipOverAtm'):('ydk.models.ietf.iana_if_type', 'IpoveratmIdentity'),
('iana-if-type', 'ipOverCdlc'):('ydk.models.ietf.iana_if_type', 'IpovercdlcIdentity'),
('iana-if-type', 'ipOverClaw'):('ydk.models.ietf.iana_if_type', 'IpoverclawIdentity'),
('iana-if-type', 'ipSwitch'):('ydk.models.ietf.iana_if_type', 'IpswitchIdentity'),
('iana-if-type', 'isdn'):('ydk.models.ietf.iana_if_type', 'IsdnIdentity'),
('iana-if-type', 'isdns'):('ydk.models.ietf.iana_if_type', 'IsdnsIdentity'),
('iana-if-type', 'isdnu'):('ydk.models.ietf.iana_if_type', 'IsdnuIdentity'),
('iana-if-type', 'iso88022llc'):('ydk.models.ietf.iana_if_type', 'Iso88022LlcIdentity'),
('iana-if-type', 'iso88023Csmacd'):('ydk.models.ietf.iana_if_type', 'Iso88023CsmacdIdentity'),
('iana-if-type', 'iso88024TokenBus'):('ydk.models.ietf.iana_if_type', 'Iso88024TokenbusIdentity'),
('iana-if-type', 'iso88025CRFPInt'):('ydk.models.ietf.iana_if_type', 'Iso88025CrfpintIdentity'),
('iana-if-type', 'iso88025Dtr'):('ydk.models.ietf.iana_if_type', 'Iso88025DtrIdentity'),
('iana-if-type', 'iso88025Fiber'):('ydk.models.ietf.iana_if_type', 'Iso88025FiberIdentity'),
('iana-if-type', 'iso88025TokenRing'):('ydk.models.ietf.iana_if_type', 'Iso88025TokenringIdentity'),
('iana-if-type', 'iso88026Man'):('ydk.models.ietf.iana_if_type', 'Iso88026ManIdentity'),
('iana-if-type', 'isup'):('ydk.models.ietf.iana_if_type', 'IsupIdentity'),
('iana-if-type', 'l2vlan'):('ydk.models.ietf.iana_if_type', 'L2VlanIdentity'),
('iana-if-type', 'l3ipvlan'):('ydk.models.ietf.iana_if_type', 'L3IpvlanIdentity'),
('iana-if-type', 'l3ipxvlan'):('ydk.models.ietf.iana_if_type', 'L3IpxvlanIdentity'),
('iana-if-type', 'lapb'):('ydk.models.ietf.iana_if_type', 'LapbIdentity'),
('iana-if-type', 'lapd'):('ydk.models.ietf.iana_if_type', 'LapdIdentity'),
('iana-if-type', 'lapf'):('ydk.models.ietf.iana_if_type', 'LapfIdentity'),
('iana-if-type', 'linegroup'):('ydk.models.ietf.iana_if_type', 'LinegroupIdentity'),
('iana-if-type', 'lmp'):('ydk.models.ietf.iana_if_type', 'LmpIdentity'),
('iana-if-type', 'localTalk'):('ydk.models.ietf.iana_if_type', 'LocaltalkIdentity'),
('iana-if-type', 'macSecControlledIF'):('ydk.models.ietf.iana_if_type', 'MacseccontrolledifIdentity'),
('iana-if-type', 'macSecUncontrolledIF'):('ydk.models.ietf.iana_if_type', 'MacsecuncontrolledifIdentity'),
('iana-if-type', 'mediaMailOverIp'):('ydk.models.ietf.iana_if_type', 'MediamailoveripIdentity'),
('iana-if-type', 'mfSigLink'):('ydk.models.ietf.iana_if_type', 'MfsiglinkIdentity'),
('iana-if-type', 'miox25'):('ydk.models.ietf.iana_if_type', 'Miox25Identity'),
('iana-if-type', 'mocaVersion1'):('ydk.models.ietf.iana_if_type', 'Mocaversion1Identity'),
('iana-if-type', 'modem'):('ydk.models.ietf.iana_if_type', 'ModemIdentity'),
('iana-if-type', 'mpc'):('ydk.models.ietf.iana_if_type', 'MpcIdentity'),
('iana-if-type', 'mpegTransport'):('ydk.models.ietf.iana_if_type', 'MpegtransportIdentity'),
('iana-if-type', 'mpls'):('ydk.models.ietf.iana_if_type', 'MplsIdentity'),
('iana-if-type', 'mplsTunnel'):('ydk.models.ietf.iana_if_type', 'MplstunnelIdentity'),
('iana-if-type', 'msdsl'):('ydk.models.ietf.iana_if_type', 'MsdslIdentity'),
('iana-if-type', 'mvl'):('ydk.models.ietf.iana_if_type', 'MvlIdentity'),
('iana-if-type', 'myrinet'):('ydk.models.ietf.iana_if_type', 'MyrinetIdentity'),
('iana-if-type', 'nfas'):('ydk.models.ietf.iana_if_type', 'NfasIdentity'),
('iana-if-type', 'nsip'):('ydk.models.ietf.iana_if_type', 'NsipIdentity'),
('iana-if-type', 'opticalChannel'):('ydk.models.ietf.iana_if_type', 'OpticalchannelIdentity'),
('iana-if-type', 'opticalChannelGroup'):('ydk.models.ietf.iana_if_type', 'OpticalchannelgroupIdentity'),
('iana-if-type', 'opticalTransport'):('ydk.models.ietf.iana_if_type', 'OpticaltransportIdentity'),
('iana-if-type', 'other'):('ydk.models.ietf.iana_if_type', 'OtherIdentity'),
('iana-if-type', 'otnOdu'):('ydk.models.ietf.iana_if_type', 'OtnoduIdentity'),
('iana-if-type', 'otnOtu'):('ydk.models.ietf.iana_if_type', 'OtnotuIdentity'),
('iana-if-type', 'para'):('ydk.models.ietf.iana_if_type', 'ParaIdentity'),
('iana-if-type', 'pdnEtherLoop1'):('ydk.models.ietf.iana_if_type', 'Pdnetherloop1Identity'),
('iana-if-type', 'pdnEtherLoop2'):('ydk.models.ietf.iana_if_type', 'Pdnetherloop2Identity'),
('iana-if-type', 'pip'):('ydk.models.ietf.iana_if_type', 'PipIdentity'),
('iana-if-type', 'plc'):('ydk.models.ietf.iana_if_type', 'PlcIdentity'),
('iana-if-type', 'pon155'):('ydk.models.ietf.iana_if_type', 'Pon155Identity'),
('iana-if-type', 'pon622'):('ydk.models.ietf.iana_if_type', 'Pon622Identity'),
('iana-if-type', 'pos'):('ydk.models.ietf.iana_if_type', 'PosIdentity'),
('iana-if-type', 'ppp'):('ydk.models.ietf.iana_if_type', 'PppIdentity'),
('iana-if-type', 'pppMultilinkBundle'):('ydk.models.ietf.iana_if_type', 'PppmultilinkbundleIdentity'),
('iana-if-type', 'primaryISDN'):('ydk.models.ietf.iana_if_type', 'PrimaryisdnIdentity'),
('iana-if-type', 'propAtm'):('ydk.models.ietf.iana_if_type', 'PropatmIdentity'),
('iana-if-type', 'propBWAp2Mp'):('ydk.models.ietf.iana_if_type', 'Propbwap2MpIdentity'),
('iana-if-type', 'propCnls'):('ydk.models.ietf.iana_if_type', 'PropcnlsIdentity'),
('iana-if-type', 'propDocsWirelessDownstream'):('ydk.models.ietf.iana_if_type', 'PropdocswirelessdownstreamIdentity'),
('iana-if-type', 'propDocsWirelessMaclayer'):('ydk.models.ietf.iana_if_type', 'PropdocswirelessmaclayerIdentity'),
('iana-if-type', 'propDocsWirelessUpstream'):('ydk.models.ietf.iana_if_type', 'PropdocswirelessupstreamIdentity'),
('iana-if-type', 'propMultiplexor'):('ydk.models.ietf.iana_if_type', 'PropmultiplexorIdentity'),
('iana-if-type', 'propPointToPointSerial'):('ydk.models.ietf.iana_if_type', 'ProppointtopointserialIdentity'),
('iana-if-type', 'propVirtual'):('ydk.models.ietf.iana_if_type', 'PropvirtualIdentity'),
('iana-if-type', 'propWirelessP2P'):('ydk.models.ietf.iana_if_type', 'Propwirelessp2PIdentity'),
('iana-if-type', 'proteon10Mbit'):('ydk.models.ietf.iana_if_type', 'Proteon10MbitIdentity'),
('iana-if-type', 'proteon80Mbit'):('ydk.models.ietf.iana_if_type', 'Proteon80MbitIdentity'),
('iana-if-type', 'q2931'):('ydk.models.ietf.iana_if_type', 'Q2931Identity'),
('iana-if-type', 'qam'):('ydk.models.ietf.iana_if_type', 'QamIdentity'),
('iana-if-type', 'qllc'):('ydk.models.ietf.iana_if_type', 'QllcIdentity'),
('iana-if-type', 'radioMAC'):('ydk.models.ietf.iana_if_type', 'RadiomacIdentity'),
('iana-if-type', 'radsl'):('ydk.models.ietf.iana_if_type', 'RadslIdentity'),
('iana-if-type', 'reachDSL'):('ydk.models.ietf.iana_if_type', 'ReachdslIdentity'),
('iana-if-type', 'regular1822'):('ydk.models.ietf.iana_if_type', 'Regular1822Identity'),
('iana-if-type', 'rfc1483'):('ydk.models.ietf.iana_if_type', 'Rfc1483Identity'),
('iana-if-type', 'rfc877x25'):('ydk.models.ietf.iana_if_type', 'Rfc877X25Identity'),
('iana-if-type', 'rpr'):('ydk.models.ietf.iana_if_type', 'RprIdentity'),
('iana-if-type', 'rs232'):('ydk.models.ietf.iana_if_type', 'Rs232Identity'),
('iana-if-type', 'rsrb'):('ydk.models.ietf.iana_if_type', 'RsrbIdentity'),
('iana-if-type', 'sdlc'):('ydk.models.ietf.iana_if_type', 'SdlcIdentity'),
('iana-if-type', 'sdsl'):('ydk.models.ietf.iana_if_type', 'SdslIdentity'),
('iana-if-type', 'shdsl'):('ydk.models.ietf.iana_if_type', 'ShdslIdentity'),
('iana-if-type', 'sip'):('ydk.models.ietf.iana_if_type', 'SipIdentity'),
('iana-if-type', 'sipSig'):('ydk.models.ietf.iana_if_type', 'SipsigIdentity'),
('iana-if-type', 'sipTg'):('ydk.models.ietf.iana_if_type', 'SiptgIdentity'),
('iana-if-type', 'sixToFour'):('ydk.models.ietf.iana_if_type', 'SixtofourIdentity'),
('iana-if-type', 'slip'):('ydk.models.ietf.iana_if_type', 'SlipIdentity'),
('iana-if-type', 'smdsDxi'):('ydk.models.ietf.iana_if_type', 'SmdsdxiIdentity'),
('iana-if-type', 'smdsIcip'):('ydk.models.ietf.iana_if_type', 'SmdsicipIdentity'),
('iana-if-type', 'softwareLoopback'):('ydk.models.ietf.iana_if_type', 'SoftwareloopbackIdentity'),
('iana-if-type', 'sonet'):('ydk.models.ietf.iana_if_type', 'SonetIdentity'),
('iana-if-type', 'sonetOverheadChannel'):('ydk.models.ietf.iana_if_type', 'SonetoverheadchannelIdentity'),
('iana-if-type', 'sonetPath'):('ydk.models.ietf.iana_if_type', 'SonetpathIdentity'),
('iana-if-type', 'sonetVT'):('ydk.models.ietf.iana_if_type', 'SonetvtIdentity'),
('iana-if-type', 'srp'):('ydk.models.ietf.iana_if_type', 'SrpIdentity'),
('iana-if-type', 'ss7SigLink'):('ydk.models.ietf.iana_if_type', 'Ss7SiglinkIdentity'),
('iana-if-type', 'stackToStack'):('ydk.models.ietf.iana_if_type', 'StacktostackIdentity'),
('iana-if-type', 'starLan'):('ydk.models.ietf.iana_if_type', 'StarlanIdentity'),
('iana-if-type', 'tdlc'):('ydk.models.ietf.iana_if_type', 'TdlcIdentity'),
('iana-if-type', 'teLink'):('ydk.models.ietf.iana_if_type', 'TelinkIdentity'),
('iana-if-type', 'termPad'):('ydk.models.ietf.iana_if_type', 'TermpadIdentity'),
('iana-if-type', 'tr008'):('ydk.models.ietf.iana_if_type', 'Tr008Identity'),
('iana-if-type', 'transpHdlc'):('ydk.models.ietf.iana_if_type', 'TransphdlcIdentity'),
('iana-if-type', 'tunnel'):('ydk.models.ietf.iana_if_type', 'TunnelIdentity'),
('iana-if-type', 'ultra'):('ydk.models.ietf.iana_if_type', 'UltraIdentity'),
('iana-if-type', 'usb'):('ydk.models.ietf.iana_if_type', 'UsbIdentity'),
('iana-if-type', 'v11'):('ydk.models.ietf.iana_if_type', 'V11Identity'),
('iana-if-type', 'v35'):('ydk.models.ietf.iana_if_type', 'V35Identity'),
('iana-if-type', 'v36'):('ydk.models.ietf.iana_if_type', 'V36Identity'),
('iana-if-type', 'v37'):('ydk.models.ietf.iana_if_type', 'V37Identity'),
('iana-if-type', 'vdsl2'):('ydk.models.ietf.iana_if_type', 'Vdsl2Identity'),
('iana-if-type', 'vdsl'):('ydk.models.ietf.iana_if_type', 'VdslIdentity'),
('iana-if-type', 'virtualIpAddress'):('ydk.models.ietf.iana_if_type', 'VirtualipaddressIdentity'),
('iana-if-type', 'virtualTg'):('ydk.models.ietf.iana_if_type', 'VirtualtgIdentity'),
('iana-if-type', 'vmwareNicTeam'):('ydk.models.ietf.iana_if_type', 'VmwarenicteamIdentity'),
('iana-if-type', 'vmwareVirtualNic'):('ydk.models.ietf.iana_if_type', 'VmwarevirtualnicIdentity'),
('iana-if-type', 'voiceDID'):('ydk.models.ietf.iana_if_type', 'VoicedidIdentity'),
('iana-if-type', 'voiceEBS'):('ydk.models.ietf.iana_if_type', 'VoiceebsIdentity'),
('iana-if-type', 'voiceEM'):('ydk.models.ietf.iana_if_type', 'VoiceemIdentity'),
('iana-if-type', 'voiceEMFGD'):('ydk.models.ietf.iana_if_type', 'VoiceemfgdIdentity'),
('iana-if-type', 'voiceEncap'):('ydk.models.ietf.iana_if_type', 'VoiceencapIdentity'),
('iana-if-type', 'voiceFGDEANA'):('ydk.models.ietf.iana_if_type', 'VoicefgdeanaIdentity'),
('iana-if-type', 'voiceFGDOS'):('ydk.models.ietf.iana_if_type', 'VoicefgdosIdentity'),
('iana-if-type', 'voiceFXO'):('ydk.models.ietf.iana_if_type', 'VoicefxoIdentity'),
('iana-if-type', 'voiceFXS'):('ydk.models.ietf.iana_if_type', 'VoicefxsIdentity'),
('iana-if-type', 'voiceOverAtm'):('ydk.models.ietf.iana_if_type', 'VoiceoveratmIdentity'),
('iana-if-type', 'voiceOverCable'):('ydk.models.ietf.iana_if_type', 'VoiceovercableIdentity'),
('iana-if-type', 'voiceOverFrameRelay'):('ydk.models.ietf.iana_if_type', 'VoiceoverframerelayIdentity'),
('iana-if-type', 'voiceOverIp'):('ydk.models.ietf.iana_if_type', 'VoiceoveripIdentity'),
('iana-if-type', 'wwanPP2'):('ydk.models.ietf.iana_if_type', 'Wwanpp2Identity'),
('iana-if-type', 'wwanPP'):('ydk.models.ietf.iana_if_type', 'WwanppIdentity'),
('iana-if-type', 'x213'):('ydk.models.ietf.iana_if_type', 'X213Identity'),
('iana-if-type', 'x25huntGroup'):('ydk.models.ietf.iana_if_type', 'X25HuntgroupIdentity'),
('iana-if-type', 'x25mlp'):('ydk.models.ietf.iana_if_type', 'X25MlpIdentity'),
('iana-if-type', 'x25ple'):('ydk.models.ietf.iana_if_type', 'X25PleIdentity'),
('iana-if-type', 'x86Laps'):('ydk.models.ietf.iana_if_type', 'X86LapsIdentity'),
('ietf-diffserv-classifier', 'classifier-entry-filter-operation-type'):('ydk.models.ietf.ietf_diffserv_classifier', 'ClassifierEntryFilterOperationTypeIdentity'),
('ietf-diffserv-classifier', 'destination-ip-address'):('ydk.models.ietf.ietf_diffserv_classifier', 'DestinationIpAddressIdentity'),
('ietf-diffserv-classifier', 'destination-port'):('ydk.models.ietf.ietf_diffserv_classifier', 'DestinationPortIdentity'),
('ietf-diffserv-classifier', 'dscp'):('ydk.models.ietf.ietf_diffserv_classifier', 'DscpIdentity'),
('ietf-diffserv-classifier', 'filter-type'):('ydk.models.ietf.ietf_diffserv_classifier', 'FilterTypeIdentity'),
('ietf-diffserv-classifier', 'match-all-filter'):('ydk.models.ietf.ietf_diffserv_classifier', 'MatchAllFilterIdentity'),
('ietf-diffserv-classifier', 'match-any-filter'):('ydk.models.ietf.ietf_diffserv_classifier', 'MatchAnyFilterIdentity'),
('ietf-diffserv-classifier', 'protocol'):('ydk.models.ietf.ietf_diffserv_classifier', 'ProtocolIdentity'),
('ietf-diffserv-classifier', 'source-ip-address'):('ydk.models.ietf.ietf_diffserv_classifier', 'SourceIpAddressIdentity'),
('ietf-diffserv-classifier', 'source-port'):('ydk.models.ietf.ietf_diffserv_classifier', 'SourcePortIdentity'),
('ietf-diffserv-policy', 'action-type'):('ydk.models.ietf.ietf_diffserv_policy', 'ActionTypeIdentity'),
('ietf-diffserv-target', 'direction'):('ydk.models.ietf.ietf_diffserv_target', 'DirectionIdentity'),
('ietf-diffserv-target', 'inbound'):('ydk.models.ietf.ietf_diffserv_target', 'InboundIdentity'),
('ietf-diffserv-target', 'outbound'):('ydk.models.ietf.ietf_diffserv_target', 'OutboundIdentity'),
('ietf-interfaces', 'interface-type'):('ydk.models.ietf.ietf_interfaces', 'InterfaceTypeIdentity'),
('ietf-ipv4-unicast-routing', 'ipv4-unicast'):('ydk.models.ietf.ietf_ipv4_unicast_routing', 'Ipv4UnicastIdentity'),
('ietf-ipv6-unicast-routing', 'ipv6-unicast'):('ydk.models.ietf.ietf_ipv6_unicast_routing', 'Ipv6UnicastIdentity'),
('ietf-netconf-monitoring', 'netconf-beep'):('ydk.models.ietf.ietf_netconf_monitoring', 'NetconfBeepIdentity'),
('ietf-netconf-monitoring', 'netconf-soap-over-beep'):('ydk.models.ietf.ietf_netconf_monitoring', 'NetconfSoapOverBeepIdentity'),
('ietf-netconf-monitoring', 'netconf-soap-over-https'):('ydk.models.ietf.ietf_netconf_monitoring', 'NetconfSoapOverHttpsIdentity'),
('ietf-netconf-monitoring', 'netconf-ssh'):('ydk.models.ietf.ietf_netconf_monitoring', 'NetconfSshIdentity'),
('ietf-netconf-monitoring', 'netconf-tls'):('ydk.models.ietf.ietf_netconf_monitoring', 'NetconfTlsIdentity'),
('ietf-netconf-monitoring', 'rnc'):('ydk.models.ietf.ietf_netconf_monitoring', 'RncIdentity'),
('ietf-netconf-monitoring', 'rng'):('ydk.models.ietf.ietf_netconf_monitoring', 'RngIdentity'),
('ietf-netconf-monitoring', 'schema-format'):('ydk.models.ietf.ietf_netconf_monitoring', 'SchemaFormatIdentity'),
('ietf-netconf-monitoring', 'transport'):('ydk.models.ietf.ietf_netconf_monitoring', 'TransportIdentity'),
('ietf-netconf-monitoring', 'xsd'):('ydk.models.ietf.ietf_netconf_monitoring', 'XsdIdentity'),
('ietf-netconf-monitoring', 'yang'):('ydk.models.ietf.ietf_netconf_monitoring', 'YangIdentity'),
('ietf-netconf-monitoring', 'yin'):('ydk.models.ietf.ietf_netconf_monitoring', 'YinIdentity'),
('ietf-ospf', 'area-type'):('ydk.models.ietf.ietf_ospf', 'AreaTypeIdentity'),
('ietf-ospf', 'if-link-type'):('ydk.models.ietf.ietf_ospf', 'IfLinkTypeIdentity'),
('ietf-ospf', 'if-link-type-normal'):('ydk.models.ietf.ietf_ospf', 'IfLinkTypeNormalIdentity'),
('ietf-ospf', 'if-link-type-sham-link'):('ydk.models.ietf.ietf_ospf', 'IfLinkTypeShamLinkIdentity'),
('ietf-ospf', 'if-link-type-virtual-link'):('ydk.models.ietf.ietf_ospf', 'IfLinkTypeVirtualLinkIdentity'),
('ietf-ospf', 'normal'):('ydk.models.ietf.ietf_ospf', 'NormalIdentity'),
('ietf-ospf', 'nssa'):('ydk.models.ietf.ietf_ospf', 'NssaIdentity'),
('ietf-ospf', 'operation-mode'):('ydk.models.ietf.ietf_ospf', 'OperationModeIdentity'),
('ietf-ospf', 'ospf'):('ydk.models.ietf.ietf_ospf', 'OspfIdentity'),
('ietf-ospf', 'ospfv2'):('ydk.models.ietf.ietf_ospf', 'Ospfv2Identity'),
('ietf-ospf', 'ospfv3'):('ydk.models.ietf.ietf_ospf', 'Ospfv3Identity'),
('ietf-ospf', 'ships-in-the-night'):('ydk.models.ietf.ietf_ospf', 'ShipsInTheNightIdentity'),
('ietf-ospf', 'stub'):('ydk.models.ietf.ietf_ospf', 'StubIdentity'),
('ietf-routing', 'address-family'):('ydk.models.ietf.ietf_routing', 'AddressFamilyIdentity'),
('ietf-routing', 'default-routing-instance'):('ydk.models.ietf.ietf_routing', 'DefaultRoutingInstanceIdentity'),
('ietf-routing', 'direct'):('ydk.models.ietf.ietf_routing', 'DirectIdentity'),
('ietf-routing', 'ipv4'):('ydk.models.ietf.ietf_routing', 'Ipv4Identity'),
('ietf-routing', 'ipv6'):('ydk.models.ietf.ietf_routing', 'Ipv6Identity'),
('ietf-routing', 'routing-instance'):('ydk.models.ietf.ietf_routing', 'RoutingInstanceIdentity'),
('ietf-routing', 'routing-protocol'):('ydk.models.ietf.ietf_routing', 'RoutingProtocolIdentity'),
('ietf-routing', 'static'):('ydk.models.ietf.ietf_routing', 'StaticIdentity'),
('ietf-routing', 'vrf-routing-instance'):('ydk.models.ietf.ietf_routing', 'VrfRoutingInstanceIdentity'),
('ietf-syslog-types', 'audit'):('ydk.models.ietf.ietf_syslog_types', 'AuditIdentity'),
('ietf-syslog-types', 'auth'):('ydk.models.ietf.ietf_syslog_types', 'AuthIdentity'),
('ietf-syslog-types', 'authpriv'):('ydk.models.ietf.ietf_syslog_types', 'AuthprivIdentity'),
('ietf-syslog-types', 'console'):('ydk.models.ietf.ietf_syslog_types', 'ConsoleIdentity'),
('ietf-syslog-types', 'cron2'):('ydk.models.ietf.ietf_syslog_types', 'Cron2Identity'),
('ietf-syslog-types', 'cron'):('ydk.models.ietf.ietf_syslog_types', 'CronIdentity'),
('ietf-syslog-types', 'daemon'):('ydk.models.ietf.ietf_syslog_types', 'DaemonIdentity'),
('ietf-syslog-types', 'ftp'):('ydk.models.ietf.ietf_syslog_types', 'FtpIdentity'),
('ietf-syslog-types', 'kern'):('ydk.models.ietf.ietf_syslog_types', 'KernIdentity'),
('ietf-syslog-types', 'local0'):('ydk.models.ietf.ietf_syslog_types', 'Local0Identity'),
('ietf-syslog-types', 'local1'):('ydk.models.ietf.ietf_syslog_types', 'Local1Identity'),
('ietf-syslog-types', 'local2'):('ydk.models.ietf.ietf_syslog_types', 'Local2Identity'),
('ietf-syslog-types', 'local3'):('ydk.models.ietf.ietf_syslog_types', 'Local3Identity'),
('ietf-syslog-types', 'local4'):('ydk.models.ietf.ietf_syslog_types', 'Local4Identity'),
('ietf-syslog-types', 'local5'):('ydk.models.ietf.ietf_syslog_types', 'Local5Identity'),
('ietf-syslog-types', 'local6'):('ydk.models.ietf.ietf_syslog_types', 'Local6Identity'),
('ietf-syslog-types', 'local7'):('ydk.models.ietf.ietf_syslog_types', 'Local7Identity'),
('ietf-syslog-types', 'lpr'):('ydk.models.ietf.ietf_syslog_types', 'LprIdentity'),
('ietf-syslog-types', 'mail'):('ydk.models.ietf.ietf_syslog_types', 'MailIdentity'),
('ietf-syslog-types', 'news'):('ydk.models.ietf.ietf_syslog_types', 'NewsIdentity'),
('ietf-syslog-types', 'ntp'):('ydk.models.ietf.ietf_syslog_types', 'NtpIdentity'),
('ietf-syslog-types', 'syslog-facility'):('ydk.models.ietf.ietf_syslog_types', 'SyslogFacilityIdentity'),
('ietf-syslog-types', 'syslog'):('ydk.models.ietf.ietf_syslog_types', 'SyslogIdentity'),
('ietf-syslog-types', 'user'):('ydk.models.ietf.ietf_syslog_types', 'UserIdentity'),
('ietf-syslog-types', 'uucp'):('ydk.models.ietf.ietf_syslog_types', 'UucpIdentity'),
('ietf-system', 'authentication-method'):('ydk.models.ietf.ietf_system', 'AuthenticationMethodIdentity'),
('ietf-system', 'local-users'):('ydk.models.ietf.ietf_system', 'LocalUsersIdentity'),
('ietf-system', 'radius-authentication-type'):('ydk.models.ietf.ietf_system', 'RadiusAuthenticationTypeIdentity'),
('ietf-system', 'radius-chap'):('ydk.models.ietf.ietf_system', 'RadiusChapIdentity'),
('ietf-system', 'radius'):('ydk.models.ietf.ietf_system', 'RadiusIdentity'),
('ietf-system', 'radius-pap'):('ydk.models.ietf.ietf_system', 'RadiusPapIdentity'),
('ietf-yang-smiv2', 'object-identity'):('ydk.models.ietf.ietf_yang_smiv2', 'ObjectIdentityIdentity'),
}
# Maps (module namespace URI, top-level YANG element name) to the Python
# import statement string for the generated class representing that element.
_namespace_package_map = { \
    ('urn:ietf:params:xml:ns:netconf:base:1.0', 'get') : 'from ydk.models.ietf.ietf_netconf import GetRpc',
    ('urn:ietf:params:xml:ns:netconf:base:1.0', 'get-config') : 'from ydk.models.ietf.ietf_netconf import GetConfigRpc',
    ('urn:ietf:params:xml:ns:yang:ietf-diffserv-classifier', 'classifiers') : 'from ydk.models.ietf.ietf_diffserv_classifier import Classifiers',
    ('urn:ietf:params:xml:ns:yang:ietf-diffserv-policy', 'policies') : 'from ydk.models.ietf.ietf_diffserv_policy import Policies',
    ('urn:ietf:params:xml:ns:yang:ietf-interfaces', 'interfaces') : 'from ydk.models.ietf.ietf_interfaces import Interfaces',
    ('urn:ietf:params:xml:ns:yang:ietf-interfaces', 'interfaces-state') : 'from ydk.models.ietf.ietf_interfaces import InterfacesState',
    ('urn:ietf:params:xml:ns:yang:ietf-key-chain', 'key-chains') : 'from ydk.models.ietf.ietf_key_chain import KeyChains',
    ('urn:ietf:params:xml:ns:yang:ietf-netconf-acm', 'nacm') : 'from ydk.models.ietf.ietf_netconf_acm import Nacm',
    ('urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring', 'get-schema') : 'from ydk.models.ietf.ietf_netconf_monitoring import GetSchemaRpc',
    ('urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring', 'netconf-state') : 'from ydk.models.ietf.ietf_netconf_monitoring import NetconfState',
    ('urn:ietf:params:xml:ns:yang:ietf-restconf-monitoring', 'restconf-state') : 'from ydk.models.ietf.ietf_restconf_monitoring import RestconfState',
    ('urn:ietf:params:xml:ns:yang:ietf-routing', 'fib-route') : 'from ydk.models.ietf.ietf_routing import FibRouteRpc',
    ('urn:ietf:params:xml:ns:yang:ietf-routing', 'routing') : 'from ydk.models.ietf.ietf_routing import Routing',
    ('urn:ietf:params:xml:ns:yang:ietf-routing', 'routing-state') : 'from ydk.models.ietf.ietf_routing import RoutingState',
    ('urn:ietf:params:xml:ns:yang:ietf-system', 'system') : 'from ydk.models.ietf.ietf_system import System',
    ('urn:ietf:params:xml:ns:yang:ietf-system', 'system-state') : 'from ydk.models.ietf.ietf_system import SystemState',
    ('urn:ietf:params:xml:ns:yang:ietf-yang-library', 'modules-state') : 'from ydk.models.ietf.ietf_yang_library import ModulesState',
}
| {
"content_hash": "cf868605c8f4315162d4b267ea6f9355",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 166,
"avg_line_length": 89.88888888888889,
"alnum_prop": 0.6824783683559951,
"repo_name": "111pontes/ydk-py",
"id": "31b94f6398fea4b3ea0bc717a569b80f03bfb9df",
"size": "38832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ietf/ydk/models/ietf/_yang_ns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import boto3
import logging
import json
import datetime
import tempfile
# Configure the root logger at INFO for this Lambda module.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import listRecords
def lambda_handler(event, context):
    """AWS Lambda entry point: upload the list of non-NCT ICTRP record IDs
    to S3 as a dated, publicly readable text file.

    :param event: Lambda event payload (unused).
    :param context: Lambda runtime context (unused).
    :return: public HTTPS URL of the uploaded object.
    """
    bucket = 'ictrp-data'
    key = 'idlist/ictrp-idlist-non-nct-{}.txt'.format(
        datetime.datetime.today().strftime('%Y%m%d'))
    idlist = listRecords.ictrpList()
    # TemporaryFile defaults to binary mode ('w+b'); encode explicitly so
    # this works on Python 3 as well (writing str to a binary file raises).
    with tempfile.TemporaryFile() as tmpfile:
        for record_id in idlist:  # renamed from `id` to avoid shadowing the builtin
            tmpfile.write('{}\n'.format(record_id).encode('utf-8'))
        tmpfile.seek(0)
        s3 = boto3.resource('s3')
        s3_object = s3.Bucket(bucket).put_object(Key=key, Body=tmpfile)
        s3_object.Acl().put(ACL='public-read')
    return '{}/{}/{}'.format('https://s3.eu-central-1.amazonaws.com', bucket, key)
"content_hash": "983356bb94ba4c1480d59e44018c23c6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 30.869565217391305,
"alnum_prop": 0.676056338028169,
"repo_name": "gertvv/ictrp-retrieval",
"id": "78efd67996e6b0875ab928a1ea91b336e823c2d8",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambdaListNonNCT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31536"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
} |
'''
Global settings module for Color Switch
By @blopker
'''
import sublime
import os
plugin_name = 'ColorSwitch'
# Packages-relative paths (the form Sublime expects inside settings values).
FOLDER_REL = 'Packages/User/' + plugin_name
FOLDER_THEMES_REL = FOLDER_REL + '/themes'
# Absolute counterparts; populated by load() once the Sublime API is ready.
FOLDER_ABS = None
FOLDER_THEMES_ABS = None
# Settings file names for this plugin and for Sublime's own preferences.
PLUGIN_PREF = 'ColorSwitch.sublime-settings'
SUBLIME_PREF = 'Preferences.sublime-settings'
# Settings objects; replaced with real sublime Settings instances by load().
pluginObj = {}
sublimeObj = {}
def load():
    """Resolve the plugin folders and (re)load both settings objects.

    Must be called after the Sublime API is available, since it calls
    sublime.packages_path() and sublime.load_settings().
    """
    global FOLDER_ABS, FOLDER_THEMES_ABS, pluginObj, sublimeObj
    FOLDER_ABS = os.path.join(sublime.packages_path(), 'User', plugin_name)
    FOLDER_THEMES_ABS = os.path.join(FOLDER_ABS, 'themes')
    pluginObj = sublime.load_settings(PLUGIN_PREF)
    sublimeObj = sublime.load_settings(SUBLIME_PREF)
    # Older settings files may be missing the debug flag; default it to True.
    if pluginObj.get('debug', None) is None:
        pluginObj.set('debug', True)
def get(*args):
    """Proxy to the plugin settings object's ``get`` (key, optional default)."""
    return pluginObj.get(*args)
def get_themes_abs():
    """Return the absolute themes folder path (None until load() has run)."""
    return FOLDER_THEMES_ABS
def get_themes_rel():
    """Return the Packages-relative themes folder path."""
    return FOLDER_THEMES_REL
def get_color_scheme():
    """Return the current 'color_scheme' preference ('' if unset)."""
    return sublimeObj.get('color_scheme', '')
def set_color_scheme(path):
    """Set the 'color_scheme' preference to *path* (call save_user() to persist)."""
    return sublimeObj.set('color_scheme', path)
def save_user():
    """Write the Sublime preferences settings file to disk."""
    sublime.save_settings(SUBLIME_PREF)
def isDebug():
    """Return the plugin's 'debug' setting."""
    return get('debug')
def platform():
    """Return the platform string reported by Sublime."""
    return sublime.platform()
def sublime_version():
    """Return the Sublime Text build number as an int."""
    return int(sublime.version())
| {
"content_hash": "418e364eacfd5868828b07061e30371f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 19.3943661971831,
"alnum_prop": 0.690631808278867,
"repo_name": "blopker/Color-Switch",
"id": "5f7d71c97aaccb46bfcbdc0670ce9f3096e848d2",
"size": "1377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colorswitch/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49819"
},
{
"name": "Shell",
"bytes": "184"
}
],
"symlink_target": ""
} |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
USE_TZ = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
MIDDLEWARE_CLASSES = (
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ratings.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(APP_ROOT, 'tests/test_app/templates'),
)
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
os.path.join(APP_ROOT, 'tests/coverage'))
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
]
INTERNAL_APPS = [
'ratings',
'ratings.tests.test_app',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
SECRET_KEY = 'foobar'
FROM_EMAIL = "info@example.com"
DEFAULT_FROM_EMAIL = FROM_EMAIL
SERVER_EMAIL = FROM_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# App specific settings
| {
"content_hash": "86a90fdf3856e07b50c16ad90db5eb2c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 64,
"avg_line_length": 25.5974025974026,
"alnum_prop": 0.6884830035514967,
"repo_name": "fedosov/django-generic-ratings",
"id": "59d82cfafa93c754901638c0f83c309c3b1c7109",
"size": "1971",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ratings/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5174"
},
{
"name": "JavaScript",
"bytes": "3276"
},
{
"name": "Python",
"bytes": "111770"
}
],
"symlink_target": ""
} |
from bottle import Bottle, route, run, template, static_file, get, jinja2_template as template, post, request, response, redirect
import runtime
import actions
import json
import requests
from datetime import datetime
from dateutil import parser
"""
GetTimeDiff
@param[in] time1, time2
@param[out] timeDiff
"""
# Defaults merged into incoming requests; time1/time2 are strings that
# dateutil.parser can parse, timeDiff is the result in seconds.
defaultInputParams = {'time1': '', 'time2': ''}
defaultOutputParams = {'timeDiff': 0.}
# Simulation context captured by registerAction() and reused by start()/cancel().
sim_parameters = dict()
# Register actions
def registerAction(user, project, version, sim_id):
    """Remember the simulation parameters and register the GetTimeDiff
    action with the Craft runtime."""
    sim_parameters.update({
        'user': user,
        'project': project,
        'version': version,
        'sim_id': sim_id,
    })
    runtime.register_webActions(user, project, version, sim_id,
                                'GetTimeDiff', '/home/actions/GetTimeDiff/')
def start():
    """Handle a GetTimeDiff request: compute (time2 - time1) in seconds,
    POST the result to the runtime output endpoint, then signal success."""
    request_id = request.json['requestId']
    params = actions.applyDefaultValues(request.json['input'], defaultInputParams)
    start_time = parser.parse(params['time1'])
    end_time = parser.parse(params['time2'])
    delta = end_time - start_time

    # Both endpoints share the same prefix; build it once.
    url_base = '{}/api/v1/{}/{}/{}/{}/actions/{}'.format(
        runtime.CRAFT_RUNTIME_SERVER_URL,
        sim_parameters['user'], sim_parameters['project'],
        sim_parameters['version'], sim_parameters['sim_id'], request_id)
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    requests.post(url_base + '/output',
                  data=json.dumps({'timeDiff': delta.total_seconds()}),
                  headers=headers)
    requests.post(url_base + '/success')
    return
def cancel():
    """Notify the Craft runtime that the given request was cancelled."""
    request_id = request.json['requestId']
    cancel_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/cancelation'.format(
        runtime.CRAFT_RUNTIME_SERVER_URL,
        sim_parameters['user'], sim_parameters['project'],
        sim_parameters['version'], sim_parameters['sim_id'], request_id)
    requests.post(cancel_url)
    return
"content_hash": "43ad6177d1add6073434f2e29261e5f0",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 214,
"avg_line_length": 41.895833333333336,
"alnum_prop": 0.7230233714569866,
"repo_name": "sebcreme/SmartAlarmClock",
"id": "7722270c0fda10c690b113a801ff6091fefccb27",
"size": "2011",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/actions/GetTimeDiff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30242"
},
{
"name": "HTML",
"bytes": "8197"
},
{
"name": "JavaScript",
"bytes": "310818"
},
{
"name": "Python",
"bytes": "36859"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import scipy.sparse as sp
import csv
from coclust.CoclustSpecMod import CoclustSpecMod
file_name = "./datasets/classic3.csv"
csv_file = open(file_name, 'rb')
csv_reader = csv.reader(csv_file, delimiter=",")
nb_row, nb_col, nb_clusters = map(int, csv_reader.next())
X = sp.lil_matrix((nb_row, nb_col))
for row in csv_reader:
i, j, v = map(int, row)
X[i, j] = v
model = CoclustSpecMod(n_clusters=nb_clusters)
model.fit(X)
predicted_row_labels = model.row_labels_
for i in range(nb_clusters):
number_of_rows, number_of_columns = model.get_shape(i)
print("Cluster", i, "has", number_of_rows, "rows and",
number_of_columns, "columns.")
| {
"content_hash": "5c299350c5e1cf5ec7597b7b79be5ea9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.6828571428571428,
"repo_name": "sh4rkman/CoPub",
"id": "654b84b31f9e6ef13ee8840edd1ba8fa178e8e01",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/CoclustSpecMod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "141"
},
{
"name": "HTML",
"bytes": "17709"
},
{
"name": "JavaScript",
"bytes": "3172"
},
{
"name": "Python",
"bytes": "19161"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from django.db import models
from corehq.apps.data_analytics.const import NOT_SET, DEFAULT_EXPERIENCED_THRESHOLD, DEFAULT_PERFORMANCE_THRESHOLD
# Flat record describing one row of the GIR export; field order defines the
# column order of the export.
GIRExportRow = namedtuple('GIRExportRow', [
    'domain', 'country', 'sector', 'subsector', 'bu', 'self_service',
    'test', 'start', 'device', 'active_users', 'wam', 'pam',
    'wam_current', 'wam_1_prior', 'wam_2_prior',
    'active_current', 'active_1_prior', 'active_2_prior',
    'using_and_performing', 'not_performing',
    'inactive_experienced', 'inactive_not_experienced',
    'not_experienced', 'not_performing_not_experienced',
    'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'eligible',
    'experienced_threshold', 'performance_threshold',
])
class MALTRow(models.Model):
    """
    Specifies a row for 'Monthly Aggregate Lite Table (MALT)'
    See https://docs.google.com/document/d/1QQ3tzFPs6TWiPiah6YUBCrFILKih6OcJV7444i50o1U/edit

    One row per (month, domain, user, app, device); see Meta.unique_together.
    """
    month = models.DateField(db_index=True)
    # Using TextField instead of CharField, because...
    # postgres doesn't differentiate between Char/Text and there is no hard max-length limit
    user_id = models.TextField()
    username = models.TextField()
    email = models.EmailField()
    user_type = models.TextField()
    domain_name = models.TextField(db_index=True)
    num_of_forms = models.PositiveIntegerField()
    app_id = models.TextField()
    device_id = models.TextField(blank=True, null=True)
    is_app_deleted = models.BooleanField(default=False)
    # Tri-state flags: default is the NOT_SET sentinel from
    # corehq.apps.data_analytics.const until the value is computed.
    wam = models.NullBooleanField(default=NOT_SET)
    pam = models.NullBooleanField(default=NOT_SET)
    use_threshold = models.PositiveSmallIntegerField(default=15)
    experienced_threshold = models.PositiveSmallIntegerField(default=3)
    class Meta:
        unique_together = ('month', 'domain_name', 'user_id', 'app_id', 'device_id')
    @classmethod
    def get_unique_fields(cls):
        """Return the unique-together field names as a plain list."""
        return list(cls._meta.unique_together[0])
class GIRRow(models.Model):
    """One month of Global Impact Report metrics for a single domain.

    Rows are unique per (month, domain_name); export_row() flattens a row
    (plus up to two prior months) into a GIRExportRow for the export sheet.
    """
    month = models.DateField(db_index=True)
    domain_name = models.TextField()
    country = models.TextField(blank=True, null=True)
    sector = models.TextField(blank=True, null=True)
    subsector = models.TextField(blank=True, null=True)
    bu = models.TextField(blank=True, null=True)
    # Tri-state flags defaulting to the NOT_SET sentinel until computed.
    self_service = models.NullBooleanField(default=NOT_SET)
    test_domain = models.NullBooleanField(default=NOT_SET)
    start_date = models.DateField()
    device_id = models.TextField(blank=True, null=True)
    wam = models.NullBooleanField(default=NOT_SET)
    pam = models.NullBooleanField(default=NOT_SET)
    wams_current = models.PositiveIntegerField()
    active_users = models.PositiveIntegerField()
    using_and_performing = models.PositiveIntegerField()
    not_performing = models.PositiveIntegerField()
    inactive_experienced = models.PositiveIntegerField()
    inactive_not_experienced = models.PositiveIntegerField()
    not_experienced = models.PositiveIntegerField()
    not_performing_not_experienced = models.PositiveIntegerField()
    active_ever = models.PositiveIntegerField()
    possibly_exp = models.PositiveIntegerField()
    ever_exp = models.PositiveIntegerField()
    exp_and_active_ever = models.PositiveIntegerField()
    active_in_span = models.PositiveIntegerField()
    eligible_forms = models.PositiveIntegerField()
    performance_threshold = models.PositiveIntegerField(null=True)
    experienced_threshold = models.PositiveIntegerField(null=True)
    class Meta:
        unique_together = ('month', 'domain_name')
    def export_row(self, past_months):
        """Build the GIRExportRow for this row.

        :param past_months: prior GIRRow objects, most recent first; only the
            first two entries are used (one and two months prior).
        :return: GIRExportRow with current and historical WAM/active counts.
        """
        last_month = past_months[0] if past_months else None
        two_months_ago = past_months[1] if len(past_months) > 1 else None
        # WAM counts are only reported when this domain's wam flag is truthy.
        wams_current = self.wams_current if self.wam else 0
        wams_1_prior = last_month.wams_current if last_month and self.wam else 0
        wams_2_prior = two_months_ago.wams_current if two_months_ago and self.wam else 0
        return GIRExportRow(domain=self.domain_name,
                            country=self.country,
                            sector=self.sector,
                            subsector=self.subsector,
                            bu=self.bu,
                            self_service=self.self_service,
                            test=self.test_domain,
                            start=self.start_date,
                            device=self.device_id,
                            active_users=self.active_users,
                            wam=self.wam,
                            pam=self.pam,
                            wam_current=wams_current,
                            wam_1_prior=wams_1_prior,
                            wam_2_prior=wams_2_prior,
                            active_current=self.active_users,
                            active_1_prior=last_month.active_users if last_month else 0,
                            active_2_prior=two_months_ago.active_users if two_months_ago else 0,
                            using_and_performing=self.using_and_performing,
                            not_performing=self.not_performing,
                            inactive_experienced=self.inactive_experienced,
                            inactive_not_experienced=self.inactive_not_experienced,
                            not_experienced=self.not_experienced,
                            not_performing_not_experienced=self.not_performing_not_experienced,
                            d1=self.active_ever,
                            d2=self.possibly_exp,
                            d3=self.ever_exp,
                            d4=self.exp_and_active_ever,
                            d5=self.active_users,
                            d6=self.active_in_span,
                            eligible=self.eligible_forms,
                            experienced_threshold=self.experienced_threshold or DEFAULT_EXPERIENCED_THRESHOLD,
                            performance_threshold=self.performance_threshold or DEFAULT_PERFORMANCE_THRESHOLD)
| {
"content_hash": "6a103e5d2a5d6f656cebd4b92bc8c709",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 114,
"avg_line_length": 48.088,
"alnum_prop": 0.6226917318249875,
"repo_name": "qedsoftware/commcare-hq",
"id": "c4e3ea95dc0be7fbe51a4bd291188d55e1fbe477",
"size": "6011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/data_analytics/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""Helper script to copy files at build time.
This is easier than trying to detect whether to use copy, cp, or something else.
"""
import shutil
import typing as T
def run(args: T.List[str]) -> int:
    """Copy args[0] to args[1], preserving metadata.

    :param args: two-element list [source, destination].
    :return: 0 on success, 1 on any failure.
    """
    source, destination = args[0], args[1]
    try:
        shutil.copy2(source, destination)
    except Exception:
        # Any failure (missing file, permissions, ...) maps to exit code 1.
        return 1
    return 0
| {
"content_hash": "f68ef2b16d16bfdf8e27ff8a13d3ab48",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 20.2,
"alnum_prop": 0.6534653465346535,
"repo_name": "pexip/meson",
"id": "acef2a89bf4974cb730a3dcde2b0b88dc114fe16",
"size": "379",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mesonbuild/scripts/copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "C",
"bytes": "203464"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "59032"
},
{
"name": "CMake",
"bytes": "38429"
},
{
"name": "Cuda",
"bytes": "10592"
},
{
"name": "Cython",
"bytes": "1921"
},
{
"name": "D",
"bytes": "7840"
},
{
"name": "Fortran",
"bytes": "12248"
},
{
"name": "Genie",
"bytes": "476"
},
{
"name": "HTML",
"bytes": "897"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "3768"
},
{
"name": "JavaScript",
"bytes": "150"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "219"
},
{
"name": "Limbo",
"bytes": "28"
},
{
"name": "Meson",
"bytes": "595904"
},
{
"name": "Objective-C",
"bytes": "686"
},
{
"name": "Objective-C++",
"bytes": "378"
},
{
"name": "PowerShell",
"bytes": "4748"
},
{
"name": "Python",
"bytes": "4096804"
},
{
"name": "Roff",
"bytes": "625"
},
{
"name": "Rust",
"bytes": "4039"
},
{
"name": "Shell",
"bytes": "12539"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10033"
},
{
"name": "Verilog",
"bytes": "696"
},
{
"name": "Vim Script",
"bytes": "10684"
},
{
"name": "Yacc",
"bytes": "103"
}
],
"symlink_target": ""
} |
from .resource import Resource
class PublicIPAddress(Resource):
    """Public IP address resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param public_ip_allocation_method: The public IP allocation method.
     Possible values are: 'Static' and 'Dynamic'. Possible values include:
     'Static', 'Dynamic'
    :type public_ip_allocation_method: str or
     ~azure.mgmt.network.v2017_06_01.models.IPAllocationMethod
    :param public_ip_address_version: The public IP address version. Possible
     values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6'
    :type public_ip_address_version: str or
     ~azure.mgmt.network.v2017_06_01.models.IPVersion
    :ivar ip_configuration: The IP configuration associated with the public IP
     address.
    :vartype ip_configuration:
     ~azure.mgmt.network.v2017_06_01.models.IPConfiguration
    :param dns_settings: The FQDN of the DNS record associated with the public
     IP address.
    :type dns_settings:
     ~azure.mgmt.network.v2017_06_01.models.PublicIPAddressDnsSettings
    :param ip_address: The IP address associated with the public IP address
     resource.
    :type ip_address: str
    :param idle_timeout_in_minutes: The idle timeout of the public IP address.
    :type idle_timeout_in_minutes: int
    :param resource_guid: The resource GUID property of the public IP
     resource.
    :type resource_guid: str
    :param provisioning_state: The provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    :param zones: A list of availability zones denoting the IP allocated for
     the resource needs to come from.
    :type zones: list[str]
    """

    # Fields marked readonly are populated by the service and rejected on input.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'ip_configuration': {'readonly': True},
    }

    # Maps Python attribute names to REST payload keys and wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'public_ip_allocation_method': {'key': 'properties.publicIPAllocationMethod', 'type': 'str'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'ip_configuration': {'key': 'properties.ipConfiguration', 'type': 'IPConfiguration'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'PublicIPAddressDnsSettings'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(PublicIPAddress, self).__init__(**kwargs)
        self.public_ip_allocation_method = kwargs.get('public_ip_allocation_method', None)
        self.public_ip_address_version = kwargs.get('public_ip_address_version', None)
        # Read-only (see _validation): always starts as None; the server fills it in.
        self.ip_configuration = None
        self.dns_settings = kwargs.get('dns_settings', None)
        self.ip_address = kwargs.get('ip_address', None)
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None)
        self.resource_guid = kwargs.get('resource_guid', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.etag = kwargs.get('etag', None)
        self.zones = kwargs.get('zones', None)
| {
"content_hash": "2f95fca4a88818da661c3d57c3e631aa",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 101,
"avg_line_length": 45.67032967032967,
"alnum_prop": 0.6470163618864293,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "136b42ddce12dbf473bb678eb63f11d736460b2e",
"size": "4630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/public_ip_address.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``created_by`` FK (to the configured user model)
    on both Album and Picture. Generated migration — do not edit logic."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('edegal', '0009_auto_20180919_0805'),
    ]
    operations = [
        migrations.AddField(
            model_name='album',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='picture',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| {
"content_hash": "6813db73b9cbe589571933e460e7e617",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 121,
"avg_line_length": 31.115384615384617,
"alnum_prop": 0.6452410383189122,
"repo_name": "conikuvat/edegal",
"id": "a4b7be9b8626760ebc8f547f6eb26b6c09d225fd",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/edegal/migrations/0010_auto_20180919_0806.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3136"
},
{
"name": "Dockerfile",
"bytes": "1141"
},
{
"name": "HTML",
"bytes": "507"
},
{
"name": "Python",
"bytes": "151941"
},
{
"name": "Shell",
"bytes": "6154"
},
{
"name": "TypeScript",
"bytes": "48274"
}
],
"symlink_target": ""
} |
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class BitlyAccount(ProviderAccount):
    """Bitly social account wrapper exposing profile URL, avatar and name."""

    def get_profile_url(self):
        return self.account.extra_data.get('profile_url')

    def get_avatar_url(self):
        return self.account.extra_data.get('profile_image')

    def to_str(self):
        # "<full_name> (<default representation>)"
        base = super(BitlyAccount, self).to_str()
        full_name = self.account.extra_data.get('full_name', '')
        return '%s (%s)' % (full_name, base)
class BitlyProvider(OAuth2Provider):
    """OAuth2 provider definition for Bitly."""

    id = 'bitly'
    name = 'Bitly'
    account_class = BitlyAccount

    def extract_uid(self, data):
        # Bitly's stable identifier is the login name.
        return str(data['login'])

    def extract_common_fields(self, data):
        return {
            'username': data['login'],
            'name': data.get('full_name'),
        }


provider_classes = [BitlyProvider]
| {
"content_hash": "e677044b07ffb1ab401ff44413b3620b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 27.151515151515152,
"alnum_prop": 0.6428571428571429,
"repo_name": "AltSchool/django-allauth",
"id": "6a80f92c6055b8fa3a42bed5f43eda46f0ebc8a2",
"size": "896",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/bitly/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "42255"
},
{
"name": "JavaScript",
"bytes": "3360"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "760358"
}
],
"symlink_target": ""
} |
# Fill in VERSION and YEAR in the macOS Info.plist template.
from string import Template
from datetime import date

bitcoinDir = "./"

inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Bells-Qt.app/Contents/Info.plist"

# Grab VERSION from the qmake project file.
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
# `with` closes the handles deterministically (the original leaked all three
# file objects and relied on interpreter exit to flush the output).
with open(fileForGrabbingVersion) as versionFile:
    for line in versionFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

with open(inFile, "r") as fIn:
    fileContent = fIn.read()

newFileContent = Template(fileContent).substitute(VERSION=version,
                                                  YEAR=date.today().year)

with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

# Parenthesized print works on both Python 2 and 3.
print("Info.plist fresh created")
| {
"content_hash": "26de4ae27c76807eff7887ebc4d90c55",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.125,
"alnum_prop": 0.7081413210445469,
"repo_name": "bells-coin/bells-coin",
"id": "c6d0a0c184820bbc36ba2337c5676a2c20773e87",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32790"
},
{
"name": "C++",
"bytes": "2616183"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "12478"
},
{
"name": "NSIS",
"bytes": "5866"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "3770"
},
{
"name": "QMake",
"bytes": "15088"
},
{
"name": "Shell",
"bytes": "8323"
}
],
"symlink_target": ""
} |
import os
import gevent
import pkgutil
import importlib
import trace
from work_queue import WorkQueue
from sandesh_logger import SandeshLogger
from sandesh_client import SandeshClient
from sandesh_http import SandeshHttp
from sandesh_uve import SandeshUVETypeMaps, SandeshUVEPerTypeMap
from sandesh_stats import SandeshStats
from sandesh_trace import SandeshTraceRequestRunner
from util import *
from gen_py.sandesh.ttypes import SandeshType, SandeshLevel
from gen_py.sandesh.constants import *
class Sandesh(object):
    """Base sandesh message type and generator-side library handle.

    A single module-global instance (``sandesh_global``) owns the shared
    machinery -- logger, collector client, trace buffers, HTTP introspect
    server and the UVE type registry -- while generated message classes
    subclass this type and carry per-message state (context, sequence
    number, level, category, ...).
    """

    _DEFAULT_LOG_FILE = SandeshLogger._DEFAULT_LOG_FILE
    _DEFAULT_SYSLOG_FACILITY = SandeshLogger._DEFAULT_SYSLOG_FACILITY

    class SandeshRole:
        """Role played by this sandesh instance."""
        INVALID = 0
        GENERATOR = 1
        COLLECTOR = 2
    # end class SandeshRole

    def __init__(self):
        self._context = ''
        self._scope = ''
        self._module = ''
        self._source = ''
        self._node_type = ''
        self._instance_id = ''
        self._timestamp = 0
        self._versionsig = 0
        self._type = 0
        self._hints = 0
        self._client_context = ''
        self._client = None
        self._role = self.SandeshRole.INVALID
        self._logger = None
        self._level = SandeshLevel.INVALID
        self._category = ''
        self._send_queue_enabled = True
        self._http_server = None
    # end __init__

    # Public functions

    def init_generator(self, module, source, node_type, instance_id,
                       collectors, client_context,
                       http_port, sandesh_req_uve_pkg_list=None,
                       discovery_client=None):
        """Initialize this instance as a sandesh generator.

        Sets up logging, stats, trace buffers and the request/UVE
        registries, optionally starts the HTTP introspect server
        (``http_port != -1``), and initiates the client connection to
        the first one or two collectors in ``collectors``.
        """
        self._role = self.SandeshRole.GENERATOR
        self._module = module
        self._source = source
        self._node_type = node_type
        self._instance_id = instance_id
        self._client_context = client_context
        self._collectors = collectors
        self._rcv_queue = WorkQueue(self._process_rx_sandesh)
        self._init_logger(source + ':' + module + ':' + node_type + ':' +
                          instance_id)
        self._stats = SandeshStats()
        self._trace = trace.Trace()
        self._sandesh_request_dict = {}
        self._uve_type_maps = SandeshUVETypeMaps()
        if sandesh_req_uve_pkg_list is None:
            sandesh_req_uve_pkg_list = []
        # Initialize the request handling
        # Import here to break the cyclic import dependency
        import sandesh_req_impl
        sandesh_req_impl = sandesh_req_impl.SandeshReqImpl(self)
        sandesh_req_uve_pkg_list.append('pysandesh.gen_py')
        for pkg_name in sandesh_req_uve_pkg_list:
            self._create_sandesh_request_and_uve_lists(pkg_name)
        if http_port != -1:
            self._http_server = SandeshHttp(
                self, module, http_port, sandesh_req_uve_pkg_list)
            gevent.spawn(self._http_server.start_http_server)
        primary_collector = None
        secondary_collector = None
        if self._collectors is not None:
            if len(self._collectors) > 0:
                primary_collector = self._collectors[0]
            if len(self._collectors) > 1:
                secondary_collector = self._collectors[1]
        self._client = SandeshClient(
            self, primary_collector, secondary_collector,
            discovery_client)
        self._client.initiate()
    # end init_generator

    def logger(self):
        return self._logger
    # end logger

    def sandesh_logger(self):
        return self._sandesh_logger
    # end sandesh_logger

    def set_logging_params(self, enable_local_log=False, category='',
                           level=SandeshLevel.SYS_INFO,
                           file=SandeshLogger._DEFAULT_LOG_FILE,
                           enable_syslog=False,
                           syslog_facility=_DEFAULT_SYSLOG_FACILITY):
        """Configure local/syslog logging in one call."""
        self._sandesh_logger.set_logging_params(
            enable_local_log, category, level, file,
            enable_syslog, syslog_facility)
    # end set_logging_params

    def set_local_logging(self, enable_local_log):
        self._sandesh_logger.set_local_logging(enable_local_log)
    # end set_local_logging

    def set_logging_level(self, level):
        self._sandesh_logger.set_logging_level(level)
    # end set_logging_level

    def set_logging_category(self, category):
        self._sandesh_logger.set_logging_category(category)
    # end set_logging_category

    def set_logging_file(self, file):
        self._sandesh_logger.set_logging_file(file)
    # end set_logging_file

    def is_send_queue_enabled(self):
        return self._send_queue_enabled
    # end is_send_queue_enabled

    def set_send_queue(self, enable):
        """Enable/disable the send queue; kick the runner when re-enabled."""
        if self._send_queue_enabled != enable:
            self._logger.info("SANDESH: CLIENT: SEND QUEUE: %s -> %s",
                              self._send_queue_enabled, enable)
            self._send_queue_enabled = enable
            if enable:
                connection = self._client.connection()
                if connection and connection.session():
                    connection.session().send_queue().may_be_start_runner()
    # end set_send_queue

    def init_collector(self):
        pass
    # end init_collector

    def stats(self):
        return self._stats
    # end stats

    @classmethod
    def next_seqnum(cls):
        """Return the next class-wide sequence number (starts at 1)."""
        if not hasattr(cls, '_lseqnum'):
            cls._lseqnum = 1
        else:
            cls._lseqnum += 1
        return cls._lseqnum
    # end next_seqnum

    @classmethod
    def lseqnum(cls):
        """Return the last issued sequence number (0 if none issued yet)."""
        if not hasattr(cls, '_lseqnum'):
            cls._lseqnum = 0
        return cls._lseqnum
    # end lseqnum

    # Simple accessors for per-message/per-instance state.

    def module(self):
        return self._module
    # end module

    def source_id(self):
        return self._source
    # end source_id

    def node_type(self):
        return self._node_type
    # end node_type

    def instance_id(self):
        return self._instance_id
    # end instance_id

    def scope(self):
        return self._scope
    # end scope

    def context(self):
        return self._context
    # end context

    def seqnum(self):
        return self._seqnum
    # end seqnum

    def timestamp(self):
        return self._timestamp
    # end timestamp

    def versionsig(self):
        return self._versionsig
    # end versionsig

    def type(self):
        return self._type
    # end type

    def hints(self):
        return self._hints
    # end hints

    def client(self):
        return self._client
    # end client

    def level(self):
        return self._level
    # end level

    def category(self):
        return self._category
    # end category

    def validate(self):
        # No-op hook; generated message classes override this.
        return
    # end validate

    def is_local_logging_enabled(self):
        return self._sandesh_logger.is_local_logging_enabled()
    # end is_local_logging_enabled

    def logging_level(self):
        return self._sandesh_logger.logging_level()
    # end logging_level

    def logging_category(self):
        return self._sandesh_logger.logging_category()
    # end logging_category

    def is_syslog_logging_enabled(self):
        return self._sandesh_logger.is_syslog_logging_enabled()
    # end is_syslog_logging_enabled

    def logging_syslog_facility(self):
        return self._sandesh_logger.logging_syslog_facility()
    # end logging_syslog_facility

    def is_unit_test(self):
        return self._role == self.SandeshRole.INVALID
    # end is_unit_test

    def handle_test(self, sandesh_init):
        """Short-circuit sending when running under unit test.

        Returns True when the message was consumed locally (unit-test
        mode or a UT-range level), in which case it must not be sent.
        """
        if sandesh_init.is_unit_test() or self._is_level_ut():
            # Fixed: previously called the non-existent
            # self._is_logging_allowed(), which raised AttributeError.
            if self.is_logging_allowed(sandesh_init):
                sandesh_init._logger.debug(self.log())
            return True
        return False
    # end handle_test

    def is_logging_allowed(self, sandesh_init):
        """Return True when this message passes the level/category filters."""
        if not sandesh_init.is_local_logging_enabled():
            return False
        logging_level = sandesh_init.logging_level()
        level_allowed = logging_level >= self._level
        logging_category = sandesh_init.logging_category()
        if logging_category is None or len(logging_category) == 0:
            category_allowed = True
        else:
            category_allowed = logging_category == self._category
        return level_allowed and category_allowed
    # end is_logging_allowed

    def enqueue_sandesh_request(self, sandesh):
        self._rcv_queue.enqueue(sandesh)
    # end enqueue_sandesh_request

    def send_sandesh(self, tx_sandesh):
        """Forward a message to the client, or just log it when unconnected."""
        if self._client:
            self._client.send_sandesh(tx_sandesh)
        else:
            self._logger.debug(tx_sandesh.log())
    # end send_sandesh

    def send_generator_info(self):
        """Publish a SandeshModuleClientTrace UVE describing this client."""
        from gen_py.sandesh_uve.ttypes import SandeshClientInfo, \
            ModuleClientState, SandeshModuleClientTrace
        client_info = SandeshClientInfo()
        # Record the start time once, on the first call (previously done
        # with a bare try/except around an unused local).
        if not hasattr(self, '_start_time'):
            self._start_time = UTCTimestampUsec()
        client_info.start_time = self._start_time
        client_info.pid = os.getpid()
        if self._http_server is not None:
            client_info.http_port = self._http_server.get_port()
        client_info.collector_name = self._client.connection().collector()
        client_info.status = self._client.connection().state()
        client_info.successful_connections = \
            self._client.connection().statemachine().connect_count()
        client_info.primary = self._client.connection().primary_collector()
        if client_info.primary is None:
            client_info.primary = ''
        client_info.secondary = \
            self._client.connection().secondary_collector()
        if client_info.secondary is None:
            client_info.secondary = ''
        module_state = ModuleClientState(name=self._source + ':' +
                                         self._node_type + ':' +
                                         self._module + ':' +
                                         self._instance_id,
                                         client_info=client_info)
        generator_info = SandeshModuleClientTrace(
            data=module_state, sandesh=self)
        generator_info.send(sandesh=self)
    # end send_generator_info

    def get_sandesh_request_object(self, request):
        """Instantiate the registered sandesh request class named *request*.

        Returns None (with an error log) when the request is unknown, its
        module cannot be imported, or the class is missing.
        """
        try:
            req_module = self._sandesh_request_dict[request]
        except KeyError:
            self._logger.error('Invalid Sandesh Request "%s"' % (request))
            return None
        else:
            if req_module:
                try:
                    imp_module = importlib.import_module(req_module)
                except ImportError:
                    self._logger.error(
                        'Failed to import Module "%s"' % (req_module))
                else:
                    try:
                        sandesh_request = getattr(imp_module, request)()
                        return sandesh_request
                    except AttributeError:
                        self._logger.error(
                            'Failed to create Sandesh Request "%s"' %
                            (request))
                        return None
            else:
                self._logger.error(
                    'Sandesh Request "%s" not implemented' % (request))
            return None
    # end get_sandesh_request_object

    # Trace buffer management -- thin wrappers over trace.Trace.

    def trace_enable(self):
        self._trace.TraceOn()
    # end trace_enable

    def trace_disable(self):
        self._trace.TraceOff()
    # end trace_disable

    def is_trace_enabled(self):
        return self._trace.IsTraceOn()
    # end is_trace_enabled

    def trace_buffer_create(self, name, size, enable=True):
        self._trace.TraceBufAdd(name, size, enable)
    # end trace_buffer_create

    def trace_buffer_delete(self, name):
        self._trace.TraceBufDelete(name)
    # end trace_buffer_delete

    def trace_buffer_enable(self, name):
        self._trace.TraceBufOn(name)
    # end trace_buffer_enable

    def trace_buffer_disable(self, name):
        self._trace.TraceBufOff(name)
    # end trace_buffer_disable

    def is_trace_buffer_enabled(self, name):
        return self._trace.IsTraceBufOn(name)
    # end is_trace_buffer_enabled

    def trace_buffer_list_get(self):
        return self._trace.TraceBufListGet()
    # end trace_buffer_list_get

    def trace_buffer_size_get(self, name):
        return self._trace.TraceBufSizeGet(name)
    # end trace_buffer_size_get

    def trace_buffer_read(self, name, read_context, count, read_cb):
        self._trace.TraceRead(name, read_context, count, read_cb)
    # end trace_buffer_read

    def trace_buffer_read_done(self, name, context):
        self._trace.TraceReadDone(name, context)
    # end trace_buffer_read_done

    # API to send the trace buffer to the Collector.
    # If trace count is not specified/or zero, then the entire trace buffer
    # is sent to the Collector.
    # [Note] No duplicate trace message sent to the Collector. i.e., If there
    # is no trace message added between two consequent calls to this API, then
    # no trace message is sent to the Collector.
    def send_sandesh_trace_buffer(self, trace_buf, count=0):
        trace_req_runner = SandeshTraceRequestRunner(
            sandesh=self,
            request_buffer_name=trace_buf,
            request_context='',
            read_context='Collector',
            request_count=count)
        trace_req_runner.Run()
    # end send_sandesh_trace_buffer

    # Private functions

    def _is_level_ut(self):
        return self._level >= SandeshLevel.UT_START and \
            self._level <= SandeshLevel.UT_END
    # end _is_level_ut

    def _create_task(self):
        return gevent.spawn(self._runner.run_for_ever)
    # end _create_task

    def _process_rx_sandesh(self, rx_sandesh):
        # WorkQueue callback: dispatch a received request to its handler.
        handle_request_fn = getattr(rx_sandesh, "handle_request", None)
        if callable(handle_request_fn):
            handle_request_fn(rx_sandesh)
        else:
            self._logger.error('Sandesh Request "%s" not implemented' %
                               (rx_sandesh.__class__.__name__))
    # end _process_rx_sandesh

    def _create_sandesh_request_and_uve_lists(self, package):
        """Walk *package* and register requests/UVEs from 'ttypes' modules."""
        try:
            imp_pkg = __import__(package)
        except ImportError:
            self._logger.error('Failed to import package "%s"' % (package))
        else:
            try:
                pkg_path = imp_pkg.__path__
            except AttributeError:
                self._logger.error(
                    'Failed to get package [%s] path' % (package))
                return
            for importer, mod, ispkg in \
                    pkgutil.walk_packages(path=pkg_path,
                                          prefix=imp_pkg.__name__ + '.'):
                if not ispkg:
                    module = mod.rsplit('.', 1)[-1]
                    if 'ttypes' == module:
                        self._logger.debug(
                            'Add Sandesh requests in module "%s"' % (mod))
                        self._add_sandesh_request(mod)
                        self._logger.debug(
                            'Add Sandesh UVEs in module "%s"' % (mod))
                        self._add_sandesh_uve(mod)
    # end _create_sandesh_request_and_uve_lists

    def _add_sandesh_request(self, mod):
        """Register every request in *mod*'s _SANDESH_REQUEST_LIST."""
        try:
            imp_module = importlib.import_module(mod)
        except ImportError:
            self._logger.error('Failed to import Module "%s"' % (mod))
        else:
            try:
                sandesh_req_list = getattr(imp_module, '_SANDESH_REQUEST_LIST')
            except AttributeError:
                self._logger.error(
                    '"%s" module does not have sandesh request list' % (mod))
            else:
                # Add sandesh requests to the dictionary.
                for req in sandesh_req_list:
                    self._sandesh_request_dict[req] = mod
    # end _add_sandesh_request

    def _get_sandesh_uve_list(self, imp_module):
        try:
            sandesh_uve_list = getattr(imp_module, '_SANDESH_UVE_LIST')
        except AttributeError:
            self._logger.error(
                '"%s" module does not have sandesh UVE list' %
                (imp_module.__name__))
            return None
        else:
            return sandesh_uve_list
    # end _get_sandesh_uve_list

    def _get_sandesh_uve_data_list(self, imp_module):
        try:
            sandesh_uve_data_list = getattr(imp_module,
                                            '_SANDESH_UVE_DATA_LIST')
        except AttributeError:
            self._logger.error(
                '"%s" module does not have sandesh UVE data list' %
                (imp_module.__name__))
            return None
        else:
            return sandesh_uve_data_list
    # end _get_sandesh_uve_data_list

    def _add_sandesh_uve(self, mod):
        """Pair *mod*'s UVE and UVE-data lists and register each UVE type."""
        try:
            imp_module = importlib.import_module(mod)
        except ImportError:
            self._logger.error('Failed to import Module "%s"' % (mod))
        else:
            sandesh_uve_list = self._get_sandesh_uve_list(imp_module)
            sandesh_uve_data_list = self._get_sandesh_uve_data_list(imp_module)
            if sandesh_uve_list is None or sandesh_uve_data_list is None:
                return
            if len(sandesh_uve_list) != len(sandesh_uve_data_list):
                self._logger.error(
                    '"%s" module sandesh UVE and UVE data list do not match' %
                    (mod))
                return
            sandesh_uve_info_list = zip(sandesh_uve_list,
                                        sandesh_uve_data_list)
            # Register sandesh UVEs
            for uve_type_name, uve_data_type_name in sandesh_uve_info_list:
                SandeshUVEPerTypeMap(self, uve_type_name,
                                     uve_data_type_name, mod)
    # end _add_sandesh_uve

    def _init_logger(self, generator):
        if not generator:
            generator = 'sandesh'
        self._sandesh_logger = SandeshLogger(generator)
        self._logger = self._sandesh_logger.logger()
    # end _init_logger
# end class Sandesh
sandesh_global = Sandesh()


class SandeshAsync(Sandesh):
    """Base class for asynchronously sent sandesh messages."""

    def __init__(self):
        Sandesh.__init__(self)
    # end __init__

    def send(self, sandesh=sandesh_global):
        """Validate and send this message; return 0 on success, -1 on error."""
        try:
            self.validate()
        except Exception as e:
            # Fixed: was "except e:", which raised NameError on the undefined
            # name 'e' and masked the real validation failure.
            sandesh._logger.error('sandesh "%s" validation failed [%s]' %
                                  (self.__class__.__name__, e))
            return -1
        self._seqnum = self.next_seqnum()
        if self.handle_test(sandesh):
            return 0
        sandesh.send_sandesh(self)
        return 0
    # end send
# end class SandeshAsync
class SandeshSystem(SandeshAsync):
    """Asynchronous sandesh carrying a system log message."""

    def __init__(self):
        super(SandeshSystem, self).__init__()
        self._type = SandeshType.SYSTEM
    # end __init__
# end class SandeshSystem
class SandeshObject(SandeshAsync):
    """Asynchronous sandesh carrying an object log message."""

    def __init__(self):
        super(SandeshObject, self).__init__()
        self._type = SandeshType.OBJECT
    # end __init__
# end class SandeshObject
class SandeshFlow(SandeshAsync):
    """Asynchronous sandesh carrying a flow log message."""

    def __init__(self):
        super(SandeshFlow, self).__init__()
        self._type = SandeshType.FLOW
    # end __init__
# end class SandeshFlow
class SandeshRequest(Sandesh):
    """Sandesh request message (typically issued via introspect)."""

    def __init__(self):
        Sandesh.__init__(self)
        self._type = SandeshType.REQUEST
    # end __init__

    def request(self, context='', sandesh=sandesh_global):
        """Validate and send this request; return 0 on success, -1 on error."""
        try:
            self.validate()
        except Exception as e:
            # Fixed: was "except e:" (NameError on undefined 'e'); catch the
            # real validation failure instead of masking it.
            sandesh._logger.error('sandesh "%s" validation failed [%s]' %
                                  (self.__class__.__name__, e))
            return -1
        if context == 'ctrl':
            # Control requests are flagged so the collector treats them
            # specially.
            self._hints |= SANDESH_CONTROL_HINT
        self._context = context
        self._seqnum = self.next_seqnum()
        if self.handle_test(sandesh):
            return 0
        sandesh.send_sandesh(self)
        return 0
    # end request
# end class SandeshRequest
class SandeshResponse(Sandesh):
    """Sandesh response message, sent in reply to a SandeshRequest."""

    def __init__(self):
        Sandesh.__init__(self)
        self._type = SandeshType.RESPONSE
        self._more = False
    # end __init__

    def response(self, context='', more=False, sandesh=sandesh_global):
        """Validate and send this response; return 0 on success, -1 on error.

        When the request arrived over HTTP introspect (context begins with
        'http://'), the response is routed back through the HTTP server
        instead of the collector.
        """
        try:
            self.validate()
        except Exception as e:
            # Fixed: was "except e:" (NameError on undefined 'e'); catch the
            # real validation failure instead of masking it.
            sandesh._logger.error('sandesh "%s" validation failed [%s]' %
                                  (self.__class__.__name__, e))
            return -1
        self._context = context
        self._more = more
        self._seqnum = self.next_seqnum()
        if self._context.startswith('http://'):
            SandeshHttp.create_http_response(self, sandesh)
        else:
            if self.handle_test(sandesh):
                return 0
            sandesh.send_sandesh(self)
        return 0
    # end response
# end class SandeshResponse
class SandeshUVE(Sandesh):
    """Sandesh UVE (User Visible Entity) message."""

    def __init__(self):
        Sandesh.__init__(self)
        self._type = SandeshType.UVE
        self._more = False
    # end __init__

    def send(self, isseq=False, seqno=0, context='',
             more=False, sandesh=sandesh_global):
        """Validate and send this UVE; return 0 on success, -1 on error.

        With ``isseq`` the caller supplies the sequence number (resync);
        otherwise the UVE is merged into the per-type UVE cache and gets a
        fresh sequence number.
        """
        try:
            self.validate()
        except Exception as e:
            # Fixed: was "except e:" (NameError on undefined 'e'); catch the
            # real validation failure instead of masking it.
            sandesh._logger.error('sandesh "%s" validation failed [%s]' %
                                  (self.__class__.__name__, e))
            return -1
        if isseq is True:
            self._seqnum = seqno
        else:
            uve_type_map = sandesh._uve_type_maps.get_uve_type_map(
                self.__class__.__name__)
            if uve_type_map is None:
                # Unregistered UVE type; nothing to update or send.
                return -1
            self._seqnum = self.next_seqnum()
            uve_type_map.update_uve(self)
        self._context = context
        self._more = more
        if self._context.startswith('http://'):
            SandeshHttp.create_http_response(self, sandesh)
        else:
            if self.handle_test(sandesh):
                return 0
            if sandesh._client:
                sandesh._client.send_uve_sandesh(self)
            else:
                sandesh._logger.debug(self.log())
        return 0
    # end send
# end class SandeshUVE
class SandeshTrace(Sandesh):
    """Sandesh trace message, written into a named trace buffer."""

    def __init__(self, type):
        Sandesh.__init__(self)
        self._type = type
        self._more = False
    # end __init__

    def send_trace(self, context='', more=False,
                   sandesh=sandesh_global):
        """Validate and send this trace; return 0 on success, -1 on error."""
        try:
            self.validate()
        except Exception as e:
            # Fixed: was "except e:" (NameError on undefined 'e'); catch the
            # real validation failure instead of masking it.
            sandesh._logger.error('sandesh "%s" validation failed [%s]' %
                                  (self.__class__.__name__, e))
            return -1
        self._context = context
        self._more = more
        if self._context.startswith('http://'):
            SandeshHttp.create_http_response(self, sandesh)
        else:
            if self.handle_test(sandesh):
                return 0
            sandesh.send_sandesh(self)
        return 0
    # end send_trace

    def trace_msg(self, name, sandesh=sandesh_global):
        if sandesh._trace.IsTraceOn() and sandesh._trace.IsTraceBufOn(name):
            # store the trace buffer name in category
            self._category = name
            self._seqnum = sandesh._trace.TraceWrite(name, self)
    # end trace_msg
# end class SandeshTrace
| {
"content_hash": "ab8312cda9478fedf30b90d429682a04",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 82,
"avg_line_length": 32.6991643454039,
"alnum_prop": 0.558821023937303,
"repo_name": "JioCloud/contrail-sandesh",
"id": "22b3e978106f5bb10eb2ee9b54661aa624736d60",
"size": "23563",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "library/python/pysandesh/sandesh_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import sys
sys.path.append("..")
from demo_2_awac import och_2_awac
def end_relabel(path):
    """Goal-relabel one trajectory in place.

    Overwrites the second half of every (next_)observation with the final
    achieved state and recomputes sparse rewards from proximity of
    observation dimension 2 to its final value.  Returns the same dict.
    """
    obs = path['observations']
    half = obs.shape[1] // 2
    final_state = obs[-1, :half]
    # Second half of each observation row is the goal slot; point it at the
    # trajectory's final achieved (first-half) state.
    obs[:, half:] = final_state
    path['next_observations'][:, half:] = final_state
    # Reward 1.0 where dimension 2 is within 0.05 of its final value.
    close = np.abs(obs[:, 2] - obs[-1, 2]) < 0.05
    path['rewards'] = close.astype(np.float32)
    return path
# Load two recorded "close only" trajectories, convert them to AWAC format,
# crop and goal-relabel them, and dump the result to a pickle for training.
bc_relabeled = []
paths = ['../rpl_reset_free/recordings/recording_close_only_0.pkl',
         '../rpl_reset_free/recordings/recording_close_only_1.pkl',]
for path in paths:
    data = pickle.load(open(path,'rb'))
    awac_formatted_list = och_2_awac(data)
    for l in awac_formatted_list:
        # Keep only timesteps 40..159 of every array in the trajectory.
        for k in l.keys():
            l[k] = l[k][40:160]
        # Keep only observation columns 7..26.
        l['observations'] = l['observations'][:, 7:27]
        l['next_observations'] = l['next_observations'][:, 7:27]
    for dat in awac_formatted_list:
        relabeled_list = end_relabel(dat)
        bc_relabeled += [relabeled_list]
# Report the total number of relabeled timesteps collected.
lens = sum(len(a['observations']) for a in bc_relabeled)
print(lens)
output_path = '/usr/local/google/home/abhishekunique/sim_franka/rlkit/demos_slider_close_targeted_noqp_new.pkl'
pickle.dump(bc_relabeled, open(output_path,'wb'))
# Drop into an interactive shell for manual inspection of the result.
import IPython
IPython.embed() | {
"content_hash": "72d198b6c3d70729623c8afcb4cfb6f9",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 111,
"avg_line_length": 35.69230769230769,
"alnum_prop": 0.6501436781609196,
"repo_name": "google-research/DBAP-algorithm",
"id": "16492762cc1f028b472b41af6d30d01d3f43f320",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/rlkit_library/scripts/demos_laststep.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5294"
}
],
"symlink_target": ""
} |
from distutils.core import setup
def _read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    import os
    full_path = os.path.join(os.path.dirname(__file__), fname)
    return open(full_path).read()
# Package metadata for pykmaze, a command-line communicator for
# Keymaze 500/700 GPS watches (distutils-based packaging).
setup(
    name='pykmaze',
    version='0.2.0',
    description='Keymaze 500-700 communicator',
    author='Emmanuel Blot',
    author_email='emmanuel.blot@free.fr',
    license='MIT',
    keywords = 'keymaze geonaute kml kmz gps',
    url='http://github.com/eblot/pykmaze',
    download_url='https://github.com/eblot/pykmaze/tarball/master',
    packages=['pykmaze'],
    # pyserial >= 2.5 is needed to talk to the device over a serial port.
    requires=['serial (>= 2.5)'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Healthcare Industry',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Topic :: Utilities'
    ],
    # Reuse the README as the PyPI long description.
    long_description=_read('README.rst'),
)
| {
"content_hash": "c79dd7689dd3936f651f551ec6e1f596",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 33.878787878787875,
"alnum_prop": 0.6118067978533095,
"repo_name": "eblot/pykmaze",
"id": "7c7a1e8e14afc48a8c148c89f88a62f0a81bbeee",
"size": "2290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34748"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
class TextLog(object):
    """
    String-handling base for line-based text logs.

    Records are tab-separated fields whose content is unicode-escaped so
    embedded tabs and newlines cannot break the framing.  This class does
    not implement a usable log by itself and is intended to be subclassed.
    """

    def format(self, elements):
        # Escape each field, then frame the record with tab separators.
        escaped = [el.encode('unicode_escape') for el in elements]
        return b'\t'.join(escaped)

    def parse(self, record):
        fields = record.split(b'\t')
        return [f.decode('unicode_escape') for f in fields]
| {
"content_hash": "813bdd62a3a339e66741c3b054084020",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.6889312977099237,
"repo_name": "storborg/manhattan",
"id": "7bba80aa64117543ca2bff694577304662a1afcb",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manhattan/log/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "355"
},
{
"name": "Python",
"bytes": "100827"
}
],
"symlink_target": ""
} |
"""
definitions for Data Types
"""
# TODO - consider server providing types, similar to commands
__all__ = ['dtypes', 'vector', 'datetime', 'matrix']
import numpy as np
import json
import re
from pyspark.sql import types
import logging
logger = logging.getLogger('sparktk')
from datetime import datetime
import dateutil.parser as datetime_parser
import pytz
# Chose python's datetime over numpy.datetime64 because of time zone support and string serialization
# Here's a long thread discussing numpy's datetime64 timezone problem:
# http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html
# If need be, UDFs can create numpy objects from x using: numpy.datatime64(x.isoformat())
class _Matrix(object):
    """Data type descriptor for arbitrary-shape float64 numpy matrices."""

    base_type = np.ndarray
    re_pattern = "matrix"

    def __init__(self):
        self.constructor = self._get_constructor()

    def _get_constructor(self):
        def constructor(value):
            """
            Creates a numpy ndarray from a value, which can be one of many types
            """
            if value is None:
                return None
            else:
                # Force float64 so the matrix is entirely made of doubles.
                return np.array(value, dtype=np.float64)
        return constructor

    @property
    def is_complex_type(self):
        return True

    @staticmethod
    def get_from_string(data_type_str):
        if _Matrix.re_pattern != data_type_str:
            # Fixed: was ``raise "Invalid data type"`` -- raising a string
            # literal is itself a TypeError and never produced a usable error.
            raise ValueError("Invalid data type '%s' for matrix" % data_type_str)
        return _Matrix()

    def __repr__(self):
        return "matrix"


matrix = _Matrix()
class _Vector(object):
    """Data type descriptor for fixed-length float64 numpy vectors."""

    base_type = np.ndarray
    re_pattern = re.compile(r"^vector\((\d+)\)$")

    def __init__(self, length):
        self.length = int(length)
        self.constructor = self._get_constructor()

    def _get_constructor(self):
        expected_length = self.length

        def constructor(value):
            """
            Creates a numpy array from a value, which can be one of many types
            """
            if value is None:
                return None
            try:
                # First try numpy's own constructor; float64 guarantees the
                # vector is entirely made of doubles.
                array = np.array(value, dtype=np.float64)
            except:
                # Fall back to JSON or a comma-separated string.
                if not dtypes.value_is_string(value):
                    raise
                try:
                    parsed = json.loads(value)
                except:
                    parsed = [np.float64(piece.strip())
                              for piece in value.split(',') if piece]
                array = np.array(parsed, dtype=np.float64)
            # numpy thing: guarantees size-1 vectors still have a dimension
            # and a length.
            array = np.atleast_1d(array)
            if len(array) != expected_length:
                raise ValueError("Could not construct vector in Python Client. Expected vector of length %s, but received length %d" % (expected_length, len(array)))
            return array
        return constructor

    @property
    def is_complex_type(self):
        return True

    @staticmethod
    def get_from_string(data_type_str):
        match = _Vector.re_pattern.match(data_type_str)
        return _Vector(match.group(1))

    def __repr__(self):
        return "vector(%d)" % self.length


vector = _Vector
# NOTE: this module is Python 2 only -- it relies on the `long` and `unicode`
# builtins and on dict.iteritems() below.

# map types to their string identifier
_primitive_type_to_str_table = {
    float: "float64",
    int: "int32",
    long: "int64",
    unicode: "unicode",
    datetime: "datetime",
}

# map the pyspark sql type to the primitive data type
_pyspark_type_to_primitive_type_table = {
    #types.BooleanType : bool,
    types.LongType : int,
    types.IntegerType : int,
    types.DoubleType : float,
    types.DecimalType : float,
    types.StringType : str,
    types.TimestampType : datetime
}

# map data type to pyspark sql type
_data_type_to_pyspark_type_table = {
    int: types.IntegerType(),
    long: types.LongType(),
    float: types.DoubleType(),
    str: types.StringType(),
    unicode: types.StringType(),
    datetime: types.TimestampType()
}

# build reverse map string -> type
_primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()])

# Aliases: convenience types callers may pass that are normalized to the
# canonical supported type (e.g. plain `list` is treated as a vector).
_primitive_alias_type_to_type_table = {
    int: int,
    long: long,
    str: unicode,
    list: vector,
    np.ndarray: matrix,
}

# Same aliasing, keyed by the alias's name string; "string" is an extra alias.
_primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()])
_primitive_alias_str_to_type_table["string"] = unicode
def datetime_to_ms(date_time):
    """
    Returns the number of milliseconds since epoch (1970-01-01).
    :param date_time: (datetime) Date/time to convert to timestamp
    :return: Timestamp (number of ms since epoch)
    """
    if isinstance(date_time, datetime):
        # NOTE(review): strftime("%s") is a non-portable platform extension
        # that renders the timestamp in the *local* timezone -- confirm that
        # callers always pass naive local-time datetimes.
        ms = long(date_time.strftime("%s")) * 1000.0
        # strftime("%s") has second resolution; add the sub-second part.
        ms += date_time.microsecond // 1000
        return long(ms)
    else:
        raise TypeError("Unable to calculate the number of milliseconds since epoch for type: %s" % type(date_time))
def ms_to_datetime_str(ms):
    """
    Returns the date/time string for the specified timestamp (milliseconds since epoch).

    :param ms: Milliseconds since epoch (int or long)
    :return: Date/time string in UTC, ISO-8601 style with millisecond
             precision and a 'Z' suffix
    :raises TypeError: if ms is not an int/long
    """
    if isinstance(ms, long) or isinstance(ms, int):
        return datetime.fromtimestamp(ms/1000.0, tz=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    else:
        # Fixed: the original format string ended in a bare "%" (missing the
        # 's' conversion), which raised a ValueError while building this
        # TypeError's message and hid the real problem.
        raise TypeError("Unable to convert timestamp milliseconds to a date/time string, because the value provided " +
                        "is not a long/int. Unsupported type: %s" % type(ms))
def datetime_constructor(value):
    """Parse *value* as a date/time and return milliseconds since epoch."""
    if dtypes.value_is_string(value):
        # Flexible string formats are handled by dateutil's parser.
        return datetime_to_ms(datetime_parser.parse(value))
    if isinstance(value, (long, int)):
        # Already a millisecond timestamp; pass it through untouched.
        return value
    try:
        # Otherwise assume a (year, month, day, ...) component sequence.
        return datetime_to_ms(datetime(*value))
    except:
        raise TypeError("cannot convert type to the datetime")
def numpy_to_bson_friendly(obj):
    """Recursively coerce *obj* into types that BSON can serialize."""
    # Numbers: force native Python scalars (also normalizes numpy scalars
    # that subclass float/int).
    if isinstance(obj, float):
        return float(obj)
    if isinstance(obj, int):
        return int(obj)
    # numpy arrays become nested lists.
    if isinstance(obj, vector.base_type):
        return obj.tolist()
    # datetimes are rendered as ISO-8601 strings.
    if isinstance(obj, datetime):
        return obj.isoformat()
    # dicts are converted key-by-key and value-by-value.
    if isinstance(obj, dict):
        return dict((numpy_to_bson_friendly(key), numpy_to_bson_friendly(value))
                    for key, value in obj.items())
    # Anything else is returned unchanged; the downstream BSON encoder will
    # raise TypeError if it is unsupported.
    return obj
class _DataTypes(object):
"""
Provides functions with define and operate on supported data types.
"""
def __contains__(self, item):
try:
self.validate(item)
return True
except ValueError:
return False
def __repr__(self):
aliases = "\n(and aliases: %s)" % (", ".join(sorted(["%s->%s" % (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()])))
return ", ".join(sorted(_primitive_str_to_type_table.keys() + ["vector(n)"]+["matrix"])) + aliases
@staticmethod
def value_is_string(value):
"""get bool indication that value is a string, whether str or unicode"""
return isinstance(value, basestring)
@staticmethod
def value_is_missing_value(value):
return value is None
@staticmethod
def get_primitive_data_types():
return _primitive_type_to_str_table.keys()
@staticmethod
def to_string(data_type):
"""
Returns the string representation of the given type
Parameters
----------
data_type : type
valid data type; if invalid, a ValueError is raised
Returns
-------
result : str
string representation
Examples
--------
>>> dtypes.to_string(float)
'float32'
"""
valid_data_type = _DataTypes.get_from_type(data_type)
try:
return _primitive_type_to_str_table[valid_data_type]
except KeyError:
# complex data types should use their repr
return repr(valid_data_type)
@staticmethod
def get_from_string(data_type_str):
"""
Returns the data type for the given type string representation
Parameters
----------
data_type_str : str
valid data type str; if invalid, a ValueError is raised
Returns
-------
result : type
type represented by the string
Examples
--------
>>> dtypes.get_from_string('unicode')
unicode
"""
try:
return _primitive_str_to_type_table[data_type_str]
except KeyError:
try:
return _primitive_alias_str_to_type_table[data_type_str]
except KeyError:
try:
if data_type_str == 'matrix':
return matrix.get_from_string(data_type_str)
else:
return vector.get_from_string(data_type_str)
except:
raise ValueError("Unsupported type string '%s' " % data_type_str)
@staticmethod
def is_primitive_type(data_type):
return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table
@staticmethod
def is_complex_type(data_type):
try:
return data_type.is_complex_type
except AttributeError:
return False
@staticmethod
def is_primitive_alias_type(data_type):
return data_type in _primitive_alias_type_to_type_table
@staticmethod
def is_int(data_type):
return data_type in [int, int, long]
    @staticmethod
    def is_float(data_type):
        """Return True only for the exact builtin float type.

        NOTE(review): the identity check excludes numpy float types such as
        float32/float64 -- confirm that is intended.
        """
        return data_type is float
@staticmethod
def get_from_type(data_type):
"""
Returns the data type for the given type (often it will return the same type)
Parameters
----------
data_type : type
valid data type or type that may be aliased for a valid data type;
if invalid, a ValueError is raised
Returns
-------
result : type
valid data type for given type
Examples
--------
>>> dtypes.get_from_type(int)
numpy.int32
"""
if _DataTypes.is_primitive_alias_type(data_type):
return _primitive_alias_type_to_type_table[data_type]
if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type):
return data_type
raise ValueError("Unsupported type %s" % data_type)
    @staticmethod
    def validate(data_type):
        """Raises a ValueError if data_type is not a valid data_type"""
        # Delegates to get_from_type purely for its validation side effect;
        # the resolved type is intentionally discarded.
        _DataTypes.get_from_type(data_type)
    @staticmethod
    def get_constructor(to_type):
        """gets the constructor for the to_type"""
        try:
            # Complex types expose their own 'constructor' attribute.
            return to_type.constructor
        except AttributeError:
            if to_type == datetime:
                return datetime_constructor
            # Fallback: call the type itself, passing None through as missing.
            def constructor(value):
                if value is None:
                    return None
                try:
                    return to_type(value)
                except Exception as e:
                    # Logs the failing value before re-raising for the caller to wrap.
                    print "ERROR for value %s. %s" % (value, e)
                    raise
            return constructor
@staticmethod
def cast(value, to_type):
"""
Returns the given value cast to the given type. None is always returned as None
Parameters
----------
value : object
value to convert by casting
to_type : type
valid data type to use for the cast
Returns
-------
results : object
the value cast to the to_type
Examples
--------
>>> dtypes.cast(3, float)
3.0
>>> dtypes.cast(4.5, str)
'4.5'
>>> dtypes.cast(None, str)
None
>>> dtypes.cast(np.inf, float)
None
"""
if _DataTypes.value_is_missing_value(value): # Special handling for missing values
return None
elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization
return value
try:
constructor = _DataTypes.get_constructor(to_type)
result = constructor(value)
return None if _DataTypes.value_is_missing_value(result) else result
except Exception as e:
raise ValueError(("Unable to cast to type %s\n" % to_type) + str(e))
    @staticmethod
    def datetime_from_iso(iso_string):
        """create datetime object from ISO 8601 string"""
        # datetime_parser is bound elsewhere in this module -- presumably
        # dateutil.parser; confirm against the module's imports.
        return datetime_parser.parse(iso_string)
@staticmethod
def get_primitive_type_from_pyspark_type(pyspark_type):
"""
Get the primitive type for the specified pyspark sql data type.
:param pyspark_type: pyspark.sql.types data type
:return: Primitive data type
"""
if _pyspark_type_to_primitive_type_table.has_key(pyspark_type):
return _pyspark_type_to_primitive_type_table[pyspark_type]
else:
raise ValueError("Unable to cast pyspark type %s to primitive type." % str(pyspark_type))
@staticmethod
def merge_types (type_a, type_b):
"""
Returns the lowest common denominator data type for the specified types.
:param type_a: Data type a to compare
:param type_b: Data type b t compare
:return: Merged data type
"""
merged = unicode
numeric_types = [float, long, int, bool] # numeric types in rank order
if type_a == type_b:
merged = type_a
elif type_a == unicode or type_b == unicode:
merged = unicode
elif type_a == str or type_b == str:
merged = str
elif type_a in numeric_types and type_b in numeric_types:
if numeric_types.index(type_a) > numeric_types.index(type_b):
merged = type_b
else:
merged = type_a
elif isinstance(type_a, vector) and isinstance(type_b, vector):
if type_a.length != type_b.length:
raise ValueError("Vectors must all be the same length (found vectors with length %s and %s)." % (type_a.length, type_b.length))
merged = type_a
return merged
# Shared module-level instance through which callers access the _DataTypes helpers.
dtypes = _DataTypes()
| {
"content_hash": "74a5127189b99fb8d9ef4f7ca8fe1612",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 191,
"avg_line_length": 31.93695652173913,
"alnum_prop": 0.5890000680688857,
"repo_name": "rodorad/spark-tk",
"id": "47588a448165c0352758625a1dfd1b6f656fc367",
"size": "15396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sparktk/dtypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11509"
},
{
"name": "Jupyter Notebook",
"bytes": "152677"
},
{
"name": "Python",
"bytes": "1542925"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "1509526"
},
{
"name": "Shell",
"bytes": "24942"
}
],
"symlink_target": ""
} |
"""Tests for spline models and fitters"""
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import FittableModel, ModelDefinitionError
from astropy.modeling.fitting import (
SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
from astropy.modeling.spline import Spline1D, _Spline, _SplineFitter
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
# Shared fixture data for the spline tests below.
npts = 50
nknots = 10
np.random.seed(42)  # deterministic weights/noise so test results are reproducible
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
# NOTE(review): "wieght" is a typo for "weight"; the name is kept as-is because
# parametrized tests elsewhere in this file may reference it.
wieght_tests = [None, test_w]
smoothing_tests = [None, 0.01]
class TestSpline:
    """Unit tests for the abstract ``_Spline`` base class, driven by mock objects
    and a minimal concrete ``Spline`` subclass built in ``setup_class``."""
    def setup_class(self):
        # Mock-valued optional inputs and extra kwargs shared by all tests.
        self.num_opt = 3
        self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
        self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
        class Spline(_Spline):
            optional_inputs = {'test': 'test'}
            def _init_parameters(self):
                super()._init_parameters()
            def _init_data(self, knots, coeffs, bounds=None):
                super()._init_data(knots, coeffs, bounds=bounds)
        self.Spline = Spline
    def test___init__(self):
        # empty spline
        spl = self.Spline()
        assert spl._t is None
        assert spl._c is None
        assert spl._user_knots is False
        assert spl._degree is None
        assert spl._test is None
        assert not hasattr(spl, 'degree')
        # Call _init_spline
        with mk.patch.object(_Spline, '_init_spline',
                             autospec=True) as mkInit:
            # No call (knots=None)
            spl = self.Spline()
            assert mkInit.call_args_list == []
            knots = mk.MagicMock()
            coeffs = mk.MagicMock()
            bounds = mk.MagicMock()
            spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
            assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)]
            # _init_spline is mocked out, so no state is actually set.
            assert spl._t is None
            assert spl._c is None
            assert spl._user_knots is False
            assert spl._degree is None
            assert spl._test is None
        # Coeffs but no knots
        with pytest.raises(ValueError) as err:
            self.Spline(coeffs=mk.MagicMock())
        assert str(err.value) == "If one passes a coeffs vector one needs to also pass knots!"
    def test_param_names(self):
        # no parameters
        spl = self.Spline()
        assert spl.param_names == ()
        knot_names = tuple(mk.MagicMock() for _ in range(3))
        spl._knot_names = knot_names
        assert spl.param_names == knot_names
        coeff_names = tuple(mk.MagicMock() for _ in range(3))
        spl._coeff_names = coeff_names
        assert spl.param_names == knot_names + coeff_names
    def test__optional_arg(self):
        spl = self.Spline()
        assert spl._optional_arg('test') == '_test'
    def test__create_optional_inputs(self):
        class Spline(self.Spline):
            optional_inputs = self.optional_inputs
            def __init__(self):
                self._create_optional_inputs()
        spl = Spline()
        for arg in self.optional_inputs:
            attribute = spl._optional_arg(arg)
            assert hasattr(spl, attribute)
            assert getattr(spl, attribute) is None
        # Creating the same optional inputs twice must fail.
        with pytest.raises(ValueError,
                           match=r"Optional argument .* already exists in this class!"):
            spl._create_optional_inputs()
    def test__intercept_optional_inputs(self):
        class Spline(self.Spline):
            optional_inputs = self.optional_inputs
            def __init__(self):
                self._create_optional_inputs()
        spl = Spline()
        # Non-optional kwargs pass through untouched.
        new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
        for arg, value in self.optional_inputs.items():
            attribute = spl._optional_arg(arg)
            assert getattr(spl, attribute) is None
        assert new_kwargs == self.extra_kwargs
        # Optional kwargs are captured onto the instance and removed from kwargs.
        kwargs = self.extra_kwargs.copy()
        for arg in self.optional_inputs:
            kwargs[arg] = mk.MagicMock()
        new_kwargs = spl._intercept_optional_inputs(**kwargs)
        for arg, value in self.optional_inputs.items():
            attribute = spl._optional_arg(arg)
            assert getattr(spl, attribute) is not None
            assert getattr(spl, attribute) == kwargs[arg]
            assert getattr(spl, attribute) != value
            assert arg not in new_kwargs
        assert new_kwargs == self.extra_kwargs
        assert kwargs != self.extra_kwargs
        # Intercepting again while values are still set is an error.
        with pytest.raises(RuntimeError,
                           match=r".* has already been set, something has gone wrong!"):
            spl._intercept_optional_inputs(**kwargs)
    def test_evaluate(self):
        class Spline(self.Spline):
            optional_inputs = self.optional_inputs
        spl = Spline()
        # No options passed in and No options set
        new_kwargs = spl.evaluate(**self.extra_kwargs)
        for arg, value in self.optional_inputs.items():
            assert new_kwargs[arg] == value
        for arg, value in self.extra_kwargs.items():
            assert new_kwargs[arg] == value
        assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
        # No options passed in and Options set
        kwargs = self.extra_kwargs.copy()
        for arg in self.optional_inputs:
            kwargs[arg] = mk.MagicMock()
        spl._intercept_optional_inputs(**kwargs)
        new_kwargs = spl.evaluate(**self.extra_kwargs)
        assert new_kwargs == kwargs
        # evaluate() consumes (clears) the stored optional values.
        for arg in self.optional_inputs:
            attribute = spl._optional_arg(arg)
            assert getattr(spl, attribute) is None
        # Options passed in
        set_kwargs = self.extra_kwargs.copy()
        for arg in self.optional_inputs:
            kwargs[arg] = mk.MagicMock()
        spl._intercept_optional_inputs(**set_kwargs)
        kwargs = self.extra_kwargs.copy()
        for arg in self.optional_inputs:
            kwargs[arg] = mk.MagicMock()
        assert set_kwargs != kwargs
        # Explicitly passed options win over stored ones.
        new_kwargs = spl.evaluate(**kwargs)
        assert new_kwargs == kwargs
    def test___call__(self):
        spl = self.Spline()
        args = tuple(mk.MagicMock() for _ in range(3))
        kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
        new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
        # __call__ must route kwargs through _intercept_optional_inputs before
        # delegating to FittableModel.__call__.
        with mk.patch.object(_Spline, "_intercept_optional_inputs",
                             autospec=True, return_value=new_kwargs) as mkIntercept:
            with mk.patch.object(FittableModel, "__call__",
                                 autospec=True) as mkCall:
                assert mkCall.return_value == spl(*args, **kwargs)
                assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)]
                assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)]
    def test__create_parameter(self):
        np.random.seed(37)
        base_vec = np.random.random(20)
        test = base_vec.copy()
        fixed_test = base_vec.copy()
        class Spline(self.Spline):
            @property
            def test(self):
                return test
            @property
            def fixed_test(self):
                return fixed_test
        spl = Spline()
        assert (spl.test == test).all()
        assert (spl.fixed_test == fixed_test).all()
        # Non-fixed parameters: value stays linked to the backing vector both ways.
        for index in range(20):
            name = f"test_name{index}"
            spl._create_parameter(name, index, 'test')
            assert hasattr(spl, name)
            param = getattr(spl, name)
            assert isinstance(param, Parameter)
            assert param.model == spl
            assert param.fixed is False
            assert param.value == test[index] == spl.test[index] == base_vec[index]
            new_set = np.random.random()
            param.value = new_set
            assert spl.test[index] == new_set
            assert spl.test[index] != base_vec[index]
            new_get = np.random.random()
            spl.test[index] = new_get
            assert param.value == new_get
            assert param.value != new_set
        # Fixed parameters: same two-way link, but param.fixed is True.
        for index in range(20):
            name = f"fixed_test_name{index}"
            spl._create_parameter(name, index, 'fixed_test', True)
            assert hasattr(spl, name)
            param = getattr(spl, name)
            assert isinstance(param, Parameter)
            assert param.model == spl
            assert param.fixed is True
            assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
            new_set = np.random.random()
            param.value = new_set
            assert spl.fixed_test[index] == new_set
            assert spl.fixed_test[index] != base_vec[index]
            new_get = np.random.random()
            spl.fixed_test[index] = new_get
            assert param.value == new_get
            assert param.value != new_set
    def test__create_parameters(self):
        np.random.seed(37)
        test = np.random.random(20)
        class Spline(self.Spline):
            @property
            def test(self):
                return test
        spl = Spline()
        fixed = mk.MagicMock()
        with mk.patch.object(_Spline, '_create_parameter',
                             autospec=True) as mkCreate:
            params = spl._create_parameters("test_param", "test", fixed)
            assert params == tuple(f"test_param{idx}" for idx in range(20))
            assert mkCreate.call_args_list == [
                mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)
            ]
    def test__init_parameters(self):
        spl = self.Spline()
        with pytest.raises(NotImplementedError) as err:
            spl._init_parameters()
        assert str(err.value) == "This needs to be implemented"
    def test__init_data(self):
        spl = self.Spline()
        with pytest.raises(NotImplementedError) as err:
            spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
        assert str(err.value) == "This needs to be implemented"
        with pytest.raises(NotImplementedError) as err:
            spl._init_data(mk.MagicMock(), mk.MagicMock())
        assert str(err.value) == "This needs to be implemented"
    def test__init_spline(self):
        spl = self.Spline()
        knots = mk.MagicMock()
        coeffs = mk.MagicMock()
        bounds = mk.MagicMock()
        # _init_spline must call _init_data before _init_parameters (order checked
        # via the shared 'main' mock's call record).
        with mk.patch.object(_Spline, "_init_parameters",
                             autospec=True) as mkParameters:
            with mk.patch.object(_Spline, "_init_data",
                                 autospec=True) as mkData:
                main = mk.MagicMock()
                main.attach_mock(mkParameters, 'parameters')
                main.attach_mock(mkData, 'data')
                spl._init_spline(knots, coeffs, bounds)
                assert main.mock_calls == [
                    mk.call.data(spl, knots, coeffs, bounds=bounds),
                    mk.call.parameters(spl)
                ]
    def test__init_tck(self):
        spl = self.Spline()
        assert spl._c is None
        assert spl._t is None
        assert spl._degree is None
        spl = self.Spline(degree=4)
        assert spl._c is None
        assert spl._t is None
        assert spl._degree == 4
@pytest.mark.skipif(not HAS_SCIPY, reason='requires scipy')
class TestSpline1D:
    def setup_class(self):
        # Shared sample data: a Gaussian (optionally perturbed by the module-level
        # 'noise' array) sampled at npts points, a shuffled copy, a dense output
        # grid, and an interior knot vector built from nknots.
        def func(x, noise=0):
            return np.exp(-x**2) + 0.1*noise
        self.x = np.linspace(-3, 3, npts)
        self.y = func(self.x, noise)
        self.truth = func(self.x)
        arg_sort = np.argsort(self.x)
        np.random.shuffle(arg_sort)
        self.x_s = self.x[arg_sort]
        self.y_s = func(self.x_s, noise[arg_sort])
        self.npts_out = 1000
        self.xs = np.linspace(-3, 3, self.npts_out)
        self.t = np.linspace(-3, 3, nknots)[1:-1]
    def check_parameter(self, spl, base_name, name, index, value, fixed):
        """Verify one generated Parameter: its name encodes *index* after
        *base_name*, and its value/model/fixed state match expectations
        (*value* is a callable mapping index -> expected value)."""
        assert base_name in name
        assert index == int(name.split(base_name)[-1])
        knot_name = f"{base_name}{index}"
        assert knot_name == name
        assert hasattr(spl, name)
        param = getattr(spl, name)
        assert isinstance(param, Parameter)
        assert param.name == name
        assert param.value == value(index)
        assert param.model == spl
        assert param.fixed is fixed
    def check_parameters(self, spl, params, base_name, value, fixed):
        """Run check_parameter over an ordered sequence of parameter names."""
        for idx, name in enumerate(params):
            self.check_parameter(spl, base_name, name, idx, value, fixed)
    def update_parameters(self, spl, knots, value):
        """Set every named parameter to *value* and confirm the assignment took."""
        for name in knots:
            param = getattr(spl, name)
            param.value = value
            assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) == f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) == "Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == "There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) == "The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == "There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) == "tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == "Must have at least 8 knots."
# Error
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) == "Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple(mk.MagicMock() for _ in range(3))
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == [mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == "Cannot evaluate a derivative of order higher than 4"
    def check_knots_created(self, spl, k):
        """Verify a fitted spline's knot parameters: the k+1 boundary knots at each
        end clamp to the data range endpoints, and every knot name mirrors the
        corresponding entry of the knot vector."""
        def value0(idx):
            return self.x[0]
        def value1(idx):
            return self.x[-1]
        for idx in range(k + 1):
            name = f"knot{idx}"
            self.check_parameter(spl, "knot", name, idx, value0, True)
            index = len(spl.t) - (k + 1) + idx
            name = f"knot{index}"
            self.check_parameter(spl, "knot", name, index, value1, True)
        def value3(idx):
            return spl.t[idx]
        assert len(spl._knot_names) == len(spl.t)
        for idx, name in enumerate(spl._knot_names):
            assert name == f"knot{idx}"
            self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
    """Check that one ``coeff{idx}`` parameter exists per spline coefficient."""
    def expected(index):
        # Each coefficient parameter mirrors the corresponding entry of spl.c.
        return spl.c[index]

    assert len(spl._coeff_names) == len(spl.c)
    for position, coeff_name in enumerate(spl._coeff_names):
        assert coeff_name == f"coeff{position}"
        self.check_parameter(spl, "coeff", coeff_name, position, expected, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
    """Check a fitted Spline1D against the equivalent scipy spline.

    ``atol_fit`` and ``atol_truth`` bound the deviation of the fit from the
    noisy data and from the noiseless truth respectively.
    """
    # Knots and coefficients must match the scipy reference spline.
    assert_allclose(fit_spl.t, spline._eval_args[0])
    assert_allclose(fit_spl.c, spline._eval_args[1])
    assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
    assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])

    # check that _parameters are correct: flat vector is [knots, coeffs]
    assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
    assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
    assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)

    # check that parameters are correct (public view matches the same layout)
    assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
    assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
    assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)

    assert_allclose(spline.get_residual(), fitter.fit_info['resid'])

    # The model, the scipy spline and fit_info['spline'] must all agree.
    assert_allclose(fit_spl(self.x), spline(self.x))
    assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))

    assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
    assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
    """Check fitting with an explicit bbox spanning the data range.

    The bbox fit must set the model's bounding box and produce the same
    knots/coefficients as the fit without a bbox.
    """
    bbox = [self.x[0], self.x[-1]]
    bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
    assert bbox_spl.bounding_box == tuple(bbox)
    assert_allclose(fit_spl.t, bbox_spl.t)
    assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
    """Fitting a spline that already has preset knots must warn the user."""
    preset_spl = Spline1D(degree=k, knots=knots)
    with pytest.warns(AstropyUserWarning):
        fitter(preset_spl, self.x, self.y, weights=w, **kwargs)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
    """Interpolating fit: zero residual and exact reproduction of the data."""
    fitter = SplineInterpolateFitter()
    assert fitter.fit_info == {'resid': None, 'spline': None}

    spl = Spline1D(degree=k)
    self.check_base_spline(spl, None, None, k)
    fit_spl = fitter(spl, self.x, self.y, weights=w)
    # The input model is left untouched; the fitter returns a new model.
    self.check_base_spline(spl, None, None, k)

    assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
    spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
    assert isinstance(fitter.fit_info['spline'], UnivariateSpline)

    # Interpolation passes through every data point, so the residual is 0.
    assert spline.get_residual() == 0
    self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
    self.check_bbox(spl, fit_spl, fitter, w)

    # Supplying knots to an interpolating fit must raise a warning.
    knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
    self.check_knots_warning(fitter, knots, k, w)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
    """Smoothing fit matches scipy's UnivariateSpline for the same w/k/s."""
    fitter = SplineSmoothingFitter()
    assert fitter.fit_info == {'resid': None, 'spline': None}

    spl = Spline1D(degree=k)
    self.check_base_spline(spl, None, None, k)
    fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
    # The input model is left untouched; the fitter returns a new model.
    self.check_base_spline(spl, None, None, k)

    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import UnivariateSpline
    spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
    assert isinstance(fitter.fit_info['spline'], UnivariateSpline)

    self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
    self.check_bbox(spl, fit_spl, fitter, w, s=s)

    # test warning: passing preset knots to a smoothing fit must warn
    knots = fit_spl.t.copy()
    self.check_knots_warning(fitter, knots, k, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
    """LSQ fit with user-supplied interior knots matches LSQUnivariateSpline."""
    fitter = SplineExactKnotsFitter()
    assert fitter.fit_info == {'resid': None, 'spline': None}

    knots = [-1, 0, 1]
    # Full knot vector: interior knots padded with (k+1) boundary knots.
    t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
    c = np.zeros(len(t))

    # With knots preset
    spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
    self.check_base_spline(spl, t, c, k)
    assert (spl.t_interior == knots).all()
    fit_spl = fitter(spl, self.x, self.y, weights=w)
    # The input model is left untouched; the fitter returns a new model.
    self.check_base_spline(spl, t, c, k)
    assert (spl.t_interior == knots).all()

    assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
    spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
    assert isinstance(fitter.fit_info['spline'], UnivariateSpline)

    assert_allclose(spline.get_residual(), 0.1, atol=1)
    assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
    self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
    self.check_bbox(spl, fit_spl, fitter, w)

    # Pass knots via fitter function: warns because knots are already preset
    with pytest.warns(AstropyUserWarning):
        fitter(spl, self.x, self.y, t=knots, weights=w)

    # pass no knots: exact-knots fitting is impossible without knots
    spl = Spline1D(degree=k)
    with pytest.raises(RuntimeError) as err:
        fitter(spl, self.x, self.y, weights=w)
    assert str(err.value) == "No knots have been provided"
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
    """splrep-based fit without knots reproduces scipy's tck and diagnostics."""
    fitter = SplineSplrepFitter()
    assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}

    spl = Spline1D(degree=k)
    self.check_base_spline(spl, None, None, k)
    fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
    # The input model is left untouched; the fitter returns a new model.
    self.check_base_spline(spl, None, None, k)

    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import BSpline, splrep
    tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
                                                    w=w, k=k, s=s, full_output=1)
    assert_allclose(fit_spl.t, tck[0])
    assert_allclose(fit_spl.c, tck[1])

    # fit_info must carry splrep's full_output diagnostics through.
    assert fitter.fit_info['fp'] == spline_fp
    assert fitter.fit_info['ier'] == spline_ier
    assert fitter.fit_info['msg'] == spline_msg

    spline = BSpline(*tck)
    assert_allclose(fit_spl(self.x), spline(self.x))

    assert_allclose(fit_spl(self.x), self.y, atol=1)
    assert_allclose(fit_spl(self.x), self.truth, atol=1)
    self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
    """splrep-based fit with interior knots, preset or passed to the fitter."""
    fitter = SplineSplrepFitter()
    assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}

    knots = [-1, 0, 1]
    # Full knot vector: interior knots padded with (k+1) boundary knots.
    t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
    c = np.zeros(len(t))

    # With knots preset
    spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
    self.check_base_spline(spl, t, c, k)
    assert (spl.t_interior == knots).all()
    fit_spl = fitter(spl, self.x, self.y, weights=w)
    # The input model is left untouched; the fitter returns a new model.
    self.check_base_spline(spl, t, c, k)
    assert (spl.t_interior == knots).all()

    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import BSpline, splrep
    tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
                                                    w=w, k=k, t=knots, full_output=1)
    assert_allclose(fit_spl.t, tck[0])
    assert_allclose(fit_spl.c, tck[1])

    # fit_info must carry splrep's full_output diagnostics through.
    assert fitter.fit_info['fp'] == spline_fp
    assert fitter.fit_info['ier'] == spline_ier
    assert fitter.fit_info['msg'] == spline_msg

    spline = BSpline(*tck)
    assert_allclose(fit_spl(self.x), spline(self.x))

    assert_allclose(fit_spl(self.x), self.y, atol=1)
    assert_allclose(fit_spl(self.x), self.truth, atol=1)
    self.check_bbox(spl, fit_spl, fitter, w)

    # test warning: knots both preset on the model and passed to the fitter
    with pytest.warns(AstropyUserWarning):
        fitter(spl, self.x, self.y, t=knots, weights=w)

    # With no knots present: knots supplied only through the fitter call
    spl = Spline1D(degree=k)
    self.check_base_spline(spl, None, None, k)
    fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
    self.check_base_spline(spl, None, None, k)

    self.check_knots_created(fit_spl, k)
    self.check_coeffs_created(fit_spl)
    assert fit_spl._bounding_box is None

    from scipy.interpolate import BSpline, splrep
    tck = splrep(self.x, self.y, w=w, k=k, t=knots)
    assert_allclose(fit_spl.t, tck[0])
    assert_allclose(fit_spl.c, tck[1])

    spline = BSpline(*tck)
    assert_allclose(fit_spl(self.x), spline(self.x))

    assert_allclose(fit_spl(self.x), self.y, atol=1)
    assert_allclose(fit_spl(self.x), self.truth, atol=1)
    self.check_bbox(spl, fit_spl, fitter, w, t=knots)
def generate_spline(self, w=None, bbox=(None, None), k=None, s=None, t=None):
    """Build a scipy ``BSpline`` reference fit of ``self.x`` / ``self.y``.

    Parameters mirror ``scipy.interpolate.splrep``: weights ``w``, the
    boundary interval ``bbox`` as ``(xb, xe)``, degree ``k`` (default 3),
    smoothing factor ``s`` and interior knots ``t``.
    """
    # Bug fix: the previous default ``bbox=[None]*2`` was a shared mutable
    # default argument; an immutable tuple removes the cross-call hazard
    # while remaining index-compatible for callers passing lists.
    if k is None:
        k = 3

    from scipy.interpolate import BSpline, splrep
    tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
                 k=k, s=s, t=t)
    return BSpline(*tck)
def test_derivative(self):
    """Spline1D.derivative lowers the degree and agrees with BSpline.derivative."""
    bspline = self.generate_spline()

    spl = Spline1D()
    spl.bspline = bspline
    # Assigning a bspline populates knots, coefficients and degree.
    assert_allclose(spl.t, bspline.t)
    assert_allclose(spl.c, bspline.c)
    assert spl.degree == bspline.k

    # 1st derivative
    d_bspline = bspline.derivative(nu=1)
    # Sanity: differentiating once shifts every evaluation order up by one.
    assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
    assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
    assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
    assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))

    der = spl.derivative()
    assert_allclose(der.t, d_bspline.t)
    assert_allclose(der.c, d_bspline.c)
    assert der.degree == d_bspline.k == 2
    assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
    assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
    assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
    assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))

    # 2nd derivative
    d_bspline = bspline.derivative(nu=2)
    assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
    assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
    assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))

    der = spl.derivative(nu=2)
    assert_allclose(der.t, d_bspline.t)
    assert_allclose(der.c, d_bspline.c)
    assert der.degree == d_bspline.k == 1
    assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
    assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
    assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))

    # 3rd derivative
    d_bspline = bspline.derivative(nu=3)
    assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
    assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))

    der = spl.derivative(nu=3)
    assert_allclose(der.t, d_bspline.t)
    assert_allclose(der.c, d_bspline.c)
    assert der.degree == d_bspline.k == 0
    assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
    assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))

    # Too many derivatives: a cubic supports at most the 3rd derivative
    for nu in range(4, 9):
        with pytest.raises(ValueError) as err:
            spl.derivative(nu=nu)
        assert str(err.value) == "Must have nu <= 3"
def test_antiderivative(self):
    """Spline1D.antiderivative raises the degree and agrees with BSpline."""
    bspline = self.generate_spline()

    spl = Spline1D()
    spl.bspline = bspline

    # 1st antiderivative
    a_bspline = bspline.antiderivative(nu=1)
    # Sanity: integrating once shifts every derivative order up by one.
    assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
    assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
    assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
    assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
    assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))

    anti = spl.antiderivative()
    assert_allclose(anti.t, a_bspline.t)
    assert_allclose(anti.c, a_bspline.c)
    assert anti.degree == a_bspline.k == 4
    assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
    assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
    assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
    assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
    assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))

    # 2nd antiderivative
    a_bspline = bspline.antiderivative(nu=2)
    assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
    assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
    assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
    assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
    assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))

    anti = spl.antiderivative(nu=2)
    assert_allclose(anti.t, a_bspline.t)
    assert_allclose(anti.c, a_bspline.c)
    assert anti.degree == a_bspline.k == 5
    assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
    assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
    assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
    assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
    assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))

    # Too many anti derivatives: the resulting degree would exceed 5
    for nu in range(3, 9):
        with pytest.raises(ValueError) as err:
            spl.antiderivative(nu=nu)
        assert str(err.value) == ("Supported splines can have max degree 5, "
                                  f"antiderivative degree will be {nu + 3}")
def test__SplineFitter_error(self):
    """Base _SplineFitter rejects bad argument counts, non-spline models,
    and reports that _fit_method must be overridden."""
    spl = Spline1D()

    class SplineFitter(_SplineFitter):
        # Minimal subclass that defers straight to the base _fit_method.
        def _fit_method(self, model, x, y, **kwargs):
            super()._fit_method(model, x, y, **kwargs)

    fitter = SplineFitter()

    # Too many data arguments for a 1D model.
    with pytest.raises(ValueError) as err:
        fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
    assert str(err.value) == "1D model can only have 2 data points."

    # A non-spline model is rejected outright.
    with pytest.raises(ModelDefinitionError) as err:
        fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
    assert str(err.value) == "Only spline models are compatible with this fitter."

    # The base class's _fit_method itself is not implemented.
    with pytest.raises(NotImplementedError) as err:
        fitter(spl, mk.MagicMock(), mk.MagicMock())
    assert str(err.value) == "This has not been implemented for _SplineFitter."
| {
"content_hash": "a19d90b093463504d24c171afda70e77",
"timestamp": "",
"source": "github",
"line_count": 1495,
"max_line_length": 98,
"avg_line_length": 36.00401337792642,
"alnum_prop": 0.5556236762902687,
"repo_name": "lpsinger/astropy",
"id": "762dfda74aa2afdfee2a9e2f8e91baf4090c6e95",
"size": "53891",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/modeling/tests/test_spline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import serial
import MySQLdb
import json
# CLASSES.
class ValorArduino(object):
    """Snapshot of the sensor readings reported by the Arduino board."""

    def __init__(self, luz, temperatura, ph, humedad, regando):
        # Coerce every field up front so downstream code always sees
        # numbers, not the raw strings parsed from the serial JSON payload.
        for attr, raw in (("luz", luz), ("temperatura", temperatura),
                          ("ph", ph), ("humedad", humedad)):
            setattr(self, attr, float(raw))
        self.regando = int(regando)
# CONSTANTS.
# Parameters for the connection with the Arduino.
DISPOSITIVO = "/dev/ttyACM0"  # Device name to use.
BAUDIOS = 9600  # Baud rate at which to read the Arduino port.

# Database parameters.
SERVIDOR = "localhost"
USUARIO = "UsuarioRiego"
FRASE = "UsuarioRiego"  # NOTE(review): credentials hard-coded in source.
BASE_DATOS = "sistema_riego"

# Name of the stored procedure that updates the values read from the Arduino.
ACTUALIZAR_VALORES = "ActualizarValores"
# FUNCTIONS.
def ActualizarValores(valorArduino):
    """Persist the values read from the Arduino into the database.

    Calls the ``ActualizarValores`` stored procedure with the current
    readings and commits. Failures are reported on stdout.
    """
    # Parameters for the stored procedure, in the order it expects them.
    lecturas = (valorArduino.luz, valorArduino.temperatura,
                valorArduino.ph, valorArduino.humedad, valorArduino.regando)
    # Bug fix: the originals were only bound inside ``try``; when
    # ``connect`` itself failed, the ``finally`` block raised NameError.
    con = None
    cursor = None
    try:
        # Open the database connection.
        con = MySQLdb.connect(SERVIDOR, USUARIO, FRASE, BASE_DATOS)
        cursor = con.cursor()
        # Update the stored values.
        cursor.callproc(ACTUALIZAR_VALORES, lecturas)
        con.commit()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; the best-effort reporting behaviour is kept.
        print("Fallo de conexion a " + BASE_DATOS)
    finally:
        # Close the storage handles only if they were actually opened.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
# MAIN.
# Connect to the Arduino and read indefinitely.
# Read data from the Arduino and convert it into a usable format.
repetir = True
while repetir:
    try:
        print("Conectando a " + DISPOSITIVO)
        # NOTE(review): the serial port is reopened on every iteration and
        # never closed — confirm this is intended for this simple daemon.
        arduino = serial.Serial(DISPOSITIVO, BAUDIOS)
        # Read and display one line coming from the Arduino.
        datosJson = arduino.readline()
        print(datosJson)
        # Convert the JSON payload into an attribute-style record.
        datos = lambda: None
        datos.__dict__ = json.loads(datosJson)
        # NOTE(review): ``datos.presion`` ("pressure") is passed as the
        # ``ph`` argument of ValorArduino — confirm the payload key really
        # carries the pH reading.
        valorArduino = ValorArduino(datos.luz, datos.temperatura,
                                    datos.presion, datos.humedad, datos.regando)
        # Store the readings.
        ActualizarValores(valorArduino)
    except:
        # Any failure (serial, JSON parsing, attribute access) ends the loop.
        print("Falla de conexion a " + DISPOSITIVO)
        repetir = False
| {
"content_hash": "5e5cdce4906539691a77ef5dd51a0080",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 80,
"avg_line_length": 30.407894736842106,
"alnum_prop": 0.6668109043704025,
"repo_name": "salcedojose/LaLluvia",
"id": "3cc1ceeab2776ad282df7df22b6090bff4edb9b7",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Proyecto/arduino/SistemaRiego/LecturaArduino.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5063"
},
{
"name": "C++",
"bytes": "15112"
},
{
"name": "HTML",
"bytes": "2895"
},
{
"name": "JavaScript",
"bytes": "1954"
},
{
"name": "PHP",
"bytes": "1789"
},
{
"name": "Python",
"bytes": "2498"
},
{
"name": "Shell",
"bytes": "634"
},
{
"name": "TeX",
"bytes": "13149"
}
],
"symlink_target": ""
} |
import re
from structs.nest import *
def nu(base):
    """Build a ``Nest`` describing JavaScript modules namespaced under *base*.

    ``dep`` maps a filename like ``<base>/pkg/mod.js`` to its dotted
    qualified name (``pkg.mod``); ``path`` is the inverse; ``pattern``
    appends the ``.js`` suffix to a word.
    """
    def dep(filename):
        # Strip everything up to and including ``<base>/``, drop the ``.js``
        # suffix, then turn the remaining slashes into dots.
        # Fix: raw strings replace the invalid escape sequences ("\." and
        # "\g") that the original non-raw literals produced; the resulting
        # patterns/replacements are byte-identical.
        raw = re.sub(r"(?P<pre>.*)" + base + r"/(?P<package>.+)/(?P<module>.+?)\.js$",
                     r"\g<package>.\g<module>", filename)
        return re.sub("/", ".", raw)

    def pattern(word):
        return word + '.js'

    def path(d):
        # Inverse of ``dep``: rebuild ``<base>/<rest>/<module>.js`` by
        # splitting the qualified name at its last dot.
        qualified = dep(d)
        last = qualified.rfind('.')
        if last > -1:
            rest = qualified[:last]
            name = qualified[last + 1:]
            return base + '/' + rest + '/' + name + '.js'
        else:
            return qualified

    return Nest(base, pattern, dep, path)
| {
"content_hash": "676ab1b2a6144ec384085ebf650a5337",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 120,
"avg_line_length": 26.208333333333332,
"alnum_prop": 0.48966613672496023,
"repo_name": "boltjs/bolt",
"id": "fa9692f66604251a515ad407482f094383af6187",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/ide/sublime/src/Bolt/structs/namespaced.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "126987"
},
{
"name": "Python",
"bytes": "35906"
},
{
"name": "Ruby",
"bytes": "103"
},
{
"name": "Scala",
"bytes": "286"
},
{
"name": "Shell",
"bytes": "3973"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
    """Alter ``Caregiver.key`` to a unique, non-editable UUID field."""

    dependencies = [
        ('visit', '0029_caregiver_key'),
    ]

    operations = [
        migrations.AlterField(
            model_name='caregiver',
            name='key',
            # New keys default to a random uuid4; uniqueness enforced by DB.
            field=models.UUIDField(default=uuid.uuid4, unique=True, editable=False),
        ),
    ]
| {
"content_hash": "91e7694a993b11101051b534efc1a1d3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 21.94736842105263,
"alnum_prop": 0.6115107913669064,
"repo_name": "koebbe/homeworks",
"id": "2756f53c5cab874f782d3107bf48d7d86a3d65c3",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visit/migrations/0030_auto_20150614_1814.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44210"
},
{
"name": "HTML",
"bytes": "69003"
},
{
"name": "JavaScript",
"bytes": "124572"
},
{
"name": "Python",
"bytes": "223075"
}
],
"symlink_target": ""
} |
import smtplib
from email.mime.text import MIMEText
__author__ = 'hiroki'
def send_mail(source, destination, message, subject):
    """Send a plain-text e-mail through the local SMTP server.

    :param source: Address of the sender.
    :param destination: Address of the recipient.
    :param message: Body text of the mail.
    :param subject: Subject line.
    :return: None
    """
    msg = MIMEText(message)
    msg['Subject'] = subject
    msg['From'] = source
    msg['To'] = destination

    # Bug fix: the original called ``server.quit()`` only after a successful
    # ``sendmail``, leaking the connection when sending raised. The context
    # manager issues QUIT and closes the socket on every exit path.
    with smtplib.SMTP('localhost') as server:
        server.sendmail(source, [destination], msg.as_string())
"content_hash": "056bb16baa2bedf4a3a43d77294a824c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 26.238095238095237,
"alnum_prop": 0.6533575317604355,
"repo_name": "Kokemomo/Kokemomo",
"id": "48acef5cb69edb155725f68c9335351dc634e0ac",
"size": "597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kokemomo/plugins/engine/utils/km_mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8296"
},
{
"name": "HTML",
"bytes": "213"
},
{
"name": "JavaScript",
"bytes": "25668"
},
{
"name": "Python",
"bytes": "125814"
},
{
"name": "Smarty",
"bytes": "75782"
}
],
"symlink_target": ""
} |
from sqlalchemy import orm
from ggrc import db
from ggrc.models.mixins import Base
class ObjectFile(Base, db.Model):
    """Polymorphic link between a stored file and any "fileable" object."""
    __tablename__ = 'object_files'

    # Identifiers of the linked file and its containing folder.
    file_id = db.Column(db.String, nullable=False)
    parent_folder_id = db.Column(db.String, nullable=True)
    # Generic foreign key: id plus class name of the owning object.
    fileable_id = db.Column(db.Integer, nullable=False)
    fileable_type = db.Column(db.String, nullable=False)

    @property
    def fileable_attr(self):
        # Name of the backref relationship created for the owning type
        # (see Fileable.late_init_fileable elsewhere in this module).
        return '{0}_fileable'.format(self.fileable_type)

    @property
    def fileable(self):
        return getattr(self, self.fileable_attr)

    @fileable.setter
    def fileable(self, value):
        # Keep the id/type columns in sync with the relationship attribute.
        self.fileable_id = value.id if value is not None else None
        self.fileable_type = value.__class__.__name__ if value is not None \
            else None
        return setattr(self, self.fileable_attr, value)

    # Attributes exposed through the publish/JSON layer.
    _publish_attrs = [
        'file_id',
        'parent_folder_id',
        'fileable',
    ]

    @classmethod
    def eager_query(cls):
        query = super(ObjectFile, cls).eager_query()
        return query.options()

    def _display_name(self):
        return self.fileable.display_name + '<-> gdrive file' + self.file_id
class Fileable(object):
    """Mixin that attaches an ``object_files`` collection to a model."""

    @classmethod
    def late_init_fileable(cls):
        # Deferred initialization so the primaryjoin can embed the concrete
        # subclass name, which is unknown at mixin-definition time.
        def make_object_files(cls):
            joinstr = 'and_(foreign(ObjectFile.fileable_id) == {type}.id, '\
                      'foreign(ObjectFile.fileable_type) == "{type}")'
            joinstr = joinstr.format(type=cls.__name__)
            return db.relationship(
                'ObjectFile',
                primaryjoin=joinstr,
                # Backref name matches ObjectFile.fileable_attr's format.
                backref='{0}_fileable'.format(cls.__name__),
                cascade='all, delete-orphan',
            )
        cls.object_files = make_object_files(cls)

    # Attributes exposed through the publish/JSON layer.
    _publish_attrs = [
        'object_files',
    ]

    @classmethod
    def eager_query(cls):
        query = super(Fileable, cls).eager_query()
        # Pre-fetch the files to avoid N+1 queries when serializing.
        return query.options(
            orm.subqueryload('object_files'))
| {
"content_hash": "ee2d7796994b654f5ba98700ef49313e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 26.420289855072465,
"alnum_prop": 0.6500274273176083,
"repo_name": "edofic/ggrc-core",
"id": "714a6d6953eeced55aae5c4f8116869ee083ce2c",
"size": "1936",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "src/ggrc_gdrive_integration/models/object_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "191076"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1069698"
},
{
"name": "JavaScript",
"bytes": "1704619"
},
{
"name": "Makefile",
"bytes": "7103"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2385925"
},
{
"name": "Shell",
"bytes": "30802"
}
],
"symlink_target": ""
} |
import unittest
from couchdbkit import *
from pyramid import testing
from .views import *
from .models import *
import uuid
class ModelTests(unittest.TestCase):
    """Integration tests that require a running local CouchDB instance."""

    def setUp(self):
        # NOTE(review): pyramid's documented pattern is
        # ``self.config = testing.setUp()``; ``testing.testConfig`` is a
        # context manager, and calling it without entering it may leave the
        # registry unconfigured — confirm against the pyramid version in use.
        self.config = testing.testConfig()

    def tearDown(self):
        testing.tearDown()

    def test_create_user(self):
        # Random username so repeated runs do not collide in _users.
        username = uuid.uuid4().hex
        # NOTE(review): host and credentials are hard-coded; the test needs
        # CouchDB at localhost:5984 with these admin credentials.
        db = Database("http://admin:password@localhost:5984/_users")
        create_new_user(db, username, "password")
| {
"content_hash": "e6078fdc68c2660875cf7b721b7698d8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 22.19047619047619,
"alnum_prop": 0.6781115879828327,
"repo_name": "adlnet/LR-Lite",
"id": "c249e30286dac17d5864ba4701b41de103144fb7",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lrlite/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6015"
},
{
"name": "JavaScript",
"bytes": "476"
},
{
"name": "Python",
"bytes": "44509"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from reversion import revisions
from autoslug import AutoSlugField
from parler.models import TranslatedFields, TranslatableModel
from parler.managers import TranslatableQuerySet, TranslatableManager
from democracy.models.comment import BaseComment, recache_on_save
from democracy.models.images import BaseImage
from democracy.plugins import get_implementation
from democracy.enums import InitialSectionType
from .base import ORDERING_HELP, Commentable, StringIdBaseModel, BaseModel, BaseModelManager
from .hearing import Hearing
# Closure-info sections are forced to sort before all other sections.
CLOSURE_INFO_ORDERING = -10000
# Identifiers of the built-in section types: every non-underscore attribute
# value of InitialSectionType. These types may not be edited (see SectionType).
INITIAL_SECTION_TYPE_IDS = set(value for key, value in InitialSectionType.__dict__.items() if key[:1] != '_')
class SectionTypeQuerySet(models.QuerySet):
    """QuerySet helpers for filtering built-in ("initial") section types."""

    def initial(self):
        # Only the built-in section types.
        return self.filter(identifier__in=INITIAL_SECTION_TYPE_IDS)

    def exclude_initial(self):
        # Everything except the built-in section types.
        return self.exclude(identifier__in=INITIAL_SECTION_TYPE_IDS)
class SectionType(BaseModel):
    """Category of a hearing section; the built-in types are immutable."""
    # Slug derived from the singular name; serves as a stable identifier.
    identifier = AutoSlugField(populate_from='name_singular', unique=True)
    name_singular = models.CharField(max_length=64)
    name_plural = models.CharField(max_length=64)

    objects = BaseModelManager.from_queryset(SectionTypeQuerySet)()

    def __str__(self):
        return self.name_singular

    def save(self, *args, **kwargs):
        # prevent initial type editing
        if self.identifier in INITIAL_SECTION_TYPE_IDS:
            raise Exception("Initial section types cannot be edited.")
        return super().save(*args, **kwargs)
class Section(Commentable, StringIdBaseModel, TranslatableModel):
    """A single content section of a Hearing, with translated text fields."""
    hearing = models.ForeignKey(Hearing, related_name='sections', on_delete=models.PROTECT)
    ordering = models.IntegerField(verbose_name=_('ordering'), default=1, db_index=True, help_text=ORDERING_HELP)
    type = models.ForeignKey(SectionType, related_name='sections', on_delete=models.PROTECT)
    translations = TranslatedFields(
        title=models.CharField(verbose_name=_('title'), max_length=255, blank=True),
        abstract=models.TextField(verbose_name=_('abstract'), blank=True),
        content=models.TextField(verbose_name=_('content'), blank=True),
    )
    # Optional plugin integration for interactive section content.
    plugin_identifier = models.CharField(verbose_name=_('plugin identifier'), blank=True, max_length=255)
    plugin_data = models.TextField(verbose_name=_('plugin data'), blank=True)
    plugin_iframe_url = models.URLField(verbose_name=_('plugin iframe url'), blank=True)
    plugin_fullscreen = models.BooleanField(default=False)

    objects = BaseModelManager.from_queryset(TranslatableQuerySet)()

    class Meta:
        ordering = ["ordering"]
        verbose_name = _('section')
        verbose_name_plural = _('sections')

    def __str__(self):
        return "%s: %s" % (self.hearing, self.title)

    def save(self, *args, **kwargs):
        if self.hearing_id:
            # Closure info should be the first
            if self.type == SectionType.objects.get(identifier=InitialSectionType.CLOSURE_INFO):
                self.ordering = CLOSURE_INFO_ORDERING
            elif (not self.pk and self.ordering == 1) or self.ordering == CLOSURE_INFO_ORDERING:
                # This is a new section or changing type from closure info,
                # automatically derive next ordering, if possible
                self.ordering = max(self.hearing.sections.values_list("ordering", flat=True) or [0]) + 1
        return super(Section, self).save(*args, **kwargs)

    def check_commenting(self, request):
        # Commenting must be allowed by both the section and its hearing.
        super().check_commenting(request)
        self.hearing.check_commenting(request)

    def check_voting(self, request):
        # Voting must be allowed by both the section and its hearing.
        super().check_voting(request)
        self.hearing.check_voting(request)

    @property
    def plugin_implementation(self):
        return get_implementation(self.plugin_identifier)
class SectionImageManager(TranslatableManager, BaseModelManager):
    """Manager that returns section images in stable primary-key order."""

    def get_queryset(self):
        return super(SectionImageManager, self).get_queryset().order_by('pk')
class SectionImage(BaseImage, TranslatableModel):
    """Image attached to a Section, with translatable title and caption."""
    # Name of the FK field linking back to the parent object (BaseImage API).
    parent_field = "section"
    section = models.ForeignKey(Section, related_name="images")
    translations = TranslatedFields(
        title=models.CharField(verbose_name=_('title'), max_length=255, blank=True, default=''),
        caption=models.TextField(verbose_name=_('caption'), blank=True, default=''),
    )

    objects = SectionImageManager()

    class Meta:
        verbose_name = _('section image')
        verbose_name_plural = _('section images')
        ordering = ("ordering", "translations__title")
@revisions.register
@recache_on_save
class SectionComment(BaseComment):
    """User comment on a Section; versioned, with cache refresh on save."""
    # Name and model of the commented parent object (BaseComment API).
    parent_field = "section"
    parent_model = Section
    section = models.ForeignKey(Section, related_name="comments")
    title = models.CharField(verbose_name=_('title'), blank=True, max_length=255)
    content = models.TextField(verbose_name=_('content'), blank=True)

    class Meta:
        verbose_name = _('section comment')
        verbose_name_plural = _('section comments')
        # Newest comments first.
        ordering = ('-created_at',)
class CommentImage(BaseImage):
    """Image attached to a SectionComment."""
    title = models.CharField(verbose_name=_('title'), max_length=255, blank=True, default='')
    caption = models.TextField(verbose_name=_('caption'), blank=True, default='')
    # Name of the FK field linking back to the parent object (BaseImage API).
    parent_field = "sectioncomment"
    comment = models.ForeignKey(SectionComment, related_name="images")

    class Meta:
        verbose_name = _('comment image')
        verbose_name_plural = _('comment images')
        ordering = ("ordering", "title")
| {
"content_hash": "ede23705dec0d845b27c1fe195d2cad9",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 113,
"avg_line_length": 40.62043795620438,
"alnum_prop": 0.6972147349505841,
"repo_name": "stephawe/kerrokantasi",
"id": "5e3c28829922492a76f9b3db3c6503bc313a87d4",
"size": "5565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "democracy/models/section.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1023"
},
{
"name": "HTML",
"bytes": "10184"
},
{
"name": "JavaScript",
"bytes": "210967"
},
{
"name": "Python",
"bytes": "338676"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.