text
stringlengths 4
1.02M
| meta
dict |
|---|---|
def make_model(model_name: str, new_display_name: str) -> google.cloud.aiplatform_v1beta1.types.model.Model:
model = model
model = {
'name': model_name,
'display_name': new_display_name
}
return model
def make_update_mask() -> google.protobuf.field_mask_pb2.FieldMask:
update_mask = {
'paths': [
'display_name'
]
}
return update_mask
|
{
"content_hash": "379765120f0d718cf867c7cba80872b8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 108,
"avg_line_length": 21.63157894736842,
"alnum_prop": 0.5888077858880778,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "fff76e2ac5b9558a7949991d72b8bda0df2d8dde",
"size": "988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".sample_configs/param_handlers/update_model_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from desktop.lib.conf import Config, coerce_bool
MAX_SNAPPY_DECOMPRESSION_SIZE = Config(
key="max_snappy_decompression_size",
help=_("Max snappy decompression size in bytes."),
private=True,
default=1024*1024*25,
type=int)
ARCHIVE_UPLOAD_TEMPDIR = Config(
key="archive_upload_tempdir",
help=_("Location on local filesystem where the uploaded archives are temporary stored."),
default=None,
type=str)
SHOW_DOWNLOAD_BUTTON = Config(
key="show_download_button",
help=_("whether to show the download button in hdfs file browser."),
type=coerce_bool,
default=True)
SHOW_UPLOAD_BUTTON = Config(
key="show_upload_button",
help=_("whether to show the upload button in hdfs file browser."),
type=coerce_bool,
default=True)
ENABLE_EXTRACT_UPLOADED_ARCHIVE = Config(
key="enable_extract_uploaded_archive",
help=_("Flag to enable the extraction of a uploaded archive in HDFS."),
type=bool,
default=False
)
|
{
"content_hash": "44093f8639ec0ed530ab23dfe036ca49",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 91,
"avg_line_length": 27.027027027027028,
"alnum_prop": 0.731,
"repo_name": "xq262144/hue",
"id": "0a23fdbec9e1f0ae18c2884feaf2c844b1c72aa3",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/filebrowser/src/filebrowser/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
}
|
from pandas import read_csv
from numpy import array
from sys import argv
from sys import exit
from numpy import random
trainFile = argv[1]
testFile = argv[2]
classOptions = argv[3].split(",")
algorithm = argv[4]
def readData(inFilePath):
return read_csv(inFilePath, sep='\t', index_col=0)
def predict(algorithm, train_X, train_y, test_X):
if algorithm == 'lda':
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
clf = LinearDiscriminantAnalysis()
elif algorithm == 'qda':
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
clf = QuadraticDiscriminantAnalysis()
elif algorithm == 'random_forest':
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, random_state=R_SEED)
elif algorithm == 'bagging':
from sklearn.ensemble import BaggingClassifier
clf = BaggingClassifier(n_estimators=100, random_state=R_SEED)
elif algorithm == 'extra_trees':
from sklearn.ensemble import ExtraTreesClassifier
clf = ExtraTreesClassifier(n_estimators=100, random_state=R_SEED)
elif algorithm == 'logistic_regression':
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=R_SEED)
# elif algorithm == 'passive_aggressive':
# from sklearn.linear_model import PassiveAggressiveClassifier
# clf = PassiveAggressiveClassifier(random_state=R_SEED)
# elif algorithm == 'elastic_net':
# from sklearn.linear_model import ElasticNet
# clf = ElasticNet(random_state=R_SEED)
# elif algorithm == 'ridge':
# from sklearn.linear_model import RidgeClassifier
# clf = RidgeClassifier(random_state=R_SEED)
elif algorithm == 'sgd':
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(random_state=R_SEED, loss="modified_huber")
elif algorithm == 'knn':
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
# elif algorithm == 'radius_neighbors':
# from sklearn.neighbors import RadiusNeighborsClassifier
# clf = RadiusNeighborsClassifier()
#elif algorithm == 'nearest_centroid':
# from sklearn.neighbors.nearest_centroid import NearestCentroid
# clf = NearestCentroid()
#elif algorithm == 'bernoulli_rbm':
# from sklearn.neural_network import BernoulliRBM
# clf = BernoulliRBM(random_state=R_SEED)
elif algorithm == 'svm_linear':
from sklearn.svm import SVC
clf = SVC(probability=True, random_state=R_SEED, kernel='linear')
elif algorithm == 'svm_rbf':
from sklearn.svm import SVC
clf = SVC(probability=True, random_state=R_SEED, kernel='rbf')
elif algorithm == 'svm_poly':
from sklearn.svm import SVC
clf = SVC(probability=True, random_state=R_SEED, kernel='poly')
elif algorithm == 'svm_sigmoid':
from sklearn.svm import SVC
clf = SVC(probability=True, random_state=R_SEED, kernel='sigmoid')
elif algorithm == 'svm_nurbf':
from sklearn.svm import NuSVC
clf = NuSVC(probability=True, random_state=R_SEED)
elif algorithm == 'decision_tree':
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(random_state=R_SEED)
else:
print "Invalid algorithm: %s" % algorithm
exit(1)
clf.fit(train_X, train_y)
return clf.predict_proba(test_X)
R_SEED = 0
random.seed(R_SEED)
train_df = readData(trainFile)
train_X = train_df.ix[:,:-1].values
train_y = array([classOptions.index(str(y[0])) for y in train_df.loc[:,["Class"]].values.tolist()])
test_X = readData(testFile).values
probs = predict(algorithm, train_X, train_y, test_X)
for i in range(len(probs)):
iProbs = list(probs[i])
maxProb = max(iProbs)
indicesMatchingMax = [i for i in range(len(iProbs)) if iProbs[i]==maxProb]
random.shuffle(indicesMatchingMax)
prediction = classOptions[indicesMatchingMax[0]]
print "%s\t%s" % (prediction, "\t".join(["%.9f" % iProb for iProb in iProbs]))
|
{
"content_hash": "fe95e998fd353eeac2bf9dcdb5b8351b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 99,
"avg_line_length": 40.09708737864078,
"alnum_prop": 0.6852300242130751,
"repo_name": "srp33/ShinyLearner",
"id": "890dbb66ab2251e4bb5cc5ff7c85cc9590e22824",
"size": "4130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Archive/Classification/tsv/Helper/sklearn_c_generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "91"
},
{
"name": "Java",
"bytes": "78213"
},
{
"name": "Python",
"bytes": "126214"
},
{
"name": "R",
"bytes": "89706"
},
{
"name": "Shell",
"bytes": "1250579"
}
],
"symlink_target": ""
}
|
from qutebrowser.config.configfiles import ConfigAPI
config: ConfigAPI = config
class Colors:
black = "#282a36"
light_black = "#686868"
red = "#ff5c57"
green = "#5af78e"
yellow = "#f3f99d"
blue = "#57c7ff"
magenta = "#ff6ac1"
cyan = "#9aedfe"
white = "#eff0eb"
class Fonts:
sans = "Cantarell"
fixed = "OperatorMonoLig Nerd Font"
medium = 12
large = 14
web = 17
config.load_autoconfig(False)
# Key bindings
config.unbind("cd")
config.unbind("d")
config.unbind("J")
config.unbind("K")
config.bind("dc", "download-clear")
config.bind("do", "download-open")
config.bind(";r", "hint all right-click")
config.bind("<Shift+Escape>", ":fake-key <Escape>")
config.bind("<Shift+j>", "scroll-px 0 50")
config.bind("<Shift+k>", "scroll-px 0 -50")
config.bind("g[", "tab-prev")
config.bind("g]", "tab-next")
config.bind("gc", "tab-close")
config.set("auto_save.session", True)
config.set("completion.shrink", True)
config.set("confirm_quit", ["multiple-tabs", "downloads"])
config.set("content.blocking.method", "both")
config.set("content.blocking.whitelist", ["click.redditmail.com"])
config.set("content.notifications.enabled", True, "https://web.whatsapp.com/")
config.set(
"content.headers.user_agent",
"Mozilla/5.0 (X11; Linux x86_64; rv:97.0) Gecko/20100101 Firefox/97.0",
)
config.set(
"content.tls.certificate_errors", "load-insecurely", "https://127.0.0.1:8384/"
)
config.set("downloads.open_dispatcher", "xdg-open")
config.set("editor.command", ["foot", "-e", "nvim", "{file}"])
config.set(
"url.searchengines",
{
"DEFAULT": "https://duckduckgo.com/?q={}",
"aur": "https://aur.archlinux.org/packages?K={}",
"dict": "https://dictionary.cambridge.org/search/direct/?datasetsearch=english&q={}",
"github": "https://github.com/search?q={}",
"google": "https://www.google.com/search?q={}",
"pac": "https://archlinux.org/packages/?q={}",
"warch": "https://wiki.archlinux.org/?search={}",
"youtube": "https://www.youtube.com/results?search_query={}",
},
)
# Fonts
fonts = Fonts()
config.set("fonts.default_family", fonts.fixed)
config.set("fonts.default_size", f"{fonts.medium}pt")
config.set("fonts.tabs.selected", f"{fonts.large}pt {fonts.sans}")
config.set("fonts.tabs.unselected", f"{fonts.large}pt {fonts.sans}")
config.set("fonts.web.family.fixed", fonts.fixed)
config.set("fonts.web.family.sans_serif", fonts.sans)
config.set("fonts.web.family.serif", fonts.sans)
config.set("fonts.web.family.standard", fonts.sans)
config.set("fonts.web.size.default", fonts.web)
config.set("fonts.web.size.default_fixed", fonts.medium)
# Colors
colors = Colors()
config.set("colors.completion.fg", colors.white)
config.set("colors.completion.odd.bg", colors.black)
config.set("colors.completion.even.bg", colors.black)
config.set("colors.completion.category.fg", colors.magenta)
config.set("colors.completion.category.bg", colors.black)
config.set("colors.completion.category.border.top", colors.black)
config.set("colors.completion.category.border.bottom", colors.black)
config.set("colors.completion.item.selected.fg", colors.black)
config.set("colors.completion.item.selected.bg", colors.yellow)
config.set("colors.completion.item.selected.border.top", colors.yellow)
config.set("colors.completion.item.selected.border.bottom", colors.yellow)
config.set("colors.completion.item.selected.match.fg", colors.magenta)
config.set("colors.completion.match.fg", colors.magenta)
config.set("colors.completion.scrollbar.fg", colors.magenta)
config.set("colors.completion.scrollbar.bg", colors.black)
config.set("colors.contextmenu.disabled.fg", colors.light_black)
config.set("colors.contextmenu.disabled.bg", colors.black)
config.set("colors.contextmenu.menu.fg", colors.white)
config.set("colors.contextmenu.menu.bg", colors.black)
config.set("colors.contextmenu.selected.fg", colors.black)
config.set("colors.contextmenu.selected.bg", colors.blue)
config.set("colors.downloads.bar.bg", colors.black)
config.set("colors.downloads.start.fg", colors.black)
config.set("colors.downloads.start.bg", colors.magenta)
config.set("colors.downloads.stop.fg", colors.black)
config.set("colors.downloads.stop.bg", colors.blue)
config.set("colors.downloads.error.fg", colors.black)
config.set("hints.border", f"1px solid {colors.yellow}")
config.set("colors.hints.fg", colors.black)
config.set("colors.hints.bg", colors.yellow)
config.set("colors.hints.match.fg", colors.red)
config.set("colors.keyhint.suffix.fg", colors.magenta)
config.set("colors.keyhint.fg", colors.magenta)
config.set("colors.keyhint.bg", colors.black)
config.set("colors.messages.error.fg", colors.black)
config.set("colors.messages.error.bg", colors.red)
config.set("colors.messages.error.border", colors.red)
config.set("colors.messages.warning.fg", colors.black)
config.set("colors.messages.warning.bg", colors.yellow)
config.set("colors.messages.warning.border", colors.yellow)
config.set("colors.messages.info.fg", colors.magenta)
config.set("colors.messages.info.bg", colors.black)
config.set("colors.messages.info.border", colors.black)
config.set("colors.prompts.fg", colors.white)
config.set("colors.prompts.bg", colors.black)
config.set("colors.prompts.border", colors.black)
config.set("colors.prompts.selected.fg", colors.magenta)
config.set("colors.prompts.selected.bg", colors.green)
config.set("colors.statusbar.normal.fg", colors.magenta)
config.set("colors.statusbar.normal.bg", colors.black)
config.set("colors.statusbar.insert.fg", colors.blue)
config.set("colors.statusbar.insert.bg", colors.black)
config.set("colors.statusbar.passthrough.fg", colors.green)
config.set("colors.statusbar.passthrough.bg", colors.black)
config.set("colors.statusbar.private.fg", colors.cyan)
config.set("colors.statusbar.private.bg", colors.black)
config.set("colors.statusbar.command.fg", colors.white)
config.set("colors.statusbar.command.bg", colors.black)
config.set("colors.statusbar.command.private.fg", colors.cyan)
config.set("colors.statusbar.command.private.bg", colors.red)
config.set("colors.statusbar.caret.fg", colors.magenta)
config.set("colors.statusbar.caret.bg", colors.black)
config.set("colors.statusbar.caret.selection.fg", colors.magenta)
config.set("colors.statusbar.caret.selection.bg", colors.black)
config.set("colors.statusbar.progress.bg", colors.magenta)
config.set("colors.statusbar.url.fg", colors.magenta)
config.set("colors.statusbar.url.hover.fg", colors.green)
config.set("colors.statusbar.url.success.http.fg", colors.yellow)
config.set("colors.statusbar.url.success.https.fg", colors.white)
config.set("colors.statusbar.url.warn.fg", colors.yellow)
config.set("colors.statusbar.url.error.fg", colors.red)
config.set("colors.tabs.bar.bg", colors.black)
config.set("colors.tabs.indicator.start", colors.magenta)
config.set("colors.tabs.indicator.stop", colors.blue)
config.set("colors.tabs.indicator.error", colors.red)
config.set("colors.tabs.odd.fg", colors.white)
config.set("colors.tabs.odd.bg", colors.black)
config.set("colors.tabs.even.fg", colors.white)
config.set("colors.tabs.even.bg", colors.black)
config.set("colors.tabs.pinned.even.fg", colors.white)
config.set("colors.tabs.pinned.even.bg", colors.black)
config.set("colors.tabs.pinned.odd.fg", colors.white)
config.set("colors.tabs.pinned.odd.bg", colors.black)
config.set("colors.tabs.pinned.selected.even.fg", colors.black)
config.set("colors.tabs.pinned.selected.even.bg", colors.blue)
config.set("colors.tabs.pinned.selected.odd.fg", colors.black)
config.set("colors.tabs.pinned.selected.odd.bg", colors.blue)
config.set("colors.tabs.selected.odd.fg", colors.black)
config.set("colors.tabs.selected.odd.bg", colors.blue)
config.set("colors.tabs.selected.even.fg", colors.black)
config.set("colors.tabs.selected.even.bg", colors.blue)
config.set("colors.webpage.bg", colors.white)
|
{
"content_hash": "5d3aaf139b62a718ef5a30321dc376dd",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 93,
"avg_line_length": 38.95073891625616,
"alnum_prop": 0.7361831288731504,
"repo_name": "ethan605/dotfiles",
"id": "816dd6beedcd5ec37c9c446a697d9175d7df8ee6",
"size": "7907",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dot_config/qutebrowser/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "586"
},
{
"name": "Lua",
"bytes": "26477"
},
{
"name": "Python",
"bytes": "7907"
},
{
"name": "Shell",
"bytes": "18513"
},
{
"name": "Vim Script",
"bytes": "11852"
}
],
"symlink_target": ""
}
|
from rdmo.core.plugins import get_plugins, get_plugin
def get_widgets():
widgets = get_plugins('QUESTIONS_WIDGETS')
return widgets.values()
def get_widget_types():
widgets = get_plugins('QUESTIONS_WIDGETS')
return [widget.key for widget in widgets.values()]
def get_widget_type_choices():
widgets = get_plugins('QUESTIONS_WIDGETS')
return [(widget.key, widget.label) for widget in widgets.values()]
def get_widget_class(key):
widget = get_plugin('QUESTIONS_WIDGETS', key)
return widget.widget_class
|
{
"content_hash": "90ba7366c097536c25ccd7e9019ca02a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7068645640074211,
"repo_name": "rdmorganiser/rdmo",
"id": "95f793d88498e201e2fec530f89b9f9dc8bdfd4b",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdmo/questions/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
}
|
from dppy.beta_ensembles import GinibreEnsemble
ginibre = GinibreEnsemble() # beta must be 2 (default)
ginibre.sample_full_model(size_N=40)
ginibre.plot(normalization=True)
ginibre.sample_full_model(size_N=1000)
ginibre.hist(normalization=True)
|
{
"content_hash": "1be8bb44671b571a6614f0bf1f4a1a66",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 25,
"alnum_prop": 0.792,
"repo_name": "guilgautier/DPPy",
"id": "cfc3c046d3b404411dc2100adca1dbc533963982",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/plots/ex_plot_ginibre_full_matrix_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "408222"
},
{
"name": "Python",
"bytes": "441355"
},
{
"name": "Shell",
"bytes": "1694"
}
],
"symlink_target": ""
}
|
"""Manages watermarks of PCollections and AppliedPTransforms."""
from __future__ import absolute_import
import threading
from apache_beam import pipeline
from apache_beam import pvalue
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
class WatermarkManager(object):
"""For internal use only; no backwards-compatibility guarantees.
Tracks and updates watermarks for all AppliedPTransforms."""
WATERMARK_POS_INF = MAX_TIMESTAMP
WATERMARK_NEG_INF = MIN_TIMESTAMP
def __init__(self, clock, root_transforms, value_to_consumers):
self._clock = clock # processing time clock
self._value_to_consumers = value_to_consumers
self._root_transforms = root_transforms
# AppliedPTransform -> TransformWatermarks
self._transform_to_watermarks = {}
for root_transform in root_transforms:
self._transform_to_watermarks[root_transform] = _TransformWatermarks(
self._clock)
for consumers in value_to_consumers.values():
for consumer in consumers:
self._transform_to_watermarks[consumer] = _TransformWatermarks(
self._clock)
for consumers in value_to_consumers.values():
for consumer in consumers:
self._update_input_transform_watermarks(consumer)
def _update_input_transform_watermarks(self, applied_ptransform):
assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
input_transform_watermarks = []
for input_pvalue in applied_ptransform.inputs:
assert input_pvalue.producer or isinstance(input_pvalue, pvalue.PBegin)
if input_pvalue.producer:
input_transform_watermarks.append(
self.get_watermarks(input_pvalue.producer))
self._transform_to_watermarks[
applied_ptransform].update_input_transform_watermarks(
input_transform_watermarks)
def get_watermarks(self, applied_ptransform):
"""Gets the input and output watermarks for an AppliedPTransform.
If the applied_ptransform has not processed any elements, return a
watermark with minimum value.
Args:
applied_ptransform: AppliedPTransform to get the watermarks for.
Returns:
A snapshot (TransformWatermarks) of the input watermark and output
watermark for the provided transform.
"""
# TODO(altay): Composite transforms should have a composite watermark. Until
# then they are represented by their last transform.
while applied_ptransform.parts:
applied_ptransform = applied_ptransform.parts[-1]
return self._transform_to_watermarks[applied_ptransform]
def update_watermarks(self, completed_committed_bundle, applied_ptransform,
timer_update, outputs, earliest_hold):
assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
self._update_pending(
completed_committed_bundle, applied_ptransform, timer_update, outputs)
tw = self.get_watermarks(applied_ptransform)
tw.hold(earliest_hold)
self._refresh_watermarks(applied_ptransform)
def _update_pending(self, input_committed_bundle, applied_ptransform,
timer_update, output_committed_bundles):
"""Updated list of pending bundles for the given AppliedPTransform."""
# Update pending elements. Filter out empty bundles. They do not impact
# watermarks and should not trigger downstream execution.
for output in output_committed_bundles:
if output.has_elements():
if output.pcollection in self._value_to_consumers:
consumers = self._value_to_consumers[output.pcollection]
for consumer in consumers:
consumer_tw = self._transform_to_watermarks[consumer]
consumer_tw.add_pending(output)
completed_tw = self._transform_to_watermarks[applied_ptransform]
completed_tw.update_timers(timer_update)
assert input_committed_bundle or applied_ptransform in self._root_transforms
if input_committed_bundle and input_committed_bundle.has_elements():
completed_tw.remove_pending(input_committed_bundle)
def _refresh_watermarks(self, applied_ptransform):
assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
tw = self.get_watermarks(applied_ptransform)
if tw.refresh():
for pval in applied_ptransform.outputs.values():
if isinstance(pval, pvalue.DoOutputsTuple):
pvals = (v for v in pval)
else:
pvals = (pval,)
for v in pvals:
if v in self._value_to_consumers: # If there are downstream consumers
consumers = self._value_to_consumers[v]
for consumer in consumers:
self._refresh_watermarks(consumer)
def extract_fired_timers(self):
all_timers = []
for applied_ptransform, tw in self._transform_to_watermarks.iteritems():
if tw.extract_fired_timers():
all_timers.append(applied_ptransform)
return all_timers
class _TransformWatermarks(object):
"""Tracks input and output watermarks for aan AppliedPTransform."""
def __init__(self, clock):
self._clock = clock
self._input_transform_watermarks = []
self._input_watermark = WatermarkManager.WATERMARK_NEG_INF
self._output_watermark = WatermarkManager.WATERMARK_NEG_INF
self._earliest_hold = WatermarkManager.WATERMARK_POS_INF
self._pending = set() # Scheduled bundles targeted for this transform.
self._fired_timers = False
self._lock = threading.Lock()
def update_input_transform_watermarks(self, input_transform_watermarks):
with self._lock:
self._input_transform_watermarks = input_transform_watermarks
def update_timers(self, timer_update):
with self._lock:
if timer_update:
assert self._fired_timers
self._fired_timers = False
@property
def input_watermark(self):
with self._lock:
return self._input_watermark
@property
def output_watermark(self):
with self._lock:
return self._output_watermark
def hold(self, value):
with self._lock:
if value is None:
value = WatermarkManager.WATERMARK_POS_INF
self._earliest_hold = value
def add_pending(self, pending):
with self._lock:
self._pending.add(pending)
def remove_pending(self, completed):
with self._lock:
# Ignore repeated removes. This will happen if a transform has a repeated
# input.
if completed in self._pending:
self._pending.remove(completed)
def refresh(self):
with self._lock:
pending_holder = (WatermarkManager.WATERMARK_NEG_INF
if self._pending else
WatermarkManager.WATERMARK_POS_INF)
input_watermarks = [
tw.output_watermark for tw in self._input_transform_watermarks]
input_watermarks.append(WatermarkManager.WATERMARK_POS_INF)
producer_watermark = min(input_watermarks)
self._input_watermark = max(self._input_watermark,
min(pending_holder, producer_watermark))
new_output_watermark = min(self._input_watermark, self._earliest_hold)
advanced = new_output_watermark > self._output_watermark
self._output_watermark = new_output_watermark
return advanced
@property
def synchronized_processing_output_time(self):
return self._clock.time()
def extract_fired_timers(self):
with self._lock:
if self._fired_timers:
return False
should_fire = (
self._earliest_hold < WatermarkManager.WATERMARK_POS_INF and
self._input_watermark == WatermarkManager.WATERMARK_POS_INF)
self._fired_timers = should_fire
return should_fire
|
{
"content_hash": "df9c8f154152acd3e652889f979bbc2d",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 80,
"avg_line_length": 36.66985645933014,
"alnum_prop": 0.6970250521920668,
"repo_name": "dhalperi/beam",
"id": "3a135397e12f0a2c7b1a1733b3602ad6840c89f6",
"size": "8449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/direct/watermark_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "42943"
},
{
"name": "Java",
"bytes": "11325979"
},
{
"name": "Protocol Buffer",
"bytes": "50080"
},
{
"name": "Python",
"bytes": "3022774"
},
{
"name": "Shell",
"bytes": "45521"
}
],
"symlink_target": ""
}
|
import urllib2
from google.appengine.api.images import Image
def downloadFile(url):
data = urllib2.urlopen(url).read()
return data
def getImageDimensions(file):
img = Image(file)
return img.width, img.height
def refreshImageDimensions(obj):
# obj can be either Author or Quote
if obj.img_url:
obj.img_width, obj.img_height = getImageDimensions(downloadFile(obj.img_url))
obj.put()
return True
|
{
"content_hash": "93ff5dc15551cdd62371bdd448988301",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 85,
"avg_line_length": 26.11764705882353,
"alnum_prop": 0.6959459459459459,
"repo_name": "pmylund/haveabit",
"id": "f1b109dbd3c3d889edc513fb3b688c12ccd561b7",
"size": "444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14893"
},
{
"name": "Python",
"bytes": "1226550"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
}
|
"""Home of the Sequential model, and the `save_model`/`load_model` functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers as layer_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine import topology
from tensorflow.python.keras._impl.keras.engine.topology import Input
from tensorflow.python.keras._impl.keras.engine.topology import Layer
from tensorflow.python.keras._impl.keras.engine.topology import TFBaseLayer
from tensorflow.python.keras._impl.keras.engine.training import Model
from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg
from tensorflow.python.keras._impl.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
def save_model(model, filepath, overwrite=True, include_optimizer=True):
"""Save a model to a HDF5 file.
The saved model contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus the saved model can be reinstantiated in
the exact same state, without any of the code
used for model definition or training.
Arguments:
model: Keras model instance to be saved.
filepath: String, path where to save the model.
overwrite: Whether we should overwrite any existing
model at the target location, or instead
ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
Raises:
ImportError: if h5py is not available.
"""
if h5py is None:
raise ImportError('`save_model` requires h5py.')
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
Arguments:
obj: the object to serialize
Returns:
JSON-serializable structure representing `obj`.
Raises:
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return {'type': type(obj), 'value': obj.tolist()}
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
from tensorflow.python.keras._impl.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
with h5py.File(filepath, mode='w') as f:
f.attrs['keras_version'] = str(keras_version).encode('utf8')
f.attrs['backend'] = K.backend().encode('utf8')
f.attrs['model_config'] = json.dumps(
{
'class_name': model.__class__.__name__,
'config': model.get_config()
},
default=get_json_type).encode('utf8')
model_weights_group = f.create_group('model_weights')
model_layers = model.layers
topology.save_weights_to_hdf5_group(model_weights_group, model_layers)
if include_optimizer and hasattr(model, 'optimizer'):
if isinstance(model.optimizer, optimizers.TFOptimizer):
logging.warning(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
'as part of the model save file.'
'You will have to compile your model again after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
f.attrs['training_config'] = json.dumps(
{
'optimizer_config': {
'class_name': model.optimizer.__class__.__name__,
'config': model.optimizer.get_config()
},
'loss': model.loss,
'metrics': model.metrics,
'sample_weight_mode': model.sample_weight_mode,
'loss_weights': model.loss_weights,
},
default=get_json_type).encode('utf8')
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for w, val in zip(symbolic_weights, weight_values):
name = str(w.name)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
f.flush()
def load_model(filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin
"""Loads a model saved via `save_model`.
Arguments:
filepath: String, path to the saved model.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
compile: Boolean, whether to compile the model
after loading.
Returns:
A Keras model instance. If an optimizer was found
as part of the saved model, the model is already
compiled. Otherwise, the model is uncompiled and
a warning will be displayed. When `compile` is set
to False, the compilation is omitted without any
warning.
Raises:
ImportError: if h5py is not available.
ValueError: In case of an invalid savefile.
"""
if h5py is None:
raise ImportError('`load_model` requires h5py.')
if not custom_objects:
custom_objects = {}
def convert_custom_objects(obj):
"""Handles custom object lookup.
Arguments:
obj: object, dict, or list.
Returns:
The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for key, value in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if obj in custom_objects:
return custom_objects[obj]
return obj
with h5py.File(filepath, mode='r') as f:
# instantiate model
model_config = f.attrs.get('model_config')
if model_config is None:
raise ValueError('No model found in config file.')
model_config = json.loads(model_config.decode('utf-8'))
model = model_from_config(model_config, custom_objects=custom_objects)
# set weights
topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)
# Early return if compilation is not required.
if not compile:
return model
# instantiate optimizer
training_config = f.attrs.get('training_config')
if training_config is None:
logging.warning('No training configuration found in save file: '
'the model was *not* compiled. Compile it manually.')
return model
training_config = json.loads(training_config.decode('utf-8'))
optimizer_config = training_config['optimizer_config']
optimizer = optimizers.deserialize(
optimizer_config, custom_objects=custom_objects)
# Recover loss functions and metrics.
loss = convert_custom_objects(training_config['loss'])
metrics = convert_custom_objects(training_config['metrics'])
sample_weight_mode = training_config['sample_weight_mode']
loss_weights = training_config['loss_weights']
# Compile model.
model.compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=sample_weight_mode)
# Set optimizer weights.
if 'optimizer_weights' in f:
# Build train function (to get weight updates).
if isinstance(model, Sequential):
model.model._make_train_function()
else:
model._make_train_function()
optimizer_weights_group = f['optimizer_weights']
optimizer_weight_names = [
n.decode('utf8')
for n in optimizer_weights_group.attrs['weight_names']
]
optimizer_weight_values = [
optimizer_weights_group[n] for n in optimizer_weight_names
]
try:
model.optimizer.set_weights(optimizer_weight_values)
except ValueError:
logging.warning('Error in loading the saved optimizer '
'state. As a result, your model is '
'starting with a freshly initialized '
'optimizer.')
return model
def model_from_config(config, custom_objects=None):
"""Instantiates a Keras model from its config.
Arguments:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
"""
if isinstance(config, list):
raise TypeError('`model_from_config` expects a dictionary, not a list. '
'Maybe you meant to use '
'`Sequential.from_config(config)`?')
return layer_module.deserialize(config, custom_objects=custom_objects)
def model_from_yaml(yaml_string, custom_objects=None):
"""Parses a yaml model configuration file and returns a model instance.
Arguments:
yaml_string: YAML string encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError('Requires yaml module installed.')
config = yaml.load(yaml_string)
return layer_module.deserialize(config, custom_objects=custom_objects)
def model_from_json(json_string, custom_objects=None):
"""Parses a JSON model configuration file and returns a model instance.
Arguments:
json_string: JSON string encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
config = json.loads(json_string)
return layer_module.deserialize(config, custom_objects=custom_objects)
class Sequential(Model):
"""Linear stack of layers.
Arguments:
layers: list of layers to add to the model.
# Note
The first layer passed to a Sequential model
should have a defined input shape. What that
means is that it should have received an `input_shape`
or `batch_input_shape` argument,
or for some type of layers (recurrent, Dense...)
an `input_dim` argument.
Example:
```python
model = Sequential()
# first layer must have a defined input shape
model.add(Dense(32, input_dim=500))
# afterwards, Keras does automatic shape inference
model.add(Dense(32))
# also possible (equivalent to the above):
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(32))
# also possible (equivalent to the above):
model = Sequential()
# here the batch dimension is None,
# which means any batch size will be accepted by the model.
model.add(Dense(32, batch_input_shape=(None, 500)))
model.add(Dense(32))
```
"""
def __init__(self, layers=None, name=None):
self.layers = [] # Stack of layers.
self.model = None # Internal Model instance.
self.inputs = [] # List of input tensors
self.outputs = [] # List of length 1: the output tensor (unique).
self._trainable = True
self._initial_weights = None
self._input_layers = []
# Model attributes.
self.inbound_nodes = []
self.outbound_nodes = []
self.built = False
# Set model name.
if not name:
prefix = 'sequential_'
name = prefix + str(K.get_uid(prefix))
self.name = name
# Used by Layer base class.
self._dtype = None
# The following properties are not actually used by Keras;
# they exist for compatibility with TF's variable scoping mechanism.
self._updates = []
self._losses = []
self._scope = None
self._reuse = None
self._base_name = name
self._graph = ops.get_default_graph()
# Add to the model any layers passed to the constructor.
if layers:
for layer in layers:
self.add(layer)
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Arguments:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
if not isinstance(layer, (Layer, TFBaseLayer)):
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
if not self.outputs:
# first layer in model: check that it is an input layer
if not layer.inbound_nodes:
# create an input layer
if not hasattr(layer, 'batch_input_shape'):
raise ValueError('The first layer in a '
'Sequential model must '
'get an `input_shape` or '
'`batch_input_shape` argument.')
# Instantiate the input layer.
x = Input(
batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
if len(layer.inbound_nodes) != 1:
raise ValueError('A layer added to a Sequential model must '
'not already be connected somewhere else. '
'Model received layer ' + layer.name + ' which has ' +
str(len(layer.inbound_nodes)) +
' pre-existing inbound connections.')
if len(layer.inbound_nodes[0].output_tensors) != 1:
raise ValueError('All layers in a Sequential model '
'should have a single output tensor. '
'For multi-output layers, '
'use the functional API.')
self.outputs = [layer.inbound_nodes[0].output_tensors[0]]
self.inputs = topology.get_source_inputs(self.outputs[0])
# We create an input node, which we will keep updated
# as we add more layers
topology.Node(
outbound_layer=self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=self.inputs,
output_tensors=self.outputs)
else:
output_tensor = layer(self.outputs[0])
if isinstance(output_tensor, list):
raise TypeError('All layers in a Sequential model '
'should have a single output tensor. '
'For multi-output layers, '
'use the functional API.')
self.outputs = [output_tensor]
# update self.inbound_nodes
self.inbound_nodes[0].output_tensors = self.outputs
self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
self.layers.append(layer)
self.built = False
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
self.layers.pop()
if not self.layers:
self.outputs = []
self.inbound_nodes = []
self.outbound_nodes = []
else:
self.layers[-1].outbound_nodes = []
self.outputs = [self.layers[-1].output]
# update self.inbound_nodes
self.inbound_nodes[0].output_tensors = self.outputs
self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
self.built = False
def get_layer(self, name=None, index=None):
"""Retrieve a layer that is part of the model.
Returns a layer based on either its name (unique)
or its index in the graph. Indices are based on
order of horizontal graph traversal (bottom-up).
Arguments:
name: string, name of layer.
index: integer, index of layer.
Returns:
A layer instance.
"""
if not self.built:
self.build()
return self.model.get_layer(name, index)
def call(self, inputs, mask=None):
if not self.built:
self.build()
return self.model.call(inputs, mask)
def build(self, input_shape=None):
if not self.inputs or not self.outputs:
raise TypeError('Sequential model cannot be built: model is empty.'
' Add some layers first.')
# actually create the model
self.model = Model(self.inputs, self.outputs[0], name=self.name + '_model')
self.model.trainable = self.trainable
# mirror model attributes
self.supports_masking = self.model.supports_masking
self._output_mask_cache = self.model._output_mask_cache
self._output_tensor_cache = self.model._output_tensor_cache
self._output_shape_cache = self.model._output_shape_cache
self._input_layers = self.model._input_layers
self._output_layers = self.model._output_layers
self._input_coordinates = self.model._input_coordinates
self._output_coordinates = self.model._output_coordinates
self._nodes_by_depth = self.model._nodes_by_depth
self._network_nodes = self.model._network_nodes
self.output_names = self.model.output_names
self.input_names = self.model.input_names
self._feed_input_names = self.model._feed_input_names
self._feed_inputs = self.model._feed_inputs
# Make sure child model callbacks
# will call the parent Sequential model.
self.model.callback_model = self
self.built = True
@property
def uses_learning_phase(self):
if not self.built:
self.build()
return self.model.uses_learning_phase
def _gather_list_attr(self, attr):
all_attrs = []
for layer in self.layers:
all_attrs += getattr(layer, attr, [])
return all_attrs
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
if self.model:
self.model.trainable = value
self._trainable = value
@property
def trainable_weights(self):
if not self.trainable:
return []
return self._gather_list_attr('trainable_weights')
@property
def non_trainable_weights(self):
weights = self._gather_list_attr('non_trainable_weights')
if not self.trainable:
trainable_weights = self._gather_list_attr('trainable_weights')
return trainable_weights + weights
return weights
@property
def updates(self):
if not self.built:
self.build()
return self.model.updates
@property
def state_updates(self):
if not self.built:
self.build()
return self.model.state_updates
def get_updates_for(self, inputs):
if not self.built:
self.build()
return self.model.get_updates_for(inputs)
@property
def losses(self):
if not self.built:
self.build()
return self.model.losses
def get_losses_for(self, inputs):
if not self.built:
self.build()
return self.model.get_losses_for(inputs)
@property
def regularizers(self):
if not self.built:
self.build()
return self.model.regularizers
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays
(one array per model weight).
"""
if not self.built:
self.build()
return self.model.get_weights()
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: Should be a list
of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
if not self.built:
self.build()
self.model.set_weights(weights)
def load_weights(self, filepath, by_name=False):
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
layers = self.layers
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
def save_weights(self, filepath, overwrite=True):
if h5py is None:
raise ImportError('`save_weights` requires h5py.')
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
layers = self.layers
f = h5py.File(filepath, 'w')
topology.save_weights_to_hdf5_group(f, layers)
f.flush()
f.close()
def compile(self,
optimizer,
loss,
metrics=None,
sample_weight_mode=None,
weighted_metrics=None,
**kwargs):
"""Configures the learning process.
Arguments:
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [losses](/losses).
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
See [metrics](/metrics).
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
weighted_metrics: list of metrics to be evaluated and weighted
by `sample_weight` or `class_weight` during training and testing.
**kwargs: These are passed into `tf.Session.run`.
Example:
```python
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
"""
# create the underlying model
self.build()
# call compile method of Model class
self.model.compile(
optimizer,
loss,
metrics=metrics,
sample_weight_mode=sample_weight_mode,
weighted_metrics=weighted_metrics,
**kwargs)
self.optimizer = self.model.optimizer
self.loss = self.model.loss
self.total_loss = self.model.total_loss
self.loss_weights = self.model.loss_weights
self.metrics = self.model.metrics
self.weighted_metrics = self.model.weighted_metrics
self.metrics_tensors = self.model.metrics_tensors
self.metrics_names = self.model.metrics_names
self.sample_weight_mode = self.model.sample_weight_mode
self.sample_weights = self.model.sample_weights
self.targets = self.model.targets
def fit(self,
x,
y,
batch_size=32,
epochs=10,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0):
"""Trains the model for a fixed number of epochs.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
batch_size: integer. Number of samples per gradient update.
epochs: integer, the number of epochs to train the model.
verbose: 0 for no logging to stdout,
1 for progress bar logging, 2 for one log line per epoch.
callbacks: list of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: float (0. < x < 1).
Fraction of the data to use as held-out validation data.
validation_data: tuple (x_val, y_val) or tuple
(x_val, y_val, val_sample_weights) to be used as held-out
validation data. Will override validation_split.
shuffle: boolean or str (for 'batch').
Whether to shuffle the samples at each epoch.
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
class_weight: dictionary mapping classes to a weight value,
used for scaling the loss function (during training only).
sample_weight: Numpy array of weights for
the training samples, used for scaling the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: if the model was never compiled.
"""
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.fit(
x,
y,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
shuffle=shuffle,
class_weight=class_weight,
sample_weight=sample_weight,
initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Computes the loss on some input data, batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
batch_size: integer. Number of samples per gradient update.
verbose: verbosity mode, 0 or 1.
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.evaluate(
x,
y,
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight)
def predict(self, x, batch_size=32, verbose=0):
"""Generates output predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: the input data, as a Numpy array.
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of predictions.
"""
if not self.built:
self.build()
return self.model.predict(x, batch_size=batch_size, verbose=verbose)
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
Returns:
A Numpy array of predictions.
"""
if not self.built:
self.build()
return self.model.predict_on_batch(x)
def train_on_batch(self, x, y, class_weight=None, sample_weight=None):
"""Single gradient update over one batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
class_weight: dictionary mapping classes to a weight value,
used for scaling the loss function (during training only).
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar training loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
def test_on_batch(self, x, y, sample_weight=None):
"""Evaluates the model over a single batch of samples.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
y: labels, as a Numpy array.
sample_weight: sample weights, as a Numpy array.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
"""
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.test_on_batch(x, y, sample_weight=sample_weight)
def predict_proba(self, x, batch_size=32, verbose=1):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=1):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
initial_epoch=0,
**kwargs):
"""Fits the model on data generated batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
Arguments:
generator: A generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of unique samples of your dataset
divided by the batch size.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- A generator for the validation data
- A tuple (inputs, targets)
- A tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator.
Number of steps to yield from validation generator
at the end of every epoch. It should typically
be equal to the number of unique samples of your
validation dataset divided by the batch size.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Maximum size for the generator queue
workers: Maximum number of processes to spin up
use_multiprocessing: If True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
**kwargs: support for legacy arguments.
Returns:
A `History` object.
Raises:
RuntimeError: if the model was never compiled.
ValueError: In case the generator yields
data in an invalid format.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create Numpy arrays of input data
# and labels, from each line in the file
x, y = process_line(line)
yield (x, y)
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=1000, epochs=10)
```
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.fit_generator(
generator,
steps_per_epoch,
epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
use_multiprocessing: if True, use process based threading.
Note that because this implementation
relies on multiprocessing, you should not pass
non picklable arguments to the generator
as they can't be passed easily to children processes.
**kwargs: support for legacy arguments.
Returns:
Scalar test loss (if the model has no metrics)
or list of scalars (if the model computes other metrics).
The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
RuntimeError: if the model was never compiled.
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if not self.built:
raise RuntimeError('The model needs to be compiled ' 'before being used.')
return self.model.evaluate_generator(
generator,
steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0,
**kwargs):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: generator yielding batches of input samples.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
use_multiprocessing: if True, use process based threading.
Note that because this implementation
relies on multiprocessing, you should not pass
non picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
**kwargs: support for legacy arguments.
Returns:
A Numpy array of predictions.
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
if not self.built:
self.build()
return self.model.predict_generator(
generator,
steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def get_config(self):
config = []
for layer in self.layers:
config.append({
'class_name': layer.__class__.__name__,
'config': layer.get_config()
})
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
model = cls()
for conf in config:
layer = layer_module.deserialize(conf, custom_objects=custom_objects)
model.add(layer)
return model
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(
batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for original_input_layer, cloned_input_layer in zip(model._input_layers,
input_layers):
layer_map[original_input_layer] = cloned_input_layer
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = topology._to_list(input_tensors)
input_tensors_ = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x, name='input_wrapper_for_' + name)
input_tensors_.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
input_tensors_.append(x)
input_tensors = input_tensors_
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
# Iterated over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, topology.InputLayer):
continue
# Gather inputs to call the new layer.
referenceinput_tensors_ = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in referenceinput_tensors_:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(referenceinput_tensors_):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = topology._to_list(layer(computed_tensor, **kwargs))
output_masks = topology._to_list(
layer.compute_mask(computed_tensor, computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = topology._to_list(layer(computed_tensors, **kwargs))
output_masks = topology._to_list(
layer.compute_mask(computed_tensors, computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors, output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(topology._to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = topology._to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, topology.InputLayer):
return Sequential(layers=[origin_layer] + layers, name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
|
{
"content_hash": "ceb9a8980d3b2155b5fa3f880f058936",
"timestamp": "",
"source": "github",
"line_count": 1445,
"max_line_length": 117,
"avg_line_length": 35.80968858131488,
"alnum_prop": 0.6270557541791477,
"repo_name": "tornadozou/tensorflow",
"id": "fce86dd565b463e080fed32d496a1169ce2afd49",
"size": "52469",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/_impl/keras/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29734773"
},
{
"name": "CMake",
"bytes": "647266"
},
{
"name": "Go",
"bytes": "976912"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "276756"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26531000"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373122"
}
],
"symlink_target": ""
}
|
import os
import sys
import sysconfig
import copy
import ctypes
import ctypes.util
import warnings
from functools import wraps
import numpy
from numpy.ctypeslib import ndpointer
from scipy import interpolate
from galpy.util import multi, galpyWarning
from galpy.potential_src.Potential import Potential
_DEBUG= False
#Find and load the library
_lib= None
outerr= None
PY3= sys.version > '3'
if PY3: #pragma: no cover
_ext_suffix= sysconfig.get_config_var('EXT_SUFFIX')
else:
_ext_suffix= '.so'
for path in sys.path:
try:
_lib = ctypes.CDLL(os.path.join(path,'galpy_interppotential_c%s' % _ext_suffix))
except OSError as e:
if os.path.exists(os.path.join(path,'galpy_interppotential_c%s' % _ext_suffix)): #pragma: no cover
outerr= e
_lib = None
else:
break
if _lib is None: #pragma: no cover
if not outerr is None:
warnings.warn("interppotential_c extension module not loaded, because of error '%s' " % outerr,
galpyWarning)
else:
warnings.warn("interppotential_c extension module not loaded, because galpy_actionAngle_c%s image was not found" % _ext_suffix,
galpyWarning)
ext_loaded= False
else:
ext_loaded= True
def scalarVectorDecorator(func):
"""Decorator to return scalar outputs as a set"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #only if both R and z are scalars
scalarOut= True
args= (args[0],numpy.array([args[1]]),numpy.array([args[2]]))
elif numpy.array(args[1]).shape == () \
and not numpy.array(args[2]).shape == (): #R scalar, z vector
scalarOut= False
args= (args[0],args[1]*numpy.ones_like(args[2]),args[2])
elif not numpy.array(args[1]).shape == () \
and numpy.array(args[2]).shape == (): #R vector, z scalar
scalarOut= False
args= (args[0],args[1],args[2]*numpy.ones_like(args[1]))
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
def zsymDecorator(odd):
"""Decorator to deal with zsym=True input; set odd=True if the function is an odd function of z (like zforce)"""
def wrapper(func):
@wraps(func)
def zsym_wrapper(*args,**kwargs):
if args[0]._zsym:
out= func(args[0],args[1],numpy.fabs(args[2]),**kwargs)
else:
out= func(*args,**kwargs)
if odd and args[0]._zsym:
return sign(args[2])*out
else:
return out
return zsym_wrapper
return wrapper
def scalarDecorator(func):
"""Decorator to return scalar output for 1D functions (vcirc,etc.)"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[1]).shape == ():
scalarOut= True
args= (args[0],numpy.array([args[1]]))
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
class interpRZPotential(Potential):
"""Class that interpolates a given potential on a grid for fast orbit integration"""
def __init__(self,
RZPot=None,rgrid=(numpy.log(0.01),numpy.log(20.),101),
zgrid=(0.,1.,101),logR=True,
interpPot=False,interpRforce=False,interpzforce=False,
interpDens=False,
interpvcirc=False,
interpdvcircdr=False,
interpepifreq=False,interpverticalfreq=False,
use_c=False,enable_c=False,zsym=True,
numcores=None):
"""
NAME:
__init__
PURPOSE:
Initialize an interpRZPotential instance
INPUT:
RZPot - RZPotential to be interpolated
rgrid - R grid to be given to linspace as in rs= linspace(*rgrid)
zgrid - z grid to be given to linspace as in zs= linspace(*zgrid)
logR - if True, rgrid is in the log of R so logrs= linspace(*rgrid)
interpPot, interpRforce, interpzforce, interpDens,interpvcirc, interpepifreq, interpverticalfreq, interpdvcircdr= if True, interpolate these functions
use_c= use C to speed up the calculation of the grid
enable_c= enable use of C for interpolations
zsym= if True (default), the potential is assumed to be symmetric around z=0 (so you can use, e.g., zgrid=(0.,1.,101)).
numcores= if set to an integer, use this many cores (only used for vcirc, dvcircdR, epifreq, and verticalfreq; NOT NECESSARILY FASTER, TIME TO MAKE SURE)
OUTPUT:
instance
HISTORY:
2010-07-21 - Written - Bovy (NYU)
2013-01-24 - Started with new implementation - Bovy (IAS)
"""
if isinstance(RZPot,interpRZPotential):
from galpy.potential import PotentialError
raise PotentialError('Cannot setup interpRZPotential with another interpRZPotential')
Potential.__init__(self,amp=1.)
self._origPot= RZPot
self._rgrid= numpy.linspace(*rgrid)
self._logR= logR
if self._logR:
self._rgrid= numpy.exp(self._rgrid)
self._logrgrid= numpy.log(self._rgrid)
self._zgrid= numpy.linspace(*zgrid)
self._interpPot= interpPot
self._interpRforce= interpRforce
self._interpzforce= interpzforce
self._interpDens= interpDens
self._interpvcirc= interpvcirc
self._interpdvcircdr= interpdvcircdr
self._interpepifreq= interpepifreq
self._interpverticalfreq= interpverticalfreq
self._enable_c= enable_c*ext_loaded
self.hasC= self._enable_c
self._zsym= zsym
if interpPot:
if use_c*ext_loaded:
self._potGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid)
else:
from galpy.potential import evaluatePotentials
potGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
potGrid[ii,jj]= evaluatePotentials(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._potGrid= potGrid
if self._logR:
self._potInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._potGrid,
kx=3,ky=3,s=0.)
else:
self._potInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._potGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._potGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._potGrid)
if interpRforce:
if use_c*ext_loaded:
self._rforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,rforce=True)
else:
from galpy.potential import evaluateRforces
rforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
rforceGrid[ii,jj]= evaluateRforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._rforceGrid= rforceGrid
if self._logR:
self._rforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._rforceGrid,
kx=3,ky=3,s=0.)
else:
self._rforceInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._rforceGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._rforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._rforceGrid)
if interpzforce:
if use_c*ext_loaded:
self._zforceGrid, err= calc_potential_c(self._origPot,self._rgrid,self._zgrid,zforce=True)
else:
from galpy.potential import evaluatezforces
zforceGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
zforceGrid[ii,jj]= evaluatezforces(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._zforceGrid= zforceGrid
if self._logR:
self._zforceInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
self._zforceGrid,
kx=3,ky=3,s=0.)
else:
self._zforceInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
self._zforceGrid,
kx=3,ky=3,s=0.)
if enable_c*ext_loaded:
self._zforceGrid_splinecoeffs= calc_2dsplinecoeffs_c(self._zforceGrid)
if interpDens:
from galpy.potential import evaluateDensities
densGrid= numpy.zeros((len(self._rgrid),len(self._zgrid)))
for ii in range(len(self._rgrid)):
for jj in range(len(self._zgrid)):
densGrid[ii,jj]= evaluateDensities(self._rgrid[ii],self._zgrid[jj],self._origPot)
self._densGrid= densGrid
if self._logR:
self._densInterp= interpolate.RectBivariateSpline(self._logrgrid,
self._zgrid,
numpy.log(self._densGrid+10.**-10.),
kx=3,ky=3,s=0.)
else:
self._densInterp= interpolate.RectBivariateSpline(self._rgrid,
self._zgrid,
numpy.log(self._densGrid+10.**-10.),
kx=3,ky=3,s=0.)
if interpvcirc:
from galpy.potential import vcirc
if not numcores is None:
self._vcircGrid= multi.parallel_map((lambda x: vcirc(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._vcircGrid= numpy.array([vcirc(self._origPot,r) for r in self._rgrid])
if self._logR:
self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._vcircGrid,k=3)
else:
self._vcircInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._vcircGrid,k=3)
if interpdvcircdr:
from galpy.potential import dvcircdR
if not numcores is None:
self._dvcircdrGrid= multi.parallel_map((lambda x: dvcircdR(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._dvcircdrGrid= numpy.array([dvcircdR(self._origPot,r) for r in self._rgrid])
if self._logR:
self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._dvcircdrGrid,k=3)
else:
self._dvcircdrInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._dvcircdrGrid,k=3)
if interpepifreq:
from galpy.potential import epifreq
if not numcores is None:
self._epifreqGrid= numpy.array(multi.parallel_map((lambda x: epifreq(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores))
else:
self._epifreqGrid= numpy.array([epifreq(self._origPot,r) for r in self._rgrid])
indx= True-numpy.isnan(self._epifreqGrid)
if numpy.sum(indx) < 4:
if self._logR:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=1)
else:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=1)
else:
if self._logR:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid[indx],self._epifreqGrid[indx],k=3)
else:
self._epifreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid[indx],self._epifreqGrid[indx],k=3)
if interpverticalfreq:
from galpy.potential import verticalfreq
if not numcores is None:
self._verticalfreqGrid= multi.parallel_map((lambda x: verticalfreq(self._origPot,self._rgrid[x])),
list(range(len(self._rgrid))),numcores=numcores)
else:
self._verticalfreqGrid= numpy.array([verticalfreq(self._origPot,r) for r in self._rgrid])
if self._logR:
self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._logrgrid,self._verticalfreqGrid,k=3)
else:
self._verticalfreqInterp= interpolate.InterpolatedUnivariateSpline(self._rgrid,self._verticalfreqGrid,k=3)
return None
@scalarVectorDecorator
@zsymDecorator(False)
def _evaluate(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluatePotentials
if self._interpPot:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_potential_c(self,R[indx],z[indx])[0]/self._amp
else:
if self._logR:
out[indx]= self._potInterp.ev(numpy.log(R[indx]),z[indx])
else:
out[indx]= self._potInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluatePotentials(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluatePotentials(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(False)
def _Rforce(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateRforces
if self._interpRforce:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_force_c(self,R[indx],z[indx])[0]/self._amp
else:
if self._logR:
out[indx]= self._rforceInterp.ev(numpy.log(R[indx]),z[indx])
else:
out[indx]= self._rforceInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluateRforces(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluateRforces(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(True)
def _zforce(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluatezforces
if self._interpzforce:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._enable_c:
out[indx]= eval_force_c(self,R[indx],z[indx],
zforce=True)[0]/self._amp
else:
if self._logR:
out[indx]= self._zforceInterp.ev(numpy.log(R[indx]),
z[indx])
else:
out[indx]= self._zforceInterp.ev(R[indx],z[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluatezforces(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluatezforces(R,z,self._origPot)
def _Rzderiv(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateRzderivs
return evaluateRzderivs(R,z,self._origPot)
@scalarVectorDecorator
@zsymDecorator(False)
def _dens(self,R,z,phi=0.,t=0.):
from galpy.potential import evaluateDensities
if self._interpDens:
out= numpy.empty_like(R)
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])\
*(z <= self._zgrid[-1])*(z >= self._zgrid[0])
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= numpy.exp(self._densInterp.ev(numpy.log(R[indx]),z[indx]))-10.**-10.
else:
out[indx]= numpy.exp(self._densInterp.ev(R[indx],z[indx]))-10.**-10.
if numpy.sum(True-indx) > 0:
out[True-indx]= evaluateDensities(R[True-indx],
z[True-indx],
self._origPot)
return out
else:
return evaluateDensities(R,z,self._origPot)
@scalarDecorator
def vcirc(self,R):
from galpy.potential import vcirc
if self._interpvcirc:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._vcircInterp(numpy.log(R[indx]))
else:
out[indx]= self._vcircInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= vcirc(self._origPot,R[True-indx])
return out
else:
return vcirc(self._origPot,R)
@scalarDecorator
def dvcircdR(self,R):
from galpy.potential import dvcircdR
if self._interpdvcircdr:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._dvcircdrInterp(numpy.log(R[indx]))
else:
out[indx]= self._dvcircdrInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= dvcircdR(self._origPot,R[True-indx])
return out
else:
return dvcircdR(self._origPot,R)
@scalarDecorator
def epifreq(self,R):
from galpy.potential import epifreq
if self._interpepifreq:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._epifreqInterp(numpy.log(R[indx]))
else:
out[indx]= self._epifreqInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= epifreq(self._origPot,R[True-indx])
return out
else:
return epifreq(self._origPot,R)
@scalarDecorator
def verticalfreq(self,R):
from galpy.potential import verticalfreq
if self._interpverticalfreq:
indx= (R >= self._rgrid[0])*(R <= self._rgrid[-1])
out= numpy.empty_like(R)
if numpy.sum(indx) > 0:
if self._logR:
out[indx]= self._verticalfreqInterp(numpy.log(R[indx]))
else:
out[indx]= self._verticalfreqInterp(R[indx])
if numpy.sum(True-indx) > 0:
out[True-indx]= verticalfreq(self._origPot,R[True-indx])
return out
else:
return verticalfreq(self._origPot,R)
def calc_potential_c(pot,R,z,rforce=False,zforce=False):
"""
NAME:
calc_potential_c
PURPOSE:
Use C to calculate the potential on a grid
INPUT:
pot - Potential or list of such instances
R - grid in R
z - grid in z
rforce=, zforce= if either of these is True, calculate the radial or vertical force instead
OUTPUT:
potential on the grid (2D array)
HISTORY:
2013-01-24 - Written - Bovy (IAS)
2013-01-29 - Added forces - Bovy (IAS)
"""
from galpy.orbit_src.integrateFullOrbit import _parse_pot #here bc otherwise there is an infinite loop
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot)
#Set up result arrays
out= numpy.empty((len(R),len(z)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
if rforce:
interppotential_calc_potentialFunc= _lib.calc_rforce
elif zforce:
interppotential_calc_potentialFunc= _lib.calc_zforce
else:
interppotential_calc_potentialFunc= _lib.calc_potential
interppotential_calc_potentialFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_potentialFunc(len(R),
R,
len(z),
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
def calc_2dsplinecoeffs_c(array2d):
"""
NAME:
calc_2dsplinecoeffs_c
PURPOSE:
Use C to calculate spline coefficients for a 2D array
INPUT:
array2d
OUTPUT:
new array with spline coeffs
HISTORY:
2013-01-24 - Written - Bovy (IAS)
"""
#Set up result arrays
out= copy.copy(array2d)
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
interppotential_calc_2dsplinecoeffs= _lib.samples_to_coefficients
interppotential_calc_2dsplinecoeffs.argtypes= [ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ctypes.c_int]
#Run the C code
interppotential_calc_2dsplinecoeffs(out,out.shape[1],out.shape[0])
return out
def eval_potential_c(pot,R,z):
"""
NAME:
eval_potential_c
PURPOSE:
Use C to evaluate the interpolated potential
INPUT:
pot - Potential or list of such instances
R - array
z - array
OUTPUT:
potential evaluated R and z
HISTORY:
2013-01-24 - Written - Bovy (IAS)
"""
from galpy.orbit_src.integrateFullOrbit import _parse_pot #here bc otherwise there is an infinite loop
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
#Set up result arrays
out= numpy.empty((len(R)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
interppotential_calc_potentialFunc= _lib.eval_potential
interppotential_calc_potentialFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_potentialFunc(len(R),
R,
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
def eval_force_c(pot,R,z,zforce=False):
"""
NAME:
eval_force_c
PURPOSE:
Use C to evaluate the interpolated potential's forces
INPUT:
pot - Potential or list of such instances
R - array
z - array
zforce= if True, return the vertical force, otherwise return the radial force
OUTPUT:
force evaluated R and z
HISTORY:
2013-01-29 - Written - Bovy (IAS)
"""
from galpy.orbit_src.integrateFullOrbit import _parse_pot #here bc otherwise there is an infinite loop
#Parse the potential
npot, pot_type, pot_args= _parse_pot(pot)
#Set up result arrays
out= numpy.empty((len(R)))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
if zforce:
interppotential_calc_forceFunc= _lib.eval_zforce
else:
interppotential_calc_forceFunc= _lib.eval_rforce
interppotential_calc_forceFunc.argtypes= [ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [R.flags['F_CONTIGUOUS'],
z.flags['F_CONTIGUOUS']]
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
out= numpy.require(out,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
interppotential_calc_forceFunc(len(R),
R,
z,
ctypes.c_int(npot),
pot_type,
pot_args,
out,
ctypes.byref(err))
#Reset input arrays
if f_cont[0]: R= numpy.asfortranarray(R)
if f_cont[1]: z= numpy.asfortranarray(z)
return (out,err.value)
def sign(x):
out= numpy.ones_like(x)
out[(x < 0.)]= -1.
return out
|
{
"content_hash": "6820516f3c0fb9a2ecca6451aa380ddf",
"timestamp": "",
"source": "github",
"line_count": 696,
"max_line_length": 164,
"avg_line_length": 43.52729885057471,
"alnum_prop": 0.5090278923914837,
"repo_name": "followthesheep/galpy",
"id": "6075ba15629d8bf6fc9a3e31bc9e844a50424f22",
"size": "30295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galpy/potential_src/interpRZPotential.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "264645"
},
{
"name": "C++",
"bytes": "5290"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "2160181"
},
{
"name": "Shell",
"bytes": "524"
}
],
"symlink_target": ""
}
|
from pyboleto.data import BoletoData, CustomProperty
class BoletoBanrisul(BoletoData):
conta_cedente = CustomProperty('conta_cedente', 6)
nosso_numero = CustomProperty('nosso_numero', 8)
def __init__(self):
BoletoData.__init__(self)
self.codigo_banco = "041"
self.logo_image = "logo_banrisul.jpg"
@property
def campo_livre(self):
content = '21%04d%07d%08d40' % (int(self.agencia_cedente),
int(self.conta_cedente),
int(self.nosso_numero))
return '%s%s' % (content, self._dv_campo_livre(content))
# From http://jrimum.org/bopepo/browser/trunk/src/br/com/nordestefomento/
# jrimum/bopepo/campolivre/AbstractCLBanrisul.java
def _dv_campo_livre(self, campo_livre):
dv = self.modulo10(campo_livre)
while True:
restoMod11 = self.modulo11(campo_livre + str(dv), 7, 1)
if restoMod11 != 1:
break
dv += 1
dv %= 10
return str(dv) + str(11 - restoMod11)
|
{
"content_hash": "9aea34154e379bd772256b0c3027a022",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 35.064516129032256,
"alnum_prop": 0.5712971481140754,
"repo_name": "eduardocereto/pyboleto",
"id": "9f82573494658440432d3bc80e9ac20afcbc53f5",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyboleto/bank/banrisul.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8439"
},
{
"name": "Python",
"bytes": "125169"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from collections import defaultdict, MutableMapping
from operator import getitem, add
from datetime import datetime
from time import time
from ..core import istask, ishashable
class Store(MutableMapping):
""" Store - A storage of data and computation
Examples
--------
Store data like a dictionary
>>> import dask.store as ds
>>> s = ds.Store()
>>> s['x'] = 10
>>> s['x']
10
Also store computation on that data
>>> s['y'] = (add, 'x', 5)
Accessing these keys results in computations. Results may be cached for
reuse.
>>> s['y']
15
Design
------
A Store maintains the following state
dsk: dict
A dask to define all computation
cache: dict-like
Stores both ground data and cached intermediate values
data: set
The keys in the cache that can not be removed for correctness.
compute_time: dict:: {key: float}
dict mapping the time it took to compute each key
access_times: dict:: {key: [datetimes]}
The times at which a key was accessed
"""
def __init__(self, cache=None):
self.dsk = dict()
if cache is None:
cache = dict()
self.cache = cache
self.data = set()
self.compute_time = dict()
self.access_times = defaultdict(list)
def __setitem__(self, key, value):
if key in self.dsk:
if (self.dsk[key] == value or
self.dsk[key] == (getitem, self.cache, key) and
self.cache[key] == value):
return
else:
raise KeyError("Can not overwrite data")
if istask(value):
self.dsk[key] = value
else:
self.cache[key] = value
self.dsk[key] = (getitem, self.cache, key)
self.data.add(key)
def __getitem__(self, key):
if isinstance(key, list):
return (self[item] for item in key)
if not ishashable(key):
return key
if key not in self.dsk:
return key
self.access_times[key].append(datetime.now())
if key in self.cache:
return self.cache[key]
task = self.dsk[key]
func, args = task[0], task[1:]
if func == getitem and args[0] is self.cache:
return self.cache[args[1]]
args = [self[arg] for arg in args]
start = time()
result = func(*args)
end = time()
self.cache[key] = result
self.compute_time[key] = end - start
return result
def __len__(self):
return len(self.dsk)
def __iter__(self):
return iter(self.dsk)
def __delitem__(self, key):
raise ValueError("Dask Store does not support deletion")
|
{
"content_hash": "3e460b61f473eb4dcb43ecf7c4a5490b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 25.212389380530972,
"alnum_prop": 0.5605475605475605,
"repo_name": "pombredanne/dask",
"id": "4a7efe85ea65370483f5177710bb4633c681c9d9",
"size": "2849",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dask/store/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "895298"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
}
|
from .component_classes import Health, Velocity
def test_reset(game, entity, entity2):
game.add_component_to_entity(Health(), entity)
game.add_component_to_entity(Velocity(), entity)
game.add_component_to_entity(Health(), entity2)
game.reset()
assert game.uid == 0
assert len(game.entities) == 0
assert len(game.components) == 0
|
{
"content_hash": "7550d6853c80c09722d9932520b3d2e0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.6897506925207756,
"repo_name": "Remolten/galena",
"id": "f8733bb964bed7d2b4438421341005c459b06969",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_galena.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23867"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
from django.utils.translation import ugettext as _
from djblets.markdown import iter_markdown_lines
from pygments.lexers import TextLexer
from reviewboard.reviews.chunk_generators import MarkdownDiffChunkGenerator
from reviewboard.reviews.ui.text import TextBasedReviewUI
from reviewboard.reviews.markdown_utils import render_markdown
logger = logging.getLogger(__name__)
class MarkdownReviewUI(TextBasedReviewUI):
"""A Review UI for markdown files.
This renders the markdown to HTML, and allows users to comment on each
top-level block (header, paragraph, list, code block, etc).
"""
supported_mimetypes = ['text/x-markdown']
object_key = 'markdown'
can_render_text = True
rendered_chunk_generator_cls = MarkdownDiffChunkGenerator
extra_css_classes = ['markdown-review-ui']
js_view_class = 'RB.MarkdownReviewableView'
def generate_render(self):
with self.obj.file as f:
f.open()
rendered = render_markdown(f.read())
try:
for line in iter_markdown_lines(rendered):
yield line
except Exception as e:
logger.error('Failed to parse resulting Markdown XHTML for '
'file attachment %d: %s',
self.obj.pk, e,
exc_info=True)
yield _('Error while rendering Markdown content: %s') % e
def get_source_lexer(self, filename, data):
return TextLexer()
|
{
"content_hash": "fb09e40abd37c19e80f1d5802c291766",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 32.020833333333336,
"alnum_prop": 0.6623292127521145,
"repo_name": "chipx86/reviewboard",
"id": "c3767e1aef4f10b2af7bed69bec00f9231d8ade8",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/reviews/ui/markdownui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "434719"
},
{
"name": "HTML",
"bytes": "224310"
},
{
"name": "JavaScript",
"bytes": "3830753"
},
{
"name": "Python",
"bytes": "7333453"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
}
|
from random import randint, uniform, gauss
from numpy import asarray
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
num_chairs = num_tables = 5000
data = [[randint(0, 5), # Possible colors for chairs
uniform(2, 5), # Possible leg lengths for chairs
gauss(2, 0.25)] # Possible top surface areas for chairs
for i in range(num_chairs)] + \
[[randint(0, 5), # Possible colors for tables
uniform(4, 10), # Possible leg lengths for tables
gauss(5, 1)] # Possible top surface areas for tables
for i in range(num_tables)]
labels = asarray(['chair']*num_chairs + ['table']*num_tables)
rfc = RandomForestClassifier(n_estimators=100)
# rfc.fit(data, labels)
scores = cross_val_score(rfc, data, labels, cv=10)
print('Accuracy: %0.2f +/- %0.2f' % (scores.mean(), scores.std()*2))
|
{
"content_hash": "2ea1a2d41a6355d247343ee11a91be5a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 38.91304347826087,
"alnum_prop": 0.6737430167597765,
"repo_name": "koverholt/random-forest-example",
"id": "f38db5a5b10d6c5b8807a366eccfc8fc4c220fd8",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/random_forest_chairs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1006"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
FIXME: Proper module docstring
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Nov 9, 2012"
import unittest2 as unittest
import os
import json
from monty.json import MontyDecoder
from pymatgen.entries.entry_tools import group_entries_by_structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class FuncTest(unittest.TestCase):
def test_group_entries_by_structure(self):
with open(os.path.join(test_dir, "TiO2_entries.json"), "r") as f:
entries = json.load(f, cls=MontyDecoder)
groups = group_entries_by_structure(entries)
self.assertEqual(sorted([len(g) for g in groups]),
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4])
self.assertLess(len(groups), len(entries))
#Make sure no entries are left behind
self.assertEqual(sum([len(g) for g in groups]), len(entries))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{
"content_hash": "57a0ea9f9f5cc5316e33e7e25c573bf5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 30.025,
"alnum_prop": 0.6161532056619484,
"repo_name": "aykol/pymatgen",
"id": "8736604106598cf23b1175b32af87d31523ccee1",
"size": "1311",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/entries/tests/test_entry_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5203893"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""
flask_oauthlib.provider.oauth2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implemnts OAuth2 provider support for Flask.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
"""
import os
import logging
import datetime
from functools import wraps
from flask import request, url_for
from flask import redirect, abort
from werkzeug import cached_property
from werkzeug.utils import import_string
from oauthlib import oauth2
from oauthlib.oauth2 import RequestValidator, Server
from oauthlib.common import to_unicode, add_params_to_uri
from ..utils import extract_params, decode_base64, create_response
__all__ = ('OAuth2Provider', 'OAuth2RequestValidator')
log = logging.getLogger('flask_oauthlib')
class OAuth2Provider(object):
"""Provide secure services using OAuth2.
The server should provide an authorize handler and a token hander,
But before the handlers are implemented, the server should provide
some getters for the validation.
Like many other Flask extensions, there are two usage modes. One is
binding the Flask app instance::
app = Flask(__name__)
oauth = OAuth2Provider(app)
The second possibility is to bind the Flask app later::
oauth = OAuth2Provider()
def create_app():
app = Flask(__name__)
oauth.init_app(app)
return app
Configure :meth:`tokengetter` and :meth:`tokensetter` to get and
set tokens. Configure :meth:`grantgetter` and :meth:`grantsetter`
to get and set grant tokens. Configure :meth:`clientgetter` to
get the client.
Configure :meth:`usergetter` if you need password credential
authorization.
With everything ready, implement the authorization workflow:
* :meth:`authorize_handler` for consumer to confirm the grant
* :meth:`token_handler` for client to exchange access token
And now you can protect the resource with scopes::
@app.route('/api/user')
@oauth.require_oauth('email', 'username')
def user():
return jsonify(request.oauth.user)
"""
def __init__(self, app=None):
self._before_request_funcs = []
self._after_request_funcs = []
self._invalid_response = None
if app:
self.init_app(app)
def init_app(self, app):
"""
This callback can be used to initialize an application for the
oauth provider instance.
"""
self.app = app
app.extensions = getattr(app, 'extensions', {})
app.extensions['oauthlib.provider.oauth2'] = self
@cached_property
def error_uri(self):
"""The error page URI.
When something turns error, it will redirect to this error page.
You can configure the error page URI with Flask config::
OAUTH2_PROVIDER_ERROR_URI = '/error'
You can also define the error page by a named endpoint::
OAUTH2_PROVIDER_ERROR_ENDPOINT = 'oauth.error'
"""
error_uri = self.app.config.get('OAUTH2_PROVIDER_ERROR_URI')
if error_uri:
return error_uri
error_endpoint = self.app.config.get('OAUTH2_PROVIDER_ERROR_ENDPOINT')
if error_endpoint:
return url_for(error_endpoint)
return '/oauth/errors'
@cached_property
def server(self):
"""
All in one endpoints. This property is created automaticly
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator()
"""
expires_in = self.app.config.get('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN')
token_generator = self.app.config.get(
'OAUTH2_PROVIDER_TOKEN_GENERATOR', None
)
if token_generator and not callable(token_generator):
token_generator = import_string(token_generator)
if hasattr(self, '_validator'):
return Server(
self._validator,
token_expires_in=expires_in,
token_generator=token_generator,
)
if hasattr(self, '_clientgetter') and \
hasattr(self, '_tokengetter') and \
hasattr(self, '_tokensetter') and \
hasattr(self, '_grantgetter') and \
hasattr(self, '_grantsetter'):
usergetter = None
if hasattr(self, '_usergetter'):
usergetter = self._usergetter
validator = OAuth2RequestValidator(
clientgetter=self._clientgetter,
tokengetter=self._tokengetter,
grantgetter=self._grantgetter,
usergetter=usergetter,
tokensetter=self._tokensetter,
grantsetter=self._grantsetter,
)
self._validator = validator
return Server(
validator,
token_expires_in=expires_in,
token_generator=token_generator,
)
raise RuntimeError('application not bound to required getters')
def before_request(self, f):
"""Register functions to be invoked before accessing the resource.
The function accepts nothing as parameters, but you can get
information from `Flask.request` object. It is usually useful
for setting limitation on the client request::
@oauth.before_request
def limit_client_request():
client_id = request.values.get('client_id')
if not client_id:
return
client = Client.get(client_id)
if over_limit(client):
return abort(403)
track_request(client)
"""
self._before_request_funcs.append(f)
return f
def after_request(self, f):
"""Register functions to be invoked after accessing the resource.
The function accepts ``valid`` and ``request`` as parameters,
and it should return a tuple of them::
@oauth.after_request
def valid_after_request(valid, oauth):
if oauth.user in black_list:
return False, oauth
return valid, oauth
"""
self._after_request_funcs.append(f)
return f
def invalid_response(self, f):
"""Register a function for responsing with invalid request.
When an invalid request proceeds to :meth:`require_oauth`, we can
handle the request with the registered function. The function
accepts one parameter, which is an oauthlib Request object::
@oauth.invalid_response
def invalid_require_oauth(req):
return jsonify(message=req.error_message), 401
If no function is registered, it will return with ``abort(401)``.
"""
self._invalid_response = f
return f
def clientgetter(self, f):
"""Register a function as the client getter.
The function accepts one parameter `client_id`, and it returns
a client object with at least these information:
- client_id: A random string
- client_secret: A random string
- client_type: A string represents if it is `confidential`
- redirect_uris: A list of redirect uris
- default_redirect_uri: One of the redirect uris
- default_scopes: Default scopes of the client
The client may contain more information, which is suggested:
- allowed_grant_types: A list of grant types
- allowed_response_types: A list of response types
- validate_scopes: A function to validate scopes
Implement the client getter::
@oauth.clientgetter
def get_client(client_id):
client = get_client_model(client_id)
# Client is an object
return client
"""
self._clientgetter = f
return f
def usergetter(self, f):
"""Register a function as the user getter.
This decorator is only required for **password credential**
authorization::
@oauth.usergetter
def get_user(username, password, client, request,
*args, **kwargs):
# client: current request client
if not client.has_password_credential_permission:
return None
user = User.get_user_by_username(username)
if not user.validate_password(password):
return None
# parameter `request` is an OAuthlib Request object.
# maybe you will need it somewhere
return user
"""
self._usergetter = f
return f
def tokengetter(self, f):
"""Register a function as the token getter.
The function accepts an `access_token` or `refresh_token` parameters,
and it returns a token object with at least these information:
- access_token: A string token
- refresh_token: A string token
- client_id: ID of the client
- scopes: A list of scopes
- expires: A `datetime.datetime` object
- user: The user object
The implementation of tokengetter should accepts two parameters,
one is access_token the other is refresh_token::
@oauth.tokengetter
def bearer_token(access_token=None, refresh_token=None):
if access_token:
return get_token(access_token=access_token)
if refresh_token:
return get_token(refresh_token=refresh_token)
return None
"""
self._tokengetter = f
return f
def tokensetter(self, f):
"""Register a function to save the bearer token.
The setter accepts two parameters at least, one is token,
the other is request::
@oauth.tokensetter
def set_token(token, request, *args, **kwargs):
save_token(token, request.client, request.user)
The parameter token is a dict, that looks like::
{
u'access_token': u'6JwgO77PApxsFCU8Quz0pnL9s23016',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'scope': u'email address'
}
The request is an object, that contains an user object and a
client object.
"""
self._tokensetter = f
return f
def grantgetter(self, f):
"""Register a function as the grant getter.
The function accepts `client_id`, `code` and more::
@oauth.grantgetter
def grant(client_id, code):
return get_grant(client_id, code)
It returns a grant object with at least these information:
- delete: A function to delete itself
"""
self._grantgetter = f
return f
def grantsetter(self, f):
"""Register a function to save the grant code.
The function accepts `client_id`, `code`, `request` and more::
@oauth.grantsetter
def set_grant(client_id, code, request, *args, **kwargs):
save_grant(client_id, code, request.user, request.scopes)
"""
self._grantsetter = f
return f
def authorize_handler(self, f):
"""Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method == 'GET':
# render a page for user to confirm the authorization
return render_template('oauthorize.html')
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
"""
@wraps(f)
def decorated(*args, **kwargs):
# raise if server not implemented
server = self.server
uri, http_method, body, headers = extract_params()
if request.method == 'GET':
redirect_uri = request.args.get('redirect_uri', None)
log.debug('Found redirect_uri %s.', redirect_uri)
try:
ret = server.validate_authorization_request(
uri, http_method, body, headers
)
scopes, credentials = ret
kwargs['scopes'] = scopes
kwargs.update(credentials)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e)
return redirect(e.in_uri(self.error_uri))
except Exception as e:
return redirect(add_params_to_uri(
self.error_uri, {'error': e.message}
))
else:
redirect_uri = request.values.get('redirect_uri', None)
try:
rv = f(*args, **kwargs)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e)
return redirect(e.in_uri(self.error_uri))
except Exception as e:
return redirect(add_params_to_uri(
self.error_uri, {'error': e.message}
))
if not isinstance(rv, bool):
# if is a response or redirect
return rv
if not rv:
# denied by user
e = oauth2.AccessDeniedError()
return redirect(e.in_uri(redirect_uri))
return self.confirm_authorization_request()
return decorated
def confirm_authorization_request(self):
"""When consumer confirm the authrozation."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(
client_id=request.values.get('client_id'),
redirect_uri=request.values.get('redirect_uri', None),
response_type=request.values.get('response_type', None),
state=request.values.get('state', None)
)
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
uri, http_method, body, headers = extract_params()
try:
ret = server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret)
except oauth2.FatalClientError as e:
return redirect(e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
return redirect(e.in_uri(redirect_uri))
except Exception as e:
return redirect(add_params_to_uri(
self.error_uri, {'error': e.message}
))
def token_handler(self, f):
"""Access/refresh token handler decorator.
The decorated function should return an dictionary or None as
the extra credentials for creating the token response.
You can control the access method with standard flask route mechanism.
If you only allow the `POST` method::
@app.route('/oauth/token', methods=['POST'])
@oauth.token_handler
def access_token():
return None
"""
@wraps(f)
def decorated(*args, **kwargs):
server = self.server
uri, http_method, body, headers = extract_params()
credentials = f(*args, **kwargs) or {}
log.debug('Fetched extra credentials, %r.', credentials)
ret = server.create_token_response(
uri, http_method, body, headers, credentials
)
return create_response(*ret)
return decorated
def require_oauth(self, *scopes):
"""Protect resource with specified scopes."""
def wrapper(f):
@wraps(f)
def decorated(*args, **kwargs):
for func in self._before_request_funcs:
func()
if hasattr(request, 'oauth') and request.oauth:
return f(*args, **kwargs)
server = self.server
uri, http_method, body, headers = extract_params()
valid, req = server.verify_request(
uri, http_method, body, headers, scopes
)
for func in self._after_request_funcs:
valid, req = func(valid, req)
if not valid:
if self._invalid_response:
return self._invalid_response(req)
return abort(401)
request.oauth = req
return f(*args, **kwargs)
return decorated
return wrapper
class OAuth2RequestValidator(RequestValidator):
"""Subclass of Request Validator.
:param clientgetter: a function to get client object
:param tokengetter: a function to get bearer token
:param tokensetter: a function to save bearer token
:param grantgetter: a function to get grant token
:param grantsetter: a function to save grant token
"""
def __init__(self, clientgetter, tokengetter, grantgetter,
usergetter=None, tokensetter=None, grantsetter=None):
self._clientgetter = clientgetter
self._tokengetter = tokengetter
self._usergetter = usergetter
self._tokensetter = tokensetter
self._grantgetter = grantgetter
self._grantsetter = grantsetter
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the
following cases:
Resource Owner Password Credentials Grant: see `Section 4.3.2`_.
Authorization Code Grant: see `Section 4.1.3`_.
Refresh Token Grant: see `Section 6`_.
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
if request.grant_type == 'password':
return True
auth_required = ('authorization_code', 'refresh_token')
return 'Authorization' in request.headers and\
request.grant_type in auth_required
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate itself in other means.
Other means means is described in `Section 3.2.1`_.
.. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
"""
auth = request.headers.get('Authorization', None)
log.debug('Authenticate client %r', auth)
if auth:
try:
_, s = auth.split(' ')
client_id, client_secret = decode_base64(s).split(':')
client_id = to_unicode(client_id, 'utf-8')
client_secret = to_unicode(client_secret, 'utf-8')
except Exception as e:
log.debug('Authenticate client failed with exception: %r', e)
return False
else:
client_id = request.client_id
client_secret = request.client_secret
client = self._clientgetter(client_id)
if not client:
log.debug('Authenticate client failed, client not found.')
return False
request.client = client
if client.client_secret != client_secret:
log.debug('Authenticate client failed, secret not match.')
return False
if client.client_type != 'confidential':
log.debug('Authenticate client failed, not confidential.')
return False
log.debug('Authenticate client success.')
return True
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Authenticate a non-confidential client.
:param client_id: Client ID of the non-confidential client
:param request: The Request object passed by oauthlib
"""
log.debug('Authenticate client %r.', client_id)
client = request.client or self._clientgetter(client_id)
if not client:
log.debug('Authenticate failed, client not found.')
return False
if client.client_secret != request.client_secret:
log.debug('Authenticate client failed, secret not match.')
return False
# attach client on request for convenience
request.client = client
return True
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri.
This method is used in the authorization code grant flow. It will
compare redirect_uri and the one in grant token strictly, you can
add a `validate_redirect_uri` function on grant for a customized
validation.
"""
client = client or self._clientgetter(client_id)
log.debug('Confirm redirect uri for client %r and code %r.',
client.client_id, code)
grant = self._grantgetter(client_id=client.client_id, code=code)
if not grant:
log.debug('Grant not found.')
return False
if hasattr(grant, 'validate_redirect_uri'):
return grant.validate_redirect_uri(redirect_uri)
log.debug('Compare redirect uri for grant %r and %r.',
grant.redirect_uri, redirect_uri)
testing = 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ
if testing and redirect_uri is None:
# For testing
return True
return grant.redirect_uri == redirect_uri
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
This method is used in the refresh token grant flow. We return
the scope of the token to be refreshed so it can be applied to the
new access token.
"""
log.debug('Obtaining scope of refreshed token.')
tok = self._tokengetter(refresh_token=refresh_token)
return tok.scopes
def confirm_scopes(self, refresh_token, scopes, request, *args, **kwargs):
"""Ensures the requested scope matches the scope originally granted
by the resource owner. If the scope is omitted it is treated as equal
to the scope originally granted by the resource owner.
DEPRECATION NOTE: This method will cease to be used in oauthlib>0.4.2,
future versions of ``oauthlib`` use the validator method
``get_original_scopes`` to determine the scope of the refreshed token.
"""
if not scopes:
log.debug('Scope omitted for refresh token %r', refresh_token)
return True
log.debug('Confirm scopes %r for refresh token %r',
scopes, refresh_token)
tok = self._tokengetter(refresh_token=refresh_token)
return set(tok.scopes) == set(scopes)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Default redirect_uri for the given client."""
request.client = request.client or self._clientgetter(client_id)
redirect_uri = request.client.default_redirect_uri
log.debug('Found default redirect uri %r', redirect_uri)
return redirect_uri
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Default scopes for the given client."""
request.client = request.client or self._clientgetter(client_id)
scopes = request.client.default_scopes
log.debug('Found default scopes %r', scopes)
return scopes
def invalidate_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself.
"""
log.debug('Destroy grant token for client %r, %r', client_id, code)
grant = self._grantgetter(client_id=client_id, code=code)
if grant:
grant.delete()
def save_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Persist the authorization code."""
log.debug(
'Persist authorization code %r for client %r',
code, client_id
)
request.client = request.client or self._clientgetter(client_id)
self._grantsetter(client_id, code, request, *args, **kwargs)
return request.client.default_redirect_uri
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token."""
log.debug('Save bearer token %r', token)
self._tokensetter(token, request, *args, **kwargs)
return request.client.default_redirect_uri
def validate_bearer_token(self, token, scopes, request):
"""Validate access token.
:param token: A string of random characters
:param scopes: A list of scopes
:param request: The Request object passed by oauthlib
The validation validates:
1) if the token is available
2) if the token has expired
3) if the scopes are available
"""
log.debug('Validate bearer token %r', token)
tok = self._tokengetter(access_token=token)
if not tok:
msg = 'Bearer token not found.'
request.error_message = msg
log.debug(msg)
return False
# validate expires
if datetime.datetime.utcnow() > tok.expires:
msg = 'Bearer token is expired.'
request.error_message = msg
log.debug(msg)
return False
# validate scopes
if not set(tok.scopes).issuperset(set(scopes)):
msg = 'Bearer token scope not valid.'
request.error_message = msg
log.debug(msg)
return False
request.access_token = tok
request.user = tok.user
request.scopes = scopes
if hasattr(tok, 'client'):
request.client = tok.client
elif hasattr(tok, 'client_id'):
request.client = self._clientgetter(tok.client_id)
return True
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client."""
log.debug('Validate client %r', client_id)
client = request.client or self._clientgetter(client_id)
if client:
# attach client to request object
request.client = client
return True
return False
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the grant code is valid."""
client = client or self._clientgetter(client_id)
log.debug(
'Validate code for client %r and code %r', client.client_id, code
)
grant = self._grantgetter(client_id=client.client_id, code=code)
if not grant:
log.debug('Grant not found.')
return False
if hasattr(grant, 'expires') and \
datetime.datetime.utcnow() > grant.expires:
log.debug('Grant is expired.')
return False
request.state = kwargs.get('state')
request.user = grant.user
request.scopes = grant.scopes
return True
def validate_grant_type(self, client_id, grant_type, client, request,
*args, **kwargs):
"""Ensure the client is authorized to use the grant type requested.
It will allow any of the four grant types (`authorization_code`,
`password`, `client_credentials`, `refresh_token`) by default.
Implemented `allowed_grant_types` for client object to authorize
the request.
It is suggested that `allowed_grant_types` should contain at least
`authorization_code` and `refresh_token`.
"""
if self._usergetter is None and grant_type == 'password':
log.debug('Password credential authorization is disabled.')
return False
default_grant_types = (
'authorization_code', 'password',
'client_credentials', 'refresh_token',
)
if grant_type not in default_grant_types:
return False
if hasattr(client, 'allowed_grant_types') and \
grant_type not in client.allowed_grant_types:
return False
if grant_type == 'client_credentials':
if not hasattr(client, 'user'):
log.debug('Client should have a user property')
return False
request.user = client.user
return True
def validate_redirect_uri(self, client_id, redirect_uri, request,
*args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri.
This method is used in the authorization code grant flow and also
in implicit grant flow. It will detect if redirect_uri in client's
redirect_uris strictly, you can add a `validate_redirect_uri`
function on grant for a customized validation.
"""
request.client = request.client or self._clientgetter(client_id)
client = request.client
if hasattr(client, 'validate_redirect_uri'):
return client.validate_redirect_uri(redirect_uri)
return redirect_uri in client.redirect_uris
def validate_refresh_token(self, refresh_token, client, request,
*args, **kwargs):
"""Ensure the token is valid and belongs to the client
This method is used by the authorization code grant indirectly by
issuing refresh tokens, resource owner password credentials grant
(also indirectly) and the refresh token grant.
"""
token = self._tokengetter(refresh_token=refresh_token)
if token and token.client_id == client.client_id:
# Make sure the request object contains user and client_id
request.client_id = token.client_id
request.user = token.user
return True
return False
def validate_response_type(self, client_id, response_type, client, request,
*args, **kwargs):
"""Ensure client is authorized to use the response type requested.
It will allow any of the two (`code`, `token`) response types by
default. Implemented `allowed_response_types` for client object
to authorize the request.
"""
if response_type not in ('code', 'token'):
return False
if hasattr(client, 'allowed_response_types'):
return response_type in client.allowed_response_types
return True
def validate_scopes(self, client_id, scopes, client, request,
*args, **kwargs):
"""Ensure the client is authorized access to requested scopes."""
if hasattr(client, 'validate_scopes'):
return client.validate_scopes(scopes)
return set(client.default_scopes).issuperset(set(scopes))
def validate_user(self, username, password, client, request,
*args, **kwargs):
"""Ensure the username and password is valid.
Attach user object on request for later using.
"""
log.debug('Validating username %r and password %r',
username, password)
if self._usergetter is not None:
user = self._usergetter(
username, password, client, request, *args, **kwargs
)
if user:
request.user = user
return True
return False
log.debug('Password credential authorization is disabled.')
return False
|
{
"content_hash": "d609ba9fa866cda91c5019d02a1f8307",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 79,
"avg_line_length": 37.3625,
"alnum_prop": 0.5879740868031266,
"repo_name": "RealGeeks/flask-oauthlib",
"id": "d931ab4d684ece70e502ed710cb75ea3b3eb1f67",
"size": "32895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_oauthlib/provider/oauth2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "675"
},
{
"name": "HTML",
"bytes": "2414"
},
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "174069"
}
],
"symlink_target": ""
}
|
"""
Differ classes
"""
__author__ = "André Malo"
__docformat__ = "epytext en"
__all__ = ["InternalDiffer", "ExternalDiffer"]
class InternalDiffer(object):
""" Differ without an external program call (uses difflib) """
def __init__(self):
""" Initialization """
pass
def getStringDiff(self, string1, string2, label1, label2 = None,
date1 = "", date2 = ""):
""" creates a diff of two line based strings
If a string is C{None}, it's treated as ""
@param string1: First string
@type string1: C{str}
@param string2: Second string
@type string2: C{str}
@param label1: Label for first data
@type label1: C{str}
@param label2: Label for second data
@type label2: C{str}
@param date1: Date description for first data
@type date1: C{str}
@param date2: Date description for second data
@type date2: C{str}
@return: unified diff lines (maybe a generator)
@rtype: iterable
"""
import difflib
list1 = (string1 or "").splitlines(True)
list2 = (string2 or "").splitlines(True)
if not (list1 or list2):
list1 = list2 = [""]
return difflib.unified_diff(
list1, list2, label1, label2 or label1, date1, date2,
)
def getFileDiff(self, name1, name2, label1, label2 = None,
date1 = "", date2 = ""):
""" creates a diff of two line based files
@param name1: First file name
@type name1: C{str}
@param name2: Second file name
@type name2: C{str}
@param label1: Label for first data
@type label1: C{str}
@param label2: Label for second data
@type label2: C{str}
@param date1: Date description for first data
@type date1: C{str}
@param date2: Date description for second data
@type date2: C{str}
@return: unified diff lines (maybe a generator)
@rtype: iterable
"""
import difflib
list1 = file(name1, "rb").readlines()
list2 = file(name2, "rb").readlines()
if not (list1 or list2):
list1 = list2 = [""]
return difflib.unified_diff(
list1, list2, label1, label2 or label1, date1, date2,
)
class ExternalDiffer(object):
""" Differ which calls an external program (e.g. diff)
@ivar _diff_command: The diff command line
@type _diff_command: C{list}
@ivar _tempdir: The tempdir to use for string diffs
@type _tempdir: C{str}
"""
def __init__(self, diff_command, tempdir = None):
""" Initialization
@param diff_command: The diff command to call
@type diff_command: C{list}
@param tempdir: The tempdir to use for string diffs
@type tempdir: C{str}
"""
self._diff_command = diff_command
self._tempdir = tempdir
def getStringDiff(self, string1, string2, label1, label2 = None,
date1 = "", date2 = ""):
""" creates a diff of two line based strings
If a string is C{None}, it's treated as ""
@param string1: First string
@type string1: C{str}
@param string2: Second string
@type string2: C{str}
@param label1: Label for first data
@type label1: C{str}
@param label2: Label for second data
@type label2: C{str}
@param date1: Date description for first data
@type date1: C{str}
@param date2: Date description for second data
@type date2: C{str}
@return: unified diff lines (maybe a generator)
@rtype: iterable
"""
from svnmailer import util
string1 = string1 or ""
string2 = string2 or ""
file1 = util.TempFile(self._tempdir)
file1.fp.write(string1)
file1.close()
file2 = util.TempFile(self._tempdir)
file2.fp.write(string2)
file2.close()
pipe = self._getPipe(
file1.name, file2.name, label1, label2, date1, date2
)
# yield line by line
line = pipe.fromchild.readline()
while line:
yield line
line = pipe.fromchild.readline()
pipe.fromchild.close()
pipe.wait()
def getFileDiff(self, name1, name2, label1, label2 = None,
date1 = "", date2 = ""):
""" creates a diff of two line based files
@param name1: First file name
@type name1: C{str}
@param name2: Second file name
@type name2: C{str}
@param label1: Label for first data
@type label1: C{str}
@param label2: Label for second data
@type label2: C{str}
@param date1: Date description for first data
@type date1: C{str}
@param date2: Date description for second data
@type date2: C{str}
@return: unified diff lines (maybe a generator)
@rtype: iterable
"""
pipe = self._getPipe(name1, name2, label1, label2, date1, date2)
# yield line by line
line = pipe.fromchild.readline()
while line:
yield line
line = pipe.fromchild.readline()
pipe.fromchild.close()
pipe.wait()
def _getPipe(self, name1, name2, label1, label2, date1, date2):
""" Returns a pipe from the diff program
@param name1: First file name
@type name1: C{str}
@param name2: Second file name
@type name2: C{str}
@param label1: Label for first data
@type label1: C{str}
@param label2: Label for second data
@type label2: C{str}
@param date1: Date description for first data
@type date1: C{str}
@param date2: Date description for second data
@type date2: C{str}
@return: The pipe object
@rtype: see: C{util.getPipe4}
"""
from svnmailer import util
params = {
"label_from": "%s %s" % (label1, date1 or ""),
"label_to" : "%s %s" % (label2 or label1, date2 or ""),
"from" : name1,
"to" : name2,
}
# check for sanity
for key, value in params.items():
if isinstance(value, unicode):
params[key] = value.encode("utf-8")
cmd = list(self._diff_command)
cmd[1:] = [(isinstance(arg, unicode) and
[arg.encode("utf-8")] or [arg])[0] % params for arg in cmd[1:]
]
pipe = util.getPipe4(cmd)
pipe.tochild.close()
return pipe
|
{
"content_hash": "dc3a222dca1525810238c0849f171b28",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 74,
"avg_line_length": 27.76284584980237,
"alnum_prop": 0.5269077448747153,
"repo_name": "danielshahaf/svnmailer-debian",
"id": "d712d83a470cb131101aa6d103765d987fe9c763",
"size": "7663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/svnmailer/differ.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "299480"
}
],
"symlink_target": ""
}
|
import ShareYourSystem as SYS
#Define
MyPredicter=SYS.PredicterClass(
).predict(
#PredictingUnitsInt
10,
#PredictingSensorsInt
1,
#PredictingDecoderWeightFloat
10.,
#PredictingCostFloat
0.,
#PredictingNormalisationInt
0.5,
#PredictingPerturbativeWeightFloat
0.1
)
#print
print('MyPredicter is')
SYS._print(MyPredicter)
|
{
"content_hash": "db21aba346e322d28e49d732592e809c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 36,
"avg_line_length": 15.772727272727273,
"alnum_prop": 0.7579250720461095,
"repo_name": "Ledoux/ShareYourSystem",
"id": "5347249efa6051dab8108518d4e62fd14426f3f2",
"size": "363",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Specials/Oldpredicters/Oldpredicter/draft/01_ExampleDoc copy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
from hippy.module.spl.funcs import *
import hippy.module.spl.arrayiter
import hippy.module.spl.iterator
|
{
"content_hash": "1378a63cef81cfdb6a8ea60ec0407cb9",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.8269230769230769,
"repo_name": "hippyvm/hippyvm",
"id": "1a46df242117ff51572a224941f3113d11c8642c",
"size": "104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hippy/module/spl/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1559"
},
{
"name": "C",
"bytes": "2603732"
},
{
"name": "C++",
"bytes": "196295"
},
{
"name": "HTML",
"bytes": "415"
},
{
"name": "JavaScript",
"bytes": "453641"
},
{
"name": "Makefile",
"bytes": "4793"
},
{
"name": "PHP",
"bytes": "15041037"
},
{
"name": "Python",
"bytes": "2503956"
},
{
"name": "Shell",
"bytes": "15527"
}
],
"symlink_target": ""
}
|
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty, NumericProperty, ReferenceListProperty, StringProperty, ListProperty, BooleanProperty
from kivy.uix.popup import Popup
from kivy.graphics import Color, Line
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.graphics import *
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.colorpicker import ColorPicker
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
import socket
import pickle
from threading import Thread
class Game(Screen):
def gotomenu(self):
Color(*(0,0,0))
self.layout.remove_widget(self.s) ##deletes the current sheet
self.manager.current = "menu"
class Menuscreen(Screen):
s = ObjectProperty()
def on_touch_down(self, touch):
if self.karo.collide_point(*touch.pos):
self.create_caro_sheet()
self.manager.get_screen('game').layout.menu.zoomer.value = 1
if self.line.collide_point(*touch.pos):
self.create_line_sheet()
self.manager.get_screen('game').layout.menu.zoomer.value = 1
if self.blank.collide_point(*touch.pos):
Sheet.type = "blank"
self.manager.get_screen('game').s = Sheet()
self.manager.get_screen('game').layout.add_widget(self.manager.get_screen('game').s, index=2)
self.manager.current = "game"
self.manager.get_screen('game').layout.menu.zoomer.value = 1
if self.table.collide_point(*touch.pos):
self.create_table_sheet()
self.manager.get_screen('game').layout.menu.zoomer.value = 1
def create_caro_sheet(self):
Sheet.type = "caro"
self.manager.get_screen('game').s = Sheet() #init a new sheet: property of Game
self.manager.get_screen('game').layout.add_widget(self.manager.get_screen('game').s, index=2)#add this sheet which is instance of Game
self.manager.current = "game"
with self.manager.get_screen('game').s.canvas: #drawing karo lines
Color(*(0,0,0))
y=0
y2=0
for i in range(self.manager.get_screen('game').width):
i+=20
y+=20
y2+=20
Line(points=[0, y,self.width , y2], width=0.5)
x=0
x2=0
for t in range(self.manager.get_screen('game').height):
t+=20
x+=20
x2+=20
Line(points=[x, 0, x2 , self.height], width=0.5)
def create_line_sheet(self):
Sheet.type = "line"
self.manager.get_screen('game').s = Sheet() #init a new sheet: property of Game
self.manager.get_screen('game').layout.add_widget(self.manager.get_screen('game').s, index=2) #add this sheet which is instance of Game
self.manager.current = "game"
with self.manager.get_screen('game').s.canvas: #drawing lines
Color(*(0,0,0))
y=0
y2=0
for t in range(self.manager.get_screen('game').height):
t+=20
y+=20
y2+=20
Line(points=[0, y, self.width ,y2 ], width=0.5)
def create_table_sheet(self):
Sheet.type = "table"
self.manager.get_screen('game').s = Sheet() #init a new sheet: property of Game
self.manager.get_screen('game').layout.add_widget(self.manager.get_screen('game').s, index=2) #add this sheet which is instance of Game
self.manager.current = "game"
with self.manager.get_screen('game').s.canvas: #drawing table
Color(*(0,0,0))
y=0
y2=0
for i in range(self.manager.get_screen('game').width):
i+=40
y+=40
y2+=40
Line(points=[0, y,self.width , y2], width=0.5)
x=0
x2=0
for t in range(self.manager.get_screen('game').height):
t+=100
x+=100
x2+=100
Line(points=[x, 0, x2 , self.height], width=0.5)
class Sheet(Widget):
color = (0,0,0)
linewidth = 1
background = StringProperty("")
type = ""
zoom = 1
centerxy = [Window.width/2, Window.height/2]
###Mode for Server auth.###
data = ["client"]
def connect_to_server(self):
content = BoxLayout()
lbl = TextInput(text='', multiline=False, size_hint=(1,1))
btn = Button(text="Ok", on_release= lambda a: self.set_ip_address(lbl.text), size_hint_x=0.3)
content.add_widget(lbl)
content.add_widget(btn)
popup = Popup(title="Enter IP adress", content=content, size_hint=(None, None), size=(Window.width*0.7, Window.height*0.2))
popup.open()
btn.bind(on_press=popup.dismiss)
def set_ip_address(self, ip):
host = str(ip)
touch = []
self.data.append(touch)
self.data_string = pickle.dumps(self.data)
self.s = socket.socket()
self.s.connect((host, 8888))
self.s.sendto(self.data_string,(host, 8888))
t = Thread(target=self.recieve_data)
t.start()
def on_touch_down(self,touch):
with self.scat.canvas:
Color(*self.color)
touch.ud['current_line'] = Line(points=((touch.x - self.scat.x)/self.zoom, (touch.y - self.scat.y)/self.zoom), width=self.linewidth)
def on_touch_move(self, touch):
if touch.is_double_tap:
self.scat.x += touch.dx
self.scat.y += touch.dy
self.centerxy[0] += touch.dx
self.centerxy[1] += touch.dy
else:
try:
touch.ud['current_line'].points += ((touch.x - self.scat.x)/self.zoom, (touch.y - self.scat.y)/self.zoom)
except KeyError:
pass
def recieve_data(self):
"""
Endless loop for retrieving Data. Should be threaded
"""
while True:
self.touch = self.s.recv(1024)
touch = pickle.loads(self.touch)
if touch[4] == False:
with self.scat.canvas:
Color(*touch[3])
line = Line(points=[touch[0], touch[1]], width = touch[2])
elif touch[4] == True:
line.points += [touch[0],touch[1]]
class Menu(Widget):
def change_pen(self, color, width):
Sheet.color = color
Sheet.linewidth = width
def color(self): #opens color Picker
picker = ColorPicker()
picker.bind(color=self.setcolor)
popup = Popup(title="Choose a Color", content=picker, size_hint=(None, None), size=(400,400))
popup.open()
def setcolor(self, instance, value): # Sets color
Sheet.color = list(value)
def thickness(self, value): #sets linewidth from slider
Sheet.linewidth = value
def scale(self, value=1):
self.parent.parent.s.scat.scale = value
self.parent.parent.s.scat.center = Sheet.centerxy[0], Sheet.centerxy[1]
Sheet.zoom = value
def widgetshot(self):
self.parent.parent.s.export_to_png('Saved Board.png')
def clear(self): #clear screen and set up up correct
if Sheet.type=="blank":
self.parent.remove_widget(self.parent.parent.s)
self.parent.parent.s = Sheet()
self.parent.add_widget(self.parent.parent.s, index=2)
elif Sheet.type=="caro":
self.parent.remove_widget(self.parent.parent.s)
self.parent.parent.s = Sheet()
self.parent.add_widget(self.parent.parent.s, index=2)
with self.parent.parent.s.canvas: #drawing karo lines
Color(*(0,0,0))
y=0
y2=0
for i in range(self.parent.parent.width):
i+=20
y+=20
y2+=20
Line(points=[0, y,self.parent.parent.width , y2], width=0.5)
x=0
x2=0
for t in range(self.parent.parent.height):
t+=20
x+=20
x2+=20
Line(points=[x, 0, x2 , self.parent.parent.height], width=0.5)
elif Sheet.type=="line":
self.parent.remove_widget(self.parent.parent.s)
self.parent.parent.s = Sheet()
self.parent.add_widget(self.parent.parent.s, index=2)
with self.parent.parent.s.canvas: #drawing lines
Color(*(0,0,0))
y=0
y2=0
for t in range(self.parent.parent.height):
t+=20
y+=20
y2+=20
Line(points=[0, y, self.parent.parent.width ,y2 ], width=0.5)
elif Sheet.type=="table":
self.parent.remove_widget(self.parent.parent.s)
self.parent.parent.s = Sheet()
self.parent.add_widget(self.parent.parent.s, index=2)
with self.parent.parent.s.canvas: #drawing table
Color(*(0,0,0))
y=0
y2=0
for i in range(self.parent.parent.width):
i+=40
y+=40
y2+=40
Line(points=[0, y, self.parent.parent.width , y2], width=0.5)
x=0
x2=0
for t in range(self.parent.parent.height):
t+=100
x+=100
x2+=100
Line(points=[x, 0, x2 ,self.parent.parent.height], width=0.5)
def connect(self):
self.parent.parent.s.connect_to_server()
class Manager(ScreenManager):
screen_one = ObjectProperty(None)
screen_two = ObjectProperty(None)
class ScreensApp(App):
def build(self):
return Manager()
ScreensApp().run()
|
{
"content_hash": "c883f457c6d01aac4a265207982b6f91",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 144,
"avg_line_length": 33.81733746130031,
"alnum_prop": 0.5071866703286643,
"repo_name": "georgk10/Whiteboard-Master",
"id": "a30695080c27f1a50ddfab46706efd07df23a131",
"size": "10923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client/main_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23038"
}
],
"symlink_target": ""
}
|
"""
Understanding NiftiMasker and mask computation
==================================================
In this example, the Nifti masker is used to automatically compute a mask.
For data that has already been masked, the default strategy works out of
the box.
However, for raw EPI, as in resting-state time series, we need to use the
'epi' strategy of the NiftiMasker.
In addition, we show here how to tweak the different parameters of the
underlying mask extraction routine
:func:`nilearn.masking.compute_epi_mask`.
"""
import matplotlib.pyplot as plt
import numpy as np
import nibabel
from nilearn import datasets
###############################################################################
# Simple visualization helper
def display_mask(background, mask, title):
plt.axis('off')
plt.imshow(np.rot90(background), interpolation='nearest', cmap=plt.cm.gray)
ma = np.ma.masked_equal(mask, False)
plt.imshow(np.rot90(ma), interpolation='nearest',
cmap=plt.cm.autumn, alpha=0.5)
plt.title(title)
###############################################################################
# From already masked data
from nilearn.input_data import NiftiMasker
# Load Miyawaki dataset
miyawaki = datasets.fetch_miyawaki2008()
miyawaki_img = nibabel.load(miyawaki.func[0])
miyawaki_func = miyawaki_img.get_data()
background = np.mean(miyawaki_func, axis=-1)[..., 14]
# This time, we can use the NiftiMasker without changing the default mask
# strategy, as the data has already been masked, and thus lies on a
# homogeneous background
masker = NiftiMasker()
masker.fit(miyawaki_img)
default_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(4, 4.5))
display_mask(background, default_mask[..., 14], 'Default background mask')
plt.tight_layout()
###############################################################################
# From raw EPI data
# Load NYU resting-state dataset
nyu = datasets.fetch_nyu_rest(n_subjects=1)
nyu_img = nibabel.load(nyu.func[0])
# Restrict nyu to 100 frames to speed up computation
nyu_func = nyu_img.get_data()[..., :100]
# nyu_func is a 4D-array, we want to make a Niimg out of it:
nyu_img = nibabel.Nifti1Image(nyu_func, nyu_img.get_affine())
# To display the background
background = np.mean(nyu_func, axis=-1)[..., 21]
# Simple mask extraction from EPI images
from nilearn.input_data import NiftiMasker
# We need to specify an 'epi' mask_strategy, as this is raw EPI data
masker = NiftiMasker(mask_strategy='epi')
masker.fit(nyu_img)
default_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(4, 4.5))
display_mask(background, default_mask[..., 21], 'EPI automatic mask')
plt.tight_layout()
# Generate mask with strong opening
masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10))
masker.fit(nyu_img)
opening_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(4, 4.5))
display_mask(background, opening_mask[..., 21], 'EPI Mask with strong opening')
plt.tight_layout()
# Generate mask with a high lower cutoff
masker = NiftiMasker(mask_strategy='epi',
mask_args=dict(upper_cutoff=.9, lower_cutoff=.8,
opening=False))
masker.fit(nyu_img)
cutoff_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(4, 4.5))
display_mask(background, cutoff_mask[..., 21], 'EPI Mask: high lower_cutoff')
plt.tight_layout()
################################################################################
# Extract time series
# trended vs detrended
trended = NiftiMasker(mask_strategy='epi')
detrended = NiftiMasker(mask_strategy='epi', detrend=True)
trended_data = trended.fit_transform(nyu_img)
detrended_data = detrended.fit_transform(nyu_img)
print "Trended: mean %.2f, std %.2f" % \
(np.mean(trended_data), np.std(trended_data))
print "Detrended: mean %.2f, std %.2f" % \
(np.mean(detrended_data), np.std(detrended_data))
plt.show()
|
{
"content_hash": "8411a1d96874efbd471794e33b932a8a",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 80,
"avg_line_length": 32.69421487603306,
"alnum_prop": 0.6531850353892821,
"repo_name": "ainafp/nilearn",
"id": "643f5ce482810cbe8bbf40ea28306fc52ac661cd",
"size": "3956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_mask_computation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
import tkinter as tk
from datetime import datetime as dt
from datetime import timedelta
from math import floor
from tkinter import filedialog as fd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.interpolate import interp1d
from atmPy.atmos import air
from atmPy.aerosols.physics import aerosol
class SMPS(object):
"""
Defines routines associated with an SMPS object
Attributes
----------
dma: DMA object
Defines the differential mobility analyzer specific to the SMPS
dn_interp: numpy array of floats, 2D
Charge and lag corrected dndlogDp distributions
diam_interp: numpy array of floats, 1D
Diameters over which the dndlogDp distributions are interpolated. This value is determined by the
method buildGrid
cn_raw:
cn_smoothed:
diam:
air: air object
Use this object to set the temperature and pressure and do calculations related to the gas
files: list of file objects
These are the files that will be processed
scan_folder: String
Location of scan data
"""
def __init__(self, dma):
self.air = air()
self.dma = dma
self.files = []
self.diam_interp = 0
self.buildGrid()
self.dn_interp = None
self.date = []
self.cn_raw = None
self.cn_smoothed = None
self.diam = None
self.lag = 10
# Smoothing parameter for the LOWESS smoothing
self.alpha = 0.3
def openFiles(self, scan_folder=''):
"""
Launches a dialog to selct files for SMPS processing.
Parameters
-----------
scan_folder: string, optional
Starting folder for searching for files to process. Default is empty.
"""
_gui = tk.Tk()
# Prompt the user for a file or files to process
self.files = fd.askopenfiles(initialdi=scan_folder)
_gui.destroy()
def __progress__(self):
s = len(self.files)
i = 0
while True:
print('\r[{0}] {1}%'.format('#' * i, i / s * 100))
yield None
i += 1
@staticmethod
def __chargecorr__(diam, dn, gas, n=3, pos_neg=-1):
"""
Correct the input concentrations for multiple charges.
When running in SMPS mode, we must keep in mind that each size of particles can carry multiple charges.
Particles with more charges will appear to be SMALLER than those with less. So, we will index through the
diameters backwards, get the number of the current selected size that would have charge n, and remove those
from the lower bins to the upper bins.
Parameters
----------
diam: array of float
array of diameters in nm
dn: array of integers
Array of particle concentrations corresponding to diameter 'diam'
gas: gas object
Gas object that defines the properties of the gas
n: int, optional
Number of charges to consider. Default is 3.
pos_neg: int, optional
Positive or negative one indicating whether to consider positive or negative charges.
Default is -1.
Returns
-------
None
Notes
------
The charge correction loops from top to bottom in the size distribution.
This assumes that there are no particles beyond the probed distribution.
If there are, there will be noticeable steps in the charge corrected distribution.
Regarding the algorithm, we can solve for each bin by using the charging efficiency
ala Gunn or Wiedensohler. To solve, we use the following algorithm:
1. Assume that the current bin holds ONLY singly charged particles.
2. Calculate the fraction of particles that are expected to be singly charged
at the current diameter, :math:'f_1\left(D_p\right)'.
3. For each charge beyond 1:
a) Get the charging efficiency of the current particle size for :math:'i' charges
:math:'f_i \left(D_p\right)'
b)
"""
# Flip the incoming diamter array
rdiam = np.copy(diam[::-1])
# We are working backwards, so we need to have the length to get this all right...
l = len(dn) - 1
# Inline function for finding the value closest to diameter d in the array diam
fmin = lambda nd: (np.abs(np.asarray(diam) - nd)).argmin()
for i, d in enumerate(rdiam):
# Get the fraction of particles that are singly charged
f1 = aerosol.ndistr(d, pos_neg, gas.t)
for j in reversed(range(2, n + 1)): # Loop through charges 2 and higher backwards
ne = j * pos_neg
fi = aerosol.ndistr(d, ne, gas.t)
# Mobility of multiply charged particles
z_mult = abs(ne * aerosol.z(d, gas, pos_neg))
# Diameter bin which contains the multiply charged particles
d_mult = aerosol.z2d(z_mult, gas, 1)
# Continue while the diameter specified is larger than the minimum diameter
if d_mult >= diam[0]:
# Find the index of the multiple charges
k = fmin(d_mult)
# Remove the particles in bin k that belong in the current bin, but don't remove more
# particles than there are in the bin
dn[k] -= min(dn[l - i] * fi / f1, dn[k])
# The total number of particlesi n the current bin is simply the number of singly charged
# particles divided by the singly charged charging efficiency.
dn[l - i] /= f1
return None
def proc_files(self):
"""
Process the files that are contained by the SMPS class attribute 'files'
"""
e_count = 0
# TODO: Some of the processes here can be parallelized using the multiprocessing library
# Reading of the data and determing the lag will require a proper sequence, but
# we can parallelize:
# 1) The processing of an individual file
# 2) The processing of the up and down scans within a file
# WARNING: Need to ensure that the different processes don't step on each other.
# Likely we would need to make some of the instance variables local (air.t and air.p
# come to mind).
self.dn_interp = np.zeros((2 * len(self.files), len(self.diam_interp)))
self.date = [None] * 2 * len(self.files)
self.cn_raw = np.zeros((2 * len(self.files), len(self.diam_interp)))
self.cn_smoothed = np.zeros((2 * len(self.files), len(self.diam_interp)))
self.diam = np.zeros((2 * len(self.files), len(self.diam_interp)))
for e, i in enumerate(self.files):
try:
print(i.name)
# Get the data in the file header
meta_data = self.__readmeta__(i.name)
# Retrieve the scan and dwell times from the meta data.
tscan = meta_data['Scan_Time'].values[0]
tdwell = meta_data['Dwell_Time'].values[0]
# This is the data in the scan file
data = self.__readdata__(i.name)
# Get the CPC data of interest and pad the end with zeros for the sake of
# readability.
cpc_data = np.pad(data.CPC_1_Cnt.values[self.lag:], (0, self.lag),
mode="constant", constant_values=(0, 0))
# This is the CPC concentration
cpc_data /= data.CPC_Flw.values
# Remove NaNs and infs from the cpc data
cpc_data[np.where(np.isnan(cpc_data))] = 0.0
cpc_data[np.where(np.isinf(cpc_data))] = 0.0
# In the following section, we will take the two variables, 'data' and
# 'cpc_data' to produce the data that will be run through the core of
# the processing code. The steps are as follows to prepare the data:
# 1. If the data is the downward data, flip the arrays.
# 2. Truncate the data to get the scanned data.
# a. If the data is the up data, we simply want the first 'tscan'
# elements.
# b. If the data is the down data, we will account for the final
# dwell time ('tdwell'), and take the portion of the arrays
# from tdwell to tscan + tdwell.
# 3. Get the mean values of all the data in the scan array from the
# respective data array. We will use the mean values for inversion.
# PRODUCE UP DATA FOR PROCESSING #
# Extract the portion of the CPC data of interest for the upward scan
cpc_up = cpc_data[:tscan]
up_data = data.iloc[:tscan]
smooth_up = sm.nonparametric.lowess(cpc_up, up_data.DMA_Diam.values,
frac=self.alpha, it=1, missing='none',
return_sorted=False)
smooth_up[np.where(np.isnan(smooth_up))] = 0.0
smooth_up[np.where(np.isinf(smooth_up))] = 0.0
# Retrieve mean up data
mup = up_data.mean(axis=0)
self.air.t = mup.Aer_Temp_C
self.air.p = mup.Aer_Pres_PSI
# Calculate diameters from voltages
dup = [self.dma.v2d(i, self.air, mup.Sh_Q_VLPM,
mup.Sh_Q_VLPM) for i in up_data.DMA_Set_Volts.values]
# UP DATA PRODUCTION COMPLETE #
# BEGIN DOWN DATA PRODUCTION #
# Flip the cpc data and extricate the portion of interest
cpc_down = cpc_data[::-1]
cpc_down = cpc_down[tdwell:tscan + tdwell]
# Flip the down data and slice it
down_data = data.iloc[::-1]
down_data = down_data.iloc[int(tdwell):int(tscan + tdwell)]
smooth_down = sm.nonparametric.lowess(cpc_down, down_data.DMA_Diam.values,
frac=self.alpha, it=1, missing='none',
return_sorted=False)
smooth_down[np.where(np.isnan(smooth_up))] = 0.0
smooth_down[np.where(np.isinf(smooth_up))] = 0.0
# Retrieve mean down data
mdown = down_data.mean(axis=0)
self.air.t = mdown.Aer_Temp_C
self.air.p = mdown.Aer_Pres_PSI
# Calculate diameters from voltages
ddown = [self.dma.v2d(i, self.air, mdown.Sh_Q_VLPM,
mdown.Sh_Q_VLPM) for i in down_data.DMA_Set_Volts.values]
up_interp_dn = self.__fwhm__(dup, smooth_up, mup)
down_interp_dn = self.__fwhm__(ddown, smooth_down, mdown)
up_interp_dn[np.where(up_interp_dn < 0)] = 0
down_interp_dn[np.where(down_interp_dn < 0)] = 0
except ValueError:
print("Unexpected error:", sys.exc_info()[0])
print("Issue processing file " + str(i.name))
e_count += 1
continue
except TypeError:
print("Error processing file.")
e_count += 1
continue
else:
n_e = e - e_count
# Stuff the data for the attributes down here. If an error is thrown, we will not contaminate
# member data.
self.date[2 * n_e] = dt.strptime(str(meta_data.Date[0]) + ',' + str(meta_data.Time[0]),
'%m/%d/%y,%H:%M:%S')
self.date[2 * n_e + 1] = self.date[2 * n_e] + timedelta(0, tscan + tdwell)
self.diam[2 * n_e, 0:np.asarray(dup).size] = np.asarray(dup)
self.cn_raw[2 * n_e, 0:cpc_up.size] = cpc_up
# Store raw data for the down scan
self.cn_raw[2 * n_e + 1, 0:cpc_down.size] = cpc_down
self.diam[2 * n_e + 1, 0:np.asarray(ddown).size] = np.asarray(ddown)
self.cn_smoothed[2 * n_e, 0:smooth_up.size] = smooth_up
self.cn_smoothed[2 * n_e + 1, 0:smooth_down.size] = smooth_down
self.dn_interp[2 * n_e, :] = up_interp_dn
self.dn_interp[2 * n_e + 1, :] = down_interp_dn
@staticmethod
def __readmeta__(file):
"""
Parameters
----------
file: file object
File to retrieve meta data from
Returns
-------
pandas datafram containing meta data
"""
return pd.read_csv(file, header=0, lineterminator='\n', nrows=1)
@staticmethod
def __readdata__(file):
"""
Read the data from the file.
Data starts in the third row.
Parameters
-----------
file: file object
File containing SMPS data
Returns
--------
pandas data frame
"""
return pd.read_csv(file, parse_dates='Date_Time', index_col=0, header=2, lineterminator='\n')
def getLag(self, index, delta=0, p=True):
"""
This function can be called to guide the user in how to set the lag attribute.
Parameters
----------
index: int
Index of file in attribute files to determine the lag
delta: int, optional
Fudge factor for aligning the two scans; default is 0
p: Boolean, optional
Plot the output if True
"""
meta_data = self.__readmeta__(self.files[index].name)
up_data = self.__readdata__(self.files[index].name)
tscan = meta_data.Scan_Time.values[0]
tdwell = meta_data.Dwell_Time.values[0]
# CPC concentrations will be simply the 1 s buffer divided by the
# CPC flow
cpc_cnt = up_data.CPC_1_Cnt.values / up_data.CPC_Flw.values
# Truncate the upward trajectory
up = cpc_cnt[0:tscan]
# Get the counts for the decreasing voltage and truncate them
down = cpc_cnt[::-1] # Flip the variable cpc_cnt
down = cpc_cnt[tdwell:(tscan + tdwell)] # Truncate the data
down_data = up_data[::-1]
# LAG CORRELATION #
corr = np.correlate(up, down, mode="full")
plt.plot(corr)
corr = corr[corr.size / 2:]
self.lag = floor(corr.argmax(axis=0) / 2 + delta)
f = self.lag
# GET CPC DATA FOR PLOTTING #
# Shift the up data with the number of zeros padding on the end equal to the lag
up = up_data['CPC_1_Cnt'].values[f:tscan + f] / up_data['CPC_Flw'].values[f:tscan + f]
# Remove NaNs and infs
up[np.where(np.isinf(up))] = 0.0
up[np.where(np.isnan(up))] = 0.0
up_data = up_data.iloc[:tscan]
smooth_p = 0.3
smooth_up = sm.nonparametric.lowess(up, up_data.DMA_Diam.values, frac=smooth_p, it=1, missing='none')
# Padding the down scan is trickier - if the parameter f (should be the lag in the correlation)
# is larger than the dwell time, we will have a negative resize parameter - this is no good.
# Pad the front with the number of zeros that goes beyond the end (front in the reveresed array).
# This makes sense. I guess.
if f > tdwell:
f = tdwell
down = np.pad(down_data['CPC_1_Cnt'].values[0:tscan] /
down_data['CPC_Flw'].values[0:tscan],
pad_width={tdwell - f, 0}, constant_values={0, 0})
else:
down = (down_data['CPC_1_Cnt'].values[(tdwell - f):(tscan + tdwell - f)] /
down_data['CPC_Flw'].values[(tdwell - f):(tscan + tdwell - f)])
down[np.where(np.isinf(down))] = 0.0
down[np.where(np.isnan(down))] = 0.0
down_data = down_data.iloc[:tscan]
smooth_down = sm.nonparametric.lowess(down, down_data.DMA_Diam.values, frac=smooth_p, missing='none')
if p:
f2 = plt.figure(2)
plt.plot(up, 'r.', down, 'b.', smooth_up[:, 1], 'r+', smooth_down[:, 1], 'b+')
output = "Lag is estimated to be " + str(self.lag) + " with a delta of " + str(delta) + "."
plt.title(output)
plt.show()
def buildGrid(self, logtype="ln", gmin=1, gmax=1000, n=300):
"""
Define a logarithmic grid over which to interpolate output values
Parameters
----------
type: string, optional
this value can be log10 or natural (e); default is log10
min: int, optional
minimum value in the grid; default is 1
max: int, optional
maximum value in the grid; default is 1000
n: int, optional
number of bins over which to divide the grid; default is 300
"""
if logtype == "log10":
self.diam_interp = np.logspace(np.log10(gmin), np.log10(gmax), n, endpoint=True)
else:
self.diam_interp = np.logspace(np.log(gmin), np.log(gmax), n, base=np.exp(1), endpoint=True)
return None
def __fwhm__(self, diam, dn, mean_data):
"""
Retrieve the full width at half max and return an interpolated concentration dN/dlogdp array
Parameters
-----------
diam: NumPy array of floats
Diameters calculated from the setpoint voltage of the scan. Units are nm
dn: NumPy array of floats
CPC concentration at each diameter. Units are cc^-1.
mean_data: pandas DataFrame
DataFrame containing mean data from the scan.
:return:
"""
ls = len(dn)
dlogd = np.zeros(ls) # calculate dlogd
fwhm = np.zeros(ls) # hold width
self.air.t = mean_data.Aer_Temp_C
self.air.p = mean_data.Aer_Pres_PSI
def xfer(dp, qa, qs):
"""
Return the full-width, half-max of the transfer function in diameter space.
This implementation ignores diffusion broadening.
Parameters
-----------
dp: float
particle size in nm
qa: float
aerosol flow rate in lpm
qs: float
aerosol flow rate in lpm
Returns
-------
Width of transfer function in nm.
"""
beta = float(qa) / float(qs)
# Retrieve the center mobility
zc = aerosol.z(dp, self.air, 1)
# Upper bound of the mobility
zm = (1 - beta / 2) * zc
# Lower bound of the mobility
zp = (1 + beta / 2) * zc
return aerosol.z2d(zm, self.air, 1) - aerosol.z2d(zp, self.air, 1)
for e, i in enumerate(diam):
try:
fwhm[e] = xfer(i, mean_data.Aer_Q_VLPM, mean_data.Sh_Q_VLPM)
dlogd[e] = np.log10(i + fwhm[e] / 2) - np.log10(i - fwhm[e] / 2)
except (ValueError, ZeroDivisionError):
fwhm[e] = np.nan
print('Handling divide by zero error')
except:
fwhm[e] = np.nan
print('Handling unknown error: ' + str(sys.exc_info()[0]))
# Correct for multiple charging. We will use the array dn by reference and stuff this
# into another array
self.__chargecorr__(diam, dn, self.air)
output_sd = np.copy(dn)
# Divide the concentration by dlogdp from the transfer function
output_sd /= dlogd
# Use the 1D interpolation scheme to project the current concentrations
# onto the array defined by diam_interp
f = interp1d(diam, output_sd, bounds_error=False, kind='linear')
# Return the interpolated dNdlogDp distribution
return f(self.diam_interp)
|
{
"content_hash": "c5c4837732e1b805ec5bff05bc93c11b",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 118,
"avg_line_length": 39.609345794392524,
"alnum_prop": 0.5259308196876032,
"repo_name": "lo-co/atm-py",
"id": "8838424fb03c982cc5a5d352d7cd06ec70857e9b",
"size": "21191",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/atmPy/aerosols/instrument/DMA/smps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "805820"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.cdn import CdnManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cdn
# USAGE
python origins_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CdnManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.origins.get(
resource_group_name="RG",
profile_name="profile1",
endpoint_name="endpoint1",
origin_name="www-someDomain-net",
)
print(response)
# x-ms-original-file: specification/cdn/resource-manager/Microsoft.Cdn/stable/2021-06-01/examples/Origins_Get.json
if __name__ == "__main__":
main()
|
{
"content_hash": "6b7e7814bf2eafc5dd69115756616c83",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 114,
"avg_line_length": 30.685714285714287,
"alnum_prop": 0.7057728119180633,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4fd90004ad9f25d1ab856662002515d42e7e6d85",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cdn/azure-mgmt-cdn/generated_samples/origins_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import ConfigParser
# 3rd party
import requests
from fabric.api import *
from fabric import colors
import fabtools
# local
import exceptions
def fabfile_root():
return os.path.dirname(os.path.abspath(__file__))
def fabfile_templates_root():
return os.path.join(fabfile_root(), "templates")
def project_root():
return os.path.dirname(fabfile_root())
def remote_project_root():
return "/vagrant"
def get_config_parser():
parser = ConfigParser.RawConfigParser()
parser.read(os.path.join(project_root(), "config.ini"))
return parser
def set_hosts_from_config():
parser = get_config_parser()
env.hosts = parser.get('servers', env.provider).split(",")
|
{
"content_hash": "89dcdc17dc8f9df513c3f940a8b4b7ca",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 21.875,
"alnum_prop": 0.7071428571428572,
"repo_name": "bjlange/revenge",
"id": "8ea542e9159dd9dc334b47c71544158c3dfa4fe6",
"size": "719",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fabfile/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6329"
},
{
"name": "Python",
"bytes": "13784"
}
],
"symlink_target": ""
}
|
import markovify
import nltk
import re
import numpy as np
import urllib
import oauth2 as oauth
from settings import conn, db, auth, post_tags, blogName
class POSifiedText(markovify.Text):
def word_split(self, sentence):
words = re.split(self.word_split_pattern, sentence)
if words[0] != "":
words = ["::".join(tag) for tag in nltk.pos_tag(words)]
else:
words = list("",)
return words
def word_join(self, words):
sentence = " ".join(word.split("::")[0] for word in words)
return sentence
db.execute("SELECT body FROM fics ORDER BY date LIMIT 500")
posts = db.fetchall()
text = [post[0].strip() for post in posts]
text = " ".join(text)
text_model = POSifiedText(text, state_size=3)
output = ""
for i in range(abs(int(np.random.normal(9, 4, 1))) + 1):
sentence = text_model.make_sentence()
if sentence is not None:
output += sentence.strip() + " "
title = text_model.make_short_sentence(70)
client = oauth.Client(
oauth.Consumer(key=auth["consumer_key"], secret=auth["consumer_secret"]),
oauth.Token(key=auth["oauth_token"], secret=auth["oauth_token_secret"])
)
resp, content = client.request(
f"https://api.tumblr.com/v2/blog/{blogName}/post",
method="POST",
body=urllib.parse.urlencode({
"title": title,
"body": output,
"tags": ",".join(post_tags),
"format": "markdown"
})
)
|
{
"content_hash": "6883722ba3503dc91d361e6a68997c0b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 26.145454545454545,
"alnum_prop": 0.6258692628650904,
"repo_name": "veggiedefender/miraculousladybot",
"id": "f55036f06d6f44bf01d1567b6498c49d083b7b80",
"size": "1438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2806"
}
],
"symlink_target": ""
}
|
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
(return_code, output) = doRemoteCMD("rm -rf %s/Images" % SRC_DIR)
if return_code != 0:
action_status = False
(return_code, output) = doRemoteCMD("rm -rf %s/Sounds" % SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD("mkdir -p %s/Images" % SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("%s/*.jpg" % SCRIPT_DIR, "%s/Images" % SRC_DIR):
action_status = False
(return_code, output) = doRemoteCMD("mkdir -p %s/Sounds" % SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("%s/*.mp3" % SCRIPT_DIR, "%s/Sounds" % SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
{
"content_hash": "794bc430c0c23aff9d818b5e355cf9e0",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 101,
"avg_line_length": 29.88607594936709,
"alnum_prop": 0.5444020895100946,
"repo_name": "JianfengXu/crosswalk-test-suite",
"id": "5b1a0ea21e740d8fdf2cad0725e86a07440cb8a2",
"size": "7106",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "webapi/tct-notification-tizen-tests/inst.wgt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1693"
},
{
"name": "C",
"bytes": "28136"
},
{
"name": "CSS",
"bytes": "401725"
},
{
"name": "CoffeeScript",
"bytes": "18978"
},
{
"name": "Cucumber",
"bytes": "106420"
},
{
"name": "GLSL",
"bytes": "6990"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "40865855"
},
{
"name": "Java",
"bytes": "879556"
},
{
"name": "JavaScript",
"bytes": "4750576"
},
{
"name": "Logos",
"bytes": "12"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "45437"
},
{
"name": "Python",
"bytes": "4108034"
},
{
"name": "Shell",
"bytes": "851074"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import sys
def main(total_rows, number_of_error_rows):
error_count = 0
for i in xrange(number_of_error_rows):
print "error_%s" %str(error_count)
for i in xrange(total_rows - number_of_error_rows):
print "%s|%s_number" %(i,i)
if __name__ == '__main__':
total_rows = 20
error_rows = 0
if len(sys.argv) > 1:
total_rows = int(sys.argv[1])
error_rows = int(sys.argv[2])
main(total_rows, error_rows)
|
{
"content_hash": "77c381ab1f01aad41bb99ad82d070ce3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 32.8,
"alnum_prop": 0.6933797909407665,
"repo_name": "janebeckman/gpdb",
"id": "b77d897f9106b772186185a8a7abe27044a42692",
"size": "1148",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/queries/basic/exttab/errlog/sql/datagen_first_errors.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35420653"
},
{
"name": "C++",
"bytes": "3779804"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "737638"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268244"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196216"
},
{
"name": "M4",
"bytes": "104559"
},
{
"name": "Makefile",
"bytes": "437850"
},
{
"name": "PLSQL",
"bytes": "260471"
},
{
"name": "PLpgSQL",
"bytes": "5511854"
},
{
"name": "Perl",
"bytes": "3894341"
},
{
"name": "Perl 6",
"bytes": "14377"
},
{
"name": "Python",
"bytes": "8762921"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3825811"
},
{
"name": "Shell",
"bytes": "554328"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "490694"
}
],
"symlink_target": ""
}
|
"""Android automated build script.
This script may be used for builds of LiquidFun Paint.
Optional environment variables:
ANDROID_SDK_HOME = Path to the Android SDK. Required if it is not passed on the
command line.
NDK_HOME = Path to the Android NDK. Required if it is not passed on the
command line.
MAKE_FLAGS = String to override the default make flags with for ndk-build.
ANT_PATH = Path to ant executable. Required if it is not in $PATH or passed on
the command line.
LIQUIDFUN_SRC_PATH = Path to LiquidFun source directory. Required if it is not
passed on the command line.
SWIG_BIN = Path to SWIG binary executable. Required if it is not passed on the
command line.
SWIG_LIB = Path to SWIG shared files directory. If it is not passed on the
command line, we will rely on SWIG_BIN to get it.
OUTPUT_ZIP = Path and name to the output archive of build artifacts
"""
import argparse
import distutils.spawn
import os
import shutil
import subprocess
import sys
import buildutils
_ANT_FLAGS = 'ant_flags'
_LIQUIDFUN_SRC_PATH_ENV_VAR = 'LIQUIDFUN_SRC_PATH'
_LIQUIDFUN_SRC_PATH = 'liquidfun_src_path'
_SWIG_LIB_ENV_VAR = 'SWIG_LIB'
_SWIG_LIB = 'swig_lib'
_SWIG_BIN_ENV_VAR = 'SWIG_BIN'
_SWIG_BIN = 'swig_bin'
_OUTPUT_ZIP = 'output_zip'
_OUTPUT_APK_DIR = 'output_apk_dir'
def AddArguments(parser):
"""Add module-specific command line arguments to an argparse parser.
This will take an argument parser and add arguments appropriate for this
module. It will also set appropriate default values.
Args:
parser: The argparse.ArgumentParser instance to use.
"""
buildutils.AddArguments(parser)
defaults = {}
defaults[_ANT_FLAGS] = 'release'
defaults[_LIQUIDFUN_SRC_PATH] = (os.getenv(_LIQUIDFUN_SRC_PATH_ENV_VAR) or
'../../libs/liquidfun/Box2D')
defaults[_SWIG_BIN] = (os.getenv(_SWIG_BIN_ENV_VAR) or
distutils.spawn.find_executable('swig'))
defaults[_SWIG_LIB] = os.getenv(_SWIG_LIB_ENV_VAR)
defaults[_OUTPUT_ZIP] = None
defaults[_OUTPUT_APK_DIR] = None
parser.add_argument('-A', '--' + _ANT_FLAGS,
help='Flags to use to override ant flags',
dest=_ANT_FLAGS, default=defaults[_ANT_FLAGS])
parser.add_argument('-l', '--' + _LIQUIDFUN_SRC_PATH,
help='Path to LiquidFun/Box2D source directory',
dest=_LIQUIDFUN_SRC_PATH,
default=defaults[_LIQUIDFUN_SRC_PATH])
parser.add_argument('--' + _SWIG_BIN,
help='Path to SWIG binary', dest=_SWIG_BIN,
default=defaults[_SWIG_BIN])
parser.add_argument('--' + _SWIG_LIB,
help='Path to SWIG shared libraries', dest=_SWIG_LIB,
default=defaults[_SWIG_LIB])
parser.add_argument('-z', help='Path and name to the output archive',
dest=_OUTPUT_ZIP, default=defaults[_OUTPUT_ZIP])
parser.add_argument('-o', '--' + _OUTPUT_APK_DIR,
help='Path to copy output APKs to.',
dest=_OUTPUT_APK_DIR, default=defaults[_OUTPUT_APK_DIR])
class BuildEnvironment(buildutils.BuildEnvironment):
"""Class representing the build environment we will be building in.
This class is derived from buildutils.BuildEnvironment and adds specific
attributes for this project.
This class resolves and exposes various build parameters as properties,
which can be customized by users before building. It also provides methods
to accomplish common build tasks such as executing build tools and archiving
the resulting build artifacts.
Attributes:
ant_flags: Flags to pass to ant, for ant builds.
"""
def __init__(self, arguments):
"""Constructs the BuildEnvironment with basic information needed to build.
The build properties as set by argument parsing are also available
to be modified by code using this object after construction.
It is required to call this function with a valid arguments object,
obtained either by calling argparse.ArgumentParser.parse_args() after
adding this modules arguments via buildutils.AddArguments(), or by passing
in an object returned from buildutils.BuildDefaults().
Args:
arguments: The argument object returned from ArgumentParser.parse_args().
"""
super(BuildEnvironment, self).__init__(arguments)
if type(arguments) is dict:
args = arguments
else:
args = vars(arguments)
self.ant_flags = args[_ANT_FLAGS]
os.environ[_LIQUIDFUN_SRC_PATH_ENV_VAR] = args[_LIQUIDFUN_SRC_PATH]
os.environ[_SWIG_BIN_ENV_VAR] = args[_SWIG_BIN]
os.environ[_SWIG_LIB_ENV_VAR] = (args[_SWIG_LIB] or
self.CaptureSubprocessOutput(
[args[_SWIG_BIN],
'-swiglib']))
self.output_zip = args[_OUTPUT_ZIP]
self.output_apk_dir = args[_OUTPUT_APK_DIR]
def CaptureSubprocessOutput(self, argv):
"""Returns the output of a subprocess as run with the given argument list.
Runs a process via popen().
Args:
argv: A list of process arguments starting with the binary name, in the
form returned by shlex.
Returns:
The commandline output from the subprocess, with the last newline
stripped.
"""
try:
if self.verbose:
print 'Running subcommand as: %s' % str(argv)
process = subprocess.Popen(argv, stdout=subprocess.PIPE)
process.wait()
return process.communicate()[0].rstrip()
except OSError:
return ''
def CopyFilesWithExtension(self, dirlist, extension, output_path,
flatten=False, exclude=None):
"""Copy files from the specified directory path to the output path.
Copy any files of a certain extension, present in the directories specified
in dirlist, to the specified output path. All dirlist paths are relative
from the project top directory.
Args:
dirlist: A list of directories to search for files in.
extension: The extension of the files we are searching for.
output_path: Path to the output directory, relative to the value of
the project_directory property.
flatten: If true, copy all files to the same directory without preserving
directory structure.
exclude: Optional list of directory names to filter from dir trees in
dirlist. Subdirectories with these names will be skipped when writing
the archive.
Raises:
IOError: An error occurred writing or copying the archive.
"""
outputabs = os.path.join(self.project_directory, output_path)
for d in dirlist:
srcdir = os.path.join(self.project_directory, d)
for root, dirs, files in os.walk(srcdir):
if exclude:
for ex in exclude:
if ex in dirs:
dirs.remove(ex)
for f in files:
if f.endswith(extension):
outabspath = outputabs
if not flatten:
outrelpath = os.path.relpath(root, self.project_directory)
outabspath = os.path.join(outputabs, outrelpath)
if not os.path.exists(outabspath):
os.makedirs(outabspath)
copyf = os.path.join(root, f)
if self.verbose:
print 'Copying %s to: %s' % (copyf, outabspath)
shutil.copy2(copyf, outabspath)
def main():
parser = argparse.ArgumentParser()
AddArguments(parser)
args = parser.parse_args()
retval = -1
env = BuildEnvironment(args)
try:
env.GitClean()
env.BuildAndroidLibraries(['.'])
env.RunSubprocess([env.ant_path, env.ant_flags])
if env.output_zip is not None:
env.MakeArchive(['bin', 'libs', 'gen', 'obj'], env.output_zip)
if env.output_apk_dir is not None:
env.CopyFilesWithExtension(['bin'], '.apk', env.output_apk_dir,
True, ['latest'])
retval = 0
except buildutils.Error as e:
print >> sys.stderr, 'Caught buildutils error: %s' % e.error_message
retval = e.error_code
except IOError as e:
print >> sys.stderr, 'Caught IOError for file %s: %s' % (e.filename,
e.strerror)
retval = -1
return retval
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "cf20489aec67bdb741495cc5483fc96e",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 79,
"avg_line_length": 36.0948275862069,
"alnum_prop": 0.6472414616670648,
"repo_name": "luiseduardohdbackup/LiquidFunPaint",
"id": "39e72c1dc0a4f2c2d540f96a175a17409fec5919",
"size": "8991",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "AutoBuild/build_android.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "10249"
},
{
"name": "Java",
"bytes": "152168"
},
{
"name": "Makefile",
"bytes": "1590"
},
{
"name": "Python",
"bytes": "28746"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="ticktextsrc",
parent_name="layout.polar.angularaxis",
**kwargs,
):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "df4b4388a8298c569c1b8d12b12aa1ad",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 28.875,
"alnum_prop": 0.5865800865800865,
"repo_name": "plotly/plotly.py",
"id": "31a4de19d0ed1f94d12866206ee6a299b22e8125",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/polar/angularaxis/_ticktextsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""MongoDB storage backend"""
from oslo.config import cfg
import pymongo
from ceilometer.alarm.storage import pymongo_base
from ceilometer.openstack.common import log
from ceilometer import storage
from ceilometer.storage.mongo import utils as pymongo_utils
cfg.CONF.import_opt('time_to_live', 'ceilometer.storage',
group="database")
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""Put the alarm data into a MongoDB database."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that otherwise we overflow the MongoDB instance with new
# connection since we instanciate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correcly updated if
# needed.
self.upgrade()
def clear(self):
self.conn.drop_database(self.db)
# Connection will be reopened automatically if needed
self.conn.close()
|
{
"content_hash": "5657d84f91251dd9f900945cfc6b330c",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 36.229166666666664,
"alnum_prop": 0.6687751581368603,
"repo_name": "froyobin/ceilometer",
"id": "19fff006174b15d74af9757c7e3282f913ef7460",
"size": "2543",
"binary": false,
"copies": "5",
"ref": "refs/heads/out_branch",
"path": "ceilometer/alarm/storage/impl_mongodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6027"
},
{
"name": "Python",
"bytes": "2682026"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
}
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Molecule design gene table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def create_table(metadata, molecule_design_tbl, gene_tbl):
"Table factory."
tbl = Table('molecule_design_gene', metadata,
Column('molecule_design_id', Integer,
ForeignKey(molecule_design_tbl.c.molecule_design_id,
onupdate='CASCADE', ondelete='CASCADE'),
primary_key=True, index=True),
Column('gene_id', Integer,
ForeignKey(gene_tbl.c.gene_id,
onupdate='CASCADE', ondelete='CASCADE'),
primary_key=True, index=True),
)
return tbl
|
{
"content_hash": "1d3e54403c2e4e3bfadfa6441e0dd3d4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 33.241379310344826,
"alnum_prop": 0.6493775933609959,
"repo_name": "helixyte/TheLMA",
"id": "229e5b1dc1bd8379b11677aa02853f64b2c56150",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thelma/repositories/rdb/schema/tables/moleculedesigngene.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3126"
},
{
"name": "Python",
"bytes": "3329729"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
import threading
import time
class myThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print('Starting ' + self.name)
calculateMass(self.name, self.counter, 5)
print('Exiting ' + self.name)
calculations = {}
threadLock = threading.Lock()
def calculateMass(threadName, delay, counter):
for i in range(counter):
time.sleep(delay)
print('{0}: on iteration {1}'.format(threadName, i+1))
# Mass has been calculated. Store final computation.
# If there are 3 active threads, we know the density
# hasn't been calculated yet, so wait.
while threading.active_count() == 3:
time.sleep(0.5)
calculations['mass'] = calculations['density'] / 2
print('{} finished calculating mass...'.format(threadName))
def calculateDensity(threadName):
print('{} starting to calculate the density...'.format(threadName))
total = 0
for i in range(6555):
for j in range(9999):
total += 1
calculations['density'] = total
print('{} finished calculating density...'.format(threadName))
thread1 = myThread(1, 'Thread-1', 1)
thread2 = threading.Thread(target=calculateDensity, name='Thread-2', args=['Thread-2'])
# Start threads
thread1.start()
thread2.start()
print('Exiting main thread...')
|
{
"content_hash": "9d1f88503590a0fa4b4cb4e1b1a8c3b3",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 27.58490566037736,
"alnum_prop": 0.6484268125854993,
"repo_name": "UALR-ACM/Practice-Problems",
"id": "f66d82c5509c01c1b9342b771ade39f9e9b97ac4",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017-1-26/threading_example3_solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1628"
},
{
"name": "Java",
"bytes": "24280"
},
{
"name": "Python",
"bytes": "29013"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.io.data as web
from math import sqrt
import math
#############################
# FINANCIAL HELPER ROUTINES #
#############################
def compute_nyears(x) :
return np.double((x.index[-1] - x.index[0]).days) / 365.
def compute_cagr(equity) :
return np.double((equity.ix[-1] / equity.ix[0]) ** (1. / compute_nyears(equity)) - 1)
def compute_annual_factor(equity) :
possible_values = [252,52,26,13,12,6,4,3,2,1]
L = pd.Series(len(equity) / compute_nyears(equity) - possible_values)
return possible_values[L.index[L == L.min()]]
def compute_sharpe(equity) :
rets = equity / equity.shift(1) - 1
temp = compute_annual_factor(rets)
rets = pd.Series(rets)
return sqrt(temp) * rets.mean()/rets.std()
def compute_DVR(equity):
return compute_sharpe(equity) * compute_R2(equity)
def compute_drawdown(x) :
return (x - pd.expanding_max(x))/pd.expanding_max(x)
def compute_max_drawdown(x):
return compute_drawdown(x).min()
def compute_rolling_drawdown(equity) :
rolling_dd = pd.rolling_apply(equity, 252, compute_max_drawdown, min_periods=0)
df = pd.concat([equity, rolling_dd], axis=1)
df.columns = ['x', 'rol_dd_10']
plt.plot(df)
plt.grid()
def compute_avg_drawdown(x) :
drawdown = compute_drawdown(x).shift(-1)
drawdown[-1]=0.
dend = [drawdown.index[i] for i in range(len(drawdown)) if drawdown[i] == 0 and drawdown[i-1] != 0]
dstart = [drawdown.index[i] for i in range(len(drawdown)-1) if drawdown[i] == 0 and drawdown[i+1] != 0]
f = pd.DataFrame([dstart, dend], index=['dstart', 'dend']).transpose()
f['drawdown'] = [drawdown[f['dstart'][i]:f['dend'][i]].min() for i in range(len(f))]
return f.drawdown.mean()
def compute_calmar(x) :
return compute_cagr(x) / compute_max_drawdown(x)
def compute_R2(equity) :
x = pd.DataFrame(equity)
x.columns=[0]
x[1]=[equity.index[i].toordinal() for i in range(len(equity))]
return x[0].corr(x[1]) ** 2
def compute_volatility(x) :
temp = compute_annual_factor(x)
return sqrt(temp) * x.std()
def compute_var(x, probs=0.05) :
return x.quantile(probs)
def compute_cvar(x, probs=0.05) :
return x[ x < x.quantile(probs) ].mean()
def print_stats(equity) :
print '**** STATISTICS ****'
print '====================\n'
print 'n_years : ', compute_nyears(equity)
print 'cagr : ', compute_cagr(equity) * 100, '%'
rets = equity / equity.shift(1) - 1
print 'annual_factor : ', compute_annual_factor(equity)
print 'sharpe : ', compute_sharpe(equity)
compute_drawdown(rets)
print 'max_drawdown : ', compute_max_drawdown(equity) * 100, '%'
print 'avg_drawdown : ', compute_avg_drawdown(equity) * 100, '%'
print 'calmar : ', compute_calmar(equity)
print 'R-squared : ', compute_R2(equity)
print 'DVR : ', compute_DVR(equity)
print 'volatility : ', compute_volatility(rets)
#print 'exposure : ', compute_exposure(models$equal_weight)
print 'VAR 5% : ', compute_var(equity)
print 'CVAR 5% : ', compute_cvar(equity)
from zipline.utils import tradingcalendar
def endpoints(start, end, period='m') :
dates = tradingcalendar.get_trading_days(start, end)
if isinstance(period, int) :
dates = [dates[i] for i in range(0, len(dates), period)]
else :
if period == 'm' : months = 1
elif period == 'q' : months = 3
elif period == 'b' : months = 6
elif period == 'y' : months = 12
e_dates = [dates[i - 1] for i in range(1,len(dates))\
if dates[i].month > dates[i-1].month\
or dates[i].year > dates[i-1].year ]+ list([dates[-1]])
dates = [e_dates[i] for i in range(0,len(e_dates),months)]
return dates
def add_portfolio(name, portfolios) :
return dict(portfolios.items() + {name : {}}.items())
# topn
def ntop(prices, n) :
weights = pd.DataFrame(0., index=prices.index, columns=prices.columns)
for i in range(len(prices)) :
n_not_na = prices.ix[i].count()
n_row = min(n, n_not_na)
for s in prices.columns :
if prices.ix[i][s] <= n :
weights.ix[i][s] = 1. / n_row
else :
weights.ix[i][s] = 0.
return weights
def Monthly_Return_Table (monthly_returns) :
# monthly_returns is a pandas Series of monthly returns indexed by date(Y-m)
df = pd.DataFrame(monthly_returns.values, columns=['Data'])
df['Month'] = monthly_returns.index.month
df['Year']= monthly_returns.index.year
table = df.pivot_table(rows='Year', cols='Month').fillna(0)
table['Annual Return'] = table.apply(np.sum, axis=1) * 100
return table
def generate_orders(transactions) :
orders = pd.DataFrame()
for i in range(len(transactions)):
for j in range(len(transactions.columns)):
t = transactions.ix[i]
if transactions.ix[i][j] < 0 :
orders = orders.append([[t.name.date().year, t.name.date().month, t.name.date().day, t.index[j], 'Sell', abs(t[j])]])
if transactions.ix[i][j] > 0 :
orders = orders.append([[t.name.date().year, t.name.date().month, t.name.date().day, t.index[j], 'Buy', abs(t[j])]])
orders.columns = ['Year', 'Month', 'Day', 'Symbol', 'Action', 'Qty']
return orders
def save_portfolio_metrics (portfolios, portfolio_name, period_ends, prices, \
p_value, p_weights, path=None) :
rebalance_qtys = (p_weights.ix[period_ends] / prices.ix[period_ends]) * p_value.ix[period_ends]
p_holdings = rebalance_qtys.align(prices)[0].shift(1).ffill().fillna(0)
transactions = p_holdings - p_holdings.shift(1).fillna(0)
p_returns = p_value.pct_change(periods=1)
p_index = np.cumproduct(1 + p_returns)
m_rets = (1 + p_returns).resample('M', how='prod', kind='period') - 1
portfolios[portfolio_name]['equity'] = p_value
portfolios[portfolio_name]['ret'] = p_returns
portfolios[portfolio_name]['cagr'] = compute_cagr(p_value) * 100
portfolios[portfolio_name]['sharpe'] = compute_sharpe(p_value)
portfolios[portfolio_name]['weight'] = p_weights
portfolios[portfolio_name]['transactions'] = transactions
portfolios[portfolio_name]['period_return'] = 100 * (p_value.ix[-1] / p_value[0] - 1)
portfolios[portfolio_name]['avg_monthly_return'] = p_index.resample('BM', how='last').pct_change().mean() * 100
portfolios[portfolio_name]['monthly_return_table'] = Monthly_Return_Table(m_rets)
portfolios[portfolio_name]['drawdowns'] = compute_drawdown(p_value).dropna()
portfolios[portfolio_name]['max_drawdown'] = compute_max_drawdown(p_value) * 100
portfolios[portfolio_name]['max_drawdown_date'] = p_value.index[compute_drawdown(p_value)==compute_max_drawdown(p_value)][0].date().isoformat()
portfolios[portfolio_name]['avg_drawdown'] = compute_avg_drawdown(p_value) * 100
portfolios[portfolio_name]['calmar'] = compute_calmar(p_value)
portfolios[portfolio_name]['R_squared'] = compute_calmar(p_value)
portfolios[portfolio_name]['DVR'] = compute_DVR(p_value)
portfolios[portfolio_name]['volatility'] = compute_volatility(p_returns)
portfolios[portfolio_name]['VAR'] = compute_var(p_value)
portfolios[portfolio_name]['CVAR'] = compute_cvar(p_value)
portfolios[portfolio_name]['rolling_annual_returns'] = pd.rolling_apply(p_returns, 252, np.sum)
portfolios[portfolio_name]['p_holdings'] = p_holdings
portfolios[portfolio_name]['transactions'] = np.round(transactions[transactions.sum(1)!=0], 0)
portfolios[portfolio_name]['share'] = p_holdings
portfolios[portfolio_name]['orders'] = generate_orders(transactions)
portfolios[portfolio_name]['best'] = max(p_returns)
portfolios[portfolio_name]['worst'] = min(p_returns)
if path != None :
portfolios[portfolio_name].equity.to_csv(path + portfolio_name + '_equity.csv')
portfolios[portfolio_name].weight.to_csv(path + portfolio_name + '_weight.csv')
portfolios[portfolio_name].share.to_csv(path + portfolio_name + '_share.csv')
portfolios[portfolio_name].transactions.to_csv(path + portfolio_name + '_transactions.csv')
portfolios[portfolio_name].orders.to_csv(path + portfolio_name + '_orders.csv')
return
def backtest(prices, weights, period_ends, capital, offset=0., commission=0.) :
p_holdings = (capital / prices * weights.align(prices)[0]).shift(offset).ffill().fillna(0)
w = weights.align(prices)[0].shift(offset).fillna(0)
trade_dates = w[w.sum(1) != 0].index
p_cash = capital - (p_holdings * prices.shift(offset)).sum(1)
totalcash = p_cash[trade_dates].align(prices[prices.columns[0]])[0].ffill().fillna(0)
p_returns = (totalcash + (p_holdings * prices).sum(1) - \
(abs(p_holdings - p_holdings.shift(1)) * commission).sum(1)) / \
(totalcash + (p_holdings * prices.shift(1)).sum(1)) - 1
p_returns = p_returns.fillna(0)
# p_weights = p_holdings * prices.shift(offset) / (totalcash + (p_holdings * prices.shift(offset)).sum(1))
p_weights = pd.DataFrame([(p_holdings * prices.shift(offset))[symbol] / \
(totalcash + (p_holdings * prices.shift(offset)).sum(1)) \
for symbol in prices.columns], index=prices.columns).T
p_weights = p_weights.fillna(0)
return np.cumproduct(1. + p_returns) * capital, p_holdings, p_returns, p_weights
# note: hist_returns are CONTINUOUSLY COMPOUNDED RETURNS
# ie R = e ** hist_returns
def create_historical_ia(symbols, hist_returns, annual_factor=252) :
ia = {}
ia['n'] = len(symbols)
ia['annual_factor'] = annual_factor
ia['symbols'] = hist_returns.columns
ia['symbol_names'] = hist_returns.columns
ia['hist_returns'] = hist_returns[symbols]
# ret = hist_returns[symbols].apply(lambda(x): (e ** x) -1.)
ia['arithmetic_return'] = hist_returns[symbols].mean()
ia['geometric_return'] = hist_returns[symbols].apply(lambda(x): np.prod(1. + x) ** (1. / len(x)) -1.)
ia['std_deviation'] = hist_returns[symbols].std()
ia['correlation'] = hist_returns[symbols].corr()
ia['arithmetic_return'] = (1. + ia['arithmetic_return']) ** ia['annual_factor'] - 1.
ia['geometric_return'] = (1. + ia['geometric_return']) ** ia['annual_factor'] - 1.
ia['risk'] = sqrt(ia['annual_factor']) * ia['std_deviation']
for i in range(len(ia['risk'])):
if ia['risk'][i].round(6) == 0.0 : ia['risk'][i] = 0.0000001
ia['cov'] = ia['correlation'] * (ia['risk'].dot(ia['risk'].T))
ia['expected_return'] = ia['arithmetic_return']
return(ia)
#p_value, p_holdings, p_returns, p_weights = backtest(prices, weights, period_ends, capital, offset=1., commission=0.)
iif = lambda a,b,c: (b,c)[not a]
def ifna(x,y) :
return(iif(math.isnan(x)(x) or math.isinf(x), y, x))
#!/usr/bin/env python
# On 20130210, v0.2
# Critical Line Algorithm
# by MLdP <lopezdeprado@lbl.gov>
#---------------------------------------------------------------
#---------------------------------------------------------------
class CLA:
def __init__(self,mean,covar,lB,uB):
# Initialize the class
self.mean=mean
self.covar=covar
self.lB=lB
self.uB=uB
self.w=[] # solution
self.l=[] # lambdas
self.g=[] # gammas
self.f=[] # free weights
#---------------------------------------------------------------
def solve(self):
# Compute the turning points,free sets and weights
f,w=self.initAlgo()
self.w.append(np.copy(w)) # store solution
self.l.append(None)
self.g.append(None)
self.f.append(f[:])
while True:
#1) case a): Bound one free weight
l_in=None
if len(f)>1:
covarF,covarFB,meanF,wB=self.getMatrices(f)
covarF_inv=np.linalg.inv(covarF)
j=0
for i in f:
l,bi=self.computeLambda(covarF_inv,covarFB,meanF,wB,j,[self.lB[i],self.uB[i]])
if l>l_in:l_in,i_in,bi_in=l,i,bi
j+=1
#2) case b): Free one bounded weight
l_out=None
if len(f)<self.mean.shape[0]:
b=self.getB(f)
for i in b:
covarF,covarFB,meanF,wB=self.getMatrices(f+[i])
covarF_inv=np.linalg.inv(covarF)
l,bi=self.computeLambda(covarF_inv,covarFB,meanF,wB,meanF.shape[0]-1, \
self.w[-1][i])
if (self.l[-1]==None or l<self.l[-1]) and l>l_out:l_out,i_out=l,i
if (l_in==None or l_in<0) and (l_out==None or l_out<0):
#3) compute minimum variance solution
self.l.append(0)
covarF,covarFB,meanF,wB=self.getMatrices(f)
covarF_inv=np.linalg.inv(covarF)
meanF=np.zeros(meanF.shape)
else:
#4) decide lambda
if l_in>l_out:
self.l.append(l_in)
f.remove(i_in)
w[i_in]=bi_in # set value at the correct boundary
else:
self.l.append(l_out)
f.append(i_out)
covarF,covarFB,meanF,wB=self.getMatrices(f)
covarF_inv=np.linalg.inv(covarF)
#5) compute solution vector
wF,g=self.computeW(covarF_inv,covarFB,meanF,wB)
for i in range(len(f)):w[f[i]]=wF[i]
self.w.append(np.copy(w)) # store solution
self.g.append(g)
self.f.append(f[:])
if self.l[-1]==0:break
#6) Purge turning points
self.purgeNumErr(10e-10)
self.purgeExcess()
#---------------------------------------------------------------
def initAlgo(self):
# Initialize the algo
#1) Form structured array
a=np.zeros((self.mean.shape[0]),dtype=[('id',int),('mu',float)])
b=[self.mean[i][0] for i in range(self.mean.shape[0])] # dump array into list
a[:]=zip(range(self.mean.shape[0]),b) # fill structured array
#2) Sort structured array
b=np.sort(a,order='mu')
#3) First free weight
i,w=b.shape[0],np.copy(self.lB)
while np.sum(w)<1:
i-=1
w[b[i][0]]=self.uB[b[i][0]]
w[b[i][0]]+=1-np.sum(w)
return [b[i][0]],w
#---------------------------------------------------------------
def computeBi(self,c,bi):
if c>0:
bi=bi[1][0]
if c<0:
bi=bi[0][0]
return bi
#---------------------------------------------------------------
def computeW(self,covarF_inv,covarFB,meanF,wB):
#1) compute gamma
onesF=np.ones(meanF.shape)
g1=np.dot(np.dot(onesF.T,covarF_inv),meanF)
g2=np.dot(np.dot(onesF.T,covarF_inv),onesF)
if wB==None:
g,w1=float(-self.l[-1]*g1/g2+1/g2),0
else:
onesB=np.ones(wB.shape)
g3=np.dot(onesB.T,wB)
g4=np.dot(covarF_inv,covarFB)
w1=np.dot(g4,wB)
g4=np.dot(onesF.T,w1)
g=float(-self.l[-1]*g1/g2+(1-g3+g4)/g2)
#2) compute weights
w2=np.dot(covarF_inv,onesF)
w3=np.dot(covarF_inv,meanF)
return -w1+g*w2+self.l[-1]*w3,g
#---------------------------------------------------------------
def computeLambda(self,covarF_inv,covarFB,meanF,wB,i,bi):
#1) C
onesF=np.ones(meanF.shape)
c1=np.dot(np.dot(onesF.T,covarF_inv),onesF)
c2=np.dot(covarF_inv,meanF)
c3=np.dot(np.dot(onesF.T,covarF_inv),meanF)
c4=np.dot(covarF_inv,onesF)
c=-c1*c2[i]+c3*c4[i]
if c==0:return None,None
#2) bi
if type(bi)==list:bi=self.computeBi(c,bi)
#3) Lambda
if wB==None:
# All free assets
return float((c4[i]-c1*bi)/c),bi
else:
onesB=np.ones(wB.shape)
l1=np.dot(onesB.T,wB)
l2=np.dot(covarF_inv,covarFB)
l3=np.dot(l2,wB)
l2=np.dot(onesF.T,l3)
return float(((1-l1+l2)*c4[i]-c1*(bi+l3[i]))/c),bi
#---------------------------------------------------------------
def getMatrices(self,f):
# Slice covarF,covarFB,covarB,meanF,meanB,wF,wB
covarF=self.reduceMatrix(self.covar,f,f)
meanF=self.reduceMatrix(self.mean,f,[0])
b=self.getB(f)
covarFB=self.reduceMatrix(self.covar,f,b)
wB=self.reduceMatrix(self.w[-1],b,[0])
return covarF,covarFB,meanF,wB
#---------------------------------------------------------------
def getB(self,f):
return self.diffLists(range(self.mean.shape[0]),f)
#---------------------------------------------------------------
def diffLists(self,list1,list2):
return list(set(list1)-set(list2))
#---------------------------------------------------------------
def reduceMatrix(self,matrix,listX,listY):
# Reduce a matrix to the provided list of rows and columns
if len(listX)==0 or len(listY)==0:return
matrix_=matrix[:,listY[0]:listY[0]+1]
for i in listY[1:]:
a=matrix[:,i:i+1]
matrix_=np.append(matrix_,a,1)
matrix__=matrix_[listX[0]:listX[0]+1,:]
for i in listX[1:]:
a=matrix_[i:i+1,:]
matrix__=np.append(matrix__,a,0)
return matrix__
#---------------------------------------------------------------
def purgeNumErr(self,tol):
# Purge violations of inequality constraints (associated with ill-conditioned covar matrix)
i=0
while True:
flag=False
if i==len(self.w):break
if abs(np.sum(self.w[i])-1)>tol:
flag=True
else:
for j in range(self.w[i].shape[0]):
if self.w[i][j]-self.lB[j]<-tol or self.w[i][j]-self.uB[j]>tol:
flag=True;break
if flag==True:
del self.w[i]
del self.l[i]
del self.g[i]
del self.f[i]
else:
i+=1
return
#---------------------------------------------------------------
def purgeExcess(self):
# Remove violations of the convex hull
i,repeat=0,False
while True:
if repeat==False:i+=1
if i==len(self.w)-1:break
w=self.w[i]
mu=np.dot(w.T,self.mean)[0,0]
j,repeat=i+1,False
while True:
if j==len(self.w):break
w=self.w[j]
mu_=np.dot(w.T,self.mean)[0,0]
if mu<mu_:
del self.w[i]
del self.l[i]
del self.g[i]
del self.f[i]
repeat=True
break
else:
j+=1
return
#---------------------------------------------------------------
def getMinVar(self):
# Get the minimum variance solution
var=[]
for w in self.w:
a=np.dot(np.dot(w.T,self.covar),w)
var.append(a)
return min(var)**.5,self.w[var.index(min(var))]
#---------------------------------------------------------------
def getMaxSR(self):
# Get the max Sharpe ratio portfolio
#1) Compute the local max SR portfolio between any two neighbor turning points
w_sr,sr=[],[]
for i in range(len(self.w)-1):
w0=np.copy(self.w[i])
w1=np.copy(self.w[i+1])
kargs={'minimum':False,'args':(w0,w1)}
a,b=self.goldenSection(self.evalSR,0,1,**kargs)
w_sr.append(a*w0+(1-a)*w1)
sr.append(b)
return max(sr),w_sr[sr.index(max(sr))]
#---------------------------------------------------------------
def evalSR(self,a,w0,w1):
# Evaluate SR of the portfolio within the convex combination
w=a*w0+(1-a)*w1
b=np.dot(w.T,self.mean)[0,0]
c=np.dot(np.dot(w.T,self.covar),w)[0,0]**.5
return b/c
#---------------------------------------------------------------
def goldenSection(self,obj,a,b,**kargs):
# Golden section method. Maximum if kargs['minimum']==False is passed
from math import log,ceil
tol,sign,args=1.0e-9,1,None
if 'minimum' in kargs and kargs['minimum']==False:sign=-1
if 'args' in kargs:args=kargs['args']
numIter=int(ceil(-2.078087*log(tol/abs(b-a))))
r=0.618033989
c=1.0-r
# Initialize
x1=r*a+c*b;x2=c*a+r*b
f1=sign*obj(x1,*args);f2=sign*obj(x2,*args)
# Loop
for i in range(numIter):
if f1>f2:
a=x1
x1=x2;f1=f2
x2=c*a+r*b;f2=sign*obj(x2,*args)
else:
b=x2
x2=x1;f2=f1
x1=r*a+c*b;f1=sign*obj(x1,*args)
if f1<f2:return x1,sign*f1
else:return x2,sign*f2
#---------------------------------------------------------------
def efFrontier(self,points):
# Get the efficient frontier
mu,sigma,weights=[],[],[]
a=np.linspace(0,1,points/len(self.w))[:-1] # remove the 1, to avoid duplications
b=range(len(self.w)-1)
for i in b:
w0,w1=self.w[i],self.w[i+1]
if i==b[-1]:a=np.linspace(0,1,points/len(self.w)) # include the 1 in the last iteration
for j in a:
w=w1*j+(1-j)*w0
weights.append(np.copy(w))
mu.append(np.dot(w.T,self.mean)[0,0])
sigma.append(np.dot(np.dot(w.T,self.covar),w)[0,0]**.5)
return mu,sigma,weights
#---------------------------------------------------------------
#---------------------------------------------------------------
def get_history(symbols, start, end, data_path):
""" to get Yahoo data from saved csv files. If the file does not exist for the symbol,
data is read from Yahoo finance and the csv saved.
symbols: symbol list
start, end : datetime start/end dates
data_path : datapath for csv files - use double \\ and terminate path with \\
"""
symbols_ls = list(symbols)
for ticker in symbols:
print ticker,
try:
#see if csv data available
data = pd.read_csv(data_path + ticker + '.csv', index_col='Date', parse_dates=True)
except:
#if no csv data, create an empty dataframe
data = pd.DataFrame(data=None, index=[start])
#check if there is data for the start-end data range
if start.toordinal() < data.index[0].toordinal() \
or end.toordinal() > data.index[-1].toordinal():
print 'Refresh data.. ',
try:
new_data = web.get_data_yahoo(ticker, start, end)
if new_data.empty==False:
if data.empty==False:
try:
ticker_data = data.append(new_data).groupby(level=0, by=['rownum']).last()
except:
print 'Merge failed.. '
else:
ticker_data = new_data
try:
ticker_data.to_csv(data_path + ticker + '.csv')
print ' UPDATED.. '
except:
print 'Save failed.. '
else:
print 'No new data.. '
except:
print 'Download failed.. '
# remove symbol from list
symbols_ls.remove(ticker)
else:
print 'OK.. '
pass
pdata = pd.Panel(dict((symbols_ls[i], pd.read_csv(data_path + symbols_ls[i] + '.csv',\
index_col='Date', parse_dates=True).sort(ascending=True)) for i in range(len(symbols_ls))) )
return pdata.ix[:, start:end, :]
def get_trading_dates(start, end, offset=0):
''' to create a list of trading dates (timestamps) for use with Zipline or Quantopian.
offset = 0 -> 1st trading day of month, offset = -1 -> last trading day of month.
start, end are datetime.dates'''
trading_dates = list([])
trading_days= tradingcalendar.get_trading_days(start, end)
month = trading_days[0].month
for i in range(len(trading_days)) :
if trading_days[i].month != month :
try :
trading_dates = trading_dates + list([trading_days[i + offset]])
except :
raise
month = trading_days[i].month
return trading_dates
|
{
"content_hash": "dce86d44b9ea777c4d9d2d02168b46b2",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 147,
"avg_line_length": 40.923202614379086,
"alnum_prop": 0.5274905170692753,
"repo_name": "scubamut/SIT-Python",
"id": "fbe554e08f4cb6bb17534c35e3e8ef3b262a13cb",
"size": "25045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finhelpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34127"
}
],
"symlink_target": ""
}
|
"""
Autopsy Forensic Browser
Copyright 2019-2020 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import MessageReadStatus
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import FileAttachment
from TskMessagesParser import TskMessagesParser
from TskContactsParser import TskContactsParser
from TskCallLogsParser import TskCallLogsParser
import traceback
import general
class SkypeAnalyzer(general.AndroidComponentAnalyzer):
"""
Parses the Skype App databases for TSK contacts, message
and calllog artifacts.
About version 8.15.0.428 (9/17/2019) Skype database:
- There are 4 tables this parser uses:
1) person - this table appears to hold all contacts known to the user.
2) user - this table holds information about the user.
3) particiapnt - Yes, that is not a typo. This table maps group chat
ids to skype ids (1 to many).
4) chatItem - This table contains all messages. It maps the group id or
skype id (for 1 to 1 communication) to the message content
and metadata. Either the group id or skype id is stored in
a column named 'conversation_link'.
More info and implementation details:
- The person table does not include groups. To get
all 1 to 1 communications, we could simply join the person and chatItem tables.
This would mean we'd need to do a second pass to get all the group information
as they would be excluded in the join. Since the chatItem table stores both the
group id or skype_id in one column, an implementation decision was made to union
the person and particiapnt table together so that all rows are matched in one join
with chatItem. This result is consistently labeled contact_book_w_groups in the
following queries.
"""
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self._SKYPE_PACKAGE_NAME = "com.skype.raider"
self._PARSER_NAME = "Skype Parser"
self._VERSION = "8.15.0.428"
def get_user_account(self, skype_db):
account_query_result = skype_db.runQuery(
"""
SELECT entry_id,
CASE
WHEN Ifnull(first_name, "") == "" AND Ifnull(last_name, "") == "" THEN entry_id
WHEN first_name is NULL THEN replace(last_name, ",", "")
WHEN last_name is NULL THEN replace(first_name, ",", "")
ELSE replace(first_name, ",", "") || " " || replace(last_name, ",", "")
END AS name
FROM user
"""
)
if account_query_result is not None and account_query_result.next():
return account_query_result.getString("entry_id")
return None
def analyze(self, dataSource, fileManager, context):
#Skype databases are of the form: live:XYZ.db, where
#XYZ is the skype id of the user. The following search
#does a generic substring match for 'live' in the skype
#package.
skype_dbs = AppSQLiteDB.findAppDatabases(dataSource,
"live:", False, self._SKYPE_PACKAGE_NAME)
try:
for skype_db in skype_dbs:
#Attempt to get the user account id from the database
user_account_instance = None
try:
user_account_instance = self.get_user_account(skype_db)
except SQLException as ex:
self._logger.log(Level.WARNING,
"Error querying for the user account in the Skype db.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
current_case = Case.getCurrentCaseThrows()
if user_account_instance is None:
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
skype_db.getDBFile(), Account.Type.SKYPE
)
else:
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
skype_db.getDBFile(), Account.Type.SKYPE,
Account.Type.SKYPE, user_account_instance
)
self.parse_contacts(skype_db, helper)
self.parse_calllogs(skype_db, helper)
self.parse_messages(skype_db, helper, current_case)
except NoCurrentCaseException as ex:
self._logger.log(Level.WARNING, "No case currently open.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
for skype_db in skype_dbs:
skype_db.close()
def parse_contacts(self, skype_db, helper):
#Query for contacts and iterate row by row adding
#each contact artifact
try:
contacts_parser = SkypeContactsParser(skype_db, self._PARSER_NAME)
while contacts_parser.next():
helper.addContact(
contacts_parser.get_contact_name(),
contacts_parser.get_phone(),
contacts_parser.get_home_phone(),
contacts_parser.get_mobile_phone(),
contacts_parser.get_email(),
contacts_parser.get_other_attributes()
)
contacts_parser.close()
except SQLException as ex:
#Error parsing Skype db
self._logger.log(Level.WARNING,
"Error parsing contact database for call logs artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Severe error trying to add to case database.. case is not complete.
#These exceptions are thrown by the CommunicationArtifactsHelper.
self._logger.log(Level.SEVERE,
"Failed to add contact artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Failed to post notification to blackboard
self._logger.log(Level.WARNING,
"Failed to post contact artifact to the blackboard", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_calllogs(self, skype_db, helper):
#Query for call logs and iterate row by row adding
#each call log artifact
try:
calllog_parser = SkypeCallLogsParser(skype_db)
while calllog_parser.next():
helper.addCalllog(
calllog_parser.get_call_direction(),
calllog_parser.get_phone_number_from(),
calllog_parser.get_phone_number_to(),
calllog_parser.get_call_start_date_time(),
calllog_parser.get_call_end_date_time(),
calllog_parser.get_call_type()
)
calllog_parser.close()
except SQLException as ex:
#Error parsing Skype db
self._logger.log(Level.WARNING,
"Error parsing Skype database for call logs artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Severe error trying to add to case database.. case is not complete.
#These exceptions are thrown by the CommunicationArtifactsHelper.
self._logger.log(Level.SEVERE,
"Failed to add call log artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Failed to post notification to blackboard
self._logger.log(Level.WARNING,
"Failed to post call log artifact to the blackboard", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_messages(self, skype_db, helper, current_case):
#Query for messages and iterate row by row adding
#each message artifact
try:
messages_parser = SkypeMessagesParser(skype_db)
while messages_parser.next():
message_artifact = helper.addMessage(
messages_parser.get_message_type(),
messages_parser.get_message_direction(),
messages_parser.get_phone_number_from(),
messages_parser.get_phone_number_to(),
messages_parser.get_message_date_time(),
messages_parser.get_message_read_status(),
messages_parser.get_message_subject(),
messages_parser.get_message_text(),
messages_parser.get_thread_id()
)
if (messages_parser.get_file_attachment() is not None):
file_attachments = ArrayList()
file_attachments.add(FileAttachment(current_case.getSleuthkitCase(), skype_db.getDBFile().getDataSource(), messages_parser.get_file_attachment()))
message_attachments = MessageAttachments(file_attachments, [])
helper.addAttachments(message_artifact, message_attachments)
messages_parser.close()
except SQLException as ex:
#Error parsing Skype db
self._logger.log(Level.WARNING,
"Error parsing Skype database for message artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Severe error trying to add to case database.. case is not complete.
#These exceptions are thrown by the CommunicationArtifactsHelper.
self._logger.log(Level.SEVERE,
"Failed to add message artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Failed to post notification to blackboard
self._logger.log(Level.WARNING,
"Failed to post message artifact to the blackboard", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
class SkypeCallLogsParser(TskCallLogsParser):
"""
Extracts TSK_CALLLOG information from the Skype database.
TSK_CALLLOG fields that are not in the Skype database are given
a default value inherited from the super class.
"""
def __init__(self, calllog_db):
"""
Implementation details:
- message_type w/ value 3 appeared to be the call type, regardless
of if it was audio or video.
"""
super(SkypeCallLogsParser, self).__init__(calllog_db.runQuery(
"""
SELECT contact_book_w_groups.conversation_id,
contact_book_w_groups.participant_ids,
messages.time,
messages.duration,
messages.is_sender_me,
messages.person_id AS sender_id
FROM (SELECT conversation_id,
Group_concat(person_id) AS participant_ids
FROM particiapnt
GROUP BY conversation_id
UNION
SELECT entry_id AS conversation_id,
NULL
FROM person) AS contact_book_w_groups
join chatitem AS messages
ON messages.conversation_link = contact_book_w_groups.conversation_id
WHERE message_type == 3
"""
)
)
self._INCOMING_CALL_TYPE = 0
self._OUTGOING_CALL_TYPE = 1
def get_phone_number_from(self):
if self.get_call_direction() == self.INCOMING_CALL:
return self.result_set.getString("sender_id")
def get_phone_number_to(self):
if self.get_call_direction() == self.OUTGOING_CALL:
group_ids = self.result_set.getString("participant_ids")
if group_ids is not None:
group_ids = group_ids.split(",")
return group_ids
return self.result_set.getString("conversation_id")
return super(SkypeCallLogsParser, self).get_phone_number_to()
def get_call_direction(self):
direction = self.result_set.getInt("is_sender_me")
if direction == self._INCOMING_CALL_TYPE:
return self.INCOMING_CALL
if direction == self._OUTGOING_CALL_TYPE:
return self.OUTGOING_CALL
return super(SkypeCallLogsParser, self).get_call_direction()
def get_call_start_date_time(self):
return self.result_set.getLong("time") / 1000
def get_call_end_date_time(self):
start = self.get_call_start_date_time()
duration = self.result_set.getInt("duration") / 1000
return start + duration
class SkypeContactsParser(TskContactsParser):
"""
Extracts TSK_CONTACT information from the Skype database.
TSK_CONTACT fields that are not in the Skype database are given
a default value inherited from the super class.
"""
def __init__(self, contact_db, analyzer):
super(SkypeContactsParser, self).__init__(contact_db.runQuery(
"""
SELECT entry_id,
CASE
WHEN Ifnull(first_name, "") == "" AND Ifnull(last_name, "") == "" THEN entry_id
WHEN first_name is NULL THEN replace(last_name, ",", "")
WHEN last_name is NULL THEN replace(first_name, ",", "")
ELSE replace(first_name, ",", "") || " " || replace(last_name, ",", "")
END AS name
FROM person
"""
)
)
self._PARENT_ANALYZER = analyzer
def get_contact_name(self):
return self.result_set.getString("name")
def get_other_attributes(self):
return [BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID,
self._PARENT_ANALYZER,
self.result_set.getString("entry_id"))]
class SkypeMessagesParser(TskMessagesParser):
"""
Extract TSK_MESSAGE information from the Skype database.
TSK_CONTACT fields that are not in the Skype database are given
a default value inherited from the super class.
"""
def __init__(self, message_db):
"""
This query is very similar to the call logs query, the only difference is
it grabs more columns in the SELECT and excludes message_types which have
the call type value (3).
"""
super(SkypeMessagesParser, self).__init__(message_db.runQuery(
"""
SELECT contact_book_w_groups.conversation_id,
contact_book_w_groups.participant_ids,
messages.time,
messages.content,
messages.device_gallery_path,
messages.is_sender_me,
messages.person_id as sender_id
FROM (SELECT conversation_id,
Group_concat(person_id) AS participant_ids
FROM particiapnt
GROUP BY conversation_id
UNION
SELECT entry_id as conversation_id,
NULL
FROM person) AS contact_book_w_groups
JOIN chatitem AS messages
ON messages.conversation_link = contact_book_w_groups.conversation_id
WHERE message_type != 3
"""
)
)
self._SKYPE_MESSAGE_TYPE = "Skype Message"
self._OUTGOING_MESSAGE_TYPE = 1
self._INCOMING_MESSAGE_TYPE = 0
def get_message_type(self):
return self._SKYPE_MESSAGE_TYPE
def get_phone_number_from(self):
if self.get_message_direction() == self.INCOMING:
return self.result_set.getString("sender_id")
return super(SkypeMessagesParser, self).get_phone_number_from()
def get_message_direction(self):
direction = self.result_set.getInt("is_sender_me")
if direction == self._OUTGOING_MESSAGE_TYPE:
return self.OUTGOING
if direction == self._INCOMING_MESSAGE_TYPE:
return self.INCOMING
return super(SkypeMessagesParser, self).get_message_direction()
def get_phone_number_to(self):
if self.get_message_direction() == self.OUTGOING:
group_ids = self.result_set.getString("participant_ids")
if group_ids is not None:
group_ids = group_ids.split(",")
return group_ids
return self.result_set.getString("conversation_id")
return super(SkypeMessagesParser, self).get_phone_number_to()
def get_message_date_time(self):
date = self.result_set.getLong("time")
return date / 1000
def get_message_text(self):
content = self.result_set.getString("content")
if content is not None:
return content
return super(SkypeMessagesParser, self).get_message_text()
def get_thread_id(self):
group_ids = self.result_set.getString("participant_ids")
if group_ids is not None:
return self.result_set.getString("conversation_id")
return super(SkypeMessagesParser, self).get_thread_id()
def get_file_attachment(self):
if (self.result_set.getString("device_gallery_path") is None):
return None
else:
return self.result_set.getString("device_gallery_path")
|
{
"content_hash": "54c18a0bc1c36f6b3c4ca15db6756fd4",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 166,
"avg_line_length": 46.18984547461368,
"alnum_prop": 0.5810074555534315,
"repo_name": "wschaeferB/autopsy",
"id": "908a7da451564a6cb9c0cb9b24db0ee9abc0f2d4",
"size": "20924",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "InternalPythonModules/android/skype.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AutoIt",
"bytes": "27605"
},
{
"name": "CSS",
"bytes": "4467"
},
{
"name": "HTML",
"bytes": "10721"
},
{
"name": "Java",
"bytes": "14146746"
},
{
"name": "Perl",
"bytes": "12052"
},
{
"name": "Python",
"bytes": "518989"
},
{
"name": "Shell",
"bytes": "10815"
}
],
"symlink_target": ""
}
|
"""
Copy of gi.py with recolor changed to fast recolor
"""
from basicgraphs import graph # , GraphError, vertex, edge
import permgrputil
from permv2 import permutation
from basicpermutationgroup import Orbit
# deprecated
def color_gradient(bg_1, bg_2, colors):
# types correct?
if not (isinstance(bg_1, graph) and isinstance(bg_2, graph)):
print("Not two graphs provided!")
return False
def count_nbs(edges):
def inc_nbs(v):
if hasattr(v, 'colornum'):
v.colornum += 1
else:
v.colornum = 1
for e in edges:
inc_nbs(e.head())
inc_nbs(e.tail())
count_nbs(bg_1._E) # Instead of E() we use _E so we need not set unsafe to true.
count_nbs(bg_2._E) # Instead of E() we use _E so we need not set unsafe to true.
nbs_count_2_color_num = {}
# basic colonisation based on degrees
combined_v = bg_1._V + bg_2._V # Instead of the safe but slow V() we use here _V
for v in combined_v:
count = v.colornum
if count not in nbs_count_2_color_num:
nbs_count_2_color_num[count] = len(colors)
colors[len(colors)] = []
c = nbs_count_2_color_num[count]
v.colornum = c
colors[c].append(v)
def recolor(colors, queue):
"""
:param colors: Dictionary containing the coloring of two graphs which are colored together.
:param queue: The colors that need to be considered for refining operations.
:return: <False> iff unbalanced coloring is detected;
otherwise when done returns <True>, coloring can still be unbalanced.
"""
queue = set(queue) # TODO: check if this is the best data structure
while len(queue) > 0:
c = queue.pop()
if len(colors[c]) % 2 == 1:
return False
neighbours = {} # for all neighboring vertexes the number of neighbours they have in c
nbs_colors = set() # all neighboring colors
for v in colors[c]:
for n in v.get_cached_nbs():
if n not in neighbours:
nbs_colors.add(n.colornum)
neighbours[n] = 1
else:
neighbours[n] += 1
for nbs_c in nbs_colors:
partitions = {} # partition nbs_c based on number of neighbours in c
# partition_sizes = {}
for n in colors[nbs_c]:
nbs_count = neighbours.get(n, 0)
partitions.setdefault(nbs_count, []).append(n)
# partition_sizes[nbs_count] = partition_sizes.get(nbs_count, 0) + 1
if len(partitions) > 1:
largest = -1
may_omit = len(partitions) > 2 or nbs_c not in queue
if may_omit:
smallest = min(partitions, key=lambda i: len(partitions[i]))
largest = max(partitions, key=lambda i: len(partitions[i]))
colors[nbs_c] = partitions.pop(smallest)
else:
colors[nbs_c] = partitions.popitem()[1]
queue.add(nbs_c)
while len(partitions) > 0:
nbs_count, partition = partitions.popitem()
new_color = len(colors)
colors[new_color] = partition
if nbs_count != largest:
queue.add(new_color)
for v in partition:
v.colornum = new_color
return True
def create_color_dict(g, h):
"""
Creates the color dict based on the colornums of the vertices in graphs g and h.
"""
colors = {}
for v in g._V:
l = colors.get(v.colornum, [])
l.append(v)
colors[v.colornum] = l
for v in h._V:
l = colors.get(v.colornum, [])
l.append(v)
colors[v.colornum] = l
return colors
def set_colors(graph, l):
"""
Assigns a color to every vertex of the given graph, 0 if a vertex is not in l,
or the index in l + 1 otherwise.
"""
for v in graph:
v.colornum = 0
i = 1
for v in l:
v.colornum = i
i += 1
# see slides lecture 2 page 23
# g and h instance of graph
def count_isomorphism(g, h, d=None, i=None, stop_early=False):
"""
Returns the number of isomorphisms between graphs g and h. If stop_early is specified,
the algorithm terminates as soon as an isomorphism is found, returns 1 if an isomorphism
is found, 0 if none.
If you want #Aut of a graph, one should create a deep copy of the graph as the second
argument before calling this function.
"""
if d is None:
d = []
if i is None:
i = []
set_colors(g, d)
set_colors(h, i)
colors = create_color_dict(g, h)
if not recolor(colors, [len(colors) - 1]): # recolor can return if the coloring is unbalanced
return 0
if defines_bijection(colors):
return 1
if not is_balanced(colors): # recolor doesnt always know if the coloring is unbalanced
return 0
# Choose a color class C with |C| ≥ 4
# note that c is the list of vertices, not an int representing the color
c = None
for color in colors.values():
if len(color) >= 4:
c = color
break
x = None # vertex of g with color c
for v in c:
if v._graph is g:
x = v
break
num = 0
for y in c:
if y._graph is h:
num += count_isomorphism(g, h, d + [x], i + [y], stop_early=stop_early)
if stop_early:
if num > 0: # found isomorphism, no need to continue if we dont care about the amount
return num
return num
def is_balanced(colors):
for vertices in colors.values():
if len(vertices) != 0:
num0 = 0 # amount of vertices in graph0
num1 = 0 # amount of vertices in the other graph
graph0 = vertices[0]._graph
for vertex in vertices:
if vertex._graph is graph0:
num0 += 1
else:
num1 += 1
if num0 != num1:
return False
return True
def defines_bijection(colors):
for vertices in colors.values():
if len(vertices) != 2: # found a color with #vertices != 2
return False
if vertices[0]._graph is vertices[1]._graph: # both vertices belong to same graph, no bijection
return False
return True
def generate_automorphisms(graph, gCopy, verticesD, verticesI, x, firstPruningRule = True, secondPruningRule = True, membershipTesting = False):
"""
Requires arguments gCopy to be a deepcopy of graph, parameters d, i and x should be []
return type is irrelevant for the working principle of this function, that is reserved for internal purposes only.
"""
# set original colors, only based on D and I
set_colors(graph, verticesD)
set_colors(gCopy, verticesI)
colors = create_color_dict(graph, gCopy)
if not recolor(colors, [len(colors) - 1]): # recolor can return if the coloring is unbalanced
return False
if not is_balanced(colors): # recolor doesnt always know if the coloring is unbalanced
return False
# unique automorphism
if defines_bijection(colors):
mapping = list(range(0, len(graph._V)))
for i in range(0, len(colors)):
if colors[i][0] in graph:
mapping[colors[i][0]._label] = colors[i][1]._label
else:
mapping[colors[i][1]._label] = colors[i][0]._label
# print(mapping)
# add to generating set (assuming we return to trivial node, by pruning rule #1)
perm = permutation(len(mapping), mapping=mapping)
if mapping != list(range(0, len(mapping))):
if not membershipTesting or not permgrputil.is_member(perm, x): # membership testing?
x.append(perm)
return True # return to last visited trivial ancestor (if firstpruningRule)
# multiple automorphisms
# Choose a color class C with |C| ≥ 4
col = None
newEl = None
instBreak = False
for color in colors.values():
if len(color) >= 4:
col = color
for v1 in col:
if v1._graph is graph:
for v2 in col:
if v2._graph is gCopy and v1._label == v2._label:
newEl = v1
instBreak = True
break
if instBreak:
break
if instBreak:
break
# no trivial color has been found, thus no vertex with trivial option can be selected either
if newEl is None:
for v in col:
if v._graph is graph:
newEl = v
break
# build list of vertices of gCopy to check, while also looking for a similar node as newEl
# this guarantees that it starts with the trivial node, if possible
checklist = []
for v in col:
if v._graph is gCopy:
checklist.append(v)
if v._label == newEl._label:
checklist[0], checklist[len(checklist) - 1] = v, checklist[0]
# returns the orbit of an generating set and a specific element, used for the second pruning rule
def get_orbit(x, label):
if len(x) == 0:
return [label]
return Orbit(x, label)
# calculate whether D, I is trivial, used for second pruning rule
trivial = True
if secondPruningRule:
for i in range(0, len(verticesD)):
if verticesD[i]._label != verticesI[i]._label:
trivial = False
break
for v in checklist:
# this version of the second pruning rule only applies to branches of a trivial mapping,
# otherwise it should not be applied checkes whether the automorphism created with mapping newEl
# to (non trivial!) v is already produces by the generating set
if (not trivial or not secondPruningRule) or (newEl._label == v._label) or (not v._label in get_orbit(x, newEl._label)):
res = generate_automorphisms(graph, gCopy, verticesD + [newEl], verticesI + [v], x, firstPruningRule, secondPruningRule, membershipTesting)
if firstPruningRule and res and not trivial: # return to last trivial ancestor
return True # not trivial, return to last trivial ancestor
# No automorphism found
return False
def count_automorphisms(graph, graphCopy):
x = []
generate_automorphisms(graph, graphCopy, [], [], x)
return permgrputil.order(x)
|
{
"content_hash": "35f5c2c07c2b1407fd35e6503139b91a",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 151,
"avg_line_length": 33.45031055900621,
"alnum_prop": 0.57023488998236,
"repo_name": "Walnoot/graph-isomorphism",
"id": "20f793895d94fcd688ecd15c84764b2e56cd702d",
"size": "10775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56445"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0007_usercheckout_braintree_id'),
]
operations = [
migrations.AddField(
model_name='order',
name='order_id',
field=models.CharField(max_length=20, null=True, blank=True),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(default=b'created', max_length=120, choices=[(b'created', b'Created'), (b'paid', b'Paid'), (b'shipped', b'Shipped')]),
),
]
|
{
"content_hash": "502771d3e27fa90b98f901b1f9ddfb4e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 153,
"avg_line_length": 28.73913043478261,
"alnum_prop": 0.5809379727685325,
"repo_name": "DiptoDas8/Biponi",
"id": "04ce1d9f135238c400a092db4ffa49f7b2bfad5c",
"size": "685",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/orders/migrations/0008_auto_20150901_2226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "136782"
},
{
"name": "HTML",
"bytes": "159099"
},
{
"name": "JavaScript",
"bytes": "330026"
},
{
"name": "Python",
"bytes": "7657307"
},
{
"name": "Shell",
"bytes": "3208"
}
],
"symlink_target": ""
}
|
import json
from jsonobject.exceptions import BadValueError
from corehq.apps.reports_core.filters import DatespanFilter, ChoiceListFilter, Choice, DynamicChoiceListFilter, \
NumericFilter, PreFilter, QuarterFilter
from corehq.apps.userreports.exceptions import BadSpecError
from django.utils.translation import ugettext as _
from corehq.apps.userreports.reports.filters.choice_providers import DATA_SOURCE_COLUMN, \
LOCATION, DataSourceColumnChoiceProvider, LocationChoiceProvider, UserChoiceProvider, \
USER, OWNER, OwnerChoiceProvider
from corehq.apps.userreports.reports.filters.values import(
dynamic_choice_list_url,
NONE_CHOICE,
SHOW_ALL_CHOICE,
)
from corehq.apps.userreports.reports.filters.specs import (
ChoiceListFilterSpec, DynamicChoiceListFilterSpec, NumericFilterSpec, DateFilterSpec,
PreFilterSpec, QuarterFilterSpec)
def _build_date_filter(spec, report):
wrapped = DateFilterSpec.wrap(spec)
return DatespanFilter(
name=wrapped.slug,
label=wrapped.get_display(),
)
def _build_quarter_filter(spec, report):
wrapped = QuarterFilterSpec.wrap(spec)
return QuarterFilter(
name=wrapped.slug,
label=wrapped.get_display(),
)
def _build_numeric_filter(spec, report):
wrapped = NumericFilterSpec.wrap(spec)
return NumericFilter(
name=wrapped.slug,
label=wrapped.get_display(),
)
def _build_pre_filter(spec, report):
wrapped = PreFilterSpec.wrap(spec)
return PreFilter(
name=wrapped.slug,
datatype=wrapped.datatype,
pre_value=wrapped.pre_value,
pre_operator=wrapped.pre_operator,
)
def _build_choice_list_filter(spec, report):
wrapped = ChoiceListFilterSpec.wrap(spec)
choices = [Choice(
fc.value if fc.value is not None else NONE_CHOICE,
fc.get_display()
) for fc in wrapped.choices]
if wrapped.show_all:
choices.insert(0, Choice(SHOW_ALL_CHOICE, _('Show all')))
return ChoiceListFilter(
name=wrapped.slug,
datatype=wrapped.datatype,
label=wrapped.display,
choices=choices,
)
def _build_dynamic_choice_list_filter(spec, report):
wrapped = DynamicChoiceListFilterSpec.wrap(spec)
choice_provider_spec = wrapped.get_choice_provider_spec()
choice_provider = FilterChoiceProviderFactory.from_spec(choice_provider_spec)(report, wrapped.slug)
choice_provider.configure(choice_provider_spec)
return DynamicChoiceListFilter(
name=wrapped.slug,
datatype=wrapped.datatype,
field=wrapped.field,
label=wrapped.display,
show_all=wrapped.show_all,
url_generator=dynamic_choice_list_url,
choice_provider=choice_provider,
)
class ReportFilterFactory(object):
constructor_map = {
'date': _build_date_filter,
'quarter': _build_quarter_filter,
'pre': _build_pre_filter,
'choice_list': _build_choice_list_filter,
'dynamic_choice_list': _build_dynamic_choice_list_filter,
'numeric': _build_numeric_filter
}
@classmethod
def from_spec(cls, spec, report=None):
cls.validate_spec(spec)
try:
return cls.constructor_map[spec['type']](spec, report)
except (AssertionError, BadValueError) as e:
raise BadSpecError(_('Problem creating report filter from spec: {}, message is: {}').format(
json.dumps(spec, indent=2),
str(e),
))
@classmethod
def validate_spec(cls, spec):
if spec.get('type') not in cls.constructor_map:
raise BadSpecError(
_('Illegal report filter type: {0}, must be one of the following choice: ({1})').format(
spec.get('type', _('(missing from spec)')),
', '.join(cls.constructor_map.keys())
)
)
class FilterChoiceProviderFactory(object):
constructor_map = {
DATA_SOURCE_COLUMN: DataSourceColumnChoiceProvider,
LOCATION: LocationChoiceProvider,
USER: UserChoiceProvider,
OWNER: OwnerChoiceProvider
}
@classmethod
def from_spec(cls, choice_provider_spec):
return cls.constructor_map.get(choice_provider_spec['type'], DataSourceColumnChoiceProvider)
|
{
"content_hash": "42eb7586edc81db9e476a676caef6a1d",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 113,
"avg_line_length": 33.6328125,
"alnum_prop": 0.6720092915214867,
"repo_name": "qedsoftware/commcare-hq",
"id": "6dacf26e7947437c9a37f0a2f9f58b37e9137a44",
"size": "4305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/userreports/reports/filters/factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
from django.contrib.postgres.fields import JSONField
from django.db import models
class Character(models.Model):
name = models.CharField(max_length=200)
data = JSONField()
other_data = JSONField()
def __str__(self): # __unicode__ on Python 2
return self.name
|
{
"content_hash": "90580d9d558b469715c94f9277f73264",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.686411149825784,
"repo_name": "jmrivas86/django-json-widget",
"id": "4b75cdad1619b31c55015d66dd619f13fcc4f502",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/characters/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3731"
},
{
"name": "Makefile",
"bytes": "1665"
},
{
"name": "Python",
"bytes": "12366"
}
],
"symlink_target": ""
}
|
"""Unit tests."""
import mock
import pytest
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestMetricServiceClient(object):
def test_list_monitored_resource_descriptors(self):
# Setup Expected Response
next_page_token = ""
resource_descriptors_element = {}
resource_descriptors = [resource_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"resource_descriptors": resource_descriptors,
}
expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.resource_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_monitored_resource_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_monitored_resource_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name_2,
"type": type_,
"display_name": display_name,
"description": description,
}
expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]"
)
response = client.get_monitored_resource_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_monitored_resource_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]"
)
with pytest.raises(CustomException):
client.get_monitored_resource_descriptor(name)
def test_list_metric_descriptors(self):
# Setup Expected Response
next_page_token = ""
metric_descriptors_element = {}
metric_descriptors = [metric_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"metric_descriptors": metric_descriptors,
}
expected_response = metric_service_pb2.ListMetricDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.metric_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMetricDescriptorsRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_metric_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_metric_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
unit = "unit3594628"
description = "description-1724546052"
display_name = "displayName1615086568"
expected_response = {
"name": name_2,
"type": type_,
"unit": unit,
"description": description,
"display_name": display_name,
}
expected_response = api_metric_pb2.MetricDescriptor(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
response = client.get_metric_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
with pytest.raises(CustomException):
client.get_metric_descriptor(name)
def test_create_metric_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
unit = "unit3594628"
description = "description-1724546052"
display_name = "displayName1615086568"
expected_response = {
"name": name_2,
"type": type_,
"unit": unit,
"description": description,
"display_name": display_name,
}
expected_response = api_metric_pb2.MetricDescriptor(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
metric_descriptor = {}
response = client.create_metric_descriptor(name, metric_descriptor)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
metric_descriptor = {}
with pytest.raises(CustomException):
client.create_metric_descriptor(name, metric_descriptor)
def test_delete_metric_descriptor(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
client.delete_metric_descriptor(name)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
with pytest.raises(CustomException):
client.delete_metric_descriptor(name)
def test_list_time_series(self):
# Setup Expected Response
next_page_token = ""
time_series_element = {}
time_series = [time_series_element]
expected_response = {
"next_page_token": next_page_token,
"time_series": time_series,
}
expected_response = metric_service_pb2.ListTimeSeriesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
filter_ = "filter-1274492040"
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval, view)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.time_series[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListTimeSeriesRequest(
name=name, filter=filter_, interval=interval, view=view
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_time_series_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
filter_ = "filter-1274492040"
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval, view)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_time_series(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
time_series = []
client.create_time_series(name, time_series)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_time_series_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
time_series = []
with pytest.raises(CustomException):
client.create_time_series(name, time_series)
|
{
"content_hash": "98a3dacce681f3b7ca91ab95ec3e060e",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 88,
"avg_line_length": 37.169411764705885,
"alnum_prop": 0.6412609989238462,
"repo_name": "tseaver/google-cloud-python",
"id": "dd5e692eb4138269739077060f8b1d88bce5f90e",
"size": "16399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monitoring/tests/unit/gapic/v3/test_metric_service_client_v3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
import os.path
import shutil
import sublime
import sublime_plugin
import subprocess
SETTINGS_FILE = "BeautifyRust.sublime-settings"
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
return shutil.which(program)
class BeautifyRustOnSave(sublime_plugin.EventListener):
def on_post_save(self, view):
if sublime.load_settings(SETTINGS_FILE).get("run_on_save", False):
return view.run_command("beautify_rust")
return
class BeautifyRustCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.filename = self.view.file_name()
self.fname = os.path.basename(self.filename)
self.settings = sublime.load_settings(SETTINGS_FILE)
if self.is_rust_file():
self.run_format(edit)
def is_rust_file(self):
return self.fname.endswith(".rs")
def pipe(self, cmd):
cwd = os.path.dirname(self.filename)
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
beautifier = subprocess.Popen(
cmd, cwd=cwd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo)
(_, err) = beautifier.communicate()
return (beautifier.wait(), err.decode('utf8'))
def run_format(self, edit):
buffer_region = sublime.Region(0, self.view.size())
buffer_text = self.view.substr(buffer_region)
if buffer_text == "":
return
rustfmt_bin = which(self.settings.get("rustfmt", "rustfmt"))
if rustfmt_bin is None:
return sublime.error_message(
"Beautify rust: can not find {0} in path.".format(self.settings.get("rustfmt", "rustfmt")))
cmd_list = [rustfmt_bin, self.filename] + self.settings.get("args", [])
self.save_viewport_state()
(exit_code, err) = self.pipe(cmd_list)
if exit_code != 0 or (err != "" and not err.startswith("Using rustfmt")):
self.view.replace(edit, buffer_region, buffer_text)
print("failed: exit_code: {0}\n{1}".format(exit_code, err))
if sublime.load_settings(SETTINGS_FILE).get("show_errors", True):
sublime.error_message(
"Beautify rust: rustfmt process call failed. See log (ctrl + `) for details.")
self.view.window().run_command("reload_all_files")
self.reset_viewport_state()
def save_viewport_state(self):
self.previous_selection = [(region.a, region.b)
for region in self.view.sel()]
self.previous_position = self.view.viewport_position()
def reset_viewport_state(self):
self.view.set_viewport_position((0, 0,), False)
self.view.set_viewport_position(self.previous_position, False)
self.view.sel().clear()
for a, b in self.previous_selection:
self.view.sel().add(sublime.Region(a, b))
|
{
"content_hash": "dcde20f8a99bf918f44840c4cb9b7d96",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 107,
"avg_line_length": 37.964285714285715,
"alnum_prop": 0.6180620884289746,
"repo_name": "vincenting/BeautifyRust",
"id": "e8121ce7c7a5e9d6601ebd6efc656e218793675d",
"size": "3189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BeautifyRust.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3189"
}
],
"symlink_target": ""
}
|
__author__ = 'Stephen P. Henrie'
import unittest
from mock import Mock
from pyon.util.unit_test import PyonTestCase
from pyon.util.int_test import IonIntegrationTestCase
from nose.plugins.attrib import attr
from pyon.core.exception import BadRequest, NotFound
from pyon.public import RT, IonObject
from interface.services.coi.iobject_management_service import ObjectManagementServiceClient
from ion.services.coi.object_management_service import ObjectManagementService
@attr('UNIT', group='coi')
class TestObjectManagementServiceUnit(PyonTestCase):
def setUp(self):
self.mock_clients = self._create_service_mock('object_management')
self.oms = ObjectManagementService()
self.oms.clients = self.mock_clients
self.yaml_definition = '''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
# String to put in origin of TimerEvent
event_origin: ""
# String to put in subtype field of TimerEvent
event_subtype: ""
'''
self.bad_yaml ='''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
# String to put in origin of TimerEvent
event_origin ""
# String to put in subtype field of TimerEvent
event_subtype: ""
'''
def rr_return_value(self):
return ['123',1]
def test_create_object(self):
ot = Mock()
ot.definition = self.bad_yaml
ot.name = "name"
with self.assertRaises(BadRequest):
self.oms.create_object_type(ot)
ot.name = "bad name"
with self.assertRaises(BadRequest):
self.oms.create_object_type(ot)
ot.name = "name"
ot.definition = self.yaml_definition
self.oms.clients.resource_registry.create.return_value = self.rr_return_value()
object_id = self.oms.create_object_type(ot)
self.assertEqual(object_id, '123')
self.oms.clients.resource_registry.create.assert_called_once_with(ot)
def test_read_and_update_object(self):
with self.assertRaises(BadRequest):
self.oms.read_object_type(None)
ot = Mock()
ot.definition = self.yaml_definition
ot.name = "name"
ot.description = "This is just a test, don't panic"
self.oms.clients.resource_registry.read.return_value = ot
ot_return = self.oms.read_object_type("123")
self.assertTrue(ot_return is ot)
self.oms.clients.resource_registry.read.assert_called_once_with('123','')
ot_return.name = "new name"
with self.assertRaises(BadRequest):
self.oms.update_object_type(ot_return)
ot_return.name = "new_name"
ot_return.definition = self.bad_yaml
with self.assertRaises(BadRequest):
self.oms.update_object_type(ot_return)
ot.definition = self.yaml_definition
self.oms.clients.resource_registry.update.return_value = ['123', 2]
ot_id = self.oms.update_object_type(ot_return)
self.assertEqual(ot_id, '123')
self.oms.clients.resource_registry.update.assert_called_once_with(ot_return)
def test_read_not_found(self):
self.oms.clients.resource_registry.read.side_effect = NotFound
with self.assertRaises(NotFound):
self.oms.read_object_type("0xBADC0FFEE")
self.oms.clients.resource_registry.read.assert_called_once_with('0xBADC0FFEE','')
def test_delete_object(self):
with self.assertRaises(BadRequest):
self.oms.delete_object_type(None)
self.oms.clients.resource_registry.delete.return_value = True
status = self.oms.delete_object_type("123")
self.assertEqual(status, True)
self.oms.clients.resource_registry.delete.assert_called_once_with("123")
def test_delete_not_found(self):
self.oms.clients.resource_registry.delete.side_effect = NotFound
with self.assertRaises(NotFound):
self.oms.delete_object_type("0xBADC0FFEE")
self.oms.clients.resource_registry.delete.assert_called_once_with('0xBADC0FFEE')
@attr('INT', group='coi')
class TestObjectManagementService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
self.oms = ObjectManagementServiceClient()
def test_create_object(self):
yaml_str = '''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
# String to put in origin of TimerEvent
event_origin: ""
# String to put in subtype field of TimerEvent
event_subtype: ""
'''
ot = IonObject(RT.ObjectType, {"definition": yaml_str})
object_type_id = self.oms.create_object_type(ot)
self.assertTrue(type(object_type_id) == str)
self.oms.delete_object_type(object_type_id)
def test_read_and_update_object(self):
# Create object type
# Read object type and validate
# Update object type
# Read back the object type and validate
# Delete the object type
object_definition = '''
TimerSchedulerEntry3: !Extends_AbstractSchedulerEntry
# String to put in origin of TimerEvent
event_origin: ""
# String to put in subtype field of TimerEvent
event_subtype: ""
'''
ot = IonObject(RT.ObjectType, {"definition": object_definition})
object_type_id = self.oms.create_object_type(ot)
object_type = self.oms.read_object_type(object_type_id)
self.assertEqual(object_definition,object_type.definition)
object_definition2 = '''
TimerSchedulerEntry3: !Extends_AbstractSchedulerEntry
# String to put in origin of TimerEvent
event_origin: ""
# String to put in subtype field of TimerEvent
event_subtype: ""
'''
object_type.definition = object_definition2
self.oms.update_object_type(object_type)
object_type = self.oms.read_object_type(object_type_id)
self.assertEqual(object_definition2, object_type.definition)
self.oms.delete_object_type(object_type_id)
def test_read_object_not_found(self):
object_type_id = "0xbadc0ffee"
with self.assertRaises(NotFound):
self.oms.read_object_type(object_type_id)
def test_delete_object_not_found(self):
object_type_id = "0xbadc0ffee"
with self.assertRaises(NotFound):
self.oms.delete_object_type(object_type_id)
|
{
"content_hash": "1b09312af1947b5d90bc476eba21f7f3",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 91,
"avg_line_length": 36.83040935672515,
"alnum_prop": 0.6770403302635757,
"repo_name": "ooici/coi-services",
"id": "5532c9e6978b0c0773cf06f1fff9f15c21da2976",
"size": "6321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ion/services/coi/test/test_object_management_service.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "403012"
},
{
"name": "C++",
"bytes": "251803"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Erlang",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "11627"
},
{
"name": "Objective-C",
"bytes": "8918"
},
{
"name": "Python",
"bytes": "7964384"
},
{
"name": "Shell",
"bytes": "9221"
},
{
"name": "nesC",
"bytes": "57712131"
}
],
"symlink_target": ""
}
|
import copy
from unittest import TestCase
import TestResults
__author__ = 'zthomae'
class TestArgumentParser(TestCase):
def setUp(self):
self.parser = TestResults.make_parser()
def test_type_should_not_be_optional(self):
try:
TestResults.parse_args(self.parser, '--input foo --results bar'.split())
except ValueError:
return
self.fail('Parser does not fail when a test type is not present')
def test_inputs_should_not_be_optional(self):
try:
TestResults.parse_args(self.parser, '--type foo --results bar'.split())
except ValueError:
return
self.fail('Parser does not fail when input files are not present')
def test_results_should_not_be_optional(self):
try:
TestResults.parse_args(self.parser, '--type foo --input bar'.split())
except ValueError:
return
self.fail('Parser does not fail when results files are not present')
def test_only_one_type_allowed(self):
try:
TestResults.parse_args(self.parser, '--type two words --input foo --results bar'.split())
except SystemExit: # TODO: Wrap argparse behavior
return
self.fail('Parser accepts more than one test type')
def test_should_expect_at_least_one_input_file(self):
try:
TestResults.parse_args(self.parser, '--type foo --input --results bar'.split())
except SystemExit:
return
self.fail('Parser accepts zero input files')
def test_should_expect_at_least_one_results_file(self):
try:
TestResults.parse_args(self.parser, '--type foo --input bar --results'.split())
except SystemExit:
return
self.fail('Parser accepts zero results files')
def test_should_allow_more_than_one_input_file(self):
try:
TestResults.parse_args(self.parser, '--type foo --input these files --results bar'.split())
except SystemExit:
self.fail("Parser doesn't accept multiple input files")
def test_should_allow_more_than_one_results_file(self):
try:
TestResults.parse_args(self.parser, '--type foo --input file --results these files'.split())
except SystemExit:
self.fail("Parser doesn't accept multiple results files")
class TestInputFileParser(TestCase):
def setUp(self):
self.input_files = ['/foo/bar/Text1.txt', 'bar/baz/Text2.txt', 'Text3.txt', '../Text4.txt']
self.results = TestResults.parse_input_files(self.input_files)
def test_should_use_basename(self):
if sorted(self.results.keys()) != sorted(['Text1.txt', 'Text2.txt', 'Text3.txt', 'Text4.txt']):
self.fail('parse_input_files should return a dictionary with input file basenames as keys')
def test_should_return_fullpaths(self):
if any(map(lambda x: 'fullpath' not in x, self.results.values())):
self.fail('parse_input_files should return fullpaths to input files')
class TestDocuscopeResultsParser(TestCase):
def setUp(self):
self.ds_results_file = ''.join([
'<AnnotatedText File="Text1.txt" Group="foo" />',
'<AnnotatedText File="Text2.txt" Group="foo" />',
'<AnnotatedText File="Text3.txt" Group="bar" />'
])
self.ds_results_file_2 = ''.join([
'<AnnotatedText File="Text4.txt" Group="foo" />',
'<AnnotatedText File="Text5.txt" Group="bar" />'
])
self.ds_wrong_tag_results_file = ''.join([
'<Text File="Text1.txt" Group="foo" />',
'<Text File="Text2.txt" Group="foo" />',
'<AnnotatedText File="Text3.txt" Group="foo" />'
])
self.ds_wrong_attr_results_file = ''.join([
'<AnnotatedText Fil="Text1.txt" Group="foo" />',
'<AnnotatedText File="Text2.txt" Group="foo" />',
])
def test_should_handle_one_file(self):
results = TestResults.parse_docuscope_results([self.ds_results_file])
keys = results.keys()
if any([
'Text1.txt' not in keys,
'Text2.txt' not in keys,
'Text3.txt' not in keys
]):
self.fail("parse_docuscope_results didn't add expected files for one input file")
def test_should_handle_multiples_files(self):
results = TestResults.parse_docuscope_results([self.ds_results_file, self.ds_results_file_2])
keys = results.keys()
if any([
'Text1.txt' not in keys,
'Text2.txt' not in keys,
'Text3.txt' not in keys,
'Text4.txt' not in keys,
'Text5.txt' not in keys
]):
self.fail("parse_docuscope_results didn't add expected files for multiple input files")
def test_should_not_add_files_in_wrong_element(self):
results = TestResults.parse_docuscope_results([self.ds_wrong_tag_results_file])
if len(results.keys()) > 1:
self.fail('parse_docuscope_results added files not in AnnotatedText elements')
def test_should_do_nothing_if_missing_file_attribute(self):
results = TestResults.parse_docuscope_results([self.ds_wrong_attr_results_file])
# TODO: Bad test
if len(results.keys()) != 1:
self.fail("parse_docuscope_results didn't add files correctly")
def test_should_add_present_status(self):
results = TestResults.parse_docuscope_results([self.ds_results_file])
if any(map(lambda x: 'present' not in x, results.values())):
self.fail('parse_docuscope_results should add "present" key')
def test_should_add_text(self):
results = TestResults.parse_docuscope_results([self.ds_results_file])
# TODO: This test doesn't check as much as it should
if any(map(lambda x: 'text' not in x, results.values())):
self.fail('parse_docuscope_results should add "text" key')
class TestMatchFiles(TestCase):
def setUp(self):
self.results_files = {
'Text1.txt': {'text': '', 'present': False},
'Text2.txt': {'text': '', 'present': False},
'Text3.txt': {'text': '', 'present': False}
}
def test_should_copy_results(self):
if self.results_files != TestResults.match_files([], self.results_files):
self.fail('match_files should return results_files if input_files empty')
def test_should_set_file_true_if_in_inputs(self):
files = TestResults.match_files(['Text1.txt'], self.results_files)
if files['Text1.txt']['present'] is not True:
self.fail('match_files should set entries to True if present in input_files')
def test_should_keep_file_false_if_not_in_inputs(self):
files = TestResults.match_files(['Text1.txt'], self.results_files)
if any([
files['Text2.txt']['present'] is not False,
files['Text3.txt']['present'] is not False
]):
self.fail('match_files should keep entries set to False if not present in input_files')
def test_should_not_change_input_files(self):
input_files = ['Text1.txt']
old_input = copy.copy(input_files)
TestResults.match_files(input_files, self.results_files)
if old_input != input_files:
self.fail('match_files should not change input_files')
def test_should_not_change_results_files(self):
old_results = copy.copy(self.results_files)
TestResults.match_files(['Text1.txt'], self.results_files)
if old_results != self.results_files:
self.fail('match_files should not change results_files')
class TestComputeTestPairs(TestCase):
def setUp(self):
self.job = {
'Text1.txt': {'text': 'first', 'present': True},
'Text2.txt': {'text': 'second', 'present': False},
'Text3.txt': {'text': 'third', 'present': True}
}
self.input_files = {
'Text1.txt': {'fullpath': '/Text1.txt', 'text': ''},
'Text2.txt': {'fullpath': '/Text2.txt', 'text': ''},
'Text3.txt': {'fullpath': '/Text3.txt', 'text': ''},
}
self.results = TestResults.compute_test_pairs(self.job, self.input_files, self.format)
@staticmethod
def format(text):
return text
def test_should_throw_valueerror_if_too_few_input_files(self):
input_files = copy.copy(self.input_files)
del input_files['Text3.txt']
try:
TestResults.compute_test_pairs(self.job, input_files, self.format)
except ValueError:
return
self.fail('compute_test_pairs should throw ValueError if an input file is not in input_files')
def test_should_not_include_not_present_job_files(self):
if 'Text2.txt' in self.results:
self.fail('compute_test_pairs should not include texts if they are not "present" in the job')
def test_should_not_check_if_non_present_input_files_are_missing(self):
input_files = copy.copy(self.input_files)
del input_files['Text2.txt']
try:
TestResults.compute_test_pairs(self.job, input_files, self.format)
except ValueError:
self.fail("compute_test_pairs shouldn't throw ValueError if non-present job file is not in input_files")
def test_should_return_names(self):
for v in self.results.values():
if 'name' not in v:
self.fail('compute_test_pairs should return text names')
def test_should_return_ground_truths(self):
for text in self.results:
if self.results[text]['ground_truth'] != self.job[text]['text']:
self.fail('compute_test_pairs should return ground_truth text')
def test_should_return_formatted_input_file(self):
for v in self.results.values():
if 'test_input' not in v:
self.fail('compute_test_pairs should return test_input')
class TestCompareTestPairs(TestCase):
def setUp(self):
self.test_pairs = {
'Text1.txt': {
'name': 'Text1.txt',
'ground_truth': 'foo',
'test_input': 'foo'
},
'Text2.txt': {
'name': 'Text2.txt',
'ground_truth': 'foo',
'test_input': 'bar'
}
}
self.results = TestResults.compare_test_pairs(self.test_pairs, self.compare)
@staticmethod
def compare(t1, t2):
return {}
def test_should_return_results_for_each_pair(self):
if 'results' not in self.results['Text1.txt'] or 'results' not in self.results['Text2.txt']:
self.fail('compare_test_pairs should return results for each of the test pairs')
# TODO: Test more thoroughly
|
{
"content_hash": "bb90378a4f9551cd0d33aa7fe59c5fac",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 116,
"avg_line_length": 40.86037735849057,
"alnum_prop": 0.6048208348725527,
"repo_name": "uwgraphics/Ubiqu-Ity",
"id": "444a190a1b95aaf1953da26833ea09f6dfc98b31",
"size": "10828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Ity/Tools/test_TestResults.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "604180"
},
{
"name": "HTML",
"bytes": "750751"
},
{
"name": "JavaScript",
"bytes": "381542"
},
{
"name": "Makefile",
"bytes": "10123"
},
{
"name": "Python",
"bytes": "348678"
},
{
"name": "Ruby",
"bytes": "1797"
}
],
"symlink_target": ""
}
|
"""
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
from .bases import CellLoop
from .compatibility import one_dimension, two_dimensions, activity, random_generator
from .utils import offset_pos
from itertools import product, izip
import numpy as np
class OneDimCellLoop(CellLoop):
"""The OneDimCellLoop iterates over all cells in order from 0 to sizeX."""
requires_features = [one_dimension]
def get_pos(self):
return "loop_x",
def visit(self):
super(OneDimCellLoop, self).visit()
self.code.add_weave_code("loop_begin",
"""for(int loop_x=0; loop_x < sizeX; loop_x++) {""")
self.code.add_weave_code("loop_end",
"""}""")
def get_iter(self):
return iter(izip(xrange(0, self.code.acc.get_size_of(0))))
def build_name(self, parts):
parts.insert(0, "1d")
class TwoDimCellLoop(CellLoop):
"""The TwoDimCellLoop iterates over all cells from left to right, then from
top to bottom."""
requires_features = [two_dimensions]
def get_pos(self):
return "loop_x", "loop_y"
def visit(self):
super(TwoDimCellLoop, self).visit()
size_names = self.code.acc.size_names
self.code.add_weave_code("loop_begin",
"""for(int loop_x=0; loop_x < %s; loop_x++) {
for(int loop_y=0; loop_y < %s; loop_y++) {""" % (size_names))
self.code.add_weave_code("loop_end",
"""}
}""")
def get_iter(self):
return iter(product(xrange(0, self.code.acc.get_size_of(0)),
xrange(0, self.code.acc.get_size_of(1))))
def build_name(self, parts):
parts.insert(0, "2d")
class SparseCellLoop(CellLoop):
"""The SparseCellLoop offers common code for loops that only calculate
those fields, where the neighbours have changed in the last step.
This is based on a list of positions called `sparse_list` as well as a mask
of booleans called `sparse_mask`, that only internally gets used to make
sure, that no fields are enlisted more than once.
The `sparse_list` is duplicated into the property `prev_sparse_list`, from
which reads are performed. All entries in the sparse_list arrays are valid
up to the first -1.
For the pure-py version, a normal python set is used.
It requires an ActivityRecord for the `was_active` flag."""
probab = None
def set_target(self, target):
"""Adds the activity mask and position list to the target attributes."""
super(SparseCellLoop, self).set_target(target)
size = self.calculate_size()
target.sparse_mask = np.zeros(size, dtype=bool)
target.sparse_list = np.zeros(size, dtype=int)
target.sparse_set = set()
def bind(self, code):
super(SparseCellLoop, self).bind(code)
if self.probab is not None:
code.consts["NONDET_PROBAB"] = self.probab
def get_pos(self):
return self.position_names
def calculate_size(self):
"""Calculate how big the mask and list have to be.
The current strategy is to just allocate one field for each field of the
configuration."""
return reduce(lambda a, b: a * b, self.target.size)
def mark_cell_py(self, pos):
positions = [offset_pos(pos, offs) for offs in
self.code.neigh.affected_cells()]
positions = [pos
if self.code.border.is_position_valid(pos)
else self.code.border.correct_position(pos)
for pos in positions]
self.target.sparse_set.update(positions)
def new_config(self):
size = self.calculate_size()
self.target.sparse_mask = np.ones(size, dtype=bool)
self.target.sparse_list = np.array(list(range(size)) + [-1] , dtype=int)
self.target.prev_sparse_list = self.target.sparse_list.copy()
self.target.sparse_set = set(product(*[range(siz) for siz in self.target.size]))
def get_iter(self):
# iterate over a copy of the set, so that it can be modified while running
the_list = list(self.target.sparse_set)
self.target.sparse_set.clear()
if self.probab is not None:
# get a list of entries to go through
sublist = []
for pos in the_list:
if self.code.random.random() >= self.probab:
sublist.append(pos)
else:
self.target.sparse_set.update([pos])
return iter(sublist)
else:
return iter(the_list)
def visit(self):
super(SparseCellLoop, self).visit()
self.code.attrs.append("sparse_mask")
self.code.attrs.append("sparse_list")
self.code.attrs.append("prev_sparse_list")
self.code.add_py_code("loop_end",
"""if was_active: self.loop.mark_cell_py(pos)""")
self.code.add_weave_code("localvars",
"""int sparse_cell_write_idx = 0;""")
# copy all data over, because of the inactive cells.
self.code.add_weave_code("localvars",
"""nconf = cconf.copy();""")
self.code.add_weave_code("loop_begin",
"""for(int cell_idx=0; prev_sparse_list(cell_idx) != -1; cell_idx++) {""")
if self.probab is not None:
self.code.add_weave_code("loop_begin",
"""if(rand() >= RAND_MAX * NONDET_PROBAB) {
if(!sparse_mask(cell_idx)) {
sparse_list(sparse_cell_write_idx) = cell_idx;
sparse_mask(cell_idx) = true;
sparse_cell_write_idx++;
}
continue;
}""")
if len(self.position_names) == 1:
self.code.add_weave_code("loop_begin",
""" int %s = prev_sparse_list(cell_idx);""" % self.position_names)
elif len(self.position_names) == 2:
self.code.add_weave_code("loop_begin",
""" int %(pos_a)s = prev_sparse_list(cell_idx) %% %(size_a)s;
int %(pos_b)s = prev_sparse_list(cell_idx) / %(size_b)s;""" %
dict(pos_a = self.position_names[0],
pos_b = self.position_names[1],
size_a = self.code.acc.size_names[0],
size_b = self.code.acc.size_names[1]))
# FIXME use proper position names here
if len(self.position_names) == 1:
self.code.add_weave_code("loop_end",
"""if(was_active) {
%s
}""" % ("\n".join([
"""
{int idx = %(wrap_x)s;
if(!sparse_mask(idx)) {
sparse_list(sparse_cell_write_idx) = idx;
sparse_mask(idx) = true;
sparse_cell_write_idx++;
}}""" % dict(offs_x=offs[0],
wrap_x=self.code.border.correct_position_c(["loop_x + %s" % (offs[0])])[0])
for offs in self.code.neigh.offsets])))
elif len(self.position_names) == 2:
self.code.add_weave_code("loop_end",
"""if(was_active) {
%s
}""" % ("\n".join([
"""
{int px = loop_x + %(offs_x)s;
int py = loop_y + %(offs_y)s;
%(wrap)s;
int idx = px * %(size_x)s + py;
if(!sparse_mask(idx)) {
sparse_list(sparse_cell_write_idx) = idx;
sparse_mask(idx) = true;
sparse_cell_write_idx++;
}}""" % dict(offs_x=offs[0], offs_y=offs[1],
size_x=self.code.acc.size_names[0],
wrap="px = " + ("; py = ".join(self.code.border.correct_position_c(
("px", "py")
))))
for offs in self.code.neigh.offsets])))
self.code.add_weave_code("loop_end",
"""
}
// null the sparse mask
sparse_mask = 0;
sparse_list(sparse_cell_write_idx) = -1;
""")
class OneDimSparseCellLoop(SparseCellLoop):
requires_features = [one_dimension, activity]
def __init__(self):
self.position_names = "loop_x",
class TwoDimSparseCellLoop(SparseCellLoop):
requires_features = [two_dimensions, activity]
def __init__(self):
self.position_names = "loop_x", "loop_y"
class OneDimSparseNondetCellLoop(OneDimSparseCellLoop):
requires_features = [one_dimension, activity, random_generator]
def __init__(self, probab=0.5):
super(OneDimSparseNondetCellLoop, self).__init__()
self.probab = probab
class TwoDimSparseNondetCellLoop(SparseCellLoop):
requires_features = [two_dimensions, activity, random_generator]
def __init__(self, probab=0.5):
super(OneDimSparseNondetCellLoop, self).__init__()
self.probab = probab
|
{
"content_hash": "623d06f6e1295bba58b0521646ff9257",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 115,
"avg_line_length": 39.87866108786611,
"alnum_prop": 0.5254432903158116,
"repo_name": "timo/zasim",
"id": "3d75f5cfd9008f83e1523ea0fc3f8f6083ea9df4",
"size": "9531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zasim/cagen/loops.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "423165"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import unittest
import pytest
from _pydev_bundle.pydev_stdin import StdIn
from _pydev_bundle.pydev_localhost import get_localhost
from _pydev_comm.pydev_rpc import make_rpc_client
from _pydevd_bundle import pydevd_io
from pydev_console.pydev_protocol import PythonConsoleFrontendService, PythonConsoleBackendService
from pydevconsole import enable_thrift_logging, create_server_handler_factory
try:
xrange
except:
xrange = range
def eq_(a, b):
if a != b:
raise AssertionError('%s != %s' % (a, b))
try:
from IPython import core
has_ipython = True
except:
has_ipython = False
@pytest.mark.skipif(not has_ipython, reason='IPython not available')
class TestBase(unittest.TestCase):
def setUp(self):
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_ipython_frontend
# PyDevFrontEnd depends on singleton in IPython, so you
# can't make multiple versions. So we reuse self.front_end for
# all the tests
self.front_end = get_pydev_ipython_frontend(None)
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(lambda:True)
self.front_end.clear_buffer()
def tearDown(self):
pass
def add_exec(self, code, expected_more=False):
more = self.front_end.add_exec(code)
eq_(expected_more, more)
def redirect_stdout(self):
from IPython.utils import io
self.original_stdout = sys.stdout
sys.stdout = io.stdout = pydevd_io.IOBuf()
def restore_stdout(self):
from IPython.utils import io
io.stdout = sys.stdout = self.original_stdout
@pytest.mark.skipif(not has_ipython, reason='IPython not available')
class TestPyDevFrontEnd(TestBase):
def testAddExec_1(self):
self.add_exec('if True:', True)
def testAddExec_2(self):
#Change: 'more' must now be controlled in the client side after the initial 'True' returned.
self.add_exec('if True:\n testAddExec_a = 10\n', False)
assert 'testAddExec_a' in self.front_end.get_namespace()
def testAddExec_3(self):
assert 'testAddExec_x' not in self.front_end.get_namespace()
self.add_exec('if True:\n testAddExec_x = 10\n\n')
assert 'testAddExec_x' in self.front_end.get_namespace()
eq_(self.front_end.get_namespace()['testAddExec_x'], 10)
def test_get_namespace(self):
assert 'testGetNamespace_a' not in self.front_end.get_namespace()
self.add_exec('testGetNamespace_a = 10')
assert 'testGetNamespace_a' in self.front_end.get_namespace()
eq_(self.front_end.get_namespace()['testGetNamespace_a'], 10)
def test_complete(self):
unused_text, matches = self.front_end.complete('%')
assert len(matches) > 1, 'at least one magic should appear in completions'
def test_complete_does_not_do_python_matches(self):
# Test that IPython's completions do not do the things that
# PyDev's completions will handle
self.add_exec('testComplete_a = 5')
self.add_exec('testComplete_b = 10')
self.add_exec('testComplete_c = 15')
unused_text, matches = self.front_end.complete('testComplete_')
assert len(matches) == 0
def testGetCompletions_1(self):
# Test the merged completions include the standard completions
self.add_exec('testComplete_a = 5')
self.add_exec('testComplete_b = 10')
self.add_exec('testComplete_c = 15')
res = self.front_end.getCompletions('testComplete_', 'testComplete_')
matches = [f[0] for f in res]
assert len(matches) == 3
eq_(set(['testComplete_a', 'testComplete_b', 'testComplete_c']), set(matches))
def testGetCompletions_2(self):
# Test that we get IPython completions in results
# we do this by checking kw completion which PyDev does
# not do by default
self.add_exec('def ccc(ABC=123): pass')
res = self.front_end.getCompletions('ccc(', '')
matches = [f[0] for f in res]
assert 'ABC=' in matches
def testGetCompletions_3(self):
# Test that magics return IPYTHON magic as type
res = self.front_end.getCompletions('%cd', '%cd')
assert len(res) == 1
eq_(res[0][3], '12') # '12' == IToken.TYPE_IPYTHON_MAGIC
assert len(res[0][1]) > 100, 'docstring for %cd should be a reasonably long string'
@pytest.mark.skipif(not has_ipython, reason='IPython not available')
class TestRunningCode(TestBase):
def test_print(self):
self.redirect_stdout()
try:
self.add_exec('print("output")')
eq_(sys.stdout.getvalue(), 'output\n')
finally:
self.restore_stdout()
def testQuestionMark_1(self):
self.redirect_stdout()
try:
self.add_exec('?')
found = sys.stdout.getvalue()
if len(found) < 1000:
raise AssertionError('Expected IPython help to be big. Found: %s' % (found,))
finally:
self.restore_stdout()
def testQuestionMark_2(self):
self.redirect_stdout()
try:
self.add_exec('int?')
found = sys.stdout.getvalue()
if 'Convert' not in found:
raise AssertionError('Expected to find "Convert" in %s' % (found,))
finally:
self.restore_stdout()
def test_gui(self):
try:
import Tkinter
except:
return
else:
from pydev_ipython.inputhook import get_inputhook
assert get_inputhook() is None
self.add_exec('%gui tk')
# we can't test the GUI works here because we aren't connected to XML-RPC so
# nowhere for hook to run
assert get_inputhook() is not None
self.add_exec('%gui none')
assert get_inputhook() is None
def test_history(self):
''' Make sure commands are added to IPython's history '''
self.redirect_stdout()
try:
self.add_exec('a=1')
self.add_exec('b=2')
_ih = self.front_end.get_namespace()['_ih']
eq_(_ih[-1], 'b=2')
eq_(_ih[-2], 'a=1')
self.add_exec('history')
hist = sys.stdout.getvalue().split('\n')
eq_(hist[-1], '')
eq_(hist[-2], 'history')
eq_(hist[-3], 'b=2')
eq_(hist[-4], 'a=1')
finally:
self.restore_stdout()
def test_edit(self):
''' Make sure we can issue an edit command'''
if os.environ.get('TRAVIS') == 'true':
# This test is too flaky on travis.
return
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_ipython_frontend
from _pydev_comm.pydev_rpc import start_rpc_server_and_make_client
called_RequestInput = [False]
called_IPythonEditor = [False]
class RequestInputHandler:
def __init__(self):
self.rpc_client = None
def requestInput(self, path):
called_RequestInput[0] = True
return '\n'
def IPythonEditor(self, name, line):
called_IPythonEditor[0] = (name, line)
return True
enable_thrift_logging()
# here we start the test server
server_socket = start_rpc_server_and_make_client(get_localhost(), 0,
PythonConsoleFrontendService,
PythonConsoleBackendService,
create_server_handler_factory(RequestInputHandler()))
host, port = server_socket.getsockname()
rpc_client, _ = make_rpc_client(PythonConsoleFrontendService, host, port)
# PyDevFrontEnd depends on singleton in IPython, so you
# can't make multiple versions. So we reuse self.front_end for
# all the tests
self.front_end = get_pydev_ipython_frontend(rpc_client)
orig_stdin = sys.stdin
sys.stdin = StdIn(self, rpc_client)
try:
filename = 'made_up_file.py'
self.add_exec('%edit ' + filename)
for i in xrange(10):
if called_IPythonEditor[0] == (os.path.abspath(filename), '0'):
break
time.sleep(.1)
if not called_IPythonEditor[0]:
# File "/home/travis/miniconda/lib/python3.3/site-packages/IPython/core/interactiveshell.py", line 2883, in run_code
# exec(code_obj, self.user_global_ns, self.user_ns)
# File "<ipython-input-15-09583ca3bce1>", line 1, in <module>
# get_ipython().magic('edit made_up_file.py')
# File "/home/travis/miniconda/lib/python3.3/site-packages/IPython/core/interactiveshell.py", line 2205, in magic
# return self.run_line_magic(magic_name, magic_arg_s)
# File "/home/travis/miniconda/lib/python3.3/site-packages/IPython/core/interactiveshell.py", line 2126, in run_line_magic
# result = fn(*args,**kwargs)
# File "<string>", line 2, in edit
# File "/home/travis/miniconda/lib/python3.3/site-packages/IPython/core/magic.py", line 193, in <lambda>
# call = lambda f, *a, **k: f(*a, **k)
# File "/home/travis/miniconda/lib/python3.3/site-packages/IPython/core/magics/code.py", line 662, in edit
# self.shell.hooks.editor(filename,lineno)
# File "/home/travis/build/fabioz/PyDev.Debugger/pydev_ipython_console_011.py", line 70, in call_editor
# server.IPythonEditor(filename, str(line))
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1090, in __call__
# return self.__send(self.__name, args)
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1419, in __request
# verbose=self.__verbose
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1132, in request
# return self.single_request(host, handler, request_body, verbose)
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1143, in single_request
# http_conn = self.send_request(host, handler, request_body, verbose)
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1255, in send_request
# self.send_content(connection, request_body)
# File "/home/travis/miniconda/lib/python3.3/xmlrpc/client.py", line 1285, in send_content
# connection.endheaders(request_body)
# File "/home/travis/miniconda/lib/python3.3/http/client.py", line 1061, in endheaders
# self._send_output(message_body)
# File "/home/travis/miniconda/lib/python3.3/http/client.py", line 906, in _send_output
# self.send(msg)
# File "/home/travis/miniconda/lib/python3.3/http/client.py", line 844, in send
# self.connect()
# File "/home/travis/miniconda/lib/python3.3/http/client.py", line 822, in connect
# self.timeout, self.source_address)
# File "/home/travis/miniconda/lib/python3.3/socket.py", line 435, in create_connection
# raise err
# File "/home/travis/miniconda/lib/python3.3/socket.py", line 426, in create_connection
# sock.connect(sa)
# ConnectionRefusedError: [Errno 111] Connection refused
# I.e.: just warn that the test failing, don't actually fail.
sys.stderr.write('Test failed: this test is brittle in travis because sometimes the connection is refused (as above) and we do not have a callback.\n')
return
eq_(called_IPythonEditor[0], (os.path.abspath(filename), '0'))
assert called_RequestInput[0], "Make sure the 'wait' parameter has been respected"
finally:
sys.stdin = orig_stdin
|
{
"content_hash": "b371e76772b75eac0d3a05ecdcc1e3a2",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 167,
"avg_line_length": 40.62866449511401,
"alnum_prop": 0.5863064218712419,
"repo_name": "siosio/intellij-community",
"id": "8a1b63d7381ab184ac2d3414b5d90cbf0be379c9",
"size": "12473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/pydev_tests/test_pydev_ipython_011.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
def extractToffeedragontlCom(item):
'''
Parser for 'toffeedragontl.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "f7d78c18037adf146bd91a1715fbe161",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.095238095238095,
"alnum_prop": 0.6295620437956204,
"repo_name": "fake-name/ReadableWebProxy",
"id": "ddb3cb0b6559b715176914c409076cc736534e7a",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractToffeedragontlCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
"""Tool to automatically perform volumetric meshing prior to
executing a Glossia Python Container Module simulation. Updates
region YAML files to note mesh labels for specific regions."""
import sys
import asyncio
import click
import os
import shutil
import yaml
import traceback
import lxml.etree
import mesher_gssf
import gosmart
gosmart.setup(False)
@asyncio.coroutine
def mesh_and_go(target, mesh=None, gssf_settings_xml='/shared/input/settings.xml'):
working_directory = '/shared/output/run'
original_input = '/shared/input'
run_input = os.path.join(working_directory, 'input')
input_msh = os.path.join(run_input, 'input.msh')
labelling_yaml = os.path.join(run_input, 'mesh_labelling.yml')
original_regions_yaml = os.path.join(original_input, 'regions.yml')
regions_yaml = os.path.join(run_input, 'regions.yml')
try:
shutil.rmtree(run_input)
except FileNotFoundError:
pass
shutil.copytree(original_input, run_input)
if mesh is None:
# Launch
task = yield from asyncio.create_subprocess_exec(
'go-smart-launcher',
gssf_settings_xml,
cwd=working_directory
)
# Hold off until meshing is complete
yield from task.wait()
# Pick out the relevant mesher output
msh_input = os.path.join(
working_directory,
"mesher",
"elmer_libnuma.msh"
)
mesh_labelling_yaml = os.path.join(
working_directory,
"mesher",
"mesh_labelling.yml"
)
# Check for success from GSSF mesher-cgal
success = (task.returncode == 0)
if not success:
return task.returncode
else:
msh_input, mesh_labelling_yaml = mesh.split(':')
shutil.copyfile(msh_input, input_msh)
shutil.copyfile(mesh_labelling_yaml, labelling_yaml)
# Update the regions based on this regions file
with open(labelling_yaml, "r") as f:
mesh_labelling = yaml.load(f)
regions = mesh_labelling.copy()
with open(original_regions_yaml, "r") as f:
region_dict = yaml.load(f)
regions.update(region_dict)
for k, v in regions.items():
if k in mesh_labelling:
v.update(mesh_labelling[k])
# Update the regions based on this regions file
with open(regions_yaml, "w") as f:
yaml.dump(regions, f, default_flow_style=False)
# Launch
print("Running target", target)
task = yield from asyncio.create_subprocess_exec(
'/usr/bin/python2',
target,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=working_directory
)
yield from task.wait()
print("Target run")
return task.returncode
@click.command()
@click.option('--mesh', default=None,
help='Colon separated mesh filename and labelling filename')
@click.option('--gssa-xml', default=None)
@click.argument('target')
def run(mesh, gssa_xml, target):
print("Starting Mesh & Go...")
gssf_settings_xml = '/shared/input/settings.xml'
if gssa_xml:
if not os.path.exists(gssa_xml):
raise RuntimeError("Passed GSSA-XML file does not exist")
with open(gssa_xml, 'r') as f:
tree = lxml.etree.parse(f)
gssf_xml_root = mesher_gssf.to_mesh_xml(tree.getroot())
gssf_settings_xml = '/shared/output/settings.xml'
with open(gssf_settings_xml, 'w') as f:
f.write(lxml.etree.tostring(gssf_xml_root, pretty_print=True).decode('utf-8'))
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(mesh_and_go(target, mesh, gssf_settings_xml))
try:
loop.run_until_complete(future)
except:
traceback.print_exc()
result = 1
else:
result = future.result()
finally:
loop.close()
print("Exiting Mesh & Go with code %d" % int(result))
if result != 0:
raise SystemExit(result)
return 0
if __name__ == '__main__':
sys.exit(run())
|
{
"content_hash": "a4c6a3138b341f2d15302ad7ec08bb6e",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 90,
"avg_line_length": 26.71523178807947,
"alnum_prop": 0.6249380267724343,
"repo_name": "go-smart/glossia-container-fenics",
"id": "a6162bb5465b4c3576a6b7bef69d809f9d03d6ec",
"size": "4057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesh_and_go.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2927"
},
{
"name": "Shell",
"bytes": "43"
}
],
"symlink_target": ""
}
|
"""\
Tests if install_as_pyqrcode works
"""
from __future__ import absolute_import, unicode_literals
import segno_mimos
import pytest
import io
def test_install_segno_as_pyqrcode():
segno_mimos.install_as_pyqrcode()
import pyqrcode
qr = pyqrcode.create('Hello')
out = io.BytesIO()
qr.png(out)
out.seek(0)
assert out.getvalue().startswith(b'\211PNG\r\n\032\n')
if __name__ == '__main__':
pytest.main([__file__])
|
{
"content_hash": "4a1adcb9ce80eb0fecb6b1a12ffcdb32",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.6554809843400448,
"repo_name": "heuer/segno-mimos",
"id": "9110f41a3602b548523c4820a43ce5005a4a4cf2",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pyqrcode/test_fake_pyqrcode_segno.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71189"
}
],
"symlink_target": ""
}
|
from hashlib import md5
from flask_mongoengine.documents import DynamicDocument
from mongoengine import signals, EmbeddedDocument
from mongoengine.fields import StringField, ListField, IntField, EmbeddedDocumentField
from mongoengine.queryset.manager import queryset_manager
from mpcontribs.api.contributions.document import format_cell, get_resource, get_md5, COMPONENTS
class Labels(EmbeddedDocument):
index = StringField(help_text="index name / x-axis label")
value = StringField(help_text="columns name / y-axis label")
variable = StringField(help_text="legend name")
class Attributes(EmbeddedDocument):
title = StringField(help_text="title")
labels = EmbeddedDocumentField(Labels)
class Tables(DynamicDocument):
name = StringField(required=True, help_text="name / title")
attrs = EmbeddedDocumentField(Attributes)
index = ListField(StringField(), required=True, help_text="index column")
columns = ListField(StringField(), required=True, help_text="column names/headers")
data = ListField(ListField(StringField()), required=True, help_text="table rows")
md5 = StringField(regex=r"^[a-z0-9]{32}$", unique=True, help_text="md5 sum")
total_data_rows = IntField(help_text="total number of rows")
meta = {"collection": "tables", "indexes": [
"name", "columns", "md5", "attrs.title",
"attrs.labels.index", "attrs.labels.value", "attrs.labels.variable"
]}
@queryset_manager
def objects(doc_cls, queryset):
return queryset.only("name", "md5", "attrs", "columns", "total_data_rows")
@classmethod
def post_init(cls, sender, document, **kwargs):
document.data = [[format_cell(cell) for cell in row] for row in document.data]
@classmethod
def pre_save_post_validation(cls, sender, document, **kwargs):
# significant digits, md5 and total_data_rows
resource = get_resource("tables")
document.md5 = get_md5(resource, document, COMPONENTS["tables"])
document.total_data_rows = len(document.data)
signals.post_init.connect(Tables.post_init, sender=Tables)
signals.pre_save_post_validation.connect(Tables.pre_save_post_validation, sender=Tables)
|
{
"content_hash": "31b331e4a6ce2751e5820746a50599d1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 96,
"avg_line_length": 42.94117647058823,
"alnum_prop": 0.7178082191780822,
"repo_name": "materialsproject/MPContribs",
"id": "dc52876df12b8de493e3037f2ae2657c8761240c",
"size": "2214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpcontribs-api/mpcontribs/api/tables/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7070"
},
{
"name": "HTML",
"bytes": "93413"
},
{
"name": "JavaScript",
"bytes": "86685"
},
{
"name": "Jinja",
"bytes": "4696"
},
{
"name": "Jupyter Notebook",
"bytes": "244012"
},
{
"name": "Makefile",
"bytes": "1682"
},
{
"name": "Python",
"bytes": "349352"
},
{
"name": "SCSS",
"bytes": "4141"
},
{
"name": "Shell",
"bytes": "2354"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import unittest
from adynaton.ftp import FileTransferProtocolClient
class TestFileTransferProtocolClient(unittest.TestCase):
"""
Test for Object
"""
def setUp(self):
"""
Setup for unit tests
"""
self.theclass = FileTransferProtocolClient()
def test_dummy(self):
"""
Method Test
"""
self.assertTrue(self.theclass.dummy())
|
{
"content_hash": "8871c0e0e2fe32d6a01c567663268f9a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 60,
"avg_line_length": 30.076923076923077,
"alnum_prop": 0.7297527706734868,
"repo_name": "lathama/Adynaton",
"id": "206a41c5de1c103777e0d683c272efd7fd98f1f6",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adynaton/unittests/test_FileTransferProtocolClient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168176"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import time
from .settings import Settings
class Logger:
def __init__(self):
self.logfolder = Settings.log_location + os.path.sep
self.loggerobj = self.get_logger(Settings.log_output_toconsole)
def set_logfolder(self):
if not os.path.exists(self.logfolder):
os.makedirs(self.logfolder)
def set_logfile(self):
if Settings.log_file_per_run is True:
timestr = time.strftime("%Y-%m-%d-%H-%M-%S")
file = '{}general'.format(self.logfolder) + ' ' + timestr + '.log'
else:
file = '{}general.log'.format(self.logfolder)
return file
def get_logger(self, show_logs):
if sys.version_info >= (3, 7):
sys.stdout.reconfigure(encoding='utf-8')
existing_logger = Settings.loggers.get(__name__)
if existing_logger is not None:
# print('logger already exists')
return existing_logger
else:
# print('logger catch new one')
self.set_logfolder()
# initialize and setup logging system
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logfile = self.set_logfile()
file_handler = logging.FileHandler(logfile, encoding='UTF-8')
file_handler.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(logger_formatter)
logger.addHandler(file_handler)
if show_logs:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(logger_formatter)
logger.addHandler(console_handler)
Settings.loggers[__name__] = logger
Settings.logger = logger
return logger
class InstaLogger:
def __init__(self):
print('init log')
@staticmethod
def logger():
return Logger().loggerobj
|
{
"content_hash": "e90218fc2d782260f11ef38e1df6b033",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 120,
"avg_line_length": 31.954545454545453,
"alnum_prop": 0.586533902323376,
"repo_name": "timgrossmann/instagram-profilecrawl",
"id": "532bac518802be8212afe5a7a0dac1cf2f3e3eb1",
"size": "2109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/instalogger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1223"
},
{
"name": "Python",
"bytes": "68413"
},
{
"name": "Shell",
"bytes": "1860"
}
],
"symlink_target": ""
}
|
import copy
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers import smbfs
class SmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_MNT_BASE = '/mnt'
_FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_TOTAL_SIZE = '2048'
_FAKE_TOTAL_AVAILABLE = '1024'
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
'size': 1,
'provider_location': _FAKE_SHARE,
'name': _FAKE_VOLUME_NAME,
'status': 'available'}
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash')
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
_FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
'volume': _FAKE_VOLUME,
'status': 'available',
'volume_size': 1}
_FAKE_SNAPSHOT_PATH = (
_FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_OPTIONS_DICT = {'username': 'Administrator',
'password': '12345'}
_FAKE_LISTDIR = [_FAKE_VOLUME_NAME, _FAKE_VOLUME_NAME + '.vhd',
_FAKE_VOLUME_NAME + '.vhdx', 'fake_folder']
_FAKE_SMBFS_CONFIG = mock.MagicMock()
_FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
_FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
_FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
_FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
_FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
def setUp(self):
super(SmbFsTestCase, self).setUp()
smbfs.SmbfsDriver.__init__ = lambda x: None
self._smbfs_driver = smbfs.SmbfsDriver()
self._smbfs_driver._remotefsclient = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock()
self._smbfs_driver.base = self._FAKE_MNT_BASE
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self._FAKE_VOLUME)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def _test_setup(self, config, share_config_exists=True):
fake_exists = mock.Mock(return_value=share_config_exists)
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver.configuration = config
with mock.patch('os.path.exists', fake_exists):
if not (config.smbfs_shares_config and share_config_exists and
config.smbfs_oversub_ratio > 0 and
0 <= config.smbfs_used_ratio <= 1):
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.do_setup,
None)
else:
self._smbfs_driver.do_setup(None)
self.assertEqual(self._smbfs_driver.shares, {})
fake_ensure_mounted.assert_called_once_with()
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
self._test_setup(fake_config, None)
def test_setup_missing_shares_config_file(self):
self._test_setup(self._FAKE_SMBFS_CONFIG, False)
def test_setup_invlid_oversub_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_oversub_ratio = -1
self._test_setup(fake_config)
def test_setup_invalid_used_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_used_ratio = -1
self._test_setup(fake_config)
def _test_create_volume(self, volume_exists=False, volume_format=None):
fake_method = mock.MagicMock()
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
self._smbfs_driver.get_volume_format = mock.MagicMock()
windows_image_format = False
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_volume_format.return_value = volume_format
if volume_format:
if volume_format in ('vhd', 'vhdx'):
windows_image_format = volume_format
if volume_format == 'vhd':
windows_image_format = 'vpc'
method = '_create_windows_image'
fake_vol_path += '.' + volume_format
else:
method = '_create_%s_file' % volume_format
if volume_format == 'sparsed':
self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
True)
else:
method = '_create_regular_file'
setattr(self._smbfs_driver, method, fake_method)
with mock.patch('os.path.exists', new=lambda x: volume_exists):
if volume_exists:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self._FAKE_VOLUME)
return
self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
if windows_image_format:
fake_method.assert_called_once_with(
fake_vol_path,
self._FAKE_VOLUME['size'],
windows_image_format)
else:
fake_method.assert_called_once_with(
fake_vol_path, self._FAKE_VOLUME['size'])
fake_set_permissions.assert_called_once_with(fake_vol_path)
def test_create_existing_volume(self):
self._test_create_volume(volume_exists=True)
def test_create_vhdx(self):
self._test_create_volume(volume_format='vhdx')
def test_create_qcow2(self):
self._test_create_volume(volume_format='qcow2')
def test_create_sparsed(self):
self._test_create_volume(volume_format='sparsed')
def test_create_regular(self):
self._test_create_volume()
def _test_find_share(self, existing_mounted_shares=True,
eligible_shares=True):
if existing_mounted_shares:
mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
else:
mounted_shares = None
self._smbfs_driver._mounted_shares = mounted_shares
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=eligible_shares)
fake_capacity_info = ((2, 1, 5), (2, 1, 4), (2, 1, 1))
self._smbfs_driver._get_capacity_info = mock.Mock(
side_effect=fake_capacity_info)
if not mounted_shares:
self.assertRaises(exception.SmbfsNoSharesMounted,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
elif not eligible_shares:
self.assertRaises(exception.SmbfsNoSuitableShareFound,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
else:
ret_value = self._smbfs_driver._find_share(
self._FAKE_VOLUME['size'])
# The eligible share with the minimum allocated space
# will be selected
self.assertEqual(ret_value, 'fake_share3')
def test_find_share(self):
self._test_find_share()
def test_find_share_missing_mounted_shares(self):
self._test_find_share(existing_mounted_shares=False)
def test_find_share_missing_eligible_shares(self):
self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
self._smbfs_driver._get_capacity_info = mock.Mock(
return_value=[float(x << 30) for x in capacity_info])
self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
volume_size)
def test_share_volume_above_used_ratio(self):
fake_capacity_info = (4, 1, 1)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertFalse(ret_value)
def test_eligible_share(self):
fake_capacity_info = (4, 4, 0)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertTrue(ret_value)
def test_share_volume_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 7)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertFalse(ret_value)
def test_share_reserved_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 10)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertFalse(ret_value)
def test_parse_options(self):
(opt_list,
opt_dict) = self._smbfs_driver.parse_options(
self._FAKE_SHARE_OPTS)
expected_ret = ([], self._FAKE_OPTIONS_DICT)
self.assertEqual(expected_ret, (opt_list, opt_dict))
def test_parse_credentials(self):
fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
expected_flags = '-o username=Administrator,noperm'
flags = self._smbfs_driver.parse_credentials(fake_smb_options)
self.assertEqual(expected_flags, flags)
@mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
@mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.SmbfsDriver, 'get_volume_format')
def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume,
mock_get_path_template, volume_exists=True,
volume_format='raw'):
drv = self._smbfs_driver
mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
expected_vol_path = self._FAKE_VOLUME_PATH
if volume_format in (drv._DISK_FORMAT_VHD, drv._DISK_FORMAT_VHDX):
expected_vol_path += '.' + volume_format
mock_lookup_volume.return_value = (
expected_vol_path if volume_exists else None)
mock_get_volume_format.return_value = volume_format
ret_val = drv.local_path(self._FAKE_VOLUME)
if volume_exists:
self.assertFalse(mock_get_volume_format.called)
else:
mock_get_volume_format.assert_called_once_with(self._FAKE_VOLUME)
self.assertEqual(expected_vol_path, ret_val)
def test_get_existing_volume_path(self):
self._test_get_volume_path()
def test_get_new_raw_volume_path(self):
self._test_get_volume_path(volume_exists=False)
def test_get_new_vhd_volume_path(self):
self._test_get_volume_path(volume_exists=False, volume_format='vhd')
@mock.patch.object(smbfs.SmbfsDriver, '_local_volume_dir')
def test_get_local_volume_path_template(self, mock_get_local_dir):
mock_get_local_dir.return_value = self._FAKE_MNT_POINT
ret_val = self._smbfs_driver._get_local_volume_path_template(
self._FAKE_VOLUME)
self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)
@mock.patch('os.path.exists')
def test_lookup_local_volume_path(self, mock_exists):
expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
mock_exists.side_effect = lambda x: x == expected_path
ret_val = self._smbfs_driver._lookup_local_volume_path(
self._FAKE_VOLUME_PATH)
possible_paths = [self._FAKE_VOLUME_PATH + ext
for ext in ('', '.vhd', '.vhdx')]
mock_exists.assert_has_calls(
[mock.call(path) for path in possible_paths])
self.assertEqual(expected_path, ret_val)
@mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
@mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.SmbfsDriver, '_qemu_img_info')
@mock.patch.object(smbfs.SmbfsDriver, '_get_volume_format_spec')
def _mock_get_volume_format(self, mock_get_format_spec, mock_qemu_img_info,
mock_lookup_volume, mock_get_path_template,
qemu_format=False, volume_format='raw',
volume_exists=True):
mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
mock_lookup_volume.return_value = (
self._FAKE_VOLUME_PATH if volume_exists else None)
mock_qemu_img_info.return_value.file_format = volume_format
mock_get_format_spec.return_value = volume_format
ret_val = self._smbfs_driver.get_volume_format(self._FAKE_VOLUME,
qemu_format)
if volume_exists:
mock_qemu_img_info.assert_called_once_with(self._FAKE_VOLUME_PATH,
self._FAKE_VOLUME_NAME)
self.assertFalse(mock_get_format_spec.called)
else:
mock_get_format_spec.assert_called_once_with(self._FAKE_VOLUME)
self.assertFalse(mock_qemu_img_info.called)
return ret_val
def test_get_existing_raw_volume_format(self):
fmt = self._mock_get_volume_format()
self.assertEqual(fmt, 'raw')
def test_get_new_vhd_volume_format(self):
expected_fmt = 'vhd'
fmt = self._mock_get_volume_format(volume_format=expected_fmt,
volume_exists=False)
self.assertEqual(expected_fmt, fmt)
def test_get_new_vhd_legacy_volume_format(self):
img_fmt = 'vhd'
expected_fmt = 'vpc'
ret_val = self._mock_get_volume_format(volume_format=img_fmt,
volume_exists=False,
qemu_format=True)
self.assertEqual(expected_fmt, ret_val)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format='raw'))
fake_data = {'export': self._FAKE_SHARE,
'format': 'raw',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self._FAKE_VOLUME, None)
self.assertEqual(expected, ret_val)
def _test_extend_volume(self, extend_failed=False, image_format='raw'):
drv = self._smbfs_driver
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv._check_extend_volume_support = mock.Mock(
return_value=True)
drv._is_file_size_equal = mock.Mock(
return_value=not extend_failed)
drv._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format=image_format))
drv._delete = mock.Mock()
with mock.patch.object(image_utils, 'resize_image') as fake_resize, \
mock.patch.object(image_utils, 'convert_image') as \
fake_convert:
if extend_failed:
self.assertRaises(exception.ExtendVolumeError,
drv._extend_volume,
self._FAKE_VOLUME, mock.sentinel.new_size)
else:
drv._extend_volume(
self._FAKE_VOLUME,
mock.sentinel.new_size)
if image_format in (drv._DISK_FORMAT_VHDX,
drv._DISK_FORMAT_VHD_LEGACY):
fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
fake_tmp_path, 'raw')
fake_resize.assert_called_once_with(
fake_tmp_path, mock.sentinel.new_size)
fake_convert.assert_any_call(fake_tmp_path,
self._FAKE_VOLUME_PATH,
image_format)
else:
fake_resize.assert_called_once_with(
self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
def test_extend_volume(self):
self._test_extend_volume()
def test_extend_volume_failed(self):
self._test_extend_volume(extend_failed=True)
def test_extend_vhd_volume(self):
self._test_extend_volume(image_format='vpc')
def _test_check_extend_support(self, has_snapshots=False,
is_eligible=True):
self._smbfs_driver.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
if has_snapshots:
active_file_path = self._FAKE_SNAPSHOT_PATH
else:
active_file_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=active_file_path)
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=is_eligible)
if has_snapshots:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
elif not is_eligible:
self.assertRaises(exception.ExtendVolumeError,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
else:
self._smbfs_driver._check_extend_volume_support(
self._FAKE_VOLUME, 2)
self._smbfs_driver._is_share_eligible.assert_called_once_with(
self._FAKE_SHARE, 1)
def test_check_extend_support(self):
self._test_check_extend_support()
def test_check_extend_volume_with_snapshots(self):
self._test_check_extend_support(has_snapshots=True)
def test_check_extend_volume_uneligible_share(self):
self._test_check_extend_support(is_eligible=False)
def test_create_volume_from_in_use_snapshot(self):
fake_snapshot = {'status': 'in-use'}
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self._FAKE_VOLUME, fake_snapshot)
def test_copy_volume_from_snapshot(self):
drv = self._smbfs_driver
fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
fake_img_info = mock.MagicMock()
fake_img_info.backing_file = self._FAKE_VOLUME_NAME
drv.get_volume_format = mock.Mock(
return_value='raw')
drv._local_path_volume_info = mock.Mock(
return_value=self._FAKE_VOLUME_PATH + '.info')
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv._read_info_file = mock.Mock(
return_value=fake_volume_info)
drv._qemu_img_info = mock.Mock(
return_value=fake_img_info)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH[:-1])
drv._extend_volume = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
with mock.patch.object(image_utils, 'convert_image') as (
fake_convert_image):
drv._copy_volume_from_snapshot(
self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
self._FAKE_VOLUME['size'])
drv._extend_volume.assert_called_once_with(
self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
fake_convert_image.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
def _test_copy_image_to_volume(self, unsupported_qemu_version=False,
wrong_size_after_fetch=False):
drv = self._smbfs_driver
vol_size_bytes = self._FAKE_VOLUME['size'] << 30
fake_image_service = mock.MagicMock()
fake_image_service.show.return_value = (
{'id': 'fake_image_id', 'disk_format': 'raw'})
fake_img_info = mock.MagicMock()
if wrong_size_after_fetch:
fake_img_info.virtual_size = 2 * vol_size_bytes
else:
fake_img_info.virtual_size = vol_size_bytes
if unsupported_qemu_version:
qemu_version = [1, 5]
else:
qemu_version = [1, 7]
drv.get_volume_format = mock.Mock(
return_value=drv._DISK_FORMAT_VHDX)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.get_qemu_version = mock.Mock(
return_value=qemu_version)
drv._do_extend_volume = mock.Mock()
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = (
mock.sentinel.block_size)
exc = None
with mock.patch.object(image_utils, 'fetch_to_volume_format') as \
fake_fetch, mock.patch.object(image_utils, 'qemu_img_info') as \
fake_qemu_img_info:
if wrong_size_after_fetch:
exc = exception.ImageUnacceptable
elif unsupported_qemu_version:
exc = exception.InvalidVolume
fake_qemu_img_info.return_value = fake_img_info
if exc:
self.assertRaises(
exc, drv.copy_image_to_volume,
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
else:
drv.copy_image_to_volume(
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context, fake_image_service,
mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
drv._DISK_FORMAT_VHDX,
mock.sentinel.block_size)
drv._do_extend_volume.assert_called_once_with(
self._FAKE_VOLUME_PATH,
self._FAKE_VOLUME['size'],
self._FAKE_VOLUME['name'])
def test_copy_image_to_volume(self):
self._test_copy_image_to_volume()
def test_copy_image_to_volume_wrong_size_after_fetch(self):
self._test_copy_image_to_volume(wrong_size_after_fetch=True)
def test_copy_image_to_volume_unsupported_qemu_version(self):
self._test_copy_image_to_volume(unsupported_qemu_version=True)
def test_get_capacity_info(self):
fake_block_size = 4096.0
fake_total_blocks = 1024
fake_avail_blocks = 512
fake_total_allocated = fake_total_blocks * fake_block_size
fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
fake_avail_blocks), None)
fake_du = (str(fake_total_allocated), None)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock(
side_effect=(fake_df, fake_du))
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected = (fake_block_size * fake_total_blocks,
fake_block_size * fake_avail_blocks,
fake_total_allocated)
self.assertEqual(expected, ret_val)
|
{
"content_hash": "8bd816a85113ba5bceef8964a369f28e",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 80,
"avg_line_length": 42.10819672131147,
"alnum_prop": 0.5751382075838979,
"repo_name": "tmenjo/cinder-2015.1.0",
"id": "df04f9d3c8559f896378f799ddf02f3c57280ebf",
"size": "26303",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/test_smbfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10650346"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
"""Stop Entity."""
import mzgeohash
import geom
import util
import errors
from entity import Entity
class Stop(Entity):
"""Transitland Stop Entity."""
onestop_type = 's'
def init(self, **data):
self.timezone = data.pop('timezone', None)
def geohash(self):
"""Return 10 characters of geohash."""
return mzgeohash.encode(self.point())
# Work with other interfaces
def point(self):
return self.geometry()['coordinates']
def add_tags_gtfs(self, gtfs_entity):
keys = [
'wheelchair_boarding',
'stop_desc',
'stop_url',
'zone_id'
]
data = gtfs_entity.data._asdict()
self.timezone = data.pop('stop_timezone', None)
for key in keys:
if key in data:
self.set_tag(key, data[key])
def get_timezone(self):
if self.timezone:
return self.timezone
tz = set(i.timezone for i in self.operators())
if len(tz) > 1:
raise ValueError, "Ambiguous timezone; stop used by multiple agencies with differing timezones"
return tz.pop()
# Load / dump
def json(self):
return {
'type': 'Feature',
'properties': {},
'geometry': self.geometry(),
'onestopId': self.onestop(),
'name': self.name(),
'tags': self.tags(),
'timezone': self.get_timezone(),
'identifiers': sorted(self.identifiers()),
'servedBy': sorted(self.servedBy()),
}
# Graph
def servedBy(self):
"""Return the operators serving this stop."""
ret = set([i.onestop() for i in self.operators()])
ret |= set(self.data.get('servedBy', []))
return ret
def operators(self):
agencies = set()
for i in self.parents:
agencies |= i.parents
return agencies
def operator(self, onestop_id):
"""Return a single operator by Onestop ID."""
return util.filtfirst(self.operators(), onestop=onestop_id)
|
{
"content_hash": "f0086271db41e9a417a43b9a3d33fd02",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 101,
"avg_line_length": 25.14864864864865,
"alnum_prop": 0.6190220311660397,
"repo_name": "srthurman/transitland-python-client",
"id": "c70de641ae98dc858c030805f13192405a5475d2",
"size": "1861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transitland/stop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48540"
}
],
"symlink_target": ""
}
|
from click.testing import CliRunner
from pyskel.scripts.cli import cli
def test_cli_count():
runner = CliRunner()
result = runner.invoke(cli, ['3'])
assert result.exit_code == 0
assert result.output == "False\nFalse\nFalse\n"
|
{
"content_hash": "f5c55b5beba1458c569f8ba8250ffb8a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 24.5,
"alnum_prop": 0.689795918367347,
"repo_name": "mapbox/pyskel",
"id": "924cc90ee67e70a429deb803c0c84d018232b81a",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1465"
}
],
"symlink_target": ""
}
|
"""The Gumbel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
class _Gumbel(distribution.Distribution):
"""The scalar Gumbel distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The cumulative density function of this distribution is,
```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))```
The Gumbel distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Gumbel(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
tfd = tf.contrib.distributions
# Define a single scalar Gumbel distribution.
dist = tfd.Gumbel(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Gumbels.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tfd.Gumbel(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Gumbel"):
"""Construct Gumbel distributions with location and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s).
scale must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]):
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(_Gumbel, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = -math_ops.log(-math_ops.log(uniform))
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -math_ops.exp(-self._z(x))
def _cdf(self, x):
return math_ops.exp(-math_ops.exp(-self._z(x)))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - math_ops.exp(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 1 + math_ops.log(scale) + np.euler_gamma
def _mean(self):
return self.loc + self.scale * np.euler_gamma
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6)
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
|
{
"content_hash": "9f2d108511ddea6efd58eaa531f7f5b8",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 80,
"avg_line_length": 33.0622009569378,
"alnum_prop": 0.6589001447178003,
"repo_name": "Xeralux/tensorflow",
"id": "8d05ad6b8032fb8bada99389959091fb1c28beda",
"size": "7599",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/gumbel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
}
|
"""This module contains functions for fetching and extracting archived builds.
The builds may be stored in different places by different types of builders;
for example, builders on tryserver.chromium.perf stores builds in one place,
while builders on chromium.linux store builds in another.
This module can be either imported or run as a stand-alone script to download
and extract a build.
Usage: fetch_build.py <type> <revision> <output_dir> [options]
"""
import argparse
import errno
import logging
import os
import shutil
import sys
import zipfile
# Telemetry (src/tools/telemetry) is expected to be in the PYTHONPATH.
from telemetry.util import cloud_storage
import bisect_utils
# Possible builder types.
PERF_BUILDER = 'perf'
FULL_BUILDER = 'full'
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
target_arch='ia32', target_platform='chromium',
deps_patch_sha=None):
"""Returns the location where a build archive is expected to be.
Args:
revision: Revision string, e.g. a git commit hash or SVN revision.
builder_type: Type of build archive.
target_arch: Architecture, e.g. "ia32".
target_platform: Platform name, e.g. "chromium" or "android".
deps_patch_sha: SHA1 hash which identifies a particular combination of
custom revisions for dependency repositories.
Returns:
A pair of strings (bucket, path), where the archive is expected to be.
"""
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform)
bucket = build_archive.BucketName()
remote_path = build_archive.FilePath(revision, deps_patch_sha=deps_patch_sha)
return bucket, remote_path
class BuildArchive(object):
"""Represents a place where builds of some type are stored.
There are two pieces of information required to locate a file in Google
Cloud Storage, bucket name and file path. Subclasses of this class contain
specific logic about which bucket names and paths should be used to fetch
a build.
"""
@staticmethod
def Create(builder_type, target_arch='ia32', target_platform='chromium'):
if builder_type == PERF_BUILDER:
return PerfBuildArchive(target_arch, target_platform)
if builder_type == FULL_BUILDER:
return FullBuildArchive(target_arch, target_platform)
raise NotImplementedError('Builder type "%s" not supported.' % builder_type)
def __init__(self, target_arch='ia32', target_platform='chromium'):
if bisect_utils.IsLinuxHost() and target_platform == 'android':
self._platform = 'android'
elif bisect_utils.IsLinuxHost():
self._platform = 'linux'
elif bisect_utils.IsMacHost():
self._platform = 'mac'
elif bisect_utils.Is64BitWindows() and target_arch == 'x64':
self._platform = 'win64'
elif bisect_utils.IsWindowsHost():
self._platform = 'win'
else:
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def BucketName(self):
raise NotImplementedError()
def FilePath(self, revision, deps_patch_sha=None):
"""Returns the remote file path to download a build from.
Args:
revision: A Chromium revision; this could be a git commit hash or
commit position or SVN revision number.
deps_patch_sha: The SHA1 hash of a patch to the DEPS file, which
uniquely identifies a change to use a particular revision of
a dependency.
Returns:
A file path, which not does not include a bucket name.
"""
raise NotImplementedError()
def _ZipFileName(self, revision, deps_patch_sha=None):
"""Gets the file name of a zip archive for a particular revision.
This returns a file name of the form full-build-<platform>_<revision>.zip,
which is a format used by multiple types of builders that store archives.
Args:
revision: A git commit hash or other revision string.
deps_patch_sha: SHA1 hash of a DEPS file patch.
Returns:
The archive file name.
"""
base_name = 'full-build-%s' % self._PlatformName()
if deps_patch_sha:
revision = '%s_%s' % (revision, deps_patch_sha)
return '%s_%s.zip' % (base_name, revision)
def _PlatformName(self):
"""Return a string to be used in paths for the platform."""
if self._platform in ('win', 'win64'):
# Build archive for win64 is still stored with "win32" in the name.
return 'win32'
if self._platform in ('linux', 'android'):
# Android builds are also stored with "linux" in the name.
return 'linux'
if self._platform == 'mac':
return 'mac'
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
class PerfBuildArchive(BuildArchive):
def BucketName(self):
return 'chrome-perf'
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the directory name to download builds from."""
platform_to_directory = {
'android': 'android_perf_rel',
'linux': 'Linux Builder',
'mac': 'Mac Builder',
'win64': 'Win x64 Builder',
'win': 'Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
class FullBuildArchive(BuildArchive):
def BucketName(self):
platform_to_bucket = {
'android': 'chromium-android',
'linux': 'chromium-linux-archive',
'mac': 'chromium-mac-archive',
'win64': 'chromium-win-archive',
'win': 'chromium-win-archive',
}
assert self._platform in platform_to_bucket
return platform_to_bucket.get(self._platform)
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the remote directory to download builds from."""
platform_to_directory = {
'android': 'android_main_rel',
'linux': 'chromium.linux/Linux Builder',
'mac': 'chromium.mac/Mac Builder',
'win64': 'chromium.win/Win x64 Builder',
'win': 'chromium.win/Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
def BuildIsAvailable(bucket_name, remote_path):
"""Checks whether a build is currently archived at some place."""
logging.info('Checking existance: gs://%s/%s' % (bucket_name, remote_path))
try:
exists = cloud_storage.Exists(bucket_name, remote_path)
logging.info('Exists? %s' % exists)
return exists
except cloud_storage.CloudStorageError:
return False
def FetchFromCloudStorage(bucket_name, source_path, destination_dir):
"""Fetches file(s) from the Google Cloud Storage.
As a side-effect, this prints messages to stdout about what's happening.
Args:
bucket_name: Google Storage bucket name.
source_path: Source file path.
destination_dir: Destination file path.
Returns:
Local file path of downloaded file if it was downloaded. If the file does
not exist in the given bucket, or if there was an error while downloading,
None is returned.
"""
target_file = os.path.join(destination_dir, os.path.basename(source_path))
gs_url = 'gs://%s/%s' % (bucket_name, source_path)
try:
if cloud_storage.Exists(bucket_name, source_path):
logging.info('Fetching file from %s...', gs_url)
cloud_storage.Get(bucket_name, source_path, target_file)
if os.path.exists(target_file):
return target_file
else:
logging.info('File %s not found in cloud storage.', gs_url)
return None
except Exception as e:
logging.warn('Exception while fetching from cloud storage: %s', e)
if os.path.exists(target_file):
os.remove(target_file)
return None
def Unzip(file_path, output_dir, verbose=True):
"""Extracts a zip archive's contents into the given output directory.
This was based on ExtractZip from build/scripts/common/chromium_utils.py.
Args:
file_path: Path of the zip file to extract.
output_dir: Path to the destination directory.
verbose: Whether to print out what is being extracted.
Raises:
IOError: The unzip command had a non-zero exit code.
RuntimeError: Failed to create the output directory.
"""
_MakeDirectory(output_dir)
# On Linux and Mac, we use the unzip command because it handles links and
# file permissions bits, so achieving this behavior is easier than with
# ZipInfo options.
#
# The Mac Version of unzip unfortunately does not support Zip64, whereas
# the python module does, so we have to fall back to the python zip module
# on Mac if the file size is greater than 4GB.
mac_zip_size_limit = 2 ** 32 # 4GB
if (bisect_utils.IsLinuxHost() or
(bisect_utils.IsMacHost()
and os.path.getsize(file_path) < mac_zip_size_limit)):
unzip_command = ['unzip', '-o']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
# On Windows, try to use 7z if it is installed, otherwise fall back to the
# Python zipfile module. If 7z is not installed, then this may fail if the
# zip file is larger than 512MB.
sevenzip_path = r'C:\Program Files\7-Zip\7z.exe'
if bisect_utils.IsWindowsHost() and os.path.exists(sevenzip_path):
unzip_command = [sevenzip_path, 'x', '-y']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
_UnzipUsingZipFile(file_path, output_dir, verbose)
def _UnzipUsingCommand(unzip_command, file_path, output_dir):
"""Extracts a zip file using an external command.
Args:
unzip_command: An unzipping command, as a string list, without the filename.
file_path: Path to the zip file.
output_dir: The directory which the contents should be extracted to.
Raises:
IOError: The command had a non-zero exit code.
"""
absolute_filepath = os.path.abspath(file_path)
command = unzip_command + [absolute_filepath]
return_code = _RunCommandInDirectory(output_dir, command)
if return_code:
_RemoveDirectoryTree(output_dir)
raise IOError('Unzip failed: %s => %s' % (str(command), return_code))
def _RunCommandInDirectory(directory, command):
"""Changes to a directory, runs a command, then changes back."""
saved_dir = os.getcwd()
os.chdir(directory)
return_code = bisect_utils.RunProcess(command)
os.chdir(saved_dir)
return return_code
def _UnzipUsingZipFile(file_path, output_dir, verbose=True):
"""Extracts a zip file using the Python zipfile module."""
assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
zf = zipfile.ZipFile(file_path)
for name in zf.namelist():
if verbose:
print 'Extracting %s' % name
zf.extract(name, output_dir)
if bisect_utils.IsMacHost():
# Restore file permission bits.
mode = zf.getinfo(name).external_attr >> 16
os.chmod(os.path.join(output_dir, name), mode)
def _MakeDirectory(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _RemoveDirectoryTree(path):
try:
if os.path.exists(path):
shutil.rmtree(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def Main(argv):
"""Downloads and extracts a build based on the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('builder_type')
parser.add_argument('revision')
parser.add_argument('output_dir')
parser.add_argument('--target-arch', default='ia32')
parser.add_argument('--target-platform', default='chromium')
parser.add_argument('--deps-patch-sha')
args = parser.parse_args(argv[1:])
bucket_name, remote_path = GetBucketAndRemotePath(
args.revision, args.builder_type, target_arch=args.target_arch,
target_platform=args.target_platform,
deps_patch_sha=args.deps_patch_sha)
print 'Bucket name: %s, remote path: %s' % (bucket_name, remote_path)
if not BuildIsAvailable(bucket_name, remote_path):
print 'Build is not available.'
return 1
FetchFromCloudStorage(bucket_name, remote_path, args.output_dir)
print 'Build has been downloaded to and extracted in %s.' % args.output_dir
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
{
"content_hash": "041985ab0da5d6c88ef90cbf7eca7f4b",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 80,
"avg_line_length": 34.34444444444444,
"alnum_prop": 0.6895017793594306,
"repo_name": "Jonekee/chromium.src",
"id": "4aaee54fbbfc13d36e51a4955205a8497abff5c3",
"size": "12527",
"binary": false,
"copies": "9",
"ref": "refs/heads/nw12",
"path": "tools/auto_bisect/fetch_build.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34522"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9249764"
},
{
"name": "C++",
"bytes": "222763973"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1397246"
},
{
"name": "Objective-C++",
"bytes": "7575073"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032766"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
import threading
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.utils.server import ThreadPoolServer
from . import PupyService
import textwrap
import pkgutil
import modules
import logging
from .PupyErrors import PupyModuleExit, PupyModuleError
from .PupyJob import PupyJob
try:
import ConfigParser as configparser
except ImportError:
import configparser
from . import PupyClient
import os.path
class PupyServer(threading.Thread):
def __init__(self, configFile="pupy.conf"):
super(PupyServer, self).__init__()
self.daemon=True
self.server=None
self.authenticator=None
self.clients=[]
self.jobs={}
self.jobs_id=1
self.clients_lock=threading.Lock()
self.current_id=1
self.config = configparser.ConfigParser()
self.config.read(configFile)
self.port=self.config.getint("pupyd","port")
self.address=self.config.get("pupyd","address")
def add_client(self, conn):
with self.clients_lock:
conn.execute(textwrap.dedent(
"""
import platform
import getpass
import uuid
import sys
import os
def get_uuid():
user=None
node=None
plat=None
release=None
version=None
machine=None
macaddr=None
pid=None
proc_arch=None
proc_path=sys.executable
try:
user=getpass.getuser()
except Exception:
pass
try:
node=platform.node()
except Exception:
pass
try:
version=platform.platform()
except Exception:
pass
try:
plat=platform.system()
except Exception:
pass
try:
release=platform.release()
except Exception:
pass
try:
version=platform.version()
except Exception:
pass
try:
machine=platform.machine()
except Exception:
pass
try:
pid=os.getpid()
except Exception:
pass
try:
proc_arch=platform.architecture()[0]
except Exception:
pass
try:
macaddr=uuid.getnode()
macaddr=':'.join(("%012X" % macaddr)[i:i+2] for i in range(0, 12, 2))
except Exception:
pass
return (user, node, plat, release, version, machine, macaddr, pid, proc_arch, proc_path)
"""))
l=conn.namespace["get_uuid"]()
self.clients.append(PupyClient.PupyClient({
"id": self.current_id,
"conn" : conn,
"user" : l[0],
"hostname" : l[1],
"platform" : l[2],
"release" : l[3],
"version" : l[4],
"os_arch" : l[5],
"proc_arch" : l[8],
"exec_path" : l[9],
"macaddr" : l[6],
"pid" : l[7],
"address" : conn._conn._config['connid'].split(':')[0],
}, self))
self.current_id+=1
def remove_client(self, client):
with self.clients_lock:
for i,c in enumerate(self.clients):
if c.conn is client:
del self.clients[i]
break
def get_clients(self, search_criteria):
""" return a list of clients corresponding to the search criteria. ex: platform:*win* """
#if the criteria is a simple id we return the good client
try:
index=int(search_criteria)
for c in self.clients:
if int(c.desc["id"])==index:
return [c]
return []
except Exception:
pass
l=set([])
if search_criteria=="*":
return self.clients
for c in self.clients:
take=False
for sc in search_criteria.split():
tab=sc.split(":",1)
if len(tab)==2 and tab[0] in [x for x in c.desc.iterkeys()]:#if the field is specified we search for the value in this field
take=True
if not tab[1].lower() in str(c.desc[tab[0]]).lower():
take=False
break
elif len(tab)!=2:#if there is no field specified we search in every field for at least one match
take=False
for k,v in c.desc.iteritems():
if type(v) is unicode or type(v) is str:
if tab[0].lower() in v.decode('utf8').lower():
take=True
break
else:
if tab[0].lower() in str(v).decode('utf8').lower():
take=True
break
if not take:
break
if take:
l.add(c)
return list(l)
def get_clients_list(self):
return self.clients
def list_modules(self):
l=[]
for loader, module_name, is_pkg in pkgutil.iter_modules(modules.__path__):
module=self.get_module(module_name)
l.append((module_name,module.__doc__))
return l
def get_module(self, name):
script_found=False
for loader, module_name, is_pkg in pkgutil.iter_modules(modules.__path__):
if module_name==name:
script_found=True
module=loader.find_module(module_name).load_module(module_name)
class_name=None
if hasattr(module,"__class_name__"):
class_name=module.__class_name__
if not hasattr(module,class_name):
logging.error("script %s has a class_name=\"%s\" global variable defined but this class does not exists in the script !"%(script_name,class_name))
if not class_name:
#TODO automatically search the class name in the file
pass
return getattr(module,class_name)
def module_parse_args(self, module_name, args):
""" This method is used by the PupyCmd class to verify validity of arguments passed to a specific module """
module=self.get_module(module_name)
ps=module(None,None)
return ps.arg_parser.parse_args(args)
def del_job(self, job_id):
if job_id is not None:
job_id=int(job_id)
if job_id in self.jobs:
del self.jobs[job_id]
def add_job(self, job):
job.id=self.jobs_id
self.jobs[self.jobs_id]=job
self.jobs_id+=1
def get_job(self, job_id):
try:
job_id=int(job_id)
except ValueError:
raise PupyModuleError("job id must be an integer !")
if job_id not in self.jobs:
raise PupyModuleError("%s: no such job !"%job_id)
return self.jobs[job_id]
def run(self):
self.authenticator = SSLAuthenticator(self.config.get("pupyd","keyfile").replace("\\",os.sep).replace("/",os.sep), self.config.get("pupyd","certfile").replace("\\",os.sep).replace("/",os.sep), ciphers="SHA256+AES256:SHA1+AES256:@STRENGTH")
self.server = ThreadPoolServer(PupyService.PupyService, port = self.port, hostname=self.address, authenticator=self.authenticator)
self.server.start()
|
{
"content_hash": "523023fadcd00980f7412dccf012569c",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 241,
"avg_line_length": 27.212669683257918,
"alnum_prop": 0.655470568673096,
"repo_name": "paran0ids0ul/pupy",
"id": "66c8431f834ff0b09ecd11ca380d87ef21c9df83",
"size": "7696",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "pupy/pupylib/PupyServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7428"
},
{
"name": "C",
"bytes": "203798"
},
{
"name": "Python",
"bytes": "517858"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name = 'localbitcoins',
version = "0.1dev",
license = "GPL3",
description = "trade bitcoins locally",
long_description = open('README').read(),
author = "dinosaur",
author_email = "dinosaur@riseup.net",
package_dir = { '' : 'src' },
packages = [ 'localbitcoins'],
install_requires = ['setuptools', 'requests', 'campbx'],
entry_points = {
'console_scripts' : [
"main = localbitcoins.main:main",
"test = localbitcoins.test.main:main"
]
},
zip_safe = False)
|
{
"content_hash": "ace89c564f25c9098923f0c0445243d5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 31.105263157894736,
"alnum_prop": 0.5685279187817259,
"repo_name": "ahdinosaur/localbitcoins",
"id": "54653e0b03f1cb22fe22e09cff26ce538f936148",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21460"
}
],
"symlink_target": ""
}
|
"""
This module contains class for representing base json object
"""
import json
import copy
__all__ = ['BaseJsonObject']
class BaseJsonObject:
"""
Every Codeforces Json object should extend this class
"""
def __init__(self, data):
"""
:param data: Data in JSON format
:type data: str or dict
"""
assert isinstance(data, (str, dict)) or data is None
if data is not None:
if isinstance(data, str):
self.load_from_json(data)
else:
self.load_from_dict(data)
def __eq__(self, other):
if type(self) == type(other):
return self.__dict__ == other.__dict__
else:
return False
def __hash__(self):
return make_hash(self.__dict__)
def load_from_json(self, s):
"""
Loads data from given string in JSON format
:param s: Data in JSON format
:type s: str
"""
values = json.loads(s)
self.load_from_dict(values)
def load_from_dict(self, values):
"""
Loads data from given dictionary
:param values: Dictionary with values
:type values: dict
:exception ValueError: raised when given dictionary does not contain required field
"""
try:
self.load_required_fields_from_dict(values)
except KeyError as e:
raise ValueError('Missed required field', e.args[0])
self.load_optional_fields_from_dict(values)
def load_required_fields_from_dict(self, values):
"""
Loads required fields from given dictionary.
This method SHOULD NOT care if value was not given
Note: given dictionary may contain extra fields. just ignore them
:param values: Dictionary with values
:type values: dict
"""
assert isinstance(values, dict)
def load_optional_fields_from_dict(self, values):
"""
Loads optional fields from given dictionary.
Note: given dictionary may not contain needed value. It is recommended to use dict.get method
The given dictionary may also contain extra fields. Just ignore them
:param values: Dictionary with optional values
:type values: dict
"""
assert isinstance(values, dict)
# http://stackoverflow.com/a/8714242/1532460
def make_hash(o):
"""
Makes a hash from a dictionary, list, tuple or set to any level, that contains
only other hashable types (including any lists, tuples, sets, and
dictionaries).
"""
if isinstance(o, (set, tuple, list)):
return tuple([make_hash(e) for e in o])
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hash(v)
return hash(tuple(frozenset(sorted(new_o.items()))))
|
{
"content_hash": "402dc225af3d6c6cdaec0a66ef28e258",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 101,
"avg_line_length": 26.824074074074073,
"alnum_prop": 0.597859855022437,
"repo_name": "soon/CodeforcesAPI",
"id": "70d3dbe41783fd9cd5b19ca5e8959b0d79391805",
"size": "2897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codeforces/api/json_objects/base_json_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150288"
}
],
"symlink_target": ""
}
|
from flask import Flask, url_for, request, session, redirect, render_template
from flask_oauth import OAuth
FACEBOOK_APP_ID = '293823757717281'
FACEBOOK_APP_SECRET = '49b409de34973230b046a4a091cea4a6'
app = Flask(__name__)
app.secret_key = '9ujYZLNmd6NIdPdlTSVJ'
oauth = OAuth()
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_APP_ID,
consumer_secret=FACEBOOK_APP_SECRET,
request_token_params={'scope': ('email, ')}
)
@facebook.tokengetter
def get_facebook_token():
return session.get('facebook_token')
def pop_login_session():
session.pop('logged_in', None)
session.pop('facebook_token', None)
@app.route("/facebook_login")
def facebook_login():
return facebook.authorize(callback=url_for('facebook_authorized',
next=request.args.get('next'), _external=True))
@app.route('/')
def index():
if session.get('logged_in'):
data = facebook.get('/me').data
return render_template('index.html', data = op)
return render_template('index.html')
@app.route("/facebook_authorized")
@facebook.authorized_handler
def facebook_authorized(resp):
next_url = request.args.get('next') or url_for('index')
if resp is None or 'access_token' not in resp:
return redirect(next_url)
session['logged_in'] = True
session['facebook_token'] = (resp['access_token'], '')
return redirect(next_url)
@app.route("/logout")
def logout():
pop_login_session()
return redirect(url_for('index'))
if __name__ == "__main__":
app.run(host='0.0.0.0', debug = True)
|
{
"content_hash": "46eb9d8cdd81007f5e6110dcaca62b8e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 29.344827586206897,
"alnum_prop": 0.6880141010575793,
"repo_name": "adwalvekar/FacebookAppTest",
"id": "e793558c92adc86ff1bb7817d48369c82fd64f8e",
"size": "1702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159514"
},
{
"name": "HTML",
"bytes": "1337"
},
{
"name": "Python",
"bytes": "1702"
}
],
"symlink_target": ""
}
|
class QWeiboError(Exception):
"""basic weibo error class"""
pass
def assertion(condition, msg):
try:
assert condition, msg
except AssertionError as e:
raise QWeiboError(e.message)
|
{
"content_hash": "a25f6ae53385c480d786d2ad72889eb6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 36,
"avg_line_length": 19.545454545454547,
"alnum_prop": 0.6558139534883721,
"repo_name": "2014/qqweibo",
"id": "c151eb172d4dbd99cd51f1b34d8e80c6d790f932",
"size": "371",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "qqweibo/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135853"
}
],
"symlink_target": ""
}
|
from BeautifulSoup import BeautifulSoup
# for each file in folder
import glob
files = glob.glob("amazonproducts/*.html")
# find all the products
for fileName in files :
with open(fileName) as f:
text = BeautifulSoup(f)
print text.findAll(class="productTitle")
# write out file for later
|
{
"content_hash": "9e3dae1e9346572d8204137678933740",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 44,
"avg_line_length": 21.5,
"alnum_prop": 0.7375415282392026,
"repo_name": "dgonzo/bmdc_skullcandy",
"id": "bf02fbc9f63f296e7ca852555c87bb4f59d36ac2",
"size": "301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collections/amazon_review_downloader/extractAmazonProducts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1322"
},
{
"name": "JavaScript",
"bytes": "24280"
},
{
"name": "Perl",
"bytes": "5862"
},
{
"name": "Python",
"bytes": "30009"
},
{
"name": "R",
"bytes": "9795"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from snisi_core.models.Projects import Domain
from snisi_sms.common import test, echo, change_passwd, ask_for_help
logger = logging.getLogger(__name__)
def snisi_sms_handler(message):
# migration to non-snisi prefixed SMS
if message.content.startswith('snisi '):
message.text = message.content[6:]
message.save()
logger.debug("Incoming SMS from {}: {}".format(
message.identity, message.content))
keywords = {'test': test,
'echo': echo,
'passwd': change_passwd,
'help': ask_for_help}
for domain in Domain.active.all():
domain_kw = domain.import_from('sms_handlers.KEYWORDS')
if domain_kw:
keywords.update(domain_kw)
for keyword, handler in keywords.items():
if message.content.lower().startswith(keyword):
return handler(message)
# message.respond("Message non pris en charge.")
return False
|
{
"content_hash": "bb315ed3ccfd02c69c2e1be7f52e67df",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 30.771428571428572,
"alnum_prop": 0.627669452181987,
"repo_name": "yeleman/snisi",
"id": "1836e0e3681620d8fc4eb0183ed92b15a1e39f67",
"size": "1156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_sms/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
import mimeo
import argparse
import os
import sys
import shutil
def mainArgs():
parser = argparse.ArgumentParser(description='Find all high-identity segments shared between genomes.',
prog='mimeo-map')
# Input options
parser.add_argument('--adir',type=str,default=None,help='Name of directory containing sequences from A genome.')
parser.add_argument('--bdir',type=str,default=None,help='Name of directory containing sequences from B genome.')
parser.add_argument('--afasta',type=str,default=None,help='A genome as multifasta.')
parser.add_argument('--bfasta',type=str,default=None,help='B genome as multifasta.')
parser.add_argument('-r','--recycle',action="store_true",help='Use existing alignment "--outfile" if found.')
# Output options
parser.add_argument('-d', '--outdir',type=str,default=None,help='Write output files to this directory. (Default: cwd)')
parser.add_argument('--gffout',type=str,default=None,help='Name of GFF3 annotation file. If not set, suppress output.')
parser.add_argument('--outfile',type=str,default="mimeo_alignment.tab",help='Name of alignment result file.')
parser.add_argument('--verbose',action="store_true",default=False,help='If set report LASTZ progress.')
parser.add_argument('--label',type=str,default="BHit",help='Set annotation TYPE field in gff.')
parser.add_argument('--prefix',type=str,default="BHit",help='ID prefix for B-genome hits annotated in A-genome.')
parser.add_argument('--keeptemp',action="store_true",default=False,help='If set do not remove temp files.')
# Alignment options
parser.add_argument('--lzpath',type=str,default="lastz",help='Custom path to LASTZ executable if not in $PATH.')
parser.add_argument('--minIdt',type=int,default=60,help='Minimum alignment identity to report.')
parser.add_argument('--minLen',type=int,default=100,help='Minimum alignment length to report.')
parser.add_argument('--hspthresh',type=int,default=3000,help='Set HSP min score threshold for LASTZ.')
# TRF filtering
parser.add_argument('--TRFpath',type=str,default="trf",help='Custom path to TRF executable if not in $PATH.')
parser.add_argument('--tmatch',type=int,default=2,help='TRF matching weight')
parser.add_argument('--tmismatch',type=int,default=7,help='TRF mismatching penalty')
parser.add_argument('--tdelta',type=int,default=7,help='TRF indel penalty')
parser.add_argument('--tPM',type=int,default=80,help='TRF match probability')
parser.add_argument('--tPI',type=int,default=10,help='TRF indel probability')
parser.add_argument('--tminscore',type=int,default=50,help='TRF minimum alignment score to report')
parser.add_argument('--tmaxperiod',type=int,default=50,help='TRF maximum period size to report')
parser.add_argument('--maxtandem',type=float,default=None,help='Max percentage of an A-genome alignment which may be masked by TRF. If exceeded, alignment will be discarded.')
parser.add_argument('--writeTRF',action="store_true",default=False,help='If set write TRF filtered alignment file for use with other mimeo modules.')
args = parser.parse_args()
return args
def main():
# Get cmd line args
args = mainArgs()
# Check for required programs.
tools = [args.lzpath, args.TRFpath]
missing_tools = []
for tool in tools:
missing_tools += mimeo.missing_tool(tool)
if missing_tools:
print('WARNING: Some tools required by mimeo could not be found: ' +
', '.join(missing_tools))
print('You may need to install them to use all features.')
# Set output paths
adir_path, bdir_path, outdir, outtab, gffout, tempdir = mimeo.set_paths(
adir=args.adir, bdir=args.bdir, afasta=args.afasta, bfasta=args.bfasta,
outdir=args.outdir, outtab=args.outfile, gffout=args.gffout,
runtrf=args.maxtandem)
# Get all B to A alignment pairs
pairs = mimeo.get_all_pairs(Adir=adir_path, Bdir=bdir_path)
# Get chrm lens for GFF header
chrLens = mimeo.chromlens(seqDir=adir_path)
# Do not realign if outtab exists AND recycle mode is set
if not args.recycle or not os.path.isfile(outtab):
if not pairs:
print("No files to align. Check --adir and --bdir contain \
at least one fasta each.")
sys.exit(1)
# Compose alignment commands
cmds = mimeo.map_LZ_cmds(lzpath=args.lzpath, pairs=pairs,
minIdt=args.minIdt, minLen=args.minLen,
hspthresh=args.hspthresh,
outfile=outtab, verbose=args.verbose)
# Run alignments
mimeo.run_cmd(cmds, verbose=args.verbose, keeptemp=args.keeptemp)
# Import alignment as df
alignments = mimeo.import_Align(infile=outtab, prefix=args.prefix,
minLen=args.minLen, minIdt=args.minIdt)
# Filter alignments if A-genome location >= x% masked by TRF
if args.maxtandem:
alignments = mimeo.trfFilter(alignDF=alignments, tempdir=tempdir,
prefix=args.prefix, adir=adir_path,
TRFpath=args.TRFpath, tmatch=args.tmatch,
tmismatch=args.tmismatch,
tdelta=args.tdelta, tPM=args.tPM,
tPI=args.tPI, tminscore=args.tminscore,
tmaxperiod=args.tmaxperiod,
maxtandem=args.maxtandem)
if args.writeTRF:
mimeo.writetrf(alignDF=alignments, outtab=outtab)
# Write to GFF3
if gffout:
with open(gffout, 'w') as f:
for x in mimeo.writeGFFlines(alnDF=alignments, chrlens=chrLens,
ftype=args.label):
f.write(x)
if tempdir and os.path.isdir(tempdir) and not args.keeptemp:
shutil.rmtree(tempdir)
print("Finished!")
|
{
"content_hash": "a21863c85b71d327915ac84e6d2a2e89",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 179,
"avg_line_length": 59.34313725490196,
"alnum_prop": 0.6489344126879233,
"repo_name": "Adamtaranto/mimeo",
"id": "460d2420ab3a949af8a7d2566760172627633104",
"size": "6100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mimeo/run_map.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40899"
},
{
"name": "Shell",
"bytes": "18180"
}
],
"symlink_target": ""
}
|
from .auth import auth
|
{
"content_hash": "41a7766c8e789c5c5a4cc0bb4c1082a3",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 12,
"alnum_prop": 0.75,
"repo_name": "najce/freebook",
"id": "f683fb30744044b3e47aefcf35670530fb528393",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "770"
},
{
"name": "HTML",
"bytes": "28785"
},
{
"name": "JavaScript",
"bytes": "5268"
},
{
"name": "Python",
"bytes": "31230"
}
],
"symlink_target": ""
}
|
from subconscious.model import RedisModel, Column, InvalidModelDefinition, UnexpectedColumnError
from uuid import uuid1
from .base import BaseTestCase
import enum
class StatusEnum(enum.Enum):
ACTIVE = 'active'
class TestUser(RedisModel):
id = Column(primary_key=True)
name = Column(index=True)
age = Column(index=True, type=int)
locale = Column(index=True, type=int, required=False)
status = Column(type=str, enum=StatusEnum, index=True)
class TestSaveAndLoad(BaseTestCase):
def test_save_and_load(self):
user_id = str(uuid1())
user = TestUser(id=user_id, name='Test name', age=100, status='active')
ret = self.loop.run_until_complete(user.save(self.db))
self.assertTrue(ret)
# load
user_in_db = self.loop.run_until_complete(TestUser.load(self.db, identifier=user_id))
self.assertEqual(user_in_db.name, user.name)
def test_init_model_with_no_indexed_cols_should_error(self):
with self.assertRaises(InvalidModelDefinition):
class BadModel(RedisModel):
unindex_col = Column()
class BadSave(BaseTestCase):
def test_unexpected_column_should_fail(self):
class TestModel(RedisModel):
id = Column(type=int, primary_key=True)
with self.assertRaises(UnexpectedColumnError):
TestModel(id=1, this_column_does_not_exist='foo')
|
{
"content_hash": "09b8f16b89634b98b9cbf1a213374e22",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 31.044444444444444,
"alnum_prop": 0.6800286327845383,
"repo_name": "paxos-bankchain/subconscious",
"id": "14988f88c8c8903f5d5120e03bb6bb4347d00f78",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_save_and_load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34889"
}
],
"symlink_target": ""
}
|
from typing import Tuple
from typing import List
from typing import Any
from matplotlib.patches import Circle
from matplotlib.transforms import ScaledTranslation
from compas.geometry import Point
from compas.artists import PrimitiveArtist
from .artist import PlotterArtist
Color = Tuple[float, float, float]
class PointArtist(PlotterArtist, PrimitiveArtist):
"""Artist for COMPAS points.
Parameters
----------
point : :class:`~compas.geometry.Point`
A COMPAS point.
size : int, optional
The size of the point.
facecolor : Color, optional
Color of the interior of the point representing the point.
edgecolor : Color, optional
Color of the boundary of the point representing the point.
zorder : int, optional
Stacking order above the XY plane of the plotter canvas.
**kwargs : dict, optional
Additional keyword arguments. See :class:`PlotterArtist` or :class:`PrimitiveArtist`.
Attributes
----------
point : :class:`~compas.geometry.Point`
The point associated with the artist.
size : float
Size of the point, relative to the resolution of the plotter.
``size = self._size / self.plotter.dpi``.
"""
def __init__(
self,
point: Point,
size: int = 5,
facecolor: Color = (1.0, 1.0, 1.0),
edgecolor: Color = (0, 0, 0),
zorder: int = 9000,
**kwargs: Any
):
super().__init__(primitive=point, **kwargs)
self._mpl_circle = None
self._size = None
self.size = size
self.facecolor = facecolor
self.edgecolor = edgecolor
self.zorder = zorder
@property
def point(self) -> Point:
return self.primitive
@point.setter
def point(self, point: Point):
self.primitive = point
@property
def _T(self):
F = self.plotter.figure.dpi_scale_trans
S = ScaledTranslation(self.point[0], self.point[1], self.plotter.axes.transData)
T = F + S
return T
@property
def size(self) -> float:
return self._size / self.plotter.dpi
@size.setter
def size(self, size: int):
self._size = size
@property
def data(self) -> List[List[float]]:
return [self.point[:2]]
def draw(self) -> None:
"""Draw the circle.
Returns
-------
None
"""
circle = Circle(
[0, 0],
radius=self.size,
facecolor=self.facecolor,
edgecolor=self.edgecolor,
transform=self._T,
zorder=self.zorder,
)
self._mpl_circle = self.plotter.axes.add_artist(circle)
self.update_data()
def redraw(self) -> None:
"""Update the point using the current geometry and visualization settings.
Returns
-------
None
"""
self._mpl_circle.set_radius(self.size)
self._mpl_circle.set_edgecolor(self.edgecolor)
self._mpl_circle.set_facecolor(self.facecolor)
self._mpl_circle.set_transform(self._T)
self.update_data()
|
{
"content_hash": "239885342b714c924e796c12c0fb4aac",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 93,
"avg_line_length": 26.158333333333335,
"alnum_prop": 0.5931825422108952,
"repo_name": "compas-dev/compas",
"id": "30ec854e1f65f269b36c183dc52b19019562f480",
"size": "3139",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas_plotters/artists/pointartist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
}
|
from email.message import Message
import os
import tempfile
from tests.BaseTestClasses import Email2PDFTestCase
class TestBasic(Email2PDFTestCase):
def setUp(self):
super(TestBasic, self).setUp()
self.msg = Message()
def test_simple(self):
self.addHeaders()
(rc, output, error) = self.invokeAsSubprocess()
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_simple_with_explicit_stdin(self):
self.addHeaders()
(rc, output, error) = self.invokeAsSubprocess(extraParams=['-i-'])
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_help(self):
(rc, output, error) = self.invokeAsSubprocess(extraParams=['--help'], expectOutput=True)
self.assertEqual(0, rc)
self.assertRegex(output, 'usage:')
self.assertEqual(error, '')
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_invalid_option(self):
(rc, output, error) = self.invokeAsSubprocess(extraParams=['--invalid-option'])
self.assertEqual(2, rc)
self.assertRegex(error, 'ERROR: unrecognized.*')
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_dont_print_body(self):
(rc, output, error) = self.invokeAsSubprocess(extraParams=['--no-body'])
self.assertEqual(1, rc)
self.assertFalse(self.existsByTime())
self.assertRegex(error, "body.*any.*attachments")
self.assertTrue(self.existsByTimeWarning())
self.assertRegex(self.getWarningFileContents(), "body.*any.*attachments")
self.assertTrue(self.existsByTimeOriginal())
self.assertValidOriginalFileContents()
def test_dont_print_body_mostly_hide_warnings(self):
(rc, output, error) = self.invokeAsSubprocess(extraParams=['--no-body', '--mostly-hide-warnings'])
self.assertEqual(0, rc)
self.assertFalse(self.existsByTime())
self.assertEqual('', error)
self.assertTrue(self.existsByTimeWarning())
self.assertRegex(self.getWarningFileContents(), "body.*any.*attachments")
self.assertTrue(self.existsByTimeOriginal())
self.assertValidOriginalFileContents()
def test_no_message_headers(self):
(rc, output, error) = self.invokeAsSubprocess()
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_no_message_headers_mostly_hide_warnings(self):
(rc, output, error) = self.invokeAsSubprocess(extraParams=['--mostly-hide-warnings'])
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_withinputfile(self):
self.addHeaders()
(rc, output, error) = self.invokeAsSubprocess(inputFile=True)
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_nosubject(self):
self.addHeaders(Email2PDFTestCase.DEFAULT_FROM, Email2PDFTestCase.DEFAULT_TO, None)
(rc, output, error) = self.invokeAsSubprocess()
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent(self):
self.addHeaders()
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess()
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertRegex(self.getPDFText(self.getTimedFilename()), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_upsidedown(self):
self.addHeaders()
self.setPlainContent("ɯɐɹƃoɹd ɟpdᄅlᴉɐɯǝ ǝɥʇ ɟo ʇsǝʇ ɐ sᴉ sᴉɥʇ ollǝH")
(rc, output, error) = self.invokeAsSubprocess()
self.assertEqual(0, rc)
self.assertTrue(self.existsByTime())
self.assertEqual('', error)
self.assertRegex(self.getPDFText(self.getTimedFilename()), "ɯɐɹƃoɹd ɟpdᄅlᴉɐɯǝ ǝɥʇ ɟo ʇsǝʇ ɐ sᴉ sᴉɥʇ ollǝH")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_poundsign_iso88591(self):
self.addHeaders()
path = os.path.join(self.examineDir, "plaincontent_poundsign_iso88591.pdf")
self.setPlainContent("Hello - this email costs \xa35!", charset="ISO-8859-1")
(rc, output, error) = self.invokeAsSubprocess(outputFile=path)
self.assertEqual(0, rc)
self.assertEqual('', error)
self.assertTrue(os.path.exists(path))
self.assertRegex(self.getPDFText(path), "Hello - this email costs \xa35!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_notrailingslash(self):
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess(outputDirectory="/tmp")
self.assertEqual(0, rc)
self.assertEqual('', error)
self.assertTrue(self.existsByTime("/tmp"))
self.assertRegex(self.getPDFText(self.getTimedFilename("/tmp/")), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_trailingslash(self):
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess(outputDirectory="/tmp/")
self.assertEqual(0, rc)
self.assertEqual('', error)
self.assertTrue(self.existsByTime("/tmp/"))
self.assertRegex(self.getPDFText(self.getTimedFilename("/tmp/")), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_outputfileoverrides(self):
filename = os.path.join(self.examineDir, "outputFileOverrides.pdf")
with tempfile.TemporaryDirectory() as pathname:
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess(outputDirectory=pathname, outputFile=filename)
self.assertEqual(0, rc)
self.assertEqual('', error)
self.assertFalse(self.existsByTime(pathname))
self.assertTrue(os.path.exists(filename))
self.assertRegex(self.getPDFText(filename), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_plaincontent_fileexist(self):
self.setPlainContent("Hello!")
with tempfile.NamedTemporaryFile() as tmpfile:
(rc, output, error) = self.invokeAsSubprocess(outputFile=tmpfile.name, okToExist=True)
self.assertEqual(2, rc)
self.assertRegex(error, "file.*exist")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_verbose(self):
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess(extraParams=['-v'])
self.assertEqual(0, rc)
self.assertNotEqual('', error)
self.assertTrue(self.existsByTime())
self.assertRegex(self.getPDFText(self.getTimedFilename()), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
def test_veryverbose(self):
self.setPlainContent("Hello!")
(rc, output, error) = self.invokeAsSubprocess(extraParams=['-vv'])
self.assertEqual(0, rc)
self.assertNotEqual('', error)
self.assertTrue(self.existsByTime())
self.assertRegex(self.getPDFText(self.getTimedFilename()), "Hello!")
self.assertFalse(self.existsByTimeWarning())
self.assertFalse(self.existsByTimeOriginal())
|
{
"content_hash": "a565d30a2e61320caa99529daacfd907",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 115,
"avg_line_length": 44.333333333333336,
"alnum_prop": 0.6681318681318681,
"repo_name": "andrewferrier/email2pdf",
"id": "5a27e23280dfd3e653f024476c85156179f29670",
"size": "8701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/Subprocess/test_Subprocess_Basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1393"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Python",
"bytes": "123550"
}
],
"symlink_target": ""
}
|
class ChannelApplicator(object):
def __init__(self,):
super(ChannelApplicator, self).__init__()
self.target_channel = None
def set_target_channel(self, target_channel):
assert self.target_channel is None
self.target_channel = target_channel
# Check that everything that is over-ridden actually exists:
for varname in self.get_variables_overriden():
assert varname in self.target_channel.get_variables(), 'unexpected setting of %s' % varname
def get_variable_value_for_section(self, variablename, section):
raise NotImplementedError()
def get_description(self):
raise NotImplementedError()
def get_variables_overriden(self):
raise NotImplementedError()
class ChannelApplicatorUniform(ChannelApplicator):
def __init__(self, parameter_multipliers=None, parameter_overrides=None):
super(ChannelApplicatorUniform, self).__init__()
self._parameter_multipliers = parameter_multipliers or {}
self._parameter_overrides = parameter_overrides or {}
# Check no parameters are specified twice:
duplicate_defs = set(self._parameter_multipliers.keys()) & set(self._parameter_overrides.keys())
assert len(duplicate_defs) == 0, 'Ambiguity: Parameter specified twice: %s' % duplicate_defs
def get_variables_overriden(self):
return set(self._parameter_multipliers.keys()) | set(self._parameter_overrides.keys())
def get_variable_value_for_section(self, variable_name, section):
assert not ( variable_name in self._parameter_multipliers and variable_name in self._parameter_overrides)
if variable_name in self._parameter_multipliers:
return self._parameter_multipliers[variable_name] * self.target_channel.get_default(variable_name)
if variable_name in self._parameter_overrides:
return self._parameter_overrides[variable_name]
return self.target_channel.get_default(variable_name)
def get_description(self):
s1 = 'Uniform Applicator: '
s2 = ('Overrides:{%s} ' % (','.join( [ "%s=%s" % (key,value) for (key,value) in self._parameter_overrides.iteritems()] )) ) if self._parameter_overrides else ''
s3 = ('Multipliers:{%s} ' % (','.join( [ "%s=%s" % (key,value) for (key,value) in self._parameter_multipliers.iteritems()] )) ) if self._parameter_multipliers else ''
return (s1 + s2 + s3).strip()
|
{
"content_hash": "7bb2b19665246b43e03d6493243e4e24",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 174,
"avg_line_length": 41.083333333333336,
"alnum_prop": 0.6709939148073022,
"repo_name": "mikehulluk/morphforge",
"id": "f3c5c8dd9a663a834c6e870880a0318c8749de9e",
"size": "4005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/simulation/base/biophysics/channelapplicators.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
}
|
"""A helper module to work with metadata associated with the dataset.
The metadata is a JSON file containing a single dictionary with key/value
pairs.
The module contains both helper functions to work with the metadata and
constants for fields that are expected to be included.
"""
import json
import logging
import os.path
import tensorflow.compat.v1 as tf
gfile = tf.gfile
# Constants for fields contained in the metadata for the dataset.
MIN_WIDTH = 'min_width' # The min width of an image in the dataset.
MIN_HEIGHT = 'min_height' # The max height of an image in the dataset.
MAX_WIDTH = 'max_width' # The max width of an image in the dataset.
MAX_HEIGHT = 'max_height' # The max height of an image in the dataset.
NUM_EXAMPLES_POS = 'num_examples_positive' # Number of positive examples.
NUM_EXAMPLES_NEG = 'num_examples_negative' # Number of negative examples.
NUM_EXAMPLES_TRAIN = 'num_examples_train' # Number of negative examples.
NUM_EXAMPLES_TEST = 'num_examples_test' # Number of negative examples.
NUM_EXAMPLES_IGNORED = 'num_examples_ignored' # Number of negative examples.
def metadata_path(data_dir=None, model_dir=None):
"""Returns path to serialized metadata under either data_dir or model_dir.
This metadata file is generated by mr_tfexamples and records information about
the table's contents and construction.
Args:
data_dir: Directory with training, testing data. Takes precedence over
model_dir.
model_dir: Model training directory.
Returns:
String path to file with serialized metadata.
Raises:
ValueError: If neither data_dir or model_dir are set.
"""
if data_dir and not model_dir:
return os.path.join(data_dir, 'metadata.json')
if model_dir and not data_dir:
return os.path.join(model_dir, 'input_metadata.json')
raise ValueError('One of data_dir or model_dir must be set')
def shape_from_metadata(metadata):
"""Returns the shape of the largest images in the dataset.
Args:
metadata: The metadata dictionary
Returns:
A (height, width) shape tuple.
"""
max_shape = (metadata[MAX_HEIGHT], metadata[MAX_WIDTH])
return max_shape
def load_metadata(data_dir=None, model_dir=None):
"""Loads metadata structure from file.
Args:
data_dir: Directory with training, testing data.
model_dir: Model training directory.
Returns:
A dictionary of the metadata for the dataset.
"""
path = metadata_path(data_dir=data_dir, model_dir=model_dir)
logging.info('Loading metadata from path: %s', path)
with gfile.Open(path) as fl:
return json.load(fl)
|
{
"content_hash": "5ada4a6e1ca3a542a12dff28991b9bd9",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 32.81012658227848,
"alnum_prop": 0.7291666666666666,
"repo_name": "verilylifesciences/classifaedes",
"id": "127208bae3fc7810692648de8581f40c2f6872d8",
"size": "3181",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "classifaedes/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "782"
},
{
"name": "Python",
"bytes": "39504"
},
{
"name": "Starlark",
"bytes": "5233"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('geo', '0022_auto_20210129_0951'),
('time_based', '0054_auto_20210129_0951'),
]
operations = [
migrations.AddField(
model_name='periodactivity',
name='is_online',
field=models.NullBooleanField(choices=[(None, 'Not set yet'), (True, 'Yes, participants can join from anywhere or online'), (False, 'No, enter a location')], default=None, verbose_name='is online'),
),
migrations.AddField(
model_name='periodactivity',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='geo.Geolocation', verbose_name='location'),
),
migrations.AddField(
model_name='periodactivity',
name='location_hint',
field=models.TextField(blank=True, null=True, verbose_name='location hint'),
),
]
|
{
"content_hash": "4e6ebfa4466955719550505f035ad775",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 210,
"avg_line_length": 36.46666666666667,
"alnum_prop": 0.6188299817184644,
"repo_name": "onepercentclub/bluebottle",
"id": "7f818516d10f9d0ba4ae6e01ba57cf5259f58ce3",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/time_based/migrations/0055_auto_20210129_0951.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from designate.objects import base
class Server(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
FIELDS = {
'name': {
'schema': {
'type': 'string',
'description': 'Zone name',
'format': 'domainname',
'maxLength': 255,
},
'immutable': True,
'required': True
}
}
STRING_KEYS = [
'id', 'name'
]
class ServerList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = Server
|
{
"content_hash": "41d9c8eb0bac060da74527a4e4b2a042",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 23.04,
"alnum_prop": 0.5017361111111112,
"repo_name": "ramsateesh/designate",
"id": "b2d9cfb7f9ee0c7a1c6728ea53d886f706b066cb",
"size": "1212",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "designate/objects/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2214082"
},
{
"name": "Ruby",
"bytes": "4170"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
}
|
import sys, time, random, math, pygame
# Class
# class Point
class Point(object):
def __init__(self, x, y):
self.__x = x
self.__y = y
# X property
def getx(self):
return self.__x
def setx(self, x):
self.__x = x
x = property(getx, setx)
# Y property
def gety(self):
return self.__y
def sety(self, y):
self.__y = y
y = property(gety, sety)
def __str__(self):
return "{X:" + "{:.0f}".format(self.__x) + \
", Y:" + "{:.0f}".format(self.__y) + "}"
# class Sprite
class Sprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)# extend the base Sprite class
self.master_image = None
self.frame = 0
self.ord_frame = -1
self.frame_width = 1
self.frame_height = 1
self.first_frame = 0
self.last_frame = 0
self.columns = 1
self.last_time = 0
self.direction = 0
self.velocity = Point(0 ,0)
# x property
def _getx(self): return self.rect.x
def _setx(self, value): self.rect.x = value
x = property(_getx, _setx)
# y property
def _gety(self): return self.rect.y
def _sety(self, value): self.rect.y = value
y = property(_gety, _sety)
# position property
def _getpos(self): return self.rect.topleft
def _setpos(self, pos): self.rect.topleft = pos
position = property(_getpos, _setpos)
def load(self, filename, width, height, columns):
self.master_image = pygame.image.load(filename).convert_alpha()
self.frame_width = width
self.frame_height = height
self.rect = pygame.Rect(0, 0, width, height)
self.columns = columns
# try to auto-calculate total frames
rect = self.master_image.get_rect()
self.last_frame = (rect.width // width) * (rect.height // height) - 1
def update(self, current_time, rate=30):
# upate animation frame number
if current_time > self.last_time + rate:
self.frame += 1
if self.frame > self.last_frame:
self.frame = self.first_frame
self.last_time = current_time
# build current frame only if it changed
if self.frame != self.ord_frame:
frame_x = (self.frame % self.columns) * self.frame_width
frame_y = (self.frame // self.columns) * self.frame_height
rect = pygame.Rect(frame_x, frame_y, self.frame_width, self.frame_height)
self.image = self.master_image.subsurface(rect)
self.ord_frame = self.frame
def __str__(self):
return str(self.frame) + ',' + str(self.first_frame) + \
',' + str(self.last_frame) + ',' + str(self.frame_width) + \
',' + str(self.frame_height) + ',' + str(self.columns) + \
',' + str(self.rect)
# Def
# def print_text
def print_text(font,screen,x, y, text, color=(255, 255, 255)):
imgText = font.render(text, True, color)
screen.blit(imgText, (x, y))
|
{
"content_hash": "0e8be5888fe999fe2ef613969813918b",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 85,
"avg_line_length": 31.585858585858585,
"alnum_prop": 0.5481291973137192,
"repo_name": "sun1218/Pygames",
"id": "47dffe40df70f38eabd521c575b37634ddab066c",
"size": "3127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "game/zombie_mob/library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126530"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from mongoengine import *
from hackathon.util import get_now, make_serializable
from hackathon.constants import TEMPLATE_STATUS, HACK_USER_TYPE
from pagination import Pagination
def to_dic(obj):
ret = make_serializable(obj.to_mongo().to_dict())
# normalize
if "_id" in ret:
ret["id"] = ret.pop("_id")
if "_cls" in ret:
ret.pop("_cls")
return ret
class HQuerySet(QuerySet):
"""add some handy helpers on the default query set from mongoengine
"""
def paginate(self, page, per_page):
return Pagination(self, page, per_page)
class HDocumentBase(DynamicDocument):
"""
DB model base class, providing basic functions
"""
create_time = DateTimeField(default=get_now())
update_time = DateTimeField(default=get_now())
meta = {
'allow_inheritance': True,
'abstract': True,
'queryset_class': HQuerySet}
def __init__(self, **kwargs):
super(HDocumentBase, self).__init__(**kwargs)
def dic(self):
return to_dic(self)
def __repr__(self):
return '%s: %s' % (self.__class__.__name__, self.to_json())
class UserEmail(EmbeddedDocument):
email = StringField()
primary_email = BooleanField()
verified = BooleanField()
class UserProfile(DynamicEmbeddedDocument):
address = StringField()
age = IntField(min_value=1)
career = StringField()
career_type = StringField()
gender = IntField() # 0:women 1:man
notes = StringField() # a short activity or mood
phone = StringField(max_length=20)
qq = StringField()
real_name = StringField(max_length=80)
self_introduction = StringField()
skype = StringField()
wechat = StringField()
weibo = StringField()
avatar_url = URLField() # high priority than avatar_url in User
class User(HDocumentBase):
name = StringField(max_length=50, min_length=1, required=True)
nickname = StringField(max_length=50, min_length=1, required=True)
password = StringField(max_length=100)
emails = EmbeddedDocumentListField(UserEmail)
is_super = BooleanField(default=False)
profile = EmbeddedDocumentField(UserProfile)
provider = StringField(max_length=20)
openid = StringField(max_length=100)
avatar_url = StringField() # if avatar_url in UserProfile setted, this is not used
access_token = StringField(max_length=1024)
online = BooleanField(default=False)
last_login_time = DateTimeField()
login_times = IntField(default=1) # a new user usually added upon whose first login, by default 1 thus
meta = {
"indexes": [
{
# default unqiue is not sparse, so we have to set it by ourselves
"fields": ["provider", "openid"],
"unqiue": True,
"sparse": True}]}
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
class UserToken(HDocumentBase):
token = UUIDField(required=True)
user = ReferenceField(User)
issue_date = DateTimeField(default=get_now())
expire_date = DateTimeField(required=True)
meta = {
'indexes': [
{
# See mongodb and mongo engine documentation for details
# by default, mongoengine will add a `_cls` field with the index as a compund index
# but mongodb only support Single Key Index on Hashed Token so far
# set the `cls` option to False can disable this beahviour on mongoengine
"fields": ["#token"],
"cls": False}]}
def __init__(self, **kwargs):
super(UserToken, self).__init__(**kwargs)
class Template(HDocumentBase):
name = StringField(required=True, unique=True)
url = URLField(required=True)
provider = IntField()
status = IntField(default=TEMPLATE_STATUS.UNCHECKED) # constants.TEMPLATE_STATUS
description = StringField()
virtual_environment_count = IntField(min_value=1, required=True)
creator = ReferenceField(User)
def __init__(self, **kwargs):
super(Template, self).__init__(**kwargs)
class Organization(DynamicEmbeddedDocument):
id = UUIDField(required=True)
name = StringField(required=True)
description = StringField()
homepage = URLField()
logo = URLField()
organization_type = IntField() # see ORGANIZATION_TYPE : ORGANIZER = 1, PARTNER = 2
class Award(EmbeddedDocument):
id = UUIDField(required=True)
name = StringField(required=True)
description = StringField(required=True)
level = IntField(min_value=0, max_value=10) # 0-10
quota = IntField(min_value=1, default=1, required=True)
award_url = StringField()
class AzureKey(HDocumentBase):
"""
Azure certificate information of user/hackathon
Open-hackathon will try to use local certification file, if it doesn't exist, open-hackathon will try to
recover it from azure.
"""
cert_url = StringField(required=True) # cert_url is cert file path in azure
# pem_url is "encrypted" pem file path in azure, so be careful to use this,
# at the most time you should use get_local_pem_url()
pem_url = StringField(required=True)
subscription_id = StringField(required=True)
management_host = StringField(required=True)
verified = BooleanField()
def __init__(self, **kwargs):
super(AzureKey, self).__init__(**kwargs)
class Hackathon(HDocumentBase):
name = StringField(unique=True, required=True)
display_name = StringField(required=True)
ribbon = StringField() # a short sentence of advertisement
short_description = StringField()
location = StringField()
description = StringField()
banners = ListField()
status = IntField(default=0) # 0-new 1-online 2-offline
creator = ReferenceField(User)
config = DictField() # max_enrollment, auto_approve, login_provider
type = IntField(default=1) # enum.HACK_TYPE
organizers = EmbeddedDocumentListField(Organization)
tags = ListField()
awards = EmbeddedDocumentListField(Award)
templates = ListField(ReferenceField(Template, reverse_delete_rule=PULL)) # templates for hackathon
azure_keys = ListField(ReferenceField(AzureKey))
event_start_time = DateTimeField()
event_end_time = DateTimeField()
registration_start_time = DateTimeField()
registration_end_time = DateTimeField()
judge_start_time = DateTimeField()
judge_end_time = DateTimeField()
archive_time = DateTimeField()
def __init__(self, **kwargs):
super(Hackathon, self).__init__(**kwargs)
class UserHackathon(HDocumentBase):
user = ReferenceField(User)
hackathon = ReferenceField(Hackathon)
role = IntField(default=HACK_USER_TYPE.COMPETITOR) # 0-visitor 1-admin 2-judge 3-competitor
status = IntField() # 0-not approved user 1-approved user 2-refused user 3-auto approved user
like = BooleanField(default=True)
assets = DictField(default={}) # assets for user
remark = StringField()
deleted = BooleanField(default=False)
def __init__(self, **kwargs):
super(UserHackathon, self).__init__(**kwargs)
class HackathonStat(HDocumentBase):
type = StringField() # class HACKATHON_STAT
count = IntField(min_value=0)
hackathon = ReferenceField(Hackathon)
class HackathonNotice(HDocumentBase):
category = IntField() # category: Class HACK_NOTICE_CATEGORY, controls how icons/descriptions are shown at front-end
event = IntField() # event: Class HACK_NOTICE_EVENT, records the specfic event that triggers current notice
content = StringField()
related_id = DynamicField()
link = StringField()
creator = ReferenceField(User)
hackathon = ReferenceField(Hackathon)
receiver = ReferenceField(User)
is_read = BooleanField(default=False)
def __init__(self, **kwargs):
super(HackathonNotice, self).__init__(**kwargs)
class TeamWork(EmbeddedDocument):
id = UUIDField(required=True)
description = StringField()
type = IntField(required=True) # see TEAM_SHOW_TYPE
uri = StringField()
create_time = DateTimeField(default=get_now())
class TeamScore(EmbeddedDocument):
type = IntField(default=0)
score = IntField(required=True, min_value=0)
reason = StringField()
score_date = DateTimeField(default=get_now())
judge = ReferenceField(User)
class TeamMember(EmbeddedDocument):
join_time = DateTimeField()
status = IntField() # 0:unaudit ,1:audit_passed, 2:audit_refused
user = ReferenceField(User)
class Team(HDocumentBase):
name = StringField(required=True)
description = StringField()
logo = StringField()
leader = ReferenceField(User)
cover = StringField()
project_name = StringField()
project_description = StringField()
dev_plan = StringField()
hackathon = ReferenceField(Hackathon)
works = EmbeddedDocumentListField(TeamWork)
scores = EmbeddedDocumentListField(TeamScore)
members = EmbeddedDocumentListField(TeamMember)
awards = ListField() # list of uuid. UUID reference class Award-id
assets = DictField() # assets for team
azure_keys = ListField(ReferenceField(AzureKey))
templates = ListField(ReferenceField(Template)) # templates for team
def __init__(self, **kwargs):
super(Team, self).__init__(**kwargs)
class DockerHostServer(HDocumentBase):
vm_name = StringField(required=True)
public_dns = StringField()
public_ip = StringField()
public_docker_api_port = IntField(min_value=1, max_value=65535, default=4243)
private_ip = StringField()
private_docker_api_port = IntField(min_value=1, max_value=65535, default=4243)
container_count = IntField(required=True, min_value=0, default=0)
container_max_count = IntField(required=True, min_value=0)
is_auto = BooleanField(default=False) # 0-started manually 1-started by OHP server
state = IntField(default=0) # 0-VM starting, 1-docker init, 2-docker API ready, 3-unavailable
disabled = BooleanField(default=False) # T-disabled by manager, F-available
hackathon = ReferenceField(Hackathon)
def __init__(self, **kwargs):
super(DockerHostServer, self).__init__(**kwargs)
class PortBinding(DynamicEmbeddedDocument):
# for simplicity, the port won't be released until the corresponding container removed(not stopped).
# that means a port occupied by stopped container won't be allocated to new container. So it's possible to start the
# container again. And the number of port should be enough since we won't have too many containers on the same VM.
name = StringField()
is_public = BooleanField()
public_port = IntField() # port that are public accessible
host_port = IntField() # port on hosted VM
container_port = IntField() # port inside docker container
url = StringField() # public url pattern for display where host and port should be replaced
class DockerContainer(DynamicEmbeddedDocument):
name = StringField(required=True, unique=True)
image = StringField()
container_id = StringField()
host_server = ReferenceField(DockerHostServer)
port_bindings = EmbeddedDocumentListField(PortBinding, default=[])
class AzureStorageAccount(DynamicEmbeddedDocument):
name = StringField(required=True)
description = StringField()
label = StringField()
location = StringField(required=True)
# ASAStatus in enum.py
status = StringField()
create_time = DateTimeField()
update_time = DateTimeField()
deletable = BooleanField() # F-cannot delete T-can be deleted
class AzureCloudService(DynamicEmbeddedDocument):
name = StringField()
label = StringField()
location = StringField()
# ACSStatus in enum.py
status = StringField()
azure_key = ReferenceField(AzureKey)
deletable = BooleanField() # F-cannot delete T-can be deleted
class AzureDeployment(DynamicEmbeddedDocument):
name = StringField()
slot = StringField()
# ADStatus in enum.py
status = StringField()
cloud_service = EmbeddedDocumentField(AzureCloudService)
create_time = DateTimeField()
update_time = DateTimeField()
deletable = BooleanField() # F-cannot delete T-can be deleted
class AzureEndPoint(DynamicEmbeddedDocument):
name = StringField()
protocol = StringField()
public_port = IntField()
private_port = IntField()
url = StringField()
class AzureVirtualMachine(DynamicEmbeddedDocument):
name = StringField(required=True)
label = StringField()
# AVMStatus in enum.py
dns = StringField()
public_ip = StringField()
private_ip = StringField()
deployment = EmbeddedDocumentField(AzureDeployment)
create_time = DateTimeField()
update_time = DateTimeField()
deletable = BooleanField() # F-cannot delete T-can be deleted
end_points = EmbeddedDocumentListField(AzureEndPoint, default=[])
class VirtualEnvironment(DynamicEmbeddedDocument):
"""
Virtual environment is abstraction of smallest environment unit in template
"""
provider = IntField() # VE_PROVIDER in enum.py
name = StringField(required=True, unique=True)
status = IntField(required=True) # VEStatus in enum.py
remote_provider = IntField() # VERemoteProvider in enum.py
remote_paras = DictField()
create_time = DateTimeField(default=get_now())
update_time = DateTimeField()
docker_container = EmbeddedDocumentField(DockerContainer)
azure_resource = EmbeddedDocumentField(AzureVirtualMachine)
class Experiment(HDocumentBase):
status = IntField() # EStatus in enum.py
last_heart_beat_time = DateTimeField()
template = ReferenceField(Template)
user = ReferenceField(User)
azure_key = ReferenceField(AzureKey)
hackathon = ReferenceField(Hackathon)
virtual_environments = EmbeddedDocumentListField(VirtualEnvironment, default=[])
def __init__(self, **kwargs):
super(Experiment, self).__init__(**kwargs)
|
{
"content_hash": "0d032fdc3da0e305a4ecbd7378456ab7",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 121,
"avg_line_length": 35.66823529411765,
"alnum_prop": 0.6958242628141698,
"repo_name": "lclchen/open-hackathon",
"id": "235336b816335dce91601509dfc0ae97127dbad2",
"size": "15184",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "open-hackathon-server/src/hackathon/hmongo/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189542"
},
{
"name": "HTML",
"bytes": "494496"
},
{
"name": "Java",
"bytes": "9272"
},
{
"name": "JavaScript",
"bytes": "634650"
},
{
"name": "Python",
"bytes": "672432"
},
{
"name": "Ruby",
"bytes": "1595"
},
{
"name": "Shell",
"bytes": "16003"
}
],
"symlink_target": ""
}
|
import sys
from setuptools import setup, find_packages
install_requires = ['redis']
print find_packages()
setup(
name='twiceredis',
version='2.0.0',
author='Trey Morris',
author_email='trey@treymorris.com',
description='sentinel pool backed read and write redis client',
long_description=open('README.rst').read(),
install_requires=install_requires,
classifiers=['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License'],
keywords='redis',
packages=find_packages(),
license='Apache Software License',
url='https://github.com/tr3buchet/twiceredis')
|
{
"content_hash": "dcbad6d5095647efe500ab57b2a728ef",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 30,
"alnum_prop": 0.6833333333333333,
"repo_name": "tr3buchet/twiceredis",
"id": "19d80d3969a823114a320a464cb24a372b20ee10",
"size": "1279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13876"
}
],
"symlink_target": ""
}
|
"""Unit tests for the raised exception documentation checking in the
`DocstringChecker` in :mod:`pylint.extensions.check_docs`
"""
# pylint: disable=too-many-public-methods
import astroid
from pylint.extensions.docparams import DocstringParameterChecker
from pylint.testutils import CheckerTestCase, Message, set_config
class TestDocstringCheckerRaise(CheckerTestCase):
"""Tests for pylint_plugin.RaiseDocChecker"""
CHECKER_CLASS = DocstringParameterChecker
def test_ignores_no_docstring(self):
raise_node = astroid.extract_node(
"""
def my_func(self):
raise RuntimeError('hi') #@
"""
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_unknown_style(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring."""
raise RuntimeError('hi')
'''
)
raise_node = node.body[0]
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
@set_config(accept_no_raise_doc=False)
def test_warns_unknown_style(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring."""
raise RuntimeError('hi')
'''
)
raise_node = node.body[0]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_missing_sphinx_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Never
"""
raise RuntimeError('hi')
raise NameError('hi')
'''
)
raise_node = node.body[0]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_missing_google_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
NameError: Never
"""
raise RuntimeError('hi')
raise NameError('hi')
'''
)
raise_node = node.body[0]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_google_attr_raises_exact_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a google docstring.
Raises:
re.error: Sometimes
"""
import re
raise re.error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_google_attr_raises_substr_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a google docstring.
Raises:
re.error: Sometimes
"""
from re import error
raise error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_valid_missing_google_attr_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a google docstring.
Raises:
re.anothererror: Sometimes
"""
from re import error
raise error('hi')
'''
)
raise_node = node.body[1]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("error",))
):
self.checker.visit_raise(raise_node)
def test_find_invalid_missing_google_attr_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a google docstring.
Raises:
bogusmodule.error: Sometimes
"""
from re import error
raise error('hi') #@
'''
)
# pylint allows this to pass since the comparison between Raises and
# raise are based on the class name, not the qualified name.
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_google_raises_local_reference(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a google docstring.
Raises:
.LocalException: Always
"""
from neighbor_module import LocalException
raise LocalException('hi') #@
'''
)
# pylint allows this to pass since the comparison between Raises and
# raise are based on the class name, not the qualified name.
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
@set_config(accept_no_raise_doc=False)
def test_google_raises_with_prefix(self):
code_snippet = '''
def my_func(self):
"""This is a google docstring.
Raises:
{prefix}re.error: Sometimes
"""
import re
raise re.error('hi') #@
'''
for prefix in ["~", "!"]:
raise_node = astroid.extract_node(code_snippet.format(prefix=prefix))
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_missing_numpy_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises
------
NameError
Never
"""
raise RuntimeError('hi')
raise NameError('hi')
'''
)
raise_node = node.body[0]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_ignore_spurious_sphinx_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises RuntimeError: Always
:except NameError: Never
:raise OSError: Never
:exception ValueError: Never
"""
raise RuntimeError('Blah') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_all_sphinx_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises RuntimeError: Always
:except NameError: Never
:raise OSError: Never
:exception ValueError: Never
"""
raise RuntimeError('hi') #@
raise NameError('hi')
raise OSError(2, 'abort!')
raise ValueError('foo')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_all_google_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
RuntimeError: Always
NameError: Never
"""
raise RuntimeError('hi') #@
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_all_numpy_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises
------
RuntimeError
Always
NameError
Never
"""
raise RuntimeError('hi') #@
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_multiple_sphinx_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises RuntimeError: Always
:raises NameError, OSError, ValueError: Never
"""
raise RuntimeError('hi')
raise NameError('hi') #@
raise OSError(2, 'abort!')
raise ValueError('foo')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_multiple_google_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
RuntimeError: Always
NameError, OSError, ValueError: Never
"""
raise RuntimeError('hi')
raise NameError('hi') #@
raise OSError(2, 'abort!')
raise ValueError('foo')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_finds_rethrown_sphinx_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Sometimes
"""
try:
fake_func()
except RuntimeError:
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_rethrown_google_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
NameError: Sometimes
"""
try:
fake_func()
except RuntimeError:
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_rethrown_numpy_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises
------
NameError
Sometimes
"""
try:
fake_func()
except RuntimeError:
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_finds_rethrown_sphinx_multiple_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Sometimes
"""
try:
fake_func()
except (RuntimeError, ValueError):
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(
msg_id="missing-raises-doc",
node=node,
args=("RuntimeError, ValueError",),
)
):
self.checker.visit_raise(raise_node)
def test_find_rethrown_google_multiple_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
NameError: Sometimes
"""
try:
fake_func()
except (RuntimeError, ValueError):
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(
msg_id="missing-raises-doc",
node=node,
args=("RuntimeError, ValueError",),
)
):
self.checker.visit_raise(raise_node)
def test_find_rethrown_numpy_multiple_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises
------
NameError
Sometimes
"""
try:
fake_func()
except (RuntimeError, ValueError):
raise #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(
msg_id="missing-raises-doc",
node=node,
args=("RuntimeError, ValueError",),
)
):
self.checker.visit_raise(raise_node)
def test_ignores_caught_sphinx_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Sometimes
"""
try:
raise RuntimeError('hi') #@
except RuntimeError:
pass
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_caught_google_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
Raises:
NameError: Sometimes
"""
try:
raise RuntimeError('hi') #@
except RuntimeError:
pass
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_caught_numpy_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a numpy docstring.
Raises
------
NameError
Sometimes
"""
try:
raise RuntimeError('hi') #@
except RuntimeError:
pass
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_numpy_attr_raises_exact_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a numpy docstring.
Raises
------
re.error
Sometimes
"""
import re
raise re.error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_numpy_attr_raises_substr_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a numpy docstring.
Raises
------
re.error
Sometimes
"""
from re import error
raise error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_valid_missing_numpy_attr_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a numpy docstring.
Raises
------
re.anothererror
Sometimes
"""
from re import error
raise error('hi')
'''
)
raise_node = node.body[1]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("error",))
):
self.checker.visit_raise(raise_node)
def test_find_invalid_missing_numpy_attr_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a numpy docstring.
Raises
------
bogusmodule.error
Sometimes
"""
from re import error
raise error('hi') #@
'''
)
# pylint allows this to pass since the comparison between Raises and
# raise are based on the class name, not the qualified name.
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
@set_config(accept_no_raise_doc=False)
def test_numpy_raises_with_prefix(self):
code_snippet = '''
def my_func(self):
"""This is a numpy docstring.
Raises
------
{prefix}re.error
Sometimes
"""
import re
raise re.error('hi') #@
'''
for prefix in ["~", "!"]:
raise_node = astroid.extract_node(code_snippet.format(prefix=prefix))
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_missing_sphinx_raises_infer_from_instance(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Never
"""
my_exception = RuntimeError('hi')
raise my_exception #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_missing_sphinx_raises_infer_from_function(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Never
"""
def ex_func(val):
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
self.checker.visit_raise(raise_node)
def test_find_sphinx_attr_raises_exact_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a sphinx docstring.
:raises re.error: Sometimes
"""
import re
raise re.error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_sphinx_attr_raises_substr_exc(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a sphinx docstring.
:raises re.error: Sometimes
"""
from re import error
raise error('hi') #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_find_valid_missing_sphinx_attr_raises(self):
node = astroid.extract_node(
'''
def my_func(self):
"""This is a sphinx docstring.
:raises re.anothererror: Sometimes
"""
from re import error
raise error('hi')
'''
)
raise_node = node.body[1]
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("error",))
):
self.checker.visit_raise(raise_node)
def test_find_invalid_missing_sphinx_attr_raises(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a sphinx docstring.
:raises bogusmodule.error: Sometimes
"""
from re import error
raise error('hi') #@
'''
)
# pylint allows this to pass since the comparison between Raises and
# raise are based on the class name, not the qualified name.
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
@set_config(accept_no_raise_doc=False)
def test_sphinx_raises_with_prefix(self):
code_snippet = '''
def my_func(self):
"""This is a sphinx docstring.
:raises {prefix}re.error: Sometimes
"""
import re
raise re.error('hi') #@
'''
for prefix in ["~", "!"]:
raise_node = astroid.extract_node(code_snippet.format(prefix=prefix))
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_raise_uninferable(self):
raise_node = astroid.extract_node(
'''
from unknown import Unknown
def my_func(self):
"""This is a docstring.
:raises NameError: Never
"""
raise Unknown('hi') #@
raise NameError('hi')
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_returns_from_inner_functions(self):
raise_node = astroid.extract_node(
'''
def my_func(self):
"""This is a docstring.
:raises NameError: Never
"""
def ex_func(val):
def inner_func(value):
return OSError(value)
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
'''
)
node = raise_node.frame()
with self.assertAddsMessages(
Message(msg_id="missing-raises-doc", node=node, args=("RuntimeError",))
):
# we do NOT expect a warning about the OSError in inner_func!
self.checker.visit_raise(raise_node)
def test_ignores_returns_use_only_names(self):
raise_node = astroid.extract_node(
'''
def myfunc():
"""This is a docstring
:raises NameError: Never
"""
def inner_func():
return 42
raise inner_func() #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_ignores_returns_use_only_exception_instances(self):
raise_node = astroid.extract_node(
'''
def myfunc():
"""This is a docstring
:raises MyException: Never
"""
class MyException(Exception):
pass
def inner_func():
return MyException
raise inner_func() #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_no_crash_when_inferring_handlers(self):
raise_node = astroid.extract_node(
'''
import collections
def test():
"""raises
:raise U: pass
"""
try:
pass
except collections.U as exc:
raise #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_no_crash_when_cant_find_exception(self):
raise_node = astroid.extract_node(
'''
import collections
def test():
"""raises
:raise U: pass
"""
try:
pass
except U as exc:
raise #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
def test_no_error_notimplemented_documented(self):
raise_node = astroid.extract_node(
'''
def my_func():
"""
Raises:
NotImplementedError: When called.
"""
raise NotImplementedError #@
'''
)
with self.assertNoMessages():
self.checker.visit_raise(raise_node)
|
{
"content_hash": "12f4d6d9297277c8d9aafffec32a208b",
"timestamp": "",
"source": "github",
"line_count": 886,
"max_line_length": 83,
"avg_line_length": 28.159142212189618,
"alnum_prop": 0.49232434165698025,
"repo_name": "ruchee/vimrc",
"id": "7ed7c405cf69857e50c03877298bf4c0a8ec39ce",
"size": "25733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/extensions/test_check_raise_docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
}
|
'''Display positioned, scaled and rotated images.
A sprite is an instance of an image displayed on-screen. Multiple sprites can
display the same image at different positions on the screen. Sprites can also
be scaled larger or smaller, rotated at any angle and drawn at a fractional
opacity.
The following complete example loads a ``"ball.png"`` image and creates a
sprite for that image. The sprite is then drawn in the window's
draw event handler::
import pyglet
ball_image = pyglet.image.load('ball.png')
ball = pyglet.sprite.Sprite(ball_image, x=50, y=50)
window = pyglet.window.Window()
@window.event
def on_draw():
ball.draw()
pyglet.app.run()
The sprite can be moved by modifying the ``x`` and ``y`` properties. Other
properties determine the sprite's rotation, scale and opacity.
The sprite's positioning, rotation and scaling all honor the original
image's anchor (anchor_x, anchor_y).
Drawing multiple sprites
========================
Sprites can be "batched" together and drawn at once more quickly than if each
of their ``draw`` methods were called individually. The following example
creates one hundred ball sprites and adds each of them to a `Batch`. The
entire batch of sprites is then drawn in one call::
batch = pyglet.graphics.Batch()
ball_sprites = []
for i in range(100):
x, y = i * 10, 50
ball_sprites.append(pyglet.sprite.Sprite(ball_image, x, y, batch=batch)
@window.event
def on_draw():
batch.draw()
Sprites can be freely modified in any way even after being added to a batch,
however a sprite can belong to at most one batch. See the documentation for
`pyglet.graphics` for more details on batched rendering, and grouping of
sprites within batches.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: sprite.py 2541 2009-12-31 04:31:11Z benjamin.coder.smith@gmail.com $'
import math
import sys
from pyglet.gl import *
from pyglet import clock
from pyglet import event
from pyglet import graphics
from pyglet import image
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
class SpriteGroup(graphics.Group):
'''Shared sprite rendering group.
The group is automatically coalesced with other sprite groups sharing the
same parent group, texture and blend parameters.
'''
def __init__(self, texture, blend_src, blend_dest, parent=None):
'''Create a sprite group.
The group is created internally within `Sprite`; applications usually
do not need to explicitly create it.
:Parameters:
`texture` : `Texture`
The (top-level) texture containing the sprite image.
`blend_src` : int
OpenGL blend source mode; for example,
``GL_SRC_ALPHA``.
`blend_dest` : int
OpenGL blend destination mode; for example,
``GL_ONE_MINUS_SRC_ALPHA``.
`parent` : `Group`
Optional parent group.
'''
super(SpriteGroup, self).__init__(parent)
self.texture = texture
self.blend_src = blend_src
self.blend_dest = blend_dest
def set_state(self):
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
glPushAttrib(GL_COLOR_BUFFER_BIT)
glEnable(GL_BLEND)
glBlendFunc(self.blend_src, self.blend_dest)
def unset_state(self):
glPopAttrib()
glDisable(self.texture.target)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.texture)
def __eq__(self, other):
return (other.__class__ is self.__class__ and
self.parent is other.parent and
self.texture.target == other.texture.target and
self.texture.id == other.texture.id and
self.blend_src == other.blend_src and
self.blend_dest == other.blend_dest)
def __hash__(self):
return hash((id(self.parent),
self.texture.id, self.texture.target,
self.blend_src, self.blend_dest))
class Sprite(event.EventDispatcher):
'''Instance of an on-screen image.
See the module documentation for usage.
'''
_batch = None
_animation = None
_rotation = 0
_opacity = 255
_rgb = (255, 255, 255)
_scale = 1.0
_visible = True
_vertex_list = None
def __init__(self,
img, x=0, y=0,
blend_src=GL_SRC_ALPHA,
blend_dest=GL_ONE_MINUS_SRC_ALPHA,
batch=None,
group=None,
usage='dynamic'):
'''Create a sprite.
:Parameters:
`img` : `AbstractImage` or `Animation`
Image or animation to display.
`x` : int
X coordinate of the sprite.
`y` : int
Y coordinate of the sprite.
`blend_src` : int
OpenGL blend source mode. The default is suitable for
compositing sprites drawn from back-to-front.
`blend_dest` : int
OpenGL blend destination mode. The default is suitable for
compositing sprites drawn from back-to-front.
`batch` : `Batch`
Optional batch to add the sprite to.
`group` : `Group`
Optional parent group of the sprite.
`usage` : str
Vertex buffer object usage hint, one of ``"none"`` (default),
``"stream"``, ``"dynamic"`` or ``"static"``. Applies
only to vertex data.
'''
if batch is not None:
self._batch = batch
self._x = x
self._y = y
if isinstance(img, image.Animation):
self._animation = img
self._frame_index = 0
self._texture = img.frames[0].image.get_texture()
self._next_dt = img.frames[0].duration
if self._next_dt:
clock.schedule_once(self._animate, self._next_dt)
else:
self._texture = img.get_texture()
self._group = SpriteGroup(self._texture, blend_src, blend_dest, group)
self._usage = usage
self._create_vertex_list()
def __del__(self):
try:
if self._vertex_list is not None:
self._vertex_list.delete()
except:
pass
def delete(self):
'''Force immediate removal of the sprite from video memory.
This is often necessary when using batches, as the Python garbage
collector will not necessarily call the finalizer as soon as the
sprite is garbage.
'''
if self._animation:
clock.unschedule(self._animate)
self._vertex_list.delete()
self._vertex_list = None
self._texture = None
# Easy way to break circular reference, speeds up GC
self._group = None
def _animate(self, dt):
self._frame_index += 1
if self._frame_index >= len(self._animation.frames):
self._frame_index = 0
self.dispatch_event('on_animation_end')
if self._vertex_list is None:
return # Deleted in event handler.
frame = self._animation.frames[self._frame_index]
self._set_texture(frame.image.get_texture())
if frame.duration is not None:
duration = frame.duration - (self._next_dt - dt)
duration = min(max(0, duration), frame.duration)
clock.schedule_once(self._animate, duration)
self._next_dt = duration
else:
self.dispatch_event('on_animation_end')
def _set_batch(self, batch):
if self._batch == batch:
return
if batch is not None and self._batch is not None:
self._batch.migrate(self._vertex_list, GL_QUADS, self._group, batch)
self._batch = batch
else:
self._vertex_list.delete()
self._batch = batch
self._create_vertex_list()
def _get_batch(self):
return self._batch
batch = property(_get_batch, _set_batch,
doc='''Graphics batch.
The sprite can be migrated from one batch to another, or removed from its
batch (for individual drawing). Note that this can be an expensive
operation.
:type: `Batch`
''')
def _set_group(self, group):
if self._group.parent == group:
return
self._group = SpriteGroup(self._texture,
self._group.blend_src,
self._group.blend_dest,
group)
if self._batch is not None:
self._batch.migrate(self._vertex_list, GL_QUADS, self._group,
self._batch)
def _get_group(self):
return self._group.parent
group = property(_get_group, _set_group,
doc='''Parent graphics group.
The sprite can change its rendering group, however this can be an
expensive operation.
:type: `Group`
''')
def _get_image(self):
if self._animation:
return self._animation
return self._texture
def _set_image(self, img):
if self._animation is not None:
clock.unschedule(self._animate)
self._animation = None
if isinstance(img, image.Animation):
self._animation = img
self._frame_index = 0
self._set_texture(img.frames[0].image.get_texture())
self._next_dt = img.frames[0].duration
clock.schedule_once(self._animate, self._next_dt)
else:
self._set_texture(img.get_texture())
self._update_position()
image = property(_get_image, _set_image,
doc='''Image or animation to display.
:type: `AbstractImage` or `Animation`
''')
def _set_texture(self, texture):
if texture.id is not self._texture.id:
self._group = SpriteGroup(texture,
self._group.blend_src,
self._group.blend_dest,
self._group.parent)
if self._batch is None:
self._vertex_list.tex_coords[:] = texture.tex_coords
else:
self._vertex_list.delete()
self._texture = texture
self._create_vertex_list()
else:
self._vertex_list.tex_coords[:] = texture.tex_coords
self._texture = texture
def _create_vertex_list(self):
if self._batch is None:
self._vertex_list = graphics.vertex_list(4,
'v2i/%s' % self._usage,
'c4B', ('t3f', self._texture.tex_coords))
else:
self._vertex_list = self._batch.add(4, GL_QUADS, self._group,
'v2i/%s' % self._usage,
'c4B', ('t3f', self._texture.tex_coords))
self._update_position()
self._update_color()
def _update_position(self):
img = self._texture
if not self._visible:
self._vertex_list.vertices[:] = [0, 0, 0, 0, 0, 0, 0, 0]
elif self._rotation:
x1 = -img.anchor_x * self._scale
y1 = -img.anchor_y * self._scale
x2 = x1 + img.width * self._scale
y2 = y1 + img.height * self._scale
x = self._x
y = self._y
r = -math.radians(self._rotation)
cr = math.cos(r)
sr = math.sin(r)
ax = int(x1 * cr - y1 * sr + x)
ay = int(x1 * sr + y1 * cr + y)
bx = int(x2 * cr - y1 * sr + x)
by = int(x2 * sr + y1 * cr + y)
cx = int(x2 * cr - y2 * sr + x)
cy = int(x2 * sr + y2 * cr + y)
dx = int(x1 * cr - y2 * sr + x)
dy = int(x1 * sr + y2 * cr + y)
self._vertex_list.vertices[:] = [ax, ay, bx, by, cx, cy, dx, dy]
elif self._scale != 1.0:
x1 = int(self._x - img.anchor_x * self._scale)
y1 = int(self._y - img.anchor_y * self._scale)
x2 = int(x1 + img.width * self._scale)
y2 = int(y1 + img.height * self._scale)
self._vertex_list.vertices[:] = [x1, y1, x2, y1, x2, y2, x1, y2]
else:
x1 = int(self._x - img.anchor_x)
y1 = int(self._y - img.anchor_y)
x2 = x1 + img.width
y2 = y1 + img.height
self._vertex_list.vertices[:] = [x1, y1, x2, y1, x2, y2, x1, y2]
def _update_color(self):
r, g, b = self._rgb
self._vertex_list.colors[:] = [r, g, b, int(self._opacity)] * 4
def set_position(self, x, y):
'''Set the X and Y coordinates of the sprite simultaneously.
:Parameters:
`x` : int
X coordinate of the sprite.
`y` : int
Y coordinate of the sprite.
'''
self._x = x
self._y = y
self._update_position()
position = property(lambda self: (self._x, self._y),
lambda self, t: self.set_position(*t),
doc='''The (x, y) coordinates of the sprite.
:type: (int, int)
''')
def _set_x(self, x):
self._x = x
self._update_position()
x = property(lambda self: self._x, _set_x,
doc='''X coordinate of the sprite.
:type: int
''')
def _set_y(self, y):
self._y = y
self._update_position()
y = property(lambda self: self._y, _set_y,
doc='''Y coordinate of the sprite.
:type: int
''')
def _set_rotation(self, rotation):
self._rotation = rotation
self._update_position()
rotation = property(lambda self: self._rotation, _set_rotation,
doc='''Clockwise rotation of the sprite, in degrees.
The sprite image will be rotated about its image's (anchor_x, anchor_y)
position.
:type: float
''')
def _set_scale(self, scale):
self._scale = scale
self._update_position()
scale = property(lambda self: self._scale, _set_scale,
doc='''Scaling factor.
A scaling factor of 1 (the default) has no effect. A scale of 2 will draw
the sprite at twice the native size of its image.
:type: float
''')
width = property(lambda self: int(self._texture.width * self._scale),
doc='''Scaled width of the sprite.
Read-only. Invariant under rotation.
:type: int
''')
height = property(lambda self: int(self._texture.height * self._scale),
doc='''Scaled height of the sprite.
Read-only. Invariant under rotation.
:type: int
''')
def _set_opacity(self, opacity):
self._opacity = opacity
self._update_color()
opacity = property(lambda self: self._opacity, _set_opacity,
doc='''Blend opacity.
This property sets the alpha component of the colour of the sprite's
vertices. With the default blend mode (see the constructor), this
allows the sprite to be drawn with fractional opacity, blending with the
background.
An opacity of 255 (the default) has no effect. An opacity of 128 will
make the sprite appear translucent.
:type: int
''')
def _set_color(self, rgb):
self._rgb = map(int, rgb)
self._update_color()
color = property(lambda self: self._rgb, _set_color,
doc='''Blend color.
This property sets the color of the sprite's vertices. This allows the
sprite to be drawn with a color tint.
The color is specified as an RGB tuple of integers ``(red, green, blue)``.
Each color component must be in the range 0 (dark) to 255 (saturated).
:type: (int, int, int)
''')
def _set_visible(self, visible):
self._visible = visible
self._update_position()
visible = property(lambda self: self._visible, _set_visible,
'''True if the sprite will be drawn.
:type: bool
''')
def draw(self):
'''Draw the sprite at its current position.
See the module documentation for hints on drawing multiple sprites
efficiently.
'''
self._group.set_state_recursive()
self._vertex_list.draw(GL_QUADS)
self._group.unset_state_recursive()
if _is_epydoc:
def on_animation_end(self):
'''The sprite animation reached the final frame.
The event is triggered only if the sprite has an animation, not an
image. For looping animations, the event is triggered each time
the animation loops.
:event:
'''
Sprite.register_event_type('on_animation_end')
|
{
"content_hash": "ac8d928ac65370eecd7658f0255938ff",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 89,
"avg_line_length": 31.803370786516854,
"alnum_prop": 0.5562621444974386,
"repo_name": "Codlydodly/python-client",
"id": "f160b1eff4b31d62ddbe83d3f9530a94e430cf1d",
"size": "18699",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pyglet/sprite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4631454"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
}
|
from functools import wraps
from json import loads
from datetime import datetime, timedelta
from time import mktime
try:
from urllib import urlencode
from urllib2 import Request, urlopen
from urlparse import urlsplit, urlunsplit, parse_qsl
# monkeypatch httpmessage
from httplib import HTTPMessage
def get_charset(self):
try:
data = filter(lambda s: 'Content-Type' in s, self.headers)[0]
if 'charset' in data:
cs = data[data.index(';') + 1:-2].split('=')[1].lower()
return cs
except IndexError:
pass
return 'utf-8'
HTTPMessage.get_content_charset = get_charset
except ImportError: # pragma: no cover
from urllib.parse import urlencode, urlsplit, urlunsplit, parse_qsl
from urllib.request import Request, urlopen
class Client(object):
""" OAuth 2.0 client object
"""
def __init__(self, auth_endpoint=None, token_endpoint=None,
resource_endpoint=None, client_id=None, client_secret=None,
token_transport=None):
""" Instantiates a `Client` to authorize and authenticate a user
:param auth_endpoint: The authorization endpoint as issued by the
provider. This is where the user should be
redirect to provider authorization for your
application.
:param token_endpoint: The endpoint against which a `code` will be
exchanged for an access token.
:param resource_endpoint: The base url to use when accessing resources
via `Client.request`.
:param client_id: The client ID as issued by the provider.
:param client_secret: The client secret as issued by the provider. This
must not be shared.
"""
assert token_transport is None or hasattr(token_transport, '__call__')
self.auth_endpoint = auth_endpoint
self.token_endpoint = token_endpoint
self.resource_endpoint = resource_endpoint
self.client_id = client_id
self.client_secret = client_secret
self.access_token = None
self.token_transport = token_transport or transport_query
self.token_expires = -1
self.refresh_token = None
def auth_uri(self, redirect_uri=None, scope=None, scope_delim=None,
state=None, **kwargs):
""" Builds the auth URI for the authorization endpoint
:param scope: (optional) The `scope` parameter to pass for
authorization. The format should match that expected by
the provider (i.e. Facebook expects comma-delimited,
while Google expects space-delimited)
:param state: (optional) The `state` parameter to pass for
authorization. If the provider follows the OAuth 2.0
spec, this will be returned to your `redirect_uri` after
authorization. Generally used for CSRF protection.
:param **kwargs: Any other querystring parameters to be passed to the
provider.
"""
kwargs.update({
'client_id': self.client_id,
'response_type': 'code',
})
if scope is not None:
kwargs['scope'] = scope
if state is not None:
kwargs['state'] = state
if redirect_uri is not None:
kwargs['redirect_uri'] = redirect_uri
return '%s?%s' % (self.auth_endpoint, urlencode(kwargs))
def request_token(self, parser=None, redirect_uri=None, **kwargs):
""" Request an access token from the token endpoint.
This is largely a helper method and expects the client code to
understand what the server expects. Anything that's passed into
``**kwargs`` will be sent (``urlencode``d) to the endpoint. Client
secret and client ID are automatically included, so are not required
as kwargs. For example::
# if requesting access token from auth flow:
{
'code': rval_from_auth,
}
# if refreshing access token:
{
'refresh_token': stored_refresh_token,
'grant_type': 'refresh_token',
}
:param parser: Callback to deal with returned data. Not all providers
use JSON.
"""
kwargs = kwargs and kwargs or {}
parser = parser or _default_parser
kwargs.update({
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'grant_type' in kwargs and kwargs['grant_type'] or \
'authorization_code'
})
if redirect_uri is not None:
kwargs.update({'redirect_uri': redirect_uri})
# TODO: maybe raise an exception here if status code isn't 200?
msg = urlopen(self.token_endpoint, urlencode(kwargs).encode(
'utf-8'))
data = parser(msg.read().decode(msg.info().get_content_charset() or
'utf-8'))
for key in data:
setattr(self, key, data[key])
# expires_in is RFC-compliant. if anything else is used by the
# provider, token_expires must be set manually
if hasattr(self, 'expires_in'):
try:
# python3 dosn't support long
seconds = long(self.expires_in)
except:
seconds = int(self.expires_in)
self.token_expires = mktime((datetime.utcnow() + timedelta(
seconds=seconds)).timetuple())
def refresh(self):
self.request_token(refresh_token=self.refresh_token,
grant_type='refresh_token')
def request(self, url, method=None, data=None, headers=None, parser=None):
""" Request user data from the resource endpoint
:param url: The path to the resource and querystring if required
:param method: HTTP method. Defaults to ``GET`` unless data is not None
in which case it defaults to ``POST``
:param data: Data to be POSTed to the resource endpoint
:param parser: Parser callback to deal with the returned data. Defaults
to ``json.loads`.`
"""
assert self.access_token is not None
parser = parser or loads
if not method:
method = 'GET' if not data else 'POST'
req = self.token_transport('{0}{1}'.format(self.resource_endpoint,
url), self.access_token, data=data, method=method, headers=headers)
resp = urlopen(req)
data = resp.read()
try:
return parser(data.decode(resp.info().get_content_charset() or
'utf-8'))
# try to decode it first using either the content charset, falling
# back to utf-8
except UnicodeDecodeError:
# if we've gotten a decoder error, the calling code better know how
# to deal with it. some providers (i.e. stackexchange) like to gzip
# their responses, so this allows the client code to handle it
# directly.
return parser(data)
def transport_headers(url, access_token, data=None, method=None, headers=None):
try:
req = Request(url, data=data, method=method)
except TypeError:
req = Request(url, data=data)
req.get_method = lambda: method
add_headers = {'Authorization': 'Bearer {0}'.format(access_token)}
if headers is not None:
add_headers.update(headers)
req.headers.update(add_headers)
return req
def transport_query(url, access_token, data=None, method=None, headers=None):
parts = urlsplit(url)
query = dict(parse_qsl(parts.query))
query.update({
'access_token': access_token
})
url = urlunsplit((parts.scheme, parts.netloc, parts.path,
urlencode(query), parts.fragment))
try:
req = Request(url, data=data, method=method)
except TypeError:
req = Request(url, data=data)
req.get_method = lambda: method
if headers is not None:
req.headers.update(headers)
return req
def _default_parser(data):
try:
return loads(data)
except ValueError:
return dict(parse_qsl(data))
|
{
"content_hash": "a7e02dca69ad453806fcc885949657a0",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 37.620535714285715,
"alnum_prop": 0.5932122938174914,
"repo_name": "demianbrecht/sanction",
"id": "a4946d7f450cbe3be67e36392baaaa33e1f7d748",
"size": "8449",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sanction/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35578"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
}
|
from app.auth.models import Role
def run():
Role.insert_roles()
|
{
"content_hash": "c5d6adbb3e225175e5add6b3c24c35d4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 14,
"alnum_prop": 0.6857142857142857,
"repo_name": "teracyhq/flask-boilerplate",
"id": "3362370e803cb2dcc2d43d4e0292e70207b1cad5",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/integration/fixtures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "298"
},
{
"name": "Makefile",
"bytes": "596"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "192155"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
}
|
"""
Ensure that we can use pathlib.Path objects in all relevant IO functions.
"""
import sys
from pathlib import Path
import numpy as np
import scipy.io
import scipy.io.wavfile
from scipy._lib._tmpdirs import tempdir
import scipy.sparse
class TestPaths:
data = np.arange(5).astype(np.int64)
def test_savemat(self):
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(path, {'data': self.data})
assert path.is_file()
def test_loadmat(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(str(path), {'data': self.data})
mat_contents = scipy.io.loadmat(path)
assert (mat_contents['data'] == self.data).all()
def test_whosmat(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
path = Path(temp_dir) / 'data.mat'
scipy.io.savemat(str(path), {'data': self.data})
contents = scipy.io.whosmat(path)
assert contents[0] == ('data', (1, 5), 'int64')
def test_readsav(self):
path = Path(__file__).parent / 'data/scalar_string.sav'
scipy.io.readsav(path)
def test_hb_read(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
path = Path(temp_dir) / 'data.hb'
scipy.io.harwell_boeing.hb_write(str(path), data)
data_new = scipy.io.harwell_boeing.hb_read(path)
assert (data_new != data).nnz == 0
def test_hb_write(self):
with tempdir() as temp_dir:
data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
path = Path(temp_dir) / 'data.hb'
scipy.io.harwell_boeing.hb_write(path, data)
assert path.is_file()
def test_mmio_read(self):
# Save data with string path, load with pathlib.Path
with tempdir() as temp_dir:
data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
path = Path(temp_dir) / 'data.mtx'
scipy.io.mmwrite(str(path), data)
data_new = scipy.io.mmread(path)
assert (data_new != data).nnz == 0
def test_mmio_write(self):
with tempdir() as temp_dir:
data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
path = Path(temp_dir) / 'data.mtx'
scipy.io.mmwrite(path, data)
def test_netcdf_file(self):
path = Path(__file__).parent / 'data/example_1.nc'
scipy.io.netcdf.netcdf_file(path)
def test_wavfile_read(self):
path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
scipy.io.wavfile.read(path)
def test_wavfile_write(self):
# Read from str path, write to Path
input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
rate, data = scipy.io.wavfile.read(str(input_path))
with tempdir() as temp_dir:
output_path = Path(temp_dir) / input_path.name
scipy.io.wavfile.write(output_path, rate, data)
|
{
"content_hash": "b3cd30890c08683494b3147861a28eb7",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 81,
"avg_line_length": 34.47872340425532,
"alnum_prop": 0.5871644554149954,
"repo_name": "pizzathief/scipy",
"id": "4ba6dc312dcc7b573669e662546a4f3b3c1f7214",
"size": "3241",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scipy/io/tests/test_paths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4395775"
},
{
"name": "C++",
"bytes": "649767"
},
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Fortran",
"bytes": "5367672"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12449825"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import json
import urllib
BASE_URL = 'https://blockchain.info/'
def query(call):
return urllib.urlopen(BASE_URL + call).read()
def block_count():
"""Return blockchain.info's most recent block count. This is
useful in comparison with the local blockcount, which may be
lagging behind."""
return int(query('q/getblockcount'))
def transaction(txid):
s = query('tx-index/' + txid + '?format=json')
return json.loads(s)
|
{
"content_hash": "a5f83381bc7df8ad2f6f7e970b813d41",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 25.058823529411764,
"alnum_prop": 0.7183098591549296,
"repo_name": "dasmithii/Stone",
"id": "a66f0be7db921909eab836ba9f321fb27f52c6f4",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stone/info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7393"
}
],
"symlink_target": ""
}
|
"""
fMRIprep base processing workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_fmriprep_wf
.. autofunction:: init_single_subject_wf
"""
import sys
import os
from collections import OrderedDict
from copy import deepcopy
from nipype import __version__ as nipype_ver
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nilearn import __version__ as nilearn_ver
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.bids import (
BIDSInfo, BIDSDataGrabber, BIDSFreeSurferDir
)
from niworkflows.utils.bids import collect_data
from niworkflows.utils.misc import fix_multi_T1w_source_name
from smriprep.workflows.anatomical import init_anat_preproc_wf
from ..interfaces import SubjectSummary, AboutSummary, DerivativesDataSink
from ..__about__ import __version__
from .bold import init_func_preproc_wf
def init_fmriprep_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
omp_nthreads,
output_dir,
output_spaces,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
run_uuid,
skull_strip_fixed_seed,
skull_strip_template,
subject_list,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
work_dir,
):
"""
This workflow organizes the execution of FMRIPREP, with a sub-workflow for
each subject.
If FreeSurfer's recon-all is to be run, a FreeSurfer derivatives folder is
created and populated with any needed template subjects.
.. workflow::
:graph2use: orig
:simple_form: yes
import os
from collections import namedtuple, OrderedDict
BIDSLayout = namedtuple('BIDSLayout', ['root'])
from fmriprep.workflows.base import init_fmriprep_wf
os.environ['FREESURFER_HOME'] = os.getcwd()
wf = init_fmriprep_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
omp_nthreads=1,
output_dir='.',
output_spaces=OrderedDict([
('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
('T1w', {}), ('fsnative', {})]),
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
run_uuid='X',
skull_strip_fixed_seed=False,
skull_strip_template='OASIS30ANTs',
subject_list=['fmripreptest'],
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
work_dir='.',
)
Parameters
anat_only : bool
Disable functional workflows
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
Number of volumes to consider as non steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
output_spaces : OrderedDict
Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``) strings designating
nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
or paths pointing to custom templates organized in a TemplateFlow-like structure.
Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
resolution version of the selected template).
regressors_all_comps
Return all CompCor component time series instead of the top fraction
regressors_dvars_th
Criterion for flagging DVARS outliers
regressors_fd_th
Criterion for flagging framewise displacement outliers
run_uuid : str
Unique identifier for execution instance
skull_strip_template : str
Name of ANTs skull-stripping template ('OASIS30ANTs' or 'NKI')
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
subject_list : list
List of subject labels
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
work_dir : str
Directory in which to store workflow execution state and temporary files
"""
fmriprep_wf = Workflow(name='fmriprep_wf')
fmriprep_wf.base_dir = work_dir
if freesurfer:
fsdir = pe.Node(
BIDSFreeSurferDir(
derivatives=output_dir,
freesurfer_home=os.getenv('FREESURFER_HOME'),
spaces=[s for s in output_spaces.keys() if s.startswith('fsaverage')] + [
'fsnative'] * ('fsnative' in output_spaces)),
name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)
reportlets_dir = os.path.join(work_dir, 'reportlets')
for subject_id in subject_list:
single_subject_wf = init_single_subject_wf(
anat_only=anat_only,
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
echo_idx=echo_idx,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
hires=hires,
ignore=ignore,
layout=layout,
longitudinal=longitudinal,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
name="single_subject_" + subject_id + "_wf",
omp_nthreads=omp_nthreads,
output_dir=output_dir,
output_spaces=output_spaces,
regressors_all_comps=regressors_all_comps,
regressors_dvars_th=regressors_dvars_th,
regressors_fd_th=regressors_fd_th,
reportlets_dir=reportlets_dir,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
subject_id=subject_id,
t2s_coreg=t2s_coreg,
task_id=task_id,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
single_subject_wf.config['execution']['crashdump_dir'] = (
os.path.join(output_dir, "fmriprep", "sub-" + subject_id, 'log', run_uuid)
)
for node in single_subject_wf._get_all_nodes():
node.config = deepcopy(single_subject_wf.config)
if freesurfer:
fmriprep_wf.connect(fsdir, 'subjects_dir',
single_subject_wf, 'inputnode.subjects_dir')
else:
fmriprep_wf.add_nodes([single_subject_wf])
return fmriprep_wf
def init_single_subject_wf(
anat_only,
aroma_melodic_dim,
bold2t1w_dof,
cifti_output,
debug,
dummy_scans,
echo_idx,
err_on_aroma_warn,
fmap_bspline,
fmap_demean,
force_syn,
freesurfer,
hires,
ignore,
layout,
longitudinal,
low_mem,
medial_surface_nan,
name,
omp_nthreads,
output_dir,
output_spaces,
reportlets_dir,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
skull_strip_fixed_seed,
skull_strip_template,
subject_id,
t2s_coreg,
task_id,
use_aroma,
use_bbr,
use_syn,
):
"""
This workflow organizes the preprocessing pipeline for a single subject.
It collects and reports information about the subject, and prepares
sub-workflows to perform anatomical and functional preprocessing.
Anatomical preprocessing is performed in a single workflow, regardless of
the number of sessions.
Functional preprocessing is performed using a separate workflow for each
individual BOLD series.
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.base import init_single_subject_wf
from collections import namedtuple, OrderedDict
BIDSLayout = namedtuple('BIDSLayout', ['root'])
wf = init_single_subject_wf(
anat_only=False,
aroma_melodic_dim=-200,
bold2t1w_dof=9,
cifti_output=False,
debug=False,
dummy_scans=None,
echo_idx=None,
err_on_aroma_warn=False,
fmap_bspline=False,
fmap_demean=True,
force_syn=True,
freesurfer=True,
hires=True,
ignore=[],
layout=BIDSLayout('.'),
longitudinal=False,
low_mem=False,
medial_surface_nan=False,
name='single_subject_wf',
omp_nthreads=1,
output_dir='.',
output_spaces=OrderedDict([
('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
('T1w', {}), ('fsnative', {})]),
reportlets_dir='.',
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
skull_strip_fixed_seed=False,
skull_strip_template='OASIS30ANTs',
subject_id='test',
t2s_coreg=False,
task_id='',
use_aroma=False,
use_bbr=True,
use_syn=True,
)
Parameters
anat_only : bool
Disable functional workflows
aroma_melodic_dim : int
Maximum number of components identified by MELODIC within ICA-AROMA
(default is -200, i.e., no limitation).
bold2t1w_dof : 6, 9 or 12
Degrees-of-freedom for BOLD-T1w registration
cifti_output : bool
Generate bold CIFTI file in output spaces
debug : bool
Enable debugging outputs
dummy_scans : int or None
Number of volumes to consider as non steady state
echo_idx : int or None
Index of echo to preprocess in multiecho BOLD series,
or ``None`` to preprocess all
err_on_aroma_warn : bool
Do not fail on ICA-AROMA errors
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
force_syn : bool
**Temporary**: Always run SyN-based SDC
freesurfer : bool
Enable FreeSurfer surface reconstruction (may increase runtime)
hires : bool
Enable sub-millimeter preprocessing in FreeSurfer
ignore : list
Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
layout : BIDSLayout object
BIDS dataset layout
longitudinal : bool
Treat multiple sessions as longitudinal (may increase runtime)
See sub-workflows for specific differences
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
medial_surface_nan : bool
Replace medial wall values with NaNs on functional GIFTI files
name : str
Name of workflow
omp_nthreads : int
Maximum number of threads an individual process may use
output_dir : str
Directory in which to save derivatives
output_spaces : OrderedDict
Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``) strings designating
nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
or paths pointing to custom templates organized in a TemplateFlow-like structure.
Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
resolution version of the selected template).
reportlets_dir : str
Directory in which to save reportlets
regressors_all_comps
Return all CompCor component time series instead of the top fraction
regressors_fd_th
Criterion for flagging framewise displacement outliers
regressors_dvars_th
Criterion for flagging DVARS outliers
skull_strip_fixed_seed : bool
Do not use a random seed for skull-stripping - will ensure
run-to-run replicability when used with --omp-nthreads 1
skull_strip_template : str
Name of ANTs skull-stripping template ('OASIS30ANTs' or 'NKI')
subject_id : str
List of subject labels
t2s_coreg : bool
For multi-echo EPI, use the calculated T2*-map for T2*-driven coregistration
task_id : str or None
Task ID of BOLD series to preprocess, or ``None`` to preprocess all
use_aroma : bool
Perform ICA-AROMA on MNI-resampled functional series
use_bbr : bool or None
Enable/disable boundary-based registration refinement.
If ``None``, test BBR result for distortion before accepting.
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
If fieldmaps are present and enabled, this is not run, by default.
Inputs
subjects_dir
FreeSurfer SUBJECTS_DIR
"""
from .bold.resampling import NONSTANDARD_REFERENCES
if name in ('single_subject_wf', 'single_subject_fmripreptest_wf'):
# for documentation purposes
subject_data = {
't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
'bold': ['/completely/made/up/path/sub-01_task-nback_bold.nii.gz']
}
else:
subject_data = collect_data(layout, subject_id, task_id, echo_idx)[0]
# Make sure we always go through these two checks
if not anat_only and subject_data['bold'] == []:
raise Exception("No BOLD images found for participant {} and task {}. "
"All workflows require BOLD images.".format(
subject_id, task_id if task_id else '<all>'))
if not subject_data['t1w']:
raise Exception("No T1w images found for participant {}. "
"All workflows require T1w images.".format(subject_id))
workflow = Workflow(name=name)
workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).
""".format(fmriprep_ver=__version__, nipype_ver=nipype_ver)
workflow.__postdesc__ = """
Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").
### References
""".format(nilearn_ver=nilearn_ver)
# Filter out standard spaces to a separate dict
std_spaces = OrderedDict([
(key, modifiers) for key, modifiers in output_spaces.items()
if key not in NONSTANDARD_REFERENCES])
inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
name='inputnode')
bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
name='bidssrc')
bids_info = pe.Node(BIDSInfo(
bids_dir=layout.root, bids_validate=False), name='bids_info')
summary = pe.Node(SubjectSummary(
std_spaces=list(std_spaces.keys()),
nstd_spaces=list(set(NONSTANDARD_REFERENCES).intersection(output_spaces.keys()))),
name='summary', run_without_submitting=True)
about = pe.Node(AboutSummary(version=__version__,
command=' '.join(sys.argv)),
name='about', run_without_submitting=True)
ds_report_summary = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='summary', keep_dtype=True),
name='ds_report_summary', run_without_submitting=True)
ds_report_about = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='about', keep_dtype=True),
name='ds_report_about', run_without_submitting=True)
# Preprocessing of T1w (includes registration to MNI)
anat_preproc_wf = init_anat_preproc_wf(
bids_root=layout.root,
debug=debug,
freesurfer=freesurfer,
hires=hires,
longitudinal=longitudinal,
name="anat_preproc_wf",
num_t1w=len(subject_data['t1w']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
output_spaces=std_spaces,
reportlets_dir=reportlets_dir,
skull_strip_fixed_seed=skull_strip_fixed_seed,
skull_strip_template=skull_strip_template,
)
workflow.connect([
(inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
(bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
(inputnode, summary, [('subjects_dir', 'subjects_dir')]),
(bidssrc, summary, [('t1w', 't1w'),
('t2w', 't2w'),
('bold', 'bold')]),
(bids_info, summary, [('subject', 'subject_id')]),
(bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
(bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
('t2w', 'inputnode.t2w'),
('roi', 'inputnode.roi'),
('flair', 'inputnode.flair')]),
(bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(summary, ds_report_summary, [('out_report', 'in_file')]),
(bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(about, ds_report_about, [('out_report', 'in_file')]),
])
# Overwrite ``out_path_base`` of smriprep's DataSinks
for node in workflow.list_node_names():
if node.split('.')[-1].startswith('ds_'):
workflow.get_node(node).interface.out_path_base = 'fmriprep'
if anat_only:
return workflow
for bold_file in subject_data['bold']:
func_preproc_wf = init_func_preproc_wf(
aroma_melodic_dim=aroma_melodic_dim,
bold2t1w_dof=bold2t1w_dof,
bold_file=bold_file,
cifti_output=cifti_output,
debug=debug,
dummy_scans=dummy_scans,
err_on_aroma_warn=err_on_aroma_warn,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
force_syn=force_syn,
freesurfer=freesurfer,
ignore=ignore,
layout=layout,
low_mem=low_mem,
medial_surface_nan=medial_surface_nan,
num_bold=len(subject_data['bold']),
omp_nthreads=omp_nthreads,
output_dir=output_dir,
output_spaces=output_spaces,
reportlets_dir=reportlets_dir,
regressors_all_comps=regressors_all_comps,
regressors_fd_th=regressors_fd_th,
regressors_dvars_th=regressors_dvars_th,
t2s_coreg=t2s_coreg,
use_aroma=use_aroma,
use_bbr=use_bbr,
use_syn=use_syn,
)
workflow.connect([
(anat_preproc_wf, func_preproc_wf,
[(('outputnode.t1_preproc', _pop), 'inputnode.t1_preproc'),
('outputnode.t1_brain', 'inputnode.t1_brain'),
('outputnode.t1_mask', 'inputnode.t1_mask'),
('outputnode.t1_seg', 'inputnode.t1_seg'),
('outputnode.t1_aseg', 'inputnode.t1_aseg'),
('outputnode.t1_aparc', 'inputnode.t1_aparc'),
('outputnode.t1_tpms', 'inputnode.t1_tpms'),
('outputnode.template', 'inputnode.template'),
('outputnode.forward_transform', 'inputnode.anat2std_xfm'),
('outputnode.reverse_transform', 'inputnode.std2anat_xfm'),
('outputnode.joint_template', 'inputnode.joint_template'),
('outputnode.joint_forward_transform', 'inputnode.joint_anat2std_xfm'),
('outputnode.joint_reverse_transform', 'inputnode.joint_std2anat_xfm'),
# Undefined if --no-freesurfer, but this is safe
('outputnode.subjects_dir', 'inputnode.subjects_dir'),
('outputnode.subject_id', 'inputnode.subject_id'),
('outputnode.t1_2_fsnative_forward_transform',
'inputnode.t1_2_fsnative_forward_transform'),
('outputnode.t1_2_fsnative_reverse_transform',
'inputnode.t1_2_fsnative_reverse_transform')]),
])
return workflow
def _prefix(subid):
if subid.startswith('sub-'):
return subid
return '-'.join(('sub', subid))
def _pop(inlist):
if isinstance(inlist, (list, tuple)):
return inlist[0]
return inlist
|
{
"content_hash": "7279a449fd73253a28a2148918b7e9f2",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 99,
"avg_line_length": 37.78971962616822,
"alnum_prop": 0.5955236799802152,
"repo_name": "oesteban/preprocessing-workflow",
"id": "bd4b977dc641d39e594d80df833167d5d0719686",
"size": "24421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmriprep/workflows/base.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146866"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
}
|
"""NApp responsible for installing a DROP ipv6 flow on switch setup."""
from kytos.core import KytosEvent, KytosNApp, log
from kytos.core.helpers import listen_to
from pyof.v0x01.common.flow_match import Match
from pyof.v0x01.controller2switch.flow_mod import FlowMod, FlowModCommand
class Main(KytosNApp):
"""Main class of of_ipv6drop NApp."""
def setup(self):
"""Replace the 'init' method for the KytosApp subclass.
The setup method is automatically called by the run method.
Users shouldn't call this method directly.
"""
pass
def execute(self):
"""Method to be runned once on app 'start' or in a loop.
The execute method is called by the run method of KytosNApp class.
Users shouldn't call this method directly.
"""
pass
@listen_to('kytos/core.switch.new')
def ipv6_drop(self, event):
"""Install a flow on the switch that drop all incoming ipv6 packets."""
switch = event.content['switch']
if switch.connection.protocol.version is not 0x01:
return
flow_mod = FlowMod()
flow_mod.command = FlowModCommand.OFPFC_ADD
flow_mod.match = Match()
flow_mod.match.dl_type = 0x86dd # ipv6
event_out = KytosEvent(name=('kytos/of_ipv6drop.messages.out.'
'ofpt_flow_mod'),
content={'destination': switch.connection,
'message': flow_mod})
log.info('Sending "IPv6 DROP" flow to switch %s', switch.id)
self.controller.buffers.msg_out.put(event_out)
def shutdown(self):
"""End of the application."""
pass
|
{
"content_hash": "6be4c2f6f816c258c510ad0c12ee013d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 35.854166666666664,
"alnum_prop": 0.6130156885531668,
"repo_name": "kytos/kyco-core-napps",
"id": "4ffd3d24c14ff6a1db8eb85ba3f8aa366ff7cec7",
"size": "1721",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "napps/kytos/of_ipv6drop/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73608"
}
],
"symlink_target": ""
}
|
"""Tests for epi_forecast_stat_mech.evaluation.run_on_data."""
import functools
from absl.testing import absltest
from epi_forecast_stat_mech import sir_sim
from epi_forecast_stat_mech.evaluation import run_on_data
import numpy as np
def create_synthetic_dataset(
seed=0,
num_epidemics=50,
num_important_cov=1,
num_unimportant_cov=2,
num_time_steps=100,
):
"""Creates synthetic data."""
np.random.seed(seed) # TODO(shoyer): use np.random.RandomState
beta_fn = functools.partial(sir_sim.generate_betas_many_cov2,
num_pred=num_important_cov,
num_not_pred=num_unimportant_cov)
trajectories = sir_sim.generate_simulations(
beta_fn,
num_epidemics,
num_time_steps=num_time_steps)
return trajectories
class TestRunOnData(absltest.TestCase):
"""Tests for run_on_data."""
def test_TrainTestSplitTime(self):
"""Verify we can split data at a time point."""
first_test_day = 20
data = create_synthetic_dataset(num_epidemics=50, num_time_steps=100)
train_data, test_data = run_on_data.train_test_split_time(
data, first_test_day)
self.assertCountEqual(['location', 'time'], test_data.dims)
self.assertLen(test_data.time, 100 - first_test_day)
self.assertLen(train_data.time, first_test_day)
np.testing.assert_array_equal(data.location, train_data.location)
np.testing.assert_array_equal(data.location, test_data.location)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "cd27285f48a345207258cb660aa81b05",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 31.6875,
"alnum_prop": 0.6811308349769888,
"repo_name": "HopkinsIDD/EpiForecastStatMech",
"id": "19cc6ed23243a6ed87eff5dab491691137a50690",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_forecast_stat_mech/evaluation/run_on_data_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "365929"
},
{
"name": "R",
"bytes": "68202"
}
],
"symlink_target": ""
}
|
import argparse
import tkinter as tk
import math
import numpy
import PIL.ImageTk
import PIL.Image
from colormath.color_objects import sRGBColor, LabColor, HSVColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
IMAGE_SIZE = (600, 400)
def cap_number(number, min_, max_):
if number < min_:
return min_
elif number > max_:
return max_
else:
return number
def barkovsky_distance_3000(hsv1, hsv2):
return (
abs(hsv1.hsv_h - hsv2.hsv_h) +
abs(hsv1.hsv_s - hsv2.hsv_s) * 0.25 +
abs(hsv1.hsv_v - hsv2.hsv_v) * 0.25
)
class RecolorWindow:
def __init__(self, image_path):
self.root = tk.Tk()
self.image = PIL.Image.open(image_path)
self._original_tk_image = None
self._result_tk_image = None
self._from_lab = None
self._to_lab = None
self.label_original = tk.Label(self.root)
self.label_original.grid(row=0, column=0, columnspan=3)
self.set_original_image(self.image)
self.label_result = tk.Label(self.root)
self.label_result.grid(row=0, column=3, columnspan=3)
self.set_result_image(self.image)
self.from_r_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='From red'
)
self.from_r_scale.grid(row=1, column=0, sticky='nsew')
self.from_g_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='From green'
)
self.from_g_scale.grid(row=1, column=1, sticky='nsew')
self.from_b_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='From blue'
)
self.from_b_scale.grid(row=1, column=2, sticky='nsew')
self.to_r_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='To red'
)
self.to_r_scale.grid(row=1, column=3, sticky='nsew')
self.to_g_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='To green'
)
self.to_g_scale.grid(row=1, column=4, sticky='nsew')
self.to_b_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='To blue'
)
self.to_b_scale.grid(row=1, column=5, sticky='nsew')
self.range_scale = tk.Scale(
self.root, from_=0, to=255,
orient=tk.HORIZONTAL, label='Range'
)
self.range_scale.grid(row=2, column=0, sticky='nsew')
self.button = tk.Button(self.root, text="Recolor", command=self.recolor)
self.button.grid(row=3, column=0, sticky='nsew')
def set_original_image(self, image):
scaled_image = image.copy()
scaled_image.thumbnail(IMAGE_SIZE)
self.image = scaled_image
self._original_tk_image = PIL.ImageTk.PhotoImage(scaled_image)
self.label_original.config(image=self._original_tk_image)
def set_result_image(self, image):
scaled_image = image.copy()
scaled_image.thumbnail(IMAGE_SIZE)
self._result_tk_image = PIL.ImageTk.PhotoImage(scaled_image)
self.label_result.config(image=self._result_tk_image)
@property
def from_color(self):
red = self.from_r_scale.get()
green = self.from_g_scale.get()
blue = self.from_b_scale.get()
return (red, green, blue)
@property
def to_color(self):
red = self.to_r_scale.get()
green = self.to_g_scale.get()
blue = self.to_b_scale.get()
return (red, green, blue)
@property
def range(self):
return self.range_scale.get()
def recolor(self):
from_lab = convert_color(sRGBColor(*self.from_color), LabColor)
from_hsv = convert_color(sRGBColor(*self.from_color), HSVColor)
self._to_lab = convert_color(sRGBColor(*self.to_color), LabColor)
range_ = self.range
from_r, from_g, from_b = self.from_color
to_r, to_g, to_b = self.to_color
width, height = self.image.width, self.image.height
pixel_count = width * height
source_pixels = numpy.asarray(self.image)
result_image = PIL.Image.new('RGB', (width, height), "black")
target_pixels = result_image.load()
pixels_done = 0
for i in range(width):
for j in range(height):
r, g, b = source_pixels[j, i]
hsv_pixel = convert_color(sRGBColor(r, g, b), HSVColor)
distance = barkovsky_distance_3000(hsv_pixel, from_hsv)
#distance = delta_e_cie2000(lab_pixel, from_lab)
# distance = math.sqrt(
# (r - from_r) ** 2 +
# (g - from_g) ** 2 +
# (b - from_b) ** 2
# )
pixels_done += 1
if pixels_done % 10000 == 0:
print('%d%%' % (pixels_done / pixel_count * 100))
if distance > range_:
target_pixels[i, j] = (r, g, b)
continue
r_diff = r - from_r
g_diff = g - from_g
b_diff = b - from_b
r_new = cap_number(to_r + r_diff, 0, 255)
g_new = cap_number(to_g + g_diff, 0, 255)
b_new = cap_number(to_b + b_diff, 0, 255)
target_pixels[i, j] = (
int(r_new), int(g_new), int(b_new)
)
self.set_result_image(result_image)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('image_path', type=str, help='Path to the image file')
args = parser.parse_args()
window = RecolorWindow(args.image_path)
window.root.mainloop()
if __name__ == '__main__':
main()
|
{
"content_hash": "05c10921b6a958bb5054d39fc31a4360",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 80,
"avg_line_length": 30.590673575129532,
"alnum_prop": 0.5523373983739838,
"repo_name": "CG2016/barkovsky_3",
"id": "db68b1236dd99e831dee8cbfcf4c9ca5ad9e0086",
"size": "5926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab2/recolor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "810"
},
{
"name": "HTML",
"bytes": "3707"
},
{
"name": "JavaScript",
"bytes": "66031"
},
{
"name": "Python",
"bytes": "45966"
}
],
"symlink_target": ""
}
|
"""This example gets the current network that you can make requests against."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201505')
# Get the current network.
network = network_service.getCurrentNetwork()
# Display results.
print ('Current network has network code \'%s\' and display name \'%s\'.'
% (network['networkCode'], network['displayName']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
{
"content_hash": "9314a1057254334345a0b08a81ce8de1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 29.875,
"alnum_prop": 0.6903765690376569,
"repo_name": "cctaylor/googleads-python-lib",
"id": "52138ce9354106078805c1235622594b43f69265",
"size": "1335",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/dfp/v201505/network_service/get_current_network.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2774292"
}
],
"symlink_target": ""
}
|
"""Test Home Assistant yaml loader."""
import io
import os
import unittest
from unittest.mock import patch
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import yaml
from homeassistant.config import YAML_CONFIG_FILE, load_yaml_config_file
from tests.common import get_test_config_dir, patch_yaml_files
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
# pylint: disable=no-self-use, invalid-name
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test duplicate dict keys."""
files = {YAML_CONFIG_FILE: 'key: thing1\nkey: thing2'}
with self.assertRaises(HomeAssistantError):
with patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
def test_unhashable_key(self):
"""Test an unhasable key."""
files = {YAML_CONFIG_FILE: 'message:\n {{ states.state }}'}
with self.assertRaises(HomeAssistantError), \
patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
def test_no_key(self):
"""Test item without an key."""
files = {YAML_CONFIG_FILE: 'a: a\nnokeyhere'}
with self.assertRaises(HomeAssistantError), \
patch_yaml_files(files):
yaml.load_yaml(YAML_CONFIG_FILE)
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
with self.assertRaises(HomeAssistantError):
with io.StringIO(conf) as file:
yaml.yaml.safe_load(file)
def test_include_yaml(self):
"""Test include yaml."""
with patch_yaml_files({'test.yaml': 'value'}):
conf = 'key: !include test.yaml'
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == "value"
with patch_yaml_files({'test.yaml': None}):
conf = 'key: !include test.yaml'
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == {}
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_list(self, mock_walk):
"""Test include dir list yaml."""
mock_walk.return_value = [
['/tmp', [], ['one.yaml', 'two.yaml']],
]
with patch_yaml_files({
'/tmp/one.yaml': 'one',
'/tmp/two.yaml': 'two',
}):
conf = "key: !include_dir_list /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert sorted(doc["key"]) == sorted(["one", "two"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_list_recursive(self, mock_walk):
"""Test include dir recursive list yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['zero.yaml']],
['/tmp/tmp2', [], ['one.yaml', 'two.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/zero.yaml': 'zero',
'/tmp/tmp2/one.yaml': 'one',
'/tmp/tmp2/two.yaml': 'two'
}):
conf = "key: !include_dir_list /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["zero", "one", "two"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_named(self, mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
['/tmp', [], ['first.yaml', 'second.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'one',
'/tmp/second.yaml': 'two'
}):
conf = "key: !include_dir_named /tmp"
correct = {'first': 'one', 'second': 'two'}
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == correct
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_named_recursive(self, mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'one',
'/tmp/tmp2/second.yaml': 'two',
'/tmp/tmp2/third.yaml': 'three'
}):
conf = "key: !include_dir_named /tmp"
correct = {'first': 'one', 'second': 'two', 'third': 'three'}
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert doc["key"] == correct
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_list(self, mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]
with patch_yaml_files({
'/tmp/first.yaml': '- one',
'/tmp/second.yaml': '- two\n- three'
}):
conf = "key: !include_dir_merge_list /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert sorted(doc["key"]) == sorted(["one", "two", "three"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_list_recursive(self, mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': '- one',
'/tmp/tmp2/second.yaml': '- two',
'/tmp/tmp2/third.yaml': '- three\n- four'
}):
conf = "key: !include_dir_merge_list /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["one", "two",
"three", "four"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_named(self, mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]
files = {
'/tmp/first.yaml': 'key1: one',
'/tmp/second.yaml': 'key2: two\nkey3: three',
}
with patch_yaml_files(files):
conf = "key: !include_dir_merge_named /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three"
}
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_named_recursive(self, mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'key1: one',
'/tmp/tmp2/second.yaml': 'key2: two',
'/tmp/tmp2/third.yaml': 'key3: three\nkey4: four'
}):
conf = "key: !include_dir_merge_named /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three",
"key4": "four"
}
@patch('homeassistant.util.yaml.open', create=True)
def test_load_yaml_encoding_error(self, mock_open):
"""Test raising a UnicodeDecodeError."""
mock_open.side_effect = UnicodeDecodeError('', b'', 1, 0, '')
self.assertRaises(HomeAssistantError, yaml.load_yaml, 'test')
def test_dump(self):
"""The that the dump method returns empty None values."""
assert yaml.dump({'a': None, 'b': 'b'}) == 'a:\nb: b\n'
FILES = {}
def load_yaml(fname, string):
"""Write a string to file and return the parsed yaml."""
FILES[fname] = string
with patch_yaml_files(FILES):
return load_yaml_config_file(fname)
class FakeKeyring():
"""Fake a keyring class."""
def __init__(self, secrets_dict):
"""Store keyring dictionary."""
self._secrets = secrets_dict
# pylint: disable=protected-access
def get_password(self, domain, name):
"""Retrieve password."""
assert domain == yaml._SECRET_NAMESPACE
return self._secrets.get(name)
class TestSecrets(unittest.TestCase):
"""Test the secrets parameter in the yaml utility."""
# pylint: disable=protected-access,invalid-name
def setUp(self):
"""Create & load secrets file."""
config_dir = get_test_config_dir()
yaml.clear_secret_cache()
self._yaml_path = os.path.join(config_dir, YAML_CONFIG_FILE)
self._secret_path = os.path.join(config_dir, yaml._SECRET_YAML)
self._sub_folder_path = os.path.join(config_dir, 'subFolder')
self._unrelated_path = os.path.join(config_dir, 'unrelated')
load_yaml(self._secret_path,
'http_pw: pwhttp\n'
'comp1_un: un1\n'
'comp1_pw: pw1\n'
'stale_pw: not_used\n'
'logger: debug\n')
self._yaml = load_yaml(self._yaml_path,
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
def tearDown(self):
"""Clean up secrets."""
yaml.clear_secret_cache()
FILES.clear()
def test_secrets_from_yaml(self):
"""Did secrets load ok."""
expected = {'api_password': 'pwhttp'}
self.assertEqual(expected, self._yaml['http'])
expected = {
'username': 'un1',
'password': 'pw1'}
self.assertEqual(expected, self._yaml['component'])
def test_secrets_from_parent_folder(self):
"""Test loading secrets from parent foler."""
expected = {'api_password': 'pwhttp'}
self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
self.assertEqual(expected, self._yaml['http'])
def test_secret_overrides_parent(self):
"""Test loading current directory secret overrides the parent."""
expected = {'api_password': 'override'}
load_yaml(os.path.join(self._sub_folder_path, yaml._SECRET_YAML),
'http_pw: override')
self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
self.assertEqual(expected, self._yaml['http'])
def test_secrets_from_unrelated_fails(self):
"""Test loading secrets from unrelated folder fails."""
load_yaml(os.path.join(self._unrelated_path, yaml._SECRET_YAML),
'test: failure')
with self.assertRaises(HomeAssistantError):
load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret test')
def test_secrets_keyring(self):
"""Test keyring fallback & get_password."""
yaml.keyring = None # Ensure its not there
yaml_str = 'http:\n api_password: !secret http_pw_keyring'
with self.assertRaises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, yaml_str)
yaml.keyring = FakeKeyring({'http_pw_keyring': 'yeah'})
_yaml = load_yaml(self._yaml_path, yaml_str)
self.assertEqual({'http': {'api_password': 'yeah'}}, _yaml)
def test_secrets_logger_removed(self):
"""Ensure logger: debug was removed."""
with self.assertRaises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, 'api_password: !secret logger')
@patch('homeassistant.util.yaml._LOGGER.error')
def test_bad_logger_value(self, mock_error):
"""Ensure logger: debug was removed."""
yaml.clear_secret_cache()
load_yaml(self._secret_path, 'logger: info\npw: abc')
load_yaml(self._yaml_path, 'api_password: !secret pw')
assert mock_error.call_count == 1, \
"Expected an error about logger: value"
|
{
"content_hash": "ed009e2af15321a5954fa989c64cf7d0",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 79,
"avg_line_length": 39.48837209302326,
"alnum_prop": 0.5240151812589975,
"repo_name": "eagleamon/home-assistant",
"id": "79fd994ce86c221b216d525c00657d123e74fdf9",
"size": "15282",
"binary": false,
"copies": "12",
"ref": "refs/heads/dev",
"path": "tests/util/test_yaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510047"
},
{
"name": "Python",
"bytes": "5066084"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
"""MPEG audio stream information and tags."""
import os
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen._util import MutagenError
from mutagen.id3 import ID3FileType, BitPaddedInt, delete
__all__ = ["MP3", "Open", "delete", "MP3"]
class error(RuntimeError, MutagenError):
pass
class HeaderNotFoundError(error, IOError):
pass
class InvalidMPEGHeader(error, IOError):
pass
# Mode values.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGInfo(StreamInfo):
"""MPEG audio stream information
Parse information about an MPEG audio file. This also reads the
Xing VBR header format.
This code was implemented based on the format documentation at
http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
Useful attributes:
* length -- audio length, in seconds
* bitrate -- audio bitrate, in bits per second
* sketchy -- if true, the file may not be valid MPEG audio
Useless attributes:
* version -- MPEG version (1, 2, 2.5)
* layer -- 1, 2, or 3
* mode -- One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3)
* protected -- whether or not the file is "protected"
* padding -- whether or not audio frames are padded
* sample_rate -- audio sample rate, in Hz
"""
# Map (version, layer) tuples to bitrates.
__BITRATE = {
(1, 1): [0, 32, 64, 96, 128, 160, 192, 224,
256, 288, 320, 352, 384, 416, 448],
(1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384],
(1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320],
(2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
144, 160, 176, 192, 224, 256],
(2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128, 144, 160],
}
__BITRATE[(2, 3)] = __BITRATE[(2, 2)]
for i in range(1, 4):
__BITRATE[(2.5, i)] = __BITRATE[(2, i)]
# Map version to sample rates.
__RATES = {
1: [44100, 48000, 32000],
2: [22050, 24000, 16000],
2.5: [11025, 12000, 8000]
}
sketchy = False
def __init__(self, fileobj, offset=None):
"""Parse MPEG stream information from a file-like object.
If an offset argument is given, it is used to start looking
for stream information and Xing headers; otherwise, ID3v2 tags
will be skipped automatically. A correct offset can make
loading files significantly faster.
"""
try:
size = os.path.getsize(fileobj.name)
except (IOError, OSError, AttributeError):
fileobj.seek(0, 2)
size = fileobj.tell()
# If we don't get an offset, try to skip an ID3v2 tag.
if offset is None:
fileobj.seek(0, 0)
idata = fileobj.read(10)
try:
id3, insize = struct.unpack('>3sxxx4s', idata)
except struct.error:
id3, insize = b'', 0
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize > 0:
offset = insize + 10
else:
offset = 0
# Try to find two valid headers (meaning, very likely MPEG data)
# at the given offset, 30% through the file, 60% through the file,
# and 90% through the file.
for i in [offset, 0.3 * size, 0.6 * size, 0.9 * size]:
try:
self.__try(fileobj, int(i), size - offset)
except error:
pass
else:
break
# If we can't find any two consecutive frames, try to find just
# one frame back at the original offset given.
else:
self.__try(fileobj, offset, size - offset, False)
self.sketchy = True
def __try(self, fileobj, offset, real_size, check_second=True):
# This is going to be one really long function; bear with it,
# because there's not really a sane point to cut it up.
fileobj.seek(offset, 0)
# We "know" we have an MPEG file if we find two frames that look like
# valid MPEG data. If we can't find them in 32k of reads, something
# is horribly wrong (the longest frame can only be about 4k). This
# is assuming the offset didn't lie.
data = fileobj.read(32768)
frame_1 = data.find(b"\xff")
while 0 <= frame_1 <= (len(data) - 4):
frame_data = struct.unpack(">I", data[frame_1:frame_1 + 4])[0]
if ((frame_data >> 16) & 0xE0) != 0xE0:
frame_1 = data.find(b"\xff", frame_1 + 2)
else:
version = (frame_data >> 19) & 0x3
layer = (frame_data >> 17) & 0x3
protection = (frame_data >> 16) & 0x1
bitrate = (frame_data >> 12) & 0xF
sample_rate = (frame_data >> 10) & 0x3
padding = (frame_data >> 9) & 0x1
# private = (frame_data >> 8) & 0x1
self.mode = (frame_data >> 6) & 0x3
# mode_extension = (frame_data >> 4) & 0x3
# copyright = (frame_data >> 3) & 0x1
# original = (frame_data >> 2) & 0x1
# emphasis = (frame_data >> 0) & 0x3
if (version == 1 or layer == 0 or sample_rate == 0x3 or
bitrate == 0 or bitrate == 0xF):
frame_1 = data.find(b"\xff", frame_1 + 2)
else:
break
else:
raise HeaderNotFoundError("can't sync to an MPEG frame")
# There is a serious problem here, which is that many flags
# in an MPEG header are backwards.
self.version = [2.5, None, 2, 1][version]
self.layer = 4 - layer
self.protected = not protection
self.padding = bool(padding)
self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate]
self.bitrate *= 1000
self.sample_rate = self.__RATES[self.version][sample_rate]
if self.layer == 1:
frame_length = (
(12 * self.bitrate // self.sample_rate) + padding) * 4
frame_size = 384
elif self.version >= 2 and self.layer == 3:
frame_length = (72 * self.bitrate // self.sample_rate) + padding
frame_size = 576
else:
frame_length = (144 * self.bitrate // self.sample_rate) + padding
frame_size = 1152
if check_second:
possible = int(frame_1 + frame_length)
if possible > len(data) + 4:
raise HeaderNotFoundError("can't sync to second MPEG frame")
try:
frame_data = struct.unpack(
">H", data[possible:possible + 2])[0]
except struct.error:
raise HeaderNotFoundError("can't sync to second MPEG frame")
if (frame_data & 0xFFE0) != 0xFFE0:
raise HeaderNotFoundError("can't sync to second MPEG frame")
self.length = 8 * real_size / float(self.bitrate)
# Try to find/parse the Xing header, which trumps the above length
# and bitrate calculation.
fileobj.seek(offset, 0)
data = fileobj.read(32768)
try:
xing = data[:-4].index(b"Xing")
except ValueError:
# Try to find/parse the VBRI header, which trumps the above length
# calculation.
try:
vbri = data[:-24].index(b"VBRI")
except ValueError:
pass
else:
# If a VBRI header was found, this is definitely MPEG audio.
self.sketchy = False
vbri_version = struct.unpack('>H', data[vbri + 4:vbri + 6])[0]
if vbri_version == 1:
frame_count = struct.unpack(
'>I', data[vbri + 14:vbri + 18])[0]
samples = float(frame_size * frame_count)
self.length = (samples / self.sample_rate) or self.length
else:
# If a Xing header was found, this is definitely MPEG audio.
self.sketchy = False
flags = struct.unpack('>I', data[xing + 4:xing + 8])[0]
if flags & 0x1:
frame_count = struct.unpack('>I', data[xing + 8:xing + 12])[0]
samples = float(frame_size * frame_count)
self.length = (samples / self.sample_rate) or self.length
if flags & 0x2:
bitrate_data = struct.unpack(
'>I', data[xing + 12:xing + 16])[0]
self.bitrate = int((bitrate_data * 8) // self.length)
def pprint(self):
s = "MPEG %s layer %d, %d bps, %s Hz, %.2f seconds" % (
self.version, self.layer, self.bitrate, self.sample_rate,
self.length)
if self.sketchy:
s += " (sketchy)"
return s
class MP3(ID3FileType):
"""An MPEG audio (usually MPEG-1 Layer 3) file.
:ivar info: :class:`MPEGInfo`
:ivar tags: :class:`ID3 <mutagen.id3.ID3>`
"""
_Info = MPEGInfo
_mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"]
@property
def mime(self):
l = self.info.layer
return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime
@staticmethod
def score(filename, fileobj, header_data):
filename = filename.lower()
return (header_data.startswith(b"ID3") * 2 +
endswith(filename, b".mp3") +
endswith(filename, b".mp2") + endswith(filename, b".mpg") +
endswith(filename, b".mpeg"))
Open = MP3
class EasyMP3(MP3):
"""Like MP3, but uses EasyID3 for tags.
:ivar info: :class:`MPEGInfo`
:ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>`
"""
from mutagen.easyid3 import EasyID3 as ID3
ID3 = ID3
|
{
"content_hash": "37cd5147958e0829ecd2389698516a4d",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 78,
"avg_line_length": 35.405693950177934,
"alnum_prop": 0.5358327470097497,
"repo_name": "jwayneroth/mpd-touch",
"id": "afbfd9535ee1b76dc00d02fbd6edbdaf0c840c73",
"size": "10200",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mutagen/mp3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "870833"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
import time
from utils.cu_io import IO
from utils.cu_config import config
from managers.products import Products
from managers.listings import Listings
from managers.rates import Rates
from collections import OrderedDict
__author__ = 'Stone'
# Module 'match' -- common domain service
"""service for common records linkage operation
importing this match class directly, you can trigger
the matching process
"""
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
message = config()['method_messages'].get(method.__name__)
if message is not None:
print (message % (te - ts))
else:
print ('%r %2.2f sec' % (method.__name__, te - ts))
return result
return timed
# class as a service of the matching process
class Match:
def __init__(self, conf=None):
"""
Initialize config from a cu_config
:param conf: load a conf or used the default
:return:
"""
if conf is None:
conf = config()
if conf is None:
raise ValueError("config not found")
self.config = conf
self.io = IO()
self.pm_dict = None
self.count = 0
@timeit
def initial(self):
"""
initialize data from a JSON files and prepossess the data
:return: products and listings instances to be operated on
"""
products = Products.from_json(self.config['input_files']['products'], self.config['data_path']).to_lower_case()
rates = Rates.from_json(products.get_max_year(), self.config['input_files']['rates'], self.config['data_path'])
listings = Listings.from_json(self.config['input_files']['listings'],
self.config['data_path']).convert_to_usd_price(rates).add_lower_case()
self.cache_pm_dict(listings, products)
return products, listings
def cache_pm_dict(self, listings, products=None):
"""
load cache of list of index by pm if spot the cache file
or create new dict and cache it
:param listings: listing data to be divided
:param products: products to provide valid manufacturers
:return:
"""
if self.pm_dict is None:
self.pm_dict = {}
count = 0
pm_dict = self.io.load_cache(self.config['input_files']['cache'], self.config['data_path'])
if products is not None and pm_dict is None:
self.pm_dict = listings.get_pm_dict(products.get_manufacturers(), self.config["pm_mapping"])
pm_dict = {}
for k, v in self.pm_dict.items():
pm_dict[k] = v.index_to_list()
count += v.size()
self.io.write_to_json(pm_dict, self.config['input_files']['cache'], self.config['data_path'])
else:
for k, v in pm_dict.items():
self.pm_dict[k] = listings.find_by_index_set(v)
count += self.pm_dict[k].size()
self.count = count
@timeit
def matching(self, products, listings):
"""
associates a Product with a list of matching Listing objects
more concrete explanation please check the docstrings of
filtered_by_pm , filtered_by_model and filtered_by_price and methods in cu_data
:param products: products to be associated
:param listings: all listings found in the file
:return: the python dict object of the associated listings and count of the valid records
"""
count, res = 0, []
for i, product in products.iter_rows():
# find current product manufacturer(pm), model, product_name(pn), family(only when presents in pn)
pm, model, pn = product.manufacturer, product.model, product.product_name
family = None if (type(product.family) is not unicode or product.family not in pn.lower()) \
else product.family
# if pm_listings of this product manufacturer was cached, fetch it.
pm_listings = self.pm_dict.get(pm)
# get matched items
pn_listings = pm_listings.filtered_by_pm(pm).filtered_by_model(
model).filtered_by_price(pm, family)
# cache the unmatched items
self.pm_dict[pm] = pm_listings.exclude(pn_listings)
# count the listings
count += pn_listings.size()
# put the data into result
res.append(OrderedDict([("product_name", pn), ("listings", pn_listings.to_ordered_dict())]))
# write result into a file
self.io.write_to_json_line(res, self.config['output_file'], self.config['data_path'])
self.pm_dict = None
self.cache_pm_dict(listings, products)
return count
def get_valid_listings_size(self):
"""
find all records with a valid manufacturer either in manufacturer col or title col
greedy search
:return: count of the valid records
"""
return self.count
|
{
"content_hash": "9d6d340fe39aba5bfd6af2c3545f80e9",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 119,
"avg_line_length": 38.44117647058823,
"alnum_prop": 0.5837796480489671,
"repo_name": "alpenliebe/Sortable",
"id": "ef3d0842fae09e6616cb17428e6b847d7a55ddc4",
"size": "5228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1587400"
},
{
"name": "Shell",
"bytes": "1537"
}
],
"symlink_target": ""
}
|
def install_dependencies_ode():
from biokit.rtools import package
pm = package.RPackageManager()
if "Rsge" not in pm.installed.index:
#rtools.install_packages("http://cran.r-project.org/src/contrib/Archive/Rsge/Rsge_0.6.3.tar.gz")
pm.install("Rsge")
#pm.install_packages(["snowfall", "Rsolnp"], repos=None)
if "MEIGOR" not in pm.installed.index:
pm.install_packages("http://www.cellnopt.org/downloads/MEIGOR_0.99.6_svn3222.tar.gz",
type="source")
from .cnorode import CNORode
|
{
"content_hash": "cd5602cdb6be52f5cec561793db3ad53",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 104,
"avg_line_length": 33.8125,
"alnum_prop": 0.6654343807763401,
"repo_name": "cellnopt/cellnopt",
"id": "9b76de4809efadbed4a0fcb901a850536f3b6eca",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cno/ode/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "11056"
},
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Jupyter Notebook",
"bytes": "3748599"
},
{
"name": "Python",
"bytes": "845977"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
import module1
print(module1.sys)
print(module1.name)
print(module1.func)
print(module1.klass)
|
{
"content_hash": "c7a1e320c711830987d8af7aaaeae986",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 26,
"avg_line_length": 20.428571428571427,
"alnum_prop": 0.6083916083916084,
"repo_name": "ordinary-developer/education",
"id": "a3668ccbee1bf89d1303a28ed637041c6708806b",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/m_lutz-learning_py-5_ed/code/part_5-modules/ch_23-coding_basics/09-namespaces/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2435"
},
{
"name": "C",
"bytes": "109331"
},
{
"name": "C#",
"bytes": "163418"
},
{
"name": "C++",
"bytes": "2504348"
},
{
"name": "CMake",
"bytes": "24185"
},
{
"name": "GLSL",
"bytes": "10134"
},
{
"name": "HTML",
"bytes": "58556"
},
{
"name": "JavaScript",
"bytes": "46010"
},
{
"name": "Makefile",
"bytes": "748"
},
{
"name": "Python",
"bytes": "189834"
},
{
"name": "QML",
"bytes": "191657"
},
{
"name": "QMake",
"bytes": "265220"
},
{
"name": "Scheme",
"bytes": "32484"
},
{
"name": "Shell",
"bytes": "2412"
}
],
"symlink_target": ""
}
|
"""Provides a :class:`.Key` for Google Cloud Datastore.
.. testsetup:: *
from google.cloud import ndb
A key encapsulates the following pieces of information, which together
uniquely designate a (possible) entity in Google Cloud Datastore:
* a Google Cloud Platform project (a string)
* a list of one or more ``(kind, id)`` pairs where ``kind`` is a string
and ``id`` is either a string or an integer
* an optional namespace (a string)
The application ID must always be part of the key, but since most
applications can only access their own entities, it defaults to the
current application ID and you rarely need to worry about it.
The namespace designates a top-level partition of the key space for a
particular application. If you've never heard of namespaces, you can
safely ignore this feature.
Most of the action is in the ``(kind, id)`` pairs. A key must have at
least one ``(kind, id)`` pair. The last ``(kind, id)`` pair gives the kind
and the ID of the entity that the key refers to, the others merely
specify a "parent key".
The kind is a string giving the name of the model class used to
represent the entity. In more traditional databases this would be
the table name. A model class is a Python class derived from
:class:`.Model`. Only the class name itself is used as the kind. This means
all your model classes must be uniquely named within one application. You can
override this on a per-class basis.
The ID is either a string or an integer. When the ID is a string, the
application is in control of how it assigns IDs. For example, you
could use an email address as the ID for Account entities.
To use integer IDs, it's common to let the datastore choose a unique ID for
an entity when first inserted into the datastore. The ID can be set to
:data:`None` to represent the key for an entity that hasn't yet been
inserted into the datastore. The completed key (including the assigned ID)
will be returned after the entity is successfully inserted into the datastore.
A key for which the ID of the last ``(kind, id)`` pair is set to :data:`None`
is called an **incomplete key** or **partial key**. Such keys can only be used
to insert entities into the datastore.
A key with exactly one ``(kind, id)`` pair is called a top level key or a
root key. Top level keys are also used as entity groups, which play a
role in transaction management.
If there is more than one ``(kind, id)`` pair, all but the last pair
represent the "ancestor path", also known as the key of the "parent entity".
Other constraints:
* Kinds and string IDs must not be empty and must be at most 1500 bytes
long (after UTF-8 encoding)
* Integer IDs must be at least ``1`` and at most ``2**63 - 1`` (i.e. the
positive part of the range for a 64-bit signed integer)
For more info about namespaces, see the multitenancy `overview`_.
In the "legacy" Google App Engine runtime, the default namespace could be
set via the namespace manager (``google.appengine.api.namespace_manager``).
On the gVisor Google App Engine runtime (e.g. Python 3.7), the namespace
manager is not available so the default is to have an unset or empty
namespace. To explicitly select the empty namespace pass ``namespace=""``.
.. _overview: https://cloud.google.com/appengine/docs/standard/python/multitenancy/
"""
import base64
import os
from google.cloud.datastore import _app_engine_key_pb2
from google.cloud.datastore import key as _key_module
import google.cloud.datastore
from google.cloud.ndb import exceptions
__all__ = ["Key"]
_APP_ID_ENVIRONMENT = "APPLICATION_ID"
_APP_ID_DEFAULT = "_"
_WRONG_TYPE = "Cannot construct Key reference on non-Key class; received {!r}"
_REFERENCE_APP_MISMATCH = (
"Key reference constructed uses a different app {!r} than "
"the one specified {!r}"
)
_REFERENCE_NAMESPACE_MISMATCH = (
"Key reference constructed uses a different namespace {!r} than "
"the one specified {!r}"
)
_INVALID_ID_TYPE = "Key ID must be a string or a number; received {!r}"
_NO_LEGACY = "The `google.appengine.ext.db` module is not available."
_MAX_INTEGER_ID = 0x7FFFFFFFFFFFFFFF # 2 ** 63 - 1
_MAX_KEYPART_BYTES = 1500
_BAD_KIND = (
"Key kind string must be a non-empty string up to {:d} bytes; received {}"
)
_BAD_INTEGER_ID = (
"Key ID number is outside of range [1, 2^63 - 1]; received {:d}"
)
_BAD_STRING_ID = (
"Key name strings must be non-empty strings up to {:d} bytes; received {}"
)
class Key:
"""An immutable datastore key.
For flexibility and convenience, multiple constructor signatures are
supported.
The primary way to construct a key is using positional arguments:
.. testsetup:: *
kind1, id1 = "Parent", "C"
kind2, id2 = "Child", 42
.. doctest:: key-constructor-primary
>>> ndb.Key(kind1, id1, kind2, id2)
Key('Parent', 'C', 'Child', 42)
This is shorthand for either of the following two longer forms:
.. doctest:: key-constructor-flat-or-pairs
>>> ndb.Key(pairs=[(kind1, id1), (kind2, id2)])
Key('Parent', 'C', 'Child', 42)
>>> ndb.Key(flat=[kind1, id1, kind2, id2])
Key('Parent', 'C', 'Child', 42)
Either of the above constructor forms can additionally pass in another
key via the ``parent`` keyword. The ``(kind, id)`` pairs of the parent key
are inserted before the ``(kind, id)`` pairs passed explicitly.
.. doctest:: key-constructor-parent
>>> parent = ndb.Key(kind1, id1)
>>> parent
Key('Parent', 'C')
>>> ndb.Key(kind2, id2, parent=parent)
Key('Parent', 'C', 'Child', 42)
You can also construct a Key from a "url-safe" encoded string:
.. doctest:: key-constructor-urlsafe
>>> ndb.Key(urlsafe=b"agdleGFtcGxlcgsLEgRLaW5kGLkKDA")
Key('Kind', 1337, app='example')
For rare use cases the following constructors exist:
.. testsetup:: key-constructor-rare
from google.cloud.datastore import _app_engine_key_pb2
reference = _app_engine_key_pb2.Reference(
app="example",
path=_app_engine_key_pb2.Path(element=[
_app_engine_key_pb2.Path.Element(type="Kind", id=1337),
]),
)
.. doctest:: key-constructor-rare
>>> # Passing in a low-level Reference object
>>> reference
app: "example"
path {
Element {
type: "Kind"
id: 1337
}
}
<BLANKLINE>
>>> ndb.Key(reference=reference)
Key('Kind', 1337, app='example')
>>> # Passing in a serialized low-level Reference
>>> serialized = reference.SerializeToString()
>>> serialized
b'j\\x07exampler\\x0b\\x0b\\x12\\x04Kind\\x18\\xb9\\n\\x0c'
>>> ndb.Key(serialized=serialized)
Key('Kind', 1337, app='example')
>>> # For unpickling, the same as ndb.Key(**kwargs)
>>> kwargs = {"pairs": [("Cheese", "Cheddar")], "namespace": "good"}
>>> ndb.Key(kwargs)
Key('Cheese', 'Cheddar', namespace='good')
The "url-safe" string is really a websafe-base64-encoded serialized
``Reference``, but it's best to think of it as just an opaque unique
string.
If a ``Reference`` is passed (using one of the ``reference``,
``serialized`` or ``urlsafe`` keywords), the positional arguments and
``namespace`` must match what is already present in the ``Reference``
(after decoding if necessary). The parent keyword cannot be combined with
a ``Reference`` in any form.
Keys are immutable, which means that a Key object cannot be modified
once it has been created. This is enforced by the implementation as
well as Python allows.
Keys also support interaction with the datastore; the methods :meth:`get`,
:meth:`get_async`, :meth:`delete` and :meth:`delete_async` are
the only ones that engage in any kind of I/O activity.
Keys may be pickled.
Subclassing Key is best avoided; it would be hard to get right.
Args:
path_args (Union[Tuple[str, ...], Tuple[Dict]]): Either a tuple of
``(kind, id)`` pairs or a single dictionary containing only keyword
arguments.
reference (Optional[\
~google.cloud.datastore._app_engine_key_pb2.Reference]): A
reference protobuf representing a key.
serialized (Optional[bytes]): A reference protobuf serialized to bytes.
urlsafe (Optional[str]): A reference protobuf serialized to bytes. The
raw bytes are then converted to a websafe base64-encoded string.
pairs (Optional[Iterable[Tuple[str, Union[str, int]]]]): An iterable
of ``(kind, id)`` pairs. If this argument is used, then
``path_args`` should be empty.
flat (Optional[Iterable[Union[str, int]]]): An iterable of the
``(kind, id)`` pairs but flattened into a single value. For
example, the pairs ``[("Parent", 1), ("Child", "a")]`` would be
flattened to ``["Parent", 1, "Child", "a"]``.
app (Optional[str]): The Google Cloud Platform project (previously
on Google App Engine, this was called the Application ID).
namespace (Optional[str]): The namespace for the key.
parent (Optional[Key]): The parent of the key being
constructed. If provided, the key path will be **relative** to the
parent key's path.
Raises:
TypeError: If none of ``reference``, ``serialized``, ``urlsafe``,
``pairs`` or ``flat`` is provided as an argument and no positional
arguments were given with the path.
"""
__slots__ = ("_key", "_reference")
def __new__(cls, *path_args, **kwargs):
_constructor_handle_positional(path_args, kwargs)
instance = super(Key, cls).__new__(cls)
if (
"reference" in kwargs
or "serialized" in kwargs
or "urlsafe" in kwargs
):
ds_key, reference = _parse_from_ref(cls, **kwargs)
elif "pairs" in kwargs or "flat" in kwargs:
ds_key = _parse_from_args(**kwargs)
reference = None
else:
raise TypeError(
"Key() cannot create a Key instance without arguments."
)
instance._key = ds_key
instance._reference = reference
return instance
@classmethod
def _from_ds_key(cls, ds_key):
"""Factory constructor for a :class:`~google.cloud.datastore.key.Key`.
This bypasses the actual constructor and directly sets the ``_key``
attribute to ``ds_key``.
Args:
ds_key (~google.cloud.datastore.key.Key): A key from
``google-cloud-datastore``.
Returns:
Key: The constructed :class:`Key`.
"""
key = super(Key, cls).__new__(cls)
key._key = ds_key
key._reference = None
return key
def __repr__(self):
"""String representation used by :class:`str() <str>` and :func:`repr`.
We produce a short string that conveys all relevant information,
suppressing app and namespace when they are equal to the default.
In many cases, this string should be able to be used to invoke the
constructor.
For example:
.. doctest:: key-repr
>>> key = ndb.Key("hi", 100)
>>> repr(key)
"Key('hi', 100)"
>>>
>>> key = ndb.Key(
... "bye", "hundred", app="specific", namespace="space"
... )
>>> str(key)
"Key('bye', 'hundred', app='specific', namespace='space')"
"""
args = ["{!r}".format(item) for item in self.flat()]
if self.app() != _project_from_app(None):
args.append("app={!r}".format(self.app()))
if self.namespace() is not None:
args.append("namespace={!r}".format(self.namespace()))
return "Key({})".format(", ".join(args))
def __str__(self):
"""Alias for :meth:`__repr__`."""
return self.__repr__()
def __hash__(self):
"""Hash value, for use in dictionary lookups.
.. note::
This ignores ``app`` and ``namespace``. Since :func:`hash` isn't
expected to return a unique value (it just reduces the chance of
collision), this doesn't try to increase entropy by including other
values. The primary concern is that hashes of equal keys are
equal, not the other way around.
"""
return hash(self.pairs())
def _tuple(self):
"""Helper to return an orderable tuple."""
return (self.app(), self.namespace(), self.pairs())
def __eq__(self, other):
"""Equality comparison operation."""
if not isinstance(other, Key):
return NotImplemented
return self._tuple() == other._tuple()
def __ne__(self, other):
"""Inequality comparison operation."""
return not self == other
def __lt__(self, other):
"""Less than ordering."""
if not isinstance(other, Key):
return NotImplemented
return self._tuple() < other._tuple()
def __le__(self, other):
"""Less than or equal ordering."""
if not isinstance(other, Key):
return NotImplemented
return self._tuple() <= other._tuple()
def __gt__(self, other):
"""Greater than ordering."""
return not self <= other
def __ge__(self, other):
"""Greater than or equal ordering."""
return not self < other
def __getstate__(self):
"""Private API used for pickling.
Returns:
Tuple[Dict[str, Any]]: A tuple containing a single dictionary of
state to pickle. The dictionary has three keys ``pairs``, ``app``
and ``namespace``.
"""
return (
{
"pairs": self.pairs(),
"app": self.app(),
"namespace": self.namespace(),
},
)
def __setstate__(self, state):
"""Private API used for unpickling.
Args:
state (Tuple[Dict[str, Any]]): A tuple containing a single
dictionary of pickled state. This should match the signature
returned from :func:`__getstate__`, in particular, it should
have three keys ``pairs``, ``app`` and ``namespace``.
Raises:
TypeError: If the ``state`` does not have length 1.
TypeError: If the single element in ``state`` is not a dictionary.
"""
if len(state) != 1:
msg = "Invalid state length, expected 1; received {:d}".format(
len(state)
)
raise TypeError(msg)
kwargs = state[0]
if not isinstance(kwargs, dict):
raise TypeError(
"Key accepts a dict of keyword arguments as state; "
"received {!r}".format(kwargs)
)
flat = _get_path(None, kwargs["pairs"])
project = _project_from_app(kwargs["app"])
self._key = _key_module.Key(
*flat, project=project, namespace=kwargs["namespace"]
)
self._reference = None
def __getnewargs__(self):
"""Private API used to specify ``__new__`` arguments when unpickling.
.. note::
This method is provided for backwards compatibility, though it
isn't needed.
Returns:
Tuple[Dict[str, Any]]: A tuple containing a single dictionary of
state to pickle. The dictionary has three keys ``pairs``, ``app``
and ``namespace``.
"""
return (
{
"pairs": self.pairs(),
"app": self.app(),
"namespace": self.namespace(),
},
)
def parent(self):
"""Parent key constructed from all but the last ``(kind, id)`` pairs.
If there is only one ``(kind, id)`` pair, return :data:`None`.
.. doctest:: key-parent
>>> key = ndb.Key(
... pairs=[
... ("Purchase", "Food"),
... ("Type", "Drink"),
... ("Coffee", 11),
... ]
... )
>>> parent = key.parent()
>>> parent
Key('Purchase', 'Food', 'Type', 'Drink')
>>>
>>> grandparent = parent.parent()
>>> grandparent
Key('Purchase', 'Food')
>>>
>>> grandparent.parent() is None
True
"""
if self._key.parent is None:
return None
return Key._from_ds_key(self._key.parent)
def root(self):
"""The root key.
This is either the current key or the highest parent.
.. doctest:: key-root
>>> key = ndb.Key("a", 1, "steak", "sauce")
>>> root_key = key.root()
>>> root_key
Key('a', 1)
>>> root_key.root() is root_key
True
"""
root_key = self._key
while root_key.parent is not None:
root_key = root_key.parent
if root_key is self._key:
return self
return Key._from_ds_key(root_key)
def namespace(self):
"""The namespace for the key, if set.
.. doctest:: key-namespace
>>> key = ndb.Key("A", "B")
>>> key.namespace() is None
True
>>>
>>> key = ndb.Key("A", "B", namespace="rock")
>>> key.namespace()
'rock'
"""
return self._key.namespace
def app(self):
"""The project ID for the key.
.. warning::
This **may** differ from the original ``app`` passed in to the
constructor. This is because prefixed application IDs like
``s~example`` are "legacy" identifiers from Google App Engine.
They have been replaced by equivalent project IDs, e.g. here it
would be ``example``.
.. doctest:: key-app
>>> key = ndb.Key("A", "B", app="s~example")
>>> key.app()
'example'
>>>
>>> key = ndb.Key("A", "B", app="example")
>>> key.app()
'example'
"""
return self._key.project
def id(self):
"""The string or integer ID in the last ``(kind, id)`` pair, if any.
.. doctest:: key-id
>>> key_int = ndb.Key("A", 37)
>>> key_int.id()
37
>>> key_str = ndb.Key("A", "B")
>>> key_str.id()
'B'
>>> key_partial = ndb.Key("A", None)
>>> key_partial.id() is None
True
"""
return self._key.id_or_name
def string_id(self):
"""The string ID in the last ``(kind, id)`` pair, if any.
.. doctest:: key-string-id
>>> key_int = ndb.Key("A", 37)
>>> key_int.string_id() is None
True
>>> key_str = ndb.Key("A", "B")
>>> key_str.string_id()
'B'
>>> key_partial = ndb.Key("A", None)
>>> key_partial.string_id() is None
True
"""
return self._key.name
def integer_id(self):
"""The string ID in the last ``(kind, id)`` pair, if any.
.. doctest:: key-integer-id
>>> key_int = ndb.Key("A", 37)
>>> key_int.integer_id()
37
>>> key_str = ndb.Key("A", "B")
>>> key_str.integer_id() is None
True
>>> key_partial = ndb.Key("A", None)
>>> key_partial.integer_id() is None
True
"""
return self._key.id
def pairs(self):
"""The ``(kind, id)`` pairs for the key.
.. doctest:: key-pairs
>>> key = ndb.Key("Satellite", "Moon", "Space", "Dust")
>>> key.pairs()
(('Satellite', 'Moon'), ('Space', 'Dust'))
>>>
>>> partial_key = ndb.Key("Known", None)
>>> partial_key.pairs()
(('Known', None),)
"""
flat = self.flat()
pairs = []
for i in range(0, len(flat), 2):
pairs.append(flat[i : i + 2])
return tuple(pairs)
def flat(self):
"""The flat path for the key.
.. doctest:: key-flat
>>> key = ndb.Key("Satellite", "Moon", "Space", "Dust")
>>> key.flat()
('Satellite', 'Moon', 'Space', 'Dust')
>>>
>>> partial_key = ndb.Key("Known", None)
>>> partial_key.flat()
('Known', None)
"""
flat_path = self._key.flat_path
if len(flat_path) % 2 == 1:
flat_path += (None,)
return flat_path
def kind(self):
"""The kind of the entity referenced.
This comes from the last ``(kind, id)`` pair.
.. doctest:: key-kind
>>> key = ndb.Key("Satellite", "Moon", "Space", "Dust")
>>> key.kind()
'Space'
>>>
>>> partial_key = ndb.Key("Known", None)
>>> partial_key.kind()
'Known'
"""
return self._key.kind
def reference(self):
"""The ``Reference`` protobuf object for this key.
The return value will be stored on the current key, so the caller
promises not to mutate it.
.. doctest:: key-reference
>>> key = ndb.Key("Trampoline", 88, app="xy", namespace="zt")
>>> key.reference()
app: "xy"
path {
Element {
type: "Trampoline"
id: 88
}
}
name_space: "zt"
<BLANKLINE>
"""
if self._reference is None:
self._reference = _app_engine_key_pb2.Reference(
app=self._key.project,
path=_to_legacy_path(self._key.path),
name_space=self._key.namespace,
)
return self._reference
def serialized(self):
"""A ``Reference`` protobuf serialized to bytes.
.. doctest:: key-serialized
>>> key = ndb.Key("Kind", 1337, app="example")
>>> key.serialized()
b'j\\x07exampler\\x0b\\x0b\\x12\\x04Kind\\x18\\xb9\\n\\x0c'
"""
reference = self.reference()
return reference.SerializeToString()
def urlsafe(self):
"""A ``Reference`` protobuf encoded as urlsafe base 64.
.. doctest:: key-urlsafe
>>> key = ndb.Key("Kind", 1337, app="example")
>>> key.urlsafe()
b'agdleGFtcGxlcgsLEgRLaW5kGLkKDA'
"""
raw_bytes = self.serialized()
return base64.urlsafe_b64encode(raw_bytes).strip(b"=")
def get(self, **ctx_options):
"""Synchronously get the entity for this key.
Returns the retrieved :class:`.Model` or :data:`None` if there is no
such entity.
Args:
ctx_options (Dict[str, Any]): The context options for the request.
For example, ``{"read_policy": EVENTUAL_CONSISTENCY}``.
Raises:
NotImplementedError: Always. The method has not yet been
implemented.
"""
raise NotImplementedError
def get_async(self, **ctx_options):
"""Asynchronously get the entity for this key.
The result for the returned future with either by the retrieved
:class:`.Model` or :data:`None` if there is no such entity.
Args:
ctx_options (Dict[str, Any]): The context options for the request.
For example, ``{"read_policy": EVENTUAL_CONSISTENCY}``.
Raises:
NotImplementedError: Always. The method has not yet been
implemented.
"""
raise NotImplementedError
def delete(self, **ctx_options):
"""Synchronously delete the entity for this key.
This is a no-op if no such entity exists.
Args:
ctx_options (Dict[str, Any]): The context options for the request.
For example, ``{"deadline": 5}``.
Raises:
NotImplementedError: Always. The method has not yet been
implemented.
"""
raise NotImplementedError
def delete_async(self, **ctx_options):
"""Schedule deletion of the entity for this key.
This result of the returned a future becomes available once the
deletion is complete. In all cases the future's result is :data:`None`
(i.e. there is no way to tell whether the entity existed or not).
Args:
ctx_options (Dict[str, Any]): The context options for the request.
For example, ``{"deadline": 5}``.
Raises:
NotImplementedError: Always. The method has not yet been
implemented.
"""
raise NotImplementedError
@classmethod
def from_old_key(cls, old_key):
"""Factory constructor to convert from an "old"-style datastore key.
The ``old_key`` was expected to be a ``google.appengine.ext.db.Key``
(which was an alias for ``google.appengine.api.datastore_types.Key``).
However, the ``google.appengine.ext.db`` module was part of the legacy
Google App Engine runtime and is not generally available.
Raises:
NotImplementedError: Always.
"""
raise NotImplementedError(_NO_LEGACY)
def to_old_key(self):
"""Convert to an "old"-style datastore key.
See :meth:`from_old_key` for more information on why this method
is not supported.
Raises:
NotImplementedError: Always.
"""
raise NotImplementedError(_NO_LEGACY)
def _project_from_app(app, allow_empty=False):
"""Convert a legacy Google App Engine app string to a project.
Args:
app (str): The application value to be used. If the caller passes
:data:`None` then this will use the ``APPLICATION_ID`` environment
variable to determine the running application.
allow_empty (bool): Flag determining if an empty (i.e. :data:`None`)
project is allowed. Defaults to :data:`False`.
Returns:
str: The cleaned project.
"""
if app is None:
if allow_empty:
return None
app = os.environ.get(_APP_ID_ENVIRONMENT, _APP_ID_DEFAULT)
# NOTE: This is the same behavior as in the helper
# ``google.cloud.datastore.key._clean_app()``.
parts = app.split("~", 1)
return parts[-1]
def _from_reference(reference, app, namespace):
"""Convert Reference protobuf to :class:`~google.cloud.datastore.key.Key`.
This is intended to work with the "legacy" representation of a
datastore "Key" used within Google App Engine (a so-called
"Reference"). This assumes that ``serialized`` was created within an App
Engine app via something like ``ndb.Key(...).reference()``.
However, the actual type used here is different since this code will not
run in the App Engine standard environment where the type was
``google.appengine.datastore.entity_pb.Reference``.
Args:
serialized (bytes): A reference protobuf serialized to bytes.
app (Optional[str]): The application ID / project ID for the
constructed key.
namespace (Optional[str]): The namespace for the constructed key.
Returns:
google.cloud.datastore.key.Key: The key corresponding to
``serialized``.
Raises:
RuntimeError: If ``app`` is not :data:`None`, but not the same as
``reference.app``.
RuntimeError: If ``namespace`` is not :data:`None`, but not the same as
``reference.name_space``.
"""
project = _project_from_app(reference.app)
if app is not None:
if _project_from_app(app) != project:
raise RuntimeError(
_REFERENCE_APP_MISMATCH.format(reference.app, app)
)
parsed_namespace = _key_module._get_empty(reference.name_space, "")
if namespace is not None:
if namespace != parsed_namespace:
raise RuntimeError(
_REFERENCE_NAMESPACE_MISMATCH.format(
reference.name_space, namespace
)
)
_key_module._check_database_id(reference.database_id)
flat_path = _key_module._get_flat_path(reference.path)
return google.cloud.datastore.Key(
*flat_path, project=project, namespace=parsed_namespace
)
def _from_serialized(serialized, app, namespace):
"""Convert serialized protobuf to :class:`~google.cloud.datastore.key.Key`.
This is intended to work with the "legacy" representation of a
datastore "Key" used within Google App Engine (a so-called
"Reference"). This assumes that ``serialized`` was created within an App
Engine app via something like ``ndb.Key(...).serialized()``.
Args:
serialized (bytes): A reference protobuf serialized to bytes.
app (Optional[str]): The application ID / project ID for the
constructed key.
namespace (Optional[str]): The namespace for the constructed key.
Returns:
Tuple[google.cloud.datastore.key.Key, .Reference]: The key
corresponding to ``serialized`` and the Reference protobuf.
"""
reference = _app_engine_key_pb2.Reference()
reference.ParseFromString(serialized)
return _from_reference(reference, app, namespace), reference
def _from_urlsafe(urlsafe, app, namespace):
"""Convert urlsafe string to :class:`~google.cloud.datastore.key.Key`.
.. note::
This is borrowed from
:meth:`~google.cloud.datastore.key.Key.from_legacy_urlsafe`.
It is provided here, rather than calling that method, since component
parts need to be re-used.
This is intended to work with the "legacy" representation of a
datastore "Key" used within Google App Engine (a so-called
"Reference"). This assumes that ``urlsafe`` was created within an App
Engine app via something like ``ndb.Key(...).urlsafe()``.
Args:
urlsafe (Union[bytes, str]): The base64 encoded (ASCII) string
corresponding to a datastore "Key" / "Reference".
app (Optional[str]): The application ID / project ID for the
constructed key.
namespace (Optional[str]): The namespace for the constructed key.
Returns:
Tuple[google.cloud.datastore.key.Key, .Reference]: The key
corresponding to ``urlsafe`` and the Reference protobuf.
"""
if isinstance(urlsafe, str):
urlsafe = urlsafe.encode("ascii")
padding = b"=" * (-len(urlsafe) % 4)
urlsafe += padding
raw_bytes = base64.urlsafe_b64decode(urlsafe)
return _from_serialized(raw_bytes, app, namespace)
def _constructor_handle_positional(path_args, kwargs):
"""Properly handle positional arguments to Key constructor.
This will modify ``kwargs`` in a few cases:
* The constructor was called with a dictionary as the only
positional argument (and no keyword arguments were passed). In
this case, the contents of the dictionary passed in will be copied
into ``kwargs``.
* The constructor was called with at least one (non-dictionary)
positional argument. In this case all of the positional arguments
will be added to ``kwargs`` for the key ``flat``.
Args:
path_args (Tuple): The positional arguments.
kwargs (Dict[str, Any]): The keyword arguments.
Raises:
TypeError: If keyword arguments were used while the first and
only positional argument was a dictionary.
TypeError: If positional arguments were provided and the keyword
``flat`` was used.
"""
if not path_args:
return
if len(path_args) == 1 and isinstance(path_args[0], dict):
if kwargs:
raise TypeError(
"Key() takes no keyword arguments when a dict is the "
"the first and only non-keyword argument (for "
"unpickling)."
)
kwargs.update(path_args[0])
else:
if "flat" in kwargs:
raise TypeError(
"Key() with positional arguments "
"cannot accept flat as a keyword argument."
)
kwargs["flat"] = path_args
def _exactly_one_specified(*values):
"""Make sure exactly one of ``values`` is truthy.
Args:
values (Tuple[Any, ...]): Some values to be checked.
Returns:
bool: Indicating if exactly one of ``values`` was truthy.
"""
count = sum(1 for value in values if value)
return count == 1
def _parse_from_ref(
klass,
reference=None,
serialized=None,
urlsafe=None,
app=None,
namespace=None,
**kwargs
):
"""Construct a key from a Reference.
This makes sure that **exactly** one of ``reference``, ``serialized`` and
``urlsafe`` is specified (all three are different representations of a
``Reference`` protobuf).
Args:
klass (type): The class of the instance being constructed. It must
be :class:`.Key`; we do not allow constructing :class:`.Key`
subclasses from a serialized Reference protobuf.
reference (Optional[\
~google.cloud.datastore._app_engine_key_pb2.Reference]): A
reference protobuf representing a key.
serialized (Optional[bytes]): A reference protobuf serialized to bytes.
urlsafe (Optional[str]): A reference protobuf serialized to bytes. The
raw bytes are then converted to a websafe base64-encoded string.
app (Optional[str]): The Google Cloud Platform project (previously
on Google App Engine, this was called the Application ID).
namespace (Optional[str]): The namespace for the key.
kwargs (Dict[str, Any]): Any extra keyword arguments not covered by
the explicitly provided ones. These are passed through to indicate
to the user that the wrong combination of arguments was used, e.g.
if ``parent`` and ``urlsafe`` were used together.
Returns:
Tuple[~.datastore.Key, \
~google.cloud.datastore._app_engine_key_pb2.Reference]:
A pair of the constructed key and the reference that was serialized
in one of the arguments.
Raises:
TypeError: If ``klass`` is not :class:`.Key`.
TypeError: If ``kwargs`` isn't empty.
TypeError: If any number other than exactly one of ``reference``,
``serialized`` or ``urlsafe`` is provided.
"""
if klass is not Key:
raise TypeError(_WRONG_TYPE.format(klass))
if kwargs or not _exactly_one_specified(reference, serialized, urlsafe):
raise TypeError(
"Cannot construct Key reference from incompatible "
"keyword arguments."
)
if reference:
ds_key = _from_reference(reference, app, namespace)
elif serialized:
ds_key, reference = _from_serialized(serialized, app, namespace)
else:
# NOTE: We know here that ``urlsafe`` is truth-y;
# ``_exactly_one_specified()`` guarantees this.
ds_key, reference = _from_urlsafe(urlsafe, app, namespace)
return ds_key, reference
def _parse_from_args(
pairs=None, flat=None, app=None, namespace=None, parent=None
):
"""Construct a key the path (and possibly a parent key).
Args:
pairs (Optional[Iterable[Tuple[str, Union[str, int]]]]): An iterable
of (kind, ID) pairs.
flat (Optional[Iterable[Union[str, int]]]): An iterable of the
(kind, ID) pairs but flattened into a single value. For example,
the pairs ``[("Parent", 1), ("Child", "a")]`` would be flattened to
``["Parent", 1, "Child", "a"]``.
app (Optional[str]): The Google Cloud Platform project (previously
on Google App Engine, this was called the Application ID).
namespace (Optional[str]): The namespace for the key.
parent (Optional[~.ndb.key.Key]): The parent of the key being
constructed. If provided, the key path will be **relative** to the
parent key's path.
Returns:
~.datastore.Key: The constructed key.
Raises:
.BadValueError: If ``parent`` is passed but is not a ``Key``.
"""
flat = _get_path(flat, pairs)
_clean_flat_path(flat)
parent_ds_key = None
if parent is None:
project = _project_from_app(app)
else:
project = _project_from_app(app, allow_empty=True)
if not isinstance(parent, Key):
raise exceptions.BadValueError(
"Expected Key instance, got {!r}".format(parent)
)
# Offload verification of parent to ``google.cloud.datastore.Key()``.
parent_ds_key = parent._key
return google.cloud.datastore.Key(
*flat, parent=parent_ds_key, project=project, namespace=namespace
)
def _get_path(flat, pairs):
"""Get a flat path of key arguments.
Does this from exactly one of ``flat`` or ``pairs``.
Args:
pairs (Optional[Iterable[Tuple[str, Union[str, int]]]]): An iterable
of (kind, ID) pairs.
flat (Optional[Iterable[Union[str, int]]]): An iterable of the
(kind, ID) pairs but flattened into a single value. For example,
the pairs ``[("Parent", 1), ("Child", "a")]`` would be flattened to
``["Parent", 1, "Child", "a"]``.
Returns:
List[Union[str, int]]: The flattened path as a list.
Raises:
TypeError: If both ``flat`` and ``pairs`` are provided.
ValueError: If the ``flat`` path does not have an even number of
elements.
TypeError: If the paths are both empty.
"""
if flat:
if pairs is not None:
raise TypeError(
"Key() cannot accept both flat and pairs arguments."
)
if len(flat) % 2:
raise ValueError(
"Key() must have an even number of positional arguments."
)
flat = list(flat)
else:
flat = []
for kind, id_ in pairs:
flat.extend((kind, id_))
if not flat:
raise TypeError("Key must consist of at least one pair.")
return flat
def _clean_flat_path(flat):
"""Verify and convert the flat path for a key.
This may modify ``flat`` in place. In particular, if the last element is
:data:`None` (for a partial key), this will pop it off the end. Also
if some of the kinds are instance of :class:`.Model`, they will be
converted to strings in ``flat``.
Args:
flat (List[Union[str, int]]): The flattened path as a list.
Raises:
TypeError: If the kind in a pair is an invalid type.
.BadArgumentError: If a key ID is :data:`None` (indicating a partial
key), but in a pair other than the last one.
TypeError: If a key ID is not a string or integer.
"""
# Verify the inputs in ``flat``.
for i in range(0, len(flat), 2):
# Make sure the ``kind`` is either a string or a Model.
kind = flat[i]
if isinstance(kind, type):
kind = kind._get_kind()
flat[i] = kind
if not isinstance(kind, str):
raise TypeError(
"Key kind must be a string or Model class; "
"received {!r}".format(kind)
)
# Make sure the ``id_`` is either a string or int. In the special case
# of a partial key, ``id_`` can be ``None`` for the last pair.
id_ = flat[i + 1]
if id_ is None:
if i + 2 < len(flat):
raise exceptions.BadArgumentError(
"Incomplete Key entry must be last"
)
elif not isinstance(id_, (str, int)):
raise TypeError(_INVALID_ID_TYPE.format(id_))
# Remove trailing ``None`` for a partial key.
if flat[-1] is None:
flat.pop()
def _verify_path_value(value, is_str, is_kind=False):
"""Verify a key path value: one of a kind, string ID or integer ID.
Args:
value (Union[str, int]): The value to verify
is_str (bool): Flag indicating if the ``value`` is a string. If
:data:`False`, then the ``value`` is assumed to be an integer.
is_kind (Optional[bool]): Flag indicating if the value is meant to
be a kind. Defaults to :data:`False`.
Returns:
Union[str, int]: The ``value`` passed in, if it passed verification
checks.
Raises:
ValueError: If the ``value`` is a ``str`` for the kind, but the number
of UTF-8 encoded bytes is outside of the range ``[1, 1500]``.
ValueError: If the ``value`` is a ``str`` for the name, but the number
of UTF-8 encoded bytes is outside of the range ``[1, 1500]``.
ValueError: If the ``value`` is an integer but lies outside of the
range ``[1, 2^63 - 1]``.
"""
if is_str:
if 1 <= len(value.encode("utf-8")) <= _MAX_KEYPART_BYTES:
return value
if is_kind:
raise ValueError(_BAD_KIND.format(_MAX_KEYPART_BYTES, value))
else:
raise ValueError(_BAD_STRING_ID.format(_MAX_KEYPART_BYTES, value))
else:
if 1 <= value <= _MAX_INTEGER_ID:
return value
raise ValueError(_BAD_INTEGER_ID.format(value))
def _to_legacy_path(dict_path):
"""Convert a tuple of ints and strings in a legacy "Path".
.. note:
This assumes, but does not verify, that each entry in
``dict_path`` is valid (i.e. doesn't have more than one
key out of "name" / "id").
Args:
dict_path (Iterable[Tuple[str, Union[str, int]]]): The "structured"
path for a ``google-cloud-datastore`` key, i.e. it is a list of
dictionaries, each of which has "kind" and one of "name" / "id" as
keys.
Returns:
_app_engine_key_pb2.Path: The legacy path corresponding to
``dict_path``.
"""
elements = []
for part in dict_path:
element_kwargs = {
"type": _verify_path_value(part["kind"], True, is_kind=True)
}
if "id" in part:
element_kwargs["id"] = _verify_path_value(part["id"], False)
elif "name" in part:
element_kwargs["name"] = _verify_path_value(part["name"], True)
element = _app_engine_key_pb2.Path.Element(**element_kwargs)
elements.append(element)
return _app_engine_key_pb2.Path(element=elements)
|
{
"content_hash": "256dcec00c48258b0e67f25fc0f7a868",
"timestamp": "",
"source": "github",
"line_count": 1233,
"max_line_length": 83,
"avg_line_length": 34.850770478507705,
"alnum_prop": 0.5808568569500361,
"repo_name": "jonparrott/gcloud-python",
"id": "b9fc411c7c9ca00ab1f551ce30ec002caedc30c2",
"size": "43547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndb/src/google/cloud/ndb/key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
# pylint: enable=import-error
import os
import sys
# pylint: disable=import-error
# pylint: disable=unused-import
import py2exe
# pylint: enable=unused-import
# pylint: enable=import-error
import zmq
import zmq.libzmq
def tree(root_dir, dest):
"""
Create a list with all files root_dir and its subfolders in an
appropriate format for distutils data_files.
"""
prefix = os.path.dirname(root_dir)
data_files = [
(dest+root[len(prefix):], [os.path.join(root, f) for f in files])
for root, _, files in os.walk(os.path.normpath(root_dir))
]
return data_files
def main():
setup_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.dirname(os.path.dirname(setup_dir))
zmq_dir = os.path.dirname(zmq.__file__)
# py2exe depedency detection is quite problematic
sys.path.insert(0, zmq_dir) # for libzmq.pyd
sys.path.insert(0, root_dir) # for node
sys.path.insert(0, setup_dir) # for local openbazaar.py
data_files = tree(os.path.join(root_dir, "html"), ".")
data_files.append(
(".", [os.path.join(root_dir, "pycountry-1.8-py2.7.egg")])
)
setup(
console=[
{
'script': 'openbazaar.py',
'icon_resources': [(1, 'icon.ico')]
},
{
'script': 'stop.py'
}
],
options={
"py2exe":
{
'dist_dir': 'dist_exe',
'bundle_files': 3,
'compressed': 2,
'optimize': 2,
'includes': [
"pkg_resources",
"zmq.utils",
"zmq.utils.jsonapi",
"zmq.utils.strtypes",
"zmq.backend.cython"
],
# NOTE: py2exe copies libzmq.pyd with the wrong name
# zmq.libzmq.pyd. Manually excluding zmq.libzmq.pyd
# copies it with the right name.
'excludes': ['zmq.libzmq', 'pycountry'],
'dll_excludes': [
'IPHLPAPI.DLL',
'PSAPI.DLL',
'WTSAPI32.dll',
'w9xpopen.exe'
]
}
},
data_files=data_files
)
if __name__ == '__main__':
main()
|
{
"content_hash": "d041a9964fdf657d728fd2c811d24c4d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 73,
"avg_line_length": 28.734939759036145,
"alnum_prop": 0.5006289308176101,
"repo_name": "STRML/OpenBazaar",
"id": "114b94162132fcb9cf9f1117810cebfd95f2a03f",
"size": "2416",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "installers/windows/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8930"
},
{
"name": "JavaScript",
"bytes": "107310"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "Perl",
"bytes": "655"
},
{
"name": "PowerShell",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "361116"
},
{
"name": "Shell",
"bytes": "16486"
}
],
"symlink_target": ""
}
|
import logging
import fixtures
class SetLogLevel(fixtures.Fixture):
"""Override the log level for the named loggers, restoring their
previous value at the end of the test.
To use::
from oslo_log import fixture as log_fixture
self.useFixture(log_fixture.SetLogLevel(['myapp.foo'], logging.DEBUG))
:param logger_names: Sequence of logger names, as would be passed
to getLogger().
:type logger_names: list(str)
:param level: Logging level, usually one of logging.DEBUG,
logging.INFO, etc.
:type level: int
"""
def __init__(self, logger_names, level):
self.logger_names = logger_names
self.level = level
def setUp(self):
super(SetLogLevel, self).setUp()
for name in self.logger_names:
# NOTE(dhellmann): Use the stdlib version of getLogger()
# so we get the logger and not any adaptor wrapping it.
logger = logging.getLogger(name)
self.addCleanup(logger.setLevel, logger.level)
logger.setLevel(self.level)
|
{
"content_hash": "1da9fdc8c4b83cb5e5e2778ac28a2d21",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 31.37142857142857,
"alnum_prop": 0.6311475409836066,
"repo_name": "openstack/oslo.log",
"id": "d3e2e2c15e41624e080a4bc3cb39dc0774df25ab",
"size": "1669",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "oslo_log/fixture/setlevel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "218052"
}
],
"symlink_target": ""
}
|
import os
from ingenico.connect.sdk.factory import Factory
class GetFileExample(object):
def example(self):
with self.__get_client() as client:
headers, chunks = client.merchant("merchantId").files().get_file("fileId")
# make sure all chunks are read
for chunk in chunks:
# use the chunk
pass
def __get_client(self):
api_key_id = os.getenv("connect.api.apiKeyId", "someKey")
secret_api_key = os.getenv("connect.api.secretApiKey", "someSecret")
configuration_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../example_configuration.ini'))
return Factory.create_client_from_file(configuration_file_name=configuration_file_name,
api_key_id=api_key_id, secret_api_key=secret_api_key)
|
{
"content_hash": "1fc606d95cef55af08c665b6520a1362",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 100,
"avg_line_length": 42.5,
"alnum_prop": 0.5689839572192513,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "c52bcf3245527d2d5c7bb768dbba28d9b6f6ae55",
"size": "1062",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/merchant/files/get_file_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
}
|
import os
# Add parent directory for imports
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
import boto.swf
import log
import json
import random
from optparse import OptionParser
"""
Amazon SWF LensArticlePublish starter
"""
class starter_LensArticlePublish():
def start(self, settings, all_doi=None, doi_id=None):
# Log
identity = "starter_%s" % int(random.random() * 1000)
logFile = "starter.log"
#logFile = None
logger = log.logger(logFile, settings.setLevel, identity)
# Simple connect
conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
docs = []
if all_doi is True:
# Publish all articles
# TODO!! Add all articles support again
pass
elif doi_id is not None:
doc = {}
doc['article_id'] = str(doi_id).zfill(5)
docs.append(doc)
if docs:
for doc in docs:
article_id = doc["article_id"]
# Start a workflow execution
workflow_id = "LensArticlePublish_%s" % (article_id)
workflow_name = "LensArticlePublish"
workflow_version = "1"
child_policy = None
execution_start_to_close_timeout = str(60 * 30)
input = '{"article_id": "' + str(article_id) + '"}'
try:
response = conn.start_workflow_execution(
settings.domain, workflow_id, workflow_name, workflow_version,
settings.default_task_list, child_policy, execution_start_to_close_timeout,
input)
logger.info('got response: \n%s' %
json.dumps(response, sort_keys=True, indent=4))
except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
# There is already a running workflow with that ID, cannot start another
message = ('SWFWorkflowExecutionAlreadyStartedError: There is already ' +
'a running workflow with ID %s' % workflow_id)
print message
logger.info(message)
if __name__ == "__main__":
doi_id = None
all_doi = False
# Add options
parser = OptionParser()
parser.add_option("-e", "--env", default="dev", action="store", type="string",
dest="env", help="set the environment to run, either dev or live")
parser.add_option("-d", "--doi-id", default=None, action="store", type="string",
dest="doi_id", help="specify the DOI id of a single article")
parser.add_option("-a", "--all", default=None, action="store_true", dest="all_doi",
help="start workflow for all article DOI")
(options, args) = parser.parse_args()
if options.env:
ENV = options.env
if options.doi_id:
doi_id = options.doi_id
if options.all_doi:
all_doi = options.all_doi
import settings as settingsLib
settings = settingsLib.get_settings(ENV)
o = starter_LensArticlePublish()
o.start(settings=settings, all_doi=all_doi, doi_id=doi_id)
|
{
"content_hash": "321e65b4b012f04592a1f1768cc577cd",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 99,
"avg_line_length": 34.5625,
"alnum_prop": 0.5669077757685352,
"repo_name": "gnott/elife-bot",
"id": "96aa33751e4325c4027224f14b50dc2c74cac96c",
"size": "3318",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "starter/starter_LensArticlePublish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "53428"
},
{
"name": "HTML",
"bytes": "3975"
},
{
"name": "Python",
"bytes": "1295112"
},
{
"name": "Shell",
"bytes": "2363"
}
],
"symlink_target": ""
}
|
import sys
import argparse
import math
import numpy as np
def parseArgument():
# Parse the input
parser=argparse.ArgumentParser(description=\
"Convert a matrix of continuous-valued TF peaks to a file in the format for HDP")
parser.add_argument("--continuousMatFileName", required=True,\
help='File with continuous TF peaks, should be rank-normalized if numBins > 1')
parser.add_argument("--numBins", type=int, required=False, default=3,\
help='Number of bins for the TF signal')
parser.add_argument("--minTFs", type=int, required=False, default=1,\
help='Minimum number of TFs per merged peak')
parser.add_argument("--HDPFileName", required=True,\
help='Name of file where the continuous matirx in HDP format will be recorded')
options = parser.parse_args()
return options
def convertContinuousMatToHDPFormat(options):
# Convert a matrix of continuous-valued TF peaks to a file in the format for HDP
continuousMat = np.loadtxt(options.continuousMatFileName)
HDPFile = open(options.HDPFileName, 'w+')
for i in range(continuousMat.shape[0]):
# Iterate through the rows of the continous matrix and put each into HDP format
numNonZero = np.count_nonzero(continuousMat[i,:])
if numNonZero < options.minTFs:
# All of the peaks have 0 signal, so continue
continue
HDPFile.write(str(numNonZero))
for j in range(continuousMat.shape[1]):
# Iterate through the columns of the continuous matrix and put each entry into column number:value format
if continuousMat[i][j] == 0:
# There is no peak for the current TF in this location, so skip it
continue
HDPFile.write(" " + str(j) + ":")
if options.numBins == 1:
# The data will be binarized
HDPFile.write("1")
else:
# The data will not be binarized
# Adding 2 so that 0 will be more different from weak peaks that peaks are from peaks that are in the next bin
binnedSignal = int(math.floor(float(options.numBins) * continuousMat[i][j])) + 2
HDPFile.write(str(binnedSignal))
HDPFile.write("\n")
HDPFile.close()
if __name__ == "__main__":
options = parseArgument()
convertContinuousMatToHDPFormat(options)
|
{
"content_hash": "83099219200842ed8cf1318b6a78b768",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 114,
"avg_line_length": 41.94117647058823,
"alnum_prop": 0.7288452547919588,
"repo_name": "imk1/IMKTFBindingCode",
"id": "2f6269b0aedd56f22ccf4ec86ff9d34f1aa4d6e2",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convertContinuousMatToHDPFormat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1156919"
},
{
"name": "R",
"bytes": "22835"
},
{
"name": "Shell",
"bytes": "416606"
}
],
"symlink_target": ""
}
|
import os, sys
lib = os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','hownewspaperswrite'))
sys.path.append(lib)
from django.core.exceptions import ObjectDoesNotExist
from termcolor import colored
class SaveDataInDatabase:
def __init__(self,):
""""""
def dropTable(request, table_name = "hownewspaperswrite_app_entity_items" ):
#hownewspaperswrite_app_entity
from django.db import connection, transaction
cursor = connection.cursor()
try:
cursor.execute("DROP TABLE "+table_name+";")
except Exception as e:
print "Error in dropping app_postitem table %s " % e
def resetItemsFreqDist(self):
"""Resets items freq dist to zero for rebasing."""
from models import PostItem
results = PostItem.most_common.delete_everything()
#results = PostItem.objects.all().delete()
print "Resetting %s items..." % results.count()
return
updated = 0
for res in results:
if res.numeric != 0:
print "Resetting: %s # %s" % (res.word, updated)
res.numeric = 0
res.save()
updated += 1
return updated
def addSite(self, testate = "", url = "", thumbnail = "", description = "", language = 'eng'):
"""Creates site information."""
from models import Site
if len(url) == 0:
raise Exception("No url")
try:
r = Site.objects.create(title=testate, url=url, thumbnail = thumbnail, description = description, language = language)
createdSite = True
except:
r = Site.objects.get(url=url)
createdSite = False
print "AddSite: Site was created? %s" % createdSite
return r
def addSitePost(self, site, testo, url_articolo, links, titolo = ""):
"""Save in database all events contained in a file.
:param listOfEvents:
List of event objects to save in DB.
Return int with total saved videos in database.
"""
from models import SitePost
if len(titolo) == 0:
titolo = url_articolo
#print titolo
try:
v = SitePost.objects.get(url_articolo = url_articolo)
created = False
except:
v = SitePost.objects.create(
testata = site,
url_articolo = url_articolo,
testo = testo,
titolo = titolo
)
created = True
tot = 0
print "Created: %s | URL: %s" % (created, v.url_articolo)
if created is True:
links = [self.addPostItem(site,link,'LINK', url_articolo) for link in links if "http" in link and len(link)>3]
for link in links:
v.links.add(link)
tot+=1
print "AddSitePost: Url: %s | Created? %s | Links created: %s" % (v.url_articolo, created, tot)
return created
def addPostItem(self, site, word, tipo, parent_url = "", numeric = 0, tfidf = 0, stem = ""):
"""Add video to db
:param video: Django video DB object containing title, description, url, thumb url ...
Return two variables with:
* **v** -- Created video object in database, dict.
* **created** -- True if video is created, false if video is already present, bool.
"""
from models import PostItem
try:
v = PostItem.objects.get(word = word, testata_nome = site.title)
print colored("Updating: %s | %s | %s | %s | %s" % (word,v.testata.title, tipo,v.numeric, numeric), "red")
v.numeric = int(v.numeric)+int(numeric)
except ObjectDoesNotExist:
v = PostItem.objects.create(
word = word,
tipo = tipo,
url_articolo = parent_url,
numeric = numeric,
testata = site,
testata_nome = site.title
)
#v.testate.add(site)
print colored("Saving: %s | %s | %s | %s" % (word,site.title, tipo, numeric), "green")
if len(stem) > 0:
v.stem = stem
if tfidf > 0:
#db specific hack
v.tfidf = int(tfidf * 100000)
print colored("Final: %s | %s | %s | %s | %s\n" % (word,site.title, tipo,v.numeric, v.stem), "white")
return v
def addEntity(self, nome= "", word = "", tipo = "", subtipo = "", value = 0, articles_ref = [], categoria = "cd ", add_items = True, add_articles = False):
from models import Entity, PostItem, SitePost
print colored("Saving %s from %s..." % (nome, word), "green")
try:
e = Entity.objects.get(name = nome)
except ObjectDoesNotExist:
e = Entity.objects.create(
name = nome,
tipo = tipo,
subtipo = subtipo,
category = categoria
)
if value != e.valore:
e.valore = value
if add_items is True:
posts = PostItem.stems.filter(word__istartswith = word)
c = 0
for post in posts:
c +=1
e.items.add(post)
print "Linked %s to %s items." % (nome, c)
def saveArticle(e,articles):
d = 0
for article in articles:
d +=1
#TYPOOOOOOOOOOO
e.aritcles.add(article)
print "Linked %s to %s articles." % (nome,d)
e.save()
return e
if add_articles is True:
articles = SitePost.objects.filter(testo_icontains = word)
e = saveArticle(e, articles)
if len(articles_ref) > 0:
e = saveArticle(e, articles_ref)
print colored("Saved %s" % e.name, "yellow")
return e
def addArticlesToEntity(self, entity = {}):
"""Adds articles to entity."""
from models import Entity, PostItem, SitePost
if len(entity) == 0:
entities = Entity.objects.all()
else:
entities = [entity]
c = 0
for ent in entities:
print "Matching articles for %s..." % ent.name
articles = SitePost.objects.filter(testo__icontains = ent.name)
for article in articles:
for ent_art in ent.aritcles.all():
if article.titolo not in ent_art.titolo:
ent.aritcles.add(article)
c += 1
ent.save()
print "[AddToModels][addArticlesToEntity] Saved %s for %s " % (ent.aritcles.count(), ent.name)
return {
"entities": len(entities),
"total_articles": len()
}
|
{
"content_hash": "3f41f08b0cb5d9cef072d0341c1164af",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 159,
"avg_line_length": 35.175879396984925,
"alnum_prop": 0.5128571428571429,
"repo_name": "andrea-f/hownewspaperswrite",
"id": "b7b2ba78250cf62f864e7cc44e44541a7faab9fa",
"size": "7000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/hownewspaperswrite/hownewspaperswrite_app/AddToModels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12887"
},
{
"name": "Python",
"bytes": "39283"
}
],
"symlink_target": ""
}
|
import json
import os
import uuid
folder, fname = os.path.split(__file__)
def contents_of(f):
with open(f, 'r') as ins_file:
contents = ' '.join(ins_file.readlines())
return contents
DEBUG = bool(int(os.environ.get('SOWING_DEBUG', 1))) # True
CSRF_SECRET = 'mysecretsRsaf3' if DEBUG else uuid.uuid4().hex
LOCAL_SETTINGS = os.environ.get('SOWING_SETTINGS', None)
if LOCAL_SETTINGS is None:
LOCAL_SETTINGS = os.path.join(folder, 'local-settings.json')
if not os.path.exists(LOCAL_SETTINGS):
raise EnvironmentError('no configuration settings `local-settings.py`, %s' % LOCAL_SETTINGS)
APP_CONFIG = {
'port': 8888,
'host': '127.0.0.1',
'domain': 'sowingseasons.com',
'protocol': 'http' if DEBUG else 'https', # we don't support HTTP on the WildWildWeb
'media': r'/home/blake/temp/sowing-seasons-media',
'private_settings': json.load(open(LOCAL_SETTINGS, 'r')),
'logging': {
'version': 1,
'incremental': False,
'disable_existing_loggers': False,
'loggers': {
'summer': {
'level': 'DEBUG',
'handlers': ['console', 'file'],
'qualname': 'sowing',
'propagate': 0
}
},
'formatters': {
"default": {
"format": "%(asctime)s %(ip)-15s %(levelname)-5s %(name)-40s: %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
}
},
'filters': {
'traffic': {
'()': 'summer.ext.logs.IPFilter'
}
},
'handlers': {
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'level': 'DEBUG',
'filename': r'/home/blake/temp/sowing-seasons-logs/server.log',
'maxBytes': 10000000,
'backupCount': 20,
'mode': 'a',
'filters': ['traffic']
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stdout',
'filters': ['traffic']
}
}
}
}
SEO_VALUES = {
'title': 'SowingSeasons - takes awhile to grow anything.',
'keywords': 'technology,programming,python',
'description': contents_of(r'DESCRIPTION'),
'author': 'Blake VandeMerwe <blakev@null.net>',
'author_name': 'Blake VandeMerwe',
'author_email': 'blakev@null.net',
'google': {
'author_id': '+BlakeVandeMerwe'
},
'img': r'/static/img/profile.jpg',
'run_analytics': not DEBUG
}
TORNADO_CONFIG = {
'debug': DEBUG,
'compress_response': True,
'cookie_secret': CSRF_SECRET,
'login_url': '/login',
'xsrf_cookies': True,
# static files
'static_hash_cache': not DEBUG,
}
WHOOSH = {
'index_name': 'sowing-seasons',
'location': r'/home/blake/temp/sowing-seasons-index'
}
|
{
"content_hash": "b136e47754bd8e92fa8cc42c83781232",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 28.411214953271028,
"alnum_prop": 0.5210526315789473,
"repo_name": "blakev/sowing-seasons",
"id": "d1d3e13e9cdc43c8b51dcc811db25033cfaf0a10",
"size": "3040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summer/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3185"
},
{
"name": "HTML",
"bytes": "29821"
},
{
"name": "Python",
"bytes": "36386"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView
class DirectTemplateView(TemplateView):
extra_context = None
def get_context_data(self, **kwargs):
context = super(self.__class__, self).get_context_data(**kwargs)
if self.extra_context is not None:
for key, value in self.extra_context.items():
if callable(value):
context[key] = value()
else:
context[key] = value
return context
|
{
"content_hash": "d8276032d126df6186f42214080bdf19",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 31.25,
"alnum_prop": 0.572,
"repo_name": "DarioGT/docker-carra",
"id": "7e11f25df20d992753f7fb951e1d224e874df5cc",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/protoExt/utils/generic_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64175"
},
{
"name": "Dockerfile",
"bytes": "739"
},
{
"name": "HTML",
"bytes": "14125"
},
{
"name": "JavaScript",
"bytes": "21266785"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "851053"
},
{
"name": "Shell",
"bytes": "2934"
},
{
"name": "Visual Basic",
"bytes": "7788"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.