text stringlengths 4 1.02M | meta dict |
|---|---|
"""ECLAIR is a package for the robust and scalable
inference of cell lineages from gene expression data.
ECLAIR achieves a higher level of confidence in the estimated lineages
through the use of approximation algorithms for consensus clustering and by combining the information from an ensemble of minimum spanning trees
so as to come up with an improved, aggregated lineage tree.
In addition, the present package features several customized algorithms for assessing the similarity between weighted graphs or unrooted trees and for estimating the reproducibility of each edge to a given tree.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Conte, D., Foggia, P., Sansone, C. and Vento, M.,
"Thirty Years of Graph Matching in Pattern Recognition".
In: International Journal of Pattern Recognition and Artificial Intelligence,
18, 3, pp. 265-298. 2004
"""
from .Robustness_analysis import experiment_1, experiment_2, experiment_3
from itertools import permutations
def main():
    """Run the ECLAIR statistical-performance experiments end to end.

    Workflow:
      i.   Split a dataset in three non-overlapping, equally-sized parts S1, S2, S3;
      ii.  Generate an ECLAIR tree/graph or a SPADE tree on S1, then another on S2;
      iii. Compare the afore-mentioned pairs of trees on S3, viewed as a test set;
      iv.  Repeat steps i. to iii. by interverting the roles of S1, S2 and S3
           as training and test sets;
      v.   Repeat steps i. to iv. up to 10 times so as to generate a series of
           coefficients ascertaining the similarity of trees or graphs.
    """
    # BUG FIX: 'np' was referenced below but never imported anywhere in this
    # module; import it locally so the script actually runs.
    import numpy as np

    for data_flags in sorted(permutations([True, False, False]))[::-2]:
        # The last flag selects the clustering method used by experiment_1.
        method = 'hierarchical' if data_flags[-1] else 'k-means'
        experiment_1(3, data_flags, method)

    # Pairwise comparisons of ECLAIR trees/graphs generated on the same dataset
    usecols = [3, 4, 5, 7, 8, 9, 10, 12, 13]
    with open('./ECLAIR_performance/nbt-SD2-Transformed.tsv', 'r') as f:
        # Strip the trailing newline so the header we re-emit below is clean.
        features = f.readline().rstrip('\n').split('\t')
        data = np.loadtxt(f, dtype=float, delimiter='\t', usecols=usecols)

    N_samples = data.shape[0]
    # BUG FIX: 'size' must be an integer; 'N_samples / 3' is a float on
    # Python 3 and makes np.random.choice raise a TypeError.
    sampling_indices = np.sort(np.random.choice(N_samples, size=N_samples // 3,
                                                replace=False))

    with open('./ECLAIR_performance/one_third_of_nbt-SD2-Transformed.tsv', 'w') as f:
        # BUG FIX: terminate the header with a newline, otherwise the first
        # data row written by np.savetxt lands on the header line.
        f.write('\t'.join(features[i] for i in usecols) + '\n')
        np.savetxt(f, data[sampling_indices], fmt='%.4f', delimiter='\t')

    experiment_2('./ECLAIR_performance/one_third_of_nbt-SD2-Transformed.tsv',
                 k=50, sampling_fraction=0.5, N_runs=100)

    # Pairwise comparisons of SPADE trees generated on the same dataset
    experiment_3()


if __name__ == '__main__':
    main()
| {
"content_hash": "5f42ceac968c8c321e8327d3274e568b",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 211,
"avg_line_length": 43.72857142857143,
"alnum_prop": 0.6886638353479255,
"repo_name": "GGiecold/ECLAIR",
"id": "917414728b8f029d21effb163c6d4c9696807ddc",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/ECLAIR/Statistical_performance/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "216349"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# URL namespace used by reverse() and {% url 'search_app:...' %} lookups.
app_name = 'search_app'

urlpatterns = [
    # Landing page, served by a class-based view.
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Search movies by title.
    url(r'^movie-search/$', views.search_by_title, name='movie_search'),
    # Detail pages keyed on a numeric id captured from the URL.
    # NOTE(review): these two patterns are not anchored with '$', so any
    # trailing path segment also matches -- confirm this is intentional.
    url(r'^movie/(?P<movie_id>[0-9]+)/', views.movie_detail, name='detail'),
    url(r'^actor/(?P<actor_id>[0-9]+)/', views.actor_detail, name='actor_detail'),
    # Search movies by actor name.
    url(r'^actor-search/$', views.search_by_actor, name='actor_search')
]
| {
"content_hash": "32c8d72a046e48d55a43eeab070615b4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 41.63636363636363,
"alnum_prop": 0.6397379912663755,
"repo_name": "hannahhall/movies",
"id": "cce51e1e1a379783d9d4e3d50300df3fbca9ed2e",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search_app/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1217"
},
{
"name": "JavaScript",
"bytes": "653"
},
{
"name": "Python",
"bytes": "11964"
}
],
"symlink_target": ""
} |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas.core.arrays import PeriodArray
from pandas.tests.extension import base
@pytest.fixture(params=["D", "2D"])
def dtype(request):
    """PeriodDtype parametrized over a daily and a 2-day frequency."""
    return PeriodDtype(freq=request.param)


@pytest.fixture
def data(dtype):
    """A length-100 PeriodArray of consecutive period ordinals."""
    return PeriodArray(np.arange(1970, 2070), freq=dtype.freq)


@pytest.fixture
def data_for_twos(dtype):
    """A length-100 PeriodArray whose ordinals are all 2."""
    return PeriodArray(np.ones(100) * 2, freq=dtype.freq)


@pytest.fixture
def data_for_sorting(dtype):
    # Three distinct ordinals in [mid, max, min] order, as the base
    # sorting tests expect.
    return PeriodArray([2018, 2019, 2017], freq=dtype.freq)


@pytest.fixture
def data_missing(dtype):
    """[NA, valid] pair; iNaT is the sentinel ordinal for a missing period."""
    return PeriodArray([iNaT, 2017], freq=dtype.freq)


@pytest.fixture
def data_missing_for_sorting(dtype):
    # [max, NA, min]: one missing value placed between the two valid ones.
    return PeriodArray([2018, iNaT, 2017], freq=dtype.freq)


@pytest.fixture
def data_for_grouping(dtype):
    # Layout required by the base grouping tests: B B NA NA A A B C
    # with A < B < C and NA missing.
    B = 2018
    NA = iNaT
    A = 2017
    C = 2019
    return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq)


@pytest.fixture
def na_value():
    """The scalar used to represent a missing value for Period dtypes."""
    return pd.NaT
class BasePeriodTests:
    """Shared (currently empty) base for the Period extension-test classes."""
    pass


class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests):
    pass


class TestConstructors(BasePeriodTests, base.BaseConstructorsTests):
    pass


class TestGetitem(BasePeriodTests, base.BaseGetitemTests):
    pass


class TestIndex(base.BaseIndexTests):
    pass


class TestMethods(BasePeriodTests, base.BaseMethodsTests):
    def test_combine_add(self, data_repeated):
        # Period + Period is not defined, so the base combine-add test
        # does not apply; skip by overriding with a no-op.
        pass


class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
    pass
class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests):
    """Arithmetic tests for PeriodArray.

    Period only implements subtraction (``__sub__``/``__rsub__``); every
    other arithmetic op is expected to raise, which the inherited base
    tests verify.
    """

    implements = {"__sub__", "__rsub__"}

    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
        # frame & scalar
        if all_arithmetic_operators in self.implements:
            df = pd.DataFrame({"A": data})
            self.check_opname(df, all_arithmetic_operators, data[0], exc=None)
        else:
            # ... but not the rest.
            super().test_arith_frame_with_scalar(data, all_arithmetic_operators)

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # we implement substitution...
        if all_arithmetic_operators in self.implements:
            s = pd.Series(data)
            self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
        else:
            # ... but not the rest.
            super().test_arith_series_with_scalar(data, all_arithmetic_operators)

    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        if all_arithmetic_operators in self.implements:
            s = pd.Series(data)
            self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
        else:
            # BUG FIX: this previously delegated to the *scalar* variant
            # (super().test_arith_series_with_scalar), so the series-with-array
            # behaviour of unsupported ops was never actually exercised.
            super().test_arith_series_with_array(data, all_arithmetic_operators)

    def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
        # divmod is not defined for Period; a TypeError is expected instead.
        super()._check_divmod_op(s, op, other, exc=TypeError)

    def test_add_series_with_extension_array(self, data):
        # we don't implement + for Period
        s = pd.Series(data)
        msg = (
            r"unsupported operand type\(s\) for \+: "
            r"\'PeriodArray\' and \'PeriodArray\'"
        )
        with pytest.raises(TypeError, match=msg):
            s + data

    def test_direct_arith_with_ndframe_returns_not_implemented(
        self, data, frame_or_series
    ):
        # Override to use __sub__ instead of __add__
        other = pd.Series(data)
        if frame_or_series is pd.DataFrame:
            other = other.to_frame()
        result = data.__sub__(other)
        assert result is NotImplemented
class TestCasting(BasePeriodTests, base.BaseCastingTests):
    pass


class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
    pass


class TestMissing(BasePeriodTests, base.BaseMissingTests):
    pass


class TestReshaping(BasePeriodTests, base.BaseReshapingTests):
    pass


class TestSetitem(BasePeriodTests, base.BaseSetitemTests):
    pass


class TestGroupby(BasePeriodTests, base.BaseGroupbyTests):
    pass


class TestPrinting(BasePeriodTests, base.BasePrintingTests):
    pass


class TestParsing(BasePeriodTests, base.BaseParsingTests):
    @pytest.mark.parametrize("engine", ["c", "python"])
    def test_EA_types(self, engine, data):
        # Run the base parsing round-trip test under both CSV engines.
        super().test_EA_types(engine, data)


class Test2DCompat(BasePeriodTests, base.NDArrayBacked2DTests):
    pass
| {
"content_hash": "4896befc891597141919d923501be454",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 81,
"avg_line_length": 26.828125,
"alnum_prop": 0.6893807027761599,
"repo_name": "pandas-dev/pandas",
"id": "b51846e34fd8835dc2ac5ff7f47ccb3a01ea1cff",
"size": "5151",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/extension/test_period.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import inviwopy
# import inviwopy.glm as glm
import numpy as np
import h5py
import time
from envisionpy.utils.exceptions import *
from .baseNetworks.LinePlotSubnetwork import LinePlotSubnetwork
class DensityOfStates(LinePlotSubnetwork):
    '''
    Manages a subnetwork for density of states (DoS) visualisation.

    Uses a modified LinePlotSubnetwork. Dataframes are not read directly from
    hdf5 file but are processed before sent to the line plot processor.
    '''
    def __init__(self, inviwoApp, hdf5_path, hdf5_outport, xpos=0, ypos=0):
        # Build the base line-plot network first (last arg False: skip the
        # default hdf5->function chain, which modify_network replaces below).
        LinePlotSubnetwork.__init__(self, inviwoApp, hdf5_path, hdf5_outport, xpos, ypos, False)
        self.modify_network(hdf5_path, hdf5_outport, xpos, ypos)
        # Both total and partial DoS curves are shown by default.
        self.totalEnabled = True
        self.partialEnabled = True
        self.set_title('DOS [1/(eV * unit cell)]')

    @staticmethod
    def valid_hdf5(hdf5_file):
        # The file is usable only if it holds both total and partial DoS groups.
        return '/DOS' in hdf5_file and '/DOS/Partial' in hdf5_file and '/DOS/Total' in hdf5_file

    def get_ui_data(self):
        # No extra UI state beyond what the base subnetwork exposes.
        return []

    def toggle_total(self, enable):
        """Connect or disconnect the total-DoS branch from the final collector."""
        self.totalEnabled = enable
        totalCollector = self.get_processor("TotalCollector")
        collector = self.get_processor("Collector")
        if enable:
            self.network.addConnection(totalCollector.getOutport("dataframeOutport"), collector.getInport("dataframeMultiInport"))
        else:
            self.network.removeConnection(totalCollector.getOutport("dataframeOutport"), collector.getInport("dataframeMultiInport"))

    def toggle_partial(self, enable):
        """Connect or disconnect the partial-DoS branch from the final collector."""
        self.partialEnabled = enable
        partialCollector = self.get_processor("PartialCollector")
        collector = self.get_processor("Collector")
        if enable:
            self.network.addConnection(partialCollector.getOutport("dataframeOutport"), collector.getInport("dataframeMultiInport"))
        else:
            self.network.removeConnection(partialCollector.getOutport("dataframeOutport"), collector.getInport("dataframeMultiInport"))

    def get_n_partials(self):
        # Max selectable band index == number of partial-DoS groups (set in
        # modify_network from the hdf5 file).
        return self.get_processor("Select Partial Band").intProperty.maxValue

    def set_partial_selection(self, n):
        """Select which partial-DoS band (atom index) is displayed."""
        self.get_processor("Select Partial Band").intProperty.value = n

    def get_partial_selection(self):
        """Return the currently selected partial-DoS band index."""
        return self.get_processor("Select Partial Band").intProperty.value

    def modify_network(self, hdf5_path, hdf5_outport, xpos, ypos):
        """Rebuild the hdf5->function section of the network for DoS data.

        Creates one HDF5ToFunction -> add (-> negate, for spin-down) chain
        per total/partial DoS series, collects them into dataframes and
        feeds the result to the line plot. Spin-down curves are negated so
        they are plotted below the energy axis.
        """
        self.network.lock()
        # Remove default hdf5 to function section.
        self.remove_processor('ChildCollector')
        self.remove_processor('h5ToFunction')
        self.remove_processor('dataFrame')
        pathSelection = self.get_processor('PathSelection')

        # Inspect the hdf5 file to learn which DoS series exist.
        with h5py.File(hdf5_path,"r") as h5:
            total_list = []
            for key in h5["/DOS/Total"].keys():
                if key == "Energy": continue
                total_list.append(key)
            # Sort on the reversed name so "(dwn)" and "(up)" series group together.
            total_list.sort(key=lambda item: "".join(reversed(item)))
            down_totals = [x for x in total_list if x.endswith("(dwn)")]
            up_totals = [x for x in total_list if x.endswith("(up)")]
            has_partial = "/DOS/Partial" in h5
            if has_partial:
                n_partials = len(h5['/DOS/Partial'])
                partial_list = []
                for key in h5["/DOS/Partial/0"]:
                    if key == "Energy": continue
                    partial_list.append(key)
                partial_list.sort(key=lambda item: "".join(reversed(item)))
                down_partials = [x for x in partial_list if x.endswith("(dwn)")]
                up_partials = [x for x in partial_list if x.endswith("(up)")]

        to_func_processors = []

        # Setup totals
        totalSelection = self.add_processor("org.inviwo.hdf5.PathSelection", "Select Total", xpos+7, ypos)
        self.network.addConnection(pathSelection.getOutport('outport'), totalSelection.getInport('inport'))
        totalCollector = self.add_processor("org.inviwo.DataFrameCollector", "TotalCollector", xpos+7, ypos+15)
        xpos_tmp = xpos + 7
        for n, key in enumerate(down_totals):
            toFunction = self.add_processor("org.inviwo.HDF5ToFunction", key, xpos_tmp, ypos+6)
            to_func_processors.append(toFunction)
            self.network.addConnection(totalSelection.getOutport('outport'), toFunction.getInport('hdf5HandleFlatMultiInport'))
            toFunction.yNamePrependParentsProperty.value = 1
            addOperation = self.add_processor("org.inviwo.FunctionOperationNary", "Total Down Add {}".format(n), xpos_tmp, ypos+9)
            self.network.addConnection(toFunction.getOutport("functionVectorOutport"), addOperation.getInport("functionFlatMultiInport"))
            addOperation.operationProperty.value = 'add'
            # Spin-down curves are negated so they plot below the axis.
            negateOperation = self.add_processor("org.inviwo.FunctionOperationUnary", "Total Down Negate {}".format(n), xpos_tmp, ypos+12)
            self.network.addConnection(addOperation.getOutport("dataframeOutport"), negateOperation.getInport("dataframeInport"))
            self.network.addConnection(negateOperation.getOutport("dataframeOutport"), totalCollector.getInport('dataframeMultiInport'))
            negateOperation.operationProperty.value = 'negate'
            n += 1  # NOTE(review): dead statement -- n is reassigned by enumerate.
            xpos_tmp += 7
        for n, key in enumerate(up_totals):
            toFunction = self.add_processor("org.inviwo.HDF5ToFunction", key, xpos_tmp, ypos+6)
            to_func_processors.append(toFunction)
            self.network.addConnection(totalSelection.getOutport('outport'), toFunction.getInport('hdf5HandleFlatMultiInport'))
            toFunction.yNamePrependParentsProperty.value = 1
            addOperation = self.add_processor("org.inviwo.FunctionOperationNary", "Total Up Add {}".format(n), xpos_tmp, ypos+9)
            self.network.addConnection(toFunction.getOutport("functionVectorOutport"), addOperation.getInport("functionFlatMultiInport"))
            self.network.addConnection(addOperation.getOutport("dataframeOutport"), totalCollector.getInport('dataframeMultiInport'))
            addOperation.operationProperty.value = 'add'
            n += 1  # NOTE(review): dead statement -- n is reassigned by enumerate.
            xpos_tmp += 7

        # Setup partials
        # NOTE(review): this section uses n_partials/down_partials, which are
        # only bound when has_partial is True; valid_hdf5 guarantees the
        # /DOS/Partial group exists -- confirm this invariant holds.
        partialSelection = self.add_processor("org.inviwo.hdf5.PathSelection", "Select Partial", xpos_tmp, ypos)
        partialBandSelection = self.add_processor("org.inviwo.HDF5PathSelectionInt", "Select Partial Band", xpos_tmp, ypos+3)
        self.network.addConnection(pathSelection.getOutport('outport'), partialSelection.getInport('inport'))
        self.network.addConnection(partialSelection.getOutport('outport'), partialBandSelection.getInport('hdf5HandleInport'))
        partialBandSelection.intProperty.value = 0
        partialBandSelection.intProperty.minValue = 0
        partialBandSelection.intProperty.maxValue = n_partials
        partialCollector = self.add_processor("org.inviwo.DataFrameCollector", "PartialCollector", xpos_tmp, ypos+15)
        for n, key in enumerate(down_partials):
            toFunction = self.add_processor("org.inviwo.HDF5ToFunction", key, xpos_tmp, ypos+6)
            to_func_processors.append(toFunction)
            self.network.addConnection(partialBandSelection.getOutport('hdf5HandleVectorOutport'), toFunction.getInport('hdf5HandleFlatMultiInport'))
            toFunction.yNamePrependParentsProperty.value = 2
            addOperation = self.add_processor("org.inviwo.FunctionOperationNary", "Partial Down Add {}".format(n), xpos_tmp, ypos+9)
            self.network.addConnection(toFunction.getOutport("functionVectorOutport"), addOperation.getInport("functionFlatMultiInport"))
            addOperation.operationProperty.value = 'add'
            negateOperation = self.add_processor("org.inviwo.FunctionOperationUnary", "Partial Down Negate {}".format(n), xpos_tmp, ypos+12)
            self.network.addConnection(addOperation.getOutport("dataframeOutport"), negateOperation.getInport("dataframeInport"))
            self.network.addConnection(negateOperation.getOutport("dataframeOutport"), partialCollector.getInport('dataframeMultiInport'))
            negateOperation.operationProperty.value = 'negate'
            n += 1  # NOTE(review): dead statement -- n is reassigned by enumerate.
            xpos_tmp += 7
        for n, key in enumerate(up_partials):
            toFunction = self.add_processor("org.inviwo.HDF5ToFunction", key, xpos_tmp, ypos+6)
            to_func_processors.append(toFunction)
            self.network.addConnection(partialBandSelection.getOutport('hdf5HandleVectorOutport'), toFunction.getInport('hdf5HandleFlatMultiInport'))
            toFunction.yNamePrependParentsProperty.value = 2
            addOperation = self.add_processor("org.inviwo.FunctionOperationNary", "Partial Up Add {}".format(n), xpos_tmp, ypos+9)
            self.network.addConnection(toFunction.getOutport("functionVectorOutport"), addOperation.getInport("functionFlatMultiInport"))
            self.network.addConnection(addOperation.getOutport("dataframeOutport"), partialCollector.getInport('dataframeMultiInport'))
            addOperation.operationProperty.value = 'add'
            n += 1  # NOTE(review): dead statement -- n is reassigned by enumerate.
            xpos_tmp += 7

        # Merge both branches and feed the line plot.
        collector = self.add_processor("org.inviwo.DataFrameCollector", "Collector", xpos+7, ypos+18)
        self.network.addConnection(totalCollector.getOutport("dataframeOutport"), collector.getInport('dataframeMultiInport'))
        self.network.addConnection(partialCollector.getOutport("dataframeOutport"), collector.getInport('dataframeMultiInport'))
        linePlot = self.get_processor("LinePlot")
        self.network.addConnection(collector.getOutport("dataframeOutport"), linePlot.getInport('dataFrameInport'))
        self.network.unlock()

        # Set hdf5 selector paths
        pathSelection.selection.value = '/DOS'
        totalSelection.selection.value = '/Total'
        partialSelection.selection.value = '/Partial'

        # Set function paths.
        self.network.lock()  # Lock network for performance increase.
        # names is ordered exactly like to_func_processors was filled above.
        names = down_totals + up_totals + down_partials + up_partials
        for i, toFunction in enumerate(to_func_processors):
            toFunction.implicitXProperty.value = False
            toFunction.xPathSelectionProperty.value = '/Energy'
            toFunction.yPathSelectionProperty.value = '/{}'.format(names[i])
            toFunction.xPathFreeze.value = True
            toFunction.yPathFreeze.value = True
        self.set_y_selection_type(2)
        self.network.unlock()
"content_hash": "23e16ae40b29b40dcc29519209faafa2",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 149,
"avg_line_length": 53.724489795918366,
"alnum_prop": 0.6779677113010446,
"repo_name": "rartino/ENVISIoN",
"id": "08c61b7d9d85b4afb9151bf88b41e5e402479848",
"size": "10530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envisionpy/network/DensityOfStates.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1319"
},
{
"name": "C++",
"bytes": "184828"
},
{
"name": "CMake",
"bytes": "5232"
},
{
"name": "CSS",
"bytes": "6365"
},
{
"name": "Dockerfile",
"bytes": "4225"
},
{
"name": "HTML",
"bytes": "85568"
},
{
"name": "JavaScript",
"bytes": "21401"
},
{
"name": "Python",
"bytes": "426035"
},
{
"name": "Shell",
"bytes": "4304"
}
],
"symlink_target": ""
} |
"""
Walk whole MIB
++++++++++++++
Send a series of SNMP GETNEXT requests using the following options:
* with SNMPv2c, community 'public'
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* for all OIDs in IF-MIB
* with MIB lookup enabled
Functionally similar to:
| $ snmpwalk -v2c -c public demo.snmplabs.com IF-MIB::
"""#
from pysnmp.hlapi.v1arch import *
# Build a GETNEXT (walk) iterator: SNMPv2c, community 'public', walking the
# IF-MIB subtree on the demo agent.
iterator = nextCmd(
    SnmpDispatcher(),
    CommunityData('public'),
    UdpTransportTarget(('demo.snmplabs.com', 161)),
    ObjectType(ObjectIdentity('IF-MIB'))
)

# Each iteration yields the outcome of one GETNEXT round.
for errorIndication, errorStatus, errorIndex, varBinds in iterator:
    if errorIndication:
        # Transport/engine-level failure (e.g. timeout): report and stop.
        print(errorIndication)
        break
    elif errorStatus:
        # SNMP error reported by the agent; point at the offending variable
        # binding when errorIndex identifies one, otherwise print '?'.
        print('%s at %s' % (errorStatus.prettyPrint(),
                            errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
        break
    else:
        # Success: print every OID = value pair of this round.
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))
| {
"content_hash": "e5122c510cc9346c12630cb857288392",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 24.025,
"alnum_prop": 0.6409989594172737,
"repo_name": "etingof/pysnmp",
"id": "f900018f0b94a2c610f2276288095d496c636d88",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/pull-whole-mib.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1453555"
},
{
"name": "Shell",
"bytes": "1312"
}
],
"symlink_target": ""
} |
from datetime import date
import io
import json
import requests
from unittest import mock
import os
from django.test import TestCase, override_settings
from django.core.management import call_command
from django.core.management.base import CommandError
from geotrek.common.tests.factories import RecordSourceFactory, TargetPortalFactory
from geotrek.common.models import Attachment, FileType
from geotrek.common.tests import TranslationResetMixin
from geotrek.tourism.tests.factories import (TouristicContentCategoryFactory, TouristicContentType1Factory,
TouristicContentType2Factory, TouristicEventTypeFactory)
from geotrek.tourism.models import TouristicContent, TouristicEvent
from geotrek.tourism.parsers import (TouristicContentApidaeParser, TouristicEventApidaeParser, EspritParcParser,
TouristicContentTourInSoftParserV3, TouristicContentTourInSoftParserV3withMedias,
TouristicContentTourInSoftParser, TouristicEventTourInSoftParser)
class ApidaeConstantFieldContentParser(TouristicContentApidaeParser):
    """Apidae content parser with every category/type/theme field hard-coded."""
    category = "Constant Content"
    type1 = ["Type1 1", "Type1 2"]
    type2 = ["Type2 1", "Type2 2"]
    themes = ["Theme 1", "Theme 2"]
    source = ["Source 1", "Source 2"]
    portal = ["Portal 1", "Portal 2"]
    # Missing related objects are created on the fly during import;
    # type1/type2 are looked up within their category ('fk').
    field_options = {'themes': {'create': True},
                     'category': {'create': True},
                     'type1': {'create': True, 'fk': 'category'},
                     'type2': {'create': True, 'fk': 'category'}}


class ApidaeConstantFieldEventParser(TouristicEventApidaeParser):
    """Apidae event parser with constant type/theme/source/portal fields."""
    type = "Constant Event"
    themes = ["Theme 1", "Theme 2"]
    source = ["Source 1", "Source 2"]
    portal = ["Portal 1", "Portal 2"]


class EauViveParser(TouristicContentApidaeParser):
    """Minimal Apidae content parser used by most tests below."""
    category = "Eau vive"
    type1 = ["Type A", "Type B"]
    type2 = []


class EspritParc(EspritParcParser):
    """EspritParc parser configured for the honey-products category."""
    category = "Miels et produits de la ruche"
    type1 = ["Miel", "Pollen", "Gelée royale, propolis et pollen"]
    type2 = ["Hautes Alpes Naturellement", "Bienvenue à la ferme", "Agriculture biologique"]


class HOT28(TouristicContentTourInSoftParser):
    """TourInSoft v2 content parser (CDT 28 hotels)."""
    url = "http://wcf.tourinsoft.com/Syndication/cdt28/xxx/Objects"
    source = "CDT 28"
    category = "Où dormir"
    type1 = "Hôtels"
    type2 = "****"
    portal = "Itinérance"


class HOT28v3(TouristicContentTourInSoftParserV3):
    """Same configuration as HOT28 but against the v3 syndication API."""
    url = "http://wcf.tourinsoft.com/Syndication/3.0/cdt28/xxx/Objects"
    source = "CDT 28"
    category = "Où dormir"
    type1 = "Hôtels"
    type2 = "****"
    portal = "Itinérance"


class HOT28v3withMedias(TouristicContentTourInSoftParserV3withMedias):
    """v3 variant that also imports attached medias."""
    url = "http://wcf.tourinsoft.com/Syndication/3.0/cdt28/xxx/Objects"
    source = "CDT 28"
    category = "Où dormir"
    type1 = "Hôtels"
    type2 = "****"
    portal = "Itinérance"


class FMA28(TouristicEventTourInSoftParser):
    """TourInSoft event parser (CDT 28 hiking agenda)."""
    url = "http://wcf.tourinsoft.com/Syndication/cdt28/xxx/Objects"
    source = "CDT 28"
    type = "Agenda rando"
    portal = "Itinérance"


class FMA28OtherPortal(TouristicEventTourInSoftParser):
    """FMA28 variant targeting another portal; portals are aggregated (m2m)."""
    url = "http://wcf.tourinsoft.com/Syndication/cdt28/xxx/Objects"
    source = "CDT 28"
    type = "Agenda rando"
    portal = "Other_portal"
    m2m_aggregate_fields = ["portal"]
class ParserTests(TranslationResetMixin, TestCase):
    @mock.patch('geotrek.common.parsers.requests.get')
    def test_create_content_apidae_failed(self, mocked):
        """The import command must fail loudly when the Apidae API returns 404."""
        mocked.return_value.status_code = 404
        FileType.objects.create(type="Photographie")
        TouristicContentCategoryFactory(label="Eau vive")
        TouristicContentType1Factory(label="Type A")
        TouristicContentType1Factory(label="Type B")
        with self.assertRaises(CommandError):
            call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser', verbosity=2)
        self.assertTrue(mocked.called)

    @mock.patch('geotrek.common.parsers.requests.get')
    def test_create_content_espritparc_failed(self, mocked):
        """Same 404 failure path, with the full EspritParc category/type setup."""
        mocked.return_value.status_code = 404
        FileType.objects.create(type="Photographie")
        category = TouristicContentCategoryFactory(label="Miels et produits de la ruche")
        TouristicContentType1Factory(label="Miel", category=category)
        TouristicContentType1Factory(label="Gelée royale, propolis et pollen", category=category)
        TouristicContentType1Factory(label="Pollen", category=category)
        TouristicContentType1Factory(label="Cire", category=category)
        TouristicContentType2Factory(label="Hautes Alpes Naturellement", category=category)
        TouristicContentType2Factory(label="Bienvenue à la ferme", category=category)
        TouristicContentType2Factory(label="Agriculture biologique", category=category)
        with self.assertRaises(CommandError):
            call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser', verbosity=2)
        self.assertTrue(mocked.called)
    @mock.patch('geotrek.common.parsers.requests.get')
    @override_settings(PARSER_RETRY_SLEEP_TIME=0)
    @mock.patch('geotrek.common.parsers.AttachmentParserMixin.download_attachments', False)
    def test_create_content_espritparc_retry(self, mocked):
        """Two 503 responses followed by a 200 must succeed after retries."""
        def mocked_json():
            # Canned Apidae payload used as the successful response body.
            filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeContent.json')
            with open(filename, 'r') as f:
                return json.load(f)

        def side_effect(url, params, auth, code):
            # Build a bare requests.Response carrying the wanted status code.
            response = requests.Response()
            response.status_code = code
            response.url = url
            if code == 200:
                response.json = mocked_json
            return response

        # First two attempts are throttled (503); the third one succeeds.
        mocked.side_effect = [side_effect(EauViveParser.url, None, None, 503),
                              side_effect(EauViveParser.url, None, None, 503),
                              side_effect(EauViveParser.url, None, None, 200)]
        FileType.objects.create(type="Photographie")
        TouristicContentCategoryFactory(label="Eau vive")
        TouristicContentType1Factory(label="Type A")
        TouristicContentType1Factory(label="Type B")
        call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser')
        self.assertTrue(mocked.called)
        self.assertEqual(TouristicContent.objects.count(), 1)

    @mock.patch('geotrek.common.parsers.requests.get')
    @override_settings(PARSER_RETRY_SLEEP_TIME=0)
    def test_create_content_espritparc_retry_fail(self, mocked):
        """Unrelenting 503 responses must abort the import with CommandError."""
        def mocked_json():
            # NOTE(review): defined but never reached -- every response is 503.
            filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeContent.json')
            with open(filename, 'r') as f:
                return json.load(f)

        def side_effect(url, allow_redirects, params):
            # Always answer 503, whatever the request.
            response = requests.Response()
            response.status_code = 503
            response.url = url
            return response

        mocked.side_effect = side_effect
        FileType.objects.create(type="Photographie")
        TouristicContentCategoryFactory(label="Eau vive")
        TouristicContentType1Factory(label="Type A")
        TouristicContentType1Factory(label="Type B")
        with self.assertRaisesRegex(CommandError, "Failed to download %s. HTTP status code 503" % EauViveParser.url):
            call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser')
        self.assertTrue(mocked.called)
    @mock.patch('geotrek.common.parsers.requests.get')
    @mock.patch('geotrek.common.parsers.requests.head')
    def test_create_content_espritparc_not_fail_type1_does_not_exist(self, mocked_head, mocked_get):
        """A missing Type 1 must be reported as a warning, not abort the import."""
        def mocked_json():
            filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
            with open(filename, 'r') as f:
                return json.load(f)

        filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
        # Mock GET
        mocked_get.return_value.status_code = 200
        mocked_get.return_value.json = mocked_json
        mocked_get.return_value.content = b'Fake image'
        # Mock HEAD
        mocked_head.return_value.status_code = 200
        mocked_head.return_value.headers = {'content-length': 666}
        FileType.objects.create(type="Photographie")
        # Only Type 2 values are created here: 'Miel' (Type 1) is deliberately absent.
        category = TouristicContentCategoryFactory(label="Miels et produits de la ruche")
        TouristicContentType2Factory(label="Hautes Alpes Naturellement", category=category)
        TouristicContentType2Factory(label="Bienvenue à la ferme", category=category)
        TouristicContentType2Factory(label="Agriculture biologique", category=category)
        output = io.StringIO()
        call_command('import', 'geotrek.tourism.tests.test_parsers.EspritParc', filename, verbosity=2, stdout=output)
        self.assertTrue(mocked_get.called)
        self.assertTrue(mocked_head.called)
        self.assertIn("Type 1 'Miel' does not exist for category 'Miels et produits de la ruche'. Please add it,",
                      output.getvalue())

    @mock.patch('geotrek.common.parsers.requests.get')
    @mock.patch('geotrek.common.parsers.requests.head')
    def test_create_content_espritparc_not_fail_type2_does_not_exist(self, mocked_head, mocked_get):
        """A missing Type 2 must be reported as a warning, not abort the import."""
        def mocked_json():
            filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
            with open(filename, 'r') as f:
                return json.load(f)

        filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
        # Mock GET
        mocked_get.return_value.status_code = 200
        mocked_get.return_value.json = mocked_json
        mocked_get.return_value.content = b'Fake image'
        # Mock HEAD
        mocked_head.return_value.status_code = 200
        mocked_head.return_value.headers = {'content-length': 666}
        FileType.objects.create(type="Photographie")
        # Mirror of the test above: Type 1 values exist, Type 2 values do not.
        category = TouristicContentCategoryFactory(label="Miels et produits de la ruche")
        TouristicContentType1Factory(label="Miel", category=category)
        TouristicContentType1Factory(label="Gelée royale, propolis et pollen", category=category)
        TouristicContentType1Factory(label="Pollen", category=category)
        TouristicContentType1Factory(label="Cire", category=category)
        output = io.StringIO()
        call_command('import', 'geotrek.tourism.tests.test_parsers.EspritParc', filename, verbosity=2, stdout=output)
        self.assertTrue(mocked_get.called)
        self.assertTrue(mocked_head.called)
        self.assertIn("Type 2 'Bienvenue à la ferme' does not exist for category 'Miels et produits de la ruche'. Please add it",
                      output.getvalue())
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_content_apidae(self, mocked):
def mocked_json():
filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeContent.json')
with open(filename, 'r') as f:
return json.load(f)
mocked.return_value.status_code = 200
mocked.return_value.json = mocked_json
mocked.return_value.content = b'Fake image'
FileType.objects.create(type="Photographie")
category = TouristicContentCategoryFactory(label="Eau vive")
TouristicContentType1Factory(label="Type A")
TouristicContentType1Factory(label="Type B")
call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser', verbosity=0)
self.assertTrue(mocked.called)
self.assertEqual(TouristicContent.objects.count(), 1)
content = TouristicContent.objects.get()
self.assertEqual(content.eid, "479743")
self.assertEqual(content.name, "Quey' Raft")
self.assertEqual(content.description[:27], "Au pied du château médiéval")
self.assertEqual(content.description_teaser[:24], "Des descentes familiales")
self.assertEqual(content.contact[:24], "Château Queyras<br>05350")
self.assertEqual(content.email, "info@queyraft.com")
self.assertEqual(content.website, "http://www.queyraft.com")
self.assertEqual(round(content.geom.x), 1000157)
self.assertEqual(round(content.geom.y), 6413576)
self.assertEqual(content.practical_info[:39], "<b>Ouverture:</b><br>Du 01/05 au 31/10.")
self.assertTrue("<br><b>Capacité totale:</b><br>10<br>" in content.practical_info)
self.assertTrue("><br><b>Services:</b><br>Test, Test2, Test3, Test4<br>" in content.practical_info)
self.assertIn("<b>Tarifs:</b><br>A partir de 30 € par personne<br>", content.practical_info)
self.assertIn("<b>Accès:</b><br>TestFr<br>", content.practical_info)
self.assertTrue(content.published)
self.assertEqual(content.category, category)
self.assertQuerysetEqual(
content.type1.all(),
['<TouristicContentType1: Type A>', '<TouristicContentType1: Type B>']
)
self.assertQuerysetEqual(content.type2.all(), [])
self.assertEqual(Attachment.objects.count(), 4)
self.assertEqual(Attachment.objects.first().content_object, content)
@mock.patch('geotrek.common.parsers.requests.get')
def test_filetype_structure_none(self, mocked):
    """A FileType whose structure is None must still be usable by the parser."""
    def mocked_json():
        # Serve the canned Apidae payload in place of the live API.
        filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeContent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    # structure=None is the point of this test.
    FileType.objects.create(type="Photographie", structure=None)
    TouristicContentCategoryFactory(label="Eau vive")
    TouristicContentType1Factory(label="Type A")
    TouristicContentType1Factory(label="Type B")
    call_command('import', 'geotrek.tourism.tests.test_parsers.EauViveParser', verbosity=0)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicContent.objects.count(), 1)
@mock.patch('geotrek.common.parsers.requests.get')
def test_no_event_apidae(self, mocked):
    """An Apidae payload containing no events must import nothing."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeNoEvent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    # verbosity=2 exercises the progress output path; capture it off-screen.
    output = io.StringIO()
    call_command('import', 'geotrek.tourism.parsers.TouristicEventApidaeParser', verbosity=2, stdout=output)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicEvent.objects.count(), 0)
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_event_apidae(self, mocked):
    """Import one Apidae event and check every parsed field end to end."""
    def mocked_json():
        # Serve the canned Apidae event payload in place of the live API.
        filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeEvent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    self.assertEqual(TouristicEvent.objects.count(), 0)
    output = io.StringIO()
    call_command('import', 'geotrek.tourism.parsers.TouristicEventApidaeParser', verbosity=2, stdout=output)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicEvent.objects.count(), 1)
    event = TouristicEvent.objects.get()
    self.assertEqual(event.eid, "323154")
    self.assertEqual(event.name, "Cols Réservés 2019 : Montée de Chabre (Laragne)")
    self.assertEqual(event.description[:31], "Le département des Hautes-Alpes")
    self.assertEqual(event.description_teaser[:18], "Une des ascensions")
    self.assertEqual(event.contact[:21], "Châteauneuf de Chabre")
    self.assertEqual(event.email, "LeGrandTim@mail.fr")
    self.assertEqual(event.website, "http://www.LeGrandTim.fr")
    # Projected coordinates as stored after import; expected values come from the fixture.
    self.assertEqual(round(event.geom.x), 922920)
    self.assertEqual(round(event.geom.y), 6357103)
    self.assertEqual(event.practical_info[:38], "<b>Ouverture:</b><br>Mardi 6 août 2019")
    self.assertIn("><br><b>Services:</b><br>Le plus grand des services, Un autre grand service<br>",
                  event.practical_info)
    self.assertIn("<b>Ouverture:</b><br>Mardi 6 août 2019 de 9h à midi.<br>", event.practical_info)
    self.assertIn("<b>Langues Parlées:</b><br>Français<br>", event.practical_info)
    self.assertIn("<b>Accès:</b><br>TestFr<br>", event.practical_info)
    self.assertTrue(event.published)
    self.assertEqual(event.organizer, 'Toto')
    self.assertEqual(str(event.meeting_time), '09:00:00')
    self.assertEqual(event.type.type, 'Sports')
    self.assertQuerysetEqual(
        event.themes.all(),
        ['<Theme: Cyclisme>', '<Theme: Sports cyclistes>']
    )
    self.assertEqual(Attachment.objects.count(), 3)
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_event_apidae_constant_fields(self, mocked):
    """The constant-field event parser must set type/themes/source/portal from its class attributes."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeEvent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    # Pre-create the portals/sources the parser references by name.
    TargetPortalFactory(name='Portal 1')
    TargetPortalFactory(name='Portal 2')
    RecordSourceFactory(name='Source 1')
    RecordSourceFactory(name='Source 2')
    self.assertEqual(TouristicEvent.objects.count(), 0)
    output = io.StringIO()
    call_command('import', 'geotrek.tourism.tests.test_parsers.ApidaeConstantFieldEventParser', verbosity=2,
                 stdout=output)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicEvent.objects.count(), 1)
    event = TouristicEvent.objects.get()
    self.assertEqual(str(event.type), "Constant Event")
    self.assertQuerysetEqual(event.themes.all(), ["Theme 1", "Theme 2"], transform=str)
    self.assertQuerysetEqual(event.source.all(), ["Source 1", "Source 2"], transform=str)
    self.assertQuerysetEqual(event.portal.all(), ["Portal 1", "Portal 2"], transform=str)
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_content_apidae_constant_fields(self, mocked):
    """The constant-field content parser must set category/types/themes/source/portal from its class attributes."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'apidaeContent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    # Pre-create the portals/sources the parser references by name.
    TargetPortalFactory(name='Portal 1')
    TargetPortalFactory(name='Portal 2')
    RecordSourceFactory(name='Source 1')
    RecordSourceFactory(name='Source 2')
    self.assertEqual(TouristicContent.objects.count(), 0)
    output = io.StringIO()
    call_command('import', 'geotrek.tourism.tests.test_parsers.ApidaeConstantFieldContentParser', verbosity=2,
                 stdout=output)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicContent.objects.count(), 1)
    content = TouristicContent.objects.get()
    self.assertEqual(str(content.category), "Constant Content")
    self.assertQuerysetEqual(content.type1.all(), ["Type1 1", "Type1 2"], transform=str)
    self.assertQuerysetEqual(content.type2.all(), ["Type2 1", "Type2 2"], transform=str)
    self.assertQuerysetEqual(content.themes.all(), ["Theme 1", "Theme 2"], transform=str)
    self.assertQuerysetEqual(content.source.all(), ["Source 1", "Source 2"], transform=str)
    self.assertQuerysetEqual(content.portal.all(), ["Portal 1", "Portal 2"], transform=str)
@mock.patch('geotrek.common.parsers.requests.get')
@mock.patch('geotrek.common.parsers.requests.head')
def test_create_esprit(self, mocked_head, mocked_get):
    """Import the EspritParc fixture and spot-check the 24 created contents."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
        with open(filename, 'r') as f:
            return json.load(f)
    # Fixture path, also passed to the import command as the parser's source argument.
    filename = os.path.join(os.path.dirname(__file__), 'data', 'espritparc.json')
    # Mock GET
    mocked_get.return_value.status_code = 200
    mocked_get.return_value.json = mocked_json
    mocked_get.return_value.content = b'Fake image'
    # Mock HEAD
    mocked_head.return_value.status_code = 200
    mocked_head.return_value.headers = {'content-length': 666}
    FileType.objects.create(type="Photographie")
    # Category plus the type1/type2 labels the parser resolves during import.
    category = TouristicContentCategoryFactory(label="Miels et produits de la ruche")
    TouristicContentType1Factory(label="Miel", category=category)
    TouristicContentType1Factory(label="Gelée royale, propolis et pollen", category=category)
    TouristicContentType1Factory(label="Pollen", category=category)
    TouristicContentType1Factory(label="Cire", category=category)
    TouristicContentType2Factory(label="Hautes Alpes Naturellement", category=category)
    TouristicContentType2Factory(label="Bienvenue à la ferme", category=category)
    TouristicContentType2Factory(label="Agriculture biologique", category=category)
    call_command('import', 'geotrek.tourism.tests.test_parsers.EspritParc', filename, verbosity=0)
    self.assertTrue(mocked_get.called)
    self.assertTrue(mocked_head.called)
    self.assertEqual(TouristicContent.objects.count(), 24)
    content = TouristicContent.objects.all()
    # Expected external ids, one per imported content.
    eid = [
        "PDT44", "PDT46", "PDT47", "PDT48", "PDT51", "PDT52", "PDT53", "PDT93", "PDT94", "PDT95",
        "PDT186", "PDT260", "PDT261", "PDT842", "PDT471", "PDT503", "PDT504", "PDT505", "PDT506",
        "PDT795", "PDT797", "PDT799", "PDT836", "PDT837"
    ]
    # Allowed (lower-cased) names; several contents share a name, hence fewer than 24 entries.
    name = [
        "miel de montagne", "miel de haute montagne", "miel de printemps d'embrun",
        "gel\xe9e royale de montagne", "pollen de montagne", "miel de haute montagne bio", "miel de for\xeat",
        "miel de pissenlit", "miel de haute montagne du valgaudemar", "pollen frais de montagne",
        "miel de printemps de l'embrunais", "pollen de fleurs de montagne", "pain de cire",
        "miel de montagne toutes fleurs", "miel cuv\xe9e sp\xe9ciale d'ancelle", "miel des ecrins"
    ]
    for one in content:
        self.assertIn(one.eid, eid)
        self.assertIn(one.name.lower(), name)
        self.assertEqual(one.category, category)
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_content_tourinsoft_v2(self, mocked):
    """Import one Tourinsoft v2 content and check every parsed field."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'tourinsoftContent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    category = TouristicContentCategoryFactory(label="Où dormir")
    source = RecordSourceFactory(name="CDT 28")
    portal = TargetPortalFactory(name="Itinérance")
    call_command('import', 'geotrek.tourism.tests.test_parsers.HOT28', verbosity=0)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicContent.objects.count(), 1)
    content = TouristicContent.objects.get()
    self.assertEqual(content.eid, "HOTCEN0280010001")
    self.assertEqual(content.name, "Hôtel du Perche")
    # The fixture has no description, so the slice is the empty string.
    self.assertEqual(content.description[:27], "")
    self.assertEqual(content.description_teaser[:26], "A deux pas du centre ville")
    self.assertEqual(content.contact[:73], "<strong>Adresse :</strong><br>Rue de la Bruyère<br>28400 NOGENT-LE-ROTROU")
    self.assertEqual(content.email, "hotelduperche@brithotel.fr")
    self.assertEqual(content.website, "http://www.hotel-du-perche.com")
    # Projected coordinates as stored after import; expected values come from the fixture.
    self.assertEqual(round(content.geom.x), 537329)
    self.assertEqual(round(content.geom.y), 6805504)
    self.assertEqual(content.practical_info[:49], "<strong>Langues parlées :</strong><br>Anglais<br>")
    self.assertIn("du 01/01/2019 au 21/07/2019", content.practical_info)
    self.assertIn("<strong>Équipements :</strong><br>Bar<br>Parking<br>", content.practical_info)
    self.assertTrue(content.published)
    self.assertEqual(content.source.get(), source)
    self.assertEqual(content.portal.get(), portal)
    self.assertEqual(content.category, category)
    self.assertEqual(content.type1.get().label, "Hôtels")
    self.assertEqual(content.type2.get().label, "****")
    self.assertEqual(Attachment.objects.count(), 3)
    self.assertEqual(Attachment.objects.first().content_object, content)
@mock.patch('geotrek.common.parsers.requests.get')
def test_create_content_tourinsoft_v3(self, mocked):
    """Import Tourinsoft v3 contents, check fields, then re-import with media metadata."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'tourinsoftContentV3.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    FileType.objects.create(type="Photographie")
    category = TouristicContentCategoryFactory(label="Où dormir")
    source = RecordSourceFactory(name="CDT 28")
    portal = TargetPortalFactory(name="Itinérance")
    call_command('import', 'geotrek.tourism.tests.test_parsers.HOT28v3', verbosity=0)
    self.assertTrue(mocked.called)
    # The v3 fixture holds two contents; detailed checks target the first one.
    self.assertEqual(TouristicContent.objects.count(), 2)
    content = TouristicContent.objects.first()
    self.assertEqual(content.eid, "HOTCEN0280010001")
    self.assertEqual(content.name, "Hôtel du Perche")
    # The fixture has no description, so the slice is the empty string.
    self.assertEqual(content.description[:27], "")
    self.assertEqual(content.description_teaser[:26], "A deux pas du centre ville")
    self.assertEqual(content.contact[:73], "<strong>Adresse :</strong><br>Rue de la Bruyère<br>28400 NOGENT-LE-ROTROU")
    self.assertEqual(content.email, "hotelduperche@brithotel.fr")
    self.assertEqual(content.website, "http://www.hotel-du-perche.com")
    self.assertEqual(round(content.geom.x), 537329)
    self.assertEqual(round(content.geom.y), 6805504)
    self.assertEqual(content.practical_info[:49], "<strong>Langues parlées :</strong><br>Anglais<br>")
    self.assertIn("du 01/01/2019 au 21/07/2019", content.practical_info)
    self.assertIn("<strong>Équipements :</strong><br>Bar<br>Parking<br>", content.practical_info)
    self.assertTrue(content.published)
    self.assertEqual(content.source.get(), source)
    self.assertEqual(content.portal.get(), portal)
    self.assertEqual(content.category, category)
    self.assertEqual(content.type1.get().label, "Hôtels")
    self.assertEqual(content.type2.get().label, "****")
    self.assertEqual(Attachment.objects.count(), 3)
    self.assertEqual(Attachment.objects.first().content_object, content)
    # Second pass with a parser that carries media author/legend metadata.
    call_command('import', 'geotrek.tourism.tests.test_parsers.HOT28v3withMedias', verbosity=0)
    self.assertEqual(Attachment.objects.filter(author="Mairie de Briouze", legend="SteCath800").count(), 1)
@mock.patch('geotrek.common.parsers.requests.get')
@mock.patch('geotrek.common.parsers.requests.head')
def test_create_event_tourinsoft(self, mocked_head, mocked):
    """Import one Tourinsoft event and check every parsed field."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'tourinsoftEvent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    # Mock HEAD
    mocked_head.return_value.status_code = 200
    mocked_head.return_value.headers = {'content-length': 666}
    FileType.objects.create(type="Photographie")
    # NOTE(review): `type` shadows the builtin; kept as-is since renaming is out of scope here.
    type = TouristicEventTypeFactory(type="Agenda rando")
    source = RecordSourceFactory(name="CDT 28")
    portal = TargetPortalFactory(name="Itinérance")
    call_command('import', 'geotrek.tourism.tests.test_parsers.FMA28', verbosity=0)
    self.assertTrue(mocked.called)
    self.assertEqual(TouristicEvent.objects.count(), 1)
    event = TouristicEvent.objects.get()
    self.assertEqual(event.eid, "FMACEN0280060359")
    self.assertEqual(event.name, "Moto cross de Brou")
    self.assertEqual(event.description, "")
    self.assertEqual(event.description_teaser, "")
    self.assertEqual(event.contact[:69], "<strong>Adresse :</strong><br>Circuit des Tonnes<br>28160 DAMPIERRE-S")
    self.assertEqual(event.email, "moto-club.brou@orange.fr")
    self.assertEqual(event.website, "http://www.mxbrou.com")
    # Projected coordinates as stored after import; expected values come from the fixture.
    self.assertEqual(round(event.geom.x), 559796)
    self.assertEqual(round(event.geom.y), 6791765)
    self.assertEqual(event.practical_info[:61], "<strong>Langues parlées :</strong><br>Anglais<br>Allemand<br>")
    self.assertIn("<strong>Équipements :</strong><br>Restauration sur place<br>Sanitaires", event.practical_info)
    self.assertTrue(event.published)
    self.assertEqual(event.source.get(), source)
    self.assertEqual(event.portal.get(), portal)
    self.assertEqual(event.type, type)
    self.assertEqual(Attachment.objects.count(), 9)
    self.assertEqual(Attachment.objects.first().content_object, event)
    self.assertEqual(event.begin_date, date(2100, 6, 1))
    self.assertEqual(event.end_date, date(2100, 6, 2))
@mock.patch('geotrek.common.parsers.requests.get')
@mock.patch('geotrek.common.parsers.requests.head')
def test_create_event_multiple_parsers(self, mocked_head, mocked):
    """Two parsers importing the same event must accumulate portals, not duplicate the event."""
    def mocked_json():
        filename = os.path.join(os.path.dirname(__file__), 'data', 'tourinsoftEvent.json')
        with open(filename, 'r') as f:
            return json.load(f)
    mocked.return_value.status_code = 200
    mocked.return_value.json = mocked_json
    mocked.return_value.content = b'Fake image'
    # Mock HEAD
    mocked_head.return_value.status_code = 200
    mocked_head.return_value.headers = {'content-length': 666}
    FileType.objects.create(type="Photographie")
    TouristicEventTypeFactory(type="Agenda rando")
    RecordSourceFactory(name="CDT 28")
    TargetPortalFactory(name="Itinérance")
    TargetPortalFactory(name='Other_portal')
    # First import attaches the event to the Itinérance portal only.
    call_command('import', 'geotrek.tourism.tests.test_parsers.FMA28', verbosity=0)
    self.assertEqual(TouristicEvent.objects.count(), 1)
    event = TouristicEvent.objects.get()
    self.assertQuerysetEqual(event.portal.all(), ['<TargetPortal: Itinérance>'])
    # Second parser targets another portal; the same event gains it instead of being re-created.
    call_command('import', 'geotrek.tourism.tests.test_parsers.FMA28OtherPortal',
                 verbosity=0)
    self.assertEqual(TouristicEvent.objects.count(), 1)
    event = TouristicEvent.objects.get()
    self.assertQuerysetEqual(event.portal.all(),
                             ['<TargetPortal: Itinérance>',
                              '<TargetPortal: Other_portal>'],
                             ordered=False)
| {
"content_hash": "37fbc502cbec9a5a1f2107f9c931c761",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 129,
"avg_line_length": 52.389802631578945,
"alnum_prop": 0.6643330298559006,
"repo_name": "makinacorpus/Geotrek",
"id": "c7fd47b72199d883536151988bac0dd41ffb74a9",
"size": "31911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geotrek/tourism/tests/test_parsers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30638"
},
{
"name": "HTML",
"bytes": "141008"
},
{
"name": "JavaScript",
"bytes": "184508"
},
{
"name": "Makefile",
"bytes": "4170"
},
{
"name": "PLpgSQL",
"bytes": "85546"
},
{
"name": "Python",
"bytes": "2768434"
},
{
"name": "Shell",
"bytes": "18090"
}
],
"symlink_target": ""
} |
import re
import errno
import sys
from socket import getfqdn
from os.path import getmtime, join, exists
from urllib import urlencode
from ConfigParser import ConfigParser
from django.shortcuts import render_to_response
from django.http import HttpResponse, QueryDict
from django.conf import settings
from django.contrib.auth import login, authenticate, logout
from graphite.util import json, getProfile
from graphite.dashboard.models import Dashboard, Template
from graphite.render.views import renderView
from send_graph import send_graph_email
# Matches "<field>" placeholders inside dashboard scheme patterns.
fieldRegex = re.compile(r'<([^>]+)>')

# Fallback scheme used when DASHBOARD_CONF defines none.
defaultScheme = {
    'name' : 'Everything',
    'pattern' : '<category>',
    'fields' : [ dict(name='category', label='Category') ],
}

# UI settings applied whenever the [ui] config section omits an option.
defaultUIConfig = {
    'default_graph_width'  : 400,
    'default_graph_height' : 250,
    'refresh_interval'     :  60,
    'autocomplete_delay'   : 375,
    'merge_hover_delay'    : 700,
    'theme'                : 'default',
}

# Defaults overlaid by the optional [keyboard-shortcuts] config section.
defaultKeyboardShortcuts = {
    'open_finder' : 'alt-a',
    'open_dashboard_editor' : 'shift-e',
    'time_range_1_hours': 'alt-shift-1',
    'time_range_6_hours': 'alt-shift-2',
    'time_range_1_day': 'alt-shift-3',
    'time_range_1_week': 'alt-shift-4',
    'toggle_toolbar' : 'ctrl-z',
    'toggle_metrics_panel' : 'ctrl-space',
    'erase_all_graphs' : 'alt-x',
    'save_dashboard' : 'alt-s',
    'completer_add_metrics' : 'alt-enter',
    'completer_del_metrics' : 'alt-backspace',
    'give_completer_focus' : 'shift-space',
    'prev_dashboard' : 'up',
    'next_dashboard' : 'down',
    'select_dashboard' : 'enter',
}

# Full permission set granted when no restriction applies (see getPermissions).
ALL_PERMISSIONS = ['change', 'delete']
class DashboardConfig:
    """Loads dashboard naming schemes and UI options from settings.DASHBOARD_CONF."""
    def __init__(self):
        self.last_read = 0                       # mtime of the last successful read (currently unused, see check)
        self.schemes = [defaultScheme]           # parsed naming schemes, default-first
        self.ui_config = defaultUIConfig.copy()  # effective UI options

    def check(self):
        # NOTE(review): the mtime guard below is commented out, so every call
        # re-parses the config file unconditionally — confirm this is intended.
        #if getmtime(settings.DASHBOARD_CONF) > self.last_read:
        self.load()

    def load(self):
        """Parse DASHBOARD_CONF into self.ui_config and self.schemes."""
        schemes = [defaultScheme]
        parser = ConfigParser()
        parser.read(settings.DASHBOARD_CONF)

        # [ui] section: prefer integers, fall back to the raw string value.
        for option, default_value in defaultUIConfig.items():
            if parser.has_option('ui', option):
                try:
                    self.ui_config[option] = parser.getint('ui', option)
                except ValueError:
                    self.ui_config[option] = parser.get('ui', option)
            else:
                self.ui_config[option] = default_value

        # automatic_variants defaults to enabled.
        if parser.has_option('ui', 'automatic_variants'):
            self.ui_config['automatic_variants'] = parser.getboolean('ui', 'automatic_variants')
        else:
            self.ui_config['automatic_variants'] = True

        # Keyboard shortcuts: defaults overlaid with the [keyboard-shortcuts] section.
        self.ui_config['keyboard_shortcuts'] = defaultKeyboardShortcuts.copy()
        if parser.has_section('keyboard-shortcuts'):
            self.ui_config['keyboard_shortcuts'].update( parser.items('keyboard-shortcuts') )

        # Every remaining section defines a naming scheme; each "<field>"
        # placeholder in its pattern becomes a field with an optional label.
        for section in parser.sections():
            if section in ('ui', 'keyboard-shortcuts'):
                continue
            scheme = parser.get(section, 'scheme')
            fields = []
            for match in fieldRegex.finditer(scheme):
                field = match.group(1)
                if parser.has_option(section, '%s.label' % field):
                    label = parser.get(section, '%s.label' % field)
                else:
                    label = field
                fields.append({
                    'name' : field,
                    'label' : label
                })
            schemes.append({
                'name' : section,
                'pattern' : scheme,
                'fields' : fields,
            })
        self.schemes = schemes

# Module-level singleton shared by all views below.
config = DashboardConfig()
def dashboard(request, name=None):
    """Render the dashboard UI, optionally pre-loading the dashboard *name*."""
    dashboard_conf_missing = False
    try:
        config.check()
    except OSError:
        # Py2/Py3-compatible way to grab the exception instance.
        e = sys.exc_info()[1]
        if e.errno == errno.ENOENT:
            # A missing config file is tolerated; the UI is told about it.
            dashboard_conf_missing = True
        else:
            raise

    initialError = None
    debug = request.GET.get('debug', False)
    theme = request.GET.get('theme', config.ui_config['theme'])
    css_file = join(settings.CSS_DIR, 'dashboard-%s.css' % theme)
    if not exists(css_file):
        # Unknown theme: report it and fall back to the configured default.
        initialError = "Invalid theme '%s'" % theme
        theme = config.ui_config['theme']

    context = {
        'schemes_json' : json.dumps(config.schemes),
        'ui_config_json' : json.dumps(config.ui_config),
        'jsdebug' : debug or settings.JAVASCRIPT_DEBUG,
        'debug' : debug,
        'theme' : theme,
        'initialError' : initialError,
        'querystring' : json.dumps( dict( request.GET.items() ) ),
        'dashboard_conf_missing' : dashboard_conf_missing,
        'userName': '',
        'permissions': json.dumps(getPermissions(request.user)),
        'permissionsUnauthenticated': json.dumps(getPermissions(None))
    }
    user = request.user
    if user:
        context['userName'] = user.username

    if name is not None:
        try:
            dashboard = Dashboard.objects.get(name=name)
        except Dashboard.DoesNotExist:
            context['initialError'] = "Dashboard '%s' does not exist." % name
        else:
            context['initialState'] = dashboard.state

    # Hostname shown for server-side debugging.
    context['server_debug'] = getfqdn()
    return render_to_response("dashboard.html", context)
def template(request, name, val):
    """Render the dashboard UI initialised from template *name* applied to *val*.

    Mirrors dashboard() above, but loads a Template and materialises its
    state for the given value before rendering.
    """
    template_conf_missing = False
    try:
        config.check()
    except OSError:
        # Fix: the original used the Python 2-only `except OSError, e` syntax,
        # a SyntaxError on Python 3. Use the sys.exc_info() idiom already
        # used by dashboard() for consistency.
        e = sys.exc_info()[1]
        if e.errno == errno.ENOENT:
            # A missing config file is tolerated; the UI is told about it.
            template_conf_missing = True
        else:
            raise

    initialError = None
    debug = request.GET.get('debug', False)
    theme = request.GET.get('theme', config.ui_config['theme'])
    css_file = join(settings.CSS_DIR, 'dashboard-%s.css' % theme)
    if not exists(css_file):
        # Unknown theme: report it and fall back to the configured default.
        initialError = "Invalid theme '%s'" % theme
        theme = config.ui_config['theme']

    context = {
        'schemes_json' : json.dumps(config.schemes),
        'ui_config_json' : json.dumps(config.ui_config),
        'jsdebug' : debug or settings.JAVASCRIPT_DEBUG,
        'debug' : debug,
        'theme' : theme,
        'initialError' : initialError,
        'querystring' : json.dumps( dict( request.GET.items() ) ),
        'template_conf_missing' : template_conf_missing,
        'userName': '',
        'permissions': json.dumps(getPermissions(request.user)),
        'permissionsUnauthenticated': json.dumps(getPermissions(None))
    }
    user = request.user
    if user:
        context['userName'] = user.username

    try:
        template = Template.objects.get(name=name)
    except Template.DoesNotExist:
        context['initialError'] = "Template '%s' does not exist." % name
    else:
        # Expand the template for this value and expose it as the initial state.
        state = json.loads(template.loadState(val))
        state['name'] = '%s/%s' % (name, val)
        context['initialState'] = json.dumps(state)
    return render_to_response("dashboard.html", context)
def getPermissions(user):
    """Return [change, delete] based on authorisation model and user privileges/groups"""
    # Treat an anonymous/unauthenticated user as no user at all.
    if user and not user.is_authenticated():
        user = None
    if not settings.DASHBOARD_REQUIRE_AUTHENTICATION:
        return ALL_PERMISSIONS # don't require login
    if not user:
        return []
    # from here on, we have a user
    permissions = ALL_PERMISSIONS
    if settings.DASHBOARD_REQUIRE_PERMISSIONS:
        # Keep only Django model permissions the user actually holds.
        permissions = [permission for permission in ALL_PERMISSIONS if user.has_perm('dashboard.%s_dashboard' % permission)]
    editGroup = settings.DASHBOARD_REQUIRE_EDIT_GROUP
    if editGroup and len(user.groups.filter(name = editGroup)) == 0:
        permissions = []
    # NOTE(review): may return the shared ALL_PERMISSIONS list itself —
    # callers must not mutate the result.
    return permissions
def save(request, name):
    """Create or update dashboard *name* from the JSON state in the POST body."""
    if 'change' not in getPermissions(request.user):
        return json_response( dict(error="Must be logged in with appropriate permissions to save") )

    # Round-trip through the JSON parser as a validation step.
    state = str( json.dumps( json.loads( request.POST['state'] ) ) )

    try:
        existing = Dashboard.objects.get(name=name)
    except Dashboard.DoesNotExist:
        Dashboard.objects.create(name=name, state=state)
    else:
        existing.state = state
        existing.save()

    return json_response( dict(success=True) )
def save_template(request, name, key):
    """Create or update template *name*, storing the posted state under *key*."""
    if 'change' not in getPermissions(request.user):
        return json_response( dict(error="Must be logged in with appropriate permissions to save the template") )

    # Round-trip through the JSON parser as a validation step.
    state = str( json.dumps( json.loads( request.POST['state'] ) ) )

    try:
        tmpl = Template.objects.get(name=name)
    except Template.DoesNotExist:
        # Brand-new template: the initial state is stored without a key.
        tmpl = Template.objects.create(name=name)
        tmpl.setState(state)
    else:
        tmpl.setState(state, key)
    tmpl.save()

    return json_response( dict(success=True) )
def load(request, name):
    """Return the stored state of dashboard *name*, or an error payload."""
    try:
        board = Dashboard.objects.get(name=name)
    except Dashboard.DoesNotExist:
        return json_response( dict(error="Dashboard '%s' does not exist. " % name) )
    return json_response( dict(state=json.loads(board.state)) )
def load_template(request, name, val):
    """Return template *name* expanded for *val*, or an error payload."""
    try:
        tmpl = Template.objects.get(name=name)
    except Template.DoesNotExist:
        return json_response( dict(error="Template '%s' does not exist. " % name) )
    # Expand the stored template for this value and tag it with a composite name.
    state = json.loads(tmpl.loadState(val))
    state['name'] = '%s/%s' % (name, val)
    return json_response( dict(state=state) )
def delete(request, name):
    """Delete dashboard *name*; requires the 'delete' permission."""
    if 'delete' not in getPermissions(request.user):
        return json_response( dict(error="Must be logged in with appropriate permissions to delete") )

    try:
        board = Dashboard.objects.get(name=name)
    except Dashboard.DoesNotExist:
        return json_response( dict(error="Dashboard '%s' does not exist. " % name) )

    board.delete()
    return json_response( dict(success=True) )
def delete_template(request, name):
    """Delete template *name*; requires the 'delete' permission."""
    if 'delete' not in getPermissions(request.user):
        return json_response( dict(error="Must be logged in with appropriate permissions to delete the template") )
    try:
        template = Template.objects.get(name=name)
    except Template.DoesNotExist:
        # Bug fix: this handler previously caught Dashboard.DoesNotExist, so a
        # missing template escaped as an unhandled Template.DoesNotExist
        # (HTTP 500) instead of returning this error payload.
        return json_response( dict(error="Template '%s' does not exist. " % name) )
    else:
        template.delete()
        return json_response( dict(success=True) )
def find(request):
    """Return non-temporary dashboards whose name contains every query term."""
    terms = set( request.REQUEST['query'].lower().split() )
    matches = []
    for board in Dashboard.objects.all():
        lowered = board.name.lower()
        # Scratch dashboards created by create_temporary are never listed.
        if lowered.startswith('temporary-'):
            continue
        # A blank query matches everything; otherwise every term must be a substring.
        if all(term in lowered for term in terms):
            matches.append( dict(name=board.name) )
    return json_response( dict(dashboards=matches) )
def find_template(request):
    """Return templates whose name contains every space-separated query term."""
    terms = set( request.REQUEST['query'].lower().split() )
    # A blank query matches everything; otherwise every term must be a substring.
    matches = [
        dict(name=tmpl.name)
        for tmpl in Template.objects.all()
        if all(term in tmpl.name.lower() for term in terms)
    ]
    return json_response( dict(templates=matches) )
def help(request):
    """Render the static dashboard help page."""
    # 'help' shadows the builtin, but the name is referenced by the URLconf.
    return render_to_response("dashboardHelp.html", {})
def email(request):
    """Render the requested graph and email it as a PNG attachment."""
    sender = request.POST['sender']
    recipients = request.POST['recipients'].split()
    subject = request.POST['subject']
    message = request.POST['message']

    # these need to be passed to the render function in an HTTP request.
    # parse_int=str keeps numeric graph parameters as strings so they
    # survive the querystring round-trip unchanged.
    graph_params = json.loads(request.POST['graph_params'], parse_int=str)
    target = QueryDict(graph_params.pop('target'))
    graph_params = QueryDict(urlencode(graph_params))

    # Rebuild POST so renderView sees the graph parameters it expects.
    new_post = request.POST.copy()
    new_post.update(graph_params)
    new_post.update(target)
    request.POST = new_post  # NOTE(review): mutates the incoming request in place

    resp = renderView(request)
    img = resp.content

    if img:
        attachments = [('graph.png', img, 'image/png')]
        send_graph_email(subject, sender, recipients, attachments, message)

    return json_response(dict(success=True))
def create_temporary(request):
    """Save the posted state under the first unused 'temporary-<i>' name."""
    # Round-trip through the JSON parser as a validation step.
    state = str( json.dumps( json.loads( request.POST['state'] ) ) )

    # Probe temporary-0, temporary-1, ... until a free slot is found.
    i = 0
    while True:
        candidate = "temporary-%d" % i
        try:
            Dashboard.objects.get(name=candidate)
        except Dashboard.DoesNotExist:
            dashboard = Dashboard.objects.create(name=candidate, state=state)
            break
        i += 1

    return json_response( dict(name=dashboard.name) )
def json_response(obj):
    """Serialize *obj* as JSON and wrap it in an HTTP response."""
    body = json.dumps(obj)
    return HttpResponse(content=body, content_type='application/json')
def user_login(request):
    """Authenticate the posted credentials and log the user in on success."""
    response = dict(errors={}, text={}, success=False, permissions=[])
    user = authenticate(username=request.POST['username'],
                        password=request.POST['password'])
    if user is None:
        response['errors']['reason'] = 'Username and/or password invalid.'
    elif not user.is_active:
        response['errors']['reason'] = 'Account disabled.'
    else:
        login(request, user)
        response['success'] = True
        response['permissions'].extend(getPermissions(user))
    return json_response(response)
def user_logout(request):
    """Log the current user out; always reports success."""
    logout(request)
    return json_response(dict(errors={}, text={}, success=True))
| {
"content_hash": "d317ba693c52f6ee95848c1a56d3bc23",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 120,
"avg_line_length": 29.87037037037037,
"alnum_prop": 0.6684748915065096,
"repo_name": "dkulikovsky/graphite-ch-web",
"id": "52aecbd64b039add9ea54d6ba801a9a49254af2d",
"size": "12904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/graphite/dashboard/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "152107"
},
{
"name": "HTML",
"bytes": "38818"
},
{
"name": "JavaScript",
"bytes": "1927910"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "446137"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1045"
}
],
"symlink_target": ""
} |
"""
Testing module for the class ContinuousColumn
"""
from unittest import TestCase
import numpy as np
from numpy import nan
from setup_tests import list_ordered_equal, CHAID
import pytest
def test_chaid_vector_converts_strings():
    """
    Check that error raised when string column supplied
    """
    with pytest.raises(ValueError) as excinfo:
        vector = CHAID.ContinuousColumn(np.array(['2', '4']))
    # NOTE(review): `excinfo.value.message` is the Python 2-era attribute;
    # modern pytest/Python would compare str(excinfo.value) — confirm target version.
    assert excinfo.value.message == 'Must only pass numerical values to create continuous column'
def test_chaid_vector_with_dtype_object():
    """
    Check that error raised when object column supplied
    """
    with pytest.raises(ValueError) as excinfo:
        vector = CHAID.ContinuousColumn(np.array(['2', '4'], dtype="object"))
    # NOTE(review): `excinfo.value.message` is the Python 2-era attribute;
    # modern pytest/Python would compare str(excinfo.value) — confirm target version.
    assert excinfo.value.message == 'Must only pass numerical values to create continuous column'
def test_nans_filled_with_zero():
    """
    Check that nans are filled with zero
    """
    values = np.array([np.nan, 1., 2., 3.])
    expected = np.array([0., 1., 2., 3.])
    column = CHAID.ContinuousColumn(values)
    assert (column.arr == expected).all()
| {
"content_hash": "b5600054508285855a52a72c7af98a75",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 101,
"avg_line_length": 33.53125,
"alnum_prop": 0.6812674743709226,
"repo_name": "Rambatino/CHAID",
"id": "8939bdf1f7f64342d21f21465d5c86bc0e414e6f",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_continuous_column.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "112457"
}
],
"symlink_target": ""
} |
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from girder import logger
import requests
import os
class ImageDomainDynamicsSearch(Resource):
    """Girder REST resource proxying image domain-dynamics searches to an external service."""
    def __init__(self):
        # NOTE(review): Resource.__init__ is not called here — confirm the
        # girder base class tolerates that.
        self.resourceName = 'imagedomaindynamicssearch'
        self.route('GET', (), self.getImageDomainDynamicsSearch)

    @access.public
    def getImageDomainDynamicsSearch(self, params):
        return self._imageDomainDynamicsSearch(params)

    @access.public
    def postImageDomainDynamicsSearch(self, params):
        # NOTE(review): no POST route is registered in __init__, so this
        # handler appears unreachable — confirm.
        return self._imageDomainDynamicsSearch(params)

    def _imageDomainDynamicsSearch(self, params):
        # Forward the last path segment of the given URL as 'feedback' to the
        # external search service configured via the environment.
        filename = params['url'].split('/')[-1]
        return requests.get(os.environ['IMAGE_SPACE_GEORGETOWN_DOMAIN_DYNAMICS_SEARCH'] + '?feedback=' + filename).json()

# NOTE(review): indentation was lost in extraction; this assignment likely sat
# inside the class body where the method name is in scope — confirm placement.
getImageDomainDynamicsSearch.description = Description('Searches images by domain dynamics')
| {
"content_hash": "6562c53147e34ba0af93d44b3046d541",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 121,
"avg_line_length": 35.65384615384615,
"alnum_prop": 0.7421790722761596,
"repo_name": "lewismc/image_space",
"id": "5053ce64a9eeafb7b4e8ec1144d2e051cb097dad",
"size": "1690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagespace/server/imagedomaindynamicssearch_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2705"
},
{
"name": "Python",
"bytes": "36241"
},
{
"name": "Shell",
"bytes": "2188"
}
],
"symlink_target": ""
} |
from interface import LSODES_IVP_Integrator
| {
"content_hash": "f7d168c08501cacec6c59eb5a2453fd1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.8636363636363636,
"repo_name": "bjodah/symodesys",
"id": "6352b12dbfcf63d6e6f2abe813e297ab3a38c8a1",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symodesys/odepack/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "16968"
},
{
"name": "Fortran",
"bytes": "6520"
},
{
"name": "Makefile",
"bytes": "2499"
},
{
"name": "Python",
"bytes": "124620"
},
{
"name": "Shell",
"bytes": "5142"
}
],
"symlink_target": ""
} |
"""
Django settings for test_django_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
# Repository-relative paths used to make the in-tree fixtureless package importable.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
FIXTURELESS_PATH = '../../..'
FIXTURELESS_ROOT = os.path.join(os.path.dirname(__file__), FIXTURELESS_PATH)
# NOTE(review): the *relative* FIXTURELESS_PATH is inserted here, which only
# works when the process CWD matches; FIXTURELESS_ROOT (absolute) is computed
# above but never used -- confirm whether it was meant to be inserted instead.
sys.path.insert(0, FIXTURELESS_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l22o0arf-a%&1)lnz#fb$)bwg-yati@didm7&vby(ai@_n4*q-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Test App
    'test_app',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
WSGI_APPLICATION = 'test_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# No default database is configured; presumably the test runner supplies one
# -- verify before running anything that touches the ORM.
DATABASES = {
    'default': None
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "37fc4502e616e48b068fe3cc07ea06ee",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 76,
"avg_line_length": 23.823529411764707,
"alnum_prop": 0.7283950617283951,
"repo_name": "ricomoss/django-fixtureless",
"id": "7fc27f0cf3cf4e3cba042ad4a458651e62e043bb",
"size": "2025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixtureless/tests/test_django_project/test_django_project/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58354"
}
],
"symlink_target": ""
} |
from datetime import date
import django_filters
from ...discount import DiscountValueType
from ...discount.models import Sale, Voucher
from ..core.filters import EnumFilter, ObjectTypeFilter
from ..core.types.common import DateRangeInput, IntRangeInput
from ..utils import filter_by_query_param
from .enums import (
DiscountStatusEnum, DiscountValueTypeEnum, VoucherDiscountType)
def filter_status(qs, _, value):
    """Narrow *qs* to discounts in the requested status (active, expired
    or scheduled); unrecognized values leave the queryset untouched."""
    today = date.today()
    if value == DiscountStatusEnum.ACTIVE:
        qs = qs.active(today)
    elif value == DiscountStatusEnum.EXPIRED:
        qs = qs.expired(today)
    elif value == DiscountStatusEnum.SCHEDULED:
        qs = qs.filter(start_date__gt=today)
    return qs
def filter_times_used(qs, _, value):
    """Restrict vouchers by usage count; *value* holds optional ``gte``
    and ``lte`` bounds. Falsy bounds are ignored."""
    for suffix in ('gte', 'lte'):
        bound = value.get(suffix)
        if bound:
            # One .filter() call per bound, exactly as before.
            qs = qs.filter(**{'used__' + suffix: bound})
    return qs
def filter_discount_type(qs, _, value):
    """Filter vouchers by discount type: percentage/fixed map to the
    discount value type column, shipping maps to the voucher type."""
    value_types = (VoucherDiscountType.PERCENTAGE, VoucherDiscountType.FIXED)
    if value in value_types:
        return qs.filter(discount_value_type=value)
    if value == VoucherDiscountType.SHIPPING:
        return qs.filter(type=value)
    return qs
def filter_started(qs, _, value):
    """Filter by start-date range.

    *value* is a dict with optional ``gte``/``lte`` date bounds; falsy
    bounds are skipped.
    """
    gte = value.get('gte')
    lte = value.get('lte')
    if gte:
        qs = qs.filter(start_date__gte=gte)
    if lte:
        # Fix: the upper bound previously repeated ``start_date__gte``,
        # so the requested end of the range was silently ignored.
        qs = qs.filter(start_date__lte=lte)
    return qs
def filter_sale_type(qs, _, value):
    """Filter sales by discount value type (fixed or percentage only)."""
    if value in (DiscountValueType.FIXED, DiscountValueType.PERCENTAGE):
        return qs.filter(type=value)
    return qs
def filter_sale_search(qs, _, value):
    """Text search over sale name, value and type; empty query is a no-op."""
    if value:
        return filter_by_query_param(qs, value, ('name', 'value', 'type'))
    return qs
def filter_voucher_search(qs, _, value):
    """Text search over voucher name and code; empty query is a no-op."""
    if value:
        return filter_by_query_param(qs, value, ('name', 'code'))
    return qs
class VoucherFilter(django_filters.FilterSet):
    """GraphQL filter set for vouchers: status, usage count, discount
    type, start-date range and free-text search (name/code)."""
    status = EnumFilter(input_class=DiscountStatusEnum, method=filter_status)
    times_used = ObjectTypeFilter(
        input_class=IntRangeInput, method=filter_times_used
    )
    discount_type = EnumFilter(
        input_class=VoucherDiscountType, method=filter_discount_type
    )
    started = ObjectTypeFilter(
        input_class=DateRangeInput, method=filter_started
    )
    search = django_filters.CharFilter(method=filter_voucher_search)
    class Meta:
        model = Voucher
        fields = ['status', 'times_used', 'discount_type', 'started', 'search']
class SaleFilter(django_filters.FilterSet):
    """GraphQL filter set for sales: status, sale type, start-date range
    and free-text search (name/value/type)."""
    # NOTE(review): the enum-typed ``status`` and ``sale_type`` use
    # ObjectTypeFilter here, whereas VoucherFilter wraps its enum inputs
    # in EnumFilter -- confirm whether these should be EnumFilter too.
    status = ObjectTypeFilter(
        input_class=DiscountStatusEnum, method=filter_status)
    sale_type = ObjectTypeFilter(
        input_class=DiscountValueTypeEnum, method=filter_sale_type)
    started = ObjectTypeFilter(
        input_class=DateRangeInput, method=filter_started)
    search = django_filters.CharFilter(method=filter_sale_search)
    class Meta:
        model = Sale
        fields = ['status', 'sale_type', 'started', 'search']
| {
"content_hash": "4f86df014c41284d44b067b6f0bb5423",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 29.300970873786408,
"alnum_prop": 0.6756129887342611,
"repo_name": "UITools/saleor",
"id": "d805b9bc9014b965bb59e20415eee697065f2f78",
"size": "3018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/discount/filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
from unittest import TestCase, main, mock
import gzip
from click.testing import CliRunner
from micronota.commands._uniprot import cli
class Tests(TestCase):
    """Exercise the ``uniprot`` CLI command end to end (metadata mocked)."""

    @mock.patch('micronota.commands._uniprot.add_metadata', return_value=9)
    def test(self, mock_add_metadata):
        runner = CliRunner()
        with runner.isolated_filesystem():
            # Prepare a gzipped FASTA input plus a plain companion file.
            with gzip.open('uniprot.gz', 'w') as gz_in, \
                    open('test', 'w') as plain_in:
                gz_in.write(b'>abc\nATGC')
                plain_in.write('>efg\nATGC')
            outcome = runner.invoke(cli, ['uniprot.gz', 'test', 'outfile'])
            self.assertEqual(outcome.exit_code, 0)
# Allow running this test module directly with ``python test_uniprot.py``.
if __name__ == '__main__':
    main()
| {
"content_hash": "178e1703c2e22b1efd0ce7490b91c437",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 30.5,
"alnum_prop": 0.6110283159463488,
"repo_name": "RNAer/micronota",
"id": "ac83b602d88546944f4df94f0a2b9db5f5343149",
"size": "671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "micronota/commands/tests/test_uniprot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "933"
},
{
"name": "Python",
"bytes": "124368"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
"""Lists load balancer health check policies."""
from baseCmd import *
from baseResponse import *
class listLBHealthCheckPoliciesCmd(baseCmd):
    """Auto-generated CloudStack API command ``listLBHealthCheckPolicies``.

    Every parameter is optional; ``typeInfo`` records each field's wire type.
    """
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # Parameter meanings:
        #   fordisplay -- list resources by display flag (ROOT admin only)
        #   id         -- the ID of the health check policy
        #   keyword    -- list by keyword
        #   lbruleid   -- the ID of the load balancer rule
        #   page / pagesize -- pagination controls
        for field_name, wire_type in (
            ('fordisplay', 'boolean'),
            ('id', 'uuid'),
            ('keyword', 'string'),
            ('lbruleid', 'uuid'),
            ('page', 'integer'),
            ('pagesize', 'integer'),
        ):
            setattr(self, field_name, None)
            self.typeInfo[field_name] = wire_type
        self.required = []
class listLBHealthCheckPoliciesResponse(baseResponse):
    """Auto-generated response type for ``listLBHealthCheckPolicies``."""
    typeInfo = {}

    def __init__(self):
        # Scalar fields, all strings on the wire:
        #   account/domain/domainid -- owner of the HealthCheck policy
        #   lbruleid -- the LB rule ID
        #   zoneid   -- id of the zone the policy belongs to
        for field_name in ('account', 'domain', 'domainid', 'lbruleid',
                           'zoneid'):
            setattr(self, field_name, None)
            self.typeInfo[field_name] = 'string'
        # The list of health check policy entries.
        self.healthcheckpolicy = []

    class healthcheckpolicy:
        """One health check policy entry of the response."""

        def __init__(self):
            # All fields default to None until filled from the response:
            #   id, description, fordisplay, state, pingpath
            #   healthcheckinterval     -- time between health checks
            #   healthcheckthresshold   -- successes before healthy (sic)
            #   responsetime            -- wait time for a check response
            #   unhealthcheckthresshold -- failures before unhealthy (sic)
            for field_name in (
                'id', 'description', 'fordisplay', 'healthcheckinterval',
                'healthcheckthresshold', 'pingpath', 'responsetime',
                'state', 'unhealthcheckthresshold',
            ):
                setattr(self, field_name, None)
| {
"content_hash": "8a7b46c1884f48ccc7e2916d949fbd1c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 98,
"avg_line_length": 35.74324324324324,
"alnum_prop": 0.5977315689981096,
"repo_name": "MissionCriticalCloud/marvin",
"id": "e1e5cf93832985c1c9523a98ec65e3fe28ea2385",
"size": "2645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marvin/cloudstackAPI/listLBHealthCheckPolicies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2573421"
}
],
"symlink_target": ""
} |
import numpy as np
from nlaugmenter.interfaces.SentenceOperation import SentenceOperation
from nlaugmenter.tasks.TaskTypes import TaskType
"""
Base Class for implementing the different input transformations a generation should be robust against.
"""
class RandomUpperPerturbation(SentenceOperation):
    """Robustness transformation: upper-cases a random subset of the
    characters of a sentence (``corrupt_proportion`` of them)."""
    # Task types this perturbation is applicable to.
    tasks = [
        TaskType.TEXT_CLASSIFICATION,
        TaskType.TEXT_TO_TEXT_GENERATION,
        TaskType.TEXT_TAGGING,
    ]
    # ISO language codes the transformation supports (case change is
    # language-agnostic for scripts that have case).
    languages = [
        "af",
        "sq",
        "am",
        "eu",
        "en",
        "be",
        "bn",
        "bs",
        "my",
        "ca",
        "ceb",
        "zh",
        "co",
        "hr",
        "nl",
        "cs",
        "da",
        "eo",
        "et",
        "tl",
        "fi",
        "fr",
        "fy",
        "gl",
        "ka",
        "de",
        "el",
        "gu",
        "ht",
        "ha",
        "haw",
        "iw",
        "hu",
        "is",
        "ig",
        "ga",
        "it",
        "lb",
        "no",
        "pl",
        "pt",
        "ro",
        "gd",
        "sr",
        "es",
        "sv",
        "uk",
        "cu",
    ]
    def __init__(self, seed=0, max_output=1, corrupt_proportion=0.1):
        """seed: RNG seed; max_output: number of perturbed variants to
        return; corrupt_proportion: fraction of characters to upper-case."""
        super().__init__(seed)
        np.random.seed(seed)
        self.max_output = max_output
        self.corrupt_proportion = corrupt_proportion
    def generate(self, sentence: str):
        """Return a list of ``max_output`` perturbed copies of *sentence*.

        Because random_upper reseeds NumPy with the same seed on every
        call, all copies in the list are identical.
        """
        perturbed_texts = [
            self.random_upper(sentence) for _ in range(self.max_output)
        ]
        return perturbed_texts
    def random_upper(self, sentence: str):
        """Upper-case ``corrupt_proportion`` of the characters, chosen
        uniformly without replacement; deterministic for a given seed and
        sentence length (the RNG is reseeded on each call)."""
        np.random.seed(self.seed)
        # int() truncation means very short sentences may get 0 positions.
        positions = np.random.choice(
            range(len(sentence)),
            int(len(sentence) * self.corrupt_proportion),
            False,
        )
        new_sentence = [
            letter if index not in positions else letter.upper()
            for index, letter in enumerate(sentence)
        ]
        return "".join(new_sentence)
| {
"content_hash": "d8b587ae68f9be5da740ea39dc1f3c38",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 102,
"avg_line_length": 21.33695652173913,
"alnum_prop": 0.46663270504330107,
"repo_name": "GEM-benchmark/NL-Augmenter",
"id": "64416ad38c5ad035f0a53bf0a8e3198c809f6d5f",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nlaugmenter/transformations/random_upper_transformation/transformation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "151288"
},
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "1003016"
}
],
"symlink_target": ""
} |
from hashlib import sha1
import sys
from re import compile
from tuttle.error import TuttleError
from tuttle.resource import ResourceMixIn, MalformedUrl
from snakebite.client import Client
class HDFSResource(ResourceMixIn, object):
    """A resource stored on HDFS, addressed as ``hdfs://host[:port]/path``."""
    scheme = 'hdfs'
    # Captures: (1) host, (2) optional ':port', (3) absolute path.
    __ereg = compile("^hdfs://([^/^:]*)(:[0-9]*)?(/.*)$")

    def __init__(self, url):
        super(HDFSResource, self).__init__(url)
        match = self.__ereg.match(url)
        if match is None:
            raise MalformedUrl("Malformed HDFS url : '{}'".format(url))
        self._host = match.group(1)
        port_part = match.group(2)
        # Default NameNode RPC port when the URL does not specify one.
        self._port = int(port_part[1:]) if port_part else 8020
        self._partial = match.group(3)

    def set_authentication(self, user, password):
        # No HDFS-specific handling; delegate to the mix-in.
        super(HDFSResource, self).set_authentication(user, password)

    def _client(self):
        # One short-lived snakebite client per operation.
        return Client(self._host, self._port, effective_user=self._user,
                      use_trash=False)

    def exists(self):
        """True if the path exists on the cluster."""
        return self._client().test(self._partial, exists=True)

    def remove(self):
        """Recursively delete the path (bypassing the trash)."""
        # delete() returns a lazy generator; drain it so deletion happens.
        for _ in self._client().delete([self._partial], recurse=True):
            pass

    def signature(self):
        """Modification time for files; the raw file type otherwise."""
        stats = self._client().stat([self._partial])
        if stats['file_type'] == 'f':
            return "modification_time:{}".format(stats['modification_time'])
        return stats['file_type']
| {
"content_hash": "3cc9b5bdf86a7a9fc6b30e0a7e20c89d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 91,
"avg_line_length": 32.69387755102041,
"alnum_prop": 0.6054931335830213,
"repo_name": "lexman/tuttle",
"id": "916256df365b941a8c4a6907acd074db3ee21e42",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuttle/addons/hdfs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "HTML",
"bytes": "8239"
},
{
"name": "JavaScript",
"bytes": "1753739"
},
{
"name": "Python",
"bytes": "389718"
}
],
"symlink_target": ""
} |
'''
App Web que escoge una carrera universitaria de la Universidad Interameriacana de Panama;
usando Flask, en la misma se ingresa un nombre y se sigue un test para escoger la carrera y
este redirecciona a la pagina de la universidad
'''
from flask import Flask
from flask import render_template
from flask import request
from flask import make_response
from flask import session
app = Flask(__name__)
app.secret_key = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KT'
@app.route('/')
def index():
    """Render the landing page of the career test."""
    return render_template('index.html')
@app.route('/iniciarTest', methods=['POST'])
def iniciar():
    """Start the test: store the user's name in the session and move to
    the first question page; an empty name returns to the index."""
    if request.method == 'POST':
        session['name'] = request.form['yourname']
        plantilla = 'index.html' if session['name'] == "" else 'pag1.html'
        return make_response(render_template(plantilla))
@app.route('/escogerCarrera', methods=['POST'])
def EscogerCarrera():
    """Score the 25 yes/no answers and list the matching faculties.

    A faculty is suggested when at least two of its associated questions
    were answered "si". Renders pag2.html with the resulting list.
    """
    # Read all 25 answers up front (like the original, a missing field
    # aborts the request before anything is rendered).
    respuestas = {
        n: request.form['pregunta{}'.format(n)] for n in range(1, 26)
    }
    # Question groups per faculty; the original if-chains all encode
    # "at least two of the group answered si".
    grupos = (
        ((1, 4, 18), 'Comunicacion y diseño'),
        ((2, 5, 14), 'Ciencias Administrativas'),
        ((7, 10, 15), 'Ingenieria y sistemas'),
        ((9, 11, 21), 'Ciencias de la salud'),
        ((6, 12, 16), 'Arquitectura'),
        ((13, 17, 20), 'Derecho y ciencias politicas'),
        ((3, 8, 19, 22), 'Hotelería, Gastronomía y Turismo'),
        # Fix: the last clause was written as (pregunta24 and pregunta24),
        # so a "si" on question 24 alone used to trigger this faculty.
        ((23, 24, 25), 'Logistica Maritma y portuaria'),
    )
    carreras = []
    for numeros, carrera in grupos:
        if sum(respuestas[n] == "si" for n in numeros) >= 2:
            carreras.append(carrera)
    return make_response(render_template('pag2.html', c=carreras))
@app.route('/SeleccionarFacultad', methods=['POST'])
def SeleccionarFacultad():
    """Render the page of the faculty the user selected.

    Reads the chosen faculty from the ``pregunta`` form field and returns
    the matching faculty template.
    """
    facultad_final = request.form['pregunta']
    # Map each faculty name, exactly as generated by EscogerCarrera, to
    # its template file.
    plantillas = {
        # Fix: this key previously compared against 'Comunicacion y
        # disenio', which never matched the 'Comunicacion y diseño' value
        # the test page generates.
        'Comunicacion y diseño': 'comunicacion.html',
        'Ciencias Administrativas': 'Ciencias Administrativas.html',
        'Ingenieria y sistemas': 'Sistemas.html',
        # Fix: template name was 'CS' without the .html extension (the
        # health-sciences view itself refers to CS.html).
        'Ciencias de la salud': 'CS.html',
        'Arquitectura': 'Arquitectura.html',
        'Derecho y ciencias politicas': 'Derechos y ciencias politicas.html',
        'Hotelería, Gastronomía y Turismo': 'HGT.html',
        'Logistica Maritma y portuaria': 'Logística, Marítima y Portuaria.html',
    }
    plantilla = plantillas.get(facultad_final)
    if plantilla is None:
        # Unknown value: same implicit None as the original if/elif chain.
        return None
    return make_response(render_template(plantilla, facul=facultad_final))
@app.route('/Comunicacion', methods=['POST'])
def Comunicacion():
    """Communication & Design faculty: each "si" answer adds its degree
    to the list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. en Diseño Gráfico '),
        ('pregunta2', 'Lic. en Diseño Gráfico con énfasis en Publicidad y Mercadeo'),
        ('pregunta3', 'Lic. en Com. Audiovisual con énfasis en Producción de Radio y TV'),
        ('pregunta4', 'Lic. en Publicidad y Mercadeo con énfasis en Imagen Corporativa '),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/Arquitectura', methods=['POST'])
def Arquitectura():
    """Architecture faculty: each degree requires "si" on both of its
    two questions; results rendered on Carrera Final.html."""
    respuestas = {
        campo: request.form[campo]
        for campo in ('pregunta1', 'pregunta2', 'pregunta3', 'pregunta4')
    }
    carreras = []
    if respuestas['pregunta1'] == "si" and respuestas['pregunta3'] == "si":
        carreras.append('Licenciatura en Arquitectura')
    if respuestas['pregunta2'] == "si" and respuestas['pregunta4'] == "si":
        carreras.append('Licenciatura en Diseño de Interiores')
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/Hotelería,GastronomíayTurismo', methods=['POST'])
def HoteleríaGastronomíayTurismo():
    """Hospitality, Gastronomy & Tourism faculty: each "si" answer adds
    its degree to the list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. Internacional en Administración de Empresas Hoteleras'),
        ('pregunta2', 'Lic. Internacional en Administración de Empresas Turísticas'),
        ('pregunta3', 'Lic. Internacional en Artes Culinarias '),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/Derechosycienciaspoliticas', methods=['POST'])
def Derechosycienciaspoliticas():
    """Law & Political Science faculty: each "si" answer adds its degree
    to the list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. en Derecho y Ciencias Políticas '),
        ('pregunta2', 'Lic. en Criminología'),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/CienciasAdministraciones', methods=['POST'])
def CienciasAdministraciones():
    """Administrative Sciences faculty: each "si" answer adds its degree
    to the list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. en Administración de Negocios'),
        ('pregunta2', 'Lic. en Contabilidad '),
        ('pregunta3', 'Lic. en Administración de Recursos Humanos '),
        ('pregunta4', 'Lic. en Banca y Finanzas '),
        ('pregunta5', 'Lic. en Comercio Internacional '),
        ('pregunta6', 'Lic. en Negocios Internacionales '),
        ('pregunta7', 'Lic. en Ingeniería Comercial Lic. en Comportamiento Organizacional y Desarrollo Humano '),
        ('pregunta8', 'Lic. en Global Business '),
        ('pregunta9', 'Lic. en Administración de Empresas Turísticas'),
        ('pregunta10', 'Lic. en Mercadeo y Publicidad'),
        ('pregunta11', 'Lic. en Mercadeo y Ventas'),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/LMP', methods=['POST'])
def LMP():
    """Maritime & Port Logistics faculty: each "si" answer adds its
    degree to the list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. en Administración Marítima y Portuaria'),
        ('pregunta2', 'Lic. en Gestión Marítima con énfasis en Operaciones Portuarias'),
        ('pregunta3', 'Lic. en Gestión Marítima con énfasis en Transporte Multimodal'),
        ('pregunta4', 'Lic. en Ingeniería de Transporte y Logística'),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/Ingenieria', methods=['POST'])
def ingenieria():
    """Engineering faculty: each "si" answer adds its degree to the list
    rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Lic. en Ing. de Redes y Datos con énfasis en Sistemas Inalámbricos '),
        ('pregunta2', 'Lic. en Ingeniería en Sistemas Computacionales '),
        ('pregunta3', 'Lic. en Ingeniería en Electrónica y Comunicaciones '),
        ('pregunta4', 'Lic. en Ingeniería en Industrial con énfasis en Gestión de Calidad'),
        ('pregunta5', 'Lic. en Ingeniería en Industrial con énfasis en Gestión de Operaciones '),
        ('pregunta6', 'Lic. en Ingeniería Industrial y de Sistemas '),
        ('pregunta7', 'Lic. en Sistemas Comp. con énfasis en Desarrollo de Sistemas Avanzados de Redes y Software '),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
@app.route('/CienciasSalud', methods=['POST'])
def CienciasSalud():
    """Health Sciences faculty: each "si" answer adds its degree to the
    list rendered on Carrera Final.html."""
    opciones = (
        ('pregunta1', 'Doctor en Medicina'),
        ('pregunta2', 'Doctor en Cirujia Dental'),
        ('pregunta3', 'Licenciatura en Enfermería'),
        ('pregunta4', 'Licenciatura en Psicología'),
        ('pregunta5', 'Licenciatura en Nutrición y Dietética'),
    )
    respuestas = {campo: request.form[campo] for campo, _ in opciones}
    carreras = [carrera for campo, carrera in opciones
                if respuestas[campo] == "si"]
    return make_response(render_template('Carrera Final.html', c=carreras))
# Run the Flask development server (debug reloader; not for production).
if __name__ == "__main__":
    app.run(debug=True)
| {
"content_hash": "5b819718a98d00bfa041159c701202fd",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 279,
"avg_line_length": 43.18817204301075,
"alnum_prop": 0.6521847379559318,
"repo_name": "hakuruklis/PROYECTO-PCIII",
"id": "72ed1b953822f5e28d7e653a790a8ca2f8fb4e25",
"size": "16126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProyectoFinal/app/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "822"
},
{
"name": "CSS",
"bytes": "57196"
},
{
"name": "HTML",
"bytes": "65476"
},
{
"name": "JavaScript",
"bytes": "382350"
},
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "21099"
}
],
"symlink_target": ""
} |
import platform, sys, os, subprocess
import psutil
from app.api.models.LXDModule import LXDModule
import logging
def readInstanceDetails():
    """Log a one-shot summary of the runtime environment
    (Python, LXD, OS, paths, CPU/memory/disk)."""
    # Fix: fetch the LXD info once; the original called getLXDInfo()
    # twice, performing the (possibly failing) LXD query two times.
    lxd_info = getLXDInfo()
    details = [
        "Python Version: {}".format(platform.python_version()),
        "Python Path: {}".format(' '.join(path for path in sys.path)),
        "LXD Version: {}".format(lxd_info['environment']['server_version']),
        "LXD Status: {}".format(lxd_info['api_status']),
        "OS: {}".format(platform.platform()),
        "LXDUI Path: {}".format(sys.path[0]),
        "CPU Count: {}".format(getProcessorDetails()),
        "Memory: {}MB".format(getMemory()),
        "Disk used percent: {}".format(getDiskDetails()),
    ]
    # Joined with newlines, reproducing the original multi-line message.
    logging.info("\n".join(details))
def getLXDInfo():
    """Return the LXD server config, or a placeholder when unreachable.

    The returned mapping always has ``environment.server_version`` and
    ``api_status`` keys so callers can format it unconditionally.
    """
    try:
        return LXDModule().config()
    # Fix: the bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; Exception preserves the best-effort fallback.
    except Exception:
        return {
            'environment': {
                'server_version': 'N/A'
            },
            'api_status': 'N/A'
        }
def getMemory():
    """Total physical memory of the host, in whole megabytes."""
    total_bytes = psutil.virtual_memory().total
    return int(total_bytes / (1024 * 1024))
def getProcessorDetails():
    """Return the number of logical CPUs reported by psutil."""
    return psutil.cpu_count()
def getDiskDetails():
    """Return the used-space percentage of the root filesystem."""
    return psutil.disk_usage('/').percent
"content_hash": "56a5e8cb4208fa03c09232dedd9d9dc4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 97,
"avg_line_length": 34,
"alnum_prop": 0.6354489164086687,
"repo_name": "AdaptiveScale/lxdui",
"id": "43e6dcf630d44336ba5d7cfb0550237f381454a4",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/api/utils/readInstanceDetails.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "49013"
},
{
"name": "Dockerfile",
"bytes": "376"
},
{
"name": "HTML",
"bytes": "226108"
},
{
"name": "JavaScript",
"bytes": "145007"
},
{
"name": "Python",
"bytes": "178836"
},
{
"name": "Shell",
"bytes": "9374"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class ScitemMapBatchQueryRequest(RestApi):
    """Auto-generated TOP API request: ``taobao.scitem.map.batch.query``."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Optional query parameters; None means "not sent".
        self.outer_code = None
        self.page_index = None
        self.page_size = None
        self.sc_item_id = None

    def getapiname(self):
        """API method name used for request dispatch."""
        return 'taobao.scitem.map.batch.query'
| {
"content_hash": "53059f46231cf6281d345b16e5972208",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 28.5,
"alnum_prop": 0.6842105263157895,
"repo_name": "CooperLuan/devops.notes",
"id": "0fbb7b0c8279073508da3fc3af203106ca75d9f1",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/ScitemMapBatchQueryRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
import csv
import os
import re
HERE = os.path.dirname(os.path.abspath(__file__))
def sexa_to_dec(dh, min, secs, sign):
    """Convert sexagesimal (degrees/hours, minutes, seconds) to a signed decimal.

    ``sign`` is +1 or -1 and is applied to the whole magnitude.
    """
    return sign*(dh + float(min)/60 + float(secs)/60**2)


def string_to_dec(s, neg):
    """Parse a coordinate string such as ``10°30'0" S`` into a signed decimal.

    ``neg`` is the hemisphere letter ('S' or 'W') that makes the value
    negative.  Accepts either text or raw UTF-8 bytes.

    Fix: the original used ``unicode(s, 'utf-8')`` and indexed the result of
    ``filter``, both of which fail on Python 3; this version behaves
    identically on Python 2 and also works on Python 3.
    """
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    # Split on quote marks, spaces and the degree sign; drop empty pieces.
    parsed = [p for p in re.split('[\'" °]', s) if p]
    sign = -1 if parsed[-1] == neg else 1
    return sexa_to_dec(float(parsed[0]), float(parsed[1]), float(parsed[2]),
                       sign)
def process_geo_coordinates(obj):
    """Convert the 'Latitude'/'Longitude' fields of ``obj`` in place.

    Empty/falsy values are left untouched; 'S' latitudes and 'W' longitudes
    become negative decimals.
    """
    for field, neg_letter in (('Latitude', 'S'), ('Longitude', 'W')):
        if obj[field]:
            obj[field] = string_to_dec(obj[field], neg_letter)
def load_db():
    """Yield one dict per row of the Haiti earthquake CSV, coordinates parsed."""
    csv_path = os.path.join(HERE, 'The_Haiti_Earthquake_Database.csv')
    with open(csv_path) as fh:
        for row in csv.DictReader(fh):
            # Drop the unnamed trailing column produced by the export.
            del row['']
            process_geo_coordinates(row)
            yield row
HAITI_DB = list(load_db())
| {
"content_hash": "221dbd5fe78d02b653f1c286f9547c2b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 23.564102564102566,
"alnum_prop": 0.5701849836779108,
"repo_name": "DesignSafe-CI/adama_example",
"id": "254d3eb13c5c946032ff2492cdff8bad1362dc10",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/demo/services/common/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1273"
},
{
"name": "Python",
"bytes": "21194"
}
],
"symlink_target": ""
} |
"""
Django settings for gettingstarted project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@w*9!f4t6ujp)7v4&@98$luxl2)ap0upuqousyc$+@q+vn(38#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Aplicacion.apps.AplicacionConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gettingstarted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gettingstarted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| {
"content_hash": "16ce6d20bf1b44271b82542908f3296b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 91,
"avg_line_length": 26.66906474820144,
"alnum_prop": 0.6973293768545994,
"repo_name": "SebasWS/Programa-permanencia-adacemica",
"id": "971aa7046395540d4fb9027bc98011137034dfec",
"size": "3707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gettingstarted/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34085"
},
{
"name": "HTML",
"bytes": "119254"
},
{
"name": "JavaScript",
"bytes": "8259"
},
{
"name": "Python",
"bytes": "33964"
}
],
"symlink_target": ""
} |
def foo(ids):
    """Select exactly the vertices with the given indices on the active object.

    All vertices are first deselected, then each index in ``ids`` is selected.
    """
    mesh_vertices = bpy.context.active_object.data.vertices
    for vertex in mesh_vertices:
        vertex.select = False
    for vertex_index in ids:
        mesh_vertices[vertex_index].select = True
| {
"content_hash": "54aea32f8a22f30cf03f1c55b244c0c6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 65,
"avg_line_length": 36.8,
"alnum_prop": 0.6684782608695652,
"repo_name": "alextsui05/toolbox",
"id": "657b87cb2912c926adf4ec098fcb1a0d1b80d8a2",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blender/select_verts_by_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1168"
},
{
"name": "CMake",
"bytes": "10982"
},
{
"name": "HTML",
"bytes": "476422"
},
{
"name": "Python",
"bytes": "482"
},
{
"name": "Shell",
"bytes": "1319"
},
{
"name": "TeX",
"bytes": "1195"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class CertificateTestCase(IntegrationTestCase):
    """Holodeck integration tests for the deployed-devices Certificate resource.

    Each ``*_request`` test mocks a 500 response (so the call raises), then
    asserts the exact HTTP verb, URL and body that the client sent.  Each
    ``*_response`` test mocks a canned payload and checks that the client
    accepts/parses it.  Auto-generated; keep the literal URLs and JSON intact.
    """
    # GET of a single certificate: verify verb and URL.
    def test_fetch_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/DeployedDevices/Fleets/FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Certificates/CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # Successful fetch: a 200 with a full certificate payload parses.
    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "fleet_sid": "FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "device_sid": "THaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "thumbprint": "1234567890",
                "date_created": "2016-07-30T20:00:00Z",
                "date_updated": null,
                "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates/CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)
    # DELETE of a single certificate: verify verb and URL.
    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://preview.twilio.com/DeployedDevices/Fleets/FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Certificates/CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # A 204 No Content delete returns a truthy result.
    def test_delete_response(self):
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)
    # POST create: verify verb, URL and the form-encoded body.
    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                .certificates.create(certificate_data="certificate_data")
        values = {'CertificateData': "certificate_data", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/DeployedDevices/Fleets/FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Certificates',
            data=values,
        ))
    # A 201 Created with a certificate payload parses.
    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "fleet_sid": "FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "device_sid": "THaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "thumbprint": "1234567890",
                "date_created": "2016-07-30T20:00:00Z",
                "date_updated": null,
                "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates/CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates.create(certificate_data="certificate_data")
        self.assertIsNotNone(actual)
    # GET list: verify verb and URL.
    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                .certificates.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/DeployedDevices/Fleets/FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Certificates',
        ))
    # Empty paged list response parses.
    def test_read_empty_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "certificates": [],
                "meta": {
                    "first_page_url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates?PageSize=50&Page=0",
                    "key": "certificates",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates.list()
        self.assertIsNotNone(actual)
    # Paged list with one certificate parses.
    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "certificates": [
                    {
                        "sid": "CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "friendly_name": "friendly_name",
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "fleet_sid": "FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "device_sid": "THaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "thumbprint": "1234567890",
                        "date_created": "2016-07-30T20:00:00Z",
                        "date_updated": null,
                        "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates/CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ],
                "meta": {
                    "first_page_url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates?PageSize=50&Page=0",
                    "key": "certificates",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates.list()
        self.assertIsNotNone(actual)
    # POST update with no fields: verify verb and URL.
    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/DeployedDevices/Fleets/FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Certificates/CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # A 200 update response (date_updated now set) parses.
    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "fleet_sid": "FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "device_sid": "THaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "thumbprint": "1234567890",
                "date_created": "2016-07-30T20:00:00Z",
                "date_updated": "2016-07-30T20:00:00Z",
                "url": "https://preview.twilio.com/DeployedDevices/Fleets/FLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Certificates/CYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))
        actual = self.client.preview.deployed_devices.fleets("FLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                                     .certificates("CYXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
| {
"content_hash": "f3fffff84a29171bb11aaf6794707aeb",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 165,
"avg_line_length": 42.67741935483871,
"alnum_prop": 0.5716445308282043,
"repo_name": "twilio/twilio-python",
"id": "a77489cc4b2ef92621d7e745c2fda3257e8c21e7",
"size": "9276",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/preview/deployed_devices/fleet/test_certificate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import re
import subprocess
from deoplete.base.source import Base
class Source(Base):
    """Deoplete completion source for CMake buffers.

    Offers three candidate kinds: CMake commands (scraped from
    ``cmake --help-commands``), built-in variables (from
    ``cmake --help-variable-list``) and identifiers defined with ``set(...)``
    in the current buffer.
    """
    def __init__(self, vim):
        Base.__init__(self, vim)
        self.name = 'cmake'
        self.mark = '[cmake]'
        self.filetypes = ['cmake']
        self.rank = 600
        self.debug_enabled = False
        # command name -> normalized argument signature string
        self._commands = {}
        self._variables = []
        self._identifiers = []
        # Populate command/variable caches once, at source construction.
        self._gather_commands()
        self._gather_variables()
    def _get_command_help_text(self, command):
        # Run the cmake help command and return its stdout as a string.
        # NOTE(review): on Python 3, str(bytes) yields a "b'...'" repr; the
        # '\\n' -> '\n' replace below compensates for the escaped newlines in
        # that repr — confirm before "fixing" with a proper .decode().
        # NOTE(review): the pipe is never closed/waited on (no communicate()).
        p = subprocess.Popen(command, shell=True,
                             stdout=subprocess.PIPE)
        return str(p.stdout.read()).replace('\\n', '\n')
    def _gather_commands(self):
        # Parse "name(arg spec)" entries out of `cmake --help-commands`.
        help_text= self._get_command_help_text('cmake --help-commands')
        regex = re.compile(r'\s([\w\d]+\s*\([^)]*?\))')
        commands = regex.findall(help_text)
        for command in commands:
            name = command.split('(')[0]
            if name not in self._commands:
                # Extract the parenthesized argument list and collapse
                # newlines/extra whitespace into single spaces for the menu.
                regex = re.compile(r'\(([^)]*?)\)')
                args = regex.findall(command)[0]
                self._commands[name] = ' '.join(args.replace('\n', ' ').split())
    def _gather_variables(self):
        # One variable per line; expand the <LANG> placeholder into concrete
        # per-language variants.
        help_text = self._get_command_help_text('cmake --help-variable-list')
        language_keyword = ['C', 'CXX', 'FORTRAN', 'JAVA', 'PYTHON']
        for variable in help_text.split('\n'):
            if variable.find('<LANG>') >= 0:
                variables = [variable.replace('<LANG>', v)
                             for v in language_keyword]
                self._variables += variables
            elif variable not in self._variables:
                self._variables.append(variable)
    def _gather_identifier(self):
        # Collect names defined via set(NAME ...) in the current buffer.
        regex = re.compile(r'\s*set\(([\w\d]+)[\w\d\s./]*\)')
        self._identifiers = regex.findall('\n'.join(self.vim.current.buffer))
    def on_event(self, context):
        # Refresh buffer-local identifiers whenever deoplete signals an event.
        self._gather_identifier()
    def gather_candidates(self, context):
        # Re-scan identifiers, then emit all three candidate kinds.
        self._gather_identifier()
        commands = [{'word': c, 'kind': 'command', 'menu': self._commands[c]}
                    for c in self._commands]
        variables = [{'word': v, 'kind': 'variable'}
                     for v in self._variables]
        identifiers = [{'word': i, 'kind': 'identifier'}
                       for i in self._identifiers]
        return commands + variables + identifiers
| {
"content_hash": "62740b7c0144cb8a3f5c3a8ba735689d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 33.98571428571429,
"alnum_prop": 0.5380411937788987,
"repo_name": "zchee/deoplete-clang",
"id": "f6e88b3ec37ebda8488985f35d9257890dabb9d0",
"size": "2379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rplugin/python3/deoplete/sources/cmake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "130"
},
{
"name": "C++",
"bytes": "130"
},
{
"name": "Makefile",
"bytes": "7085"
},
{
"name": "Objective-C",
"bytes": "139"
},
{
"name": "Python",
"bytes": "36500"
},
{
"name": "Vim script",
"bytes": "520"
}
],
"symlink_target": ""
} |
import unittest
from core.core import Core
class BucketTests(unittest.TestCase):
    """Tests for product feed lookup/removal on the Core singleton.

    ``setUpClass`` seeds the shared feed with two products ('x' and 'y') in
    several versions; the query tests then exercise version-spec matching
    (``==``, ``>=``, ``<``, bare name = latest).
    """
    def runTest(self):
        # Explicit aggregate entry point; note test_remove_product is NOT
        # invoked here — it builds its own Core instance below.
        self.test_get_products()
        self.test_get_product_success()
        self.test_get_product_fail()
    @classmethod
    def setUpClass(cls):
        # Seed the shared feed once for all query tests.
        cls.core = Core.get_instance()
        cls.core.feed.raw_collection.append(dict(product='x', os='windows', title='X'))
        cls.core.feed.raw_collection.append(dict(product='x', os='windows', bitness='64', version='1.3.5'))
        cls.core.feed.raw_collection.append(dict(product='x', os='windows', bitness='64', version='1.3.7'))
        cls.core.feed.raw_collection.append(dict(product='x', os='windows', bitness='64', version='1.3.9'))
        cls.core.feed.raw_collection.append(dict(product='y'))
        cls.core.feed.raw_collection.append(dict(product='y', os='windows', bitness='64', version='1.5.0'))
        cls.core.feed.raw_collection.append(dict(product='y', os='windows', bitness='64', version='1.5.2'))
        cls.core.feed.raw_collection.append(dict(product='y', os='windows', bitness='64', version='1.5.4'))
    def test_get_products(self):
        # get_products() collapses entries to one product each, at the
        # highest available version.
        products = self.core.feed.get_products()
        self.assertEqual(len(products), 2)
        self.assertEqual(products[0].name, 'x')
        self.assertEqual(products[0].version, '1.3.9')
        self.assertEqual(products[1].name, 'y')
        self.assertEqual(products[1].version, '1.5.4')
    def test_get_product_success(self):
        # Version specs: bare name -> latest; ==, >=, < filter as expected.
        p1 = self.core.feed.get_product('y')
        self.assertEqual(p1.name, 'y')
        self.assertEqual(p1.version, '1.5.4')
        p2 = self.core.feed.get_product('x==1.3.7')
        self.assertEqual(p2.name, 'x')
        self.assertEqual(p2.version, '1.3.7')
        p3 = self.core.feed.get_product('x>=1.3.5')
        self.assertEqual(p3.name, 'x')
        self.assertEqual(p3.title, 'X')
        self.assertEqual(p3.version, '1.3.9')
        p4 = self.core.feed.get_product('y<1.5.1')
        self.assertEqual(p4.name, 'y')
        self.assertEqual(p4.version, '1.5.0')
    def test_get_product_fail(self):
        # Unknown products and unsatisfiable version specs return None.
        self.assertIsNone(self.core.feed.get_product('z'))
        self.assertIsNone(self.core.feed.get_product('q==1.3.5.6'))
        self.assertIsNone(self.core.feed.get_product('x==1.3.4'))
        self.assertIsNotNone(self.core.feed.get_product('x==1.3.5'))
        self.assertIsNone(self.core.feed.get_product('x==1.3.6'))
        self.assertIsNone(self.core.feed.get_product('x<1.3.5'))
        self.assertIsNone(self.core.feed.get_product('x>1.3.9'))
    def test_remove_product(self):
        # Uses a fresh Core instance so removal does not disturb the shared
        # fixture used by the query tests above.
        core = Core.create_instance(dict(os='windows', bitness='64'))
        core.feed.raw_collection.append(dict(product='x', os='windows'))
        core.feed.raw_collection.append(dict(product='x', os='windows', bitness='64', version='1.3.5'))
        core.feed.raw_collection.append(dict(product='x', os='windows', bitness='64', version='1.3.7'))
        core.feed.raw_collection.append(dict(product='y'))
        core.feed.raw_collection.append(dict(product='y', os='windows', bitness='64', version='1.5.0'))
        core.feed.raw_collection.append(dict(product='y', os='windows', bitness='64', version='1.5.2'))
        self.assertEqual(len(core.feed.raw_collection), 6)
        core.feed.remove_product('x')
        self.assertEqual(len(core.feed.raw_collection), 3)
        self.assertIsNone(core.feed.get_product('x'))
        self.assertIsNotNone(core.feed.get_product('y'))
| {
"content_hash": "c0e460601afbb130b2d6e9eb28f06b33",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 107,
"avg_line_length": 39.63636363636363,
"alnum_prop": 0.6295871559633027,
"repo_name": "helicontech/zoo",
"id": "da9597053bc7a3842b2d4933d716e34e78358042",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Zoocmd/tests/test_bucket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8232"
},
{
"name": "C",
"bytes": "1962566"
},
{
"name": "C++",
"bytes": "20316288"
},
{
"name": "CMake",
"bytes": "293719"
},
{
"name": "CSS",
"bytes": "16032"
},
{
"name": "HTML",
"bytes": "88920"
},
{
"name": "JavaScript",
"bytes": "389952"
},
{
"name": "Objective-C",
"bytes": "929638"
},
{
"name": "Python",
"bytes": "550176"
},
{
"name": "XSLT",
"bytes": "629"
}
],
"symlink_target": ""
} |
from fastapi import FastAPI, Response
app = FastAPI()
@app.get("/legacy/")
def get_legacy_data():
data = """<?xml version="1.0"?>
<shampoo>
<Header>
Apply shampoo here.
</Header>
<Body>
You'll have to use soap here.
</Body>
</shampoo>
"""
return Response(content=data, media_type="application/xml")
| {
"content_hash": "446d9dfd3af18982e4aa80f5ddc6b9bc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.5819209039548022,
"repo_name": "tiangolo/fastapi",
"id": "6643da6e6d61ac322a9ecacc1d9defd8ddfdba9a",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/response_directly/tutorial002.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
from pywedding import app
from gdata.spreadsheet.service import SpreadsheetsService, ListQuery
from gdata.spreadsheet.text_db import Record
from datetime import datetime
# Module-import-time setup: fetch the worksheet list from Google Spreadsheets.
# NOTE(review): this performs a network call on import; `sheet` is assigned
# but never used, and `wishlist`/`accommodation` remain unbound (NameError
# later) if the spreadsheet lacks a worksheet with that title — verify.
client = SpreadsheetsService()
feed = client.GetWorksheetsFeed(visibility='public', projection='basic', key=app.config['SPREADSHEET_KEY'])
sheet = None
for s in feed.entry:
    if s.title.text == 'wishlist':
        wishlist = s
    elif s.title.text == 'accommodation':
        accommodation = s
class Item:
    """One row of the wishlist worksheet.

    Attributes:
        id: row_id of the underlying spreadsheet row
        title: name of the item
        description: free-text description
        photo: (optional) URL of the picture
        link: (optional) URL of the item (reference, description ...)
        reserved: True once an e-mail has been recorded for the item
    """
    def __init__(self, record):
        self.record = record
        self.id = record.row_id
        content = record.content
        self.title = content['title']
        self.description = content['description']
        self.photo = content['photo']
        self.link = content['link']
        # An item is considered reserved as soon as a mail address is set.
        self.reserved = content['mail'] is not None
class Accommodation:
    """One row of the accommodation worksheet.

    Attributes:
        id: row_id of the underlying spreadsheet row
        name: name of the accommodation
        address: postal address
        type: kind of accommodation (camping, hotel, hostel, etc)
        site: (optional) URL of the accommodation's website
        email: (optional) contact e-mail
        phone: (optional) contact phone
    """
    def __init__(self, record):
        self.record = record
        self.id = record.row_id
        content = record.content
        self.name = content['name']
        self.address = content['address']
        self.type = content['type']
        self.site = content['site']
        self.email = content['email']
        self.phone = content['phone']
def load_wishlist():
    """
    Load all items in the wishlist (300 max). google requires a limit...
    """
    #records = wishlist.GetRecords(1,300)
    # NOTE(review): row_query is built but never passed to GetListFeed, so no
    # limit is actually applied despite the docstring — dead code to confirm.
    row_query = ListQuery()
    row_query.start_index = str(1)
    rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])
    # Wrap each raw row entry into a text_db Record, then into an Item.
    records = []
    for row in rows_feed.entry:
        records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],
                                  worksheet_id=wishlist.id.text.split('/')[-1],
                                  row_entry=row,
                                )
                       )
    return [Item(r) for r in records]
def load_accommodation():
    """
    Load all items in the list of accommodation (300 max). google requires a limit...
    """
    # NOTE(review): row_query is built but never passed to GetListFeed (same
    # dead code as load_wishlist) — no limit is actually applied.
    row_query = ListQuery()
    row_query.start_index = str(1)
    rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=accommodation.id.text.split('/')[-1])
    # Wrap each raw row entry into a text_db Record, then an Accommodation.
    records = []
    for row in rows_feed.entry:
        records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],
                                  worksheet_id=accommodation.id.text.split('/')[-1],
                                  row_entry=row
                                )
                       )
    return [Accommodation(r) for r in records]
def update_wishlist_by_id(id, mail):
    """
    Update an item of the wishlist, setting an e-mail.
    """
    # NOTE(review): parameter `id` shadows the builtin of the same name.
    row = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1], row_id=id)
    r = Record(content=None, row_entry=row,
               spreadsheet_key=app.config['SPREADSHEET_KEY'],
               worksheet_id=wishlist.id.text.split('/')[-1], database_client=client)
    # NOTE(review): r is a freshly constructed Record, so `r is not None` is
    # always true — presumably meant to guard a failed lookup; verify.
    if r is not None:
        # Record the reserver's mail and a wall-clock timestamp, then push.
        r.content['mail'] = mail
        r.content['date'] = datetime.now().strftime('%Y/%m/%d %H:%M')
        client.UpdateRow(row, r.content)
        return True
    return False | {
"content_hash": "17fb8f0c3c71758e5d8c9b3c8e5a2224",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 156,
"avg_line_length": 35.41803278688525,
"alnum_prop": 0.5626012497107151,
"repo_name": "Kehrlann/pywedding",
"id": "3d717a46bca7b89b566d2b0afaa134d2a7e1d21e",
"size": "4335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywedding/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3599"
},
{
"name": "HTML",
"bytes": "8827"
},
{
"name": "JavaScript",
"bytes": "44778"
},
{
"name": "Python",
"bytes": "13266"
}
],
"symlink_target": ""
} |
import sys
from configparser import ConfigParser
class ConfigTool(object):
    """Parser/index over an INI-style configuration file."""
    def __init__(self, config_path):
        # ConfigParser.read silently ignores files it cannot open.
        self.config = ConfigParser()
        self.config.read(config_path)
    def configsectionmap(self, section):
        """Return {option: value} for one section (ie: [ConfigSection]).

        Any option whose lookup fails is reported and mapped to None
        (best-effort behaviour).
        """
        section_map = {}
        for option in self.config.options(section):
            try:
                section_map[option] = self.config.get(section, option)
            except Exception:
                print("exception on %s!" % sys.exc_info()[0])
                section_map[option] = None
        return section_map
| {
"content_hash": "51d19dfa975700510d4da6e047d60637",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 84,
"avg_line_length": 33.28,
"alnum_prop": 0.5961538461538461,
"repo_name": "caiomartini/SonarCommitAnalyzer",
"id": "57f0ab100f33e4d316c0ff04c1eda2e490c360c9",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16181"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.automation.event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Offers event listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation.html#event-trigger
"""
import logging
CONF_EVENT_TYPE = "event_type"
CONF_EVENT_DATA = "event_data"
_LOGGER = logging.getLogger(__name__)
def trigger(hass, config, action):
""" Listen for events based on config. """
event_type = config.get(CONF_EVENT_TYPE)
if event_type is None:
_LOGGER.error("Missing configuration key %s", CONF_EVENT_TYPE)
return False
event_data = config.get(CONF_EVENT_DATA)
def handle_event(event):
""" Listens for events and calls the action when data matches. """
if not event_data or all(val == event.data.get(key) for key, val
in event_data.items()):
action()
hass.bus.listen(event_type, handle_event)
return True
| {
"content_hash": "6029b0f46f3f3885d1ddfe16fb620d90",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 29.823529411764707,
"alnum_prop": 0.6410256410256411,
"repo_name": "pottzer/home-assistant",
"id": "c172b8e0e11552d99b6f222803a8c261dce0487b",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/automation/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1044045"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import os
import sys
import errno
import re
import shutil
import tempfile
import codecs
import atexit
import weakref
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.figure import Figure
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _png, rcParams
from matplotlib.cbook import is_writable_file_like
from matplotlib.compat import subprocess
from matplotlib.compat.subprocess import check_output
###############################################################################
# create a list of system fonts, all of these should work with xe/lua-latex
# Built once at import time; consumed by get_fontspec/_font_properties_str.
system_fonts = []
if sys.platform.startswith('win'):
    from matplotlib import font_manager
    for f in font_manager.win32InstalledFonts():
        try:
            system_fonts.append(font_manager.get_font(str(f)).family_name)
        # NOTE(review): bare except is a deliberate best-effort skip here.
        except:
            pass # unknown error, skip this font
else:
    # assuming fontconfig is installed and the command 'fc-list' exists
    try:
        # list scalable (non-bitmap) fonts
        fc_list = check_output([str('fc-list'), ':outline,scalable', 'family'])
        fc_list = fc_list.decode('utf8')
        # keep only the first family name of each entry, de-duplicated
        system_fonts = [f.split(',')[0] for f in fc_list.splitlines()]
        system_fonts = list(set(system_fonts))
    except:
        warnings.warn('error getting fonts from fc-list', UserWarning)
def get_texcommand():
    """Return the TeX system chosen in rc, falling back to 'xelatex'."""
    chosen = rcParams["pgf.texsystem"]
    if chosen in ("xelatex", "lualatex", "pdflatex"):
        return chosen
    return "xelatex"
def get_fontspec():
    """Build the fontspec part of the LaTeX preamble from rc parameters."""
    preamble_lines = []
    texcommand = get_texcommand()
    if texcommand != "pdflatex":
        # fontspec is only usable with xelatex/lualatex engines.
        preamble_lines.append("\\usepackage{fontspec}")
        if rcParams["pgf.rcfonts"]:
            # Map each rc font family to its fontspec setter command and pick
            # the first rc-configured font that is actually installed.
            setters = {"serif": r"\setmainfont{%s}",
                       "sans-serif": r"\setsansfont{%s}",
                       "monospace": r"\setmonofont{%s}"}
            for family in ("serif", "sans-serif", "monospace"):
                installed = [f for f in rcParams["font." + family]
                             if f in system_fonts]
                if installed:
                    preamble_lines.append(setters[family] % installed[0])
                # otherwise: no matching system font, keep LaTeX's default
    return "\n".join(preamble_lines)
def get_preamble():
    """Return the user-configured LaTeX preamble lines joined by newlines."""
    preamble_lines = rcParams["pgf.preamble"]
    return "\n".join(preamble_lines)
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
# TeX points are 1/72.27 inch; matplotlib (PostScript) points are 1/72 inch.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
###############################################################################
# helper functions

# Regex prefix matching an even number of backslashes, i.e. "not escaped".
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")


# PEP 8 (E731): replacement callbacks as def instead of lambda assignments.
def repl_escapetext(m):
    # Prefix the special character with a backslash, keeping escapes before it.
    return "\\" + m.group(1)


def repl_mathdefault(m):
    # Drop the trailing \mathdefault command, keeping what preceded it.
    return m.group(0)[:-len(m.group(1))]


def common_texification(text):
    """
    Do some necessary and/or useful substitutions for texts to be included in
    LaTeX documents.

    Text outside ``$...$`` gets LaTeX specials (``_ ^ $ %``) escaped; text
    inside is wrapped in an inline ``\\(\\displaystyle ...\\)`` environment.
    """
    # Sometimes, matplotlib adds the unknown command \mathdefault.
    # Not using \mathnormal instead since this looks odd for the latex cm font.
    text = re_mathdefault.sub(repl_mathdefault, text)
    # split text into normaltext and inline math parts
    parts = re_mathsep.split(text)
    for i, s in enumerate(parts):
        if not i % 2:
            # textmode replacements
            s = re_escapetext.sub(repl_escapetext, s)
        else:
            # mathmode replacements
            s = r"\(\displaystyle %s\)" % s
        parts[i] = s
    return "".join(parts)
def writeln(fh, line):
    """Write ``line`` to ``fh`` terminated by '%' and a newline.

    Every line of a file included with \\input must be terminated with %,
    otherwise latex inserts additional vertical space for some reason.
    """
    fh.write(line + "%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
elif family in system_fonts and get_texcommand() != "pdflatex":
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
    """
    Returns a function that converts a pdf file to a png file.

    Prefers pdftocairo, falls back to ghostscript, and raises RuntimeError
    if neither converter is available.
    """
    tools_available = []
    # check for pdftocairo
    try:
        check_output([str("pdftocairo"), "-v"], stderr=subprocess.STDOUT)
        tools_available.append("pdftocairo")
    except (OSError, subprocess.CalledProcessError):
        # BUGFIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Only "tool missing" (OSError) or
        # "tool failed" (CalledProcessError) mean we should try the next one.
        pass
    # check for ghostscript
    gs, ver = mpl.checkdep_ghostscript()
    if gs:
        tools_available.append("gs")
    # pick converter
    if "pdftocairo" in tools_available:
        def cairo_convert(pdffile, pngfile, dpi):
            # pdftocairo appends ".png" itself, so strip the extension
            cmd = [str("pdftocairo"), "-singlefile", "-png", "-r", "%d" % dpi,
                   pdffile, os.path.splitext(pngfile)[0]]
            check_output(cmd, stderr=subprocess.STDOUT)
        return cairo_convert
    elif "gs" in tools_available:
        def gs_convert(pdffile, pngfile, dpi):
            cmd = [str(gs), '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
                   '-sDEVICE=png16m', '-dUseCIEColor', '-dTextAlphaBits=4',
                   '-dGraphicsAlphaBits=4', '-dDOINTERPOLATE', '-sOutputFile=%s' % pngfile,
                   '-r%d' % dpi, pdffile]
            check_output(cmd, stderr=subprocess.STDOUT)
        return gs_convert
    else:
        raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
    """Raised when the LaTeX subprocess halts or reports an error."""

    def __init__(self, message, latex_output=""):
        super(LatexError, self).__init__(message)
        # raw output captured from the LaTeX process, kept for diagnostics
        self.latex_output = latex_output
class LatexManagerFactory(object):
    """Hands out a shared LatexManager, rebuilding it only when settings change."""

    # the most recently created manager, reused while its configuration is current
    previous_instance = None

    @staticmethod
    def get_latex_manager():
        """Return a LatexManager matching the current rc configuration.

        The cached instance is reused when neither the tex command nor the
        generated LaTeX header changed; otherwise a fresh manager is created
        and cached for the next call.
        """
        texcommand = get_texcommand()
        latex_header = LatexManager._build_latex_header()
        prev = LatexManagerFactory.previous_instance
        # Check if the previous instance of LatexManager can be reused.
        reusable = (prev is not None
                    and prev.latex_header == latex_header
                    and prev.texcommand == texcommand)
        if reusable:
            if rcParams["pgf.debug"]:
                print("reusing LatexManager")
            return prev
        if rcParams["pgf.debug"]:
            print("creating LatexManager")
        new_inst = LatexManager()
        LatexManagerFactory.previous_instance = new_inst
        return new_inst
class LatexManager(object):
    """
    The LatexManager opens an instance of the LaTeX application for
    determining the metrics of text elements. The LaTeX environment can be
    modified by setting fonts and/or a custom preamble in the rc parameters.
    """
    # Managers whose subprocess/tmpdir still need cleanup at interpreter exit.
    _unclean_instances = weakref.WeakSet()
    @staticmethod
    def _build_latex_header():
        # Build the document header that is sent to the interactive LaTeX
        # process before any metric queries.
        latex_preamble = get_preamble()
        latex_fontspec = get_fontspec()
        # Create LaTeX header with some content, else LaTeX will load some
        # math fonts later when we don't expect the additional output on stdout.
        # TODO: is this sufficient?
        latex_header = [r"\documentclass{minimal}",
                        latex_preamble,
                        latex_fontspec,
                        r"\begin{document}",
                        r"text $math \mu$", # force latex to load fonts now
                        r"\typeout{pgf_backend_query_start}"]
        return "\n".join(latex_header)
    @staticmethod
    def _cleanup_remaining_instances():
        # Called from the module's atexit hook; copy the WeakSet into a list
        # first because _cleanup() discards instances from it while iterating.
        unclean_instances = list(LatexManager._unclean_instances)
        for latex_manager in unclean_instances:
            latex_manager._cleanup()
    def _stdin_writeln(self, s):
        # Send one line to the LaTeX subprocess and flush so it is processed.
        self.latex_stdin_utf8.write(s)
        self.latex_stdin_utf8.write("\n")
        self.latex_stdin_utf8.flush()
    def _expect(self, s):
        # Read latex's stdout byte by byte until the token *s* appears and
        # return everything read; raise LatexError if the stream ends first.
        exp = s.encode("utf8")
        buf = bytearray()
        while True:
            b = self.latex.stdout.read(1)
            buf += b
            if buf[-len(exp):] == exp:
                break
            if not len(b):
                raise LatexError("LaTeX process halted", buf.decode("utf8"))
        return buf.decode("utf8")
    def _expect_prompt(self):
        # Wait for the "*" prompt LaTeX prints when it awaits further input.
        return self._expect("\n*")
    def __init__(self):
        # store references for __del__
        self._os_path = os.path
        self._shutil = shutil
        self._debug = rcParams["pgf.debug"]
        # create a tmp directory for running latex, remember to cleanup
        self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
        LatexManager._unclean_instances.add(self)
        # test the LaTeX setup to ensure a clean startup of the subprocess
        self.texcommand = get_texcommand()
        self.latex_header = LatexManager._build_latex_header()
        latex_end = "\n\\makeatletter\n\\@@end\n"
        try:
            latex = subprocess.Popen([str(self.texcommand), "-halt-on-error"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     cwd=self.tmpdir)
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise RuntimeError("Latex command not found. "
                    "Install '%s' or change pgf.texsystem to the desired command."
                    % self.texcommand
                )
            else:
                raise RuntimeError("Error starting process '%s'" % self.texcommand)
        test_input = self.latex_header + latex_end
        stdout, stderr = latex.communicate(test_input.encode("utf-8"))
        if latex.returncode != 0:
            raise LatexError("LaTeX returned an error, probably missing font or error in preamble:\n%s" % stdout)
        # open LaTeX process for real work
        latex = subprocess.Popen([str(self.texcommand), "-halt-on-error"],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                 cwd=self.tmpdir)
        self.latex = latex
        self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
        # write header with 'pgf_backend_query_start' token
        self._stdin_writeln(self._build_latex_header())
        # read all lines until our 'pgf_backend_query_start' token appears
        self._expect("*pgf_backend_query_start")
        self._expect_prompt()
        # cache for strings already processed
        self.str_cache = {}
    def _cleanup(self):
        # Shut down the subprocess and delete the tmp directory.  Safe to
        # call repeatedly; a missing tmpdir means cleanup already happened.
        if not self._os_path.isdir(self.tmpdir):
            return
        try:
            self.latex.communicate()
            self.latex_stdin_utf8.close()
            self.latex.stdout.close()
        except:
            pass
        try:
            self._shutil.rmtree(self.tmpdir)
            LatexManager._unclean_instances.discard(self)
        except:
            sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
    def __del__(self):
        if self._debug:
            print("deleting LatexManager")
        self._cleanup()
    def get_width_height_descent(self, text, prop):
        """
        Get the width, total height and descent for a text typesetted by the
        current LaTeX environment.

        Returns a ``(width, height, descent)`` tuple in LaTeX points.
        """
        # apply font properties and define textbox
        prop_cmds = _font_properties_str(prop)
        textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
        # check cache
        if textbox in self.str_cache:
            return self.str_cache[textbox]
        # send textbox to LaTeX and wait for prompt
        self._stdin_writeln(textbox)
        try:
            self._expect_prompt()
        except LatexError as e:
            msg = "Error processing '%s'\nLaTeX Output:\n%s"
            raise ValueError(msg % (text, e.latex_output))
        # typeout width, height and text offset of the last textbox
        self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
        # read answer from latex and advance to the next prompt
        try:
            answer = self._expect_prompt()
        except LatexError as e:
            msg = "Error processing '%s'\nLaTeX Output:\n%s"
            raise ValueError(msg % (text, e.latex_output))
        # parse metrics from the answer string
        try:
            width, height, offset = answer.splitlines()[0].split(",")
        except:
            msg = "Error processing '%s'\nLaTeX Output:\n%s" % (text, answer)
            raise ValueError(msg)
        # strip the trailing "pt" unit from each value before converting
        w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
        # the height returned from LaTeX goes from base to top.
        # the height matplotlib expects goes from bottom to top.
        self.str_cache[textbox] = (w, h + o, o)
        return w, h + o, o
class RendererPgf(RendererBase):
    def __init__(self, figure, fh, dummy=False):
        """
        Creates a new PGF renderer that translates any drawing instruction
        into text commands to be interpreted in a latex pgfpicture environment.

        Attributes
        ----------
        figure : `matplotlib.figure.Figure`
            Matplotlib figure to initialize height, width and dpi from.
        fh : file-like
            File handle for the output of the drawing commands.
        """
        RendererBase.__init__(self)
        self.dpi = figure.dpi
        self.fh = fh
        self.figure = figure
        # counter used to generate unique filenames for rasterized images
        self.image_counter = 0
        # get LatexManager instance
        self.latexManager = LatexManagerFactory.get_latex_manager()
        if dummy:
            # dummy==True deactivates all drawing methods; only text metrics
            # queries remain functional
            nop = lambda *args, **kwargs: None
            for m in RendererPgf.__dict__:
                if m.startswith("draw_"):
                    self.__dict__[m] = nop
        else:
            # if fh does not belong to a filename, deactivate draw_image
            if not hasattr(fh, 'name') or not os.path.exists(fh.name):
                warnings.warn("streamed pgf-code does not support raster "
                              "graphics, consider using the pgf-to-pdf option",
                              UserWarning)
                self.__dict__["draw_image"] = lambda *args, **kwargs: None
    def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
        # Define the marker once as a pgf object, then stamp it at every
        # vertex of *path*.
        writeln(self.fh, r"\begin{pgfscope}")
        # convert from display units to in
        f = 1. / self.dpi
        # set style and clip
        self._print_pgf_clip(gc)
        self._print_pgf_path_styles(gc, rgbFace)
        # build marker definition
        bl, tr = marker_path.get_extents(marker_trans).get_points()
        coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
        writeln(self.fh, r"\pgfsys@defobject{currentmarker}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
        self._print_pgf_path(None, marker_path, marker_trans)
        self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
                            fill=rgbFace is not None)
        writeln(self.fh, r"}")
        # draw marker for each vertex
        for point, code in path.iter_segments(trans, simplify=False):
            x, y = point[0] * f, point[1] * f
            writeln(self.fh, r"\begin{pgfscope}")
            writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
            writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
            writeln(self.fh, r"\end{pgfscope}")
        writeln(self.fh, r"\end{pgfscope}")
    def draw_path(self, gc, path, transform, rgbFace=None):
        # Emit the path itself, then, if a hatch is set, a tiled hatch
        # pattern clipped to the same path.
        writeln(self.fh, r"\begin{pgfscope}")
        # draw the path
        self._print_pgf_clip(gc)
        self._print_pgf_path_styles(gc, rgbFace)
        self._print_pgf_path(gc, path, transform, rgbFace)
        self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
                            fill=rgbFace is not None)
        writeln(self.fh, r"\end{pgfscope}")
        # if present, draw pattern on top
        if gc.get_hatch():
            writeln(self.fh, r"\begin{pgfscope}")
            self._print_pgf_path_styles(gc, rgbFace)
            # combine clip and path for clipping
            self._print_pgf_clip(gc)
            self._print_pgf_path(gc, path, transform, rgbFace)
            writeln(self.fh, r"\pgfusepath{clip}")
            # build pattern definition: a 1in x 1in tile of the hatch path
            writeln(self.fh, r"\pgfsys@defobject{currentpattern}{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
            writeln(self.fh, r"\begin{pgfscope}")
            writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
            writeln(self.fh, r"\pgfusepath{clip}")
            scale = mpl.transforms.Affine2D().scale(self.dpi)
            self._print_pgf_path(None, gc.get_hatch_path(), scale)
            self._pgf_path_draw(stroke=True)
            writeln(self.fh, r"\end{pgfscope}")
            writeln(self.fh, r"}")
            # repeat pattern, filling the bounding rect of the path
            f = 1. / self.dpi
            (xmin, ymin), (xmax, ymax) = path.get_extents(transform).get_points()
            xmin, xmax = f * xmin, f * xmax
            ymin, ymax = f * ymin, f * ymax
            repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
            writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
            for iy in range(repy):
                for ix in range(repx):
                    writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
                    writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
                # return to the left edge and move up one tile row
                writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
                writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
            writeln(self.fh, r"\end{pgfscope}")
    def _print_pgf_clip(self, gc):
        # Emit clipping commands for the clip rectangle and/or clip path
        # stored in the graphics context *gc*.
        f = 1. / self.dpi
        # check for clip box
        bbox = gc.get_clip_rectangle()
        if bbox:
            p1, p2 = bbox.get_points()
            w, h = p2 - p1
            coords = p1[0] * f, p1[1] * f, w * f, h * f
            writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}} " % coords)
            writeln(self.fh, r"\pgfusepath{clip}")
        # check for clip path
        clippath, clippath_trans = gc.get_clip_path()
        if clippath is not None:
            self._print_pgf_path(gc, clippath, clippath_trans)
            writeln(self.fh, r"\pgfusepath{clip}")
    def _print_pgf_path_styles(self, gc, rgbFace):
        # Translate cap/join style, fill and stroke colors, opacity,
        # linewidth and dash pattern from *gc* into pgf commands.
        # cap style
        capstyles = {"butt": r"\pgfsetbuttcap",
                     "round": r"\pgfsetroundcap",
                     "projecting": r"\pgfsetrectcap"}
        writeln(self.fh, capstyles[gc.get_capstyle()])
        # join style
        joinstyles = {"miter": r"\pgfsetmiterjoin",
                      "round": r"\pgfsetroundjoin",
                      "bevel": r"\pgfsetbeveljoin"}
        writeln(self.fh, joinstyles[gc.get_joinstyle()])
        # filling
        has_fill = rgbFace is not None
        if gc.get_forced_alpha():
            fillopacity = strokeopacity = gc.get_alpha()
        else:
            strokeopacity = gc.get_rgb()[3]
            fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
        if has_fill:
            writeln(self.fh, r"\definecolor{currentfill}{rgb}{%f,%f,%f}" % tuple(rgbFace[:3]))
            writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
        if has_fill and fillopacity != 1.0:
            writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
        # linewidth and color; convert mpl points to (TeX) points
        lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
        stroke_rgba = gc.get_rgb()
        writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
        writeln(self.fh, r"\definecolor{currentstroke}{rgb}{%f,%f,%f}" % stroke_rgba[:3])
        writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
        if strokeopacity != 1.0:
            writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
        # line style
        dash_offset, dash_list = gc.get_dashes()
        if dash_list is None:
            writeln(self.fh, r"\pgfsetdash{}{0pt}")
        else:
            dash_str = r"\pgfsetdash{"
            for dash in dash_list:
                dash_str += r"{%fpt}" % dash
            dash_str += r"}{%fpt}" % dash_offset
            writeln(self.fh, dash_str)
    def _print_pgf_path(self, gc, path, transform, rgbFace=None):
        # Emit the pgf path construction commands for *path*; segments are
        # clipped to the gc's clip box only when the path is not filled.
        f = 1. / self.dpi
        # check for clip box / ignore clip for filled paths
        bbox = gc.get_clip_rectangle() if gc else None
        if bbox and (rgbFace is None):
            p1, p2 = bbox.get_points()
            clip = (p1[0], p1[1], p2[0], p2[1])
        else:
            clip = None
        # build path
        for points, code in path.iter_segments(transform, clip=clip):
            if code == Path.MOVETO:
                x, y = tuple(points)
                writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
                        (f * x, f * y))
            elif code == Path.CLOSEPOLY:
                writeln(self.fh, r"\pgfpathclose")
            elif code == Path.LINETO:
                x, y = tuple(points)
                writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
                        (f * x, f * y))
            elif code == Path.CURVE3:
                cx, cy, px, py = tuple(points)
                coords = cx * f, cy * f, px * f, py * f
                writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
            elif code == Path.CURVE4:
                c1x, c1y, c2x, c2y, px, py = tuple(points)
                coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
                writeln(self.fh, r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
    def _pgf_path_draw(self, stroke=True, fill=False):
        # Use (stroke and/or fill) the most recently constructed path.
        actions = []
        if stroke:
            actions.append("stroke")
        if fill:
            actions.append("fill")
        writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
    def option_scale_image(self):
        """
        pgf backend supports affine transform of image.
        """
        return True
    def option_image_nocomposite(self):
        """
        return whether to generate a composite image from multiple images on
        a set of axes
        """
        return not rcParams['image.composite_image']
    def draw_image(self, gc, x, y, im, transform=None):
        # Rasterize *im* to a sibling png file and reference it from the
        # pgf picture; requires the output file handle to have a filename.
        h, w = im.shape[:2]
        if w == 0 or h == 0:
            return
        # save the images to png files
        path = os.path.dirname(self.fh.name)
        fname = os.path.splitext(os.path.basename(self.fh.name))[0]
        fname_img = "%s-img%d.png" % (fname, self.image_counter)
        self.image_counter += 1
        # im is flipped vertically to match png row order
        _png.write_png(im[::-1], os.path.join(path, fname_img))
        # reference the image in the pgf picture
        writeln(self.fh, r"\begin{pgfscope}")
        self._print_pgf_clip(gc)
        f = 1. / self.dpi  # from display coords to inch
        if transform is None:
            writeln(self.fh,
                    r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
            w, h = w * f, h * f
        else:
            tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
            writeln(self.fh,
                    r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
                    (tr1 * f, tr2 * f, tr3 * f, tr4 * f,
                     (tr5 + x) * f, (tr6 + y) * f))
            w = h = 1  # scale is already included in the transform
        interp = str(transform is None).lower()  # interpolation in PDF reader
        writeln(self.fh,
                r"\pgftext[left,bottom]"
                r"{\pgfimage[interpolate=%s,width=%fin,height=%fin]{%s}}" %
                (interp, w, h, fname_img))
        writeln(self.fh, r"\end{pgfscope}")
    def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
        # TeX text is handled exactly like ordinary text in this backend.
        self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        # Typeset *s* with pgf's \pgftext, applying color, opacity, font
        # properties and rotation from *gc*/*prop*.
        # prepare string for tex
        s = common_texification(s)
        prop_cmds = _font_properties_str(prop)
        s = r"%s %s" % (prop_cmds, s)
        writeln(self.fh, r"\begin{pgfscope}")
        alpha = gc.get_alpha()
        if alpha != 1.0:
            writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
            writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
        rgb = tuple(gc.get_rgb())[:3]
        if rgb != (0, 0, 0):
            writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
            writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
            writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
            s = r"\color{textcolor}" + s
        f = 1.0 / self.figure.dpi
        text_args = []
        if mtext and (
                (angle == 0 or
                 mtext.get_rotation_mode() == "anchor") and
                mtext.get_va() != "center_baseline"):
            # if text anchoring can be supported, get the original coordinates
            # and add alignment information
            x, y = mtext.get_transform().transform_point(mtext.get_position())
            text_args.append("x=%fin" % (x * f))
            text_args.append("y=%fin" % (y * f))
            halign = {"left": "left", "right": "right", "center": ""}
            valign = {"top": "top", "bottom": "bottom",
                      "baseline": "base", "center": ""}
            text_args.append(halign[mtext.get_ha()])
            text_args.append(valign[mtext.get_va()])
        else:
            # if not, use the text layout provided by matplotlib
            text_args.append("x=%fin" % (x * f))
            text_args.append("y=%fin" % (y * f))
            text_args.append("left")
            text_args.append("base")
        if angle != 0:
            text_args.append("rotate=%f" % angle)
        writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
        writeln(self.fh, r"\end{pgfscope}")
    def get_text_width_height_descent(self, s, prop, ismath):
        # Ask the persistent LaTeX process for the text metrics and convert
        # them to display units.
        # check if the math is supposed to be displaystyled
        s = common_texification(s)
        # get text metrics in units of latex pt, convert to display units
        w, h, d = self.latexManager.get_width_height_descent(s, prop)
        # TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
        # but having a little bit more space around the text looks better,
        # plus the bounding box reported by LaTeX is VERY narrow
        f = mpl_pt_to_in * self.dpi
        return w * f, h * f, d * f
    def flipy(self):
        # pgf's y axis points upward, like matplotlib's
        return False
    def get_canvas_width_height(self):
        # canvas size in inches (pgf coordinates are emitted in inches)
        return self.figure.get_figwidth(), self.figure.get_figheight()
    def points_to_pixels(self, points):
        # convert mpl points to display units
        return points * mpl_pt_to_in * self.dpi
    def new_gc(self):
        return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
    """Graphics context for the pgf backend; the base implementation suffices."""
    pass
########################################################################
class TmpDirCleaner(object):
    """Registry of temporary directories to be removed at interpreter exit."""

    # tmpdirs registered for deferred deletion (e.g. rmtree failed earlier)
    remaining_tmpdirs = set()

    @staticmethod
    def add(tmpdir):
        """Register *tmpdir* for removal by ``cleanup_remaining_tmpdirs``."""
        TmpDirCleaner.remaining_tmpdirs.add(tmpdir)

    @staticmethod
    def cleanup_remaining_tmpdirs():
        """Attempt to delete every registered tmpdir, reporting failures on stderr."""
        for tmpdir in TmpDirCleaner.remaining_tmpdirs:
            try:
                shutil.rmtree(tmpdir)
            except OSError:
                # BUGFIX: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit; rmtree failures are OSError.
                # A stubborn directory must not abort atexit cleanup.
                sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
class FigureCanvasPgf(FigureCanvasBase):
    """Canvas that renders figures as pgf code, optionally compiled to pdf/png."""
    filetypes = {"pgf": "LaTeX PGF picture",
                 "pdf": "LaTeX compiled PGF picture",
                 "png": "Portable Network Graphics", }
    def get_default_filetype(self):
        return 'pdf'
    def _print_pgf_to_fh(self, fh, *args, **kwargs):
        # Render the figure as pgf commands into the open file handle *fh*.
        # With dryrun=True only the drawing pass is executed (for metrics).
        if kwargs.get("dryrun", False):
            renderer = RendererPgf(self.figure, None, dummy=True)
            self.figure.draw(renderer)
            return
        header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%%   \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%%   \\usepackage{pgf}
%%
%% Figures using additional raster images can only be included by \\input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%%   \\usepackage{import}
%% and then include the figures with
%%   \\import{<path to file>}{<filename>.pgf}
%%
"""
        # append the preamble used by the backend as a comment for debugging
        header_info_preamble = ["%% Matplotlib used the following preamble"]
        for line in get_preamble().splitlines():
            header_info_preamble.append("%% " + line)
        for line in get_fontspec().splitlines():
            header_info_preamble.append("%% " + line)
        header_info_preamble.append("%%")
        header_info_preamble = "\n".join(header_info_preamble)
        # get figure size in inch
        w, h = self.figure.get_figwidth(), self.figure.get_figheight()
        dpi = self.figure.get_dpi()
        # create pgfpicture environment and write the pgf code
        fh.write(header_text)
        fh.write(header_info_preamble)
        fh.write("\n")
        writeln(fh, r"\begingroup")
        writeln(fh, r"\makeatletter")
        writeln(fh, r"\begin{pgfpicture}")
        writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h))
        writeln(fh, r"\pgfusepath{use as bounding box, clip}")
        _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
        renderer = MixedModeRenderer(self.figure, w, h, dpi,
                                     RendererPgf(self.figure, fh),
                                     bbox_inches_restore=_bbox_inches_restore)
        self.figure.draw(renderer)
        # end the pgfpicture environment
        writeln(fh, r"\end{pgfpicture}")
        writeln(fh, r"\makeatother")
        writeln(fh, r"\endgroup")
    def print_pgf(self, fname_or_fh, *args, **kwargs):
        """
        Output pgf commands for drawing the figure so it can be included and
        rendered in latex documents.
        """
        if kwargs.get("dryrun", False):
            self._print_pgf_to_fh(None, *args, **kwargs)
            return
        # figure out where the pgf is to be written to
        if isinstance(fname_or_fh, six.string_types):
            with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
                self._print_pgf_to_fh(fh, *args, **kwargs)
        elif is_writable_file_like(fname_or_fh):
            # wrap the stream so the pgf code is written as utf-8
            fh = codecs.getwriter("utf-8")(fname_or_fh)
            self._print_pgf_to_fh(fh, *args, **kwargs)
        else:
            raise ValueError("filename must be a path")
    def _print_pdf_to_fh(self, fh, *args, **kwargs):
        # Wrap the pgf output in a minimal LaTeX document, compile it in a
        # temporary directory, and copy the resulting pdf into *fh*.
        w, h = self.figure.get_figwidth(), self.figure.get_figheight()
        try:
            # create temporary directory for compiling the figure
            tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
            fname_pgf = os.path.join(tmpdir, "figure.pgf")
            fname_tex = os.path.join(tmpdir, "figure.tex")
            fname_pdf = os.path.join(tmpdir, "figure.pdf")
            # print figure to pgf and compile it with latex
            self.print_pgf(fname_pgf, *args, **kwargs)
            latex_preamble = get_preamble()
            latex_fontspec = get_fontspec()
            # page size is set to exactly the figure size
            latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}
\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
            with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
                fh_tex.write(latexcode)
            texcommand = get_texcommand()
            cmdargs = [str(texcommand), "-interaction=nonstopmode",
                       "-halt-on-error", "figure.tex"]
            try:
                check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
            except subprocess.CalledProcessError as e:
                raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))
            # copy file contents to target
            with open(fname_pdf, "rb") as fh_src:
                shutil.copyfileobj(fh_src, fh)
        finally:
            try:
                shutil.rmtree(tmpdir)
            except:
                # deletion failed (e.g. files still locked); retry at exit
                TmpDirCleaner.add(tmpdir)
    def print_pdf(self, fname_or_fh, *args, **kwargs):
        """
        Use LaTeX to compile a Pgf generated figure to PDF.
        """
        if kwargs.get("dryrun", False):
            self._print_pgf_to_fh(None, *args, **kwargs)
            return
        # figure out where the pdf is to be written to
        if isinstance(fname_or_fh, six.string_types):
            with open(fname_or_fh, "wb") as fh:
                self._print_pdf_to_fh(fh, *args, **kwargs)
        elif is_writable_file_like(fname_or_fh):
            self._print_pdf_to_fh(fname_or_fh, *args, **kwargs)
        else:
            raise ValueError("filename must be a path or a file-like object")
    def _print_png_to_fh(self, fh, *args, **kwargs):
        # Compile to pdf in a temporary directory, convert the pdf to png
        # with an external tool, and copy the png into *fh*.
        converter = make_pdf_to_png_converter()
        try:
            # create temporary directory for pdf creation and png conversion
            tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
            fname_pdf = os.path.join(tmpdir, "figure.pdf")
            fname_png = os.path.join(tmpdir, "figure.png")
            # create pdf and try to convert it to png
            self.print_pdf(fname_pdf, *args, **kwargs)
            converter(fname_pdf, fname_png, dpi=self.figure.dpi)
            # copy file contents to target
            with open(fname_png, "rb") as fh_src:
                shutil.copyfileobj(fh_src, fh)
        finally:
            try:
                shutil.rmtree(tmpdir)
            except:
                # deletion failed (e.g. files still locked); retry at exit
                TmpDirCleaner.add(tmpdir)
    def print_png(self, fname_or_fh, *args, **kwargs):
        """
        Use LaTeX to compile a pgf figure to pdf and convert it to png.
        """
        if kwargs.get("dryrun", False):
            self._print_pgf_to_fh(None, *args, **kwargs)
            return
        if isinstance(fname_or_fh, six.string_types):
            with open(fname_or_fh, "wb") as fh:
                self._print_png_to_fh(fh, *args, **kwargs)
        elif is_writable_file_like(fname_or_fh):
            self._print_png_to_fh(fname_or_fh, *args, **kwargs)
        else:
            raise ValueError("filename must be a path or a file-like object")
    def get_renderer(self):
        # dummy renderer: usable for text metrics, performs no drawing
        return RendererPgf(self.figure, None, dummy=True)
class FigureManagerPgf(FigureManagerBase):
    """Figure manager for the pgf backend; adds nothing beyond the base class."""

    def __init__(self, *args):
        super(FigureManagerPgf, self).__init__(*args)
@_Backend.export
class _BackendPgf(_Backend):
    # Registers the pgf canvas/manager pair as a matplotlib backend.
    FigureCanvas = FigureCanvasPgf
    FigureManager = FigureManagerPgf
def _cleanup_all():
    # atexit hook: shut down lingering LaTeX subprocesses and delete any
    # temporary directories whose removal failed earlier.
    LatexManager._cleanup_remaining_instances()
    TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
| {
"content_hash": "cec2e64f4caa8920fd8825fb57af17d2",
"timestamp": "",
"source": "github",
"line_count": 962,
"max_line_length": 133,
"avg_line_length": 37.91995841995842,
"alnum_prop": 0.5687655911620384,
"repo_name": "louisLouL/pair_trading",
"id": "e75c08f8bd7429460dd332b4074c0ff35050e473",
"size": "36479",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_pgf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "148513"
},
{
"name": "C++",
"bytes": "172384"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "568460"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Python",
"bytes": "30357437"
},
{
"name": "Shell",
"bytes": "3260"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
__all__ = ['phpunit', 'coverfish', 'lint', 'phpcs', 'phpcpd', 'phpmd', 'security_checker']
| {
"content_hash": "7d1c9897373567a3c4d64b1f29446096",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 90,
"avg_line_length": 91,
"alnum_prop": 0.5934065934065934,
"repo_name": "mi-schi/php-code-checker",
"id": "fff2778c36a69265cceff56f1e6cb2487336958a",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/check/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16348"
}
],
"symlink_target": ""
} |
import copy
from loqusdb.build_models.variant import build_variant
class TestInsertVariant:
    """Tests covering single-variant inserts through the adapter."""

    def test_insert_one_variant(self, mongo_adapter, simplest_variant):
        """A first insert creates a document with one observation."""
        mongo_adapter.add_variant(simplest_variant)
        # Read back directly from the database, bypassing the adapter
        stored = mongo_adapter.db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 1
        assert stored["homozygote"] == 0

    def test_insert_one_variant_twice(self, mongo_adapter, simplest_variant):
        """Re-inserting the same variant bumps the observation count."""
        for _ in range(2):
            mongo_adapter.add_variant(simplest_variant)
        stored = mongo_adapter.db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 2
        assert stored.get("homozygote", 0) == 0

    def test_insert_hom_variant(self, real_mongo_adapter, homozygous_variant):
        """A homozygous variant increments the homozygote counter."""
        real_mongo_adapter.add_variant(homozygous_variant)
        stored = real_mongo_adapter.db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 1
        assert stored.get("homozygote", 0) == 1
        assert stored["families"] == ["1"]

    def test_insert_many(self, mongo_adapter, simplest_variant):
        """Observations accumulate across a large number of inserts."""
        for _ in range(10000):
            mongo_adapter.add_variant(simplest_variant)
        stored = mongo_adapter.db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 10000
        assert stored.get("homozygote", 0) == 0
class TestGetVariant:
    """Tests covering variant lookups through the adapter."""

    def test_get_variant(self, mongo_client, mongo_adapter, simplest_variant):
        """A variant inserted behind the adapter's back is still found."""
        # Insert directly through the client, bypassing the adapter
        test_db = mongo_client["test"]
        test_db.variant.insert_one(simplest_variant)
        fetched = mongo_adapter.get_variant(simplest_variant)
        assert fetched["_id"] == "test"

    def test_get_none(self, mongo_adapter, simplest_variant):
        """Looking up a variant that was never inserted returns None."""
        assert mongo_adapter.get_variant(simplest_variant) is None
class TestBulkOperations:
    """Tests covering bulk inserts through add_variants."""

    def test_insert_one_variant(self, real_mongo_adapter, simplest_variant):
        """A bulk insert of a single variant behaves like a plain insert."""
        real_mongo_adapter.add_variants([simplest_variant])
        stored = real_mongo_adapter.db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 1
        assert stored["homozygote"] == 0

    def test_insert_two_variants(self, real_mongo_adapter):
        """Two distinct variants yield two documents with correct counters."""
        db = real_mongo_adapter.db
        real_mongo_adapter.add_variants([
            {"_id": "test", "homozygote": 0},
            {"_id": "test_1", "homozygote": 1},
        ])
        plain = db.variant.find_one({"_id": "test"})
        hom = db.variant.find_one({"_id": "test_1"})
        assert plain["_id"] == "test"
        assert plain["observations"] == 1
        assert plain.get("homozygote", 0) == 0
        assert hom["_id"] == "test_1"
        assert hom["observations"] == 1
        assert hom.get("homozygote", 0) == 1

    def test_insert_many(self, real_mongo_adapter):
        """Bulk-inserting the same variant many times accumulates observations."""
        db = real_mongo_adapter.db
        many = ({"_id": "test", "homozygote": 0} for _ in range(20000))
        real_mongo_adapter.add_variants(many)
        stored = db.variant.find_one()
        assert stored["_id"] == "test"
        assert stored["observations"] == 20000
        assert stored.get("homozygote", 0) == 0
class TestRemoveVariant:
    """Tests covering variant deletion and observation down-counting."""

    def test_remove_one_variant(self, mongo_adapter):
        """Deleting a variant with a single observation removes the document."""
        db = mongo_adapter.db
        variant = {"_id": "test", "observations": 1}
        db.variant.insert_one(variant)
        mongo_adapter.delete_variant(variant)
        assert db.variant.find_one() is None

    def test_downcount_one_variant(self, mongo_adapter):
        """Deleting one case's observation decrements counters instead of removing."""
        db = mongo_adapter.db
        db.variant.insert_one(
            {"_id": "test", "families": ["1", "2"], "observations": 2}
        )
        mongo_adapter.delete_variant({"_id": "test", "case_id": "1"})
        remaining = db.variant.find_one()
        assert remaining["observations"] == 1
        assert remaining["families"] == ["2"]

    def test_remove_non_existing(self, mongo_adapter, simplest_variant):
        """Deleting a variant that is absent leaves the collection empty."""
        mongo_adapter.delete_variant(simplest_variant)
        assert mongo_adapter.db.variant.find_one() is None
class TestRemoveSV:
    """Tests covering structural-variant deletion."""

    def test_remove_one_SV(self, mongo_adapter, del_variant, case_obj):
        """Deleting the only SV removes both the SV and identity documents."""
        # GIVEN a database populated with one SV
        db = mongo_adapter.db
        formated_variant = build_variant(
            del_variant, case_obj=case_obj, case_id=case_obj["case_id"]
        )
        mongo_adapter.add_structural_variant(formated_variant)
        mongo_SV = db.structural_variant.find_one()
        mongo_identity = db.identity.find_one()
        assert mongo_SV is not None
        assert mongo_identity is not None
        # WHEN deleting SV
        mongo_adapter.delete_structural_variant(formated_variant)
        # THEN there should be no remaining SVs in the database
        mongo_SV = db.structural_variant.find_one()
        # BUGFIX: was ``db.indentity`` (typo) — the misspelled collection is
        # always empty, so the assertion below passed vacuously.
        mongo_identity = db.identity.find_one()
        assert mongo_SV is None
        assert mongo_identity is None

    def test_remove_one_of_two_SV(self, mongo_adapter, duptandem_variant, case_obj):
        """Deleting one of two clustered SVs keeps the cluster for the other case."""
        # GIVEN a database populated with one SV
        db = mongo_adapter.db
        formated_variant = build_variant(
            duptandem_variant, case_obj=case_obj, case_id=case_obj["case_id"]
        )
        mongo_adapter.add_structural_variant(formated_variant)
        # Add second of same variant, changing the start and end position slightly
        formated_variant_ = copy.deepcopy(formated_variant)
        formated_variant_["pos"] = formated_variant_["pos"] + 2
        formated_variant_["end"] = formated_variant_["end"] - 1
        formated_variant_["case_id"] = "case_2"
        mongo_adapter.add_structural_variant(formated_variant_)
        # This should correspond to one structural variant document
        mongo_svs = list(db.structural_variant.find())
        assert len(mongo_svs) == 1
        mongo_sv = mongo_svs[0]
        assert mongo_sv["pos_sum"] == formated_variant["pos"] + formated_variant_["pos"]
        # And two identity documents
        mongo_identities = list(db.identity.find())
        assert len(mongo_identities) == 2
        # WHEN deleting the variant from the first case
        mongo_adapter.delete_structural_variant(formated_variant)
        # THEN the SV document should have the pos_sum equal to the pos of the
        # SV from the second case
        mongo_svs = list(db.structural_variant.find())
        assert len(mongo_svs) == 1
        mongo_sv = mongo_svs[0]
        assert mongo_sv["pos_sum"] == formated_variant_["pos"]
        # And one identity document
        mongo_identities = list(db.identity.find())
        assert len(mongo_identities) == 1
class TestHelperMethods:
    """Tests for internal adapter helpers."""

    def test_update_sv_metrics(self, mongo_adapter):
        """_update_sv_metrics: cluster length / interval size for a range of
        SV types and sizes, exercised table-driven."""
        cases = [
            # (kwargs, expected cluster_len, expected interval_size)
            # cluster_len > 10000 -> interval is cluster_len / 10, rounded to 100s
            (dict(sv_type="INV", pos_mean=10000, end_mean=30000, max_window=3000),
             20000, round(20000 / 10, -2)),
            # cluster_len < 10000 -> interval is cluster_len / 5
            (dict(sv_type="DUP", pos_mean=10000, end_mean=15000, max_window=3000),
             5000, round(5000 / 5, -2)),
            # interval_size < 1000 -> interval is cluster_len / 2
            (dict(sv_type="DEL", pos_mean=10000, end_mean=10500, max_window=3000),
             500, round(500 / 2, -2)),
            # interval_size larger than max_window is capped at max_window
            (dict(sv_type="INV", pos_mean=100000, end_mean=200000, max_window=3000),
             100000, 3000),
            # BND: cluster_len fixed at 10e10, interval is 2 * max_window
            (dict(sv_type="BND", pos_mean=1000, end_mean=2000, max_window=3000),
             10e10, 2 * 3000),
        ]
        for kwargs, expected_len, expected_interval in cases:
            cluster_len, interval_size = mongo_adapter._update_sv_metrics(**kwargs)
            assert cluster_len == expected_len
            assert interval_size == expected_interval
| {
"content_hash": "78f8cf02ba0acf053cc5c5ccd9b0c5a0",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 88,
"avg_line_length": 34.84532374100719,
"alnum_prop": 0.6212449674821926,
"repo_name": "moonso/loqusdb",
"id": "e86a46eab2503ab59f6c9940128d1569d496ea3b",
"size": "9687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/mongo/test_variant_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "216366"
}
],
"symlink_target": ""
} |
import gntp.notifier
'''
No purpose here other than creating a callable library for system notifications
'''
class message:
    """Fire a Growl desktop notification carrying *messagex* as its body.

    Instantiating the class registers the application with Growl and
    immediately posts a sticky-less, priority-1 "New Messages" alert.
    """

    def __init__(self, messagex):
        notifier = gntp.notifier.GrowlNotifier(
            applicationName="RSEvent Notification",
            notifications=["New Updates", "New Messages"],
            defaultNotifications=["New Messages"],
        )
        notifier.register()
        notifier.notify(
            noteType="New Messages",
            title="Status: Alert",
            description="%s" % messagex,
            icon="http://url/to/Alert-Icon-.png",
            sticky=False,
            priority=1,
        )
| {
"content_hash": "e5bf74ae6ee390643984ef02212c3fde",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5640243902439024,
"repo_name": "krazybean/message_agent_abandoned",
"id": "454d4f63df2ac00f2031b315116833bd1d497291",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osx/mac_notify_lib.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26830"
}
],
"symlink_target": ""
} |
"""
Handle compressed filesystems
This replicates functionality that 'archivemount' would provide
"""
import logging
import tempfile
import tarfile
import os
import shutil
logger = logging.getLogger(__name__)
class InsightsCompressedFile(object):
    """
    This class handles uncompressing and mounting compressed filesystems.
    """

    def __init__(self, compressed_file_location=None):
        """
        Extract ``compressed_file_location`` (a tar archive) into a fresh
        temporary directory.

        On any failure (missing file, non-tar file, corrupt archive) the
        error is logged at debug level and the instance keeps an empty
        temp directory -- deliberately best-effort, never raises.
        """
        self.compressed_file_location = compressed_file_location
        # absolute prefix -> the extraction dir is created under /var/tmp
        self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
        self.is_file = os.path.isfile(self.compressed_file_location)
        self.is_tarfile = (tarfile.is_tarfile(self.compressed_file_location)
                           if self.is_file else False)
        if self.is_file and self.is_tarfile:
            try:
                tar = tarfile.open(self.compressed_file_location)
                # SECURITY NOTE: extractall trusts member paths; a crafted
                # archive with '..' members could write outside tmp_dir.
                tar.extractall(path=self.tmp_dir)
                tar.close()
                logger.debug("Compressed filesystem %s extracted to %s",
                             self.compressed_file_location, self.tmp_dir)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; catch extraction errors only.
            except (tarfile.TarError, OSError, IOError):
                logger.debug("Invalid compressed tar filesystem provided. "
                             "Could not extract contents.")
        else:
            logger.debug("Invalid compressed tar filesystem provided.")

    def cleanup_temp_filesystem(self):
        """
        Cleanup the temporary directory (errors are ignored).
        """
        logger.debug("Deleting compressed file extraction directory: " + self.tmp_dir)
        shutil.rmtree(self.tmp_dir, True)

    def get_filesystem_path(self):
        """
        Get the filesystem path, where it was extracted to
        """
        return self.tmp_dir
| {
"content_hash": "a7708b84f4368df95102f7c0cf7c8b34",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 33.6,
"alnum_prop": 0.625,
"repo_name": "wcmitchell/insights-core",
"id": "f13018b77a8682a4dfe74f2daf92d52721de10d0",
"size": "1680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/client/compressed_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Jupyter Notebook",
"bytes": "91793"
},
{
"name": "Python",
"bytes": "3414025"
},
{
"name": "Shell",
"bytes": "2274"
}
],
"symlink_target": ""
} |
from .avatar import *
from .room import *
from .exit import *
| {
"content_hash": "d8b7c6ce00a2bdca7c9cdc48836c8e61",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 21,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.7096774193548387,
"repo_name": "elastic-event-components/e2c",
"id": "f75d0d1678e49c48c9023b22642ab8bec94c1b92",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/python/e2c.examples/web_mud/contracts/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "75533"
},
{
"name": "CSS",
"bytes": "1350"
},
{
"name": "HTML",
"bytes": "962"
},
{
"name": "Python",
"bytes": "117092"
},
{
"name": "Shell",
"bytes": "509"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy
import sys
import brukerIO
import math
def cpmgadd2D(dataset, lb=0, gb=0, nEchoes=None, slope=0,
              cycle=None, echo_parity='both'):
    """
    Sum the echoes of a 2D (hypercomplex) QCPMG Bruker experiment.

    The directly-detected dimension is split into individual echoes, each
    echo is apodized with a Gaussian whose centre may shift with t1 along
    ``slope``, the echo train is damped with a Lorentzian, and the chosen
    echoes are summed.  The result is written back as processed 2D
    time-domain data together with the status processing parameters
    topspin needs to display and further process it.

    Parameters
    ----------
    dataset : brukerIO.dataset
        The 2D dataset to process; processed files are written in place.
    lb : float
        Lorentzian broadening (Hz) applied along the decaying echo train.
    gb : float
        Gaussian broadening (Hz) applied to each echo.
    nEchoes : int or None
        Number of echoes to sum; defaults to the acquired L22+1, capped
        so they fit in TD.
    slope : float
        t2/t1 slope along which the echo maxima shift with t1.
    cycle : float or None
        QCPMG cycle length in us; if None it is read from P60 or, for
        older pulse programs, computed as 2*(D3+D6)+P2.
    echo_parity : {'both', 'odd', 'even'}
        Which echoes of the train to include in the sum.

    TODO (from original author): reshape the ser file in 4 dims
    (F1, HCF1(re/im), F2, HCF2(re/im)); note: in QF, HCF1 has dimension 1.
    Echo splitting along F2 can then be done as
    (F1, HCF1(re/im), NEchoes, F2, HCF2(re/im)).
    """
    # check dataset is 2D:
    if dataset.readacqpar("PARMODE") != 1:
        print("dataset is not 2D : exiting...")
        sys.exit()
    # 0 undef, 1 QF, 2 QSEQ, 3 TPPI, 4 states, 5 states-tppi, 6 echo=antiecho
    mode2D = dataset.readacqpar("FnMODE", dimension=2, status=True)
    if mode2D == 0:
        mode2D = dataset.readprocpar("MC2", dimension=1)+1
    if mode2D == 1:  # QF
        HCsize = 1
    elif mode2D in [4, 5, 6]:  # States, States-TPPI, Echo/Antiecho
        HCsize = 2
    else:
        print("Problem: only QF, States, States-TPPI, Echo-AntiEcho acquisition supported.")
        sys.exit()
    # read the fid, the digital filter is removed by default
    serfile = dataset.readser()
    print(serfile.shape)
    # fcor = dataset.readprocpar("FCOR")
    # fcor1 = dataset.readprocpar("FCOR", dimension=2)
    # serfile[0, :] *= fcor1
    # serfile[1, :] *= fcor1
    # serfile[:, 0] *= fcor1
    # serfile[:, 1] *= fcor1
    # compute the echo duration in points (adjust according to the pulse
    # program used)
    dw = 1e6/dataset.readacqpar("SW_h")
    dw1 = 1e6/dataset.readacqpar("SW_h", dimension=2)
    # number of points per cycle
    if not cycle:
        cycle = dataset.readacqpar("P 60")  # for pp where cycle is stored in P_60
        if cycle < 0.1:  # for old pulse programs: rebuild cycle from delays
            D3 = dataset.readacqpar("D 3")*1e6
            D6 = dataset.readacqpar("D 6")*1e6
            P180 = dataset.readacqpar("P 2")
            cycle = 2*(D3+D6)+P180
    # how many echoes to add ?
    L22 = dataset.readacqpar("L 22")+1
    if nEchoes is None or nEchoes > L22:
        nEchoes = L22
    shift = 0
    # the shift and echo length in points
    firstP = int(shift/dw*2)
    ppc = (cycle)/dw
    # oneEchoSize=int(round((2*(D3+D6)+P2)/dw))
    oneEchoSize = int(round(cycle/dw))
    if abs(ppc-oneEchoSize) > 0.001:
        print("Warning echo cycle is not multiple of dwell")
        chunkNotRound = True
    else:
        chunkNotRound = False
    # Check that TD can accomodate L22+1 echoes
    digFilLen = int(round(dataset.getdigfilt()))
    TD = dataset.readacqpar("TD")
    if TD < 2*oneEchoSize*nEchoes + 2*digFilLen:
        nEchoes = (TD//2 - digFilLen) // oneEchoSize
        print("""WARNING : FID is not long enough for L22 echo + 1.
Actually using %s echoes""" % (nEchoes,))
    # print(serfile.dtype)
    # print(TD, 2*oneEchoSize*nEchoes + 2*digFilLen)
    serfile = serfile[:, 0:2*oneEchoSize*nEchoes]
    # print(serfile.shape, 2*oneEchoSize*nEchoes)
    # size of 2D array in t1
    TD1 = dataset.readacqpar("TD", status=True, dimension=2)
    # TD1 is rounded to multiple of HCsize
    TD1 = TD1//HCsize*HCsize
    serfile = serfile[0:TD1]
    # print("HCsize=", HCsize)
    # print("TD1=", TD1)
    # print(digFilLen)
    # print("dw=%5.3f D3=%5.3f D6=%5.3f P2=%5.3f L22=%d np=%d cy=%5.3f" %
    #       (dw,D3,D6,P2,nEchoes,oneEchoSize,2*(D3+D6)+P2))
    # reshape the ser file to 5D (TD1//HCsize, HCsize(F1), echo index,
    # echo point index, Re/Im)
    if not chunkNotRound:
        summed = serfile[:, firstP:firstP+oneEchoSize*2*nEchoes].reshape(TD1//HCsize, HCsize, nEchoes, oneEchoSize, 2)
    else:
        (si1, si) = serfile.shape
        # print((si1, si), (TD1//HCsize, HCsize, si//2, 2),
        #       len(serfile), len(serfile)//HCsize)
        tmp = serfile.reshape(TD1//HCsize, HCsize, si//2, 2)
        summed = numpy.zeros((TD1//HCsize, HCsize, nEchoes, oneEchoSize, 2))
        # print(summed.shape)
        # echoes do not fall on integer sample boundaries: accumulate the
        # nearest-sample chunk for each echo
        for i in range(nEchoes):
            summed[:, :, i, :, :] += tmp[:, :,
                                         firstP+int(i*ppc+0.5):
                                         firstP+int(ppc*i+0.5)+oneEchoSize, :]
    # truncate SER file according to TDeff :
    TDeff1 = (dataset.readprocpar("TDeff", status=False, dimension=2)//HCsize)*HCsize
    if TDeff1 > 0 and TDeff1 < TD1:
        TD1 = TDeff1
        summed = summed[:TD1//HCsize]
    # print(summed.shape)
    # build a gaussian apodization function:
    # time domain exp(-(at)**2) -> spectral exp(-(w/2a)**2) with full width
    # at half maximum GB = a/pi * 2*sqrt(ln(2)), i.e. a=GB
    # A double apodization with an adjustable slope +-p is needed.
    # The 2D assumes States.
    # Compute the apodization matrix composed of two gaussians whose
    # centres shift with t1 in a ratio of +-1 (1Q) or +-2 (DQ).
    # Do NOT use fromfunction (poor performance): generate two tables
    # (A and B) with the two shifting gaussians and take the element-wise
    # max of A and B.
    # GB : from gb (fwhh of gaussian in frequency domain) for use in time
    # domain multiplication by exp(-(x*GB)^2)
    GB = gb*math.pi/2.0/math.sqrt(math.log(2.0))
    # the slope (t2/t1) along which the E/AE shifts with t1
    expRatio = slope
    # create array with x
    t2_ind = numpy.arange(-oneEchoSize/2, oneEchoSize/2)*(1e-6*dw*GB)
    t2_ind2d = numpy.resize(t2_ind, (TD1//HCsize, oneEchoSize))
    # create array with |x0| shift
    t1_ind = numpy.arange(TD1/HCsize)*(1e-6*dw1*expRatio*GB)
    t1_ind2d = numpy.resize(t1_ind, (oneEchoSize, TD1//HCsize))
    t1_ind2d = t1_ind2d.T
    # gaussian with positive t1 shift
    G_p = numpy.exp(-(t2_ind2d-t1_ind2d)**2)
    # gaussian with negative t1 shift
    G_m = numpy.exp(-(t2_ind2d+t1_ind2d)**2)
    # Gaussian function max of G_p or G_m
    G = numpy.maximum(G_p, G_m)
    # extend apodization matrix with n echoes with LB
    LG = numpy.resize(G, (nEchoes, TD1//HCsize, oneEchoSize)).swapaxes(0, 1)
    # LG is now (TD1/2, necho, oneEchoSize) shape
    LB = lb
    L = numpy.exp(numpy.arange(nEchoes)*(-LB*1e-6*dw*oneEchoSize))
    LG *= L[numpy.newaxis, :, numpy.newaxis]
    # apply apodization for each hypercomplex component
    for f1HC in range(HCsize):
        for f2HC in range(2):
            summed[:, f1HC, :, :, f2HC] *= LG
            # print(LG.shape, summed[:, i, :, :, j].shape)
    # sum echoes odd, even or all
    if echo_parity == 'odd':  # add only odd echoes (start 1) :
        a = 0
        c = 2
    elif echo_parity == 'even':  # add only even echoes (start 1) :
        a = 1
        c = 2
    else:  # add all echoes
        a = 0
        c = 1
    SUM = summed[:, :, a::c, :, :].sum(axis=2).reshape(TD1, oneEchoSize, 2)
    # write the result into the 2rr and 2ri files:
    # separate Re and Im
    s1 = SUM[..., 0]
    s2 = SUM[..., 1]
    # print(s1.max(), s2.max())
    # print(s1.min(), s2.min())
    smax = numpy.absolute(s1+1j*s2).max()
    # zero fill so that topspin can process the data, and restore the
    # points corresponding to the digital filter
    SI = dataset.readprocpar("SI", False)
    SI1 = dataset.readprocpar("SI", status=False, dimension=2)
    # TODO: ADD ZEROFILL in t1
    # digFilLen=0
    # print(digFilLen)
    r1 = numpy.hstack((numpy.zeros((TD1, digFilLen)), s1,
                       numpy.zeros((TD1, SI-oneEchoSize-digFilLen))))
    r2 = numpy.hstack((numpy.zeros((TD1, digFilLen)), s2,
                       numpy.zeros((TD1, SI-oneEchoSize-digFilLen))))
    r1 = numpy.vstack((r1, numpy.zeros((SI1-TD1, SI))))
    r2 = numpy.vstack((r2, numpy.zeros((SI1-TD1, SI))))
    # print(r1.shape, r2.shape)
    if mode2D in [1, ]:  # QF only
        imag_file = '2ii'
    elif mode2D in [4, 5, 6, ]:  # states, states-TPPI, Echo-AntiEcho
        imag_file = '2ir'
    # NOTE(review): imag_file is currently unused -- the explicit
    # writespect2d calls below are commented out.
    # write the 1r 1i files
    dataset.writespect2dall([r1, r2], MC2=mode2D-1, dType="tt")
    # dataset.writespect2d(r1, name="2rr", dType="tt", MAX=smax)
    # dataset.writespect2d(r2, name=imag_file, dType="tt",MAX=smax)
    # write some status processed parameters in procs file so topspin can display
    # and process the data properly
    dataset.writeprocpar("PKNL", "no", status=True)
    # dataset.writeprocpar("PKNL", "no", status = False)
    # set all optionnal processing parameters to 0
    ProcOptions = {"WDW": [["LB", 0], ["GB", 0], ["SSB", 0], ["TM1", 0], ["TM2", 0]],
                   "PH_mod": [["PHC0", 0], ["PHC1", 0]],
                   "BC_mod": [["BCFW", 0], ["COROFFS", 0]],
                   "ME_mod": [["NCOEF", 0], ["LPBIN", 0], ["TDoff", 0]],
                   "FT_mod": [["FTSIZE", 0], ["FCOR", 0], ["STSR", 0],
                              ["STSI", 0], ["REVERSE", False]],
                   }
    for dim in [1, 2]:
        for par in ProcOptions:
            dataset.writeprocpar(par, 0, True, dimension=dim)
            for opt in ProcOptions[par]:
                dataset.writeprocpar(opt[0], opt[1], True, dimension=dim)
    # need to deal with TDoff. Although not used in time domain for indirect
    # dimension it must be copied for further processing
    TDoff = dataset.readprocpar("TDoff", status=False, dimension=2)
    dataset.writeprocpar("TDoff", (TDoff), status=True, dimension=2)
    # even though we are in time domain we need to set a SW_p in ppm
    # with respect to irradiation frequency SFO1
    # otherwise the OFFSET is not properly calculated in further
    # topspin calculations especially in indirect dimension...
    sw1 = dataset.readacqpar("SW_h", status=True, dimension=2)
    sfo1 = dataset.readacqpar("SFO1", status=True, dimension=2)
    sw2 = dataset.readacqpar("SW_h", status=True, dimension=1)
    sfo2 = dataset.readacqpar("SFO1", status=True, dimension=1)
    dataset.writeprocpar("SW_p", (sw2/sfo2), status=True, dimension=1)
    dataset.writeprocpar("SW_p", (sw1/sfo1), status=True, dimension=2)
    # adjust the WDW in F2 since we applied some GB/LB
    dataset.writeprocpar("WDW", 1, True, 1)
    # BUG FIX: was `args.gb`, a reference to the CLI namespace global that
    # only exists when run as a script (NameError when called as a library);
    # the `gb` parameter carries the identical value in script usage.
    dataset.writeprocpar("LB", (gb), True, 1)
    dataset.writeprocpar("AXUNIT", "s", True)
    dataset.writeprocpar("AXUNIT", "s", True, dimension=1)
    dataset.writeprocpar("AXRIGHT", (SI*dw*1e-6), True)
    dataset.writeprocpar("AXRIGHT", (SI1/2*dw1*1e-6), True, dimension=2)
if __name__ == '__main__':
    # command-line argument handling
    import argparse
    parser = argparse.ArgumentParser(description='Add echoes in a (Hypercomplex) 2D qcpmg bruker experiment')
    parser.add_argument('-l', '--lb', type=float,
                        help='Lorentzian broadening applied to the decaying echo', default=0)
    parser.add_argument('-g', '--gb', type=float,
                        help='Gaussian broadening applied to each echo', default=0)
    parser.add_argument('-n', type=int, help='Number of echo to sum')
    parser.add_argument('-s', '--slope', type=float,
                        help='t2/t1 slope along which the echoes are shifting', default=0)
    parser.add_argument('-c', '--cycle', type=float,
                        help='qcpmg cycle in us', default = None)
    # -e and -o are mutually exclusive: sum either all echoes (default),
    # only the even ones, or only the odd ones
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-e', '--even_echo_only', action='store_true',
                       help='Sum only even echoes')
    group.add_argument('-o', '--odd_echo_only', action='store_true',
                       help='Sum only odd echoes')
    parser.add_argument('infile', help='Full path of the dataset to process')
    args = parser.parse_args()
    # print(brukerIO.splitprocpath(infile))
    # open the Bruker dataset and translate the CLI flags into the
    # echo_parity keyword expected by cpmgadd2D
    dat = brukerIO.dataset(brukerIO.splitprocpath(args.infile))
    if args.even_echo_only:
        parity = "even"
    elif args.odd_echo_only:
        parity = "odd"
    else:
        parity = "both"
    cpmgadd2D(dat, lb=args.lb, gb=args.gb, nEchoes=args.n, slope=args.slope,
              cycle=args.cycle, echo_parity=parity)
| {
"content_hash": "703d0b3469af89fb7b6f76fc0590c280",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 128,
"avg_line_length": 41.21917808219178,
"alnum_prop": 0.6055167829843802,
"repo_name": "jtrebosc/JTutils",
"id": "449153b41ebef1fa0eef063ca86b6de3fe834382",
"size": "12206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CpyBin/qcpmgadd2D_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307974"
}
],
"symlink_target": ""
} |
"""Contains the version string of AlphaFold inference components."""
__version__ = "0.1.1dev"
| {
"content_hash": "c2e33a763520589672e3e398cb9a63fe",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 68,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7052631578947368,
"repo_name": "GoogleCloudPlatform/vertex-ai-alphafold-inference-pipeline",
"id": "0b4345aa4f84f7ec92d7c8de8a753f6384497b81",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/components/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3128"
},
{
"name": "HCL",
"bytes": "14622"
},
{
"name": "Jupyter Notebook",
"bytes": "32030"
},
{
"name": "Python",
"bytes": "142035"
}
],
"symlink_target": ""
} |
from app import db
from models import *
import datetime
# create the db and tables
db.create_all()

# prepare data to insert
year = 1982
month = 4
day = 3
birthday = datetime.date(year, month, day)

now = datetime.datetime.now()
today = datetime.date(now.year, now.month, now.day)
# NOTE(review): the day is hard-coded to 13, so this is only "yesterday"
# on the 14th of the month -- confirm intent.
yesterday = datetime.date(now.year, now.month, 13)

# insert data
adam = User("adam", "abmorton@gmail.com", "testpw", yesterday)
# db.session.add(User("admin", "admin@admin.com", "adminpw", today))
# BUG FIX: was db.session.add(User(adam)) -- wrapping the already-built
# User instance in the User constructor instead of adding the instance.
db.session.add(adam)
db.session.commit()

# make a Portfolio (adam.id is populated by the commit above)
port = Portfolio(adam.id)
db.session.add(port)
db.session.commit()

# add a stock
db.session.add(Stock("XOMA", "XOMA Corporation", "NGM", "0.9929", None, None, None, "117.74M", 1))
db.session.commit()

# get a stock instance for later use creating other records
stock = Stock.query.get(1)

# make some trades
db.session.add(Trade(stock.symbol, 1, 10, yesterday, None, None, None))
db.session.add(Trade(stock.symbol, 1.20, -5, today, None, None, None))

# make a Position
# pos = Position(port.id, )
# position = Position(1)

# insert the data requiring ForeignKeys & relationship()

# commit changes
db.session.commit()
"content_hash": "86d77eeb7755bc895418d17ac212495b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 98,
"avg_line_length": 21.381818181818183,
"alnum_prop": 0.70578231292517,
"repo_name": "abmorton/stockhawk",
"id": "d85ca52402346be7dfaf6277ede793e7a996a2e4",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47978"
},
{
"name": "Python",
"bytes": "56872"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration of the item app: creates the Item table (a client
    # order/job with payment tracking).  Depends on client migration 0002
    # because Item.client is a ForeignKey to client.Client.

    dependencies = [
        ('client', '0002_auto_20150122_1116'),
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('name', models.CharField(verbose_name='Nome', max_length=256)),
                ('description', models.TextField(verbose_name='')),
                ('concluded', models.BooleanField(verbose_name='Concluido', default=False)),
                # arrived_date auto-set on creation; delivery_date is the due date
                ('arrived_date', models.DateTimeField(verbose_name='Chegou', auto_now_add=True)),
                ('delivery_date', models.DateTimeField(verbose_name='Entrega')),
                ('total_value', models.DecimalField(verbose_name='Valor total', max_digits=20, default=0.0, decimal_places=2)),
                ('total_paid', models.DecimalField(verbose_name='Valor pago', max_digits=20, default=0.0, decimal_places=2)),
                ('client', models.ForeignKey(to='client.Client', related_name='client')),
            ],
            options={
                'verbose_name': 'Item',
                'verbose_name_plural': 'Items',
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "47986185c362a5fc8f24e8fa75e15130",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 127,
"avg_line_length": 42.75,
"alnum_prop": 0.5723684210526315,
"repo_name": "delete/estofadora",
"id": "5cb13f1237927ddf37ba90aaba236f0795192613",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "estofadora/item/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91546"
},
{
"name": "HTML",
"bytes": "81458"
},
{
"name": "JavaScript",
"bytes": "147042"
},
{
"name": "Python",
"bytes": "144796"
}
],
"symlink_target": ""
} |
from pandas.compat import StringIO, callable, signature
from pandas.lib import cache_readonly # noqa
import sys
import warnings
from textwrap import dedent
from functools import wraps
def deprecate(name, alternative, alt_name=None):
    """Return a wrapper around *alternative* that emits a FutureWarning
    naming *name* as deprecated before delegating the call.

    Parameters
    ----------
    name : str
        The deprecated name, quoted in the warning.
    alternative : callable
        The replacement implementation to delegate to.
    alt_name : str, optional
        Display name of the replacement; falls back to
        ``alternative.__name__`` when falsy.
    """
    display_name = alt_name or alternative.__name__

    def wrapper(*args, **kwargs):
        warnings.warn("%s is deprecated. Use %s instead" % (name, display_name),
                      FutureWarning, stacklevel=2)
        return alternative(*args, **kwargs)
    return wrapper
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    """Decorator factory that deprecates a keyword argument of a function.

    When the decorated function is called with ``old_arg_name``, a
    FutureWarning is emitted and the value is forwarded (optionally
    translated through ``mapping``) as ``new_arg_name``.

    Parameters
    ----------
    old_arg_name : str
        Name of the deprecated keyword argument.
    new_arg_name : str
        Name of the preferred keyword argument.
    mapping : dict or callable, optional
        Translation from old values to new values.  A callable must do
        its own value checking; values missing from a dict pass through
        unchanged.
    stacklevel : int
        Forwarded to ``warnings.warn`` so the warning points at the caller.

    Raises
    ------
    TypeError
        At decoration time when ``mapping`` is neither dict-like nor
        callable, or at call time when both keywords are supplied.
    """
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")

    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_value = kwargs.pop(old_arg_name, None)
            if old_value is None:
                # old keyword absent: plain pass-through
                return func(*args, **kwargs)
            if mapping is None:
                new_value = old_value
                msg = "the '%s' keyword is deprecated, " \
                      "use '%s' instead" % (old_arg_name, new_arg_name)
            else:
                if hasattr(mapping, 'get'):
                    new_value = mapping.get(old_value, old_value)
                else:
                    new_value = mapping(old_value)
                msg = "the %s=%r keyword is deprecated, " \
                      "use %s=%r instead" % \
                      (old_arg_name, old_value,
                       new_arg_name, new_value)
            warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
            if kwargs.get(new_arg_name, None) is not None:
                raise TypeError("Can only specify '%s' or '%s', not both" %
                                (old_arg_name, new_arg_name))
            kwargs[new_arg_name] = new_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.sourceforge.net/users/license.html
class Substitution(object):
    """
    Decorator performing %-style string substitution on a function's
    docstring.

    Robust when ``func.__doc__`` is None (for example, if -OO was passed
    to the interpreter).

    Construct with either positional or keyword substitution values, then
    decorate the target function:

        @Substitution(author='Jason')
        def some_function(x):
            "%(author)s wrote this function"

        @Substitution('Edgar Allen', 'Poe')
        def some_function(x):
            "%s %s wrote the Raven"
    """

    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs):
        """Assume self.params is a dict and update it with supplied args."""
        self.params.update(*args, **kwargs)

    @classmethod
    def from_params(cls, params):
        """
        Build a Substitution holding a *reference* to ``params``, so later
        mutations of the mapping are visible at decoration time (unlike
        *args/**kwargs, which copy the values).
        """
        instance = cls()
        instance.params = params
        return instance
class Appender(object):
    """
    Decorator that appends an addendum to the target function's docstring.

    Robust when ``func.__doc__`` is None (for example, if -OO was passed
    to the interpreter).  An optional ``join`` string separates the
    original docstring from the addendum; ``indents`` indents the
    addendum's continuation lines.

        @Appender("Copyright (c) 2009", join='\n')
        def my_dog(has='fleas'):
            "This docstring will have a copyright below"
            pass
    """

    def __init__(self, addendum, join='', indents=0):
        self.addendum = indent(addendum, indents=indents) if indents > 0 else addendum
        self.join = join

    def __call__(self, func):
        pieces = [func.__doc__ or '', self.addendum or '']
        self.addendum = pieces[1]
        func.__doc__ = dedent(self.join.join(pieces))
        return func
def indent(text, indents=1):
    """Indent every continuation line of *text* by ``indents`` spaces.

    Returns '' for falsy or non-str input; the first line is untouched.
    """
    if not text or not isinstance(text, str):
        return ''
    separator = '\n' + ' ' * indents
    return separator.join(text.split('\n'))
def suppress_stdout(f):
    """Decorator that silences writes to ``sys.stdout`` while *f* runs.

    stdout is swapped for an in-memory buffer for the duration of the
    call and restored afterwards (even if *f* raises).

    BUG FIX: the original discarded *f*'s return value; the wrapper now
    returns it (backward-compatible -- callers ignoring the previous
    None are unaffected).
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        try:
            sys.stdout = StringIO()
            return f(*args, **kwargs)
        finally:
            # always restore the real stdout, even on exception
            sys.stdout = sys.__stdout__
    return wrapped
class KnownFailureTest(Exception):
    """Exception signalling that a test is a known, expected failure."""
    pass
def knownfailureif(fail_condition, msg=None):
    """
    Decorator factory: raise KnownFailureTest instead of running the test
    when a condition holds.

    ``fail_condition`` may be a bool or a zero-argument callable evaluated
    at call time -- useful to defer costly imports/checks until the test
    suite actually executes.

    Parameters
    ----------
    fail_condition : bool or callable
        Whether the decorated test is a known failure.
    msg : str, optional
        Message for the raised KnownFailureTest.  Default is None, which
        substitutes a generic message.

    Returns
    -------
    decorator : function
        Wraps a test so it raises KnownFailureTest when the condition is
        true and runs normally otherwise.

    Notes
    -----
    The wrapper is passed through ``nose.tools.make_decorator`` so the
    test name and metadata are preserved.
    """
    if msg is None:
        msg = 'Test skipped due to known failure'

    # Normalise to a callable so the check is always deferred to call time.
    if callable(fail_condition):
        check = fail_condition
    else:
        check = lambda: fail_condition

    def knownfail_decorator(f):
        # Local import keeps nose a test-time-only dependency.
        import nose

        def knownfailer(*args, **kwargs):
            if check():
                raise KnownFailureTest(msg)
            return f(*args, **kwargs)
        return nose.tools.make_decorator(f)(knownfailer)
    return knownfail_decorator
def make_signature(func):
    """
    Return ``(arg_strings, arg_names)`` describing *func*'s call signature.

    Each entry of ``arg_strings`` is either ``name`` or
    ``name=repr(default)``; ``*varargs`` and ``**keywords`` entries are
    appended when present.  ``arg_names`` is the plain list of positional
    argument names.

    Examples
    --------
    >>> def f(a, b, c=2):
    ...     return a * b * c
    >>> make_signature(f)[0]
    ['a', 'b', 'c=2']
    """
    spec = signature(func)
    declared_defaults = spec.defaults if spec.defaults is not None else ()
    # left-pad with '' so arguments without a default line up positionally
    padded = ('',) * (len(spec.args) - len(declared_defaults)) + tuple(declared_defaults)
    rendered = [name if default == '' else name + '=' + repr(default)
                for name, default in zip(spec.args, padded)]
    if spec.varargs:
        rendered.append('*' + spec.varargs)
    if spec.keywords:
        rendered.append('**' + spec.keywords)
    return rendered, spec.args
| {
"content_hash": "6f16fa94450dd13e3e5832e350c5a517",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 79,
"avg_line_length": 33.19178082191781,
"alnum_prop": 0.6001857201815931,
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"id": "58cd0c13d8ec76c9b950052bca2e9becd6cedbd1",
"size": "9692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/util/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "738713"
},
{
"name": "C++",
"bytes": "169366"
},
{
"name": "CSS",
"bytes": "14786"
},
{
"name": "Fortran",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "1408733"
},
{
"name": "JavaScript",
"bytes": "13700"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "19755294"
},
{
"name": "Shell",
"bytes": "3276"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from pyttsx.drivers import _espeak
import ctypes
import wave
import time
import threading
import StringIO
class Synth(object):
    """Synthesize text with espeak and capture the audio into a WAV file."""

    # Class-level default; flipped to True (per instance) by the callback
    # once espeak delivers an empty buffer, i.e. synthesis has finished.
    _done = False

    def __init__(self):
        self.rate = _espeak.Initialize(_espeak.AUDIO_OUTPUT_RETRIEVAL, 1000)
        assert self.rate != -1, 'could not initialize espeak'
        # espeak calls this instance back with each synthesized chunk
        _espeak.SetSynthCallback(self)
        self.lock = threading.Lock()

    def __call__(self, wav, numsamples, events):
        # espeak synth callback: append 16-bit samples until an empty
        # buffer marks the end of synthesis
        if self._done:
            return 0
        chunk = ctypes.string_at(wav, numsamples * 2)
        if not chunk:
            self._done = True
        else:
            self.wav.writeframes(chunk)
        return 0

    def say(self, say, out):
        """Synthesize *say* and write mono 16-bit audio to *out*."""
        with self.lock:
            writer = wave.open(out, 'w')
            self.wav = writer
            writer.setnchannels(1)
            writer.setsampwidth(2)
            writer.setframerate(self.rate)
            self._done = False
            _espeak.Synth(say)
            # busy-wait (yielding the GIL) until the callback signals EOF
            while not self._done:
                time.sleep(0)
            writer.close()
| {
"content_hash": "9b9e18fc6ad847a7bb18c4b719ecc4f5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 28.63888888888889,
"alnum_prop": 0.5645004849660524,
"repo_name": "proppy/appengine-vm-fortunespeak-python",
"id": "e9941a27217759cd503e7b85b6c98549b52090c1",
"size": "1031",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2713"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
} |
import json
from nltk.corpus import wordnet as wn
from itertools import combinations
if __name__ == '__main__':
data = json.load(open('data/nasa_kw.json'))
outfile = 'data/keyword_synset_scores.json'
kw_field = 'keyword'
max_items = 700000 # limit keyword pairs to analyse. there are ~1100 projects, that's ~640000 pairs!
dataset = data['dataset']
similarity = []
single_words = []
# create a list of all single words among keywords
for i, ds in enumerate(dataset):
for keyword in ds[kw_field]:
for one_word in keyword.lower().split():
if one_word not in single_words:
single_words.append(one_word)
# create a matrix of all similarity scores
#
# for all unique pairs of keyword words:
# compare each word in the synset of the first with that of the second
# keep the highest similarity score, let that be the score for those two words
#
# 0 if at least one of the words is not in WordNet
#
# 1134 words have 642411 unique pairs
# count = 0
for pair in list(combinations(single_words, 2))[:max_items]:
key = str(sorted(pair, key=unicode.lower))
if key not in similarity:
score = 0.0
if pair[0] == pair[1]:
score = 1.0
else:
for word1 in wn.synsets(pair[0]):
for word2 in wn.synsets(pair[1]):
score = max([score, word1.path_similarity(word2)])
similarity.append({'keyword': pair, 'score': score})
# print count # sanity check for long runs
# count += 1
with open(outfile, 'w') as f:
json.dump(similarity, f)
print 'done'
# calculate a score for each pair of projects in the dataset
#
# score = sum of keyword similarities for all pairs of keywords
# print len(dataset)
# print len(single_words)
# print similarity
# [i for i,x in enumerate(testlist) if x == 1]
# print score | {
"content_hash": "8195d43d1f6625580827d3dc02d39368",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 104,
"avg_line_length": 34.88135593220339,
"alnum_prop": 0.5889212827988338,
"repo_name": "jonroberts/nasaMining",
"id": "fd5994e4022dcaf8fefb00cd4bbe36dd87b5251e",
"size": "2058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keyword_similarity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "116224"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Python",
"bytes": "88008"
}
],
"symlink_target": ""
} |
"""Maps subworkflow of Oozie to Airflow's sub-dag"""
import logging
import os
from typing import Dict, List, Optional, Set, Type
from xml.etree.ElementTree import Element
from o2a.converter.oozie_converter import OozieConverter
from o2a.converter.relation import Relation
from o2a.converter.renderers import BaseRenderer
from o2a.converter.task import Task
from o2a.definitions import EXAMPLES_PATH
from o2a.mappers.action_mapper import ActionMapper
from o2a.o2a_libs.property_utils import PropertySet
from o2a.transformers.base_transformer import BaseWorkflowTransformer
from o2a.utils import xml_utils
TAG_APP = "app-path"
# pylint: disable=too-many-instance-attributes
class SubworkflowMapper(ActionMapper):
    """
    Converts a Sub-workflow Oozie node to an Airflow task.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        oozie_node: Element,
        name: str,
        dag_name: str,
        input_directory_path: str,
        output_directory_path: str,
        props: PropertySet,
        action_mapper: Dict[str, Type[ActionMapper]],
        renderer: BaseRenderer,
        transformers: Optional[List[BaseWorkflowTransformer]] = None,
        **kwargs,
    ):
        ActionMapper.__init__(
            self,
            oozie_node=oozie_node,
            name=name,
            dag_name=dag_name,
            props=props,
            input_directory_path=input_directory_path,
            **kwargs,
        )
        self.task_id = name
        self.input_directory_path = input_directory_path
        self.output_directory_path = output_directory_path
        self.dag_name = dag_name
        self.action_mapper = action_mapper
        self.renderer = renderer
        self.transformers = transformers or []
        self._parse_oozie_node()

    def _parse_oozie_node(self):
        """Read the sub-workflow's app-path and recursively convert that app."""
        app_path = xml_utils.get_tag_el_text(self.oozie_node, TAG_APP)
        _, _, self.app_name = app_path.rpartition("/")
        # TODO: hacky: we should calculate it deriving from input_directory_path and comparing app-path
        # TODO: but for now we assume app is in "examples"
        app_path = os.path.join(EXAMPLES_PATH, self.app_name)
        # Lazy %-formatting: the message is only built if INFO is enabled.
        logging.info("Converting subworkflow from %s", app_path)
        converter = OozieConverter(
            input_directory_path=app_path,
            output_directory_path=self.output_directory_path,
            renderer=self.renderer,
            action_mapper=self.action_mapper,
            dag_name=self.app_name,
            initial_props=self.get_child_props(),
            transformers=self.transformers,
        )
        converter.convert(as_subworkflow=True)

    def get_child_props(self) -> PropertySet:
        """Return the props to hand to the child workflow: the full parent
        props when <propagate-configuration> is present, otherwise empty."""
        propagate_configuration = self.oozie_node.find("propagate-configuration")
        # Below the `is not None` is necessary due to Element's __bool__() return value:
        # `len(self._children) != 0`,
        # and `propagate_configuration` is an empty node so __bool__() will always return False.
        return (
            self.props if propagate_configuration is not None else PropertySet(config={}, job_properties={})
        )

    def to_tasks_and_relations(self):
        """Map this node to a single templated task and no relations."""
        tasks: List[Task] = [
            Task(task_id=self.name, template_name="subwf.tpl", template_params=dict(app_name=self.app_name))
        ]
        relations: List[Relation] = []
        return tasks, relations

    def required_imports(self) -> Set[str]:
        """Import lines the generated DAG file needs for this task."""
        return {
            "from airflow.utils import dates",
            "from airflow.contrib.operators import dataproc_operator",
            "from airflow.operators.subdag_operator import SubDagOperator",
            f"import subdag_{self.app_name}",
        }
| {
"content_hash": "0b978a9691003d00ae7554999a90ddb8",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 108,
"avg_line_length": 36.99,
"alnum_prop": 0.6428764530954312,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "2757afad508958735ee0f2bf4adfbfab0e698050",
"size": "4293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o2a/mappers/subworkflow_mapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
} |
'''Manipulate feature sets'''
import numpy as np
import unittest
import pdb
def rescale(train, test):
    """
    Rescale the data so that each feature in the training set is in
    the interval [0,1], and apply the same transformation to the test
    set, using the statistics computed on the training set.

    Args:
        train - training set, a 2D numpy array of
                size (num_instances, num_features)
        test - test set, a 2D numpy array of size (num_instances, num_features)

    Returns:
        train_rescaled - training set after rescaling
        test_rescaled - test set after rescaling; values may fall outside
                        [0,1] because train's min/max are used

    Note: a feature that is constant in `train` gives a zero denominator
    and hence NaN columns; strip such columns beforehand with
    delete_constant_features().
    """
    # BUG FIX: the inner helper used to be named `rescale` too, shadowing
    # this function inside its own body — renamed to avoid the trap.
    def _scale(m, mins, maxs):
        '''Apply x := (x - min)/(max - min) columnwise with the given
        per-column statistics.'''
        return (m - mins) / (maxs - mins)
    # Statistics are computed on the training set only, then reused.
    mins = np.amin(train, axis=0)
    maxs = np.amax(train, axis=0)
    train_rescaled = _scale(train, mins, maxs)
    test_rescaled = _scale(test, mins, maxs)
    return train_rescaled, test_rescaled
class TestRescale(unittest.TestCase):
    """Unit tests for rescale() (Python 2: uses print statements)."""
    def setUp(self):
        # Flip to True for debugging output from the tests below.
        self.verbose = False
    def test_one_column(self):
        # A single-column input rescaled onto itself must land in [0,1].
        test = np.float64([[2],
                           [-10],
                           [100]])
        train = test
        train_rescaled, test_rescaled = \
            rescale(train, test)
        if self.verbose:
            print 'train_rescaled'
            print train_rescaled
        self.assertTrue((train_rescaled <= 1.0).all())
        self.assertTrue((train_rescaled >= 0.0).all())
    def test_rand(self):
        # Random data: only the training set is guaranteed to be in [0,1].
        test = np.random.rand(2, 3) * 10
        train = np.random.rand(5, 3) * 10
        train_rescaled, test_rescaled = \
            rescale(train, test)
        if self.verbose:
            print 'train_rescaled'
            print train_rescaled
            print 'test_rescaled'
            print test_rescaled
        self.assertTrue((train_rescaled <= 1.0).all())
        self.assertTrue((train_rescaled >= 0.0).all())
        # NOTE: the test vectors satisfying no conditions at all
    def test_known(self):
        '''See lab book for 2/1 for notes.'''
        # Hand-computed expected values; test values fall outside [0,1]
        # because train's min/max are applied to them.
        train = np.float64([[-1],
                            [0],
                            [1]])
        test = np.float64([[-4],
                           [6],
                           [0]])
        train_rescaled, test_rescaled = \
            rescale(train, test)
        if self.verbose:
            print 'train_rescaled'
            print train_rescaled
            print 'test_rescaled'
            print test_rescaled
        def check_equal(v, a, b, c):
            # Compare the three rows of a column vector to expected scalars.
            self.assertAlmostEqual(v[0], a)
            self.assertAlmostEqual(v[1], b)
            self.assertAlmostEqual(v[2], c)
        check_equal(train_rescaled, 0, .5, 1)
        check_equal(test_rescaled, -1.5, 3.5, .5)
def delete_constant_features(x):
    '''Return x restricted to the columns taking at least two values.

    ARGS
      x: np.array 2d

    RETURNS
      np.array 2d with the constant columns removed

    ref: answer to hw1, DS1003, Spring 2015
    '''
    # A column varies iff its max differs from its min.
    varies = x.max(axis=0) != x.min(axis=0)
    return x[:, varies]
class TestDeleteConstantFeatures(unittest.TestCase):
    """Unit tests for delete_constant_features()."""
    def test_delete_none(self):
        # No constant column: the input must come back unchanged.
        original = np.array([[1, 1, 2],
                             [10, 20, 30]]).T
        result = delete_constant_features(original)
        self.assertTrue(np.linalg.norm(original - result) <= 1e-6)
    def test_delete_one(self):
        # The all-100 middle column must be dropped, leaving a 3x2 array.
        original = np.array([[1, 1, 2],
                             [100, 100, 100],
                             [10, 20, 30]]).T
        result = delete_constant_features(original)
        self.assertEqual(result.shape[0], 3)
        self.assertEqual(result.shape[1], 2)
if __name__ == '__main__':
    # Run the unit tests when executed as a script.  (Removed the dead
    # `if False: pdb.set_trace()` debugging scaffold that followed.)
    unittest.main()
| {
"content_hash": "0b508370609f7c6628c6131fefc7de93",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 80,
"avg_line_length": 28.346938775510203,
"alnum_prop": 0.5478761699064075,
"repo_name": "rlowrance/mlpack",
"id": "6221556a12afa502c332bc45f6d8d230fdd81f56",
"size": "4167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121439"
},
{
"name": "Shell",
"bytes": "710"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from django.contrib.auth.models import User
from mockatron_core.models import *
from .classes import *
class UserCreateSerializer(serializers.ModelSerializer):
    """Creates Django auth users; the password is accepted but never echoed."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email', 'username', 'password')
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Build the user, hash the raw password, persist, and return it."""
        raw_password = validated_data['password']
        new_user = User(
            username=validated_data['username'],
            email=validated_data['email'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
        )
        # set_password stores a salted hash, never the raw password.
        new_user.set_password(raw_password)
        new_user.save()
        return new_user
class UserReadOnlySerializer(serializers.ModelSerializer):
    """Read-only view of a Django auth user; excludes the password field."""
    class Meta:
        model = User
        fields = ('id', 'first_name', 'last_name', 'email', 'username')
class AgentSerializer(serializers.ModelSerializer):
    """Serializes Agent models.  'operations', 'responses' and 'filters'
    are presumably reverse relations — confirm in mockatron_core.models."""
    class Meta:
        model = Agent
        fields = ('id', 'protocol', 'host', 'port', 'path', 'method', 'content_type', 'responder', 'created', 'operations', 'responses', 'filters')
class OperationSerializer(serializers.ModelSerializer):
    """Serializes Operation models, including their parent agent reference."""
    class Meta:
        model = Operation
        fields = ('id', 'agent', 'name', 'input_message', 'output_message', 'responder', 'responses', 'filters')
class ResponseSerializer(serializers.ModelSerializer):
    """Serializes canned Response models (content plus HTTP status code)."""
    class Meta:
        model = Response
        fields = ('id', 'agent', 'operation', 'label', 'http_code', 'content', 'enable')
class FilterSerializer(serializers.ModelSerializer):
    """Serializes Filter models with their request/response conditions."""
    class Meta:
        model = Filter
        fields = ('id', 'agent', 'operation', 'label', 'priority', 'enable', 'request_conditions', 'response_conditions')
class RequestConditionSerializer(serializers.ModelSerializer):
    """Serializes RequestCondition models attached to a filter."""
    class Meta:
        model = RequestCondition
        fields = ('id', 'filter', 'field_type', 'header_or_query_param', 'operator', 'value')
class ResponseConditionSerializer(serializers.ModelSerializer):
    """Serializes ResponseCondition models attached to a filter."""
    class Meta:
        model = ResponseCondition
        fields = ('id', 'filter', 'field_type', 'operator', 'value')
| {
"content_hash": "aba210aa6c82a24df63f980ba8d448bc",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 147,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.6487873134328358,
"repo_name": "rodrigozc/mockatron",
"id": "066d06adab14f7e4eb3f53df5d61a8e32f3d8bf5",
"size": "2144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mockatron_api/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "265"
},
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "HTML",
"bytes": "45502"
},
{
"name": "JavaScript",
"bytes": "1651"
},
{
"name": "Python",
"bytes": "52376"
},
{
"name": "TypeScript",
"bytes": "55828"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import decimal
import itertools
from copy import copy
from argeweb.libs.wtforms import widgets
from argeweb.libs.wtforms.compat import text_type, izip
from argeweb.libs.wtforms.i18n import DummyTranslations
from argeweb.libs.wtforms.validators import StopValidation
from argeweb.libs.wtforms.utils import unset_value
__all__ = (
'BooleanField', 'DecimalField', 'DateField', 'DateTimeField', 'FieldList',
'FloatField', 'FormField', 'IntegerField', 'RadioField', 'SelectField',
'SelectMultipleField', 'StringField',
)
class Field(object):
    """
    Field base class
    """
    # Class-level defaults; bound instances overwrite these in __init__()
    # and process(), so even an unprocessed field has sane (empty) values.
    errors = tuple()
    process_errors = tuple()
    raw_data = None
    validators = tuple()
    widget = None
    _formfield = True
    _translations = DummyTranslations()
    do_not_call_in_templates = True # Allow Django 1.4 traversal
    def __new__(cls, *args, **kwargs):
        # Constructed outside a form (the declarative class-body case), a
        # Field yields an UnboundField placeholder instead; the enclosing
        # form later calls bind() with _form/_name to build the real field.
        if '_form' in kwargs and '_name' in kwargs:
            return super(Field, cls).__new__(cls)
        else:
            return UnboundField(cls, *args, **kwargs)
    def __init__(self, label=None, validators=None, filters=tuple(),
                 description='', id=None, default=None, widget=None,
                 render_kw=None, _form=None, _name=None, _prefix='',
                 _translations=None, _meta=None):
        """
        Construct a new field.
        :param label:
            The label of the field.
        :param validators:
            A sequence of validators to call when `validate` is called.
        :param filters:
            A sequence of filters which are run on input data by `process`.
        :param description:
            A description for the field, typically used for help text.
        :param id:
            An id to use for the field. A reasonable default is set by the form,
            and you shouldn't need to set this manually.
        :param default:
            The default value to assign to the field, if no form or object
            input is provided. May be a callable.
        :param widget:
            If provided, overrides the widget used to render the field.
        :param dict render_kw:
            If provided, a dictionary which provides default keywords that
            will be given to the widget at render time.
        :param _form:
            The form holding this field. It is passed by the form itself during
            construction. You should never pass this value yourself.
        :param _name:
            The name of this field, passed by the enclosing form during its
            construction. You should never pass this value yourself.
        :param _prefix:
            The prefix to prepend to the form name of this field, passed by
            the enclosing form during construction.
        :param _translations:
            A translations object providing message translations. Usually
            passed by the enclosing form during construction. See
            :doc:`I18n docs <i18n>` for information on message translations.
        :param _meta:
            If provided, this is the 'meta' instance from the form. You usually
            don't pass this yourself.
        If `_form` and `_name` isn't provided, an :class:`UnboundField` will be
        returned instead. Call its :func:`bind` method with a form instance and
        a name to construct the field.
        """
        if _translations is not None:
            self._translations = _translations
        if _meta is not None:
            self.meta = _meta
        elif _form is not None:
            self.meta = _form.meta
        else:
            raise TypeError("Must provide one of _form or _meta")
        self.default = default
        self.description = description
        self.render_kw = render_kw
        self.filters = filters
        self.flags = Flags()
        self.name = _prefix + _name
        self.short_name = _name
        self.type = type(self).__name__
        self.validators = validators or list(self.validators)
        self.id = id or self.name
        # Default label text: the field name, underscores as spaces, title-cased.
        self.label = Label(self.id, label if label is not None else self.gettext(_name.replace('_', ' ').title()))
        if widget is not None:
            self.widget = widget
        # Copy any 'field_flags' advertised by validators or the widget
        # (e.g. 'required') onto this field's flags.
        for v in itertools.chain(self.validators, [self.widget]):
            flags = getattr(v, 'field_flags', ())
            for f in flags:
                setattr(self.flags, f, True)
    def __unicode__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the `__call__` method.
        """
        return self()
    def __str__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the `__call__` method.
        """
        return self()
    def __html__(self):
        """
        Returns a HTML representation of the field. For more powerful rendering,
        see the :meth:`__call__` method.
        """
        return self()
    def __call__(self, **kwargs):
        """
        Render this field as HTML, using keyword args as additional attributes.
        This delegates rendering to
        :meth:`meta.render_field <wtforms.meta.DefaultMeta.render_field>`
        whose default behavior is to call the field's widget, passing any
        keyword arguments from this call along to the widget.
        In all of the WTForms HTML widgets, keyword arguments are turned to
        HTML attributes, though in theory a widget is free to do anything it
        wants with the supplied keyword arguments, and widgets don't have to
        even do anything related to HTML.
        """
        return self.meta.render_field(self, kwargs)
    def gettext(self, string):
        """
        Get a translation for the given message.
        This proxies for the internal translations object.
        :param string: A unicode string to be translated.
        :return: A unicode string which is the translated output.
        """
        return self._translations.gettext(string)
    def ngettext(self, singular, plural, n):
        """
        Get a translation for a message which can be pluralized.
        :param str singular: The singular form of the message.
        :param str plural: The plural form of the message.
        :param int n: The number of elements this message is referring to
        """
        return self._translations.ngettext(singular, plural, n)
    def validate(self, form, extra_validators=tuple()):
        """
        Validates the field and returns True or False. `self.errors` will
        contain any errors raised during validation. This is usually only
        called by `Form.validate`.
        Subfields shouldn't override this, but rather override either
        `pre_validate`, `post_validate` or both, depending on needs.
        :param form: The form the field belongs to.
        :param extra_validators: A sequence of extra validators to run.
        """
        # Errors raised while processing input carry over into validation.
        self.errors = list(self.process_errors)
        stop_validation = False
        # Call pre_validate
        try:
            self.pre_validate(form)
        except StopValidation as e:
            if e.args and e.args[0]:
                self.errors.append(e.args[0])
            stop_validation = True
        except ValueError as e:
            self.errors.append(e.args[0])
        # Run validators
        if not stop_validation:
            chain = itertools.chain(self.validators, extra_validators)
            try:
                stop_validation = self._run_validation_chain(form, chain)
            # NOTE(review): broad catch — ANY exception escaping the chain
            # (not just ValueError/StopValidation, which the chain already
            # handles) is recorded as a field error and halts validation.
            except Exception as e:
                if e.args and e.args[0]:
                    self.errors.append(e.args[0])
                stop_validation = True
        # Call post_validate
        try:
            self.post_validate(form, stop_validation)
        except ValueError as e:
            self.errors.append(e.args[0])
        return len(self.errors) == 0
    def _run_validation_chain(self, form, validators):
        """
        Run a validation chain, stopping if any validator raises StopValidation.
        :param form: The Form instance this field belongs to.
        :param validators: a sequence or iterable of validator callables.
        :return: True if validation was stopped, False otherwise.
        """
        for validator in validators:
            try:
                validator(form, self)
            except StopValidation as e:
                if e.args and e.args[0]:
                    self.errors.append(e.args[0])
                return True
            except ValueError as e:
                self.errors.append(e.args[0])
        return False
    def pre_validate(self, form):
        """
        Override if you need field-level validation. Runs before any other
        validators.
        :param form: The form the field belongs to.
        """
        pass
    def post_validate(self, form, validation_stopped):
        """
        Override if you need to run any field-level validation tasks after
        normal validation. This shouldn't be needed in most cases.
        :param form: The form the field belongs to.
        :param validation_stopped:
            `True` if any validator raised StopValidation.
        """
        pass
    def process(self, formdata, data=unset_value):
        """
        Process incoming data, calling process_data, process_formdata as needed,
        and run filters.
        If `data` is not provided, process_data will be called on the field's
        default.
        Field subclasses usually won't override this, instead overriding the
        process_formdata and process_data methods. Only override this for
        special advanced processing, such as when a field encapsulates many
        inputs.
        """
        self.process_errors = []
        if data is unset_value:
            # The default may be a callable or a plain value; try calling
            # first and fall back to using it directly.
            try:
                data = self.default()
            except TypeError:
                data = self.default
        self.object_data = data
        try:
            self.process_data(data)
        except ValueError as e:
            self.process_errors.append(e.args[0])
        if formdata is not None:
            # formdata is expected to be a multidict supporting getlist().
            if self.name in formdata:
                self.raw_data = formdata.getlist(self.name)
            else:
                self.raw_data = []
            try:
                self.process_formdata(self.raw_data)
            except ValueError as e:
                self.process_errors.append(e.args[0])
        # Filters run last, on whichever data source won above.
        try:
            for filter in self.filters:
                self.data = filter(self.data)
        except ValueError as e:
            self.process_errors.append(e.args[0])
    def process_data(self, value):
        """
        Process the Python data applied to this field and store the result.
        This will be called during form construction by the form's `kwargs` or
        `obj` argument.
        :param value: The python object containing the value to process.
        """
        self.data = value
    def process_formdata(self, valuelist):
        """
        Process data received over the wire from a form.
        This will be called during form construction with data supplied
        through the `formdata` argument.
        :param valuelist: A list of strings to process.
        """
        if valuelist:
            self.data = valuelist[0]
    def populate_obj(self, obj, name):
        """
        Populates `obj.<name>` with the field's data.
        :note: This is a destructive operation. If `obj.<name>` already exists,
        it will be overridden. Use with caution.
        """
        # argeweb customization (not in upstream WTForms): numeric field
        # types map the empty string to 0 and cast the text data to the
        # target numeric type before assignment.
        if self.type == 'IntegerField':
            if self.data == u'':
                setattr(obj, name, 0)
            else:
                setattr(obj, name, int(self.data))
            return
        if self.type == 'FloatField':
            if self.data == u'':
                setattr(obj, name, 0)
            else:
                setattr(obj, name, float(self.data))
            return
        if self.type == 'RangeField':
            if self.data == u'':
                setattr(obj, name, 0)
            else:
                setattr(obj, name, float(self.data))
            return
        if self.type == 'UserField':
            # Stores self.data.key — presumably a datastore entity key
            # rather than the entity itself; confirm against UserField.
            if self.data == u'':
                setattr(obj, name, None)
            else:
                setattr(obj, name, self.data.key)
            return
        setattr(obj, name, self.data)
class UnboundField(object):
    """
    Placeholder for a field declared without a form.

    Remembers the field class plus the construction arguments so the real
    field can be created later via :meth:`bind`, and records a monotonically
    increasing counter that preserves declaration order.
    """
    _formfield = True
    creation_counter = 0
    def __init__(self, field_class, *args, **kwargs):
        UnboundField.creation_counter += 1
        self.creation_counter = UnboundField.creation_counter
        self.field_class = field_class
        self.args = args
        self.kwargs = kwargs
    def bind(self, form, name, prefix='', translations=None, **kwargs):
        """Instantiate the stored field class, bound to `form` as `name`."""
        bound_kwargs = dict(
            self.kwargs,
            _form=form,
            _prefix=prefix,
            _name=name,
            _translations=translations,
            **kwargs
        )
        return self.field_class(*self.args, **bound_kwargs)
    def __repr__(self):
        return '<UnboundField({0}, {1!r}, {2!r})>'.format(
            self.field_class.__name__, self.args, self.kwargs)
class Flags(object):
    """
    Bag of boolean flags stored as plain instance attributes.

    Reading a flag that was never set yields ``False`` instead of raising,
    so expressions like ``field.flags.required`` are always safe.
    """
    def __getattr__(self, name):
        # Underscore/dunder lookups keep default object behavior.
        if name.startswith('_'):
            return super(Flags, self).__getattr__(name)
        return False
    def __contains__(self, name):
        return getattr(self, name)
    def __repr__(self):
        public = [attr for attr in dir(self) if not attr.startswith('_')]
        return '<wtforms.fields.Flags: {%s}>' % ', '.join(public)
class Label(object):
    """
    Renders an HTML ``<label>`` element for a form field.
    """
    def __init__(self, field_id, text):
        self.field_id = field_id
        self.text = text
    def __str__(self):
        return self()
    def __unicode__(self):
        return self()
    def __html__(self):
        return self()
    def __call__(self, text=None, **kwargs):
        attrs = dict(kwargs)
        # 'for_' is the keyword-safe spelling of the HTML 'for' attribute;
        # otherwise default 'for' to the associated field's id.
        if 'for_' in attrs:
            attrs['for'] = attrs.pop('for_')
        else:
            attrs.setdefault('for', self.field_id)
        html_attrs = widgets.html_params(**attrs)
        body = text or self.text
        return widgets.HTMLString('<label %s>%s</label>' % (html_attrs, body))
    def __repr__(self):
        return 'Label(%r, %r)' % (self.field_id, self.text)
class SelectFieldBase(Field):
    """
    Abstract base for fields that can be iterated to produce options.

    This isn't a concrete field: subclasses implement :meth:`iter_choices`
    and iterating the field yields renderable option sub-fields.
    """
    option_widget = widgets.Option()
    def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
        super(SelectFieldBase, self).__init__(label, validators, **kwargs)
        if option_widget is not None:
            self.option_widget = option_widget
    def iter_choices(self):
        """
        Provide data for choice widget rendering: a sequence or iterable
        of ``(value, label, selected)`` tuples.
        """
        raise NotImplementedError()
    def __iter__(self):
        shared = dict(widget=self.option_widget, _name=self.name, _form=None, _meta=self.meta)
        for index, (value, label, checked) in enumerate(self.iter_choices()):
            option = self._Option(label=label, id='%s-%d' % (self.id, index), **shared)
            option.process(None, value)
            option.checked = checked
            yield option
    class _Option(Field):
        # Set by SelectFieldBase.__iter__ after processing the value.
        checked = False
        def _value(self):
            return text_type(self.data)
class SelectField(SelectFieldBase):
    """
    A choice field rendered as an HTML ``<select>``.
    """
    widget = widgets.Select()
    def __init__(self, label=None, validators=None, coerce=text_type, choices=None, **kwargs):
        super(SelectField, self).__init__(label, validators, **kwargs)
        self.coerce = coerce
        self.choices = copy(choices)
    def iter_choices(self):
        for choice_value, choice_label in self.choices:
            selected = self.coerce(choice_value) == self.data
            yield (choice_value, choice_label, selected)
    def process_data(self, value):
        # Uncoercible object data simply means "nothing selected".
        try:
            coerced = self.coerce(value)
        except (ValueError, TypeError):
            coerced = None
        self.data = coerced
    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            self.data = self.coerce(valuelist[0])
        except ValueError:
            raise ValueError(self.gettext('Invalid Choice: could not coerce'))
    def pre_validate(self, form):
        if not any(self.data == value for value, _ in self.choices):
            raise ValueError(self.gettext('Not a valid choice'))
class SelectMultipleField(SelectField):
    """
    A select field that accepts (and validates) multiple choices at once.
    Remember to set the HTML ``size`` attribute when rendering it.
    """
    widget = widgets.Select(multiple=True)
    def iter_choices(self):
        chosen = self.data
        for value, label in self.choices:
            selected = chosen is not None and self.coerce(value) in chosen
            yield (value, label, selected)
    def process_data(self, value):
        # A non-iterable (e.g. None) or uncoercible value means "no data".
        try:
            self.data = [self.coerce(item) for item in value]
        except (ValueError, TypeError):
            self.data = None
    def process_formdata(self, valuelist):
        try:
            self.data = [self.coerce(item) for item in valuelist]
        except ValueError:
            raise ValueError(self.gettext('Invalid choice(s): one or more data inputs could not be coerced'))
    def pre_validate(self, form):
        if not self.data:
            return
        allowed = list(choice[0] for choice in self.choices)
        for chosen in self.data:
            if chosen not in allowed:
                raise ValueError(self.gettext("'%(value)s' is not a valid choice for this field") % dict(value=chosen))
class RadioField(SelectField):
    """
    Like a SelectField, except displays a list of radio buttons.
    Iterating the field will produce subfields (each containing a label as
    well) in order to allow custom rendering of the individual radio fields.
    """
    # The outer list is rendered without a prefix label; each option is a
    # radio input instead of an <option>.
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.RadioInput()
class StringField(Field):
    """
    The workhorse text field — an ``<input type="text">``.  Most of the
    more complex fields build on this behavior.
    """
    widget = widgets.TextInput()
    def process_formdata(self, valuelist):
        # Missing form data yields the empty string, never None.
        self.data = valuelist[0] if valuelist else ''
    def _value(self):
        if self.data is None:
            return ''
        return text_type(self.data)
class LocaleAwareNumberField(Field):
    """
    Base class adding locale-aware number parsing and formatting.
    Requires the third-party 'babel' package when ``use_locale`` is on.
    """
    def __init__(self, label=None, validators=None, use_locale=False, number_format=None, **kwargs):
        super(LocaleAwareNumberField, self).__init__(label, validators, **kwargs)
        self.use_locale = use_locale
        if not use_locale:
            return
        self.number_format = number_format
        # First locale configured on the owning form's meta wins.
        self.locale = kwargs['_form'].meta.locales[0]
        self._init_babel()
    def _init_babel(self):
        # Import lazily so babel is only required when locale mode is used.
        try:
            from babel import numbers
            self.babel_numbers = numbers
        except ImportError:
            raise ImportError('Using locale-aware decimals requires the babel library.')
    def _parse_decimal(self, value):
        return self.babel_numbers.parse_decimal(value, self.locale)
    def _format_decimal(self, value):
        return self.babel_numbers.format_decimal(value, self.number_format, self.locale)
class IntegerField(Field):
    """
    A text field whose input is coerced to ``int``.  Erroneous input is
    rejected (stored as None) rather than accepted.
    """
    widget = widgets.TextInput()
    def __init__(self, label=None, validators=None, **kwargs):
        super(IntegerField, self).__init__(label, validators, **kwargs)
    def _value(self):
        # Prefer the raw submitted text so invalid input is re-displayed.
        if self.raw_data:
            return self.raw_data[0]
        if self.data is None:
            return ''
        return text_type(self.data)
    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            self.data = int(valuelist[0])
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid integer value'))
class DecimalField(LocaleAwareNumberField):
    """
    A text field which displays and coerces data of the `decimal.Decimal` type.
    :param places:
        How many decimal places to quantize the value to for display on form.
        If None, does not quantize value.
    :param rounding:
        How to round the value during quantize, for example
        `decimal.ROUND_UP`. If unset, uses the rounding value from the
        current thread's context.
    :param use_locale:
        If True, use locale-based number formatting. Locale-based number
        formatting requires the 'babel' package.
    :param number_format:
        Optional number format for locale. If omitted, use the default decimal
        format for the locale.
    """
    widget = widgets.TextInput()
    def __init__(self, label=None, validators=None, places=unset_value, rounding=None, **kwargs):
        super(DecimalField, self).__init__(label, validators, **kwargs)
        # Locale mode and manual places/rounding are mutually exclusive.
        if self.use_locale and (places is not unset_value or rounding is not None):
            raise TypeError("When using locale-aware numbers, 'places' and 'rounding' are ignored.")
        # Default to 2 decimal places when the caller gave no preference.
        if places is unset_value:
            places = 2
        self.places = places
        self.rounding = rounding
    def _value(self):
        # Display priority: raw submitted text > locale formatting >
        # quantized Decimal > string-formatted float/int > plain str().
        if self.raw_data:
            return self.raw_data[0]
        elif self.data is not None:
            if self.use_locale:
                return text_type(self._format_decimal(self.data))
            elif self.places is not None:
                if hasattr(self.data, 'quantize'):
                    # exp is e.g. Decimal('0.01') for places == 2.
                    exp = decimal.Decimal('.1') ** self.places
                    if self.rounding is None:
                        quantized = self.data.quantize(exp)
                    else:
                        quantized = self.data.quantize(exp, rounding=self.rounding)
                    return text_type(quantized)
                else:
                    # If for some reason, data is a float or int, then format
                    # as we would for floats using string formatting.
                    format = '%%0.%df' % self.places
                    return format % self.data
            else:
                return text_type(self.data)
        else:
            return ''
    def process_formdata(self, valuelist):
        if valuelist:
            try:
                if self.use_locale:
                    self.data = self._parse_decimal(valuelist[0])
                else:
                    self.data = decimal.Decimal(valuelist[0])
            except (decimal.InvalidOperation, ValueError):
                self.data = None
                raise ValueError(self.gettext('Not a valid decimal value'))
class FloatField(Field):
    """
    A text field whose input is coerced to ``float``.  Erroneous input is
    rejected (stored as None) rather than accepted.
    """
    widget = widgets.TextInput()
    def __init__(self, label=None, validators=None, **kwargs):
        super(FloatField, self).__init__(label, validators, **kwargs)
    def _value(self):
        # Prefer the raw submitted text so invalid input is re-displayed.
        if self.raw_data:
            return self.raw_data[0]
        if self.data is None:
            return ''
        return text_type(self.data)
    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            self.data = float(valuelist[0])
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid float value'))
class BooleanField(Field):
    """
    Represents an ``<input type="checkbox">``. Set the ``checked``-status by using the
    ``default``-option. Any value for ``default``, e.g. ``default="checked"`` puts
    ``checked`` into the html-element and sets the ``data`` to ``True``
    :param false_values:
        If provided, a sequence of strings each of which is an exact match
        string of what is considered a "false" value. Defaults to the tuple
        ``('false', '')``
    """
    widget = widgets.CheckboxInput()
    false_values = ('false', '')

    def __init__(self, label=None, validators=None, false_values=None, **kwargs):
        super(BooleanField, self).__init__(label, validators, **kwargs)
        # Shadow the class-level default only when a custom set is given.
        if false_values is not None:
            self.false_values = false_values

    def process_data(self, value):
        self.data = bool(value)

    def process_formdata(self, valuelist):
        # Missing checkbox or an explicit "false" string means unchecked.
        self.data = bool(valuelist) and valuelist[0] not in self.false_values

    def _value(self):
        if not self.raw_data:
            return 'y'
        return text_type(self.raw_data[0])
class DateTimeField(Field):
    """
    A text field which stores a `datetime.datetime` matching a format.
    """
    widget = widgets.TextInput()

    def __init__(self, label=None, validators=None, format='%Y-%m-%d %H:%M:%S', **kwargs):
        super(DateTimeField, self).__init__(label, validators, **kwargs)
        self.format = format

    def _value(self):
        # Raw form input wins; otherwise render the stored datetime, if any.
        if self.raw_data:
            return ' '.join(self.raw_data)
        if self.data:
            return self.data.strftime(self.format)
        return ''

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        date_str = ' '.join(valuelist)
        try:
            self.data = datetime.datetime.strptime(date_str, self.format)
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid datetime value'))
class DateField(DateTimeField):
    """
    Same as DateTimeField, except stores a `datetime.date`.
    """

    def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
        super(DateField, self).__init__(label, validators, format, **kwargs)

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        date_str = ' '.join(valuelist)
        try:
            parsed = datetime.datetime.strptime(date_str, self.format)
        except ValueError:
            self.data = None
            raise ValueError(self.gettext('Not a valid date value'))
        else:
            # strptime succeeded, so extracting the date part cannot fail.
            self.data = parsed.date()
class FormField(Field):
    """
    Encapsulate a form as a field in another form.
    :param form_class:
        A subclass of Form that will be encapsulated.
    :param separator:
        A string which will be suffixed to this field's name to create the
        prefix to enclosed fields. The default is fine for most uses.
    """
    widget = widgets.TableWidget()
    def __init__(self, form_class, label=None, validators=None, separator='-', **kwargs):
        super(FormField, self).__init__(label, validators, **kwargs)
        self.form_class = form_class
        self.separator = separator
        # The object (or dict) the enclosed form was last bound to; used as
        # a fallback target in populate_obj().
        self._obj = None
        if self.filters:
            raise TypeError('FormField cannot take filters, as the encapsulated data is not mutable.')
        if validators:
            raise TypeError('FormField does not accept any validators. Instead, define them on the enclosed form.')
    def process(self, formdata, data=unset_value):
        # Bind the enclosed form. A dict default expands into keyword
        # arguments; any other object is passed through as `obj`.
        if data is unset_value:
            try:
                data = self.default()
            except TypeError:
                # `default` was a plain value, not a callable factory.
                data = self.default
            self._obj = data
        self.object_data = data
        prefix = self.name + self.separator
        if isinstance(data, dict):
            self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
        else:
            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)
    def validate(self, form, extra_validators=tuple()):
        # Validation is delegated entirely to the enclosed form.
        if extra_validators:
            raise TypeError('FormField does not accept in-line validators, as it gets errors from the enclosed form.')
        return self.form.validate()
    def populate_obj(self, obj, name):
        # Populate the attribute `name` on `obj`; falls back to the object
        # captured during process() when the attribute is missing/None.
        candidate = getattr(obj, name, None)
        if candidate is None:
            if self._obj is None:
                raise TypeError('populate_obj: cannot find a value to populate from the provided obj or input data/defaults')
            candidate = self._obj
            setattr(obj, name, candidate)
        self.form.populate_obj(candidate)
    def __iter__(self):
        return iter(self.form)
    def __getitem__(self, name):
        return self.form[name]
    def __getattr__(self, name):
        # Unknown attributes are proxied to the enclosed form.
        return getattr(self.form, name)
    @property
    def data(self):
        return self.form.data
    @property
    def errors(self):
        return self.form.errors
class FieldList(Field):
    """
    Encapsulate an ordered list of multiple instances of the same field type,
    keeping data as a list.
    >>> authors = FieldList(StringField('Name', [validators.DataRequired()]))
    :param unbound_field:
        A partially-instantiated field definition, just like that would be
        defined on a form directly.
    :param min_entries:
        if provided, always have at least this many entries on the field,
        creating blank ones if the provided input does not specify a sufficient
        amount.
    :param max_entries:
        accept no more than this many entries as input, even if more exist in
        formdata.
    """
    widget = widgets.ListWidget()
    def __init__(self, unbound_field, label=None, validators=None, min_entries=0,
                 max_entries=None, default=tuple(), **kwargs):
        super(FieldList, self).__init__(label, validators, default=default, **kwargs)
        if self.filters:
            raise TypeError('FieldList does not accept any filters. Instead, define them on the enclosed field.')
        assert isinstance(unbound_field, UnboundField), 'Field must be unbound, not a field class'
        self.unbound_field = unbound_field
        self.min_entries = min_entries
        self.max_entries = max_entries
        # Highest entry index bound so far; -1 means "no entries yet".
        self.last_index = -1
        self._prefix = kwargs.get('_prefix', '')
    def process(self, formdata, data=unset_value):
        """Rebuild self.entries from formdata (by extracted indices) or,
        absent formdata, from the object data, padding up to min_entries."""
        self.entries = []
        if data is unset_value or not data:
            try:
                data = self.default()
            except TypeError:
                # `default` was a plain value, not a callable factory.
                data = self.default
        self.object_data = data
        if formdata:
            indices = sorted(set(self._extract_indices(self.name, formdata)))
            if self.max_entries:
                indices = indices[:self.max_entries]
            # Pair each submitted index with the next object-data item;
            # extra indices get unset_value once object data runs out.
            idata = iter(data)
            for index in indices:
                try:
                    obj_data = next(idata)
                except StopIteration:
                    obj_data = unset_value
                self._add_entry(formdata, obj_data, index=index)
        else:
            for obj_data in data:
                self._add_entry(formdata, obj_data)
        while len(self.entries) < self.min_entries:
            self._add_entry(formdata)
    def _extract_indices(self, prefix, formdata):
        """
        Yield indices of any keys with given prefix.
        formdata must be an object which will produce keys when iterated. For
        example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
        the numbers 0 and 1 will be yielded, but not necessarily in order.
        """
        # +1 skips the separator after the prefix (e.g. 'foo-').
        offset = len(prefix) + 1
        for k in formdata:
            if k.startswith(prefix):
                k = k[offset:].split('-', 1)[0]
                if k.isdigit():
                    yield int(k)
    def validate(self, form, extra_validators=tuple()):
        """
        Validate this FieldList.
        Note that FieldList validation differs from normal field validation in
        that FieldList validates all its enclosed fields first before running any
        of its own validators.
        """
        self.errors = []
        # Run validators on all entries within
        for subfield in self.entries:
            if not subfield.validate(form):
                self.errors.append(subfield.errors)
        chain = itertools.chain(self.validators, extra_validators)
        self._run_validation_chain(form, chain)
        return len(self.errors) == 0
    def populate_obj(self, obj, name):
        """Write each entry's data into obj.<name> as a list, letting every
        entry run its own populate_obj via a throwaway carrier object."""
        values = getattr(obj, name, None)
        try:
            ivalues = iter(values)
        except TypeError:
            ivalues = iter([])
        # Pad existing values with None so zip never truncates the entries.
        candidates = itertools.chain(ivalues, itertools.repeat(None))
        _fake = type(str('_fake'), (object, ), {})
        output = []
        for field, data in izip(self.entries, candidates):
            fake_obj = _fake()
            fake_obj.data = data
            field.populate_obj(fake_obj, 'data')
            output.append(fake_obj.data)
        setattr(obj, name, output)
    def _add_entry(self, formdata=None, data=unset_value, index=None):
        """Bind and process one enclosed field at `index` (or the next free
        index) and append it to self.entries."""
        assert not self.max_entries or len(self.entries) < self.max_entries, \
            'You cannot have more than max_entries entries in this FieldList'
        if index is None:
            index = self.last_index + 1
        self.last_index = index
        name = '%s-%d' % (self.short_name, index)
        id = '%s-%d' % (self.id, index)
        field = self.unbound_field.bind(form=None, name=name, prefix=self._prefix, id=id, _meta=self.meta,
                                        translations=self._translations)
        field.process(formdata, data)
        self.entries.append(field)
        return field
    def append_entry(self, data=unset_value):
        """
        Create a new entry with optional default data.
        Entries added in this way will *not* receive formdata however, and can
        only receive object data.
        """
        return self._add_entry(data=data)
    def pop_entry(self):
        """ Removes the last entry from the list and returns it. """
        entry = self.entries.pop()
        self.last_index -= 1
        return entry
    def __iter__(self):
        return iter(self.entries)
    def __len__(self):
        return len(self.entries)
    def __getitem__(self, index):
        return self.entries[index]
    @property
    def data(self):
        return [f.data for f in self.entries]
| {
"content_hash": "cf2ac240dc5874cc13f1cbbefc61aa1a",
"timestamp": "",
"source": "github",
"line_count": 1019,
"max_line_length": 125,
"avg_line_length": 34.066732090284596,
"alnum_prop": 0.5883793282249237,
"repo_name": "argeweb/start",
"id": "b4dd81d5ede6424ecc3849527a8d45af6fa9385a",
"size": "34714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argeweb/libs/wtforms/fields/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49476"
},
{
"name": "HTML",
"bytes": "49689"
},
{
"name": "JavaScript",
"bytes": "20835"
},
{
"name": "Python",
"bytes": "1718543"
}
],
"symlink_target": ""
} |
'''
Record Order in BIFF8
Workbook Globals Substream
BOF Type = workbook globals
Interface Header
MMS
Interface End
WRITEACCESS
CODEPAGE
DSF
TABID
FNGROUPCOUNT
Workbook Protection Block
WINDOWPROTECT
PROTECT
PASSWORD
PROT4REV
PROT4REVPASS
BACKUP
HIDEOBJ
WINDOW1
DATEMODE
PRECISION
REFRESHALL
BOOKBOOL
FONT +
FORMAT *
XF +
STYLE +
? PALETTE
USESELFS
BOUNDSHEET +
COUNTRY
? Link Table
SST
ExtSST
EOF
'''
import BIFFRecords
import Style
class Workbook(object):
    """An in-memory BIFF8 (Excel 97+) workbook.

    Collects worksheets, styles, a shared-string table and inter-sheet
    reference bookkeeping, then serialises everything to a BIFF byte
    stream via get_biff_data() / save().  Python 2 code (uses `unicode`
    and long literals).
    """
    #################################################################
    ## Constructor
    #################################################################
    def __init__(self, encoding='ascii', style_compression=0):
        self.encoding = encoding
        self.__owner = 'None'
        self.__country_code = None # 0x07 is Russia :-)
        self.__wnd_protect = 0
        self.__obj_protect = 0
        self.__protect = 0
        self.__backup_on_save = 0
        # for WINDOW1 record
        self.__hpos_twips = 0x01E0
        self.__vpos_twips = 0x005A
        self.__width_twips = 0x3FCF
        self.__height_twips = 0x2A4E
        self.__active_sheet = 0
        self.__first_tab_index = 0
        self.__selected_tabs = 0x01
        self.__tab_width_twips = 0x0258
        self.__wnd_hidden = 0
        self.__wnd_mini = 0
        self.__hscroll_visible = 1
        self.__vscroll_visible = 1
        self.__tabs_visible = 1
        self.__styles = Style.StyleCollection(style_compression)
        self.__dates_1904 = 0
        self.__use_cell_values = 1
        self.__sst = BIFFRecords.SharedStringTable(self.encoding)
        self.__worksheets = []
        # Maps lowercased sheet name -> index in self.__worksheets.
        self.__worksheet_idx_from_name = {}
        # Maps (supbook index, first sheet, last sheet) -> EXTERNSHEET ref index.
        self.__sheet_refs = {}
        self._supbook_xref = {}
        self._xcall_xref = {}
        self._ownbook_supbookx = None
        self._ownbook_supbook_ref = None
        self._xcall_supbookx = None
        self._xcall_supbook_ref = None
    #################################################################
    ## Properties, "getters", "setters"
    #################################################################
    def get_style_stats(self):
        """Return a copy of the style collection's statistics list."""
        return self.__styles.stats[:]
    def set_owner(self, value):
        self.__owner = value
    def get_owner(self):
        return self.__owner
    owner = property(get_owner, set_owner)
    #################################################################
    def set_country_code(self, value):
        self.__country_code = value
    def get_country_code(self):
        return self.__country_code
    country_code = property(get_country_code, set_country_code)
    #################################################################
    def set_wnd_protect(self, value):
        self.__wnd_protect = int(value)
    def get_wnd_protect(self):
        return bool(self.__wnd_protect)
    wnd_protect = property(get_wnd_protect, set_wnd_protect)
    #################################################################
    def set_obj_protect(self, value):
        self.__obj_protect = int(value)
    def get_obj_protect(self):
        return bool(self.__obj_protect)
    obj_protect = property(get_obj_protect, set_obj_protect)
    #################################################################
    def set_protect(self, value):
        self.__protect = int(value)
    def get_protect(self):
        return bool(self.__protect)
    protect = property(get_protect, set_protect)
    #################################################################
    def set_backup_on_save(self, value):
        self.__backup_on_save = int(value)
    def get_backup_on_save(self):
        return bool(self.__backup_on_save)
    backup_on_save = property(get_backup_on_save, set_backup_on_save)
    #################################################################
    # Window geometry setters mask to 16 bits as required by WINDOW1.
    def set_hpos(self, value):
        self.__hpos_twips = value & 0xFFFF
    def get_hpos(self):
        return self.__hpos_twips
    hpos = property(get_hpos, set_hpos)
    #################################################################
    def set_vpos(self, value):
        self.__vpos_twips = value & 0xFFFF
    def get_vpos(self):
        return self.__vpos_twips
    vpos = property(get_vpos, set_vpos)
    #################################################################
    def set_width(self, value):
        self.__width_twips = value & 0xFFFF
    def get_width(self):
        return self.__width_twips
    width = property(get_width, set_width)
    #################################################################
    def set_height(self, value):
        self.__height_twips = value & 0xFFFF
    def get_height(self):
        return self.__height_twips
    height = property(get_height, set_height)
    #################################################################
    def set_active_sheet(self, value):
        # The first visible tab follows the active sheet.
        self.__active_sheet = value & 0xFFFF
        self.__first_tab_index = self.__active_sheet
    def get_active_sheet(self):
        return self.__active_sheet
    active_sheet = property(get_active_sheet, set_active_sheet)
    #################################################################
    def set_tab_width(self, value):
        self.__tab_width_twips = value & 0xFFFF
    def get_tab_width(self):
        return self.__tab_width_twips
    tab_width = property(get_tab_width, set_tab_width)
    #################################################################
    def set_wnd_visible(self, value):
        # Stored inverted: the BIFF flag is "hidden".
        self.__wnd_hidden = int(not value)
    def get_wnd_visible(self):
        return not bool(self.__wnd_hidden)
    wnd_visible = property(get_wnd_visible, set_wnd_visible)
    #################################################################
    def set_wnd_mini(self, value):
        self.__wnd_mini = int(value)
    def get_wnd_mini(self):
        return bool(self.__wnd_mini)
    wnd_mini = property(get_wnd_mini, set_wnd_mini)
    #################################################################
    def set_hscroll_visible(self, value):
        self.__hscroll_visible = int(value)
    def get_hscroll_visible(self):
        return bool(self.__hscroll_visible)
    hscroll_visible = property(get_hscroll_visible, set_hscroll_visible)
    #################################################################
    def set_vscroll_visible(self, value):
        self.__vscroll_visible = int(value)
    def get_vscroll_visible(self):
        return bool(self.__vscroll_visible)
    vscroll_visible = property(get_vscroll_visible, set_vscroll_visible)
    #################################################################
    def set_tabs_visible(self, value):
        self.__tabs_visible = int(value)
    def get_tabs_visible(self):
        return bool(self.__tabs_visible)
    tabs_visible = property(get_tabs_visible, set_tabs_visible)
    #################################################################
    def set_dates_1904(self, value):
        self.__dates_1904 = int(value)
    def get_dates_1904(self):
        return bool(self.__dates_1904)
    dates_1904 = property(get_dates_1904, set_dates_1904)
    #################################################################
    def set_use_cell_values(self, value):
        self.__use_cell_values = int(value)
    def get_use_cell_values(self):
        return bool(self.__use_cell_values)
    use_cell_values = property(get_use_cell_values, set_use_cell_values)
    #################################################################
    def get_default_style(self):
        return self.__styles.default_style
    default_style = property(get_default_style)
    ##################################################################
    ## Methods
    ##################################################################
    def add_style(self, style):
        """Register `style` with the style collection; returns its index."""
        return self.__styles.add(style)
    def add_str(self, s):
        """Add `s` to the shared string table; returns its SST index."""
        return self.__sst.add_str(s)
    def del_str(self, sst_idx):
        """Release one reference to the shared string at `sst_idx`."""
        self.__sst.del_str(sst_idx)
    def str_index(self, s):
        """Return the SST index of `s`."""
        return self.__sst.str_index(s)
    def add_sheet(self, sheetname, cell_overwrite_ok=False):
        """Create a new Worksheet and append it to this workbook.

        Raises on invalid or (case-insensitively) duplicate sheet names.
        """
        import Worksheet, Utils
        if not isinstance(sheetname, unicode):
            sheetname = sheetname.decode(self.encoding)
        if not Utils.valid_sheet_name(sheetname):
            raise Exception("invalid worksheet name %r" % sheetname)
        lower_name = sheetname.lower()
        if lower_name in self.__worksheet_idx_from_name:
            raise Exception("duplicate worksheet name %r" % sheetname)
        self.__worksheet_idx_from_name[lower_name] = len(self.__worksheets)
        self.__worksheets.append(Worksheet.Worksheet(sheetname, self, cell_overwrite_ok))
        return self.__worksheets[-1]
    def get_sheet(self, sheetnum):
        """Return the worksheet at position `sheetnum`."""
        return self.__worksheets[sheetnum]
    def raise_bad_sheetname(self, sheetname):
        raise Exception("Formula: unknown sheet name %s" % sheetname)
    def convert_sheetindex(self, strg_ref, n_sheets):
        """Convert a string sheet index to int, validating its range."""
        idx = int(strg_ref)
        if 0 <= idx < n_sheets:
            return idx
        msg = "Formula: sheet index (%s) >= number of sheets (%d)" % (strg_ref, n_sheets)
        raise Exception(msg)
    def _get_supbook_index(self, tag):
        """Return (allocating on first use) the SUPBOOK index for `tag`."""
        if tag in self._supbook_xref:
            return self._supbook_xref[tag]
        self._supbook_xref[tag] = idx = len(self._supbook_xref)
        return idx
    def setup_ownbook(self):
        """Allocate the internal-references ('ownbook') SUPBOOK and its
        EXTERNSHEET reference; 0xFFFE marks a workbook-level reference."""
        self._ownbook_supbookx = self._get_supbook_index(('ownbook', 0))
        self._ownbook_supbook_ref = None
        reference = (self._ownbook_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._ownbook_supbook_ref = len(self.__sheet_refs)
    def setup_xcall(self):
        """Allocate the external-call ('xcall') SUPBOOK and its
        EXTERNSHEET reference (for add-in/external function calls)."""
        self._xcall_supbookx = self._get_supbook_index(('xcall', 0))
        self._xcall_supbook_ref = None
        reference = (self._xcall_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._xcall_supbook_ref = len(self.__sheet_refs)
    def add_sheet_reference(self, formula):
        """Resolve a formula's sheet and external-function references and
        patch the reference indices back into the formula bytecode."""
        patches = []
        n_sheets = len(self.__worksheets)
        sheet_refs, xcall_refs = formula.get_references()
        for ref0, ref1, offset in sheet_refs:
            # Each ref may be a sheet name or a numeric index string.
            if not ref0.isdigit():
                try:
                    ref0n = self.__worksheet_idx_from_name[ref0.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref0)
            else:
                ref0n = self.convert_sheetindex(ref0, n_sheets)
            if ref1 == ref0:
                ref1n = ref0n
            elif not ref1.isdigit():
                try:
                    ref1n = self.__worksheet_idx_from_name[ref1.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref1)
            else:
                ref1n = self.convert_sheetindex(ref1, n_sheets)
            if ref1n < ref0n:
                msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
                    % (ref0, ref1, ref0n, ref1n)
                raise Exception(msg)
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            reference = (self._ownbook_supbookx, ref0n, ref1n)
            if reference in self.__sheet_refs:
                patches.append((offset, self.__sheet_refs[reference]))
            else:
                nrefs = len(self.__sheet_refs)
                if nrefs > 65535:
                    raise Exception('More than 65536 inter-sheet references')
                self.__sheet_refs[reference] = nrefs
                patches.append((offset, nrefs))
        for funcname, offset in xcall_refs:
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            if self._xcall_supbookx is None:
                self.setup_xcall()
            # print funcname, self._supbook_xref
            patches.append((offset, self._xcall_supbook_ref))
            if not isinstance(funcname, unicode):
                funcname = funcname.decode(self.encoding)
            if funcname in self._xcall_xref:
                idx = self._xcall_xref[funcname]
            else:
                self._xcall_xref[funcname] = idx = len(self._xcall_xref)
            # EXTERNNAME indices are 1-based, hence idx + 1.
            patches.append((offset + 2, idx + 1))
        formula.patch_references(patches)
    ##################################################################
    ## BIFF records generation
    ##################################################################
    def __bof_rec(self):
        return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.BOOK_GLOBAL).get()
    def __eof_rec(self):
        return BIFFRecords.EOFRecord().get()
    def __intf_hdr_rec(self):
        return BIFFRecords.InteraceHdrRecord().get()
    def __intf_end_rec(self):
        return BIFFRecords.InteraceEndRecord().get()
    def __intf_mms_rec(self):
        return BIFFRecords.MMSRecord().get()
    def __write_access_rec(self):
        return BIFFRecords.WriteAccessRecord(self.__owner).get()
    def __wnd_protect_rec(self):
        return BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
    def __obj_protect_rec(self):
        return BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
    def __protect_rec(self):
        return BIFFRecords.ProtectRecord(self.__protect).get()
    def __password_rec(self):
        return BIFFRecords.PasswordRecord().get()
    def __prot4rev_rec(self):
        return BIFFRecords.Prot4RevRecord().get()
    def __prot4rev_pass_rec(self):
        return BIFFRecords.Prot4RevPassRecord().get()
    def __backup_rec(self):
        return BIFFRecords.BackupRecord(self.__backup_on_save).get()
    def __hide_obj_rec(self):
        return BIFFRecords.HideObjRecord().get()
    def __window1_rec(self):
        # Pack the window state booleans into the WINDOW1 option flags.
        flags = 0
        flags |= (self.__wnd_hidden) << 0
        flags |= (self.__wnd_mini) << 1
        flags |= (self.__hscroll_visible) << 3
        flags |= (self.__vscroll_visible) << 4
        flags |= (self.__tabs_visible) << 5
        return BIFFRecords.Window1Record(self.__hpos_twips, self.__vpos_twips,
                                         self.__width_twips, self.__height_twips,
                                         flags,
                                         self.__active_sheet, self.__first_tab_index,
                                         self.__selected_tabs, self.__tab_width_twips).get()
    def __codepage_rec(self):
        return BIFFRecords.CodepageBiff8Record().get()
    def __country_rec(self):
        if not self.__country_code:
            return ''
        return BIFFRecords.CountryRecord(self.__country_code, self.__country_code).get()
    def __dsf_rec(self):
        return BIFFRecords.DSFRecord().get()
    def __tabid_rec(self):
        return BIFFRecords.TabIDRecord(len(self.__worksheets)).get()
    def __fngroupcount_rec(self):
        return BIFFRecords.FnGroupCountRecord().get()
    def __datemode_rec(self):
        return BIFFRecords.DateModeRecord(self.__dates_1904).get()
    def __precision_rec(self):
        return BIFFRecords.PrecisionRecord(self.__use_cell_values).get()
    def __refresh_all_rec(self):
        return BIFFRecords.RefreshAllRecord().get()
    def __bookbool_rec(self):
        return BIFFRecords.BookBoolRecord().get()
    def __all_fonts_num_formats_xf_styles_rec(self):
        return self.__styles.get_biff_data()
    def __palette_rec(self):
        # PALETTE record not implemented; emit nothing.
        result = ''
        return result
    def __useselfs_rec(self):
        return BIFFRecords.UseSelfsRecord().get()
    def __boundsheets_rec(self, data_len_before, data_len_after, sheet_biff_lens):
        """Build the BOUNDSHEET records; each needs the absolute stream
        offset of its sheet, so sizes are measured in a first pass."""
        # .................................
        # BOUNDSHEET0
        # BOUNDSHEET1
        # BOUNDSHEET2
        # ..................................
        # WORKSHEET0
        # WORKSHEET1
        # WORKSHEET2
        boundsheets_len = 0
        for sheet in self.__worksheets:
            # First pass: measure record sizes with a dummy offset (0x00L).
            boundsheets_len += len(BIFFRecords.BoundSheetRecord(
                0x00L, sheet.visibility, sheet.name, self.encoding
                ).get())
        start = data_len_before + boundsheets_len + data_len_after
        result = ''
        for sheet_biff_len, sheet in zip(sheet_biff_lens, self.__worksheets):
            result += BIFFRecords.BoundSheetRecord(
                start, sheet.visibility, sheet.name, self.encoding
                ).get()
            start += sheet_biff_len
        return result
    def __all_links_rec(self):
        """Serialise SUPBOOK / EXTERNNAME / EXTERNSHEET records, in index
        order, for the Link Table portion of the globals substream."""
        pieces = []
        temp = [(idx, tag) for tag, idx in self._supbook_xref.items()]
        temp.sort()
        for idx, tag in temp:
            stype, snum = tag
            if stype == 'ownbook':
                rec = BIFFRecords.InternalReferenceSupBookRecord(len(self.__worksheets)).get()
                pieces.append(rec)
            elif stype == 'xcall':
                rec = BIFFRecords.XcallSupBookRecord().get()
                pieces.append(rec)
                temp = [(idx, name) for name, idx in self._xcall_xref.items()]
                temp.sort()
                for idx, name in temp:
                    rec = BIFFRecords.ExternnameRecord(
                        options=0, index=0, name=name, fmla='\x02\x00\x1c\x17').get()
                    pieces.append(rec)
            else:
                raise Exception('unknown supbook stype %r' % stype)
        if len(self.__sheet_refs) > 0:
            # get references in index order
            temp = [(idx, ref) for ref, idx in self.__sheet_refs.items()]
            temp.sort()
            temp = [ref for idx, ref in temp]
            externsheet_record = BIFFRecords.ExternSheetRecord(temp).get()
            pieces.append(externsheet_record)
        return ''.join(pieces)
    def __sst_rec(self):
        return self.__sst.get_biff_record()
    def __ext_sst_rec(self, abs_stream_pos):
        # ExtSST generation is disabled; the record is optional here.
        return ''
        # return BIFFRecords.ExtSSTRecord(abs_stream_pos, self.sst_record.str_placement,
        #    self.sst_record.portions_len).get()
    def get_biff_data(self):
        """Assemble the whole workbook BIFF stream: globals substream
        (before + BOUNDSHEETs + after + ExtSST + EOF) followed by the
        worksheet substreams."""
        before = ''
        before += self.__bof_rec()
        before += self.__intf_hdr_rec()
        before += self.__intf_mms_rec()
        before += self.__intf_end_rec()
        before += self.__write_access_rec()
        before += self.__codepage_rec()
        before += self.__dsf_rec()
        before += self.__tabid_rec()
        before += self.__fngroupcount_rec()
        before += self.__wnd_protect_rec()
        before += self.__protect_rec()
        before += self.__obj_protect_rec()
        before += self.__password_rec()
        before += self.__prot4rev_rec()
        before += self.__prot4rev_pass_rec()
        before += self.__backup_rec()
        before += self.__hide_obj_rec()
        before += self.__window1_rec()
        before += self.__datemode_rec()
        before += self.__precision_rec()
        before += self.__refresh_all_rec()
        before += self.__bookbool_rec()
        before += self.__all_fonts_num_formats_xf_styles_rec()
        before += self.__palette_rec()
        before += self.__useselfs_rec()
        country = self.__country_rec()
        all_links = self.__all_links_rec()
        shared_str_table = self.__sst_rec()
        after = country + all_links + shared_str_table
        ext_sst = self.__ext_sst_rec(0) # need fake cause we need calc stream pos
        eof = self.__eof_rec()
        self.__worksheets[self.__active_sheet].selected = True
        sheets = ''
        sheet_biff_lens = []
        for sheet in self.__worksheets:
            data = sheet.get_biff_data()
            sheets += data
            sheet_biff_lens.append(len(data))
        bundlesheets = self.__boundsheets_rec(len(before), len(after) + len(ext_sst) + len(eof), sheet_biff_lens)
        sst_stream_pos = len(before) + len(bundlesheets) + len(country) + len(all_links)
        ext_sst = self.__ext_sst_rec(sst_stream_pos)
        return before + bundlesheets + after + ext_sst + eof + sheets
    def save(self, filename):
        """Write the workbook to `filename` as an OLE2 compound document."""
        import CompoundDoc
        doc = CompoundDoc.XlsDoc()
        doc.save(filename, self.get_biff_data())
| {
"content_hash": "cdbbdb5faa8501d3cf1b58268fab2a26",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 113,
"avg_line_length": 32.4770206022187,
"alnum_prop": 0.5218367247352754,
"repo_name": "rhefner1/ghidonations",
"id": "3bf8381d4dda4d56612aea4c76c78bb88877410b",
"size": "20524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlwt/Workbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "225830"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "273771"
},
{
"name": "JavaScript",
"bytes": "333908"
},
{
"name": "Python",
"bytes": "1565508"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a boolean flag to AutomaticUpdateRule marking a rule as locked
    # while it is being edited; new rows default to unlocked.
    dependencies = [
        ('data_interfaces', '0014_createscheduleinstanceactiondefinition_start_date_case_property'),
    ]
    operations = [
        migrations.AddField(
            model_name='automaticupdaterule',
            name='locked_for_editing',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "f25a1fc87dea520e0577b339f5c3bc2a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 100,
"avg_line_length": 26,
"alnum_prop": 0.6418269230769231,
"repo_name": "dimagi/commcare-hq",
"id": "48642d8ebf3bcdf4c5b098fba8110e317a1f9913",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/data_interfaces/migrations/0015_automaticupdaterule_locked_for_editing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
class Deque:
    """A double-ended queue backed by a Python list.

    The front of the deque is the *end* of the list, the rear is index 0,
    so removeRear is O(n) while removeFront is O(1).
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the deque holds no items."""
        return not self.items

    def addFront(self, item):
        """Insert `item` at the front (list tail)."""
        self.items.append(item)

    def addRear(self, item):
        """Insert `item` at the rear (list head)."""
        self.items.insert(0, item)

    def removeFront(self):
        """Remove and return the front item."""
        return self.items.pop()

    def removeRear(self):
        """Remove and return the rear item."""
        return self.items.pop(0)

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
| {
"content_hash": "d1d1e6b9d6c2b352e339b51f183b5af6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 33,
"avg_line_length": 19.80952380952381,
"alnum_prop": 0.5673076923076923,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "f6b582b6b8be1e6b04b4dce9721d74e6d985b33b",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonds/basic/deque.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
} |
from . import BaseBackend
import flask
class SessionBackend(BaseBackend):
    """
    The default storage backend. Stores and retrieves OAuth tokens using
    the :ref:`Flask session <flask:sessions>`.
    """

    def __init__(self, key="{bp.name}_oauth_token"):
        """
        Args:
            key (str): The name to use as a key for storing the OAuth token in the
                Flask session. This string will have ``.format(bp=self.blueprint)``
                called on it before it is used. so you can refer to information
                on the blueprint as part of the key. For example, ``{bp.name}``
                will be replaced with the name of the blueprint.
        """
        self.key = key

    def _session_key(self, blueprint):
        # Resolve the session key template against the given blueprint.
        return self.key.format(bp=blueprint)

    def get(self, blueprint):
        return flask.session.get(self._session_key(blueprint))

    def set(self, blueprint, token):
        flask.session[self._session_key(blueprint)] = token

    def delete(self, blueprint):
        del flask.session[self._session_key(blueprint)]
| {
"content_hash": "bd5e56e15cdac915ac6fa9bb4f7fe588",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 34.096774193548384,
"alnum_prop": 0.6045411542100284,
"repo_name": "jsfan/flask-dance",
"id": "5b28588f3f169ee69d57a8f54338ce53f1f02457",
"size": "1057",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_dance/consumer/backend/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "376811"
}
],
"symlink_target": ""
} |
# Path of the AppConfig Django should use for this application.
default_app_config = "taiga.base.apps.BaseAppConfig"
| {
"content_hash": "27166f1e90c87c9139b2b0ddb0fedf38",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.7924528301886793,
"repo_name": "mattcongy/itshop",
"id": "5a7db2f0c3e684ef698b1001ea5f60f795ed0731",
"size": "989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker-images/taigav2/taiga-back/taiga/base/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103474"
},
{
"name": "CoffeeScript",
"bytes": "3380"
},
{
"name": "HTML",
"bytes": "274547"
},
{
"name": "JavaScript",
"bytes": "203660"
},
{
"name": "Nginx",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "3591150"
},
{
"name": "Ruby",
"bytes": "164978"
},
{
"name": "Shell",
"bytes": "5238"
}
],
"symlink_target": ""
} |
def mergesort(unsorted_list):
    """Return a sorted copy of *unsorted_list* using top-down merge sort.

    Stable: when elements compare equal, items from the first half keep
    their relative order. Lists of length < 2 are returned as-is.

    Fixes over the previous version: the Python-2-only debug statement
    ``print sorted_list`` (a SyntaxError under Python 3) is removed, and
    the merge uses running indices instead of ``list.pop(0)``, which was
    O(n) per merged element.
    """
    if len(unsorted_list) < 2:
        return unsorted_list
    midpoint = len(unsorted_list) // 2
    first_half = mergesort(unsorted_list[:midpoint])
    second_half = mergesort(unsorted_list[midpoint:])
    sorted_list = []
    i = j = 0
    # Merge while both halves still have unconsumed elements.
    while i < len(first_half) and j < len(second_half):
        if first_half[i] < second_half[j]:
            sorted_list.append(first_half[i])
            i += 1
        else:
            sorted_list.append(second_half[j])
            j += 1
    # At most one half has a tail left; append it wholesale.
    sorted_list += first_half[i:]
    sorted_list += second_half[j:]
    return sorted_list
def mergesort_inplace(list_to_sort):
    """Sort *list_to_sort* in ascending order in place and return it.

    Top-down merge sort: the two halves are copied, sorted recursively, then
    merged back into the caller's list object, which is reordered in place.
    """
    if len(list_to_sort) <= 1:
        return list_to_sort
    mid = len(list_to_sort) // 2
    left = mergesort_inplace(list_to_sort[:mid])
    right = mergesort_inplace(list_to_sort[mid:])
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            list_to_sort[k] = left[i]
            i += 1
        else:
            list_to_sort[k] = right[j]
            j += 1
        k += 1
    # At most one half still holds elements; copy its tail directly.  The
    # original re-identified the leftover half by comparing cursor values and
    # then testing `remaining == left` with value equality, which is fragile;
    # copying both (possibly empty) tails is equivalent and obviously correct.
    for value in left[i:]:
        list_to_sort[k] = value
        k += 1
    for value in right[j:]:
        list_to_sort[k] = value
        k += 1
    return list_to_sort
# while (len(first_half) > 0) and (len(second_half) > 0):
# if first_half[0] < second_half[0]:
# sorted_list.append(first_half.pop(0))
# else:
# sorted_list.append(second_half.pop(0))
# print sorted_list
# sorted_list += first_half
# sorted_list += second_half
| {
"content_hash": "2ea4b3ae226768f42feb268f840ccc6c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 70,
"avg_line_length": 35.94915254237288,
"alnum_prop": 0.5582272512965583,
"repo_name": "markableidinger/sorting",
"id": "19dffff443517d2025bce68b6343651f80e1a6ed",
"size": "2121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mergesort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7546"
}
],
"symlink_target": ""
} |
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Thread to perform service deletes """
from agent.lib.agent_thread.agent_thread import AgentThread
from pylons import config
from agent.lib.errors import Errors, AgentException
from agent.lib.utils import xstr
import traceback
import logging
LOG = logging.getLogger(__name__)
class AgentThreadCancel(AgentThread):
    """ Cancel a running agent thread identified by its uuid.

    Looks the target thread up in the global thread manager, validates that
    it is a cancelable AgentThread, and stops it if it is still alive.
    Progress and errors are reported through _updateStatus.
    """
    def __init__(self, threadMgr, uuid):
        """ Constructor

        @param threadMgr: thread manager used for categorization
        @param uuid: uuid of the thread to cancel
        """
        AgentThread.__init__(self, threadMgr, cat = [uuid], name = 'cancel_agentthread')
        self.__uuid = uuid
    def doRun(self):
        """ Main body of the thread """
        try:
            appGlobal = config['pylons.app_globals']
            thread = appGlobal.threadMgr.getThreadByUuid(self.__uuid)
            # Check if indeed trying to stop only distribution client threads
            if thread is None:
                self._updateStatus(httpStatus = 500,
                                   error = Errors.AGENT_THREAD_NOT_CANCELABLE,
                                   errorMsg = 'Non-existing thread %s cannot be canceled' % self.__uuid)
                return
            if not (issubclass(thread.__class__, AgentThread)):
                self._updateStatus(httpStatus = 500,
                                   error = Errors.AGENT_THREAD_NOT_CANCELABLE,
                                   errorMsg = 'thread of type %s cannot be canceled' % xstr(thread.__class__))
                return
            self._updateStatus(progress = 50)
            # Ignore if thread is not alive
            if (thread.isAlive()):
                thread.stop()
            self._updateStatus(progress = 100)
        except AgentException as exc:
            # BUG FIX: getMsg was referenced without calling it (compare the
            # exc.getCode() call below), so the message embedded a bound-method
            # repr instead of the exception text.
            msg = 'Could not cancel distribution thread with uuid %s %s' % (self.__uuid, exc.getMsg())
            self._updateStatus(httpStatus = 500, error = exc.getCode(), errorMsg = msg)
        except Exception:
            # Unknown failure: capture a short traceback in the status message.
            code = Errors.UNKNOWN_ERROR
            msg = 'Could not cancel distribution thread with uuid ' + \
                self.__uuid + '(#' + str(code) + '). ' + traceback.format_exc(5)
            self._updateStatus(httpStatus = 500, error = code, errorMsg = msg)
| {
"content_hash": "312da38482641533691551dff6e352eb",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 110,
"avg_line_length": 41.57575757575758,
"alnum_prop": 0.619533527696793,
"repo_name": "eBay/cronus-agent",
"id": "7f0b1e91dbac9a5c8617853c43ddf2797c7288e1",
"size": "2779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/agent/lib/agent_thread/cancel_agentthread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "127210"
},
{
"name": "HTML",
"bytes": "44250"
},
{
"name": "JavaScript",
"bytes": "558476"
},
{
"name": "Python",
"bytes": "589115"
},
{
"name": "Shell",
"bytes": "38184"
}
],
"symlink_target": ""
} |
"""
# Seomoz Backlink Analysis
Discover relevant backlinks.
"""
import os
import csv
import time
import argparse
import numpy as np
import pandas as pd
from lsapi import lsapi
# SECURITY NOTE(review): API credentials are hard-coded here; move them to
# environment variables or a config file before sharing this script.
api = lsapi('member-76bd0a8077', '09e78de0f24fbbf8b41b46623b75d5e6')
# CLI: input CSV of urls to analyze, output CSV of discovered backlinks.
parser = argparse.ArgumentParser(description='Seomoz Analyzer')
parser.add_argument('urls', help='path to list of urls to analyze')
parser.add_argument('links', help='path to list of urls to output')
parser.add_argument('--column', default='resulturl', help='column name')
args = parser.parse_args()
urls = pd.read_csv(args.urls)
urls = set(urls[args.column])
# Resume support: urls already present in an existing output file are skipped.
if os.path.exists(args.links):
    df = pd.read_csv(args.links)
    for url in set(df['url']):
        urls.discard(url)
else:
    df = pd.DataFrame()
print 'Urls remaining:', len(urls)
# Re-hydrate previously collected rows so write_links() rewrites them too.
# (Python 2 script: note the print statement above and xrange below.)
data = list(df.loc[pos].to_dict() for pos in xrange(len(df)))
def write_links():
global data
print 'Writing', args.links
with open(args.links, 'wb') as fptr:
writer = csv.writer(fptr)
header = ['lrid','lsrc','ltgt','luupa','luuu','upa','uu','url']
writer.writerow(header)
for row in data:
cols = map(lambda val: row.get(val, ''), header)
cols = [col if col != np.nan else '' for col in cols]
writer.writerow(cols)
# Main crawl loop: fetch the top external, followed backlinks for each url
# and tag every result row with the source url.  (Python 2 print statements.)
for pos, url in enumerate(urls):
    print 'Processing:', url
    try:
        results = api.links(url, filters=['external+follow'], scope='page_to_page', sort='page_authority', limit=100)
    except Exception as exc:
        # Best-effort: log, back off briefly, and continue with the next url.
        print 'Exception', exc
        time.sleep(5)
        continue
    if results:
        for result in results:
            result['url'] = url
            data.append(result)
    else:
        # Record the url even with no backlinks so it is not retried on resume.
        data.append({'url': url})
    time.sleep(5)  # throttle between API calls
    if pos % 5 == 0:
        # Flush to disk every few urls for crash resilience.
        write_links()
write_links()
| {
"content_hash": "24f39e8ccf728c0158b1515ac17bfd22",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 117,
"avg_line_length": 25.6056338028169,
"alnum_prop": 0.6303630363036303,
"repo_name": "conceptcreative/free_grants_community",
"id": "fc3d57af10138fbce98ba4d8aa9bcbc51f3f716e",
"size": "1818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backlinks/seomoz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24919"
}
],
"symlink_target": ""
} |
#===============================================================================
# Abstraction
#===============================================================================
from core import Generator, Actor, Terminator, Interface, Filter, Flow
#===============================================================================
# Programming Interface
#===============================================================================
from core import register
#===============================================================================
# APIs
#===============================================================================
import pygics
# Flow #########################################################################
from core import _prometheus_flow_by_uuid
@pygics.api('GET', '/flow')
def get_flow(req, fid=None):
    """Return one flow summary (by uuid) or summaries of all registered flows.

    @param req: pygics request object (unused)
    @param fid: optional flow uuid; a KeyError propagates if it is unknown
    """
    def _summary(flow):
        # Shared response shape for both the single and the list case.
        return {'name' : flow._prometheus_name,
                'uuid' : flow._prometheus_uuid,
                'active' : flow.isRun()}
    # Idiom fix: compare against None with `is not None`, not `!= None`.
    if fid is not None:
        return _summary(_prometheus_flow_by_uuid[fid])
    return [_summary(flow) for flow in _prometheus_flow_by_uuid.values()]
@pygics.api('POST', '/flow')
def create_flow(req, name='Non-Named Flow'):
    """Create a new Flow and return its summary (name, uuid, active)."""
    new_flow = Flow(name)
    summary = {'name' : new_flow._prometheus_name,
               'uuid' : new_flow._prometheus_uuid,
               'active' : new_flow.isRun()}
    return summary
@pygics.api('GET', '/startflow')
def start_flow(req, fid):
    """Start the flow identified by *fid* and report whether it is running."""
    target = _prometheus_flow_by_uuid[fid]
    target.start()
    return target.isRun()
@pygics.api('GET', '/stopflow')
def stop_flow(req, fid):
    """Stop the flow identified by *fid*; True once it is no longer running."""
    target = _prometheus_flow_by_uuid[fid]
    target.stop()
    return not target.isRun()
@pygics.api('DELETE', '/flow')
def delete_flow(req, fid):
    """Delete the flow identified by *fid*; returns True on success.

    A KeyError propagates when the uuid is unknown.
    """
    flow = _prometheus_flow_by_uuid[fid]
    flow.delete()
    # The original also executed `del flow`, which only unbinds the local name
    # and has no effect on the registry; flow.delete() does the real work.
    return True
# Generator ####################################################################
from core import _prometheus_generators, _prometheus_generator_by_uuid
@pygics.api('GET', '/generator')
def get_generator(req, gid=None):
    """Return one generator descriptor (by uuid) or the whole catalog."""
    # Idiom fix: identity comparison with None instead of `!= None`.
    if gid is not None:
        cls = _prometheus_generator_by_uuid[gid]
        return _prometheus_generators[cls.VENDOR][cls.TITLE]
    return _prometheus_generators
@pygics.api('POST', '/generator')
def create_generator(req, fid, gid, name='Non-Named Generator'):
    """Instantiate generator *gid*, configure it from the request body, and
    attach it to flow *fid*."""
    target_flow = _prometheus_flow_by_uuid[fid]
    generator = _prometheus_generator_by_uuid[gid]()
    generator.create(**req.data)
    return target_flow.setGenerator(generator)
@pygics.api('DELETE', '/generator')
def delete_generator(req, fid):
    """Detach the generator from flow *fid* and return the flow's result."""
    target_flow = _prometheus_flow_by_uuid[fid]
    return target_flow.delGenerator()
# Actor ########################################################################
from core import _prometheus_actors, _prometheus_actor_by_uuid
@pygics.api('GET', '/actor')
def get_actor(req, aid=None):
    """Return one actor descriptor (by uuid) or the whole catalog."""
    # Idiom fix: identity comparison with None instead of `!= None`.
    if aid is not None:
        cls = _prometheus_actor_by_uuid[aid]
        return _prometheus_actors[cls.VENDOR][cls.TITLE]
    return _prometheus_actors
@pygics.api('POST', '/actor')
def create_actor(req, fid, aid, name='Non-Named Actor'):
    """Instantiate actor *aid*, configure it from the request body, and add it
    as a processor on flow *fid*."""
    target_flow = _prometheus_flow_by_uuid[fid]
    actor = _prometheus_actor_by_uuid[aid]()
    actor.create(**req.data)
    return target_flow.addProcessor(actor)
@pygics.api('DELETE', '/actor')
def delete_actor(req, fid, aid):
    """Remove processor *aid* from flow *fid* and return the flow's result."""
    target_flow = _prometheus_flow_by_uuid[fid]
    return target_flow.delProcessor(aid)
#===============================================================================
# Web
#===============================================================================
from page import *
# Singleton page object registered with the web layer.
prometheus_page = PAGE()
@PAGE.MAIN(prometheus_page, 'Prometheus')
def prometheus_page_main(req):
    # Landing-page markup built with the `page` DSL helpers (DIV/HEAD/PARA).
    return DIV().html(
        HEAD(1).html("Prometheus Main"),
        PARA().html('This is Prometheus Page')
    )
| {
"content_hash": "4dfb7133c0f009b5bd897a620376e62f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 34.771929824561404,
"alnum_prop": 0.4808274470232089,
"repo_name": "pygics-app/prometheus",
"id": "b58d62cc2760afbc1042e6bf3bd0c130fedcfa74",
"size": "3964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22949"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
import redis
from funcy import memoize, merge
from functools import wraps
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Pull all cacheops-related Django settings up front, with safe defaults.
CACHEOPS_REDIS = getattr(settings, 'CACHEOPS_REDIS', None)
CACHEOPS_DEFAULTS = getattr(settings, 'CACHEOPS_DEFAULTS', {})
CACHEOPS = getattr(settings, 'CACHEOPS', {})
CACHEOPS_LRU = getattr(settings, 'CACHEOPS_LRU', False)
CACHEOPS_DEGRADE_ON_FAILURE = getattr(settings, 'CACHEOPS_DEGRADE_ON_FAILURE', False)
FILE_CACHE_DIR = getattr(settings, 'FILE_CACHE_DIR', '/tmp/cacheops_file_cache')
FILE_CACHE_TIMEOUT = getattr(settings, 'FILE_CACHE_TIMEOUT', 60*60*24*30)  # 30 days
ALL_OPS = {'get', 'fetch', 'count', 'exists'}
# Support degradation on redis fail
# NOTE(review): this re-reads the same setting already bound to
# CACHEOPS_DEGRADE_ON_FAILURE above; the two names should be unified.
DEGRADE_ON_FAILURE = getattr(settings, 'CACHEOPS_DEGRADE_ON_FAILURE', False)
def handle_connection_failure(func):
    """Wrap *func* so redis connection failures degrade to a warning.

    When CACHEOPS_DEGRADE_ON_FAILURE is off the function is returned
    untouched; otherwise the wrapper swallows redis.ConnectionError,
    emits a RuntimeWarning, and returns None for that call.
    """
    if not DEGRADE_ON_FAILURE:
        return func
    @wraps(func)
    def degradable(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except redis.ConnectionError as e:
            warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
    return degradable
class SafeRedis(redis.StrictRedis):
    # Only `get` degrades gracefully on connection failure (returns None with
    # a warning); every other command still raises as usual.
    get = handle_connection_failure(redis.StrictRedis.get)
# Connecting to redis
try:
    # NOTE(review): reads settings.CACHEOPS_REDIS directly rather than the
    # CACHEOPS_REDIS constant bound above (which defaults to None) — confirm
    # whether the AttributeError path is still reachable as intended.
    redis_conf = settings.CACHEOPS_REDIS
except AttributeError:
    raise ImproperlyConfigured('You must specify non-empty CACHEOPS_REDIS setting to use cacheops')
redis_client = (SafeRedis if DEGRADE_ON_FAILURE else redis.StrictRedis)(**redis_conf)
@memoize
def prepare_profiles():
    """
    Prepares a dict 'app.model' -> profile, for use in model_profile()
    """
    defaults = {
        'ops': (),
        'local_get': False,
        'db_agnostic': True,
    }
    defaults.update(CACHEOPS_DEFAULTS)
    model_profiles = {}
    for app_model, profile in CACHEOPS.items():
        # An explicit None disables caching for this model entirely.
        if profile is None:
            model_profiles[app_model] = None
            continue
        mp = merge(defaults, profile)
        model_profiles[app_model] = mp
        ops = mp['ops']
        if ops == 'all':
            ops = ALL_OPS
        # People will do that anyway :)
        elif isinstance(ops, six.string_types):
            ops = {ops}
        mp['ops'] = set(ops)
        if 'timeout' not in mp:
            raise ImproperlyConfigured(
                'You must specify "timeout" option in "%s" CACHEOPS profile' % app_model)
    return model_profiles
@memoize
def model_profile(model):
    """
    Returns cacheops profile for a model
    """
    profiles = prepare_profiles()
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    # Most specific match wins: exact model, then app wildcard, then global.
    candidates = ('%s.%s' % (app_label, model_name), '%s.*' % app_label, '*.*')
    for candidate in candidates:
        if candidate in profiles:
            return profiles[candidate]
    return None
| {
"content_hash": "805542eec8a77fcd6f516a9aaf1997de",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 99,
"avg_line_length": 29.602040816326532,
"alnum_prop": 0.6511547742157877,
"repo_name": "Tapo4ek/django-cacheops",
"id": "8315ce118806ee140db919e7e7fff6fe5f105b24",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cacheops/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3956"
},
{
"name": "Lua",
"bytes": "2959"
},
{
"name": "Python",
"bytes": "82809"
}
],
"symlink_target": ""
} |
import asyncio
import datetime
import enum
import json
import math
import time
import warnings
from email.utils import parsedate
from multidict import CIMultiDict, CIMultiDictProxy
from . import hdrs, payload
from .helpers import HeadersMixin, SimpleCookie, sentinel
from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11
__all__ = ('ContentCoding', 'StreamResponse', 'Response', 'json_response')
class ContentCoding(enum.Enum):
    """HTTP content codings supported for response compression."""
    # The content codings that we have support for.
    #
    # Additional registered codings are listed at:
    # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
    deflate = 'deflate'
    gzip = 'gzip'
    identity = 'identity'
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
    """Base class for HTTP responses with a streamed body.

    Accumulates status, headers and cookies before the response is prepared,
    then wraps the request's payload writer once prepare()/_start() has run.
    """
    # Subclasses (e.g. Response) may disable the content-length bookkeeping.
    _length_check = True
    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._compression = False
        self._compression_force = False
        self._cookies = SimpleCookie()
        self._req = None
        self._payload_writer = None
        self._eof_sent = False
        self._body_length = 0
        if headers is not None:
            self._headers = CIMultiDict(headers)
        else:
            self._headers = CIMultiDict()
        self.set_status(status, reason)
    @property
    def prepared(self):
        # True once prepare() has attached a payload writer.
        return self._payload_writer is not None
    @property
    def task(self):
        return getattr(self._req, 'task', None)
    @property
    def status(self):
        return self._status
    @property
    def chunked(self):
        return self._chunked
    @property
    def compression(self):
        return self._compression
    @property
    def reason(self):
        return self._reason
    def set_status(self, status, reason=None, _RESPONSES=RESPONSES):
        """Set the status code; derive the reason phrase when not given."""
        assert not self.prepared, \
            'Cannot change the response status code after ' \
            'the headers have been sent'
        self._status = int(status)
        if reason is None:
            try:
                reason = _RESPONSES[self._status][0]
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and friends; `except (KeyError, IndexError):` would be safer.
                reason = ''
        self._reason = reason
    @property
    def keep_alive(self):
        return self._keep_alive
    def force_close(self):
        # Force the connection closed after this response.
        self._keep_alive = False
    @property
    def body_length(self):
        # Bytes actually written, recorded by write_eof().
        return self._body_length
    @property
    def output_length(self):
        warnings.warn('output_length is deprecated', DeprecationWarning)
        return self._payload_writer.buffer_size
    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        if chunk_size is not None:
            warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)
    def enable_compression(self, force=None):
        """Enables response compression encoding."""
        # Backwards compatibility for when force was a bool <0.17.
        if type(force) == bool:
            force = ContentCoding.deflate if force else ContentCoding.identity
        elif force is not None:
            assert isinstance(force, ContentCoding), ("force should one of "
                                                      "None, bool or "
                                                      "ContentEncoding")
        self._compression = True
        self._compression_force = force
    @property
    def headers(self):
        return self._headers
    @property
    def cookies(self):
        return self._cookies
    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.
        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie: start fresh instead of updating the tombstone
            self._cookies.pop(name, None)
        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
            # clear a del_cookie() expiry left over from a previous delete
            del c['expires']
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        elif 'max-age' in c:
            del c['max-age']
        c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version
    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.
        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0,
                        expires="Thu, 01 Jan 1970 00:00:00 GMT",
                        domain=domain, path=path)
    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length
    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self._headers[hdrs.CONTENT_LENGTH] = str(value)
        else:
            self._headers.pop(hdrs.CONTENT_LENGTH, None)
    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type
    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()
    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset
    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()
    @property
    def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
        """The value of Last-Modified HTTP header, or None.
        This header is represented as a `datetime` object.
        """
        # NOTE: the extra default argument on a property getter is a lookup
        # micro-optimization; the getter is still invoked with self only.
        httpdate = self.headers.get(_LAST_MODIFIED)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None
    @last_modified.setter
    def last_modified(self, value):
        # Accepts None (remove header), a POSIX timestamp, a datetime,
        # or a preformatted string.
        if value is None:
            self.headers.pop(hdrs.LAST_MODIFIED, None)
        elif isinstance(value, (int, float)):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
        elif isinstance(value, datetime.datetime):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
        elif isinstance(value, str):
            self.headers[hdrs.LAST_MODIFIED] = value
    @property
    def tcp_nodelay(self):
        payload_writer = self._payload_writer
        assert payload_writer is not None, \
            "Cannot get tcp_nodelay for not prepared response"
        return payload_writer.tcp_nodelay
    def set_tcp_nodelay(self, value):
        payload_writer = self._payload_writer
        assert payload_writer is not None, \
            "Cannot set tcp_nodelay for not prepared response"
        payload_writer.set_tcp_nodelay(value)
    @property
    def tcp_cork(self):
        payload_writer = self._payload_writer
        assert payload_writer is not None, \
            "Cannot get tcp_cork for not prepared response"
        return payload_writer.tcp_cork
    def set_tcp_cork(self, value):
        payload_writer = self._payload_writer
        assert payload_writer is not None, \
            "Cannot set tcp_cork for not prepared response"
        payload_writer.set_tcp_cork(value)
    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Rebuild Content-Type from the cached type + params (e.g. charset).
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype
    def _do_start_compression(self, coding):
        if coding != ContentCoding.identity:
            self.headers[hdrs.CONTENT_ENCODING] = coding.value
            self._payload_writer.enable_compression(coding.value)
            # Compressed payload size is unknown, so switch to chunked.
            self._chunked = True
    def _start_compression(self, request):
        if self._compression_force:
            self._do_start_compression(self._compression_force)
        else:
            # Negotiate: first supported coding found in Accept-Encoding wins.
            accept_encoding = request.headers.get(
                hdrs.ACCEPT_ENCODING, '').lower()
            for coding in ContentCoding:
                if coding.value in accept_encoding:
                    self._do_start_compression(coding)
                    return
    @asyncio.coroutine
    def prepare(self, request):
        # Idempotent: returns the existing writer if already prepared,
        # or None after EOF has been sent.
        if self._eof_sent:
            return
        if self._payload_writer is not None:
            return self._payload_writer
        yield from request._prepare_hook(self)
        return self._start(request)
    def _start(self, request,
               HttpVersion10=HttpVersion10,
               HttpVersion11=HttpVersion11,
               CONNECTION=hdrs.CONNECTION,
               DATE=hdrs.DATE,
               SERVER=hdrs.SERVER,
               CONTENT_TYPE=hdrs.CONTENT_TYPE,
               CONTENT_LENGTH=hdrs.CONTENT_LENGTH,
               SET_COOKIE=hdrs.SET_COOKIE,
               SERVER_SOFTWARE=SERVER_SOFTWARE,
               TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):
        # Emit the status line and headers; returns the payload writer.
        # (Keyword defaults are pre-bound constants for faster lookup.)
        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive
        version = request.version
        writer = self._payload_writer = request._writer
        headers = self._headers
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            headers.add(SET_COOKIE, value)
        if self._compression:
            self._start_compression(request)
        if self._chunked:
            if version != HttpVersion11:
                raise RuntimeError(
                    "Using chunked encoding is forbidden "
                    "for HTTP/{0.major}.{0.minor}".format(request.version))
            writer.enable_chunking()
            headers[TRANSFER_ENCODING] = 'chunked'
            if CONTENT_LENGTH in headers:
                # chunked and Content-Length are mutually exclusive
                del headers[CONTENT_LENGTH]
        elif self._length_check:
            writer.length = self.content_length
            if writer.length is None and version >= HttpVersion11:
                # unknown length on HTTP/1.1: fall back to chunked
                writer.enable_chunking()
                headers[TRANSFER_ENCODING] = 'chunked'
                if CONTENT_LENGTH in headers:
                    del headers[CONTENT_LENGTH]
        headers.setdefault(CONTENT_TYPE, 'application/octet-stream')
        headers.setdefault(DATE, request.time_service.strtime())
        headers.setdefault(SERVER, SERVER_SOFTWARE)
        # connection header
        if CONNECTION not in headers:
            if keep_alive:
                if version == HttpVersion10:
                    headers[CONNECTION] = 'keep-alive'
            else:
                if version == HttpVersion11:
                    headers[CONNECTION] = 'close'
        # status line
        status_line = 'HTTP/{}.{} {} {}\r\n'.format(
            version[0], version[1], self._status, self._reason)
        writer.write_headers(status_line, headers)
        return writer
    def write(self, data):
        """Write a chunk of body data; only valid between prepare() and EOF."""
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            "data argument must be byte-ish (%r)" % type(data)
        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._payload_writer is None:
            raise RuntimeError("Cannot call write() before prepare()")
        return self._payload_writer.write(data)
    @asyncio.coroutine
    def drain(self):
        # Flow control: wait until the transport buffer drains.
        assert not self._eof_sent, "EOF has already been sent"
        assert self._payload_writer is not None, \
            "Response has not been started"
        yield from self._payload_writer.drain()
    @asyncio.coroutine
    def write_eof(self, data=b''):
        """Finish the body; records the final byte count and drops the writer."""
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            "data argument must be byte-ish (%r)" % type(data)
        if self._eof_sent:
            return
        assert self._payload_writer is not None, \
            "Response has not been started"
        yield from self._payload_writer.write_eof(data)
        self._eof_sent = True
        self._req = None
        self._body_length = self._payload_writer.output_size
        self._payload_writer = None
    def __repr__(self):
        if self._eof_sent:
            info = "eof"
        elif self.prepared:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not prepared"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)
class Response(StreamResponse):
    """HTTP response with a fully buffered body (bytes, text, or payload)."""
    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None,
                 charset=None):
        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together")
        if headers is None:
            headers = CIMultiDict()
        elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
            headers = CIMultiDict(headers)
        if content_type is not None and ";" in content_type:
            raise ValueError("charset must not be in content_type "
                             "argument")
        if text is not None:
            # Explicit Content-Type header and content_type/charset params
            # are mutually exclusive.
            if hdrs.CONTENT_TYPE in headers:
                if content_type or charset:
                    raise ValueError("passing both Content-Type header and "
                                     "content_type or charset params "
                                     "is forbidden")
            else:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError("text argument must be str (%r)" %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                if charset is None:
                    charset = 'utf-8'
                headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=' + charset)
                # Encode here so the generic body path below is used.
                body = text.encode(charset)
                text = None
        else:
            if hdrs.CONTENT_TYPE in headers:
                if content_type is not None or charset is not None:
                    raise ValueError("passing both Content-Type header and "
                                     "content_type or charset params "
                                     "is forbidden")
            else:
                if content_type is not None:
                    if charset is not None:
                        content_type += '; charset=' + charset
                    headers[hdrs.CONTENT_TYPE] = content_type
        super().__init__(status=status, reason=reason, headers=headers)
        if text is not None:
            self.text = text
        else:
            self.body = body
    @property
    def body(self):
        return self._body
    @body.setter
    def body(self, body,
             CONTENT_TYPE=hdrs.CONTENT_TYPE,
             CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        # Accepts None, raw bytes-like data, or anything registered with the
        # payload registry (files, streams, ...).
        if body is None:
            self._body = None
            self._body_payload = False
        elif isinstance(body, (bytes, bytearray)):
            self._body = body
            self._body_payload = False
        else:
            try:
                self._body = body = payload.PAYLOAD_REGISTRY.get(body)
            except payload.LookupError:
                raise ValueError('Unsupported body type %r' % type(body))
            self._body_payload = True
            headers = self._headers
            # enable chunked encoding if needed
            if not self._chunked and CONTENT_LENGTH not in headers:
                size = body.size
                if size is None:
                    self._chunked = True
                elif CONTENT_LENGTH not in headers:
                    headers[CONTENT_LENGTH] = str(size)
            # set content-type
            if CONTENT_TYPE not in headers:
                headers[CONTENT_TYPE] = body.content_type
            # copy payload headers
            if body.headers:
                for (key, value) in body.headers.items():
                    if key not in headers:
                        headers[key] = value
    @property
    def text(self):
        # Decoded view of the buffered body; None when no body is set.
        if self._body is None:
            return None
        return self._body.decode(self.charset or 'utf-8')
    @text.setter
    def text(self, text):
        assert text is None or isinstance(text, str), \
            "text argument must be str (%r)" % type(text)
        if self.content_type == 'application/octet-stream':
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'
        self._body = text.encode(self.charset)
        self._body_payload = False
    @property
    def content_length(self):
        # Derived from the buffered body unless chunked or explicitly set.
        if self._chunked:
            return None
        if hdrs.CONTENT_LENGTH in self.headers:
            return super().content_length
        if self._body is not None:
            return len(self._body)
        else:
            return 0
    @content_length.setter
    def content_length(self, value):
        raise RuntimeError("Content length is set automatically")
    @asyncio.coroutine
    def write_eof(self):
        # Send the buffered body (skipped for HEAD requests and 204/304),
        # then finalize via the parent implementation.
        body = self._body
        if body is not None:
            if (self._req._method == hdrs.METH_HEAD or
                    self._status in [204, 304]):
                yield from super().write_eof()
            elif self._body_payload:
                yield from body.write(self._payload_writer)
                yield from super().write_eof()
            else:
                yield from super().write_eof(body)
        else:
            yield from super().write_eof()
    def _start(self, request):
        # Pin Content-Length from the buffered body before headers go out.
        if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:
            if self._body is not None:
                self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))
            else:
                self._headers[hdrs.CONTENT_LENGTH] = '0'
        return super()._start(request)
def json_response(data=sentinel, *, text=None, body=None, status=200,
                  reason=None, headers=None, content_type='application/json',
                  dumps=json.dumps):
    """Shortcut for building a Response whose payload is JSON-serialized."""
    if data is not sentinel:
        # Guard clause: `data` is mutually exclusive with text/body.
        if text or body:
            raise ValueError(
                "only one of data, text, or body should be specified"
            )
        text = dumps(data)
    return Response(text=text, body=body, status=status, reason=reason,
                    headers=headers, content_type=content_type)
| {
"content_hash": "f45cd4f1ad101376a2d04563a2f1b6d8",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 91,
"avg_line_length": 33.53255425709516,
"alnum_prop": 0.5577516678283382,
"repo_name": "AraHaanOrg/aiohttp",
"id": "f1de727322405331937801a0fded9d4ee57eabd0",
"size": "20086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/web_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Makefile",
"bytes": "2817"
},
{
"name": "Python",
"bytes": "1233163"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
} |
"""Namespace Manager Module."""
from .namespace_manager import *
| {
"content_hash": "96ab21f7779a480e9eafeee2d00313b3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 32,
"avg_line_length": 8.875,
"alnum_prop": 0.676056338028169,
"repo_name": "Suwmlee/XX-Net",
"id": "a0035aeafa77c0cbf1ed8834f15349996cfa8c6b",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "gae_proxy/server/lib/google/appengine/api/namespace_manager/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
} |
from os import environ
from fabric.api import run, env, sudo, task, cd, settings
from gitric.api import git_seed, git_reset, allow_dirty
from fabtools import deb, require
# Target host and user come from the environment so no secrets live here.
env.user = environ['DOTFILES_USER']
env.hosts = environ['DOTFILES_HOST']
allow_dirty = allow_dirty  # Silence flake8 (re-export used as a fab task)
# Remote paths derived from the configured user.
home_dir = "/home/%s" % env.user
dot_dir = "%s/.files" % home_dir
@task
def echo():
    # Minimal connectivity smoke test: runs `echo foo` on the remote host.
    run("echo foo")
def wget(cmd):
    """Run wget on the remote host with *cmd* appended verbatim."""
    # Equivalent to the original "wget %s" % cmd interpolation.
    run("wget " + cmd)
@task
def setup():
    # Provision a fresh Debian machine and deploy the dotfiles.
    deb.update_index()
    deb.upgrade()
    require.deb.packages([
        "build-essential", "i3", "unzip", "xclip", "curl", "git", "sudo",
        "xdm", "iw", "network-manager", "firmware-atheros", "xfce4-terminal"])
    # Install Linuxbrew, then seed and reset the dotfiles repo on the remote.
    run('sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"') # NOQA
    run("touch private.sh")
    git_seed(dot_dir)
    git_reset(dot_dir)
    with cd(dot_dir):
        # warn_only: the copy globs may legitimately fail on a clean home dir.
        with settings(warn_only=True):
            run("cp home/.* ~")
            run("cp -R fonts/ ~/.fonts")
            run("fc-cache -rf")
            run("locales")  # NOTE(review): likely meant `locale` or `locale-gen` — confirm
            run("brew install gcc ruby curl python3 neovim bash bash-completion@2 git pipenv tmux") # NOQA
            run("pip3 install pwdman hostscli neovim tmuxp")
            sudo("hostscli block_all")
            # Install vim-plug, then run PlugInstall headlessly.
            run("curl -fLo ~/.local/share/nvim/site/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim") # NOQA
            run('nvim -c "PlugInstall | q | q"')
'''
Some useful commands for a new machine
su -
ip link list
ip link set enp0s20f0u3 up
nmtui
lspci
apt install i3
vi /etc/apt/sources.list
iw config
'''
| {
"content_hash": "0d499758bcc95fec0a7882f7a0a752e1",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 152,
"avg_line_length": 26.779661016949152,
"alnum_prop": 0.6455696202531646,
"repo_name": "dhilipsiva/.files",
"id": "74a3cd18cbbe851758d4ea6fa67ae7a086c4a414",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1106"
},
{
"name": "Nu",
"bytes": "2560"
},
{
"name": "Shell",
"bytes": "10219"
},
{
"name": "Vim Script",
"bytes": "5143"
}
],
"symlink_target": ""
} |
import base64
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.samlp import NameIDPolicy
from s2repoze.plugins.sp import make_plugin
from saml2.server import Server
# Canned WSGI environ mimicking a CherryPy GET request; used as a fixture when
# driving the SP middleware without a real web server.
ENV1 = {'SERVER_SOFTWARE': 'CherryPy/3.1.2 WSGI Server',
        'SCRIPT_NAME': '',
        'ACTUAL_SERVER_PROTOCOL': 'HTTP/1.1',
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/krissms',
        'SERVER_PROTOCOL': 'HTTP/1.1',
        'QUERY_STRING': '',
        'REMOTE_ADDR': '127.0.0.1',
        'HTTP_USER_AGENT':
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) ',
        'HTTP_CONNECTION': 'keep-alive',
        'SERVER_NAME': 'lingon-catalogix-se-2.local',
        'REMOTE_PORT': '57309',
        'wsgi.url_scheme': 'http',
        'SERVER_PORT': '8087',
        'HTTP_HOST': '127.0.0.1:8087',
        'wsgi.multithread': True,
        'HTTP_ACCEPT':
        'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
        'wsgi.version': (1, 0),
        'wsgi.run_once': False,
        'wsgi.multiprocess': False,
        'HTTP_ACCEPT_LANGUAGE': 'en-us',
        'HTTP_ACCEPT_ENCODING': 'gzip, deflate'}
# Ask the IdP for transient name identifiers; allow_create is the string
# "true" because the SAML schema serializes booleans as strings.
trans_name_policy = NameIDPolicy(format=NAMEID_FORMAT_TRANSIENT,
                                 allow_create="true")
# Authentication-statement arguments passed to create_authn_response().
AUTHN = {
    "class_ref": INTERNETPROTOCOLPASSWORD,
    "authn_auth": "http://www.example.com/login"
}
class TestSP():
    """Exercises the s2repoze SP plugin end-to-end: an in-process IdP builds a
    SAMLResponse which the SP plugin then evaluates."""
    def setup_class(self):
        # SP middleware and IdP server are built from the test config files
        # shipped alongside this module ("server_conf", "idp_conf").
        self.sp = make_plugin("rem", saml_conf="server_conf")
        self.server = Server(config_file="idp_conf")
    def test_setup(self):
        # Plugin construction alone must succeed.
        assert self.sp
    def test_identify(self):
        # Create a SAMLResponse
        ava = { "givenName": ["Derek"], "surName": ["Jeter"],
                "mail": ["derek@nyy.mlb.com"], "title":["The man"]}
        resp_str = "%s" % self.server.create_authn_response(
            ava, "id1", "http://lingon.catalogix.se:8087/",
            "urn:mace:example.com:saml:roland:sp", trans_name_policy,
            "foba0001@example.com", authn=AUTHN)
        # NOTE(review): base64.encodestring is the Python 2 spelling (removed
        # in 3.9; encodebytes is the replacement) — confirm the target Python
        # before modernizing, as encodebytes does not exist on Python 2.
        resp_str = base64.encodestring(resp_str)
        # The SP only accepts responses to requests it knows about; register
        # the outstanding request id -> RelayState mapping by hand.
        self.sp.outstanding_queries = {"id1":"http://www.example.com/service"}
        session_info = self.sp._eval_authn_response({},
                                        {"SAMLResponse": resp_str})
        assert len(session_info) > 1
        assert session_info["came_from"] == 'http://www.example.com/service'
        # 'surName' comes back as 'sn' — presumably the attribute-map in
        # server_conf renames it; verify against that config.
        assert session_info["ava"] == {'givenName': ['Derek'],
                                       'mail': ['derek@nyy.mlb.com'],
                                       'sn': ['Jeter'],
                                       'title': ['The man']}
# Ad-hoc manual entry point: runs the response-evaluation test directly,
# without a test runner (test_setup is intentionally skipped).
if __name__ == "__main__":
    _sp = TestSP()
    _sp.setup_class()
    _sp.test_identify()
| {
"content_hash": "35dd20061b67eeda640a817dc31daa30",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 102,
"avg_line_length": 35.33766233766234,
"alnum_prop": 0.565600882028666,
"repo_name": "Runscope/pysaml2",
"id": "cb07c65ec6842bc6c96f1ea27255164aa90f32ff",
"size": "2768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_60_sp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46"
},
{
"name": "Python",
"bytes": "2602966"
},
{
"name": "Shell",
"bytes": "4371"
}
],
"symlink_target": ""
} |
from twisted.internet import reactor
from twisted.spread import pb
from twisted.python import log
from buildbot import util
from collections import defaultdict
class StepProgress:
    """I keep track of how much progress a single BuildStep has made.
    Progress is measured along various axes. Time consumed is one that is
    available for all steps. Amount of command output is another, and may be
    better quantified by scanning the output for markers to derive number of
    files compiled, directories walked, tests run, etc.
    I am created when the build begins, and given to a BuildProgress object
    so it can track the overall progress of the whole build.
    """
    # Wall-clock bounds of the step; None until start()/finish() run.
    startTime = None
    stopTime = None
    # Expected duration in seconds, supplied via setExpectedTime(); None = unknown.
    expectedTime = None
    # Back-reference to the owning BuildProgress, set via setBuildProgress().
    buildProgress = None
    # When True, traces progress changes to stdout (Python 2 print statements).
    debug = False
    def __init__(self, name, metricNames):
        # name: step name; metricNames: the axes progress is reported along.
        self.name = name
        # Latest observed value per metric; None until first setProgress().
        self.progress = {}
        # Target value per metric; None until setExpectations().
        self.expectations = {}
        for m in metricNames:
            self.progress[m] = None
            self.expectations[m] = None
    def setBuildProgress(self, bp):
        """Attach the BuildProgress aggregator that owns this step."""
        self.buildProgress = bp
    def setExpectations(self, metrics):
        """The step can call this to explicitly set a target value for one
        of its metrics. E.g., ShellCommands knows how many commands it will
        execute, so it could set the 'commands' expectation."""
        for metric, value in metrics.items():
            self.expectations[metric] = value
        self.buildProgress.newExpectations()
    def setExpectedTime(self, seconds):
        """Set the anticipated wall-clock duration of this step."""
        self.expectedTime = seconds
        self.buildProgress.newExpectations()
    def start(self):
        """Record the step's start timestamp."""
        if self.debug: print "StepProgress.start[%s]" % self.name
        self.startTime = util.now()
    def setProgress(self, metric, value):
        """The step calls this as progress is made along various axes."""
        if self.debug:
            print "setProgress[%s][%s] = %s" % (self.name, metric, value)
        self.progress[metric] = value
        if self.debug:
            r = self.remaining()
            print " step remaining:", r
        self.buildProgress.newProgress()
    def finish(self):
        """This stops the 'time' metric and marks the step as finished
        overall. It should be called after the last .setProgress has been
        done for each axis."""
        if self.debug: print "StepProgress.finish[%s]" % self.name
        self.stopTime = util.now()
        self.buildProgress.stepFinished(self.name)
    def totalTime(self):
        """Return elapsed seconds for a completed step; implicitly returns
        None if the step has not both started and stopped."""
        if self.startTime != None and self.stopTime != None:
            return self.stopTime - self.startTime
    def remaining(self):
        """Estimate seconds remaining: average of per-metric completion
        fractions scaled by expectedTime, falling back to pure elapsed time,
        or None when nothing is known."""
        if self.startTime == None:
            return self.expectedTime
        if self.stopTime != None:
            return 0 # already finished
        # TODO: replace this with cleverness that graphs each metric vs.
        # time, then finds the inverse function. Will probably need to save
        # a timestamp with each setProgress update, when finished, go back
        # and find the 2% transition points, then save those 50 values in a
        # list. On the next build, do linear interpolation between the two
        # closest samples to come up with a percentage represented by that
        # metric.
        # TODO: If no other metrics are available, just go with elapsed
        # time. Given the non-time-uniformity of text output from most
        # steps, this would probably be better than the text-percentage
        # scheme currently implemented.
        percentages = []
        for metric, value in self.progress.items():
            expectation = self.expectations[metric]
            if value != None and expectation != None:
                # Fraction complete for this metric; 1.0 forces float division
                # under Python 2 integer semantics.
                p = 1.0 * value / expectation
                percentages.append(p)
        if percentages:
            avg = reduce(lambda x,y: x+y, percentages) / len(percentages)
            if avg > 1.0:
                # overdue
                avg = 1.0
            if avg < 0.0:
                avg = 0.0
        if percentages and self.expectedTime != None:
            return self.expectedTime - (avg * self.expectedTime)
        if self.expectedTime is not None:
            # fall back to pure time
            return self.expectedTime - (util.now() - self.startTime)
        return None # no idea
class WatcherState:
    """Per-subscriber bookkeeping used by BuildProgress.

    Records how often the watcher may receive updates, the pending
    rate-limit timer (if any), and whether an update was suppressed
    while the timer was running.
    """
    def __init__(self, interval):
        # Minimum number of seconds between two updates to this watcher.
        self.interval = interval
        # Outstanding reactor timer; None when no throttle window is active.
        self.timer = None
        # Nonzero when an update arrived while throttled and must be re-sent.
        self.needUpdate = 0
class BuildProgress(pb.Referenceable):
    """I keep track of overall build progress. I hold a list of StepProgress
    objects.
    """
    def __init__(self, stepProgresses):
        # Map of step name -> StepProgress; each step gets a back-reference.
        self.steps = {}
        for s in stepProgresses:
            self.steps[s.name] = s
            s.setBuildProgress(self)
        # Names of steps that have called stepFinished().
        self.finishedSteps = []
        # Map of remote subscriber -> WatcherState (rate-limit bookkeeping).
        self.watchers = {}
        self.debug = 0
    def setExpectationsFrom(self, exp):
        """Set our expectations from the builder's Expectations object."""
        for name, metrics in exp.steps.items():
            s = self.steps.get(name)
            if s:
                s.setExpectedTime(exp.times[name])
                s.setExpectations(exp.steps[name])
    def newExpectations(self):
        """Call this when one of the steps has changed its expectations.
        This should trigger us to update our ETA value and notify any
        subscribers."""
        pass # subscribers are not implemented: they just poll
    def stepFinished(self, stepname):
        """Record a finished step; when all steps are done, flush final
        updates to every watcher."""
        assert(stepname not in self.finishedSteps)
        self.finishedSteps.append(stepname)
        if len(self.finishedSteps) == len(self.steps.keys()):
            self.sendLastUpdates()
    def newProgress(self):
        """A step reported progress: push updates if an ETA is computable."""
        r = self.remaining()
        if self.debug:
            print " remaining:", r
        if r != None:
            self.sendAllUpdates()
    def remaining(self):
        # sum eta of all steps
        # NOTE(review): the local name 'sum' shadows the builtin.
        sum = 0
        for name, step in self.steps.items():
            rem = step.remaining()
            if rem == None:
                return None # not sure
            sum += rem
        return sum
    def eta(self):
        """Absolute completion time (util.now() + remaining), or None."""
        left = self.remaining()
        if left == None:
            return None # not sure
        done = util.now() + left
        return done
    def remote_subscribe(self, remote, interval=5):
        # [interval, timer, needUpdate]
        # don't send an update more than once per interval
        self.watchers[remote] = WatcherState(interval)
        remote.notifyOnDisconnect(self.removeWatcher)
        self.updateWatcher(remote)
        self.startTimer(remote)
        log.msg("BuildProgress.remote_subscribe(%s)" % remote)
    def remote_unsubscribe(self, remote):
        # TODO: this doesn't work. I think 'remote' will always be different
        # than the object that appeared in _subscribe.
        log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
        self.removeWatcher(remote)
        #remote.dontNotifyOnDisconnect(self.removeWatcher)
    def removeWatcher(self, remote):
        """Drop a subscriber and cancel its pending timer, if any."""
        #log.msg("removeWatcher(%s)" % remote)
        try:
            timer = self.watchers[remote].timer
            if timer:
                timer.cancel()
            del self.watchers[remote]
        except KeyError:
            log.msg("Weird, removeWatcher on non-existent subscriber:",
                    remote)
    def sendAllUpdates(self):
        """Offer an update to every watcher (each is individually throttled)."""
        for r in self.watchers.keys():
            self.updateWatcher(r)
    def updateWatcher(self, remote):
        # an update wants to go to this watcher. Send it if we can, otherwise
        # queue it for later
        w = self.watchers[remote]
        if not w.timer:
            # no timer, so send update now and start the timer
            self.sendUpdate(remote)
            self.startTimer(remote)
        else:
            # timer is running, just mark as needing an update
            w.needUpdate = 1
    def startTimer(self, remote):
        """Open a throttle window: no further sends until the timer fires."""
        w = self.watchers[remote]
        timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
        w.timer = timer
    def sendUpdate(self, remote, last=0):
        """Push the current ETA over PB; last=1 also sends 'finished'."""
        self.watchers[remote].needUpdate = 0
        #text = self.asText() # TODO: not text, duh
        try:
            remote.callRemote("progress", self.remaining())
            if last:
                remote.callRemote("finished", self)
        # NOTE(review): bare except silently treats any error as a dead
        # subscriber; deliberate best-effort here, but it hides real bugs.
        except:
            log.deferr()
            self.removeWatcher(remote)
    def watcherTimeout(self, remote):
        """Throttle timer fired: deliver a queued update, if one is pending."""
        w = self.watchers.get(remote, None)
        if not w:
            return # went away
        w.timer = None
        if w.needUpdate:
            self.sendUpdate(remote)
            self.startTimer(remote)
    def sendLastUpdates(self):
        """Build complete: send a final update to, and drop, every watcher."""
        for remote in self.watchers.keys():
            self.sendUpdate(remote, 1)
            self.removeWatcher(remote)
class Expectations:
    """Exponentially-weighted history of per-step durations and metric
    totals, used to predict how long the next build will take."""
    debug = False
    # decay=1.0 ignores all but the last build
    # 0.9 is short time constant. 0.1 is very long time constant
    # TODO: let decay be specified per-metric
    decay = 0.5
    def __init__(self, buildprogress):
        """Create us from a successful build. We will expect each step to
        take as long as it did in that build."""
        # .steps maps stepname to dict2
        # dict2 maps metricname to final end-of-step value
        self.steps = defaultdict(dict)
        # .times maps stepname to per-step elapsed time
        self.times = {}
        for name, step in buildprogress.steps.items():
            self.steps[name] = {}
            for metric, value in step.progress.items():
                self.steps[name][metric] = value
            # None means the step never both started and stopped.
            self.times[name] = None
            if step.startTime is not None and step.stopTime is not None:
                self.times[name] = step.stopTime - step.startTime
    def wavg(self, old, current):
        """Exponentially-weighted average; a None on either side simply
        yields the other value."""
        if old is None:
            return current
        if current is None:
            return old
        else:
            return (current * self.decay) + (old * (1 - self.decay))
    def update(self, buildprogress):
        """Fold a newly finished build's timings and metrics into the
        running weighted averages."""
        for name, stepprogress in buildprogress.steps.items():
            old = self.times.get(name)
            current = stepprogress.totalTime()
            if current == None:
                log.msg("Expectations.update: current[%s] was None!" % name)
                continue
            new = self.wavg(old, current)
            self.times[name] = new
            if self.debug:
                print "new expected time[%s] = %s, old %s, cur %s" % \
                      (name, new, old, current)
            for metric, current in stepprogress.progress.items():
                old = self.steps[name].get(metric)
                new = self.wavg(old, current)
                if self.debug:
                    print "new expectation[%s][%s] = %s, old %s, cur %s" % \
                          (name, metric, new, old, current)
                self.steps[name][metric] = new
    def expectedBuildTime(self):
        """Total predicted build duration, or None if any step is unknown."""
        if None in self.times.values():
            return None
        #return sum(self.times.values())
        # python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
        s = 0
        for v in self.times.values():
            s += v
        return s
| {
"content_hash": "2a6ed3025565cbe6610204f6752c2bae",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 77,
"avg_line_length": 36.564935064935064,
"alnum_prop": 0.5903924702539514,
"repo_name": "denny820909/builder",
"id": "62aae3171d33f009d509ad6cdc14c8a733501e17",
"size": "11969",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/status/progress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
from juriscraper.opinions.united_states.state import delaware
class Site(delaware.Site):
    """Scraper for Delaware Superior Court opinions.

    All scraping logic lives in the shared Delaware base class; this
    subclass only points it at the Superior Court listing page.
    """
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = 'http://courts.delaware.gov/opinions/List.aspx?ag=Superior%20Court'
| {
"content_hash": "2ea60aba1ebd4984ed34e3175fb14db8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 86,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.657051282051282,
"repo_name": "m4h7/juriscraper",
"id": "7850914affc6ee88e792218ac98ba15c9c6056ac",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "juriscraper/opinions/united_states/state/delsuperct.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "27160373"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "623951"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._gallery_application_versions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_gallery_application_request,
build_update_request,
)
T = TypeVar("T")
# Shape of the optional ``cls`` response hook: called with the pipeline
# response, the deserialized body, and a dict of response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryApplicationVersionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_10_01.aio.ComputeManagementClient`'s
:attr:`gallery_application_versions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: Union[_models.GalleryApplicationVersion, IO],
        **kwargs: Any
    ) -> _models.GalleryApplicationVersion:
        """Send the initial PUT of the create-or-update long-running
        operation and deserialize the first response (200/201/202)."""
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GalleryApplicationVersion]
        content_type = content_type or "application/json"
        # A stream/bytes body is sent raw; a model body is serialized to JSON.
        _json = None
        _content = None
        if isinstance(gallery_application_version, (IO, bytes)):
            _content = gallery_application_version
        else:
            _json = self._serialize.body(gallery_application_version, "GalleryApplicationVersion")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # All accepted status codes carry the same body shape.
        if response.status_code == 200:
            deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"}  # type: ignore
    # Typing overload only (model-typed JSON body); the runtime implementation
    # is the @distributed_trace_async definition of the same name.
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: _models.GalleryApplicationVersion,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Create or update a gallery Application Version.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be created. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         created. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the create or update gallery
         Application Version operation. Required.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing overload only (raw IO body); the runtime implementation is the
    # @distributed_trace_async definition of the same name.
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Create or update a gallery Application Version.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be created. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         created. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the create or update gallery
         Application Version operation. Required.
        :type gallery_application_version: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: Union[_models.GalleryApplicationVersion, IO],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Create or update a gallery Application Version.
        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be created. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         created. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the create or update gallery
         Application Version operation. Is either a model type or a IO type. Required.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GalleryApplicationVersion]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # With no continuation token, fire the initial PUT; cls=lambda keeps the
        # raw pipeline response so the poller can drive the LRO itself.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                gallery_application_version_name=gallery_application_version_name,
                gallery_application_version=gallery_application_version,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response; honor the caller's cls hook.
            deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling may be True (default ARM polling), False (no polling), or a
        # caller-provided AsyncPollingMethod instance.
        if polling is True:
            polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))  # type: AsyncPollingMethod
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: Union[_models.GalleryApplicationVersionUpdate, IO],
        **kwargs: Any
    ) -> _models.GalleryApplicationVersion:
        """Send the initial PATCH of the update long-running operation and
        deserialize the 200 response."""
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GalleryApplicationVersion]
        content_type = content_type or "application/json"
        # A stream/bytes body is sent raw; a model body is serialized to JSON.
        _json = None
        _content = None
        if isinstance(gallery_application_version, (IO, bytes)):
            _content = gallery_application_version
        else:
            _json = self._serialize.body(gallery_application_version, "GalleryApplicationVersionUpdate")
        request = build_update_request(
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_application_name=gallery_application_name,
            gallery_application_version_name=gallery_application_version_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"}  # type: ignore
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: _models.GalleryApplicationVersionUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Update a gallery Application Version.

        Overload taking a deserialized model as the request body; see the
        ``IO`` overload for passing a raw stream instead.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be updated. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         updated. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the update gallery Application
         Version operation. Required.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersionUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Update a gallery Application Version.

        Overload taking a raw binary stream as the request body; see the
        model overload for passing a ``GalleryApplicationVersionUpdate``.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be updated. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         updated. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the update gallery Application
         Version operation. Required.
        :type gallery_application_version: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        gallery_application_version: Union[_models.GalleryApplicationVersionUpdate, IO],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.GalleryApplicationVersion]:
        """Update a gallery Application Version.

        Long-running operation: issues the initial update request via
        ``_update_initial`` and returns a poller that tracks it to completion.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version is to be updated. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         updated. Needs to follow semantic version name pattern: The allowed characters are digit and
         period. Digits must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`. Required.
        :type gallery_application_version_name: str
        :param gallery_application_version: Parameters supplied to the update gallery Application
         Version operation. Is either a model type or a IO type. Required.
        :type gallery_application_version:
         ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersionUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryApplicationVersion or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GalleryApplicationVersion]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only send the initial request when not resuming from a saved
        # poller state.  cls=lambda hands the raw pipeline response to the
        # poller instead of a deserialized model.
        if cont_token is None:
            raw_result = await self._update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                gallery_application_version_name=gallery_application_version_name,
                gallery_application_version=gallery_application_version,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was consumed by the initial call; don't forward it to
        # the polling method below.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model (or the
            # caller-supplied cls transform).
            deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling may be True (default ARM polling), False (no polling),
        # or a user-provided AsyncPollingMethod instance.
        if polling is True:
            polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))  # type: AsyncPollingMethod
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"}  # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
expand: Optional[Union[str, _models.ReplicationStatusTypes]] = None,
**kwargs: Any
) -> _models.GalleryApplicationVersion:
"""Retrieves information about a gallery Application Version.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition resides. Required.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition in which the
Application Version resides. Required.
:type gallery_application_name: str
:param gallery_application_version_name: The name of the gallery Application Version to be
retrieved. Required.
:type gallery_application_version_name: str
:param expand: The expand expression to apply on the operation. "ReplicationStatus" Default
value is None.
:type expand: str or ~azure.mgmt.compute.v2021_10_01.models.ReplicationStatusTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryApplicationVersion or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.GalleryApplicationVersion]
request = build_get_request(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application_version_name=gallery_application_version_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("GalleryApplicationVersion", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application_version_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application_version_name=gallery_application_version_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"} # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_application_name: str,
        gallery_application_version_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a gallery Application Version.

        Long-running operation: issues the initial delete via
        ``_delete_initial`` and returns a poller that tracks it to completion.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the gallery Application Definition in which the
         Application Version resides. Required.
        :type gallery_application_name: str
        :param gallery_application_version_name: The name of the gallery Application Version to be
         deleted. Required.
        :type gallery_application_version_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only send the initial request when not resuming from a saved
        # poller state; cls=lambda preserves the raw pipeline response.
        if cont_token is None:
            raw_result = await self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_application_name=gallery_application_name,
                gallery_application_version_name=gallery_application_version_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was consumed by the initial call; don't forward it on.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no body to deserialize; only invoke the custom
            # cls hook if one was provided.
            if cls:
                return cls(pipeline_response, None, {})

        # polling may be True (default ARM polling), False (no polling),
        # or a user-provided AsyncPollingMethod instance.
        if polling is True:
            polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))  # type: AsyncPollingMethod
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}"}  # type: ignore
    @distributed_trace
    def list_by_gallery_application(
        self, resource_group_name: str, gallery_name: str, gallery_application_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.GalleryApplicationVersion"]:
        """List gallery Application Versions in a gallery Application Definition.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Application Gallery in which the Application
         Definition resides. Required.
        :type gallery_name: str
        :param gallery_application_name: The name of the Shared Application Gallery Application
         Definition from which the Application Versions are to be listed. Required.
        :type gallery_application_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GalleryApplicationVersion or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_10_01.models.GalleryApplicationVersion]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-10-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.GalleryApplicationVersionList]

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation URL template.
            if not next_link:
                request = build_list_by_gallery_application_request(
                    resource_group_name=resource_group_name,
                    gallery_name=gallery_name,
                    gallery_application_name=gallery_application_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_gallery_application.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize("GalleryApplicationVersionList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, raising on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list_by_gallery_application.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions"}  # type: ignore
| {
"content_hash": "b19a6dc557741a00ec0ec797e243f642",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 265,
"avg_line_length": 51.125,
"alnum_prop": 0.664369859968882,
"repo_name": "Azure/azure-sdk-for-python",
"id": "dbcb0e4b08a5dcb057bae474867b1ffcb3361a97",
"size": "45490",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_10_01/aio/operations/_gallery_application_versions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf.urls import url
from talks.contributors.views import (contributors_home, contributors_events,
contributors_eventgroups, contributors_persons)
# URL routes for the contributors section.  Every pattern is anchored at
# both ends so that only the exact path matches.
urlpatterns = [
    url(r'^$', contributors_home, name='contributors-home'),
    url(r'^talks$', contributors_events, name='contributors-events'),
    # Fixed: the pattern was r'^series' (no '$'), which also matched any
    # path merely *starting* with "series" (e.g. "seriesfoo"), unlike the
    # sibling patterns.  Anchor it for consistency.
    url(r'^series$', contributors_eventgroups, name='contributors-eventgroups'),
    url(r'^persons$', contributors_persons, name='contributors-persons'),
]
| {
"content_hash": "c62c1b1c301075572957700eefdf6a17",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 45.083333333333336,
"alnum_prop": 0.6987060998151571,
"repo_name": "ox-it/talks.ox",
"id": "ce1d0d9194c2c8b494344f25b0e3db2353675976",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talks/contributors/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23891"
},
{
"name": "Dockerfile",
"bytes": "750"
},
{
"name": "HTML",
"bytes": "117234"
},
{
"name": "JavaScript",
"bytes": "98316"
},
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Python",
"bytes": "312877"
},
{
"name": "RobotFramework",
"bytes": "18436"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
from mpl_toolkits.axisartist.grid_helper_curvelinear import \
GridHelperCurveLinear
def curvelinear_test1(fig):
    """Draw a grid defined by a custom shear transform in the left subplot."""

    def forward(x, y):
        # Shear: world y is offset by x.
        x, y = np.asarray(x), np.asarray(y)
        return x, y - x

    def backward(x, y):
        # Exact inverse of ``forward``.
        x, y = np.asarray(x), np.asarray(y)
        return x, y + x

    helper = GridHelperCurveLinear((forward, backward))
    ax1 = Subplot(fig, 1, 2, 1, grid_helper=helper)
    # Ticks and gridlines follow the custom transform (+ transData of the
    # Axes); the Axes' own transData itself is unaffected.
    fig.add_subplot(ax1)

    xx, yy = forward([3, 6], [5.0, 10.])
    ax1.plot(xx, yy)

    ax1.set_aspect(1.)
    ax1.set_xlim(0, 10.)
    ax1.set_ylim(0, 10.)

    # Two floating axes pinned at fixed coordinate values.
    ax1.axis["t"] = ax1.new_floating_axis(0, 3.)
    ax1.axis["t2"] = ax1.new_floating_axis(1, 7.)
    ax1.grid(True)
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
def curvelinear_test2(fig):
    """
    polar projection, but in a rectangular box.

    Builds the right-hand subplot: a curvilinear (polar-in-degrees) grid on
    a rectangular host axes, plus a parasite axes sharing the transform.
    """
    # PolarAxes.PolarTransform takes radian. However, we want our coordinate
    # system in degree
    tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()

    # polar projection, which involves cycle, and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).

    # 20, 20 : number of sampling points along x, y direction
    extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                     lon_cycle=360,
                                                     lat_cycle=None,
                                                     lon_minmax=None,
                                                     lat_minmax=(0, np.inf),
                                                     )

    grid_locator1 = angle_helper.LocatorDMS(12)
    # Find a grid values appropriate for the coordinate (degree,
    # minute, second).

    tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that,the
    # acceptable Locator and Formatter class is a bit different than
    # that of mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but may be possible in the future).

    grid_helper = GridHelperCurveLinear(tr,
                                        extreme_finder=extreme_finder,
                                        grid_locator1=grid_locator1,
                                        tick_formatter1=tick_formatter1
                                        )

    ax1 = SubplotHost(fig, 1, 2, 2, grid_helper=grid_helper)

    # make ticklabels of right and top axis visible.
    ax1.axis["right"].major_ticklabels.set_visible(True)
    ax1.axis["top"].major_ticklabels.set_visible(True)

    # let right axis shows ticklabels for 1st coordinate (angle)
    ax1.axis["right"].get_helper().nth_coord_ticks = 0
    # let bottom axis shows ticklabels for 2nd coordinate (radius)
    ax1.axis["bottom"].get_helper().nth_coord_ticks = 1

    fig.add_subplot(ax1)

    # A parasite axes with given transform
    ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
    # note that ax2.transData == tr + ax1.transData
    # Anthing you draw in ax2 will match the ticks and grids of ax1.
    ax1.parasites.append(ax2)
    # Plot an arc at constant radius 10, sampled densely so it renders as
    # a smooth curve under the polar transform.
    intp = cbook.simple_linear_interpolation
    ax2.plot(intp(np.array([0, 30]), 50),
             intp(np.array([10., 10.]), 50))

    ax1.set_aspect(1.)
    ax1.set_xlim(-5, 12)
    ax1.set_ylim(-5, 10)

    ax1.grid(True)
# Fixed: the driver was guarded by ``if 1:`` — an always-true leftover that
# ran the demo on *import* as well.  Use the standard main guard instead.
if __name__ == "__main__":
    fig = plt.figure(1, figsize=(7, 4))
    fig.clf()

    # Render both demo grids side by side in one figure.
    curvelinear_test1(fig)
    curvelinear_test2(fig)

    plt.draw()
    plt.show()
| {
"content_hash": "b194ab47bb311b24b92ece3d6c96f58f",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 76,
"avg_line_length": 32.404761904761905,
"alnum_prop": 0.6029879990203282,
"repo_name": "bundgus/python-playground",
"id": "4080aaadc9741a17b2c5de360bb88e47c81bfa2e",
"size": "4083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matplotlib-playground/examples/axes_grid/demo_curvelinear_grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
} |
import collections
import hashlib
import itertools
import six
from oslo_serialization import jsonutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
class GetParam(function.Function):
    """A function for resolving parameter references.

    Takes the form::

        get_param: <param_name>

    or::

        get_param:
          - <param_name>
          - <path1>
          - ...

    When path components follow the parameter name, they are used to
    index into the resolved parameter value; any failure to traverse the
    path resolves to the empty string.
    """

    def __init__(self, stack, fn_name, args):
        super(GetParam, self).__init__(stack, fn_name, args)
        self.parameters = self.stack.parameters

    def result(self):
        resolved_args = function.resolve(self.args)

        if not resolved_args:
            raise ValueError(_('Function "%s" must have arguments') %
                             self.fn_name)

        # A bare string is the parameter name; a list is the name followed
        # by path components to select into the value.  Strings must be
        # checked first since they are also Sequences.
        if isinstance(resolved_args, six.string_types):
            param_name, path_components = resolved_args, []
        elif isinstance(resolved_args, collections.Sequence):
            param_name, path_components = resolved_args[0], resolved_args[1:]
        else:
            raise TypeError(_('Argument to "%s" must be string or list') %
                            self.fn_name)

        if not isinstance(param_name, six.string_types):
            raise TypeError(_('Parameter name in "%s" must be string') %
                            self.fn_name)

        try:
            parameter = self.parameters[param_name]
        except KeyError:
            raise exception.UserParameterMissing(key=param_name)

        def descend(collection, key):
            # Each step requires an indexable container and a str/int key.
            if not isinstance(collection, (collections.Mapping,
                                           collections.Sequence)):
                raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)

            if not isinstance(key, (six.string_types, int)):
                raise TypeError(_('Path components in "%s" '
                                  'must be strings') % self.fn_name)

            return collection[key]

        try:
            return six.moves.reduce(descend, path_components, parameter)
        except (KeyError, IndexError, TypeError):
            # An unreachable path deliberately resolves to '' rather than
            # failing the whole template.
            return ''
class GetAttThenSelect(cfn_funcs.GetAtt):
    """A function for resolving resource attributes.

    Takes the form::

        get_attr:
          - <resource_name>
          - <attribute_name>
          - <path1>
          - ...

    Resolves the whole attribute first, then selects the given path out of
    the result (contrast with GetAtt below, which pushes the path down into
    the resource's FnGetAtt call).
    """

    def _parse_args(self):
        # The base class consumes (resource_name, attribute); everything
        # after those two items is kept as the selection path.
        if (not isinstance(self.args, collections.Sequence) or
                isinstance(self.args, six.string_types)):
            raise TypeError(_('Argument to "%s" must be a list') %
                            self.fn_name)
        if len(self.args) < 2:
            raise ValueError(_('Arguments to "%s" must be of the form '
                               '[resource_name, attribute, (path), ...]') %
                             self.fn_name)

        self._path_components = self.args[2:]

        return tuple(self.args[:2])

    def result(self):
        # None means the attribute is not (yet) available; propagate it
        # instead of trying to select into it.
        attribute = super(GetAttThenSelect, self).result()
        if attribute is None:
            return None

        path_components = function.resolve(self._path_components)
        return attributes.select_from_attribute(attribute, path_components)

    def dep_attrs(self, resource_name):
        # Report the attribute (with its selection path, when given) that
        # this function depends on for the named resource, chained with any
        # dependencies nested inside the arguments.
        if self._resource().name == resource_name:
            path = function.resolve(self._path_components)
            attr = [function.resolve(self._attribute)]
            if path:
                attrs = [tuple(attr + path)]
            else:
                attrs = attr
        else:
            attrs = []
        return itertools.chain(function.dep_attrs(self.args, resource_name),
                               attrs)
class GetAtt(GetAttThenSelect):
    """A function for resolving resource attributes.

    Takes the form::

        get_attr:
          - <resource_name>
          - <attribute_name>
          - <path1>
          - ...

    Unlike GetAttThenSelect, the path components are handed directly to
    the resource's FnGetAtt call rather than selected afterwards.
    """

    def result(self):
        path = function.resolve(self._path_components)
        attribute_name = function.resolve(self._attribute)

        resource = self._resource()
        # Attributes are only available once the resource has reached a
        # stable or in-progress state for one of these actions.
        usable_actions = (resource.CREATE, resource.ADOPT, resource.SUSPEND,
                          resource.RESUME, resource.UPDATE, resource.CHECK,
                          resource.SNAPSHOT)
        if (resource.status in (resource.IN_PROGRESS, resource.COMPLETE) and
                resource.action in usable_actions):
            return resource.FnGetAtt(attribute_name, *path)
        return None
class GetAttAllAttributes(GetAtt):
    """A function for resolving resource attributes.

    Takes the form::

        get_attr:
          - <resource_name>
          - <attributes_name>
          - <path1>
          - ...

    where <attributes_name> and <path1>, ... are optional arguments. If there
    is no <attributes_name>, result will be dict of all resource's attributes.
    Else function returns resolved resource's attribute.
    """

    def _parse_args(self):
        # Unlike GetAtt, a single-item list [resource_name] is accepted;
        # the attribute is then None, meaning "all attributes".
        if not self.args:
            raise ValueError(_('Arguments to "%s" can be of the next '
                               'forms: [resource_name] or '
                               '[resource_name, attribute, (path), ...]'
                               ) % self.fn_name)
        elif isinstance(self.args, collections.Sequence):
            if len(self.args) > 1:
                return super(GetAttAllAttributes, self)._parse_args()
            else:
                return self.args[0], None
        else:
            raise TypeError(_('Argument to "%s" must be a list') %
                            self.fn_name)

    def dep_attrs(self, resource_name):
        """Check if there is no attribute_name defined, return empty chain."""
        # With no explicit attribute name, the dependency covers every
        # attribute declared in the resource's schema.
        if self._attribute is not None:
            return super(GetAttAllAttributes, self).dep_attrs(resource_name)
        elif self._resource().name == resource_name:
            res = self._resource()
            attrs = six.iterkeys(res.attributes_schema)
        else:
            attrs = []
        return itertools.chain(function.dep_attrs(self.args,
                                                  resource_name), attrs)

    def result(self):
        if self._attribute is None:
            r = self._resource()
            # All attributes are only available once the resource has
            # reached a stable or in-progress state for these actions.
            if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
                    r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
                                 r.UPDATE, r.CHECK, r.SNAPSHOT)):
                return r.FnGetAtts()
            else:
                return None
        else:
            return super(GetAttAllAttributes, self).result()

    def _allow_without_attribute_name(self):
        # Signals validation that the attribute name may be omitted for
        # this function.
        return True
class Replace(cfn_funcs.Replace):
    """A function for performing string substitutions.

    Takes the form::

        str_replace:
          template: <key_1> <key_2>
          params:
            <key_1>: <value_1>
            <key_2>: <value_2>
          ...

    And resolves to::

        "<value_1> <value_2>"

    This is implemented using Python's str.replace on each key. The order in
    which replacements are performed is undefined.
    """

    def _parse_args(self):
        """Return the (params mapping, template string) pair.

        :raises TypeError: if the arguments are not a map.
        :raises KeyError: if 'params' or 'template' is missing or the
            arguments are otherwise malformed.
        """
        if not isinstance(self.args, collections.Mapping):
            raise TypeError(_('Arguments to "%s" must be a map') %
                            self.fn_name)

        try:
            mapping = self.args['params']
            string = self.args['template']
        except (KeyError, TypeError):
            example = ('''str_replace:
              template: This is var1 template var2
              params:
                var1: a
                var2: string''')
            raise KeyError(_('"str_replace" syntax should be %s') %
                           example)
        else:
            return mapping, string
class ReplaceJson(Replace):
    """A function for performing string substitutions.

    Behaves the same as Replace, but tolerates non-string parameter
    values, e.g map/list - these are serialized as json before doing
    the string substitution.
    """

    def result(self):
        template = function.resolve(self._string)
        mapping = function.resolve(self._mapping)

        if not isinstance(template, six.string_types):
            raise TypeError(_('"%s" template must be a string') % self.fn_name)

        if not isinstance(mapping, collections.Mapping):
            raise TypeError(_('"%s" params must be a map') % self.fn_name)

        def substitute(text, placeholder, value):
            # Apply a single placeholder -> value substitution, coercing
            # the value to text (json for maps/lists) first.
            if not isinstance(placeholder, six.string_types):
                raise TypeError(_('"%s" param placeholders must be strings') %
                                self.fn_name)

            if value is None:
                value = ''

            if not isinstance(value,
                              (six.string_types, six.integer_types,
                               float, bool)):
                if isinstance(value,
                              (collections.Mapping, collections.Sequence)):
                    try:
                        value = jsonutils.dumps(value, default=None)
                    except TypeError:
                        raise TypeError(_('"%(name)s" params must be strings, '
                                          'numbers, list or map. '
                                          'Failed to json serialize %(value)s'
                                          ) % {'name': self.fn_name,
                                               'value': value})
                else:
                    raise TypeError(_('"%s" params must be strings, numbers, '
                                      'list or map.') % self.fn_name)

            return text.replace(placeholder, six.text_type(value))

        result = template
        for placeholder, value in six.iteritems(mapping):
            result = substitute(result, placeholder, value)
        return result
class GetFile(function.Function):
    """A function for including a file inline.

    Takes the form::

        get_file: <file_key>

    And resolves to the content stored in the files dictionary under the given
    key.
    """

    def __init__(self, stack, fn_name, args):
        super(GetFile, self).__init__(stack, fn_name, args)
        # Snapshot the template's files dictionary at construction time.
        self.files = self.stack.t.files

    def result(self):
        file_key = function.resolve(self.args)
        if not isinstance(file_key, six.string_types):
            raise TypeError(_('Argument to "%s" must be a string') %
                            self.fn_name)

        content = self.files.get(file_key)
        if content is not None:
            return content

        raise ValueError(_('No content found in the "files" section for '
                           '%(fn_name)s path: %(file_key)s') %
                         {'fn_name': self.fn_name,
                          'file_key': file_key})
class Join(cfn_funcs.Join):
    """A function for joining strings.

    Takes the form::

        { "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }

    And resolves to::

        "<string_1><delim><string_2><delim>..."
    """
    # NOTE(review): no overrides — behaviour comes entirely from
    # cfn_funcs.Join; presumably this subclass only exposes it under the
    # HOT function name. Confirm against the template registry.
class JoinMultiple(function.Function):
    """A function for joining one or more lists of strings.

    Takes the form::

        { "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }

    And resolves to::

        "<string_1><delim><string_2><delim>..."

    Optionally multiple lists may be specified, which will also be joined.

    Items that are maps or lists are json-serialized before joining;
    None becomes the empty string.
    """

    def __init__(self, stack, fn_name, args):
        super(JoinMultiple, self).__init__(stack, fn_name, args)

        example = '"%s" : [ " ", [ "str1", "str2"] ...]' % fn_name
        fmt_data = {'fn_name': fn_name,
                    'example': example}

        if not isinstance(args, list):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % fmt_data)

        try:
            # First element is the delimiter; every following element is a
            # list to join. At least one list is required.
            self._delim = args[0]
            self._joinlists = args[1:]
            if len(self._joinlists) < 1:
                raise ValueError
        except (IndexError, ValueError):
            raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)

    def result(self):
        r_joinlists = function.resolve(self._joinlists)

        strings = []
        for jl in r_joinlists:
            if jl:
                if (isinstance(jl, six.string_types) or
                        not isinstance(jl, collections.Sequence)):
                    raise TypeError(_('"%s" must operate on '
                                      'a list') % self.fn_name)
                strings += jl

        delim = function.resolve(self._delim)
        if not isinstance(delim, six.string_types):
            raise TypeError(_('"%s" delimiter must be a string') %
                            self.fn_name)

        def ensure_string(s):
            msg = _('Items to join must be string, map or list not %s'
                    ) % (repr(s)[:200])
            if s is None:
                return ''
            elif isinstance(s, six.string_types):
                return s
            elif isinstance(s, (collections.Mapping, collections.Sequence)):
                try:
                    return jsonutils.dumps(s, default=None)
                except TypeError:
                    msg = _('Items to join must be string, map or list. '
                            '%s failed json serialization'
                            ) % (repr(s)[:200])
                    raise TypeError(msg)
            # BUG FIX: unsupported scalars (e.g. numbers) previously fell
            # through and returned None, which made str.join raise an
            # opaque TypeError. Raise the intended message instead.
            raise TypeError(msg)

        return delim.join(ensure_string(s) for s in strings)
class MapMerge(function.Function):
    """A function for merging maps.

    Takes the form::

        { "map_merge" : [{'k1': 'v1', 'k2': 'v2'}, {'k1': 'v2'}] }

    And resolves to::

        {'k1': 'v2', 'k2': 'v2'}

    Later maps win when keys collide; None entries are treated as empty maps.
    """

    def __init__(self, stack, fn_name, args):
        super(MapMerge, self).__init__(stack, fn_name, args)
        example = (_('"%s" : [ { "key1": "val1" }, { "key2": "val2" } ]')
                   % fn_name)
        self.fmt_data = {'fn_name': fn_name, 'example': example}

    def result(self):
        resolved = function.resolve(self.args)
        if not isinstance(resolved, collections.Sequence):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % self.fmt_data)

        def ensure_map(m):
            # Accept None as an empty map; anything else must be a mapping.
            if m is None:
                return {}
            if isinstance(m, collections.Mapping):
                return m
            raise TypeError(
                _('Incorrect arguments: Items to merge must be maps.'))

        merged = {}
        for item in resolved:
            merged.update(ensure_map(item))
        return merged
class ResourceFacade(cfn_funcs.ResourceFacade):
    """A function for retrieving data in a parent provider template.

    A function for obtaining data from the facade resource from within the
    corresponding provider template.

    Takes the form::

        resource_facade: <attribute_type>

    where the valid attribute types are "metadata", "deletion_policy" and
    "update_policy".
    """

    # NOTE(review): redefines the attribute-type constants with HOT's
    # lower-case spellings; presumably shadowing different spellings in
    # the cfn parent class — confirm against cfn_funcs.ResourceFacade.
    _RESOURCE_ATTRIBUTES = (
        METADATA, DELETION_POLICY, UPDATE_POLICY,
    ) = (
        'metadata', 'deletion_policy', 'update_policy'
    )
class Removed(function.Function):
    """This function existed in previous versions of HOT, but has been removed.

    Check the HOT guide for an equivalent native function.
    """

    def validate(self):
        # Always fail validation: the function no longer exists in this
        # HOT version.
        exp = (_("The function %s is not supported in this version of HOT.") %
               self.fn_name)
        raise exception.InvalidTemplateVersion(explanation=exp)

    def result(self):
        # validate() always raises, so this is not expected to be reached;
        # delegate to the base class for interface completeness.
        return super(Removed, self).result()
class Repeat(function.Function):
    """A function for iterating over a list of items.

    Takes the form::

        repeat:
            template:
                <body>
            for_each:
                <var>: <list>

    The result is a new list of the same size as <list>, where each element
    is a copy of <body> with any occurrences of <var> replaced with the
    corresponding item of <list>. With several variables, the cartesian
    product of their lists is used.
    """

    def __init__(self, stack, fn_name, args):
        super(Repeat, self).__init__(stack, fn_name, args)

        if not isinstance(self.args, collections.Mapping):
            raise TypeError(_('Arguments to "%s" must be a map') %
                            self.fn_name)

        # We don't check for invalid keys appearing here, which is wrong but
        # it's probably too late to change
        try:
            self._for_each = self.args['for_each']
            self._template = self.args['template']
        except KeyError:
            example = ('''repeat:
            template: This is %var%
            for_each:
              %var%: ['a', 'b', 'c']''')
            raise KeyError(_('"repeat" syntax should be %s') % example)

    def validate(self):
        """Check that for_each is a map of variable name to list."""
        super(Repeat, self).validate()

        # A function value cannot be inspected until resolved; result()
        # re-checks after resolution.
        if not isinstance(self._for_each, function.Function):
            if not isinstance(self._for_each, collections.Mapping):
                raise TypeError(_('The "for_each" argument to "%s" must '
                                  'contain a map') % self.fn_name)
            if not all(self._valid_list(v) for v in self._for_each.values()):
                raise TypeError(_('The values of the "for_each" argument '
                                  'to "%s" must be lists') % self.fn_name)

    @staticmethod
    def _valid_list(arg):
        # Strings are Sequences too, but are not acceptable as for_each
        # value lists.
        return (isinstance(arg, (collections.Sequence,
                                 function.Function)) and
                not isinstance(arg, six.string_types))

    def _do_replacement(self, keys, values, template):
        """Recursively substitute each key with its value in template."""
        if isinstance(template, six.string_types):
            for (key, value) in zip(keys, values):
                template = template.replace(key, value)
            return template
        elif isinstance(template, collections.Sequence):
            return [self._do_replacement(keys, values, elem)
                    for elem in template]
        elif isinstance(template, collections.Mapping):
            return dict((self._do_replacement(keys, values, k),
                         self._do_replacement(keys, values, v))
                        for (k, v) in template.items())
        else:
            # BUG FIX: non-string scalar leaves (ints, bools, None)
            # previously fell through and were silently turned into None;
            # pass them through unchanged.
            return template

    def result(self):
        for_each = function.resolve(self._for_each)
        if not all(self._valid_list(l) for l in for_each.values()):
            raise TypeError(_('The values of the "for_each" argument to '
                              '"%s" must be lists') % self.fn_name)

        template = function.resolve(self._template)
        keys, lists = six.moves.zip(*for_each.items())
        # One output element per combination of the variables' values.
        return [self._do_replacement(keys, replacements, template)
                for replacements in itertools.product(*lists)]
class Digest(function.Function):
    """A function for performing digest operations.

    Takes the form::

        digest:
          - <algorithm>
          - <value>

    Valid algorithms are the ones provided by natively by hashlib (md5, sha1,
    sha224, sha256, sha384, and sha512) or any one provided by OpenSSL.
    """

    def validate_usage(self, args):
        """Validate the resolved [algorithm, value] argument pair."""
        is_string_list = (isinstance(args, list) and
                          all(isinstance(a, six.string_types) for a in args))
        if not is_string_list:
            msg = _('Argument to function "%s" must be a list of strings')
            raise TypeError(msg % self.fn_name)

        if len(args) != 2:
            msg = _('Function "%s" usage: ["<algorithm>", "<value>"]')
            raise ValueError(msg % self.fn_name)

        # Python 3 renamed hashlib's algorithm listing.
        if six.PY3:
            algorithms = hashlib.algorithms_available
        else:
            algorithms = hashlib.algorithms
        if args[0].lower() not in algorithms:
            msg = _('Algorithm must be one of %s')
            raise ValueError(msg % six.text_type(algorithms))

    def digest(self, algorithm, value):
        """Return the hex digest of *value* under *algorithm*."""
        hasher = hashlib.new(algorithm)
        hasher.update(six.b(value))
        return hasher.hexdigest()

    def result(self):
        resolved = function.resolve(self.args)
        self.validate_usage(resolved)
        return self.digest(*resolved)
class StrSplit(function.Function):
    """A function for splitting delimited strings into a list.

    Optionally extracting a specific list member by index.

    Takes the form::

        str_split: [delimiter, string, <index> ]

    or::

        str_split:
          - delimiter
          - string
          - <index>

    If <index> is specified, the specified list item will be returned
    otherwise, the whole list is returned, similar to get_attr with
    path based attributes accessing lists.
    """

    def __init__(self, stack, fn_name, args):
        super(StrSplit, self).__init__(stack, fn_name, args)
        example = '"%s" : [ ",", "apples,pears", <index>]' % fn_name
        self.fmt_data = {'fn_name': fn_name,
                         'example': example}
        self.fn_name = fn_name

        # Arguments must be a sequence, not a bare string or a map.
        if isinstance(args, (six.string_types, collections.Mapping)):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % self.fmt_data)

    def result(self):
        remainder = function.resolve(self.args)
        try:
            delimiter = remainder.pop(0)
            source = remainder.pop(0)
        except (AttributeError, IndexError):
            raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % self.fmt_data)

        parts = source.split(delimiter)

        # With no index argument, return the whole list.
        if not remainder:
            return parts

        try:
            position = int(remainder.pop(0))
        except ValueError:
            raise ValueError(_('Incorrect index to "%(fn_name)s" '
                               'should be: %(example)s') % self.fmt_data)

        try:
            return parts[position]
        except IndexError:
            raise ValueError(_('Incorrect index to "%(fn_name)s" '
                               'should be between 0 and '
                               '%(max_index)s')
                             % {'fn_name': self.fn_name,
                                'max_index': len(parts) - 1})
| {
"content_hash": "af2df7d8e26a2f1ce90ea2a00890bff3",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 79,
"avg_line_length": 33.06499261447563,
"alnum_prop": 0.5279874916238553,
"repo_name": "jasondunsmore/heat",
"id": "5356ee51da42dfe9edf3b0cdad3e425ff16a4ebd",
"size": "22960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/hot/functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7819626"
},
{
"name": "Shell",
"bytes": "33158"
}
],
"symlink_target": ""
} |
import requests
from copy import deepcopy
import json
import os
import pkg_resources
from .config import SMARTAPI_URL
def restructure_specs(spec):
    """SmartAPI API restructure each spec when storing them. This function
    converts them back to the original smartapi specification.

    The stored form keeps "paths" as a list of {"path", "pathitem"} entries;
    the original form keys each pathitem by its path. Specs whose "paths" is
    not a non-empty list are returned unchanged (same object).
    """
    paths = spec.get("paths")
    if not (isinstance(paths, list) and paths):
        return spec
    # Work on a deep copy so the caller's spec is never mutated.
    restructured = deepcopy(spec)
    restructured["paths"] = {
        entry.get("path"): entry.get("pathitem")
        for entry in restructured["paths"]
    }
    return restructured
def load_specs(source="remote", tag="translator"):
    """Load SmartAPI specs, keeping only those tagged with *tag*.

    :param source: "remote" fetches from SMARTAPI_URL; anything else reads
        the bundled local snapshot.
    :param tag: only specs carrying this tag name are returned.
    :return: list of restructured spec dicts.
    :raises Exception: if the remote fetch or JSON decode fails.
    """
    if source == "remote":
        try:
            specs = requests.get(SMARTAPI_URL).json()
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; chain the cause for debuggability.
        except Exception as exc:
            raise Exception(
                "Unable to retrieve smartapi specs from {}".format(SMARTAPI_URL)
            ) from exc
    else:
        # Removed unused DATA_PATH/dir_path/file_path locals; only the
        # packaged snapshot file is actually read.
        db_file = pkg_resources.resource_filename(
            "biothings_explorer", "data/smartapi_local_specs.json"
        )
        with open(db_file) as f:
            specs = json.load(f)
    new_specs = []
    for spec in specs.get("hits", []):
        # Tolerate specs without a "tags" section instead of raising.
        tags = [item.get("name") for item in spec.get("tags") or []]
        if tag in tags:
            new_specs.append(restructure_specs(spec))
    return new_specs
| {
"content_hash": "3b0642455008ff90e661b8fa2739fb18",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 135,
"avg_line_length": 32.979166666666664,
"alnum_prop": 0.6114971572962729,
"repo_name": "biothings/biothings_explorer",
"id": "bfccc6a1afb1e7716c01c2681eecd96fa6944720",
"size": "1583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biothings_explorer/smartapi_kg/dataload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2401542"
},
{
"name": "Jupyter Notebook",
"bytes": "14468811"
},
{
"name": "Python",
"bytes": "585318"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from proso_user.models import Session
from django.db.models.signals import pre_save
from django.dispatch import receiver
from proso.django.models import disable_for_loaddata
class Comment(models.Model):
    """Free-text user feedback; name and e-mail are optional so anonymous
    visitors can comment."""

    username = models.CharField(null=True, blank=True, max_length=100)
    email = models.EmailField(null=True, blank=True, max_length=200)
    # The comment body itself; required.
    text = models.TextField(null=False, blank=False)
    inserted = models.DateTimeField(auto_now_add=True)
    # Filled automatically by the init_comment_session pre_save handler
    # from the current proso_user session.
    session = models.ForeignKey(Session)
class Rating(models.Model):
    """A user's difficulty rating of the practiced content."""

    # Canonical 4-point difficulty scale.
    UNKNOWN = 0
    EASY = 1
    RIGHT = 2
    HARD = 3
    VALUES = (
        (UNKNOWN, 'Unknown'),
        (EASY, 'Too Easy'),
        (RIGHT, 'Just Right'),
        (HARD, 'Too Hard'),
        # 4-8: finer-grained relative scale without named constants.
        # NOTE(review): presumably a second rating scheme used by some
        # clients — confirm against consumers before renumbering.
        (4, 'Much easier'),
        (5, 'Bit easier'),
        (6, 'The same'),
        (7, 'Bit harder'),
        (8, 'Much harder'),
    )
    user = models.ForeignKey(User)
    # Callable default: evaluated at each insert, not at class definition.
    inserted = models.DateTimeField(default=datetime.now)
    value = models.SmallIntegerField(choices=VALUES, default=UNKNOWN)
@receiver(pre_save, sender=Comment)
@disable_for_loaddata
def init_comment_session(sender, instance, **kwargs):
    """Attach the current user session to a Comment just before saving.

    Skipped while loading fixtures (see @disable_for_loaddata).
    """
    instance.session = Session.objects.get_current_session()
| {
"content_hash": "4fed7666f45fb4a3cbadf646307df12c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 30.11627906976744,
"alnum_prop": 0.6772200772200773,
"repo_name": "adaptive-learning/proso-apps",
"id": "9dfb41ace3a3034b04bd8ae954c9be9d3d612d50",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proso_feedback/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4739"
},
{
"name": "HTML",
"bytes": "35781"
},
{
"name": "JavaScript",
"bytes": "865"
},
{
"name": "Makefile",
"bytes": "4125"
},
{
"name": "Python",
"bytes": "645104"
}
],
"symlink_target": ""
} |
"""Berksfile related tests."""
import unittest
# Fixture Berksfile used as the merge target: getchef.com source plus a mix
# of git-, path-, constraint- and plain cookbook declarations, ending with a
# `metadata` directive.
FIRST_BERKS = """
source 'https://supermarket.getchef.com'
cookbook 'newrelic_plugins', git: 'git@github.com:rackspace-cookbooks/newrelic_plugins_chef.git'
cookbook 'disable_ipv6', path: 'test/fixtures/cookbooks/disable_ipv6'
cookbook 'wrapper', path: 'test/fixtures/cookbooks/wrapper'
cookbook 'apt'
# until https://github.com/elasticsearch/cookbook-elasticsearch/pull/230
cookbook 'elasticsearch', '~> 0.3', git:'git@github.com:racker/cookbook-elasticsearch.git'
# until https://github.com/lusis/chef-logstash/pull/336 & 394
cookbook 'logstash', git:'git@github.com:racker/chef-logstash.git'
metadata
"""

# Fixture Berksfile merged into FIRST_BERKS: different source URL, some
# overlapping cookbooks (disable_ipv6, wrapper) and some new ones
# (cron, yum, fake).
SECOND_BERKS = """
source "https://supermarket.chef.io"
metadata
cookbook 'cron', git: 'git@github.com:rackspace-cookbooks/cron.git'
# until https://github.com/elastic/cookbook-elasticsearch/pull/230
cookbook 'disable_ipv6', path: 'test/fixtures/cookbooks/disable_ipv6'
cookbook 'wrapper', path: 'test/fixtures/cookbooks/wrapper'
cookbook 'yum'
cookbook 'fake', '~> 9.1'
"""
import copy
from fastfood import book
class TestBerks(unittest.TestCase):
    """Tests for fastfood.book.Berksfile parsing and merging."""

    def test_merge(self):
        """Merging SECOND_BERKS into FIRST_BERKS adds its cookbooks and
        source without dropping existing entries."""
        fb = book.Berksfile.from_string(FIRST_BERKS)
        before = copy.deepcopy(fb.to_dict())
        # Sanity-check the pre-merge state: none of SECOND_BERKS' new
        # cookbooks or its source are present yet.
        self.assertIn('cookbook', before)
        self.assertIsInstance(before['cookbook'], dict)
        self.assertNotIn('cron', before['cookbook'])
        self.assertNotIn('fake', before['cookbook'])
        self.assertNotIn('yum', before['cookbook'])
        self.assertIn('source', before)
        self.assertIsInstance(before['source'], list)
        self.assertNotIn('https://supermarket.chef.io', before['source'])
        sb = book.Berksfile.from_string(SECOND_BERKS)
        fb.merge(sb)
        after = fb.to_dict()
        # New cookbooks appear after the merge.
        self.assertIn('cron', after['cookbook'])
        self.assertIn('fake', after['cookbook'])
        self.assertIn('yum', after['cookbook'])
        # Sources accumulate, preserving order (original first).
        self.assertIn('source', after)
        self.assertEqual(after['source'],
                         ['https://supermarket.getchef.com',
                          'https://supermarket.chef.io'])
        # Cookbook options (constraint, git) survive the merge.
        self.assertEqual(after['cookbook']['fake']['constraint'],
                         '~> 9.1')
        self.assertEqual(after['cookbook']['cron']['git'],
                         'git@github.com:rackspace-cookbooks/cron.git')

    def test_berks_to_dict(self):
        """to_dict() exposes sources, metadata flag and per-cookbook
        options parsed from a Berksfile string."""
        fb = book.Berksfile.from_string(FIRST_BERKS)
        self.assertIsInstance(fb, book.Berksfile)
        fbd = fb.to_dict()
        self.assertIsInstance(fbd, dict)
        self.assertIn('cookbook', fbd)
        self.assertIsInstance(fbd['cookbook'], dict)
        self.assertIn('source', fbd)
        self.assertIsInstance(fbd['source'], list)
        self.assertIn('https://supermarket.getchef.com', fbd['source'])
        self.assertIn('metadata', fbd)
        self.assertTrue(fbd['metadata'] is True)
        # Exactly the cookbooks declared in FIRST_BERKS, no extras.
        expects = [
            'newrelic_plugins',
            'disable_ipv6',
            'elasticsearch',
            'wrapper',
            'apt',
            'logstash',
        ]
        self.assertEqual(len(fbd['cookbook']), len(expects))
        for cb in expects:
            self.assertIn(cb, fbd['cookbook'])
        # Option parsing: git URLs, paths, bare declarations, constraints.
        self.assertEqual(
            fbd['cookbook']['newrelic_plugins']['git'],
            'git@github.com:rackspace-cookbooks/newrelic_plugins_chef.git')
        self.assertEqual(
            fbd['cookbook']['disable_ipv6']['path'],
            'test/fixtures/cookbooks/disable_ipv6')
        self.assertEqual(
            fbd['cookbook']['wrapper']['path'],
            'test/fixtures/cookbooks/wrapper')
        self.assertEqual(fbd['cookbook']['apt'], {})
        self.assertEqual(
            fbd['cookbook']['elasticsearch']['constraint'],
            '~> 0.3')
        self.assertEqual(
            fbd['cookbook']['elasticsearch']['git'],
            'git@github.com:racker/cookbook-elasticsearch.git')
        self.assertEqual(
            fbd['cookbook']['logstash']['git'],
            'git@github.com:racker/chef-logstash.git')
# Allow running this test module directly: python test_berks.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "a82d587e30c277db8959b8a14a552ad5",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 96,
"avg_line_length": 34.108333333333334,
"alnum_prop": 0.6166625946738333,
"repo_name": "brint/fastfood",
"id": "de11f4b5c0d46fe57d62bdbfb3607485d2083c1d",
"size": "4093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_berks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8391"
},
{
"name": "Python",
"bytes": "59134"
},
{
"name": "Ruby",
"bytes": "12296"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
import time
from datetime import datetime
import nose
import openpyxl
from nose.tools import eq_, ok_, raises
from ..Alignment import Alignment
from ..Color import Color
from ..Fill import Fill
from ..Font import Font
from ..Style import Style
from ..Workbook import Workbook
from .utils import get_output_path
def test_style():
    """End-to-end smoke test: font, fill, alignment and border styling plus
    a merged range, written out to style-test.xlsx."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1][1].value = 1
    ws[1][2].value = 1
    ws[1][3].value = 1
    # Font flags on individual cells.
    ws[1][1].style.font.bold = True
    ws[1][2].style.font.italic = True
    ws[1][3].style.font.underline = True
    ws[1][1].style.font.strikethrough = True
    ws[1][1].style.font.color = Color(255, 0, 255)
    ws[1][1].style.fill.background = Color(0, 255, 0)
    ws[1][2].style.fill.background = Color(255, 255, 0)
    ws[2][1].value = "asdf"
    # Merge A2:B2; writes to either merged cell must be reflected in both.
    ws.range("A2", "B2").merge()
    eq_(ws[1][2].value, ws[1][1].value)
    ws[2][2].value = "qwer"
    eq_(ws[1][2].value, ws[1][1].value)
    ws[2][1].style.fill.background = Color(0, 255, 0)
    # Alignment, including rotation round-trip.
    ws[1][1].style.alignment.vertical = "top"
    ws[1][1].style.alignment.horizontal = "right"
    ws[1][1].style.alignment.rotation = 90
    eq_(ws[1][1].style.alignment.rotation, 90)
    # Per-side border color and style.
    ws[3][3].style.borders.top.color = Color(255, 0, 0)
    ws[3][3].style.borders.left.color = Color(0, 255, 0)
    ws[3][4].style.borders.right.style = "-."
    value = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla lobortis fermentum metus id congue. Sed ultrices velit id sapien sodales bibendum. Mauris volutpat porta arcu non bibendum. Pellentesque adipiscing lacus quam, ac congue ipsum fringilla sed. Praesent dapibus dignissim elit vel dictum. Pellentesque commodo iaculis ipsum a rhoncus. Sed mattis neque eget justo dignissim scelerisque. Nam odio neque, mattis et libero id, posuere aliquam mi."
    ws[4][1].value = value
    ws[4][2].value = value
    ws[4][1].style.alignment.wrap_text = True
    wb.save(get_output_path("style-test.xlsx"))
@raises(TypeError)
def test_invalid_wrap_text():
    """Non-boolean wrap_text assignments must raise TypeError."""
    a = Alignment()
    a.wrap_text = True
    ok_(a.wrap_text)
    a.wrap_text = "some random nonsense"


@raises(ValueError)
def test_invalid_horizontal():
    """Unknown horizontal alignment keywords must raise ValueError."""
    a = Alignment()
    a.horizontal = "left"
    eq_(a.horizontal, "left")
    a.horizontal = "nowhere"


@raises(ValueError)
def test_invalid_vertical():
    """Unknown vertical alignment keywords must raise ValueError."""
    a = Alignment()
    a.vertical = "bottom"
    eq_(a.vertical, "bottom")
    a.vertical = "somewhere"
def test_style_compression():
    """Overlapping range-level styles should still produce a valid file
    (styles are de-duplicated on save)."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws.range("A1", "C3").value = 1
    ws.range("A1", "C1").style.font.bold = True
    ws.range("A2", "C3").style.font.italic = True
    ws.range("A3", "C3").style.fill.background = Color(255, 0, 0)
    ws.range("C1", "C3").style.font.strikethrough = True
    wb.save(get_output_path("style-compression-test.xlsx"))


def test_style_reference():
    """Assigning a shared Font object to a cell's style must save cleanly."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1][1].value = 1
    font = Font(bold=True, italic=True, underline=True, strikethrough=True)
    ws[1][1].style.font = font
    wb.save(get_output_path("style-reference-test.xlsx"))


def test_style_row():
    """Cell-level styles should override the row-level style where set."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1].style.fill.background = Color(255, 0, 0)
    ws[1][3].style.fill.background = Color(0, 255, 0)
    wb.save(get_output_path("style-row-test.xlsx"))
def test_style_row_col():
    """Row and column sizing via set_row_style/set_col_style.

    NOTE(review): size semantics assumed from usage — -1 auto-fit,
    0 hidden, >0 fixed size; confirm against pyexcelerate docs.
    """
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1][1].value = "asdf"
    ws[1][3].value = "asdf\nasdf\nasdf\nasdf"
    ws[3][1].value = "asdfasdfasdfasdfasdfasdfasdfasdf"
    # Fresh rows/columns report a default Style.
    eq_(Style(), ws.get_row_style(1))
    eq_(Style(), ws.get_col_style(1))
    ws.set_row_style(1, Style(size=-1))
    ws.set_row_style(2, Style(size=-1))
    ws.set_row_style(3, Style(size=-1))
    ws.set_row_style(4, Style(size=0))
    ws.set_row_style(5, Style(size=100, fill=Fill(background=Color(0, 255, 0, 0))))
    ws.set_col_style(1, Style(size=-1))
    ws.set_col_style(2, Style(size=-1))
    ws.set_col_style(3, Style(size=-1))
    ws.set_col_style(4, Style(size=0))
    ws.set_col_style(5, Style(size=100, fill=Fill(background=Color(255, 0, 0, 0))))
    wb.save(get_output_path("style-auto-row-col-test.xlsx"))
def test_and_or_xor():
    """Set-like combination of styles: & keeps common attributes, | unions
    them, ^ keeps attributes set in exactly one operand."""
    bolditalic = Font(bold=True, italic=True)
    italicunderline = Font(italic=True, underline=True)
    eq_(Font(italic=True), bolditalic & italicunderline)
    eq_(Font(bold=True, italic=True, underline=True), bolditalic | italicunderline)
    eq_(Font(bold=True, underline=True), bolditalic ^ italicunderline)
    # The same operators apply across whole Style objects.
    fontstyle = Style(font=Font(bold=True))
    fillstyle = Style(fill=Fill(background=Color(255, 0, 0, 0)))
    eq_(Style(), fontstyle & fillstyle)
    eq_(
        Style(font=Font(bold=True), fill=Fill(background=Color(255, 0, 0, 0))),
        fontstyle | fillstyle,
    )
    eq_(
        Style(font=Font(bold=True), fill=Fill(background=Color(255, 0, 0, 0))),
        fontstyle ^ fillstyle,
    )
    # Alignment fields combine attribute-wise as well.
    leftstyle = Style(alignment=Alignment("right", "top"))
    bottomstyle = Style(alignment=Alignment(vertical="top", rotation=15))
    eq_(Style(alignment=Alignment("right", "top", 15)), leftstyle | bottomstyle)
    eq_(Style(alignment=Alignment(vertical="top")), leftstyle & bottomstyle)
    eq_(Style(alignment=Alignment("right", rotation=15)), leftstyle ^ bottomstyle)
def test_str_():
    """Font repr lists family, size and the enabled flag letters."""
    font = Font(bold=True, italic=True, underline=True, strikethrough=True)
    eq_(font.__repr__(), "<Font: Calibri, 11pt b i u s>")


def test_no_style_xml():
    """A workbook written without any styles must be readable by openpyxl."""
    data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]  # data is a 2D array
    filename = get_output_path("no_style.xlsx")
    sheetname = "test"
    wb = Workbook()
    wb.new_sheet(sheetname, data=data)
    wb.save(filename)
    # Round-trip check: loading and indexing the sheet must not raise.
    wbr = openpyxl.reader.excel.load_workbook(filename=filename, read_only=True)
    mySheet = wbr[sheetname]
def test_dense_sparse_styles():
    """Mix dense (initial data block) and sparse (out-of-range) cells with
    styles and verify the workbook saves."""
    testData = [["1x1", "1x2", "1x3"], ["2x1", "2x2", "2x3"], ["3x1", "3x2", "3x3"]]
    wb = Workbook()
    ws = wb.new_sheet("Test 1", data=testData)
    # Style inside the dense block.
    ws[2][2].style.font.bold = True
    # Sparse cells beyond the initial data, with and without styles.
    ws[2][5] = "2x5"
    ws[2][7] = "2x7"
    ws[2][5].style.font.bold = True
    ws[5][2] = "5x2"
    ws[7][2] = "7x2"
    ws[5][2].style.font.bold = True
    ws[5][5] = "5x5"
    ws[5][5].style.font.bold = True
    ws[6][6] = "6x6"
    wb.save(get_output_path("dense-sparse-style-test.xlsx"))


def test_unicode_with_styles():
    """Non-ASCII sheet names and cell values must coexist with styles."""
    wb = Workbook()
    ws = wb.new_sheet(u"ʇǝǝɥsǝpoɔıun")
    ws[1][1].value = u"Körperschaft des öffentlichen"
    ws.set_col_style(2, Style(size=0))
    wb.save(get_output_path("unicode-styles.xlsx"))
| {
"content_hash": "fcde26569a98d98d7e57583e8ad3323f",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 465,
"avg_line_length": 34.93582887700535,
"alnum_prop": 0.6372263891014848,
"repo_name": "kz26/PyExcelerate",
"id": "b477a8d6283ffb38eccb99bda5f65be08176b2fb",
"size": "6567",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyexcelerate/tests/test_Style.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "90006"
}
],
"symlink_target": ""
} |
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import scipy.stats
# UN member/observer flags only; multi-country rows are exploded to one row
# per country, and duplicate flag URLs are dropped (keep first) so shared
# flags are averaged once.
flags = pd.read_csv("../dataviz/datasets/countries.csv").filter_rows("organisations >> un").split_columns('country', "|").explode('country').set_index('country').drop_duplicates(subset='flag', keep='first')
continents = flags.groupby("continent").count().index

# Layout constants (pixels).
SIZE = 100
FLAG_WIDTH = 6*SIZE          # width of each averaged flag image
BOX_WIDTH = SIZE * 3 // 4    # swatch size in the palette legend
LABEL_FONT = calibri(SIZE, bold=True)
SUB_FONT = partial(calibri, SIZE*2//3)
def flag_image(c):
    """Fetch country c's flag, flattening transparency onto white."""
    return Image.from_url_with_cache(flags['flag'][c]).convert("RGBA").remove_transparency("white").convert("RGB")

def label_image(text, align="center"):
    """Render an upper-case label, breaking words onto separate lines."""
    return Image.from_text(text.upper().replace(" ","\n"), LABEL_FONT, "black", "white", align=align)

def mean_image(imgs, size):
    """Pixelwise mean of imgs, averaged in linear RGB to avoid the
    darkening bias of averaging gamma-encoded sRGB values."""
    average = ImageColor.from_linear(sum(ImageColor.to_linear(np.array(img.resize(size))) for img in imgs) / len(imgs))
    return Image.fromarray(np.uint8(average))
class HeraldicPalette(metaclass=NamedPaletteMeta):
    """Named palette of heraldic tinctures used to quantize flags for the
    modal average."""
    # Omits Murrey and Sanguine (which are very similar to Gules and Purpure) and Cendree and Carnation (which are barely used).
    OR = "#fcdd09"
    ARGENT = "#ffffff"
    AZURE = "#0f47af"
    GULES = "#da121a"
    PURPURE = "#9116a1"
    SABLE = "#000000"
    VERT = "#009200"
    TENNE = "#804000"
    ORANGE = "#ff8000"
    CELESTE = "#75aadb"
def mode_image(imgs, size):
    """Pixelwise mode of imgs after quantizing each to HeraldicPalette;
    the result is mapped back to the palette's RGB colors."""
    a = np.stack([np.array(img.to_palette(HeraldicPalette).resize(size)) for img in imgs], axis=-1)
    mode = Image.fromarray(scipy.stats.mode(a, -1)[0][...,0], "P")
    mode.putpalette(list(generate_leafs(RGBA(col)[:3] for col in list(HeraldicPalette))))
    return mode.to_rgba()

def median_image(imgs, size):
    """Pixelwise median of the greyscale versions of imgs.

    Not used in the main chart (median_image_rgb is); kept for the
    commented-out extras at the bottom of the file.
    """
    a = np.stack([np.array(img.convert("L").resize(size)) for img in imgs], axis=-1)
    median = np.median(a, axis=-1)
    return Image.fromarray(np.uint8(median), "L").to_rgba()
def median_image_rgb(imgs, size):
    """Pixelwise median of imgs, computed independently per RGB channel."""
    resized = [img.resize(size) for img in imgs]
    channels = []
    for band in range(3):
        stacked = np.stack([np.array(img.getchannel(band)) for img in resized],
                           axis=-1)
        median = np.median(stacked, axis=-1)
        channels.append(Image.fromarray(np.uint8(median), "L"))
    return Image.merge("RGB", channels)
def average_flag(df, size, average):
    """Average the flags of all countries in df with the given averaging
    function, then add a 1px black border."""
    flags = [flag_image(i) for i in df.index]
    return average(flags, (size[0]-2,size[1]-2)).pad(1, "black")

def average_flags(label, average):
    """Build one chart row: its label, the world average, then one average
    per continent (in `continents` order)."""
    world = average_flag(flags, (FLAG_WIDTH, FLAG_WIDTH*2//3), average)
    continent = [average_flag(flags[flags.continent == continent], (FLAG_WIDTH, FLAG_WIDTH*2//3), average) for continent in continents]
    return [label_image(label), world] + continent
# Chart body: header row plus one row per averaging method; transposed so
# methods run across columns.
array = [[None, label_image("world")] + [label_image(c) for c in continents],
         average_flags("mean", mean_image),
         average_flags("mode", mode_image),
         average_flags("median", median_image_rgb)]
grid = Image.from_array(tmap(list, zip(*array)), bg="white", padding=SIZE // 5, xalign=(1,0.5,0.5,0.5,0.5))
# Title and method descriptions (including the heraldic palette legend).
title = Image.from_column([
    Image.from_text("Average flags of the world".upper(), calibri(SIZE*2, bold=True), "black", "white"),
    Image.from_text("mean versus mode versus median", calibri(SIZE*3//2, italics=True), "black", "white")
    ], padding=SIZE//10)
descriptions = [
 Image.from_markup("Averages flags of the 195 member and observer states of the UN, resized to a constant aspect ratio.", SUB_FONT),
 Image.from_markup("**Mean flags** calculated by first converting from sRGB to linear RGB.", SUB_FONT),
 Image.from_row([
   Image.from_markup("**Modal flags** calculated by first quantizing to heraldic colors: ", SUB_FONT),
   Checkers((BOX_WIDTH*len(HeraldicPalette), BOX_WIDTH), GradientColormap(*HeraldicPalette), shape=(len(HeraldicPalette), 1), colors=len(HeraldicPalette)).add_grid((len(HeraldicPalette), 1))]),
 Image.from_markup("**Median flags** calculated separately for each RGB channel.", SUB_FONT)]
# Assemble, watermark and save the final image.
img = Image.from_column([title, Image.from_column(descriptions, equal_heights=True, xalign=0), grid], padding=SIZE//4, bg="white")
img.place(Image.from_text("/u/Udzu", font("arial", 40), fg="black", bg="white", padding=10).pad((2,2,0,0), "black"), align=1, padding=20, copy=False)
img.save("output/flagsaverage2.png")
# Extras
# array = [[None, label_image("world")] + [label_image(c, "right") for c in continents], average_flags("RGB median", median_image_rgb)]
# grid = Image.from_array(tmap(list, zip(*array)), bg="white", padding=SIZE // 5, xalign=(1,0.5))
| {
"content_hash": "9f59a2148249865a848644c779276f25",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 206,
"avg_line_length": 51.50574712643678,
"alnum_prop": 0.6744030350368221,
"repo_name": "Udzu/pudzu",
"id": "7d80ea2dcfc1d79d19d5fe7eb1424cb25d4797bd",
"size": "4481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviz/flagsaverage2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7945"
},
{
"name": "Python",
"bytes": "867429"
},
{
"name": "Roff",
"bytes": "3702309"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import sys
def minimum(a, b):
    r"""Minimum t-norm, also called the Gödel t-norm, is the standard
    semantics for conjunction in Gödel fuzzy logic.

    It occurs in most t-norm based fuzzy logics as the standard semantics
    for weak conjunction.

    .. math:: \top_{\min}(a, b) = \min(a, b)

    (Raw docstring: ``\top`` would otherwise be read as a tab escape.)

    :param a: (N,) Tensor containing the first terms of the t-norm.
    :param b: (N,) Tensor containing the second terms of the t-norm.
    :return: (N,) Tensor containing the resulting t-norm values.
    """
    return tf.minimum(a, b)
def product(a, b):
    r"""
    Product t-norm corresponds to the ordinary product of real numbers.
    The product t-norm is the standard semantics for strong conjunction in product fuzzy logic.
    It is a strict Archimedean t-norm.
    .. math:: \top_prod(a, b) = a * b
    :param a: (N,) Tensor containing the first terms of the t-norm.
    :param b: (N,) Tensor containing the second terms of the t-norm.
    :return: (N,) Tensor containing the resulting t-norm values.
    """
    # Raw docstring (r"""): prevents "\t" in "\top_prod" from being read as a
    # tab escape. Plain "*" broadcasts element-wise on tensors.
    return a * b
def lukasiewicz(a, b):
    r"""
    Łukasiewicz t-norm: the name comes from the fact that the t-norm is the standard semantics for
    strong conjunction in Łukasiewicz fuzzy logic.
    It is a nilpotent Archimedean t-norm, pointwise smaller than the product t-norm.
    .. math:: \top_Luk(a, b) = max(0, a + b - 1)
    :param a: (N,) Tensor containing the first terms of the t-norm.
    :param b: (N,) Tensor containing the second terms of the t-norm.
    :return: (N,) Tensor containing the resulting t-norm values.
    """
    # Raw docstring (r"""): keeps "\top_Luk" from being mangled by the "\t"
    # escape. relu(x) == max(0, x), matching the definition above.
    return tf.nn.relu(a + b - 1)
def nilpotent_minimum(a, b):
    r"""
    Nilpotent minimum t-norm is a standard example of a t-norm which is left-continuous, but not continuous.
    Despite its name, the nilpotent minimum is not a nilpotent t-norm.
    .. math:: \top_D(a, b) = if-then-else(a + b > 1, min(a, b), 0)
    :param a: (N,) Tensor containing the first terms of the t-norm.
    :param b: (N,) Tensor containing the second terms of the t-norm.
    :return: (N,) Tensor containing the resulting t-norm values.
    """
    # Bug fix: tf.cond expects a *scalar* predicate and *callable* branches,
    # so the original call was invalid for element-wise tensors. tf.where is
    # the element-wise select matching the docstring's if-then-else.
    return tf.where(a + b > 1, tf.minimum(a, b), tf.zeros_like(a))
def hamacher_product(a, b):
    r"""
    Hamacher product is a strict Archimedean t-norm, and an important representative of the parametric classes of
    Hamacher t-norms and Schweizer–Sklar t-norms.
    .. math:: \top_H0(a, b) = if-then-else(a == b == 0, 0, (a * b) / (a + b - a * b))
    :param a: (N,) Tensor containing the first terms of the t-norm.
    :param b: (N,) Tensor containing the second terms of the t-norm.
    :return: (N,) Tensor containing the resulting t-norm values.
    """
    # Bug fix: the original used tf.cond (scalar predicate, callable branches)
    # with a chained tensor comparison "a == b == 0", neither of which works
    # element-wise. For a, b in [0, 1] the denominator a + b - a*b is zero
    # exactly when a == b == 0, where the t-norm is defined as 0.
    denominator = a + b - a * b
    # tf.where evaluates both branches, so divide by a safe denominator to
    # avoid NaNs leaking into the unselected branch.
    safe_denominator = tf.where(denominator > 0, denominator, tf.ones_like(denominator))
    return tf.where(denominator > 0, (a * b) / safe_denominator, tf.zeros_like(denominator))
def get_function(function_name):
    """Look up one of this module's t-norm implementations by name.

    :param function_name: name of a function defined in this module.
    :return: the matching callable.
    :raises ValueError: if no attribute with that name exists.
    """
    module = sys.modules[__name__]
    if hasattr(module, function_name):
        return getattr(module, function_name)
    raise ValueError('Unknown t-norm: {}'.format(function_name))
| {
"content_hash": "f3a58024ab86ceeb60f2a86e262df0cb",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 113,
"avg_line_length": 36.23170731707317,
"alnum_prop": 0.6529787950185123,
"repo_name": "uclmr/inferbeddings",
"id": "c83213d2556c8d45c1eb22791fc8b0c58d6ade38",
"size": "3002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inferbeddings/fuzzy/tnorms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "102860"
},
{
"name": "Prolog",
"bytes": "569340"
},
{
"name": "Python",
"bytes": "1319760"
},
{
"name": "R",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "22689"
}
],
"symlink_target": ""
} |
import sys, logging
# Level 15 sits between DEBUG (10) and INFO (20) so both are shown.
logging.basicConfig(level=15, stream=sys.stderr, format="%(levelname)1s:%(filename)10s:%(lineno)3d:%(message)s")
# make log level names shorter so that we can show them
logging.addLevelName(50, 'C')
logging.addLevelName(40, 'E')
logging.addLevelName(30, 'W')
logging.addLevelName(20, 'I')
logging.addLevelName(10, 'D')
logging.info("test1")
import jpype
# Start the JVM before any JClass lookup; convertStrings=False keeps Java
# strings as java.lang.String proxies instead of auto-converting to str.
jpype.startJVM(convertStrings=False)
# Java-side hexlite API interfaces implemented/consumed by the stubs below.
IPluginAtom = jpype.JClass("at.ac.tuwien.kr.hexlite.api.IPluginAtom")
ISolverContext = jpype.JClass("at.ac.tuwien.kr.hexlite.api.ISolverContext")
IAtom = jpype.JClass("at.ac.tuwien.kr.hexlite.api.IAtom")
ISymbol = jpype.JClass("at.ac.tuwien.kr.hexlite.api.ISymbol")
@jpype.JImplements(ISymbol)
class JavaSymbolImpl:
    """Python-side ISymbol: a constant term identified by its name string."""

    def __init__(self, what):
        # Name of the constant; also drives hashing and equality.
        self.what = what

    @jpype.JOverride
    def getType(self):
        return ISymbol.Type.CONSTANT

    @jpype.JOverride
    def getName(self):
        return self.what

    @jpype.JOverride
    def getInteger(self):
        # Dummy value; this stub only models constant (non-integer) terms.
        return 4711

    @jpype.JOverride
    def getArguments(self):
        # Constants carry no argument terms.
        return []

    @jpype.JOverride
    def getTuple(self):
        # A constant's tuple representation is just its own name.
        return [self.getName()]

    @jpype.JOverride
    def hashCode(self):
        # Mask to 31 bits so the value fits into a signed Java int.
        return jpype.JInt(hash(self.what) & 0x7FFFFFFF)

    @jpype.JOverride
    def equals(self, other):
        # Same object, or another symbol with the same name.
        return self == other or self.what == other.what
@jpype.JImplements(IPluginAtom.IQuery)
class JavaQueryImpl:
    """IQuery stub that feeds two fixed constant symbols to the plugin atom."""

    def __init__(self):
        pass

    @jpype.JOverride
    def getInterpretation(self):
        logging.warning("TBD")
        return None

    @jpype.JOverride
    def getInput(self):
        # The Java side expects a java.util.List of ISymbol input terms.
        terms = jpype.JClass("java.util.ArrayList")()
        for name in ('foo', 'bar'):
            terms.add(JavaSymbolImpl(name))
        return terms
@jpype.JImplements(ISolverContext)
class JavaSolverContextImpl:
    """Minimal ISolverContext stub; most operations only log a TBD warning."""
    def __init__(self):
        pass
    @jpype.JOverride
    def storeOutputAtom(self, atom):
        logging.warning("TBD")
        # Typed null so the Java caller receives an IAtom reference.
        return jpype.JObject(None, IAtom)
    @jpype.JOverride
    def storeAtom(self, atom):
        logging.warning("TBD")
        return None
    @jpype.JOverride
    def storeConstant(self, s):
        logging.warning("TBD store %s", s)
        # Wrap the stored value in a fresh Python-side symbol.
        return JavaSymbolImpl('stored({})'.format(s))
    @jpype.JOverride
    def learn(self, nogood):
        logging.warning("TBD")
def jmain():
    """Exercise the StringPlugin's first atom end-to-end through the Java API."""
    logging.info("test2")
    jls = jpype.JClass("java.lang.System")
    # Print the effective classpath from the Java side for debugging.
    jls.out.println("i am printing java.class.path")
    print(jls.getProperty("java.class.path"))
    JStringPlugin = jpype.JClass("at.ac.tuwien.kr.hexlite.stringplugin.StringPlugin")
    logging.info("got JStringPlugin %s", JStringPlugin)
    splugin = JStringPlugin()
    logging.info("got splugin %s", splugin)
    jatoms = splugin.createAtoms()
    logging.info("got atoms %s", jatoms)
    # presumably the first created atom is the concat atom -- TODO confirm ordering
    jconcat = jatoms[0]
    # Drive the atom with the Python-side context/query stubs defined above.
    jcontext = JavaSolverContextImpl()
    jquery = JavaQueryImpl()
    janswer = jconcat.retrieve(jcontext, jquery)
    logging.info("answer is %s", janswer)
    jpype.shutdownJVM()
    logging.info("done")
def main():
    """Run jmain() and log any Java exception together with its stack trace."""
    try:
        jmain()
    # Catching the Java exception class via its jpype proxy.
    except jpype.JClass("java.lang.Exception") as ex:
        logging.error("exception: %s", ex.toString())
        st = ex.getStackTrace()
        for ste in st:
            logging.error("\t at %s", ste.toString())
        #sb.append(ex.getClass().getName() + ": " + ex.getMessage() + "\n");
# Standalone test script: run immediately on execution.
main()
| {
"content_hash": "f55f9141f3f4eefd05619b2db862ff69",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 137,
"avg_line_length": 26.593984962406015,
"alnum_prop": 0.6335877862595419,
"repo_name": "hexhex/hexlite",
"id": "aacc8bbbf99b43a26b87fb8ad6304a3f48ff28aa",
"size": "3537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java-api/src/test/python/test-jpype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1506"
},
{
"name": "Java",
"bytes": "25549"
},
{
"name": "Prolog",
"bytes": "3525"
},
{
"name": "Python",
"bytes": "219697"
},
{
"name": "Shell",
"bytes": "10353"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import FormType
@admin.register(FormType)
class FormTypeAdmin(admin.ModelAdmin):
    """Admin page for FormType with the stock ModelAdmin behaviour."""
| {
"content_hash": "41ec246a1cec55a9bad23093fdda5ed3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 16,
"alnum_prop": 0.79375,
"repo_name": "astrikov-d/dartcms",
"id": "0ca4f73c28b1d23bade751c79b95b32b82e57f21",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dartcms/apps/feedback/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "628621"
},
{
"name": "HTML",
"bytes": "72527"
},
{
"name": "JavaScript",
"bytes": "816668"
},
{
"name": "Python",
"bytes": "240030"
}
],
"symlink_target": ""
} |
import os
# Re-export the package's public entry points.
from .tts import TTS
from .nlu import NLU
# Single source of truth for the version: read it from the bundled file.
VERSION_PATH = os.path.join(os.path.dirname(__file__), "VERSION.txt")
with open(VERSION_PATH) as f:
    __version__ = f.read().strip()
| {
"content_hash": "b9d02a5d5a10229c9729445c521d254b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 21.11111111111111,
"alnum_prop": 0.6684210526315789,
"repo_name": "art1415926535/yandex_speech",
"id": "2d1332c063d5192148fa591ae206f25bb159c070",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yandex_speech/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9704"
}
],
"symlink_target": ""
} |
# Copyright 2016 by caixw, All rights reserved.
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
# @api POST /users/login 登录
# @apiGroup users
#
# @apiRequest json
# @apiParam username string 登录账号
# @apiParam password string 密码
#
# @apiSuccess 201 OK
# @apiParam expires int 过期时间
# @apiParam token string 凭证
# @apiExample json
# {
# "expires": 11111111,
# "token": "adl;kfqwer;q;afd"
# }
#
# @apiError 401 账号或密码错误
def login(w, r):
    """Handler stub for POST /users/login; see the apidoc comment above."""
    print("/**********", "login")
    # TODO
    return
# @api DELETE /users/login 注销登录
# @apiGroup users
#
# @apiRequest json
# @apiHeader Authorization xxxx
#
# @apiSuccess 201 OK
# @apiParam expires int 过期时间
# @apiParam token string 凭证
# @apiExample json
# {
# "expires": 11111111,
# "token": "adl;kfqwer;q;afd"
# }
def logout(w, r):
    """Handler stub for DELETE /users/login; see the apidoc comment above."""
    print("logout", "**********/")
    # TODO
    return
| {
"content_hash": "7482c1a4726a6c5bf444a53ede756052",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 48,
"avg_line_length": 19.304347826086957,
"alnum_prop": 0.6430180180180181,
"repo_name": "a233894432/gowork",
"id": "c868e562fb11fbb6f3f297e7be8ae03d45b92459",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/github.com/caixw/apidoc/input/testdata/python/test1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "604"
},
{
"name": "C",
"bytes": "529"
},
{
"name": "CSS",
"bytes": "73315"
},
{
"name": "Emacs Lisp",
"bytes": "17030"
},
{
"name": "Go",
"bytes": "6808576"
},
{
"name": "Groovy",
"bytes": "1597"
},
{
"name": "HTML",
"bytes": "76585"
},
{
"name": "Java",
"bytes": "1479"
},
{
"name": "JavaScript",
"bytes": "83581"
},
{
"name": "Logos",
"bytes": "1248"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "PHP",
"bytes": "1328"
},
{
"name": "Pascal",
"bytes": "1307"
},
{
"name": "Perl",
"bytes": "1208"
},
{
"name": "Python",
"bytes": "7026"
},
{
"name": "Ruby",
"bytes": "2389"
},
{
"name": "Rust",
"bytes": "1507"
},
{
"name": "Shell",
"bytes": "6549"
},
{
"name": "Swift",
"bytes": "1392"
},
{
"name": "Tcl",
"bytes": "1441"
},
{
"name": "TypeScript",
"bytes": "26072"
},
{
"name": "Vim script",
"bytes": "5247"
},
{
"name": "Yacc",
"bytes": "3224"
}
],
"symlink_target": ""
} |
from aleph.views.context import blueprint as cache
from aleph.views.base_api import blueprint as base_api
from aleph.views.sessions_api import blueprint as sessions_api
from aleph.views.roles_api import blueprint as roles_api
from aleph.views.groups_api import blueprint as groups_api
from aleph.views.permissions_api import blueprint as permissions_api
from aleph.views.collections_api import blueprint as collections_api
from aleph.views.entities_api import blueprint as entities_api
from aleph.views.profiles_api import blueprint as profiles_api
from aleph.views.alerts_api import blueprint as alerts_api
from aleph.views.ingest_api import blueprint as ingest_api
from aleph.views.notifications_api import blueprint as notifications_api
from aleph.views.reconcile_api import blueprint as reconcile_api
from aleph.views.xref_api import blueprint as xref_api
from aleph.views.stream_api import blueprint as stream_api
from aleph.views.archive_api import blueprint as archive_api
from aleph.views.status_api import blueprint as status_api
from aleph.views.mappings_api import blueprint as mappings_api
from aleph.views.entitysets_api import blueprint as entitysets_api
from aleph.views.exports_api import blueprint as exports_api
def mount_app_blueprints(app):
    """Register every aleph API blueprint on the given Flask application.

    Collection-scoped blueprints are mounted under /api/2/collections; all
    others keep the URL prefix baked into the blueprint itself. Registration
    order matches the historical order.
    """
    collection_prefix = "/api/2/collections"
    mounts = [
        (cache, None),
        (base_api, None),
        (sessions_api, None),
        (roles_api, None),
        (groups_api, None),
        (permissions_api, collection_prefix),
        (collections_api, collection_prefix),
        (entities_api, None),
        (profiles_api, None),
        (alerts_api, None),
        (ingest_api, collection_prefix),
        (reconcile_api, None),
        (notifications_api, None),
        (xref_api, None),
        (stream_api, None),
        (archive_api, None),
        (status_api, None),
        (mappings_api, collection_prefix),
        (entitysets_api, None),
        (exports_api, None),
    ]
    for blueprint, prefix in mounts:
        # url_prefix=None is Flask's default, so unprefixed blueprints are
        # registered exactly as before.
        app.register_blueprint(blueprint, url_prefix=prefix)
| {
"content_hash": "7cfbd659dd9b0db05f01efd1fe8b9619",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 51.13953488372093,
"alnum_prop": 0.7989995452478399,
"repo_name": "pudo/aleph",
"id": "43cfcc749156285c2268006a6651d6134ab0765a",
"size": "2199",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/develop/jsonschema-4.1.2",
"path": "aleph/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15949"
},
{
"name": "HTML",
"bytes": "170476"
},
{
"name": "JavaScript",
"bytes": "111287"
},
{
"name": "Makefile",
"bytes": "1319"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "492593"
}
],
"symlink_target": ""
} |
# NOTE(review): Python 2 / early Sublime Text plugin (urllib2, print
# statements, BeautifulSoup 3 import path).
import sublime, sublime_plugin
from xml.dom import minidom
import urllib, logging, urllib2, sys
import re, os.path,HTMLParser
from BeautifulSoup import BeautifulSoup
# Shared urllib2 opener with cookie support so the handcraft.it session
# persists across requests.
hh = urllib2.HTTPHandler()
hsh = urllib2.HTTPSHandler()
#hh.set_http_debuglevel(1)
#hsh.set_http_debuglevel(1)
opener = urllib2.build_opener(hh, hsh, urllib2.HTTPCookieProcessor())
# Credentials cached at module level; filled in lazily by login().
email = ""
pwd = ""
prototype = ""
# Maps a Sublime view id to the (prototype, sheet) pair it was loaded from.
sheet_db = {};
# No-op callback for input-panel change/cancel handlers.
_ = lambda x: 0
class HandcraftSaveCommand(sublime_plugin.TextCommand):
    """Sublime command: save the current view back to its handcraft.it sheet."""
    def run(self, edit):
        # The whole buffer is the sheet body.
        content = self.view.substr(sublime.Region(0, self.view.size()))
        global sheet_db
        view = sublime.active_window().active_view()
        # Look up which prototype/sheet this view was loaded from.
        prototype, sheet = sheet_db[view.id()]
        print "saving", prototype, sheet, content
        def callback():
            saveSheet(prototype, sheet, content )
            # Mark scratch so Sublime does not prompt about unsaved changes.
            view.set_scratch(True)
        # Ensure we are signed in before posting.
        login(callback)
class HandcraftListSheetsCommand(sublime_plugin.TextCommand):
    """Sublime command: browse prototypes/sheets and open one for editing."""
    def run(self, edit):
        def callback(): listSheets(edit)
        login(callback)
def listSheets(edit):
    """Fetch the user's prototypes, then drill down to a sheet via quick panels."""
    # get prototypes
    url = "http://handcraft.it/prototypes"
    response2 = opener.open(url)
    soup = BeautifulSoup(response2)
    # NOTE(review): stray no-op string below looks like a CSS-selector note.
    "section li .visual a"
    prototypes = [os.path.basename(li.find("div", "visual").a["href"]) for li in soup.section.findAll("li")]
    print prototypes
    # show list of prototypes
    def on_select_prototype(index):
        # index is -1 when the user dismissed the panel.
        if index != -1:
            prototype = prototypes[index]
            # retrieve sheets
            url = "http://handcraft.it/workon/%s/sheets" % prototype
            soup = BeautifulSoup(opener.open(url))
            sheets = [li.a.string for li in soup.find("div", id="otherSheets").ol.findAll("li")]
            def on_select_sheet(i):
                if i != -1:
                    def callback(): loadSheet(edit, prototype, sheets[i])
                    login(callback)
            sublime.active_window().show_quick_panel(sheets, on_select_sheet)
    sublime.active_window().show_quick_panel(prototypes, on_select_prototype)
def loadSheet(edit, prototype, sheet):
    """Open the sheet's markup in a new view and remember where it came from."""
    url = "http://handcraft.it/workon/%s/sheets/%s" % (prototype, sheet)
    print url
    response2 = opener.open(url)
    soup = BeautifulSoup(response2)
    h = HTMLParser.HTMLParser()
    # The sheet body is HTML-escaped inside a <textarea>; unescape it.
    code = h.unescape(soup.textarea.string)
    view = sublime.active_window().new_file()
    view.insert(edit, view.size(), code)
    view.set_syntax_file("Packages/XML/XML.tmLanguage")
    global sheet_db
    # Remember the origin so HandcraftSaveCommand can save the view back.
    sheet_db[view.id()] = (prototype, sheet)
def saveSheet(prototype, sheet, content):
    """POST the sheet body back to handcraft.it (relies on the shared opener's session cookie)."""
    url = 'http://handcraft.it/workon/%s/action?type=save' % prototype
    data = urllib.urlencode({ "sheetName" : sheet, "code" : content })
    response2 = opener.open(url, data)
    print "save complete"
def login(callback):
    """Ensure credentials exist, sign in, then invoke callback.

    Prompts for email and password via chained Sublime input panels on first
    use; later calls reuse the module-level cached credentials.
    """
    #print 'checking login'
    if len(email) > 0 and len(pwd) > 0:
        return callback()
    def setEmail(s):
        global email
        email = s
        # Chain to the password prompt once the email is entered.
        sublime.active_window().show_input_panel("pwd", pwd, setPwd, _, _)
    def setPwd(s):
        global pwd
        pwd = s
        cred = urllib.urlencode({ "email" : email, "password" : pwd})
        # Sign in; the session cookie is kept by the shared opener.
        response = opener.open("http://handcraft.it/signin", cred)
        callback()
    sublime.active_window().show_input_panel("email", email, setEmail, _,_)
| {
"content_hash": "1b225f638846b0c4d1b5d4dcdb3b2a94",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 106,
"avg_line_length": 27.821428571428573,
"alnum_prop": 0.6774711168164314,
"repo_name": "remcoder/SublimeHandcraft",
"id": "4469428620a0671ceb09f0da80c06c33a8d7629c",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handcraft.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82605"
}
],
"symlink_target": ""
} |
import collections
import functools
from typing import List
from test_framework import generic_test
from test_framework.test_utils import enable_executor_hook
Endpoint = collections.namedtuple('Endpoint', ('is_closed', 'val'))
Interval = collections.namedtuple('Interval', ('left', 'right'))


def union_of_intervals(intervals: List[Interval]) -> List[Interval]:
    """Compute the union of intervals whose endpoints may be open or closed.

    Returns the minimal list of disjoint intervals, sorted by left endpoint,
    covering exactly the same points as the input. Two intervals meeting at a
    single value merge only if at least one of the touching endpoints is
    closed. Empty input yields an empty list.

    O(n log n) for the sort, O(n) for the merge sweep.
    """
    if not intervals:
        return []
    # Sort by left endpoint value; on ties a closed endpoint sorts first so it
    # always starts any merged run.
    intervals = sorted(intervals,
                       key=lambda i: (i.left.val, not i.left.is_closed))
    result = [intervals[0]]
    for interval in intervals:
        last = result[-1]
        # Overlaps, or touches with at least one closed endpoint at the join.
        if (interval.left.val < last.right.val
                or (interval.left.val == last.right.val
                    and (interval.left.is_closed or last.right.is_closed))):
            # Extend the right endpoint if this interval reaches further, or
            # closes an endpoint at the same value.
            if (interval.right.val > last.right.val
                    or (interval.right.val == last.right.val
                        and interval.right.is_closed)):
                result[-1] = Interval(last.left, interval.right)
        else:
            result.append(interval)
    return result
@enable_executor_hook
def union_of_intervals_wrapper(executor, intervals):
    """Harness adapter: converts tuple rows to Intervals and the result back."""
    # Each input row is (left_val, left_is_closed, right_val, right_is_closed).
    intervals = [
        Interval(Endpoint(x[1], x[0]), Endpoint(x[3], x[2])) for x in intervals
    ]
    result = executor.run(functools.partial(union_of_intervals, intervals))
    return [(i.left.val, i.left.is_closed, i.right.val, i.right.is_closed)
            for i in result]
# Run the EPI generic test harness against the .tsv fixture.
if __name__ == '__main__':
    exit(
        generic_test.generic_test_main('intervals_union.py',
                                       'intervals_union.tsv',
                                       union_of_intervals_wrapper))
| {
"content_hash": "6ffa3f1bca533848903ad7130ae959e3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 29.529411764705884,
"alnum_prop": 0.6354581673306773,
"repo_name": "shobhitmishra/CodingProblems",
"id": "423e0d1c6a3bf61c2091e57e263ef2a034871d5a",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_judge_python/intervals_union.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup
from PIL import Image
import sys
import os
import gi
# Bug fix: gi.require_version() must be called *before* importing from
# gi.repository -- the original called it after the import, where it has no
# effect (and can raise if a different Gdk version was already loaded).
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
def get_comic():
    """Return the URL of the comic image on the xkcd front page.

    Returns None when no comic <div> is found (same as the original).
    """
    page = BeautifulSoup(requests.get("http://xkcd.com/").content)
    # The comic lives in <div id="comic"><img src="//..."></div>; the src is
    # protocol-relative, so prepend the scheme. Only the first hit is used.
    for node in page.findAll("div", {"id": "comic"}):
        return "http:" + node.img['src']
def download_comic():
    """Download today's comic image to 00000000.png in the working directory.

    Uses a context manager so the file handle is closed even if the HTTP
    request raises (the original leaked the handle on error).
    """
    with open('00000000.png', 'wb') as f:
        f.write(requests.get(get_comic()).content)
def create_wallpaper():
    """Compose the downloaded comic centered on a desktop-sized white canvas.

    Writes the result to out.png and removes the temporary comic file.
    """
    download_comic()
    comic = Image.open('00000000.png', 'r')
    comic_w, comic_h = comic.size
    # Fallback resolution when no display / GDK screen is available.
    width = 1440
    height = 900
    try:
        screen = Gdk.Screen.get_default()
        width = screen.get_monitor_geometry(0).width
        height = screen.get_monitor_geometry(0).height
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; keep the fallback size on any GDK failure.
        print("No GDK Found ")
    background = Image.new('RGBA', (width, height), (255, 255, 255, 255))
    background_w, background_h = background.size
    # Bug fix: use integer division -- PIL's paste() requires int pixel
    # offsets, and "/" yields floats on Python 3.
    offset = ((background_w - comic_w) // 2, (background_h - comic_h) // 2)
    background.paste(comic, offset)
    background.save('out.png')
    os.remove("00000000.png")
def os_identification():
    """Name of the current platform, as reported by sys.platform."""
    platform_name = sys.platform
    return platform_name
def set_wallpaper():
    """Build the wallpaper image and install it for the current desktop.

    Supports macOS (via the Dock's desktop-picture database) and
    GNOME/Cinnamon-style Linux desktops via gsettings.
    """
    create_wallpaper()
    if os_identification() == "darwin":
        # Rewrite the Dock's wallpaper database, then restart the Dock so the
        # change takes effect.
        app_script = "sqlite3 ~/Library/Application\ Support/Dock/desktoppicture.db \"update data set value = '{}'\";".format("~/out.png")
        os.system(app_script)
        os.system("killall Dock;")
    elif os_identification() == "linux2":
        # Python 2 reports "linux2".
        linux_path = "file:///out.png"
        mint = "gsettings set org.cinnamon.desktop.background picture-uri " + linux_path
        ubuntu = "gsettings set org.gnome.desktop.background picture-uri " + linux_path
        try:
            os.system(mint)
        except:
            # NOTE(review): os.system rarely raises, so this GNOME fallback is
            # probably never reached -- verify the intended fallback logic.
            os.system(ubuntu)
    elif os_identification() == "linux":
        # Python 3 reports "linux".
        linux_path = "out.png"
        arch = "gsettings set org.gnome.desktop.background picture-uri " + linux_path
        os.system(arch)
# Run only when executed as a script.
if __name__ == '__main__':
    set_wallpaper()
| {
"content_hash": "6ceb2b1921d5d965316d8f5d6b59ed3e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 138,
"avg_line_length": 26.974683544303797,
"alnum_prop": 0.6227123416236509,
"repo_name": "saru95/xkcd",
"id": "0677a70624d20d3c89cd6b460330f85a721d64d5",
"size": "2154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xkcdwall/cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2766"
}
],
"symlink_target": ""
} |
import sys
import hashlib
from pyftpdlib.authorizers import AuthenticationFailed
class Authorizer:
    """Single-user authorizer for pyftpdlib.

    Holds one account (username, password hash, home directory, permission
    string) and implements the duck-typed authorizer interface pyftpdlib
    expects.
    """

    def __init__(self, username, password, directory, permission=None):
        # ``password`` is the SHA-1 hex digest produced by make_password_hash().
        self.username = username
        self.password = password
        self.directory = directory
        # Default to full permissions when none (or an empty string) is given.
        if not permission:
            permission = "elradfmw"
        self.permission = permission

    def has_user(self, username):
        # Exactly one account exists.
        return username == self.username

    def validate_password(self, password):
        # Hash the supplied cleartext and compare against the stored hash.
        return make_password_hash(password) == self.password

    def validate_authentication(self, username, password, handler):
        """Raise AuthenticationFailed unless both username and password match."""
        if self.has_user(username) and self.validate_password(password):
            return
        raise AuthenticationFailed("Authentication failed.")

    def get_home_dir(self, username):
        return self.directory

    def has_perm(self, username, perm, path=None):
        # Permission check is global: username and path are ignored.
        return perm in self.permission

    def get_perms(self, username):
        return self.permission

    def get_msg_login(self, username):
        return "Login successful."

    def get_msg_quit(self, username):
        return "Goodbye."

    def impersonate_user(self, username, password):
        # Single system user: nothing to impersonate.
        pass

    def terminate_impersonation(self, username):
        pass
def make_password_hash(password):
    """Return the SHA-1 hex digest of *password* (UTF-8 encoded).

    NOTE: SHA-1 without salt is weak for password storage; kept for
    compatibility with existing stored hashes.
    """
    digest = hashlib.new('sha1')
    digest.update(password.encode('utf8'))
    return digest.hexdigest()
def main():
    """CLI entry point: print the password hash for the first argument.

    Improvement: exits with a usage message and non-zero status when no
    password is given (the original exited silently with status 0).
    """
    if len(sys.argv) < 2:
        sys.exit("usage: {} <password>".format(sys.argv[0]))
    print("password hash:")
    print(make_password_hash(sys.argv[1]))


if __name__ == '__main__':
    main()
| {
"content_hash": "bce083a052371ce9e992429ceb60f5a0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 25.74137931034483,
"alnum_prop": 0.6584058941728065,
"repo_name": "tokibito/soloftpd",
"id": "3f6a414abceafa1f07f2f67f355ae11071ef35c2",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soloftpd/authorizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14406"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from django.http import HttpResponse, StreamingHttpResponse
from sentry import eventstore
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.lang.native.applecrashreport import AppleCrashReport
from sentry.utils.safe import get_path
class EventAppleCrashReportEndpoint(ProjectEndpoint):
    """Serves an event's crash data rendered as an Apple .crash report."""

    def get(self, request, project, event_id):
        """
        Retrieve an Apple Crash Report from an event
        `````````````````````````````````````````````
        This endpoint returns an Apple crash report for a specific event.
        Only works for events with platform "cocoa" or "native".
        Query params: ``minified=1`` returns the unsymbolicated report;
        ``download`` serves the report as an attachment.
        """
        event = eventstore.get_event_by_id(project.id, event_id)
        if event is None:
            raise ResourceDoesNotExist
        if event.platform not in ("cocoa", "native"):
            return HttpResponse(
                {"message": "Only cocoa events can return an apple crash report"}, status=403
            )
        # Default is the symbolicated report; ?minified=1/true flips it.
        symbolicated = request.GET.get("minified") not in ("1", "true")
        apple_crash_report_string = six.text_type(
            AppleCrashReport(
                threads=get_path(event.data, "threads", "values", filter=True),
                context=event.data.get("contexts"),
                debug_images=get_path(event.data, "debug_meta", "images", filter=True),
                exceptions=get_path(event.data, "exception", "values", filter=True),
                symbolicated=symbolicated,
            )
        )
        response = HttpResponse(apple_crash_report_string, content_type="text/plain")
        if request.GET.get("download") is not None:
            # ?download switches to an attachment with an explicit filename.
            filename = u"{}{}.crash".format(event.event_id, symbolicated and "-symbolicated" or "")
            response = StreamingHttpResponse(apple_crash_report_string, content_type="text/plain")
            response["Content-Length"] = len(apple_crash_report_string)
            response["Content-Disposition"] = 'attachment; filename="%s"' % filename
        return response
| {
"content_hash": "77f9b2c817ff957a1f8f16203d678cb0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 99,
"avg_line_length": 40.32692307692308,
"alnum_prop": 0.6351931330472103,
"repo_name": "beeftornado/sentry",
"id": "15131841ab3ebfe5e715179e6fcf78ca02bf3494",
"size": "2097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/event_apple_crash_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import unittest
from datetime import datetime
import mock
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.operators.check_operator import (
CheckOperator, IntervalCheckOperator, ThresholdCheckOperator, ValueCheckOperator,
)
class TestCheckOperator(unittest.TestCase):
    """CheckOperator must raise when the query returns no rows or any falsy cell."""
    @mock.patch.object(CheckOperator, 'get_db_hook')
    def test_execute_no_records(self, mock_get_db_hook):
        # Empty result set -> the check fails.
        mock_get_db_hook.return_value.get_first.return_value = []
        with self.assertRaises(AirflowException):
            CheckOperator(sql='sql').execute()
    @mock.patch.object(CheckOperator, 'get_db_hook')
    def test_execute_not_all_records_are_true(self, mock_get_db_hook):
        # A single falsy cell ("") makes the whole check fail.
        mock_get_db_hook.return_value.get_first.return_value = ["data", ""]
        with self.assertRaises(AirflowException):
            CheckOperator(sql='sql').execute()
class TestValueCheckOperator(unittest.TestCase):
    """Covers pass_value templating and tolerance-based pass/fail logic."""
    def setUp(self):
        self.task_id = 'test_task'
        self.conn_id = 'default_conn'
    def _construct_operator(self, sql, pass_value, tolerance=None):
        """Build a ValueCheckOperator inside a throwaway DAG."""
        dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
        return ValueCheckOperator(
            dag=dag,
            task_id=self.task_id,
            conn_id=self.conn_id,
            sql=sql,
            pass_value=pass_value,
            tolerance=tolerance)
    def test_pass_value_template_string(self):
        # pass_value is a templated field: "{{ ds }}" renders to the ds value.
        pass_value_str = "2018-03-22"
        operator = self._construct_operator('select date from tab1;', "{{ ds }}")
        operator.render_template_fields({'ds': pass_value_str})
        self.assertEqual(operator.task_id, self.task_id)
        self.assertEqual(operator.pass_value, pass_value_str)
    def test_pass_value_template_string_float(self):
        # Non-string pass_value is stringified by template rendering.
        pass_value_float = 4.0
        operator = self._construct_operator('select date from tab1;', pass_value_float)
        operator.render_template_fields({})
        self.assertEqual(operator.task_id, self.task_id)
        self.assertEqual(operator.pass_value, str(pass_value_float))
    @mock.patch.object(ValueCheckOperator, 'get_db_hook')
    def test_execute_pass(self, mock_get_db_hook):
        # tolerance=1 means +/-100%, so a result of 10 passes against 5.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [10]
        mock_get_db_hook.return_value = mock_hook
        sql = 'select value from tab1 limit 1;'
        operator = self._construct_operator(sql, 5, 1)
        operator.execute(None)
        mock_hook.get_first.assert_called_once_with(sql)
    @mock.patch.object(ValueCheckOperator, 'get_db_hook')
    def test_execute_fail(self, mock_get_db_hook):
        # 11 falls outside 5 +/- 100% ([0, 10]), so the check must raise.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [11]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator('select value from tab1 limit 1;', 5, 1)
        with self.assertRaisesRegex(AirflowException, 'Tolerance:100.0%'):
            operator.execute()
class TestIntervalCheckOperator(unittest.TestCase):
    """Covers ratio formulas, zero handling and threshold violations."""
    def _construct_operator(self, table, metric_thresholds,
                            ratio_formula, ignore_zero):
        """Build an IntervalCheckOperator with the given check configuration."""
        return IntervalCheckOperator(
            task_id='test_task',
            table=table,
            metrics_thresholds=metric_thresholds,
            ratio_formula=ratio_formula,
            ignore_zero=ignore_zero,
        )
    def test_invalid_ratio_formula(self):
        # Unknown formula names must be rejected at construction time.
        with self.assertRaisesRegex(AirflowException, 'Invalid diff_method'):
            self._construct_operator(
                table='test_table',
                metric_thresholds={
                    'f1': 1,
                },
                ratio_formula='abs',
                ignore_zero=False,
            )
    @mock.patch.object(IntervalCheckOperator, 'get_db_hook')
    def test_execute_not_ignore_zero(self, mock_get_db_hook):
        # With ignore_zero=False, an all-zero row fails the check.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [0]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table='test_table',
            metric_thresholds={
                'f1': 1,
            },
            ratio_formula='max_over_min',
            ignore_zero=False,
        )
        with self.assertRaises(AirflowException):
            operator.execute()
    @mock.patch.object(IntervalCheckOperator, 'get_db_hook')
    def test_execute_ignore_zero(self, mock_get_db_hook):
        # With ignore_zero=True, the same all-zero row is tolerated.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [0]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table='test_table',
            metric_thresholds={
                'f1': 1,
            },
            ratio_formula='max_over_min',
            ignore_zero=True,
        )
        operator.execute()
    @mock.patch.object(IntervalCheckOperator, 'get_db_hook')
    def test_execute_min_max(self, mock_get_db_hook):
        """max_over_min ratio is 2.0 here, so thresholds below 2.0 must fail."""
        mock_hook = mock.Mock()
        def returned_row():
            # get_first is called twice: reference row first, current row second.
            rows = [
                [2, 2, 2, 2],  # reference
                [1, 1, 1, 1],  # current
            ]
            yield from rows
        mock_hook.get_first.side_effect = returned_row()
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table='test_table',
            metric_thresholds={
                'f0': 1.0,
                'f1': 1.5,
                'f2': 2.0,
                'f3': 2.5,
            },
            ratio_formula='max_over_min',
            ignore_zero=True,
        )
        # f0, f1 are below 2.0 and f2 equals it; only f3 passes.
        with self.assertRaisesRegex(AirflowException, "f0, f1, f2"):
            operator.execute()
    @mock.patch.object(IntervalCheckOperator, 'get_db_hook')
    def test_execute_diff(self, mock_get_db_hook):
        """relative_diff is |1-3|/3 ~= 0.667, so thresholds below that fail."""
        mock_hook = mock.Mock()
        def returned_row():
            rows = [
                [3, 3, 3, 3],  # reference
                [1, 1, 1, 1],  # current
            ]
            yield from rows
        mock_hook.get_first.side_effect = returned_row()
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table='test_table',
            metric_thresholds={
                'f0': 0.5,
                'f1': 0.6,
                'f2': 0.7,
                'f3': 0.8,
            },
            ratio_formula='relative_diff',
            ignore_zero=True,
        )
        with self.assertRaisesRegex(AirflowException, "f0, f1"):
            operator.execute()
class TestThresholdCheckOperator(unittest.TestCase):
    """Covers numeric and SQL-derived min/max thresholds in all combinations."""
    def _construct_operator(self, sql, min_threshold, max_threshold):
        """Build a ThresholdCheckOperator inside a throwaway DAG."""
        dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
        return ThresholdCheckOperator(
            task_id='test_task',
            sql=sql,
            min_threshold=min_threshold,
            max_threshold=max_threshold,
            dag=dag
        )
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_pass_min_value_max_value(self, mock_get_db_hook):
        # 10 lies within [1, 100]: no exception expected.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [(10,)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select avg(val) from table1 limit 1',
            1,
            100
        )
        operator.execute()
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_fail_min_value_max_value(self, mock_get_db_hook):
        # 10 is below the min threshold 20: the check must raise.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [(10,)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select avg(val) from table1 limit 1',
            20,
            100
        )
        with self.assertRaisesRegex(AirflowException, '10.*20.0.*100.0'):
            operator.execute()
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_pass_min_sql_max_sql(self, mock_get_db_hook):
        # The fake hook evaluates "Select N" to [(N,)] so SQL thresholds work.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: [(int(x.split()[1]),)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select 10',
            'Select 1',
            'Select 100'
        )
        operator.execute()
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_fail_min_sql_max_sql(self, mock_get_db_hook):
        # 10 is below the SQL-derived min threshold 20.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: [(int(x.split()[1]),)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select 10',
            'Select 20',
            'Select 100'
        )
        with self.assertRaisesRegex(AirflowException, '10.*20.*100'):
            operator.execute()
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_pass_min_value_max_sql(self, mock_get_db_hook):
        # Mixed thresholds: numeric min, SQL-derived max.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: [(int(x.split()[1]),)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select 75',
            45,
            'Select 100'
        )
        operator.execute()
    @mock.patch.object(ThresholdCheckOperator, 'get_db_hook')
    def test_fail_min_sql_max_value(self, mock_get_db_hook):
        # 155 exceeds the numeric max threshold 100.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: [(int(x.split()[1]),)]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            'Select 155',
            'Select 45',
            100
        )
        with self.assertRaisesRegex(AirflowException, '155.*45.*100.0'):
            operator.execute()
| {
"content_hash": "9e7d855bde975a9f1ba3285cc9a3be61",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 87,
"avg_line_length": 31.709150326797385,
"alnum_prop": 0.5753890549314645,
"repo_name": "wooga/airflow",
"id": "66bab5afaa31e4f72e8d0ef3dd6c70433205c11c",
"size": "10491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/operators/test_check_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
from unittest import mock
from unittest.mock import patch
import pendulum
import pytest
from pytest import param
from sqlalchemy.engine.url import make_url
from airflow.cli import cli_parser
from airflow.cli.commands import db_command
from airflow.exceptions import AirflowException
class TestCliDb:
    """Tests for the ``airflow db ...`` CLI subcommands.

    Each test patches the underlying ``airflow.utils.db`` entry point (or the
    shell helpers) and verifies that CLI argument parsing forwards the
    expected keyword arguments.
    """

    @classmethod
    def setup_class(cls):
        # Build the CLI argument parser once for the whole class.
        cls.parser = cli_parser.get_parser()

    @mock.patch("airflow.cli.commands.db_command.db.initdb")
    def test_cli_initdb(self, mock_initdb):
        # `db init` delegates straight to db.initdb with no arguments.
        db_command.initdb(self.parser.parse_args(['db', 'init']))
        mock_initdb.assert_called_once_with()

    @mock.patch("airflow.cli.commands.db_command.db.resetdb")
    def test_cli_resetdb(self, mock_resetdb):
        # `db reset --yes` re-initializes the DB by default (skip_init=False).
        db_command.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))
        mock_resetdb.assert_called_once_with(skip_init=False)

    @mock.patch("airflow.cli.commands.db_command.db.resetdb")
    def test_cli_resetdb_skip_init(self, mock_resetdb):
        # `--skip-init` is forwarded as skip_init=True.
        db_command.resetdb(self.parser.parse_args(['db', 'reset', '--yes', '--skip-init']))
        mock_resetdb.assert_called_once_with(skip_init=True)

    @mock.patch("airflow.cli.commands.db_command.db.check_migrations")
    def test_cli_check_migrations(self, mock_wait_for_migrations):
        # The default wait-for-migrations timeout is 60 seconds.
        db_command.check_migrations(self.parser.parse_args(['db', 'check-migrations']))
        mock_wait_for_migrations.assert_called_once_with(timeout=60)

    # NOTE: `--to-version` / `--from-version` values are translated to the
    # matching alembic revision ids (2.2.2 -> 7b2661a43ba3, 2.2.4 -> 587bdf053233).
    @pytest.mark.parametrize(
        'args, called_with',
        [
            ([], dict(to_revision=None, from_revision=None, show_sql_only=False)),
            (['--show-sql-only'], dict(to_revision=None, from_revision=None, show_sql_only=True)),
            (['--to-revision', 'abc'], dict(to_revision='abc', from_revision=None, show_sql_only=False)),
            (
                ['--to-revision', 'abc', '--show-sql-only'],
                dict(to_revision='abc', from_revision=None, show_sql_only=True),
            ),
            (
                ['--to-version', '2.2.2'],
                dict(to_revision='7b2661a43ba3', from_revision=None, show_sql_only=False),
            ),
            (
                ['--to-version', '2.2.2', '--show-sql-only'],
                dict(to_revision='7b2661a43ba3', from_revision=None, show_sql_only=True),
            ),
            (
                ['--to-revision', 'abc', '--from-revision', 'abc123', '--show-sql-only'],
                dict(to_revision='abc', from_revision='abc123', show_sql_only=True),
            ),
            (
                ['--to-revision', 'abc', '--from-version', '2.2.2', '--show-sql-only'],
                dict(to_revision='abc', from_revision='7b2661a43ba3', show_sql_only=True),
            ),
            (
                ['--to-version', '2.2.4', '--from-revision', 'abc123', '--show-sql-only'],
                dict(to_revision='587bdf053233', from_revision='abc123', show_sql_only=True),
            ),
            (
                ['--to-version', '2.2.4', '--from-version', '2.2.2', '--show-sql-only'],
                dict(to_revision='587bdf053233', from_revision='7b2661a43ba3', show_sql_only=True),
            ),
        ],
    )
    @mock.patch("airflow.cli.commands.db_command.db.upgradedb")
    def test_cli_upgrade_success(self, mock_upgradedb, args, called_with):
        # Valid option combinations reach db.upgradedb with normalized kwargs.
        db_command.upgradedb(self.parser.parse_args(['db', 'upgrade', *args]))
        mock_upgradedb.assert_called_once_with(**called_with)

    @pytest.mark.parametrize(
        'args, pattern',
        [
            param(['--to-version', '2.1.25'], 'not supported', id='bad version'),
            param(
                ['--to-revision', 'abc', '--from-revision', 'abc123'],
                'used with `--show-sql-only`',
                id='requires offline',
            ),
            param(
                ['--to-revision', 'abc', '--from-version', '2.0.2'],
                'used with `--show-sql-only`',
                id='requires offline',
            ),
            param(
                ['--to-revision', 'abc', '--from-version', '2.1.25', '--show-sql-only'],
                'Unknown version',
                id='bad version',
            ),
        ],
    )
    @mock.patch("airflow.cli.commands.db_command.db.upgradedb")
    def test_cli_upgrade_failure(self, mock_upgradedb, args, pattern):
        # Invalid combinations exit with an error; db.upgradedb is never reached.
        with pytest.raises(SystemExit, match=pattern):
            db_command.upgradedb(self.parser.parse_args(['db', 'upgrade', *args]))

    @mock.patch("airflow.cli.commands.db_command.execute_interactive")
    @mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
    @mock.patch(
        "airflow.cli.commands.db_command.settings.engine.url", make_url("mysql://root@mysql:3306/airflow")
    )
    def test_cli_shell_mysql(self, mock_tmp_file, mock_execute_interactive):
        # The MySQL client is launched with credentials written to a temp
        # defaults file rather than passed on the command line.
        mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
        db_command.shell(self.parser.parse_args(['db', 'shell']))
        mock_execute_interactive.assert_called_once_with(['mysql', '--defaults-extra-file=/tmp/name'])
        mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
            b'[client]\nhost = mysql\nuser = root\npassword = \nport = 3306'
            b'\ndatabase = airflow'
        )

    @mock.patch("airflow.cli.commands.db_command.execute_interactive")
    @mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
    @mock.patch("airflow.cli.commands.db_command.settings.engine.url", make_url("mysql://root@mysql/airflow"))
    def test_cli_shell_mysql_without_port(self, mock_tmp_file, mock_execute_interactive):
        # A URL without an explicit port still yields the default 3306 in the
        # generated defaults file.
        mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
        db_command.shell(self.parser.parse_args(['db', 'shell']))
        mock_execute_interactive.assert_called_once_with(['mysql', '--defaults-extra-file=/tmp/name'])
        mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
            b'[client]\nhost = mysql\nuser = root\npassword = \nport = 3306'
            b'\ndatabase = airflow'
        )

    @mock.patch("airflow.cli.commands.db_command.execute_interactive")
    @mock.patch(
        "airflow.cli.commands.db_command.settings.engine.url", make_url("sqlite:////root/airflow/airflow.db")
    )
    def test_cli_shell_sqlite(self, mock_execute_interactive):
        # SQLite shells out to sqlite3 with the database file path.
        db_command.shell(self.parser.parse_args(['db', 'shell']))
        mock_execute_interactive.assert_called_once_with(['sqlite3', '/root/airflow/airflow.db'])

    @mock.patch("airflow.cli.commands.db_command.execute_interactive")
    @mock.patch(
        "airflow.cli.commands.db_command.settings.engine.url",
        make_url("postgresql+psycopg2://postgres:airflow@postgres:5432/airflow"),
    )
    def test_cli_shell_postgres(self, mock_execute_interactive):
        # psql receives its connection settings via PG* environment variables.
        db_command.shell(self.parser.parse_args(['db', 'shell']))
        mock_execute_interactive.assert_called_once_with(['psql'], env=mock.ANY)
        _, kwargs = mock_execute_interactive.call_args
        env = kwargs['env']
        postgres_env = {k: v for k, v in env.items() if k.startswith('PG')}
        assert {
            'PGDATABASE': 'airflow',
            'PGHOST': 'postgres',
            'PGPASSWORD': 'airflow',
            'PGPORT': '5432',
            'PGUSER': 'postgres',
        } == postgres_env

    @mock.patch("airflow.cli.commands.db_command.execute_interactive")
    @mock.patch(
        "airflow.cli.commands.db_command.settings.engine.url",
        make_url("postgresql+psycopg2://postgres:airflow@postgres/airflow"),
    )
    def test_cli_shell_postgres_without_port(self, mock_execute_interactive):
        # A portless URL defaults PGPORT to 5432.
        db_command.shell(self.parser.parse_args(['db', 'shell']))
        mock_execute_interactive.assert_called_once_with(['psql'], env=mock.ANY)
        _, kwargs = mock_execute_interactive.call_args
        env = kwargs['env']
        postgres_env = {k: v for k, v in env.items() if k.startswith('PG')}
        assert {
            'PGDATABASE': 'airflow',
            'PGHOST': 'postgres',
            'PGPASSWORD': 'airflow',
            'PGPORT': '5432',
            'PGUSER': 'postgres',
        } == postgres_env

    @mock.patch(
        "airflow.cli.commands.db_command.settings.engine.url",
        make_url("invalid+psycopg2://postgres:airflow@postgres/airflow"),
    )
    def test_cli_shell_invalid(self):
        # Unknown database drivers are rejected with an AirflowException.
        with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg2"):
            db_command.shell(self.parser.parse_args(['db', 'shell']))

    @pytest.mark.parametrize(
        'args, match',
        [
            (['-y', '--to-revision', 'abc', '--to-version', '2.2.0'], 'Cannot supply both'),
            (['-y', '--to-revision', 'abc1', '--from-revision', 'abc2'], 'only .* with `--show-sql-only`'),
            (['-y', '--to-revision', 'abc1', '--from-version', '2.2.2'], 'only .* with `--show-sql-only`'),
            (['-y', '--to-version', '2.2.2', '--from-version', '2.2.2'], 'only .* with `--show-sql-only`'),
            (
                ['-y', '--to-revision', 'abc', '--from-version', '2.2.0', '--from-revision', 'abc'],
                'may not be combined',
            ),
            (['-y', '--to-version', 'abc'], r'Downgrading to .* not supported\.'),
            (['-y'], 'Must provide either'),
        ],
    )
    @mock.patch("airflow.utils.db.downgrade")
    def test_cli_downgrade_invalid(self, mock_dg, args, match):
        """We test some options that should produce an error"""
        with pytest.raises(SystemExit, match=match):
            db_command.downgrade(self.parser.parse_args(['db', 'downgrade', *args]))

    @pytest.mark.parametrize(
        'args, expected',
        [
            (['-y', '--to-revision', 'abc1'], dict(to_revision='abc1')),
            (
                ['-y', '--to-revision', 'abc1', '--from-revision', 'abc2', '-s'],
                dict(to_revision='abc1', from_revision='abc2', show_sql_only=True),
            ),
            (
                ['-y', '--to-revision', 'abc1', '--from-version', '2.2.2', '-s'],
                dict(to_revision='abc1', from_revision='7b2661a43ba3', show_sql_only=True),
            ),
            (
                ['-y', '--to-version', '2.2.2', '--from-version', '2.2.2', '-s'],
                dict(to_revision='7b2661a43ba3', from_revision='7b2661a43ba3', show_sql_only=True),
            ),
            (['-y', '--to-version', '2.2.2'], dict(to_revision='7b2661a43ba3')),
        ],
    )
    @mock.patch("airflow.utils.db.downgrade")
    def test_cli_downgrade_good(self, mock_dg, args, expected):
        # Parameters not supplied fall back to these defaults.
        defaults = dict(from_revision=None, show_sql_only=False)
        db_command.downgrade(self.parser.parse_args(['db', 'downgrade', *args]))
        mock_dg.assert_called_with(**{**defaults, **expected})

    @pytest.mark.parametrize(
        'resp, raise_',
        [
            ('y', False),
            ('Y', False),
            ('n', True),
            ('a', True),  # any other value
        ],
    )
    @mock.patch("airflow.utils.db.downgrade")
    @mock.patch("airflow.cli.commands.db_command.input")
    def test_cli_downgrade_confirm(self, mock_input, mock_dg, resp, raise_):
        # Without -y the command prompts; only 'y'/'Y' proceeds, anything else exits.
        mock_input.return_value = resp
        if raise_:
            with pytest.raises(SystemExit):
                db_command.downgrade(self.parser.parse_args(['db', 'downgrade', '--to-revision', 'abc']))
        else:
            db_command.downgrade(self.parser.parse_args(['db', 'downgrade', '--to-revision', 'abc']))
            mock_dg.assert_called_with(to_revision='abc', from_revision=None, show_sql_only=False)
class TestCLIDBClean:
    """Tests for the ``airflow db clean`` CLI subcommand.

    ``run_cleanup`` is always patched; each test checks how CLI flags are
    translated into its keyword arguments.
    """

    @classmethod
    def setup_class(cls):
        # Build the CLI argument parser once for the whole class.
        cls.parser = cli_parser.get_parser()

    @pytest.mark.parametrize('timezone', ['UTC', 'Europe/Berlin', 'America/Los_Angeles'])
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_date_timezone_omitted(self, run_cleanup_mock, timezone):
        """
        When timezone omitted we should always expect that the timestamp is
        coerced to tz-aware with default timezone
        """
        timestamp = '2021-01-01 00:00:00'
        with patch('airflow.utils.timezone.TIMEZONE', pendulum.timezone(timezone)):
            args = self.parser.parse_args(['db', 'clean', '--clean-before-timestamp', f"{timestamp}", '-y'])
            db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=None,
            dry_run=False,
            # The naive timestamp is interpreted in the configured default tz.
            clean_before_timestamp=pendulum.parse(timestamp, tz=timezone),
            verbose=False,
            confirm=False,
        )

    @pytest.mark.parametrize('timezone', ['UTC', 'Europe/Berlin', 'America/Los_Angeles'])
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_date_timezone_supplied(self, run_cleanup_mock, timezone):
        """
        When tz included in the string then default timezone should not be used.
        """
        timestamp = '2021-01-01 00:00:00+03:00'
        with patch('airflow.utils.timezone.TIMEZONE', pendulum.timezone(timezone)):
            args = self.parser.parse_args(['db', 'clean', '--clean-before-timestamp', f"{timestamp}", '-y'])
            db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=None,
            dry_run=False,
            # The explicit +03:00 offset wins over the configured default tz.
            clean_before_timestamp=pendulum.parse(timestamp),
            verbose=False,
            confirm=False,
        )

    @pytest.mark.parametrize('confirm_arg, expected', [(['-y'], False), ([], True)])
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_confirm(self, run_cleanup_mock, confirm_arg, expected):
        """
        Passing ``-y`` disables the confirmation prompt (confirm=False);
        omitting it leaves confirm=True.
        """
        args = self.parser.parse_args(
            [
                'db',
                'clean',
                '--clean-before-timestamp',
                '2021-01-01',
                *confirm_arg,
            ]
        )
        db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=None,
            dry_run=False,
            clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),
            verbose=False,
            confirm=expected,
        )

    @pytest.mark.parametrize('dry_run_arg, expected', [(['--dry-run'], True), ([], False)])
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):
        """
        ``--dry-run`` is forwarded as dry_run=True; omitted it defaults to False.
        """
        args = self.parser.parse_args(
            [
                'db',
                'clean',
                '--clean-before-timestamp',
                '2021-01-01',
                *dry_run_arg,
            ]
        )
        db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=None,
            dry_run=expected,
            clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),
            verbose=False,
            confirm=True,
        )

    @pytest.mark.parametrize(
        'extra_args, expected', [(['--tables', 'hello, goodbye'], ['hello', 'goodbye']), ([], None)]
    )
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_tables(self, run_cleanup_mock, extra_args, expected):
        """
        A comma-separated ``--tables`` value is split into a list of table
        names; omitting the flag yields table_names=None (all tables).
        """
        args = self.parser.parse_args(
            [
                'db',
                'clean',
                '--clean-before-timestamp',
                '2021-01-01',
                *extra_args,
            ]
        )
        db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=expected,
            dry_run=False,
            clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),
            verbose=False,
            confirm=True,
        )

    @pytest.mark.parametrize('extra_args, expected', [(['--verbose'], True), ([], False)])
    @patch('airflow.cli.commands.db_command.run_cleanup')
    def test_verbose(self, run_cleanup_mock, extra_args, expected):
        """
        ``--verbose`` is forwarded as verbose=True; omitted it defaults to False.
        """
        args = self.parser.parse_args(
            [
                'db',
                'clean',
                '--clean-before-timestamp',
                '2021-01-01',
                *extra_args,
            ]
        )
        db_command.cleanup_tables(args)
        run_cleanup_mock.assert_called_once_with(
            table_names=None,
            dry_run=False,
            clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),
            verbose=expected,
            confirm=True,
        )
| {
"content_hash": "7359bd391342fef50c5acd7c80eacb4c",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 110,
"avg_line_length": 42.17,
"alnum_prop": 0.564678681527152,
"repo_name": "lyft/incubator-airflow",
"id": "125e5d7c3e28d8c80350e2b3635d4bb53efa415e",
"size": "17654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/cli/commands/test_db_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
import asyncio
import asyncio.streams
import collections
import ctypes
import struct
from . import aioutils
# One decoded inotify event: the watch's event flags, the kernel-supplied
# cookie, the (NUL-stripped, UTF-8 decoded) file name, and the user-facing
# alias of the watch that fired (see Watcher.get_event).
Event = collections.namedtuple('Event', ['flags', 'cookie', 'name', 'alias'])

# Handle on the system C library, used for the raw inotify_* syscalls.
# NOTE(review): hard-codes 'libc.so.6' — Linux/glibc only.
_libc = ctypes.cdll.LoadLibrary('libc.so.6')
class LibC:
    """Proxy to C functions for inotify"""

    @classmethod
    def inotify_init(cls):
        # Create a new inotify instance; returns its file descriptor
        # (negative on failure, per the C convention).
        return _libc.inotify_init()

    @classmethod
    def inotify_add_watch(cls, fd, path, flags):
        # Register `path` (UTF-8 encoded for the C ABI) on the inotify fd;
        # returns the watch descriptor, negative on error.
        return _libc.inotify_add_watch(fd, path.encode('utf-8'), flags)

    @classmethod
    def inotify_rm_watch(cls, fd, wd):
        # Remove watch descriptor `wd` from the inotify fd; returns 0 on success.
        return _libc.inotify_rm_watch(fd, wd)
PREFIX = struct.Struct('iIII')
class Watcher:
    """Manages a set of inotify watches and yields their events asynchronously.

    Watch rules may be registered (via `watch`) before or after `setup`;
    rules added before setup are applied once the inotify fd exists.
    """

    def __init__(self):
        # alias -> (path, flags) for every requested rule; survives close/reset.
        self.requests = {}
        self._reset()

    def _reset(self):
        """Clear all runtime state (watch descriptors, stream, transport, fd, loop)."""
        # alias -> watch descriptor returned by inotify_add_watch.
        self.descriptors = {}
        # watch descriptor -> alias (reverse of `descriptors`).
        self.aliases = {}
        self._stream = None
        self._transport = None
        self._fd = None
        self._loop = None

    def watch(self, path, flags, *, alias=None):
        """Add a new watching rule."""
        # The alias defaults to the watched path itself.
        if alias is None:
            alias = path
        if alias in self.requests:
            raise ValueError("A watch request is already scheduled for alias %s" % alias)
        self.requests[alias] = (path, flags)
        if self._fd is not None:
            # We've started, register the watch immediately.
            self._setup_watch(alias, path, flags)

    def unwatch(self, alias):
        """Stop watching a given rule."""
        if alias not in self.descriptors:
            raise ValueError("Unknown watch alias %s; current set is %r" % (alias, list(self.descriptors.keys())))
        wd = self.descriptors[alias]
        # inotify_rm_watch returns 0 on success, non-zero on failure.
        errno = LibC.inotify_rm_watch(self._fd, wd)
        if errno != 0:
            raise IOError("Failed to close watcher %d: errno=%d" % (wd, errno))
        # Drop all three bookkeeping entries for this rule.
        del self.descriptors[alias]
        del self.requests[alias]
        del self.aliases[wd]

    def _setup_watch(self, alias, path, flags):
        """Actual rule setup."""
        assert alias not in self.descriptors, "Registering alias %s twice!" % alias
        # A negative watch descriptor signals failure.
        wd = LibC.inotify_add_watch(self._fd, path, flags)
        if wd < 0:
            raise IOError("Error setting up watch on %s with flags %s: wd=%s" % (
                path, flags, wd))
        self.descriptors[alias] = wd
        self.aliases[wd] = alias

    @asyncio.coroutine
    def setup(self, loop):
        """Start the watcher, registering new watches if any."""
        self._loop = loop
        self._fd = LibC.inotify_init()
        # Apply every rule registered before setup.
        for alias, (path, flags) in self.requests.items():
            self._setup_watch(alias, path, flags)
        # We pass ownership of the fd to the transport; it will close it.
        self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop)

    def close(self):
        """Schedule closure.

        This will close the transport and all related resources.
        """
        self._transport.close()
        self._reset()

    @property
    def closed(self):
        """Are we closed?"""
        return self._transport is None

    @asyncio.coroutine
    def get_event(self):
        """Fetch an event.

        This coroutine will swallow events for removed watches.
        """
        while True:
            # Read the fixed-size header: (wd, flags, cookie, name length).
            prefix = yield from self._stream.readexactly(PREFIX.size)
            if prefix == b'':
                # We got closed, return None.
                return
            wd, flags, cookie, length = PREFIX.unpack(prefix)
            # The variable-length name field follows the header.
            path = yield from self._stream.readexactly(length)
            # All async performed, time to look at the event's content.
            if wd not in self.aliases:
                # Event for a removed watch, skip it.
                continue
            # The name is NUL-padded to `length` bytes; strip the padding.
            decoded_path = struct.unpack('%ds' % length, path)[0].rstrip(b'\x00').decode('utf-8')
            return Event(
                flags=flags,
                cookie=cookie,
                name=decoded_path,
                alias=self.aliases[wd],
            )
| {
"content_hash": "26be195b59a2ae831d6ed5a67c6bb7f5",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 114,
"avg_line_length": 30.823076923076922,
"alnum_prop": 0.5739955078612429,
"repo_name": "rbarrois/aionotify",
"id": "113f68d7d77f4f677bd9ac9505b5e320adfec99e",
"size": "4112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aionotify/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "121"
},
{
"name": "Python",
"bytes": "19579"
}
],
"symlink_target": ""
} |
from .api_types import *
class Notification(object):
    """Common base type for every analysis-server notification wrapper."""
class ServerConnectedNotification(Notification):
    """Signals that the server started correctly.

    Emitted exactly once, after startup but before any request is handled.
    Subscription to this notification cannot be changed.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')
class ServerErrorNotification(Notification):
    """Describes an unexpected server-side failure.

    Covers errors raised outside the scope of a single request (e.g. during
    analysis or while preparing notifications); request-specific problems
    travel in the response instead. Subscription cannot be changed.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def isFatal(self):
        params = self.data['params']
        return params.get('isFatal')

    @property
    def message(self):
        params = self.data['params']
        return params.get('message')

    @property
    def stackTrace(self):
        params = self.data['params']
        return params.get('stackTrace')
class ServerStatusNotification(Notification):
    """Carries the server's current status; unchanged parts are omitted.

    Not delivered by default — subscribe by listing "STATUS" in the services
    of a server.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def analysis(self):
        params = self.data['params']
        return AnalysisStatus.fromJson(params.get('analysis'))

    @property
    def event(self):
        return self.data.get('event')

    @property
    def pub(self):
        params = self.data['params']
        return PubStatus.fromJson(params.get('pub'))
class AnalysisErrorsNotification(Notification):
    """Full set of errors for one file; supersedes any earlier report.

    Can only be suppressed via the --no-error-notification command-line flag.
    """

    def __init__(self, data):
        self.data = data

    @property
    def errors(self):
        parsed = [AnalysisError.fromJson(item) for item in self.data['params'].get('errors')]
        yield from parsed

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')
class AnalysisFlushResultsNotification(Notification):
    """Marks previously reported results for the listed files as invalid.

    Sent when files leave analysis (their root was dropped, or the file no
    longer exists). A later result for a listed file means it is being
    analyzed again. Subscription cannot be changed.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def files(self):
        yield from list(self.data['params'].get('files'))
class AnalysisFoldingNotification(Notification):
    """Folding regions for one file; regions may nest but never overlap.

    Not delivered by default — subscribe by listing "FOLDING" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def regions(self):
        parsed = [FoldingRegion.fromJson(item) for item in self.data['params'].get('regions')]
        yield from parsed
class AnalysisHighlightsNotification(Notification):
    """Highlight regions for one file.

    Not delivered by default — subscribe by listing "HIGHLIGHTS" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def regions(self):
        parsed = [HighlightRegion.fromJson(item) for item in self.data['params'].get('regions')]
        yield from parsed
class AnalysisInvalidateNotification(Notification):
    """Flags a region of one file whose navigation data must be re-requested.

    Not delivered by default — subscribe by listing "INVALIDATE" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def delta(self):
        params = self.data['params']
        return params.get('delta')

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def length(self):
        params = self.data['params']
        return params.get('length')

    @property
    def offset(self):
        params = self.data['params']
        return params.get('offset')
class AnalysisNavigationNotification(Notification):
    """Navigation targets for one file.

    Not delivered by default — subscribe by listing "NAVIGATION" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def files(self):
        yield from list(self.data['params'].get('files'))

    @property
    def regions(self):
        parsed = [NavigationRegion.fromJson(item) for item in self.data['params'].get('regions')]
        yield from parsed

    @property
    def targets(self):
        parsed = [NavigationTarget.fromJson(item) for item in self.data['params'].get('targets')]
        yield from parsed
class AnalysisOccurrencesNotification(Notification):
    """Occurrences of element references within one file.

    Not delivered by default — subscribe by listing "OCCURRENCES" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def occurrences(self):
        parsed = [Occurrences.fromJson(item) for item in self.data['params'].get('occurrences')]
        yield from parsed
class AnalysisOutlineNotification(Notification):
    """Outline structure for one file.

    Not delivered by default — subscribe by listing "OUTLINE" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def outline(self):
        params = self.data['params']
        return Outline.fromJson(params.get('outline'))
class AnalysisOverridesNotification(Notification):
    """Overriding members found in one file.

    Not delivered by default — subscribe by listing "OVERRIDES" in an
    analysis.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def overrides(self):
        parsed = [Override.fromJson(item) for item in self.data['params'].get('overrides')]
        yield from parsed
class CompletionResultsNotification(Notification):
    """Completion suggestions to show the user.

    Each notification carries the complete suggestion list and supersedes
    any previously reported one.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def id(self):
        params = self.data['params']
        return CompletionId.fromJson(params.get('id'))

    @property
    def isLast(self):
        params = self.data['params']
        return params.get('isLast')

    @property
    def replacementLength(self):
        params = self.data['params']
        return params.get('replacementLength')

    @property
    def replacementOffset(self):
        params = self.data['params']
        return params.get('replacementOffset')

    @property
    def results(self):
        parsed = [CompletionSuggestion.fromJson(item) for item in self.data['params'].get('results')]
        yield from parsed
class SearchResultsNotification(Notification):
    """Partial (or final) results of a requested search.

    Unlike other notifications, results here are cumulative: they extend any
    earlier results delivered under the same search id.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def id(self):
        params = self.data['params']
        return SearchId.fromJson(params.get('id'))

    @property
    def isLast(self):
        params = self.data['params']
        return params.get('isLast')

    @property
    def results(self):
        parsed = [SearchResult.fromJson(item) for item in self.data['params'].get('results')]
        yield from parsed
class ExecutionLaunchDataNotification(Notification):
    """Information needed to launch a single file.

    Not delivered by default — subscribe by listing "LAUNCH_DATA" in an
    execution.setSubscriptions request.
    """

    def __init__(self, data):
        self.data = data

    @property
    def event(self):
        return self.data.get('event')

    @property
    def file(self):
        params = self.data['params']
        return params.get('file')

    @property
    def kind(self):
        params = self.data['params']
        return params.get('kind')

    @property
    def referencedFiles(self):
        yield from list(self.data['params'].get('referencedFiles'))
| {
"content_hash": "443c01ff4ac6cf047a6106e5704230a7",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 93,
"avg_line_length": 26.750617283950618,
"alnum_prop": 0.7140483662543844,
"repo_name": "guillermooo/dart-sublime-bundle-releases",
"id": "6649e00f058494212638b587f0bc6ad3f3b1c25b",
"size": "11221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/analyzer/api/notifications.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10564"
},
{
"name": "PowerShell",
"bytes": "464"
},
{
"name": "Python",
"bytes": "713856"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)

# BCM pin assignments for the ultrasonic ranging sensor.
GPIO_TRIG = 17
GPIO_ECHO = 18

GPIO.setup(GPIO_TRIG, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.output(GPIO_TRIG, GPIO.LOW)

if __name__ == '__main__':
    try:
        while True:
            time.sleep(3)
            # Fire a ~10 microsecond trigger pulse.
            GPIO.output(GPIO_TRIG, True)
            time.sleep(0.00001)
            GPIO.output(GPIO_TRIG, False)
            # Pre-initialize the edge timestamps so a loop that never runs
            # (echo already high/low) cannot leave the names unbound.
            SIGNALOFF = time.time()
            while GPIO.input(GPIO_ECHO) == 0:
                SIGNALOFF = time.time()
            SIGNALON = SIGNALOFF
            while GPIO.input(GPIO_ECHO) == 1:
                SIGNALON = time.time()
            # Echo pulse width * 17000 converts to distance
            # (speed of sound ~34000 cm/s, halved for the round trip).
            D = (SIGNALON - SIGNALOFF) * 17000
            # print() works on both Python 2 and 3; the original
            # `print D` statement is a SyntaxError under Python 3.
            print(D)
    except KeyboardInterrupt:
        # Release the GPIO pins on Ctrl-C.
        GPIO.cleanup()
| {
"content_hash": "2f0c2ef7c0e4ef73c033f08c81d62fb4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 46,
"avg_line_length": 24.214285714285715,
"alnum_prop": 0.5501474926253688,
"repo_name": "perchouli/raspberry-gadgets",
"id": "ec8f4ec07e153b3fb460f5bfe908607b783288f1",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ultrasound/HCSR04.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1503"
}
],
"symlink_target": ""
} |
##################################
# Imports; extend sys.path so the local `voca` helpers can be imported.
import sys
sys.path.insert(0, 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog, StringFormatter, OutFileCreate, OdditiesFinder
##################################
# Paths and file names for this mission.
missionName = '005'
AddLog('title', '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file.
raw_file = 'src'
##################################
# Read the raw source; the context manager closes the handle promptly
# (the original `open(...).read()` leaked the file object).
with open(work_dir + raw_file, 'r') as src:
    raw_string_with_tabs = src.read()
# Replace tabs with newlines...
raw_string_with_cr = raw_string_with_tabs.replace('\t', '\n')
# ...then split the string into a list of lines.
raw_list = raw_string_with_cr.splitlines()
# Strip odd entries/characters.
AddLog('subtitle', 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder(raw_list)
# Normalize each remaining line.
ref_list = []
AddLog('subtitle', 'Début de la fonction StringFormatter')
for line in list_without_oddities:
    ref_list.append(StringFormatter(line))
##################################
# Write the output files.
AddLog('subtitle', 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/', '{}_src'.format(missionName), ref_list, 'prenoms masculins italiens')
| {
"content_hash": "ce7325f85405c1ee08f5990cfa2aba7e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 148,
"avg_line_length": 43.77142857142857,
"alnum_prop": 0.6664490861618799,
"repo_name": "sighill/shade_app",
"id": "860b324697958f44d077d67bdbd74e3f71a67c80",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apis/raw/005_raw/005_cleaner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2051"
},
{
"name": "HTML",
"bytes": "5399"
},
{
"name": "Python",
"bytes": "143646"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class UsersConfig(AppConfig):
    """Configuration for users app."""
    # Dotted path of the app package.
    name = 'ariane.apps.users'
    # Human-readable (translatable) label shown in the Django admin.
    verbose_name = _("Users")
| {
"content_hash": "7162237350e6cffe0cec7453f124d35e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.7130044843049327,
"repo_name": "DebVortex/ariane-old-",
"id": "5aaa8ed8723acd4c613586890735074329765b65",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ariane/apps/users/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38455"
},
{
"name": "HTML",
"bytes": "21154"
},
{
"name": "JavaScript",
"bytes": "15058"
},
{
"name": "Makefile",
"bytes": "6317"
},
{
"name": "Python",
"bytes": "40952"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
import numpy as np
from distla_core.analysis.errors import errors_invsqrt
from distla_core.utils import pops
if __name__ == "__main__":
    # `lax` was referenced below but never imported, so this script died
    # with a NameError before doing any work; import it explicitly here.
    from jax import lax

    # Local (per-process) row counts; global ROWS scales by the process grid.
    local_rows = np.array([128, 256, 512, 1024, 2048])
    ROWS = tuple(pops.NROWS * local_rows)
    DTYPES = (np.float32,)  # dtype of the matrices
    P_SZS = (256,)  # panel size of the SUMMA multiplications
    PRECISIONS = (lax.Precision.DEFAULT, lax.Precision.HIGH,
                  lax.Precision.HIGHEST,)  # ASIC matmul precision
    SEEDS = (None,)  # Random seed to initialize input; system clock if None.
    SERIAL = (False,)  # Whether to run in serial or distributed mode.
    EPS = (None, )  # Convergence threshold.
    MAXITER = (50,)  # When to terminate if convergence stagnates.
    S_MIN_EST = (None, -1)  # Estimated lowest singular value;
    #  None means machine epsilon; -1 means the true value.
    S_THRESH = (0., 0.1)  # When to switch to Newton-Schulz from `rogue` iteration.
    S_MIN = (1E-5, 1E-4, 1E-3, 1E-2, 0.1,)  # Smallest nonzero singular value.
    S_MAX = (1.0,)  # Largest singular value of the input matrix.
    EV_DISTRIBUTION = ("linear",)  # `linear` or `geometric` distribution of
    # singular values in the input matrix.
    BATCH_SIZE = 1  # How many runs to assemblage
    REDUCTION_MODE = "min"  # how to assemblage results
    OUTPUT_DIR_PATH = None  # directory of output; CWD if None
    OUTPUT_NAME = "errors_invsqrt"  # output saved to OUTPUT_NAME.csv
    # Sweep the full parameter grid and write results to CSV.
    _ = errors_invsqrt.errors_invsqrt(
        ROWS, dtypes=DTYPES, p_szs=P_SZS, precisions=PRECISIONS, seeds=SEEDS,
        serial=SERIAL, eps=EPS, maxiter=MAXITER, s_min_est=S_MIN_EST,
        s_thresh=S_THRESH, s_min=S_MIN, s_max=S_MAX,
        ev_distribution=EV_DISTRIBUTION, batch_size=BATCH_SIZE,
        reduction_mode=REDUCTION_MODE, output_dir_path=OUTPUT_DIR_PATH,
        output_name=OUTPUT_NAME)
| {
"content_hash": "dfd8a7b1520ad3f48b3fde0e43244299",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 52.34285714285714,
"alnum_prop": 0.6730349344978166,
"repo_name": "google/distla_core",
"id": "1435fa865f444e427582a83fc9d9162c6984dd0a",
"size": "2535",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distla/distla_core/distla_core/asic_execution/analysis/invsqrt/error/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1317325"
},
{
"name": "Shell",
"bytes": "5454"
}
],
"symlink_target": ""
} |
import sys
import socket
import re
import string
# you may use urllib to encode data appropriately
import urllib
from urlparse import urlparse
def help():
    # Print CLI usage (Python 2 print statement).
    print "httpclient.py [GET/POST] [URL]\n"
class HTTPResponse(object):
    """Minimal value object holding an HTTP status code and response body."""

    def __init__(self, code=200, body=""):
        # Keep the status code and payload exactly as supplied.
        self.code, self.body = code, body
class HTTPRequest(object):
    """Assemble the raw text of an HTTP/1.1 request.

    ``headers`` is positional: ``[host]`` for GET, or
    ``[host, content_type, content_length]`` for POST.
    """

    def __init__(self, method, path, headers, body=""):
        self.method = method
        self.path = path
        self.protocol = "HTTP/1.1"
        self.body = body
        self.hostname = "\nHost: " + headers[0]
        self.accept = "\nAccept: application/json, text/html, text/plain"
        # POST carries a form payload, so advertise its type and length;
        # every other verb sends no body-related headers.
        if method == 'POST':
            self.content_type = "\nContent-type: " + headers[1]
            self.content_length = "\nContent-length: " + str(headers[2])
        else:
            self.content_type = ""
            self.content_length = ""

    def build(self):
        # Join request line, headers, blank-line separator and payload.
        return "".join((
            self.method, " ", self.path, " ", self.protocol,
            self.hostname, self.accept,
            self.content_type, self.content_length,
            "\r\n\r\n", self.body,
        ))
class HTTPClient(object):
    """Tiny HTTP/1.1 client built directly on sockets (Python 2)."""
    #def get_host_port(self,url):
    def connect(self, host, port):
        # Open a TCP connection; returns None when the connection fails
        # with errno -5 or 111 (name resolution / connection refused),
        # re-raises any other socket error.
        # use sockets!
        outgoing = socket.socket()
        try:
            outgoing.connect((host,port))
            # Non-blocking so recvall() can poll for data.
            outgoing.setblocking(0)
        except socket.error, ex:
            if ex.errno == -5 or ex.errno == 111:
                outgoing = None
            else:
                raise
        return outgoing
    def get_code(self, data):
        # Parse the status code out of the response status line;
        # 404 when the response does not look like an HTTP response.
        reg_ex_format = "(HTTP/1.[0,1]) ([1-5][0-9][0-9]) (.*)\n"
        match = re.search(reg_ex_format, data)
        code = 0
        if match == None or len(match.groups()) != 3:
            code = 404
        else:
            code = int(match.group(2))
        return code
    def get_headers(self,data):
        # Header parsing is not implemented.
        return None
    def get_body(self, data):
        # The body is everything after the first blank line.
        return data.split("\r\n\r\n", 1)[1]
    # read everything from the socket
    def recvall(self, sock):
        # NOTE(review): on errno 11 (EAGAIN) this busy-polls the
        # non-blocking socket; on any other socket error `part` is left
        # unassigned and the `if (part)` below raises NameError -- confirm.
        buffer = bytearray()
        done = False
        while not done:
            try:
                part = sock.recv(1024)
            except socket.error, ex:
                if ex.errno == 11:
                    continue
            if (part):
                buffer.extend(part)
            else:
                # Empty recv means the peer closed the connection.
                done = not part
        return str(buffer)
    def sendall(self, socket, request):
        # Serialize the HTTPRequest and transmit it in full.
        socket.sendall(request.build())
    def prepend_http(self, url):
        # Ensure the URL carries a scheme so urlparse can find the host.
        if not (url.startswith("http://") or url.startswith('https://')):
            url = "http://" + url
        return url
    def parse_url(self, url):
        # Split a URL into (host, port, path); port defaults to 80 and
        # path to "/" when absent; all three are None on a bad URL.
        url = self.prepend_http(url)
        parsed_url = urlparse(url)
        host = parsed_url.hostname
        port = parsed_url.port
        path = parsed_url.path
        if(host == None):
            host = port = path = None
        if port != None:
            port = int(port)
            # NOTE(review): 65535 is itself a valid TCP port; `>=` also
            # rejects it -- confirm whether `>` was intended.
            if port >= 65535:
                host, port = None, None
        if port == None and host != None:
            port = 80
        if path == None or path == "":
            path = "/"
        return host, port, path
    def build_header_and_body(self, host, args, method):
        # GET sends no body; POST form-encodes `args` and reports length.
        if method == 'GET':
            return ([host], "")
        content_type = "application/x-www-form-urlencoded"
        body = ""
        content_length = 0
        if (args != None):
            body = urllib.urlencode(args, True)
            content_length = len(body)
        headers = [host, content_type, content_length]
        return (headers, body.strip('&'))
    def GET(self, url, args=None):
        # Convenience wrapper for a GET request.
        return self.perform_http_operation(url, args, "GET")
    def POST(self, url, args=None):
        # Convenience wrapper for a POST request.
        return self.perform_http_operation(url, args, "POST")
    def perform_http_operation(self, url, args, method):
        # Shared GET/POST path: parse URL, connect, send, read, and wrap
        # the reply in an HTTPResponse (400 on connect failure).
        host, port, path = self.parse_url(url)
        # Check host and port for None
        connection_socket = self.connect(host, port)
        if connection_socket == None:
            print 'Could not resolve host: ', url
            return HTTPResponse(400)
        headers, body = self.build_header_and_body(host, args, method)
        request = HTTPRequest(method, path, headers, body)
        self.sendall(connection_socket, request)
        data = self.recvall(connection_socket)
        if (data == None):
            return HTTPResponse(404)
        print data
        return HTTPResponse(self.get_code(data), self.get_body(data))
    def command(self, url, command="GET", args=None):
        # Dispatch on the verb string; anything but POST is treated as GET.
        if (command == "POST"):
            return self.POST( url, args )
        else:
            return self.GET( url, args )
if __name__ == "__main__":
    # CLI entry point: httpclient.py [GET/POST] [URL] (Python 2).
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        # No arguments: show usage and exit with an error status.
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # Two arguments: explicit verb, then URL.
        print client.command( sys.argv[2], sys.argv[1] )
    else:
        # One argument: URL only; verb defaults to GET.
        print client.command( sys.argv[1] )
| {
"content_hash": "87887b009c97ede5fcd9bd8547fd2f60",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 145,
"avg_line_length": 31.369426751592357,
"alnum_prop": 0.542741116751269,
"repo_name": "wrflemin/CMPUT404-assignment-web-client",
"id": "20ba7203cec67ec815bc83b3ac300983b2111bdd",
"size": "5773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13077"
}
],
"symlink_target": ""
} |
# -*- coding:utf-8 -*-
import urllib
import urllib2
import json
from voice2cmd import voice2cmd
def joinRoom(url):
    # Join a battle room as trainer 'rsj'; returns the server's raw reply.
    url=url+'join'
    values ={'trainer':'rsj'} # JSON-encode the request payload
    jdata = json.dumps(values)
    req = urllib2.Request(url, jdata) # build the full page request
    response = urllib2.urlopen(req) # send the request
    return response.read() # read the page returned by the server
def rsjGo(channel, url):
    # Send a 'Go' command to the room's channel.  The action is taken from
    # voice recognition of 'golong.wav'; on any failure fall back to a
    # plain Go action.
    url=url+str(channel)+'/excCommand'
    try:
        res = json.loads(voice2cmd('golong.wav'))
        print res
        values ={'trainer':'rsj','action':res['action'], 'vocmon':res['vocmon']}
    except Exception, e:
        # Voice recognition failed -- use the default action.
        values ={'trainer':'rsj','action':'Go'}
    #values ={'trainer':'rsj','action':'Go'}
    jdata = json.dumps(values)
    req = urllib2.Request(url, jdata)
    response = urllib2.urlopen(req)
    return response.read()
def rsjAttack(channel, url):
    # Send an 'Attack' command to the room's channel.  The move is taken
    # from voice recognition of 'tackle.wav'; on any failure fall back to
    # the default move string (kept verbatim -- it is a runtime value).
    url=url+str(channel)+'/excCommand'
    try:
        res = json.loads(voice2cmd('tackle.wav'))
        print res
        values ={'trainer':'rsj','action':'Attack','move':res['move']}
    except Exception, e:
        values ={'trainer':'rsj','action':'Attack','move':'撞击'}
    #values ={'trainer':'rsj','action':'Attack','move':'1'}
    jdata = json.dumps(values)
    req = urllib2.Request(url, jdata)
    response = urllib2.urlopen(req)
    return response.read()
#url='http://192.168.43.77:8000/battle/room/'
#url='http://192.168.43.210:8000/battle/room/'
url='http://127.0.0.1:8000/battle/room/'
channel = 0
# Interactive menu loop (Python 2).
# NOTE(review): input() on Python 2 eval()s whatever the user types --
# unsafe for untrusted users; raw_input() plus int() would be safer.
while(1):
    print "0.join a room"
    print "1.let vcm go out"
    print "2.attack"
    option = input("Option: ")
    if option == 0:
        # Remember the channel the server assigned for later commands.
        res=json.loads(joinRoom(url))
        channel = res['channel']
        print "join channel "+str(channel)
    elif option == 1:
        rsjGo(channel, url)
    elif option == 2:
        rsjAttack(channel, url)
"content_hash": "6fd4b5920f0c9c88f77f7176ab93b36a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 28.89830508474576,
"alnum_prop": 0.6580645161290323,
"repo_name": "jaceyen/vocmon",
"id": "e1e5d444d1b541b250aae08fb7547b53c2adcb36",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/rsj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24237"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Spendenstufe.id a plain, manually-assigned integer primary key."""
    dependencies = [
        ('Produkte', '0003_auto_20171027_1709'),
    ]
    operations = [
        migrations.AlterField(
            model_name='spendenstufe',
            name='id',
            # serialize=False: primary keys are excluded from fixtures.
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
| {
"content_hash": "3fe94fe96d1d159dacae682139cb535f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6089108910891089,
"repo_name": "valuehack/scholarium.at",
"id": "566d6e265f1f587b5a1e2dfd7fc7f4155139baca",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Produkte/migrations/0004_auto_20180327_1717.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "6190"
},
{
"name": "CSS",
"bytes": "144109"
},
{
"name": "HTML",
"bytes": "109787"
},
{
"name": "JavaScript",
"bytes": "702"
},
{
"name": "Python",
"bytes": "216135"
}
],
"symlink_target": ""
} |
from django.contrib import admin
class PermissionAdmin(admin.ModelAdmin):
    """Admin options for folder permission objects."""
    # Three unlabeled fieldsets: target, grantee, capability flags.
    # NOTE(review): only the second entry is a tuple-of-tuples (trailing
    # comma), so 'user'/'group'/'everybody' render on one row while the
    # fields in the first and third entries render as separate rows --
    # confirm this inconsistency is intentional.
    fieldsets = (
        (None, {'fields': (('type', 'folder',))}),
        (None, {'fields': (('user', 'group', 'everybody'),)}),
        (None, {'fields': (
            ('can_edit','can_read','can_add_children')
            )}
        ),
    )
    # Raw id widgets avoid loading every folder/user/group into a select box.
    raw_id_fields = ('folder', 'user', 'group',)
| {
"content_hash": "7d79b72ca8bfd3c428bc13b7064678c5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 32.25,
"alnum_prop": 0.4754521963824289,
"repo_name": "KristianOellegaard/django-filer",
"id": "9498d9233136cb4d2fa63104e25ba0dee858710f",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "filer/admin/permissionadmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "63757"
},
{
"name": "Python",
"bytes": "193028"
},
{
"name": "Shell",
"bytes": "4981"
}
],
"symlink_target": ""
} |
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None, cmap='gray'):
    """
    Generate visual samples and plot on a grid
    :param get_imgs_fun: function that given a int return a corresponding number of generated samples
    :param img_shape: shape of image to plot
    :param plot_side: samples per row (and column). Generated plot_side x plot_side samples
    :param savepath: if given, save plot to such filepath, otherwise show plot
    :param cmap: matplotlib specific cmap to use for the plot
    """
    # squeeze=False keeps axarr 2-D even when plot_side == 1, so the
    # axarr[row, col] indexing below works for a 1x1 grid (the original
    # crashed there because subplots() squeezes single axes to a scalar).
    fig, axarr = plt.subplots(plot_side, plot_side, squeeze=False)
    samples = get_imgs_fun(plot_side * plot_side)
    for row in range(plot_side):
        for col in range(plot_side):
            axarr[row, col].imshow(samples[plot_side * row + col].reshape(img_shape), cmap=cmap)
            axarr[row, col].set_title('')
            axarr[row, col].axis('off')
    # Save figure if savepath is provided, otherwise display it.
    if savepath:
        fig.savefig(savepath)
        plt.close()
    else:
        plt.show()
def plot_correlation(df):
    """Print the correlation matrix of `df` and plot its lower triangle."""
    # Correlation
    corr = df.corr()
    print(corr)
    # Mask the redundant upper triangle of the symmetric matrix.
    # np.bool was removed in NumPy >= 1.24; the builtin bool dtype is the
    # documented replacement.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    sns.heatmap(corr, mask=mask)
    # seaborn removed its `sns.plt` pyplot alias; call pyplot directly
    # (plt is already imported at the top of this module).
    plt.show()
"""
# Rotate tick labels
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=25)
# Save plot
sns_plot.savefig("output.png")
fig = swarm_plot.get_figure()
fig.savefig(...)
# Matplotlib to Plotly
import plotly.tools as tls
plotly_fig = tls.mpl_to_plotly(mpl_fig)
py.iplot(plotly_fig
"""
##############################
# Animation
##############################
#%matplotlib notebook # rely on notebook mode as the inline doesn't seem to work in Jupyter
from matplotlib import animation
#plt.rcParams['animation.ffmpeg_path'] = '~/path/to/bin/ffmpeg'
def animated_plot(img_width: int, img_height: int, nb_frames: int, outpath: str = None):
    """Template for building a matplotlib animation.

    Saves the animation to `outpath` (via FFMpeg) when given; otherwise
    returns the FuncAnimation object for inline notebook display.
    """
    # Setup plot
    dpi = 100
    if outpath:
        # Size the figure so the saved video has the requested pixel size.
        fig, ax = plt.subplots(dpi=dpi, figsize=(img_width / dpi, img_height / dpi))
    else:
        fig, ax = plt.subplots(dpi=dpi, figsize=(5, 5))
    plt.axis('off')
    # The commented lines below are intentional scaffolding: examples of
    # artists one might create here and update per-frame in animate().
    #line, = plt.plot([0, 1.0], [init_intercept, 1.0 * init_slope + init_intercept], 'k-')
    #epoch_text = plt.text(0, 0, "Epoch 0")
    #im = ax.imshow(np.zeros((28, 28)), cmap='gray')
    def animate(i, ):
        # Per-frame update callback; fill in for your use case.
        pass
        #current_intercept, current_slope = res[i]
        #line.set_ydata([current_intercept, 1.0 * current_slope + current_intercept])
        #epoch_text.set_text("Epoch {}, cost {:.3f}".format(i, history[i][0]))
        #return line,
        # one other option is to set the data like
        #im.set_data(np.zeros((28, 28))+1)
        #ax.imshow(system.B, cmap='gray')
    # Animate
    ani = animation.FuncAnimation(fig, animate, frames=nb_frames, interval=100,
                                  fargs=[]) # be sure to pass the additional args needed for the animation
    if outpath:
        ani.save(outpath, animation.FFMpegFileWriter(fps=30))
    else:
        return ani
"""
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30)
animation.writers.list()
"""
##############################
# Drawing
##############################
def draw_template():
    # Minimal Pillow drawing example: white square canvas plus an ellipse.
    from PIL import Image, ImageDraw
    img_size = 1000
    img = Image.new('RGB', (img_size, img_size), (255, 255, 255))
    draw = ImageDraw.Draw(img)
    # NOTE(review): the image is neither returned, shown, nor saved --
    # this function is a template only.
    draw.ellipse((20, 20, 180, 180), fill='blue', outline='blue')
"content_hash": "bdc350d5eec328e3626551697be2aaef",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 107,
"avg_line_length": 31.95798319327731,
"alnum_prop": 0.6205627136471207,
"repo_name": "5agado/data-science-learning",
"id": "1bd262b1c1bb4a21b3dfd267b1ea741f45ec2825",
"size": "3803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ds_utils/plot_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "565"
},
{
"name": "Jupyter Notebook",
"bytes": "2011939"
},
{
"name": "Python",
"bytes": "550056"
}
],
"symlink_target": ""
} |
from django.db.models import (
CharField, F, Func, IntegerField, OuterRef, Q, Subquery, Value,
)
from django.db.models.fields.json import KeyTextTransform, KeyTransform
from django.db.models.functions import Cast, Concat, Substr
from django.test.utils import Approximate, ignore_warnings
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from . import PostgreSQLTestCase
from .models import AggregateTestModel, HotelReservation, Room, StatTestModel
try:
from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JSONBAgg,
RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope,
RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg,
)
from django.contrib.postgres.fields import ArrayField
except ImportError:
pass # psycopg2 is not installed
class TestGeneralAggregate(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        # Four fixture rows with varying bool/char/text/int/json values;
        # shared (read-only) by every test in this class.
        cls.aggs = AggregateTestModel.objects.bulk_create([
            AggregateTestModel(
                boolean_field=True,
                char_field='Foo1',
                text_field='Text1',
                integer_field=0,
            ),
            AggregateTestModel(
                boolean_field=False,
                char_field='Foo2',
                text_field='Text2',
                integer_field=1,
                json_field={'lang': 'pl'},
            ),
            AggregateTestModel(
                boolean_field=False,
                char_field='Foo4',
                text_field='Text4',
                integer_field=2,
                json_field={'lang': 'en'},
            ),
            AggregateTestModel(
                boolean_field=True,
                char_field='Foo3',
                text_field='Text3',
                integer_field=0,
                json_field={'breed': 'collie'},
            ),
        ])
    @ignore_warnings(category=RemovedInDjango50Warning)
    def test_empty_result_set(self):
        # Each aggregate's result over an empty table, checked both via the
        # .none() short-circuit (0 queries) and a real query (1 query).
        AggregateTestModel.objects.all().delete()
        tests = [
            (ArrayAgg('char_field'), []),
            (ArrayAgg('integer_field'), []),
            (ArrayAgg('boolean_field'), []),
            (BitAnd('integer_field'), None),
            (BitOr('integer_field'), None),
            (BoolAnd('boolean_field'), None),
            (BoolOr('boolean_field'), None),
            (JSONBAgg('integer_field'), []),
            (StringAgg('char_field', delimiter=';'), ''),
        ]
        for aggregation, expected_result in tests:
            with self.subTest(aggregation=aggregation):
                # Empty result with non-execution optimization.
                with self.assertNumQueries(0):
                    values = AggregateTestModel.objects.none().aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
                # Empty result when query must be executed.
                with self.assertNumQueries(1):
                    values = AggregateTestModel.objects.aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
    def test_default_argument(self):
        # The `default` argument must be returned for an empty table, both
        # via the .none() short-circuit and an executed query.
        AggregateTestModel.objects.all().delete()
        tests = [
            (ArrayAgg('char_field', default=['<empty>']), ['<empty>']),
            (ArrayAgg('integer_field', default=[0]), [0]),
            (ArrayAgg('boolean_field', default=[False]), [False]),
            (BitAnd('integer_field', default=0), 0),
            (BitOr('integer_field', default=0), 0),
            (BoolAnd('boolean_field', default=False), False),
            (BoolOr('boolean_field', default=False), False),
            (JSONBAgg('integer_field', default=Value('["<empty>"]')), ['<empty>']),
            (StringAgg('char_field', delimiter=';', default=Value('<empty>')), '<empty>'),
        ]
        for aggregation, expected_result in tests:
            with self.subTest(aggregation=aggregation):
                # Empty result with non-execution optimization.
                with self.assertNumQueries(0):
                    values = AggregateTestModel.objects.none().aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
                # Empty result when query must be executed.
                with self.assertNumQueries(1):
                    values = AggregateTestModel.objects.aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
    def test_convert_value_deprecation(self):
        # Aggregating without `default` warns about the Django 5.0 behavior
        # change; passing any explicit `default` silences the warning.
        AggregateTestModel.objects.all().delete()
        queryset = AggregateTestModel.objects.all()
        with self.assertWarnsMessage(RemovedInDjango50Warning, ArrayAgg.deprecation_msg):
            queryset.aggregate(aggregation=ArrayAgg('boolean_field'))
        with self.assertWarnsMessage(RemovedInDjango50Warning, JSONBAgg.deprecation_msg):
            queryset.aggregate(aggregation=JSONBAgg('integer_field'))
        with self.assertWarnsMessage(RemovedInDjango50Warning, StringAgg.deprecation_msg):
            queryset.aggregate(aggregation=StringAgg('char_field', delimiter=';'))
        # No warnings raised if default argument provided.
        self.assertEqual(
            queryset.aggregate(aggregation=ArrayAgg('boolean_field', default=None)),
            {'aggregation': None},
        )
        self.assertEqual(
            queryset.aggregate(aggregation=JSONBAgg('integer_field', default=None)),
            {'aggregation': None},
        )
        self.assertEqual(
            queryset.aggregate(
                aggregation=StringAgg('char_field', delimiter=';', default=None),
            ),
            {'aggregation': None},
        )
        self.assertEqual(
            queryset.aggregate(aggregation=ArrayAgg('boolean_field', default=Value([]))),
            {'aggregation': []},
        )
        self.assertEqual(
            queryset.aggregate(aggregation=JSONBAgg('integer_field', default=Value('[]'))),
            {'aggregation': []},
        )
        self.assertEqual(
            queryset.aggregate(
                aggregation=StringAgg('char_field', delimiter=';', default=Value('')),
            ),
            {'aggregation': ''},
        )
    def test_array_agg_charfield(self):
        # Unordered ArrayAgg preserves table insertion order of the fixture.
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
    def test_array_agg_charfield_ordering(self):
        # Every supported `ordering` spelling: F() with asc/desc, plain
        # field names, expressions, and mixed tuples/lists of the above.
        ordering_test_cases = (
            (F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
            (F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
            (F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
            ([F('boolean_field'), F('char_field').desc()], ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
            ((F('boolean_field'), F('char_field').desc()), ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
            ('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
            ('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
            (Concat('char_field', Value('@')), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
            (Concat('char_field', Value('@')).desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
            (
                (Substr('char_field', 1, 1), F('integer_field'), Substr('char_field', 4, 1).desc()),
                ['Foo3', 'Foo1', 'Foo2', 'Foo4'],
            ),
        )
        for ordering, expected_output in ordering_test_cases:
            with self.subTest(ordering=ordering, expected_output=expected_output):
                values = AggregateTestModel.objects.aggregate(
                    arrayagg=ArrayAgg('char_field', ordering=ordering)
                )
                self.assertEqual(values, {'arrayagg': expected_output})
    def test_array_agg_integerfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
    def test_array_agg_integerfield_ordering(self):
        values = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg('integer_field', ordering=F('integer_field').desc())
        )
        self.assertEqual(values, {'arrayagg': [2, 1, 0, 0]})
    def test_array_agg_booleanfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': [True, False, False, True]})
    def test_array_agg_booleanfield_ordering(self):
        ordering_test_cases = (
            (F('boolean_field').asc(), [False, False, True, True]),
            (F('boolean_field').desc(), [True, True, False, False]),
            (F('boolean_field'), [False, False, True, True]),
        )
        for ordering, expected_output in ordering_test_cases:
            with self.subTest(ordering=ordering, expected_output=expected_output):
                values = AggregateTestModel.objects.aggregate(
                    arrayagg=ArrayAgg('boolean_field', ordering=ordering)
                )
                self.assertEqual(values, {'arrayagg': expected_output})
    def test_array_agg_jsonfield(self):
        # KeyTransform pulls json_field['lang']; the filter skips rows
        # without that key.
        values = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg(
                KeyTransform('lang', 'json_field'),
                filter=Q(json_field__lang__isnull=False),
            ),
        )
        self.assertEqual(values, {'arrayagg': ['pl', 'en']})
    def test_array_agg_jsonfield_ordering(self):
        values = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg(
                KeyTransform('lang', 'json_field'),
                filter=Q(json_field__lang__isnull=False),
                ordering=KeyTransform('lang', 'json_field'),
            ),
        )
        self.assertEqual(values, {'arrayagg': ['en', 'pl']})
    def test_array_agg_filter(self):
        values = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg('integer_field', filter=Q(integer_field__gt=0)),
        )
        self.assertEqual(values, {'arrayagg': [1, 2]})
    def test_array_agg_lookups(self):
        # Array lookups (__overlap) must work on an annotated ArrayAgg.
        aggr1 = AggregateTestModel.objects.create()
        aggr2 = AggregateTestModel.objects.create()
        StatTestModel.objects.create(related_field=aggr1, int1=1, int2=0)
        StatTestModel.objects.create(related_field=aggr1, int1=2, int2=0)
        StatTestModel.objects.create(related_field=aggr2, int1=3, int2=0)
        StatTestModel.objects.create(related_field=aggr2, int1=4, int2=0)
        qs = StatTestModel.objects.values('related_field').annotate(
            array=ArrayAgg('int1')
        ).filter(array__overlap=[2]).values_list('array', flat=True)
        self.assertCountEqual(qs.get(), [1, 2])
    def test_bit_and_general(self):
        # Bitwise AND across {0, 1} is 0.
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})
    def test_bit_and_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 1})
    def test_bit_and_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})
    def test_bit_or_general(self):
        # Bitwise OR across {0, 1} is 1.
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})
    def test_bit_or_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})
    def test_bit_or_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 0})
    def test_bool_and_general(self):
        # The fixture has mixed True/False rows, so BoolAnd is False.
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': False})
    def test_bool_and_q_object(self):
        # A Q object condition is also a valid BoolAnd argument.
        values = AggregateTestModel.objects.aggregate(
            booland=BoolAnd(Q(integer_field__gt=2)),
        )
        self.assertEqual(values, {'booland': False})
    def test_bool_or_general(self):
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': True})
    def test_bool_or_q_object(self):
        # No fixture row has integer_field > 2, so BoolOr is False.
        values = AggregateTestModel.objects.aggregate(
            boolor=BoolOr(Q(integer_field__gt=2)),
        )
        self.assertEqual(values, {'boolor': False})
    def test_string_agg_requires_delimiter(self):
        # `delimiter` is a mandatory keyword argument for StringAgg.
        with self.assertRaises(TypeError):
            AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
    def test_string_agg_delimiter_escaping(self):
        # A single-quote delimiter must be escaped correctly in SQL.
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter="'"))
        self.assertEqual(values, {'stringagg': "Foo1'Foo2'Foo4'Foo3"})
    def test_string_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo4;Foo3'})
    def test_string_agg_default_output_field(self):
        # Works on a TextField without an explicit output_field.
        values = AggregateTestModel.objects.aggregate(
            stringagg=StringAgg('text_field', delimiter=';'),
        )
        self.assertEqual(values, {'stringagg': 'Text1;Text2;Text4;Text3'})
    def test_string_agg_charfield_ordering(self):
        ordering_test_cases = (
            (F('char_field').desc(), 'Foo4;Foo3;Foo2;Foo1'),
            (F('char_field').asc(), 'Foo1;Foo2;Foo3;Foo4'),
            (F('char_field'), 'Foo1;Foo2;Foo3;Foo4'),
            ('char_field', 'Foo1;Foo2;Foo3;Foo4'),
            ('-char_field', 'Foo4;Foo3;Foo2;Foo1'),
            (Concat('char_field', Value('@')), 'Foo1;Foo2;Foo3;Foo4'),
            (Concat('char_field', Value('@')).desc(), 'Foo4;Foo3;Foo2;Foo1'),
        )
        for ordering, expected_output in ordering_test_cases:
            with self.subTest(ordering=ordering, expected_output=expected_output):
                values = AggregateTestModel.objects.aggregate(
                    stringagg=StringAgg('char_field', delimiter=';', ordering=ordering)
                )
                self.assertEqual(values, {'stringagg': expected_output})
    def test_string_agg_jsonfield_ordering(self):
        # KeyTextTransform yields text, but StringAgg still needs an
        # explicit CharField output_field here.
        values = AggregateTestModel.objects.aggregate(
            stringagg=StringAgg(
                KeyTextTransform('lang', 'json_field'),
                delimiter=';',
                ordering=KeyTextTransform('lang', 'json_field'),
                output_field=CharField(),
            ),
        )
        self.assertEqual(values, {'stringagg': 'en;pl'})
    def test_string_agg_filter(self):
        values = AggregateTestModel.objects.aggregate(
            stringagg=StringAgg(
                'char_field',
                delimiter=';',
                filter=Q(char_field__endswith='3') | Q(char_field__endswith='1'),
            )
        )
        self.assertEqual(values, {'stringagg': 'Foo1;Foo3'})
    def test_orderable_agg_alternative_fields(self):
        # Ordering by a field other than the one being aggregated.
        values = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg('integer_field', ordering=F('char_field').asc())
        )
        self.assertEqual(values, {'arrayagg': [0, 1, 0, 2]})
def test_jsonb_agg(self):
values = AggregateTestModel.objects.aggregate(jsonbagg=JSONBAgg('char_field'))
self.assertEqual(values, {'jsonbagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
def test_jsonb_agg_charfield_ordering(self):
ordering_test_cases = (
(F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
(F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(Concat('char_field', Value('@')), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
(Concat('char_field', Value('@')).desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg('char_field', ordering=ordering),
)
self.assertEqual(values, {'jsonbagg': expected_output})
def test_jsonb_agg_integerfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg('integer_field', ordering=F('integer_field').desc()),
)
self.assertEqual(values, {'jsonbagg': [2, 1, 0, 0]})
def test_jsonb_agg_booleanfield_ordering(self):
ordering_test_cases = (
(F('boolean_field').asc(), [False, False, True, True]),
(F('boolean_field').desc(), [True, True, False, False]),
(F('boolean_field'), [False, False, True, True]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg('boolean_field', ordering=ordering),
)
self.assertEqual(values, {'jsonbagg': expected_output})
def test_jsonb_agg_jsonfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
jsonbagg=JSONBAgg(
KeyTransform('lang', 'json_field'),
filter=Q(json_field__lang__isnull=False),
ordering=KeyTransform('lang', 'json_field'),
),
)
self.assertEqual(values, {'jsonbagg': ['en', 'pl']})
    def test_jsonb_agg_key_index_transforms(self):
        """Index/key transforms (requirements__0__sea_view) work on a JSONBAgg
        annotation, and the aggregate honors its ordering."""
        room101 = Room.objects.create(number=101)
        room102 = Room.objects.create(number=102)
        datetimes = [
            timezone.datetime(2018, 6, 20),
            timezone.datetime(2018, 6, 24),
            timezone.datetime(2018, 6, 28),
        ]
        # Two reservations for room 102; with '-start' ordering the later one
        # (which has sea_view=True) lands at index 0 of the aggregated array.
        HotelReservation.objects.create(
            datespan=(datetimes[0].date(), datetimes[1].date()),
            start=datetimes[0],
            end=datetimes[1],
            room=room102,
            requirements={'double_bed': True, 'parking': True},
        )
        HotelReservation.objects.create(
            datespan=(datetimes[1].date(), datetimes[2].date()),
            start=datetimes[1],
            end=datetimes[2],
            room=room102,
            requirements={'double_bed': False, 'sea_view': True, 'parking': False},
        )
        # Room 101's only reservation has sea_view=False, so it's filtered out.
        HotelReservation.objects.create(
            datespan=(datetimes[0].date(), datetimes[2].date()),
            start=datetimes[0],
            end=datetimes[2],
            room=room101,
            requirements={'sea_view': False},
        )
        values = Room.objects.annotate(
            requirements=JSONBAgg(
                'hotelreservation__requirements',
                ordering='-hotelreservation__start',
            )
        ).filter(requirements__0__sea_view=True).values('number', 'requirements')
        self.assertSequenceEqual(values, [
            {'number': 102, 'requirements': [
                {'double_bed': False, 'sea_view': True, 'parking': False},
                {'double_bed': True, 'parking': True},
            ]},
        ])
    def test_string_agg_array_agg_ordering_in_subquery(self):
        """Ordered ArrayAgg/StringAgg keep their ordering when the aggregate is
        evaluated inside a correlated Subquery."""
        # For each test row create two stats (int1=i, i+1) with int2 values
        # arranged so that '-int2' ordering yields ascending int1.
        stats = []
        for i, agg in enumerate(AggregateTestModel.objects.order_by('char_field')):
            stats.append(StatTestModel(related_field=agg, int1=i, int2=i + 1))
            stats.append(StatTestModel(related_field=agg, int1=i + 1, int2=i))
        StatTestModel.objects.bulk_create(stats)
        for aggregate, expected_result in (
            (
                ArrayAgg('stattestmodel__int1', ordering='-stattestmodel__int2'),
                [('Foo1', [0, 1]), ('Foo2', [1, 2]), ('Foo3', [2, 3]), ('Foo4', [3, 4])],
            ),
            (
                StringAgg(
                    Cast('stattestmodel__int1', CharField()),
                    delimiter=';',
                    ordering='-stattestmodel__int2',
                ),
                [('Foo1', '0;1'), ('Foo2', '1;2'), ('Foo3', '2;3'), ('Foo4', '3;4')],
            ),
        ):
            with self.subTest(aggregate=aggregate.__class__.__name__):
                # Correlated subquery: aggregate the stats of the outer row.
                subquery = AggregateTestModel.objects.filter(
                    pk=OuterRef('pk'),
                ).annotate(agg=aggregate).values('agg')
                values = AggregateTestModel.objects.annotate(
                    agg=Subquery(subquery),
                ).order_by('char_field').values_list('char_field', 'agg')
                self.assertEqual(list(values), expected_result)
    def test_string_agg_array_agg_filter_in_subquery(self):
        """Filtered ArrayAgg/StringAgg work inside a correlated Subquery; rows
        with no matching related values aggregate to None."""
        # Only self.aggs[0] ('Foo1') gets related stats; 'Foo2' has none.
        StatTestModel.objects.bulk_create([
            StatTestModel(related_field=self.aggs[0], int1=0, int2=5),
            StatTestModel(related_field=self.aggs[0], int1=1, int2=4),
            StatTestModel(related_field=self.aggs[0], int1=2, int2=3),
        ])
        for aggregate, expected_result in (
            (
                ArrayAgg('stattestmodel__int1', filter=Q(stattestmodel__int2__gt=3)),
                [('Foo1', [0, 1]), ('Foo2', None)],
            ),
            (
                StringAgg(
                    Cast('stattestmodel__int2', CharField()),
                    delimiter=';',
                    filter=Q(stattestmodel__int1__lt=2),
                ),
                [('Foo1', '5;4'), ('Foo2', None)],
            ),
        ):
            with self.subTest(aggregate=aggregate.__class__.__name__):
                subquery = AggregateTestModel.objects.filter(
                    pk=OuterRef('pk'),
                ).annotate(agg=aggregate).values('agg')
                values = AggregateTestModel.objects.annotate(
                    agg=Subquery(subquery),
                ).filter(
                    char_field__in=['Foo1', 'Foo2'],
                ).order_by('char_field').values_list('char_field', 'agg')
                self.assertEqual(list(values), expected_result)
def test_string_agg_filter_in_subquery_with_exclude(self):
subquery = AggregateTestModel.objects.annotate(
stringagg=StringAgg(
'char_field',
delimiter=';',
filter=Q(char_field__endswith='1'),
)
).exclude(stringagg='').values('id')
self.assertSequenceEqual(
AggregateTestModel.objects.filter(id__in=Subquery(subquery)),
[self.aggs[0]],
)
def test_ordering_isnt_cleared_for_array_subquery(self):
inner_qs = AggregateTestModel.objects.order_by('-integer_field')
qs = AggregateTestModel.objects.annotate(
integers=Func(
Subquery(inner_qs.values('integer_field')),
function='ARRAY',
output_field=ArrayField(base_field=IntegerField()),
),
)
self.assertSequenceEqual(
qs.first().integers,
inner_qs.values_list('integer_field', flat=True),
)
class TestAggregateDistinct(PostgreSQLTestCase):
    """distinct=True must deduplicate aggregated values; distinct=False must
    keep duplicates."""

    @classmethod
    def setUpTestData(cls):
        # Two identical 'Foo' rows plus one 'Bar' row exercise deduplication.
        AggregateTestModel.objects.create(char_field='Foo')
        AggregateTestModel.objects.create(char_field='Foo')
        AggregateTestModel.objects.create(char_field='Bar')

    def test_string_agg_distinct_false(self):
        result = AggregateTestModel.objects.aggregate(
            stringagg=StringAgg('char_field', delimiter=' ', distinct=False),
        )
        self.assertEqual(result['stringagg'].count('Foo'), 2)
        self.assertEqual(result['stringagg'].count('Bar'), 1)

    def test_string_agg_distinct_true(self):
        result = AggregateTestModel.objects.aggregate(
            stringagg=StringAgg('char_field', delimiter=' ', distinct=True),
        )
        self.assertEqual(result['stringagg'].count('Foo'), 1)
        self.assertEqual(result['stringagg'].count('Bar'), 1)

    def test_array_agg_distinct_false(self):
        result = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg('char_field', distinct=False),
        )
        self.assertEqual(sorted(result['arrayagg']), ['Bar', 'Foo', 'Foo'])

    def test_array_agg_distinct_true(self):
        result = AggregateTestModel.objects.aggregate(
            arrayagg=ArrayAgg('char_field', distinct=True),
        )
        self.assertEqual(sorted(result['arrayagg']), ['Bar', 'Foo'])

    def test_jsonb_agg_distinct_false(self):
        result = AggregateTestModel.objects.aggregate(
            jsonbagg=JSONBAgg('char_field', distinct=False),
        )
        self.assertEqual(sorted(result['jsonbagg']), ['Bar', 'Foo', 'Foo'])

    def test_jsonb_agg_distinct_true(self):
        result = AggregateTestModel.objects.aggregate(
            jsonbagg=JSONBAgg('char_field', distinct=True),
        )
        self.assertEqual(sorted(result['jsonbagg']), ['Bar', 'Foo'])
class TestStatisticsAggregate(PostgreSQLTestCase):
    """Tests for PostgreSQL's two-argument statistical aggregates
    (corr, covar_pop, regr_*)."""

    @classmethod
    def setUpTestData(cls):
        # Three rows where int2 == 4 - int1, giving a perfect negative linear
        # relationship; the expected values below (corr == -1, regr_r2 == 1,
        # slope == -1, intercept == 4, ...) all follow from that.
        StatTestModel.objects.create(
            int1=1,
            int2=3,
            related_field=AggregateTestModel.objects.create(integer_field=0),
        )
        StatTestModel.objects.create(
            int1=2,
            int2=2,
            related_field=AggregateTestModel.objects.create(integer_field=1),
        )
        StatTestModel.objects.create(
            int1=3,
            int2=1,
            related_field=AggregateTestModel.objects.create(integer_field=2),
        )

    # Tests for base class (StatAggregate)
    def test_missing_arguments_raises_exception(self):
        with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
            StatAggregate(x=None, y=None)

    def test_correct_source_expressions(self):
        # Non-string arguments are wrapped in Value(); strings resolve to F().
        func = StatAggregate(x='test', y=13)
        self.assertIsInstance(func.source_expressions[0], Value)
        self.assertIsInstance(func.source_expressions[1], F)

    def test_alias_is_required(self):
        class SomeFunc(StatAggregate):
            function = 'TEST'
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))

    # Test aggregates
    def test_empty_result_set(self):
        # All aggregates return NULL over an empty set, except regr_count
        # which returns 0.
        StatTestModel.objects.all().delete()
        tests = [
            (Corr(y='int2', x='int1'), None),
            (CovarPop(y='int2', x='int1'), None),
            (CovarPop(y='int2', x='int1', sample=True), None),
            (RegrAvgX(y='int2', x='int1'), None),
            (RegrAvgY(y='int2', x='int1'), None),
            (RegrCount(y='int2', x='int1'), 0),
            (RegrIntercept(y='int2', x='int1'), None),
            (RegrR2(y='int2', x='int1'), None),
            (RegrSlope(y='int2', x='int1'), None),
            (RegrSXX(y='int2', x='int1'), None),
            (RegrSXY(y='int2', x='int1'), None),
            (RegrSYY(y='int2', x='int1'), None),
        ]
        for aggregation, expected_result in tests:
            with self.subTest(aggregation=aggregation):
                # Empty result with non-execution optimization.
                with self.assertNumQueries(0):
                    values = StatTestModel.objects.none().aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
                # Empty result when query must be executed.
                with self.assertNumQueries(1):
                    values = StatTestModel.objects.aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})

    def test_default_argument(self):
        # With default=0, every NULL-returning aggregate yields 0 instead.
        StatTestModel.objects.all().delete()
        tests = [
            (Corr(y='int2', x='int1', default=0), 0),
            (CovarPop(y='int2', x='int1', default=0), 0),
            (CovarPop(y='int2', x='int1', sample=True, default=0), 0),
            (RegrAvgX(y='int2', x='int1', default=0), 0),
            (RegrAvgY(y='int2', x='int1', default=0), 0),
            # RegrCount() doesn't support the default argument.
            (RegrIntercept(y='int2', x='int1', default=0), 0),
            (RegrR2(y='int2', x='int1', default=0), 0),
            (RegrSlope(y='int2', x='int1', default=0), 0),
            (RegrSXX(y='int2', x='int1', default=0), 0),
            (RegrSXY(y='int2', x='int1', default=0), 0),
            (RegrSYY(y='int2', x='int1', default=0), 0),
        ]
        for aggregation, expected_result in tests:
            with self.subTest(aggregation=aggregation):
                # Empty result with non-execution optimization.
                with self.assertNumQueries(0):
                    values = StatTestModel.objects.none().aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})
                # Empty result when query must be executed.
                with self.assertNumQueries(1):
                    values = StatTestModel.objects.aggregate(
                        aggregation=aggregation,
                    )
                    self.assertEqual(values, {'aggregation': expected_result})

    def test_corr_general(self):
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': -1.0})

    def test_covar_pop_general(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})

    def test_covar_pop_sample(self):
        # sample=True maps to covar_samp (divides by n-1 instead of n).
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': -1.0})

    def test_regr_avgx_general(self):
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': 2.0})

    def test_regr_avgy_general(self):
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': 2.0})

    def test_regr_count_general(self):
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 3})

    def test_regr_count_default(self):
        msg = 'RegrCount does not allow default.'
        with self.assertRaisesMessage(TypeError, msg):
            RegrCount(y='int2', x='int1', default=0)

    def test_regr_intercept_general(self):
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': 4})

    def test_regr_r2_general(self):
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': 1})

    def test_regr_slope_general(self):
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': -1})

    def test_regr_sxx_general(self):
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': 2.0})

    def test_regr_sxy_general(self):
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': -2.0})

    def test_regr_syy_general(self):
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': 2.0})

    def test_regr_avgx_with_related_obj_and_number_as_argument(self):
        """
        This is more complex test to check if JOIN on field and
        number as argument works as expected.
        """
        values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
        self.assertEqual(values, {'complex_regravgx': 1.0})
| {
"content_hash": "0580aa004c6151cf3ae2d20456b2f795",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 119,
"avg_line_length": 44.3739837398374,
"alnum_prop": 0.5775925247343349,
"repo_name": "ghickman/django",
"id": "07200f9f9742304cfeb3d9eca3a3c4b93b44ab13",
"size": "32748",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/postgres_tests/test_aggregates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170436"
},
{
"name": "JavaScript",
"bytes": "255321"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11414242"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""SqueezeDet Demo.
In image detection mode, for a given image, detect objects and draw bounding
boxes around them. In video detection mode, perform real-time detection on the
video stream.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import time
import sys
import os
import glob
import numpy as np
import tensorflow as tf
from config import *
from train import _draw_box
from nets import *
# Command-line flags for the demo.
# NOTE(review): FLAGS.gpu is referenced by video_demo()/image_demo() but no
# 'gpu' flag is defined in this file -- confirm it is registered elsewhere.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string(
    'mode', 'image', """'image' or 'video'.""")
tf.app.flags.DEFINE_string(
    'checkpoint', './data/model_checkpoints/vgg16/model.ckpt-101500',
    """Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
    'input_path', './data/sample.png',
    """Input image or video to be detected. Can process glob input such as """
    """./data/00000*.png.""")
tf.app.flags.DEFINE_string(
    'out_dir', './data/out/', """Directory to dump output image or video.""")
def video_demo():
  """Run detection on a video stream.

  Reads frames from FLAGS.input_path, crops each frame, runs the detector,
  draws boxes above the plotting threshold and writes every annotated frame
  to FLAGS.out_dir as a zero-padded numbered JPEG. Per-frame timings are
  printed to stdout.
  """
  cap = cv2.VideoCapture(FLAGS.input_path)

  # Class-name -> BGR color for drawing; hoisted out of the frame loop since
  # it is loop-invariant.
  # TODO(bichen): move this color dict to configuration file
  cls2clr = {
      'car': (255, 191, 0),
      'cyclist': (0, 191, 255),
      'pedestrian':(255, 0, 191)
  }

  with tf.Graph().as_default():
    # Load model configuration; weights come from the checkpoint below, so
    # skip loading pretrained parameters.
    mc = kitti_vgg16_config()
    mc.BATCH_SIZE = 1
    mc.LOAD_PRETRAINED_MODEL = False
    # NOTE(review): FLAGS.gpu is not defined in this file -- confirm the flag
    # is registered elsewhere before running.
    model = VGG16ConvDet(mc, FLAGS.gpu)

    saver = tf.train.Saver(model.model_params)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      saver.restore(sess, FLAGS.checkpoint)

      times = {}
      count = 0
      while cap.isOpened():
        t_start = time.time()
        count += 1
        out_im_name = os.path.join(FLAGS.out_dir, str(count).zfill(6)+'.jpg')

        # Load a frame; stop at end of stream.
        ret, frame = cap.read()
        if not ret:
          break
        # Crop to the region of interest.
        # NOTE(review): crop offsets are hard-coded for one specific input
        # resolution -- confirm they match the source video.
        frame = frame[500:-205, 239:-439, :]
        im_input = frame.astype(np.float32) - mc.BGR_MEANS

        t_reshape = time.time()
        times['reshape'] = t_reshape - t_start

        # Detect
        det_boxes, det_probs, det_class = sess.run(
            [model.det_boxes, model.det_probs, model.det_class],
            feed_dict={model.image_input:[im_input], model.keep_prob: 1.0})

        t_detect = time.time()
        times['detect'] = t_detect - t_reshape

        # Keep only detections above the plotting threshold.
        final_boxes, final_probs, final_class = model.filter_prediction(
            det_boxes[0], det_probs[0], det_class[0])

        keep_idx = [idx for idx in range(len(final_probs))
                    if final_probs[idx] > mc.PLOT_PROB_THRESH]
        final_boxes = [final_boxes[idx] for idx in keep_idx]
        final_probs = [final_probs[idx] for idx in keep_idx]
        final_class = [final_class[idx] for idx in keep_idx]

        t_filter = time.time()
        times['filter'] = t_filter - t_detect

        # Draw boxes and save the annotated frame.
        _draw_box(
            frame, final_boxes,
            [mc.CLASS_NAMES[idx]+': (%.2f)'% prob
             for idx, prob in zip(final_class, final_probs)],
            cdict=cls2clr
        )

        t_draw = time.time()
        times['draw'] = t_draw - t_filter

        cv2.imwrite(out_im_name, frame)

        times['total'] = time.time() - t_start

        time_str = 'Total time: {:.4f}, detection time: {:.4f}, filter time: '\
                   '{:.4f}'. \
            format(times['total'], times['detect'], times['filter'])

        print (time_str)

        if cv2.waitKey(1) & 0xFF == ord('q'):
          break

  # Release everything if job is finished
  cap.release()
  cv2.destroyAllWindows()
def image_demo():
  """Run detection on still images.

  Expands FLAGS.input_path as a glob, detects objects in each image, draws
  boxes above the plotting threshold and writes each result to FLAGS.out_dir
  with an 'out_' filename prefix.
  """
  # Class-name -> BGR color for drawing; hoisted out of the image loop since
  # it is loop-invariant.
  # TODO(bichen): move this color dict to configuration file
  cls2clr = {
      'car': (255, 191, 0),
      'cyclist': (0, 191, 255),
      'pedestrian':(255, 0, 191)
  }

  with tf.Graph().as_default():
    # Load model configuration; weights come from the checkpoint below, so
    # skip loading pretrained parameters.
    mc = kitti_vgg16_config()
    mc.BATCH_SIZE = 1
    mc.LOAD_PRETRAINED_MODEL = False
    # NOTE(review): FLAGS.gpu is not defined in this file -- confirm the flag
    # is registered elsewhere before running.
    model = VGG16ConvDet(mc, FLAGS.gpu)

    saver = tf.train.Saver(model.model_params)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      saver.restore(sess, FLAGS.checkpoint)

      for f in glob.iglob(FLAGS.input_path):
        # Load and preprocess: resize to model input size, subtract BGR means.
        im = cv2.imread(f)
        im = im.astype(np.float32, copy=False)
        im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
        input_image = im - mc.BGR_MEANS

        # Detect
        det_boxes, det_probs, det_class = sess.run(
            [model.det_boxes, model.det_probs, model.det_class],
            feed_dict={model.image_input:[input_image], model.keep_prob: 1.0})

        # Keep only detections above the plotting threshold.
        final_boxes, final_probs, final_class = model.filter_prediction(
            det_boxes[0], det_probs[0], det_class[0])

        keep_idx = [idx for idx in range(len(final_probs))
                    if final_probs[idx] > mc.PLOT_PROB_THRESH]
        final_boxes = [final_boxes[idx] for idx in keep_idx]
        final_probs = [final_probs[idx] for idx in keep_idx]
        final_class = [final_class[idx] for idx in keep_idx]

        # Draw boxes
        _draw_box(
            im, final_boxes,
            [mc.CLASS_NAMES[idx]+': (%.2f)'% prob
             for idx, prob in zip(final_class, final_probs)],
            cdict=cls2clr,
        )

        file_name = os.path.split(f)[1]
        out_file_name = os.path.join(FLAGS.out_dir, 'out_'+file_name)
        cv2.imwrite(out_file_name, im)
        print ('Image detection output saved to {}'.format(out_file_name))
def main(argv=None):
  """Entry point: ensure the output directory exists, then dispatch on
  FLAGS.mode ('image' runs image_demo, anything else runs video_demo)."""
  if not tf.gfile.Exists(FLAGS.out_dir):
    tf.gfile.MakeDirs(FLAGS.out_dir)
  demo = image_demo if FLAGS.mode == 'image' else video_demo
  demo()


if __name__ == '__main__':
  tf.app.run()
| {
"content_hash": "bce19256e354c71cc38a23053b9caae3",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 79,
"avg_line_length": 30.804651162790698,
"alnum_prop": 0.5794956968141326,
"repo_name": "goan15910/ConvDet",
"id": "cee5eef0b22df999fe43dce808ef25832e43e269",
"size": "6677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/demo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "31297"
},
{
"name": "Makefile",
"bytes": "123"
},
{
"name": "Python",
"bytes": "203070"
},
{
"name": "Shell",
"bytes": "5723"
}
],
"symlink_target": ""
} |
"""Module that implements SSDP protocol."""
import re
import select
import socket
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Set # noqa: F401
from xml.etree import ElementTree
import requests
import zeroconf
from netdisco.util import etree_to_dict
# Seconds to wait for device responses during a scan.
DISCOVER_TIMEOUT = 2
# MX is a suggested random wait time for a device to reply, so should be
# bound by our discovery timeout.
SSDP_MX = DISCOVER_TIMEOUT
# Standard SSDP multicast address and port for M-SEARCH requests.
SSDP_TARGET = ("239.255.255.250", 1900)
# Parses "Header: value" pairs out of an SSDP response.
RESPONSE_REGEX = re.compile(r'\n(.*?)\: *(.*)\r')
# Cached scan results are reused for requests within this window.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=59)
# Devices and services
ST_ALL = "ssdp:all"
# Devices only, some devices will only respond to this query
ST_ROOTDEVICE = "upnp:rootdevice"
class SSDP:
    """Control the scanning of uPnP devices and services and caches output."""

    def __init__(self):
        """Initialize the discovery."""
        self.entries = []  # type: List[UPNPEntry]
        self.last_scan = None

    def scan(self):
        """Scan the network."""
        self.update()

    def all(self):
        """Return all found entries.

        Will scan for entries if not scanned recently.
        """
        self.update()
        return list(self.entries)

    # pylint: disable=invalid-name
    def find_by_st(self, st):
        """Return a list of entries that match the ST."""
        self.update()
        return [entry for entry in self.entries if entry.st == st]

    def find_by_device_description(self, values):
        """Return a list of entries that match the description.

        Pass in a dict with values to match against the device tag in the
        description.
        """
        self.update()
        seen = set()  # type: Set[Optional[str]]
        results = []
        # Deduplicate on location since ST is irrelevant here.
        for entry in self.entries:
            if entry.location in seen:
                continue
            if entry.match_device_description(values):
                results.append(entry)
                seen.add(entry.location)
        return results

    def update(self, force_update=False):
        """Scan for new uPnP devices and services."""
        stale = (
            self.last_scan is None or
            datetime.now() - self.last_scan > MIN_TIME_BETWEEN_SCANS
        )
        if force_update or stale:
            self.remove_expired()
            known = self.entries
            known.extend(entry for entry in scan() if entry not in known)
            self.last_scan = datetime.now()

    def remove_expired(self):
        """Filter out expired entries."""
        self.entries = [
            entry for entry in self.entries if not entry.is_expired
        ]
class UPNPEntry:
    """Found uPnP entry."""

    # Shared cache of fetched device descriptions, keyed by location URL.
    DESCRIPTION_CACHE = {'_NO_LOCATION': {}}  # type: Dict[str, Dict]

    def __init__(self, values):
        """Initialize the discovery."""
        self.values = values
        self.created = datetime.now()
        self.expires = self._parse_expiry()

    def _parse_expiry(self):
        """Derive the expiry timestamp from the cache-control header, if any."""
        cache_directive = self.values.get('cache-control')
        if not cache_directive:
            return None
        max_age = re.findall(r'max-age *= *\d+', cache_directive)
        if not max_age:
            return None
        cache_seconds = int(max_age[0].split('=')[1])
        return self.created + timedelta(seconds=cache_seconds)

    @property
    def is_expired(self):
        """Return if the entry is expired or not."""
        if self.expires is None:
            return False
        return datetime.now() > self.expires

    # pylint: disable=invalid-name
    @property
    def st(self):
        """Return ST value."""
        return self.values.get('st')

    @property
    def location(self):
        """Return Location value."""
        return self.values.get('location')

    @property
    def description(self):
        """Return the description from the uPnP entry."""
        url = self.values.get('location', '_NO_LOCATION')
        cache = UPNPEntry.DESCRIPTION_CACHE
        if url not in cache:
            try:
                xml = requests.get(url, timeout=5).text
                if not xml:
                    # Samsung Smart TV sometimes returns an empty document the
                    # first time. Retry once.
                    xml = requests.get(url, timeout=5).text
                tree = ElementTree.fromstring(xml)
                cache[url] = etree_to_dict(tree).get('root', {})
            except requests.RequestException:
                logging.getLogger(__name__).debug(
                    "Error fetching description at %s", url)
                cache[url] = {}
            except ElementTree.ParseError:
                logging.getLogger(__name__).debug(
                    "Found malformed XML at %s: %s", url, xml)
                cache[url] = {}
        return cache[url]

    def match_device_description(self, values):
        """Fetch description and matches against it.

        Values should only contain lowercase keys.
        """
        device = self.description.get('device')
        if device is None:
            return False
        for key, val in values.items():
            actual = device.get(key)
            matched = actual in val if isinstance(val, list) else actual == val
            if not matched:
                return False
        return True

    @classmethod
    def from_response(cls, response):
        """Create a uPnP entry from a response."""
        headers = RESPONSE_REGEX.findall(response)
        return UPNPEntry({header.lower(): value for header, value in headers})

    def __eq__(self, other):
        """Return the comparison."""
        return (self.__class__ == other.__class__ and
                self.values == other.values)

    def __repr__(self):
        """Return the entry."""
        return "<UPNPEntry {} - {}>".format(self.location or '', self.st or '')
def ssdp_request(ssdp_st, ssdp_mx=SSDP_MX):
    """Return request bytes for given st and mx."""
    # Trailing empty strings produce the blank line that terminates the
    # M-SEARCH request.
    request_lines = [
        'M-SEARCH * HTTP/1.1',
        'ST: {}'.format(ssdp_st),
        'MX: {:d}'.format(ssdp_mx),
        'MAN: "ssdp:discover"',
        'HOST: {}:{}'.format(*SSDP_TARGET),
        '',
        '',
    ]
    return "\r\n".join(request_lines).encode('utf-8')
# pylint: disable=invalid-name,too-many-locals,too-many-branches
def scan(timeout=DISCOVER_TIMEOUT):
    """Send a message over the network to discover uPnP devices.

    Returns entries deduplicated on (st, location), sorted by location.

    Inspired by Crimsdings
    https://github.com/crimsdings/ChromeCast/blob/master/cc_discovery.py

    Protocol explanation:
    https://embeddedinn.wordpress.com/tutorials/upnp-device-architecture/
    """
    # Query both all devices/services and root devices only; some devices
    # respond to just one of the two (see ST_ROOTDEVICE comment above).
    ssdp_requests = ssdp_request(ST_ALL), ssdp_request(ST_ROOTDEVICE)
    stop_wait = datetime.now() + timedelta(seconds=timeout)
    # One socket per local interface address so multicast reaches all LANs.
    sockets = []
    for addr in zeroconf.get_all_addresses():
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # Set the time-to-live for messages for local network
            # NOTE(review): reuses SSDP_MX (2) as the multicast TTL --
            # presumably intentional to keep traffic local; confirm.
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
                            SSDP_MX)
            sock.bind((addr, 0))
            sockets.append(sock)
        except socket.error:
            # Interface can't be used for multicast; skip it.
            pass
    entries = {}
    # Iterate over a copy: failed sockets are removed from the live list.
    for sock in [s for s in sockets]:
        try:
            for req in ssdp_requests:
                sock.sendto(req, SSDP_TARGET)
            sock.setblocking(False)
        except socket.error:
            sockets.remove(sock)
            sock.close()
    try:
        # Collect responses until the deadline passes or no sockets remain.
        while sockets:
            time_diff = stop_wait - datetime.now()
            seconds_left = time_diff.total_seconds()
            if seconds_left <= 0:
                break
            ready = select.select(sockets, [], [], seconds_left)[0]
            for sock in ready:
                try:
                    data, address = sock.recvfrom(1024)
                    response = data.decode("utf-8")
                except UnicodeDecodeError:
                    logging.getLogger(__name__).debug(
                        'Ignoring invalid unicode response from %s', address)
                    continue
                except socket.error:
                    logging.getLogger(__name__).exception(
                        "Socket error while discovering SSDP devices")
                    sockets.remove(sock)
                    sock.close()
                    continue
                # Last response for a given (st, location) wins.
                entry = UPNPEntry.from_response(response)
                entries[(entry.st, entry.location)] = entry
    finally:
        for s in sockets:
            s.close()
    return sorted(entries.values(), key=lambda entry: entry.location or '')
def main():
    """Test SSDP discovery."""
    from pprint import pprint

    print("Scanning SSDP..")
    results = scan()
    pprint(results)


if __name__ == "__main__":
    main()
| {
"content_hash": "c0aaee7586c1749d9b21c30d994918d1",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 30.302405498281786,
"alnum_prop": 0.5647539124518032,
"repo_name": "balloob/netdisco",
"id": "7c4dd9ccd8af3a1d7b8870e7ffd5d4c376824716",
"size": "8818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "netdisco/ssdp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28555"
}
],
"symlink_target": ""
} |
from telebot import types
# Inline-keyboard layout: category -> menu name -> {button label: callback data}.
# Labels are user-facing (Russian) and are sent to Telegram verbatim; the
# callback-data strings are parsed by the bot's callback handlers, so neither
# may be changed without updating those handlers.
inline_buttons = {
    'settings': {
        # Top-level settings menu.
        'index': {'Приветствие': 'greetings',
                  "Интервал команды '/joke'": 'joke',
                  'Запретить стикеры': 'stickers'
                  },
        # Greeting configuration submenu.
        'greetings': {
            'Включить приветствия': 'allow_greetings',
            'Тип приветственного сообщения': 'greetings_type',
            'Текст приветственного сообщения': 'greetings_text',
            'Назад': 'index',
        },
        # Yes/no toggle for greetings.
        'allow_greetings': {
            'Да': 'greetings_allow yes',
            'Нет': 'greetings_allow no',
        },
        # Greeting payload type: plain text or sticker.
        'greetings_type': {
            'Текст': 'greetings_set_type text',
            'Стикер': 'greetings_set_type sticker',
        },
        # /joke command settings.
        'joke': {
            'Назад': 'index',
            "Отключить команду 'joke'": 'joke_off'
        },
        # Yes/no toggle for sticker removal.
        'stickers': {
            'Да': 'stickers_remove yes',
            'Нет': 'stickers_remove no'
        },
        # Generic "back to index" menu used at the end of flows.
        'endpoint': {
            'Назад': 'index',
        },
    }
}
def generate_inline_keyboard(category, subcategory, row_width=3):
    """Build an InlineKeyboardMarkup from the inline_buttons registry.

    Looks up inline_buttons[category][subcategory] and turns each
    label -> callback-data pair into a button on the keyboard.
    """
    markup = types.InlineKeyboardMarkup(row_width=row_width)
    markup.add(*(
        types.InlineKeyboardButton(text=label, callback_data=callback)
        for label, callback in inline_buttons[category][subcategory].items()
    ))
    return markup
| {
"content_hash": "71e5fa4686d97058dd30ef7ef08ca718",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 31.976744186046513,
"alnum_prop": 0.5214545454545455,
"repo_name": "Ars2014/PyBananium",
"id": "7c724e2892872632412d5165ec2bcc547fc5088e",
"size": "1569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36702"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class HttpMessage(Model):
    """HTTP message.

    :param content: HTTP message content.
    :type content: object
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'content': {'key': 'content', 'type': 'object'},
    }

    def __init__(self, content=None):
        """Initialize the message with optional content."""
        super(HttpMessage, self).__init__()
        self.content = content
| {
"content_hash": "1d7e7015af3c037b28020b2ed61c9c2b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 21.529411764705884,
"alnum_prop": 0.5901639344262295,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "f94eada2e8a3fdeb5a7c3e8e620fe5d81dbfce98",
"size": "840",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-resource/azure/mgmt/resource/resources/v2017_05_10/models/http_message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.