hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b759e7bd31af70be7eea8575585cafd0450c2cfe | 1,310 | py | Python | app/app11_plos_HumanEKC/prepData2.py | ameenetemady/DeepPep | 121826309667f1290fa1121746a2992943d0927b | [
"Apache-2.0"
] | 1 | 2020-05-30T06:01:50.000Z | 2020-05-30T06:01:50.000Z | app/app11_plos_HumanEKC/prepData2.py | ameenetemady/DeepPep | 121826309667f1290fa1121746a2992943d0927b | [
"Apache-2.0"
] | null | null | null | app/app11_plos_HumanEKC/prepData2.py | ameenetemady/DeepPep | 121826309667f1290fa1121746a2992943d0927b | [
"Apache-2.0"
] | 1 | 2019-10-20T21:11:48.000Z | 2019-10-20T21:11:48.000Z | # Prerequisite: directories for "in_strProtRefsDir" and "sparseData2", should not contain any ".txt" file
# Output: under sparseData2 directory: target.csv, metaInfo.csv, *.txt
import sys
import os
sys.path.append('../..')
import prepLib
in_strFastaFilename = '{!s}/data/protein/plos_HumanEKC/HumanEKC_uniprot-reviewed_up000005640_DECOY.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/plos_HumanEKC/HumanEKC_dataset_peptide_identification_plos.txt'.format(os.environ.get('HOME'))
in_strProtRefsDir = './protRefs'
out_strOutputBaseDir = './sparseData2'
protDic, pepDic = prepLib.loadProtPeptideDic(in_strPeptideFilename)
prepLib.breakFasta(in_strFastaFilename, in_strProtRefsDir, protDic)
listProtRefFileName = prepLib.getProtRefFileNames(in_strProtRefsDir)
# match peptides with proteins
prepLib.fuRunAllProt(listProtRefFileName, in_strProtRefsDir, out_strOutputBaseDir, protDic)
strMetaInfoFilename = '{!s}/metaInfo.csv'.format(out_strOutputBaseDir)
prepLib.fuSaveMetaInfo(out_strOutputBaseDir, strMetaInfoFilename, in_strProtRefsDir)
pepProbsList = sorted(list(pepDic.values()),key=lambda x: x[0])
pepProbsList = [pepProbsList[i][1:3] for i in range(0,len(pepProbsList))]
prepLib.fuSavePepProbsTargetFromList('{!s}/target.csv'.format(out_strOutputBaseDir), pepProbsList) | 52.4 | 137 | 0.816031 | # Prerequisite: directories for "in_strProtRefsDir" and "sparseData2", should not contain any ".txt" file
# Output: under sparseData2 directory: target.csv, metaInfo.csv, *.txt
import sys
import os
sys.path.append('../..')
import prepLib
in_strFastaFilename = '{!s}/data/protein/plos_HumanEKC/HumanEKC_uniprot-reviewed_up000005640_DECOY.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/plos_HumanEKC/HumanEKC_dataset_peptide_identification_plos.txt'.format(os.environ.get('HOME'))
in_strProtRefsDir = './protRefs'
out_strOutputBaseDir = './sparseData2'
protDic, pepDic = prepLib.loadProtPeptideDic(in_strPeptideFilename)
prepLib.breakFasta(in_strFastaFilename, in_strProtRefsDir, protDic)
listProtRefFileName = prepLib.getProtRefFileNames(in_strProtRefsDir)
# match peptides with proteins
prepLib.fuRunAllProt(listProtRefFileName, in_strProtRefsDir, out_strOutputBaseDir, protDic)
strMetaInfoFilename = '{!s}/metaInfo.csv'.format(out_strOutputBaseDir)
prepLib.fuSaveMetaInfo(out_strOutputBaseDir, strMetaInfoFilename, in_strProtRefsDir)
pepProbsList = sorted(list(pepDic.values()),key=lambda x: x[0])
pepProbsList = [pepProbsList[i][1:3] for i in range(0,len(pepProbsList))]
prepLib.fuSavePepProbsTargetFromList('{!s}/target.csv'.format(out_strOutputBaseDir), pepProbsList) | 0 | 0 | 0 |
2c97c0f192d433d52d140b286b27d5678d5f5e29 | 10,138 | py | Python | tests/utils/test_gmm_utils.py | Prithwijit-Chak/simpeg | d93145d768b5512621cdd75566b4a8175fee9ed3 | [
"MIT"
] | 358 | 2015-03-11T05:48:41.000Z | 2022-03-26T02:04:12.000Z | tests/utils/test_gmm_utils.py | thast/simpeg | 8021082b8b53f3c08fa87fc085547bdd56437c6b | [
"MIT"
] | 885 | 2015-01-19T09:23:48.000Z | 2022-03-29T12:08:34.000Z | tests/utils/test_gmm_utils.py | thast/simpeg | 8021082b8b53f3c08fa87fc085547bdd56437c6b | [
"MIT"
] | 214 | 2015-03-11T05:48:43.000Z | 2022-03-02T01:05:11.000Z | import numpy as np
import unittest
import discretize
from SimPEG.maps import Wires
from SimPEG.utils import (
mkvc,
WeightedGaussianMixture,
GaussianMixtureWithPrior,
)
from scipy.stats import norm, multivariate_normal
if __name__ == "__main__":
unittest.main()
| 35.823322 | 96 | 0.56451 | import numpy as np
import unittest
import discretize
from SimPEG.maps import Wires
from SimPEG.utils import (
mkvc,
WeightedGaussianMixture,
GaussianMixtureWithPrior,
)
from scipy.stats import norm, multivariate_normal
class TestGMMs(unittest.TestCase):
def setUp(self):
np.random.seed(518936)
# Create a cloud of random points from a random gaussian mixture
self.ndim = 2
self.n_components = 2
sigma = np.random.randn(self.n_components, self.ndim, self.ndim)
sigma = np.c_[[sigma[i].dot(sigma[i].T) for i in range(sigma.shape[0])]]
sigma[0] += np.eye(self.ndim)
sigma[1] += np.eye(self.ndim) - 0.25 * np.eye(self.ndim).transpose((1, 0))
self.sigma = sigma
self.means = (
np.abs(np.random.randn(self.ndim, self.ndim)) * np.c_[[100.0, -100.0]]
)
self.rv0 = multivariate_normal(self.means[0], self.sigma[0])
self.rv1 = multivariate_normal(self.means[1], self.sigma[1])
self.proportions = np.r_[0.6, 0.4]
self.nsample = 1000
self.s0 = self.rv0.rvs(int(self.nsample * self.proportions[0]))
self.s1 = self.rv1.rvs(int(self.nsample * self.proportions[1]))
self.samples = np.r_[self.s0, self.s1]
self.model = mkvc(self.samples)
self.mesh = discretize.TensorMesh(
[np.maximum(1e-1, np.random.randn(self.nsample) ** 2.0)]
)
self.wires = Wires(("s0", self.mesh.nC), ("s1", self.mesh.nC))
self.PlotIt = False
def test_weighted_gaussian_mixture_multicomponents_multidimensions(self):
clf = WeightedGaussianMixture(
mesh=self.mesh,
n_components=self.n_components,
covariance_type="full",
max_iter=1000,
n_init=20,
tol=1e-8,
means_init=self.means,
warm_start=True,
precisions_init=np.linalg.inv(self.sigma),
weights_init=self.proportions,
)
clf.fit(self.samples)
checking_means = np.c_[
np.average(
self.s0, axis=0, weights=self.mesh.cell_volumes[: self.s0.shape[0]]
),
np.average(
self.s1, axis=0, weights=self.mesh.cell_volumes[self.s0.shape[0] :]
),
].T
checking_covariances = np.r_[
np.cov(
self.s0.T, ddof=0, aweights=self.mesh.cell_volumes[: self.s0.shape[0]]
),
np.cov(
self.s1.T, ddof=0, aweights=self.mesh.cell_volumes[self.s0.shape[0] :]
),
].reshape(clf.covariances_.shape)
checking_proportions = np.r_[
self.mesh.cell_volumes[: self.s0.shape[0]].sum(),
self.mesh.cell_volumes[self.s0.shape[0] :].sum(),
]
checking_proportions /= checking_proportions.sum()
self.assertTrue(np.all(np.isclose(clf.means_, checking_means)))
self.assertTrue(np.all(np.isclose(clf.covariances_, checking_covariances)))
self.assertTrue(np.all(np.isclose(clf.weights_, checking_proportions)))
print(
"WeightedGaussianMixture is estimating correctly in 2D with 2 components."
)
def test_weighted_gaussian_mixture_one_component_1d(self):
model1d = self.wires.s0 * self.model
clf = WeightedGaussianMixture(
mesh=self.mesh,
n_components=1,
covariance_type="full",
max_iter=1000,
n_init=10,
tol=1e-8,
warm_start=True,
)
clf.fit(model1d.reshape(-1, 1))
cheching_mean = np.average(model1d, weights=self.mesh.cell_volumes)
checking_covariance = np.cov(model1d, ddof=0, aweights=self.mesh.cell_volumes)
self.assertTrue(np.isclose(clf.means_[0], cheching_mean))
self.assertTrue(np.isclose(clf.covariances_[0], checking_covariance))
print("WeightedGaussianMixture is estimating correctly in 1D with 1 component.")
def test_MAP_estimate_one_component_1d(self):
# subsample mesh and model between mle and prior
n_samples = int(self.nsample * self.proportions.min())
model_map = self.wires.s0 * self.model
model_mle = model_map[:n_samples]
model_prior = model_map[-n_samples:]
actv = np.zeros(self.mesh.nC, dtype="bool")
actv[:n_samples] = np.ones(n_samples, dtype="bool")
clfref = WeightedGaussianMixture(
mesh=self.mesh,
actv=actv,
n_components=1,
covariance_type="full",
max_iter=1000,
n_init=10,
tol=1e-8,
warm_start=True,
)
clfref.fit(model_prior.reshape(-1, 1))
clf = GaussianMixtureWithPrior(
gmmref=clfref,
max_iter=1000,
n_init=10,
tol=1e-8,
warm_start=True,
nu=1,
kappa=1,
zeta=1,
prior_type="full",
update_covariances=True,
)
clf.fit(model_mle.reshape(-1, 1))
checking_means = np.average(
np.r_[model_mle, model_prior],
weights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
)
checking_covariance = np.cov(
np.r_[model_mle, model_prior],
ddof=0,
aweights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
)
self.assertTrue(np.isclose(checking_covariance, clf.covariances_))
self.assertTrue(np.isclose(checking_means, clf.means_))
print(
"GaussianMixtureWithPrior is fully-MAP-estimating correctly in 1D with 1 component."
)
clfsemi = GaussianMixtureWithPrior(
gmmref=clfref,
max_iter=1000,
n_init=10,
tol=1e-8,
warm_start=True,
nu=1,
kappa=1,
zeta=1,
prior_type="semi",
update_covariances=True,
)
clfsemi.fit(model_mle.reshape(-1, 1))
checking_means_semi = np.average(
np.r_[model_mle, model_prior],
weights=np.r_[self.mesh.cell_volumes[actv], self.mesh.cell_volumes[actv]],
)
checking_covariance_semi = 0.5 * np.cov(
model_mle, ddof=0, aweights=self.mesh.cell_volumes[actv]
) + 0.5 * np.cov(model_prior, ddof=0, aweights=self.mesh.cell_volumes[actv])
self.assertTrue(np.isclose(checking_covariance_semi, clfsemi.covariances_))
self.assertTrue(np.isclose(checking_means_semi, clfsemi.means_))
print(
"GaussianMixtureWithPrior is semi-MAP-estimating correctly in 1D with 1 component."
)
def test_MAP_estimate_multi_component_multidimensions(self):
# prior model at three-quarter-way the means and identity covariances
model_prior = (
np.random.randn(*self.samples.shape)
+ 0.9 * self.means[np.random.choice(2, size=self.nsample, p=[0.9, 0.1])]
)
clfref = WeightedGaussianMixture(
mesh=self.mesh,
n_components=self.n_components,
covariance_type="full",
max_iter=1000,
n_init=10,
tol=1e-8,
warm_start=True,
)
clfref.fit(model_prior)
clfref.order_clusters_GM_weight()
clf = GaussianMixtureWithPrior(
gmmref=clfref,
max_iter=1000,
n_init=100,
tol=1e-10,
nu=1,
kappa=1,
zeta=1,
prior_type="semi",
update_covariances=True,
)
clf.fit(self.samples)
# This is a rough estimate of the multidimensional, multi-components means
checking_means = np.c_[
(
clf.weights_[0]
* np.average(
self.s0, axis=0, weights=self.mesh.cell_volumes[: self.s0.shape[0]]
)
+ clfref.weights_[0] * clfref.means_[0]
)
/ (clf.weights_[0] + clfref.weights_[0]),
(
clf.weights_[1]
* np.average(
self.s1, axis=0, weights=self.mesh.cell_volumes[self.s0.shape[0] :]
)
+ clfref.weights_[1] * clfref.means_[1]
)
/ (clf.weights_[1] + clfref.weights_[1]),
].T
self.assertTrue(np.all(np.isclose(checking_means, clf.means_, rtol=1e-2)))
# This is a rough estimate of the multidimensional, multi-components covariances_
checking_covariances = np.r_[
(
clf.weights_[0]
* np.cov(
self.s0.T,
ddof=0,
aweights=self.mesh.cell_volumes[: self.s0.shape[0]],
)
+ clfref.weights_[0] * clfref.covariances_[0]
)
/ (clf.weights_[0] + clfref.weights_[0]),
(
clf.weights_[1]
* np.cov(
self.s1.T,
ddof=0,
aweights=self.mesh.cell_volumes[self.s0.shape[0] :],
)
+ clfref.weights_[1] * clfref.covariances_[1]
)
/ (clf.weights_[1] + clfref.weights_[1]),
].reshape(clf.covariances_.shape)
self.assertTrue(
np.all(np.isclose(checking_covariances, clf.covariances_, rtol=0.15))
)
checking_proportions = np.r_[
self.mesh.cell_volumes[: self.s0.shape[0]].sum()
+ clfref.weights_[0] * self.mesh.cell_volumes.sum(),
self.mesh.cell_volumes[self.s0.shape[0] :].sum()
+ +clfref.weights_[1] * self.mesh.cell_volumes.sum(),
]
checking_proportions /= checking_proportions.sum()
self.assertTrue(np.all(np.isclose(checking_proportions, clf.weights_)))
print(
"GaussianMixtureWithPrior is semi-MAP-estimating correctly in 2D with 2 components."
)
if __name__ == "__main__":
unittest.main()
| 9,687 | 13 | 157 |
92db716832059479b2d853e09b420af4b7d94b8c | 1,863 | py | Python | lang/ir_env.py | aleksiy325/compiler | c8d135709907e31f671061da25a76b3d64b67292 | [
"MIT"
] | null | null | null | lang/ir_env.py | aleksiy325/compiler | c8d135709907e31f671061da25a76b3d64b67292 | [
"MIT"
] | null | null | null | lang/ir_env.py | aleksiy325/compiler | c8d135709907e31f671061da25a76b3d64b67292 | [
"MIT"
] | null | null | null | from llvmlite import ir, binding
from lang.scope import Scope
from collections import defaultdict
| 35.826923 | 73 | 0.657005 | from llvmlite import ir, binding
from lang.scope import Scope
from collections import defaultdict
class IREnvironment():
    """Owns the llvmlite state needed to build and JIT-compile one module.

    Holds an LLVM module with a single ``main() -> i64`` function, an
    ``IRBuilder`` positioned in its entry block, and an MCJIT execution
    engine. The ``__init__`` call order matters: the llvm bindings must be
    initialized before the module or engine are configured.
    """
    def __init__(self):
        # registry of user-defined functions (name -> function), filled by callers
        self.functions = {}
        self.binding = binding
        # one-time llvm initialization for native codegen/asm printing
        self.binding.initialize()
        self.binding.initialize_native_target()
        self.binding.initialize_native_asmprinter()
        self.scope = Scope()
        self._config_llvm()
        self._create_execution_engine()
        self._declare_print()
    def _config_llvm(self):
        """Create the module and an IRBuilder at the entry block of 'main'."""
        self.module = ir.Module(name='main_module', context=ir.Context())
        self.module.triple = self.binding.get_default_triple()
        # main() takes no arguments and returns i64 (the process exit code)
        func_type = ir.FunctionType(ir.IntType(64), [], False)
        main_func = ir.Function(self.module, func_type, name='main')
        block = main_func.append_basic_block(name='entry')
        self.builder = ir.IRBuilder(block)
    def _create_execution_engine(self):
        """Create an MCJIT engine seeded with an empty backing module."""
        target = self.binding.Target.from_default_triple()
        target_machine = target.create_target_machine()
        # empty module just to give the engine something to own initially
        backing_mod = self.binding.parse_assembly('')
        self.engine = self.binding.create_mcjit_compiler(
            backing_mod, target_machine)
    def _declare_print(self):
        """Declare the C 'printf' symbol so generated IR can call it."""
        voidptr_type = ir.IntType(8).as_pointer()
        # variadic signature: int printf(char*, ...)
        printf_type = ir.FunctionType(
            ir.IntType(32), [voidptr_type], var_arg=True)
        ir.Function(self.module, printf_type, name='printf')
    def _compile_ir(self):
        """Terminate 'main' with 'ret 0', then parse, verify and JIT the module.

        NOTE(review): this appends a terminator to the builder's block, so it
        looks single-use — calling it twice would emit a second ret; confirm
        callers only invoke it once.
        """
        self.builder.ret(ir.Constant(ir.IntType(64), 0))
        mod = self.binding.parse_assembly(str(self.module))
        mod.verify()
        self.engine.add_module(mod)
        self.engine.finalize_object()
        self.engine.run_static_constructors()
        return mod
    def save_ir(self, filename):
        """Compile the module and write its textual IR to *filename*."""
        with open(filename, 'w+') as f:
            self._compile_ir()
            f.write(str(self.module))
| 1,579 | 1 | 184 |
ea221f0eca0e44612cb7425eb660c95162fd9ac7 | 3,176 | py | Python | wagtail/embeds/rich_text.py | wlcrs/wagtail | 8afbc6c3eccef9eb0f09ed56c54cd36779451882 | [
"BSD-3-Clause"
] | 3 | 2019-05-14T13:43:08.000Z | 2021-11-09T11:27:18.000Z | wagtail/embeds/rich_text.py | denza/wagtail | 3939397850f2c73d3f960cea5cc9c2cfae2d005d | [
"BSD-3-Clause"
] | 13 | 2019-03-25T19:57:35.000Z | 2019-12-28T19:25:23.000Z | wagtail/embeds/rich_text.py | denza/wagtail | 3939397850f2c73d3f960cea5cc9c2cfae2d005d | [
"BSD-3-Clause"
] | 1 | 2021-08-13T15:38:43.000Z | 2021-08-13T15:38:43.000Z | from draftjs_exporter.dom import DOM
from wagtail.admin.rich_text.converters import editor_html
from wagtail.admin.rich_text.converters.contentstate_models import Entity
from wagtail.admin.rich_text.converters.html_to_contentstate import AtomicBlockEntityElementHandler
from wagtail.embeds import embeds, format
from wagtail.embeds.exceptions import EmbedException
# Front-end conversion
def media_embedtype_handler(attrs):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation for use on the front-end.
"""
return format.embed_to_frontend_html(attrs['url'])
# hallo.js / editor-html conversion
class MediaEmbedHandler:
"""
MediaEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="media". The resulting element in the database
representation will be:
<embed embedtype="media" url="http://vimeo.com/XXXXX">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as a media embed (because it has a
data-embedtype="media" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'url': tag['data-url'],
}
@staticmethod
def expand_db_attributes(attrs):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation for use within the editor.
"""
try:
return format.embed_to_editor_html(attrs['url'])
except EmbedException:
# Could be replaced with a nice error message
return ''
EditorHTMLEmbedConversionRule = [
editor_html.EmbedTypeRule('media', MediaEmbedHandler)
]
# draft.js / contentstate conversion
def media_embed_entity(props):
"""
Helper to construct elements of the form
<embed embedtype="media" url="https://www.youtube.com/watch?v=y8Kyi0WNg40"/>
when converting from contentstate data
"""
return DOM.create_element('embed', {
'embedtype': 'media',
'url': props.get('url'),
})
class MediaEmbedElementHandler(AtomicBlockEntityElementHandler):
"""
Rule for building an embed entity when converting from database representation
to contentstate
"""
ContentstateMediaConversionRule = {
'from_database_format': {
'embed[embedtype="media"]': MediaEmbedElementHandler(),
},
'to_database_format': {
'entity_decorators': {'EMBED': media_embed_entity}
}
}
| 31.445545 | 99 | 0.662469 | from draftjs_exporter.dom import DOM
from wagtail.admin.rich_text.converters import editor_html
from wagtail.admin.rich_text.converters.contentstate_models import Entity
from wagtail.admin.rich_text.converters.html_to_contentstate import AtomicBlockEntityElementHandler
from wagtail.embeds import embeds, format
from wagtail.embeds.exceptions import EmbedException
# Front-end conversion
def media_embedtype_handler(attrs):
    """
    Render the front-end HTML for a media embed.

    *attrs* is the attribute dict taken from the stored <embed> tag and
    must contain the 'url' key.
    """
    url = attrs['url']
    return format.embed_to_frontend_html(url)
# hallo.js / editor-html conversion
class MediaEmbedHandler:
    """
    Handles HTML elements carrying data-embedtype="media" for the hallo.js
    editor. In the database such an element is stored as:
    <embed embedtype="media" url="http://vimeo.com/XXXXX">
    """
    @staticmethod
    def get_db_attributes(tag):
        """
        Map the editor tag's attributes onto the attributes that will be
        stored on the <embed> element in the database.
        """
        url = tag['data-url']
        return {'url': url}

    @staticmethod
    def expand_db_attributes(attrs):
        """
        Expand the stored <embed> attributes back into the HTML shown
        inside the editor. Falls back to an empty string when the embed
        cannot be fetched.
        """
        url = attrs['url']
        try:
            html = format.embed_to_editor_html(url)
        except EmbedException:
            # Could be replaced with a nice error message
            html = ''
        return html
# Wires the 'media' embedtype to MediaEmbedHandler for the hallo.js
# editor-html converter.
EditorHTMLEmbedConversionRule = [
    editor_html.EmbedTypeRule('media', MediaEmbedHandler)
]
# draft.js / contentstate conversion
def media_embed_entity(props):
    """
    Serialise an EMBED entity from contentstate data into its database
    representation:
    <embed embedtype="media" url="https://www.youtube.com/watch?v=y8Kyi0WNg40"/>
    """
    attributes = {
        'embedtype': 'media',
        'url': props.get('url'),
    }
    return DOM.create_element('embed', attributes)
class MediaEmbedElementHandler(AtomicBlockEntityElementHandler):
    """
    Rule for building an embed entity when converting from database representation
    to contentstate
    """
    def create_entity(self, name, attrs, state, contentstate):
        # Try to resolve the full embed metadata; if the embed cannot be
        # fetched, degrade gracefully to an entity that only carries the URL.
        try:
            embed_obj = embeds.get_embed(attrs['url'])
            embed_data = {
                'embedType': embed_obj.type,
                'url': embed_obj.url,
                'providerName': embed_obj.provider_name,
                'authorName': embed_obj.author_name,
                'thumbnail': embed_obj.thumbnail_url,
                'title': embed_obj.title,
            }
        except EmbedException:
            embed_data = {'url': attrs['url']}
        # IMMUTABLE: the editor treats the embed as an atomic, uneditable block.
        return Entity('EMBED', 'IMMUTABLE', embed_data)
# Bidirectional draft.js conversion rules for media embeds:
# database HTML -> contentstate via MediaEmbedElementHandler, and
# contentstate -> database HTML via the EMBED entity decorator.
ContentstateMediaConversionRule = {
    'from_database_format': {
        'embed[embedtype="media"]': MediaEmbedElementHandler(),
    },
    'to_database_format': {
        'entity_decorators': {'EMBED': media_embed_entity}
    }
}
| 569 | 0 | 26 |
1e5e1b379eece7a8be5749fbe67ee5b0d26fada8 | 332 | py | Python | exercises/ja/exc_01_03_02.py | YanaPalacheva/spacy-course | 59975f7348a601532303be91474d75d02d0540ef | [
"MIT"
] | 1 | 2021-12-30T06:40:11.000Z | 2021-12-30T06:40:11.000Z | exercises/ja/exc_01_03_02.py | YanaPalacheva/spacy-course | 59975f7348a601532303be91474d75d02d0540ef | [
"MIT"
] | null | null | null | exercises/ja/exc_01_03_02.py | YanaPalacheva/spacy-course | 59975f7348a601532303be91474d75d02d0540ef | [
"MIT"
] | 1 | 2020-06-08T13:26:06.000Z | 2020-06-08T13:26:06.000Z | # Englishクラスをインポートし、nlpオブジェクトを作成
from ____ import ____
nlp = ____
# テキストを処理
doc = ____("I like tree kangaroos and narwhals.")
# 「tree kangaroors」のスライスを選択
tree_kangaroos = ____
print(tree_kangaroos.text)
# 「tree kangaroos and narwhals」のスライスを選択(「.」は含まない)
tree_kangaroos_and_narwhals = ____
print(tree_kangaroos_and_narwhals.text)
| 20.75 | 49 | 0.795181 | # Englishクラスをインポートし、nlpオブジェクトを作成
from ____ import ____
nlp = ____
# テキストを処理
doc = ____("I like tree kangaroos and narwhals.")
# 「tree kangaroors」のスライスを選択
tree_kangaroos = ____
print(tree_kangaroos.text)
# 「tree kangaroos and narwhals」のスライスを選択(「.」は含まない)
tree_kangaroos_and_narwhals = ____
print(tree_kangaroos_and_narwhals.text)
| 0 | 0 | 0 |
268df34dd6cb7ca7216ce4dfd7e43748131043eb | 10,178 | py | Python | causal_world/task_generators/experimenting.py | LukasKapp-Schwoerer/CausalWorld | 20d2bbfe147ead64041bde5f4448a29780218f6c | [
"MIT"
] | null | null | null | causal_world/task_generators/experimenting.py | LukasKapp-Schwoerer/CausalWorld | 20d2bbfe147ead64041bde5f4448a29780218f6c | [
"MIT"
] | null | null | null | causal_world/task_generators/experimenting.py | LukasKapp-Schwoerer/CausalWorld | 20d2bbfe147ead64041bde5f4448a29780218f6c | [
"MIT"
] | null | null | null | from causal_world.task_generators.base_task import BaseTask
import numpy as np
| 45.641256 | 159 | 0.562782 | from causal_world.task_generators.base_task import BaseTask
import numpy as np
class ExperimentingTaskGenerator(BaseTask):
    """Goal-less CausalWorld task: a single tool cube in the arena and a
    reward that is always zero."""

    def __init__(self, variables_space='space_a',
                 fractional_reward_weight=0,
                 dense_reward_weights=np.array([0, 0, 0, 0, 0, 0, 0, 0]),
                 activate_sparse_reward=False,
                 tool_block_mass=None,
                 joint_positions=[-0.21737874, 0.55613149,
                                  -1.09308519, -0.12868997,
                                  0.52551013, -1.08006493,
                                  -0.00221536, 0.46163487,
                                  -1.00948735],
                 tool_block_position=np.array([0, 0, 0.0325]),
                 tool_block_orientation=np.array([0, 0, 0, 1]),
                 enhanced_observations=False):
        """
        This task generates a task without goal

        :param variables_space: (str) space to be used either 'space_a' or
                                      'space_b' or 'space_a_b'
        :param fractional_reward_weight: (float) weight multiplied by the
                                                 fractional volumetric
                                                 overlap in the reward.
        :param dense_reward_weights: (list float) specifies the reward weights
                                                  for all the other reward
                                                  terms calculated in the
                                                  calculate_dense_rewards
                                                  function.
        :param activate_sparse_reward: (bool) specified if you want to
                                              sparsify the reward by having
                                              +1 or 0 if the volumetric
                                              fraction overlap more than 90%.
        :param tool_block_mass: (float) specifies the blocks mass. If None
                                        (the default), a fresh mass is drawn
                                        uniformly from [0.015, 0.045) per
                                        instance.
        :param joint_positions: (nd.array) specifies the joints position to start
                                           the episode with. None if the default
                                           to be used.
        :param tool_block_position: (nd.array) specifies the cartesian position
                                               of the tool block, x, y, z.
        :param tool_block_orientation: (nd.array) specifies the euler orientation
                                                  of the tool block, yaw, roll, pitch.
        :param enhanced_observations: (bool) specifies if the observations should
                                             contain tool block mass and tool block
                                             friction.
        """
        # BUG FIX: the original default was
        # tool_block_mass=np.random.uniform(0.015, 0.045). Default
        # expressions are evaluated once at import time, so every instance
        # silently shared the same "random" mass. Draw per instance instead.
        if tool_block_mass is None:
            tool_block_mass = np.random.uniform(0.015, 0.045)
        # NOTE(review): dense_reward_weights and the joint_positions list are
        # still shared mutable defaults; left unchanged because their values
        # feed BaseTask/_task_params with unknown None semantics — confirm
        # they are never mutated in place.
        super().__init__(task_name="experimenting",
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward)
        self._task_robot_observation_keys = ["time_left_for_task",
                                             "joint_positions",
                                             "joint_velocities",
                                             "end_effector_positions"]
        self._task_params["tool_block_mass"] = tool_block_mass
        self._task_params["joint_positions"] = joint_positions
        self._task_params["tool_block_position"] = tool_block_position
        self._task_params["tool_block_orientation"] = tool_block_orientation
        self.previous_object_position = None
        self.previous_end_effector_positions = None
        self.previous_joint_velocities = None
        self.enhanced_observations = enhanced_observations

    def get_description(self):
        """
        :return: (str) returns the description of the task itself.
        """
        return "Task where there is no goal"

    def _set_up_stage_arena(self):
        """Add the tool cube to the stage and register its observation keys.

        :return:
        """
        creation_dict = {
            'name': "tool_block",
            'shape': "cube",
            'initial_position': self._task_params["tool_block_position"],
            'initial_orientation': self._task_params["tool_block_orientation"],
            'mass': self._task_params["tool_block_mass"]
        }
        self._stage.add_rigid_general_object(**creation_dict)
        self._task_stage_observation_keys = [
            "tool_block_type", "tool_block_size",
            "tool_block_cartesian_position", "tool_block_orientation",
            "tool_block_linear_velocity", "tool_block_angular_velocity",
        ]
        if self.enhanced_observations:
            # expose physical properties only when explicitly requested
            self._task_stage_observation_keys.extend(["tool_block_mass",
                                                      "tool_block_friction"])
        return

    def _set_intervention_space_a(self):
        """Narrow the visual objects' height range for space A (0.08-0.20).

        :return:
        """
        super(ExperimentingTaskGenerator, self)._set_intervention_space_a()
        for visual_object in self._stage.get_visual_objects():
            self._intervention_space_a[visual_object]['cylindrical_position'][
                0][-1] \
                = 0.08
            self._intervention_space_a[visual_object]['cylindrical_position'][
                1][-1] \
                = 0.20
        return

    def _set_intervention_space_b(self):
        """Narrow the visual objects' height range for space B (0.20-0.25).

        :return:
        """
        super(ExperimentingTaskGenerator, self)._set_intervention_space_b()
        for visual_object in self._stage.get_visual_objects():
            self._intervention_space_b[visual_object]['cylindrical_position'][0][
                -1] \
                = 0.20
            self._intervention_space_b[visual_object]['cylindrical_position'][1][
                -1] \
                = 0.25
        return

    def get_reward(self):
        # goal-less task: reward is identically zero
        return 0

    def _update_task_state(self, update_task_info):
        """Cache the latest robot/object state for the next step.

        :param update_task_info:
        :return:
        """
        self.previous_end_effector_positions = \
            update_task_info['current_end_effector_positions']
        self.previous_object_position = \
            update_task_info['current_tool_block_position']
        self.previous_joint_velocities = \
            update_task_info['current_velocity']
        return

    def _set_task_state(self):
        """Initialise the cached previous-state buffers from the live robot/stage.

        :return:
        """
        self.previous_end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        self.previous_end_effector_positions = \
            self.previous_end_effector_positions.reshape(-1, 3)
        self.previous_object_position = \
            self._stage.get_object_state('tool_block', 'cartesian_position')
        self.previous_joint_velocities = \
            self._robot.get_latest_full_state()['velocities']
        return

    def _handle_contradictory_interventions(self, interventions_dict):
        """Keep tool and goal block sizes in sync and pin the goal block axis.

        :param interventions_dict:
        :return:
        """
        # for example size on goal_or tool should be propagated to the other
        if 'goal_block' in interventions_dict:
            if 'size' in interventions_dict['goal_block']:
                if 'tool_block' not in interventions_dict:
                    interventions_dict['tool_block'] = dict()
                interventions_dict['tool_block']['size'] = \
                    interventions_dict['goal_block']['size']
            if 'cylindrical_position' in interventions_dict['goal_block']:
                # goal block stays on the arena's central axis
                interventions_dict['goal_block']['cylindrical_position'][0] = 0
                interventions_dict['goal_block']['cylindrical_position'][1] = 0
        elif 'tool_block' in interventions_dict:
            if 'size' in interventions_dict['tool_block']:
                if 'goal_block' not in interventions_dict:
                    interventions_dict['goal_block'] = dict()
                interventions_dict['goal_block']['size'] = \
                    interventions_dict['tool_block']['size']
        return interventions_dict

    def sample_new_goal(self, level=None):
        """
        Used to sample new goal from the corresponding shape families.

        :param level: (int) specifying the level - not used for now.

        :return: (dict) the corresponding interventions dict that could then
                        be applied to get a new sampled goal.
        """
        intervention_dict = dict()
        intervention_dict['goal_block'] = dict()
        if self._task_params['variables_space'] == 'space_a':
            intervention_space = self._intervention_space_a
        elif self._task_params['variables_space'] == 'space_b':
            intervention_space = self._intervention_space_b
        elif self._task_params['variables_space'] == 'space_a_b':
            intervention_space = self._intervention_space_a_b
        # sample only the height; the goal block stays on the central axis
        height_bounds = \
            intervention_space['goal_block']['cylindrical_position']
        intervention_dict['goal_block']['cylindrical_position'] = \
            np.array([0, 0, np.random.uniform(height_bounds[0][-1],
                                              height_bounds[1][-1])])
        return intervention_dict

    def _adjust_variable_spaces_after_intervention(self, interventions_dict):
        """After a tool-block size intervention, lift each space's minimum
        height so the block still rests on the floor (half its new height)."""
        spaces = [self._intervention_space_a,
                  self._intervention_space_b,
                  self._intervention_space_a_b]
        if 'tool_block' in interventions_dict:
            if 'size' in interventions_dict['tool_block']:
                for variable_space in spaces:
                    variable_space['tool_block'][
                        'cylindrical_position'][0][
                        -1] = \
                        self._stage.get_object_state('tool_block', 'size')[
                            -1] / 2.0
        return
| 624 | 9,447 | 23 |
abdf9913bb8e4a3e540e3afa3bc655a6aac7779e | 4,078 | py | Python | telegram_ecommerce/tamplates/products.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 10 | 2020-11-20T20:55:52.000Z | 2022-02-10T20:25:45.000Z | telegram_ecommerce/tamplates/products.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 1 | 2022-02-16T10:28:18.000Z | 2022-02-16T10:35:31.000Z | telegram_ecommerce/tamplates/products.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 8 | 2021-05-01T01:13:09.000Z | 2022-03-13T14:00:01.000Z | from telegram import InputMediaPhoto
from ..language import get_text
from ..database.query import count_occurrence_of_specified_rating
from .buttons import (
get_list_of_buttons,
tamplate_for_show_a_list_of_products,
tamplate_for_show_a_detailed_product)
| 28.921986 | 78 | 0.664787 | from telegram import InputMediaPhoto
from ..language import get_text
from ..database.query import count_occurrence_of_specified_rating
from .buttons import (
get_list_of_buttons,
tamplate_for_show_a_list_of_products,
tamplate_for_show_a_detailed_product)
class Product():
    """Plain data holder describing a single store product."""

    def __init__(
            self,
            product_id,
            name,
            description,
            price,
            quantity_in_stock,
            quantity_purchased,
            category_id,
            image_id = None):
        # Store every constructor argument verbatim on the instance.
        (self.product_id,
         self.name,
         self.description,
         self.price,
         self.quantity_in_stock,
         self.quantity_purchased,
         self.category_id,
         self.image_id) = (product_id, name, description, price,
                           quantity_in_stock, quantity_purchased,
                           category_id, image_id)

    def create_a_instance_of_this_class_from_a_list_of_properties(
            properties):
        # Factory: build a Product from an ordered sequence of its
        # attributes (e.g. a database row). Invoked on the class itself,
        # not on an instance.
        return Product(*properties)
class ListProductIterator():
    """Circular cursor over a fixed collection of products.

    The cursor starts one step before the first element, so the first
    call to next() yields element 0, and actual() on a fresh iterator
    returns the last element.
    """

    def __init__(self, *list_of_products):
        self.list_of_products = list_of_products
        self.iter = -1

    def create_a_list_from_a_query(query):
        # Factory invoked on the class itself: turn raw query rows into
        # Product instances and wrap them in an iterator.
        products = [
            Product.create_a_instance_of_this_class_from_a_list_of_properties(row)
            for row in query]
        return ListProductIterator(*products)

    def actual(self):
        # Element under the cursor, without moving it.
        return self.list_of_products[self.iter]

    def next(self):
        # Advance (wrapping past the end) and return the new element.
        self.__increment_iter__()
        return self.list_of_products[self.iter]

    def previus(self):
        # Step back (wrapping past the start) and return the new element.
        self.__decrement_iter__()
        return self.list_of_products[self.iter]

    def __increment_iter__(self):
        last_index = len(self.list_of_products) - 1
        self.iter = 0 if self.iter == last_index else self.iter + 1

    def __decrement_iter__(self):
        if self.iter <= 0:
            self.iter = len(self.list_of_products) - 1
        else:
            self.iter -= 1

    def is_empty(self):
        # True when the iterator holds no products at all.
        return not self.list_of_products
def send_a_product(update, context, product, pattern_identifier):
    """Replace the current callback message's media with *product*'s photo
    and its short caption, attaching the list-navigation keyboard built
    for *pattern_identifier*."""
    query = update.callback_query
    markup = tamplate_for_show_a_list_of_products(
        pattern_identifier, context)
    text = get_text_for_product(product, context)
    query.message.edit_media(
        media = InputMediaPhoto(product.image_id, text),
        reply_markup = markup)
def send_a_detailed_product(update, context, product, pattern_identifier):
    """Replace the current callback message's media with *product*'s photo
    and its long (detailed) caption, attaching the detailed-view keyboard
    built for *pattern_identifier*."""
    query = update.callback_query
    markup = tamplate_for_show_a_detailed_product(
        pattern_identifier, context)
    text = get_text_for_detailed_product(product, context)
    query.message.edit_media(
        media = InputMediaPhoto(product.image_id, text),
        reply_markup = markup)
def send_a_inline_with_a_list_of_products(
        update,
        context,
        text,
        list_of_names):
    """Reply to the incoming message with *text* and an inline keyboard
    containing one button per entry of *list_of_names*.

    Note: ``context`` is currently unused here but kept for a uniform
    handler signature.
    """
    buttons_with_list_of_names = get_list_of_buttons(*list_of_names)
    update.message.reply_text(text, reply_markup=buttons_with_list_of_names)
def get_text_for_product(product, context):
    """Return the short caption for *product*: its name followed by the
    localised "price" label and the price value."""
    text = (product.name + "\n\n" +
            get_text("price", context) + str(product.price))
    return text
def get_text_for_detailed_product(product, context):
    """Return the long caption for *product*: name, localised price,
    description, purchase count and a rating breakdown.

    The good/regular/bad lines count stored ratings with the values
    10, 5 and 0 respectively (see count_occurrence_of_specified_rating).
    """
    product_id = product.product_id
    text = (product.name + "\n\n" +
            get_text("price", context) + str(product.price) + '\n\n' +
            str(product.description) + '\n\n' +
            get_text("purchased", context) +
            str(product.quantity_purchased) + '\n\n' +
            get_text("rating", context) + '\n' +
            str(count_occurrence_of_specified_rating(product_id, 10)) + ' ' +
            get_text("good", context) + '\n' +
            str(count_occurrence_of_specified_rating(product_id, 5)) + ' ' +
            get_text("regular", context) + '\n' +
            str(count_occurrence_of_specified_rating(product_id, 0)) + ' ' +
            get_text("bad", context)
            )
    return text
| 3,357 | 2 | 434 |
c51070e21a3ea544b20c010d1a0a301c1ee793fa | 5,524 | py | Python | sequana/iotools.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | sequana/iotools.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | sequana/iotools.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2017 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import re
import ruamel.yaml
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["YamlDocParser"]
class YamlDocParser(object):
    """A simple parser to extract block content to be found in YAML files
    So as to create tooltips automatically in :ref:`sequanix`, one can comment
    YAML configuration file with block comments (see developers guide in
    :ref:`developers` )
    Once read and parsed, all block comments before top-level sections are to
    be found in the dictionary :attr:`sections`.
    .. doctest::
        from sequana import snaketools
        from sequana.iotools import YamlDocParser
        module = snaketools.Module('quality_control')
        r = YamlDocParser(module.config)
        r.sections['fastqc']
    Those lines are removed from the docstring but available as a dictionary
    """
    def __init__(self, filename):
        """.. rubric:: constructor
        :param str filename: the YAML file to parse
        ::
            # main documentation
            # block comment
            section1:
                - item
            # block comment
            section2:
            # a comment
            section3:
        Here, section1 and section2 have block comments but not section3
        """
        self.filename = filename
        # Matches a top-level "key:" at the very start of a line.
        self.regex_section = re.compile("^[a-z,A-Z,_,0-9]+:")
        self._specials = ["choice__"]
        self.sections = {}
        # NOTE(review): _read_data (expected to populate self.data with the
        # file's lines) is not defined in this copy — confirm it exists
        # elsewhere in the module.
        self._read_data()
        self._parse_data()
    def _get_expected_sections(self):
        """Get the top level keys in the YAML file
        :return: list of top level sections' names"""
        with open(self.filename, "r") as fh:
            data = ruamel.yaml.load(fh.read(), ruamel.yaml.RoundTripLoader)
        keys = list(data.keys())
        return keys
    def _parse_data(self):
        """Parse the YAML file to get the block content (comments)
        before each top-level sections. See doc in the constructor
        Removes all # so that the block of comments can be interpreted as
        a standard docstring in Sequanix
        """
        current_block = []
        current_section = "docstring"
        # if we get a line that starts with #, this is a new comment or
        # part of a block comment. Otherwise, it means the current block
        # comment has ended.
        for this in self.data:
            # Beginning of a new section at top level
            if self.regex_section.findall(this):
                name = self.regex_section.findall(this)[0]
                current_section = name.strip(":")
                self.sections[current_section] = "".join(current_block)
                current_block = []
                current_section = None
            elif this.startswith('#'): # a comment at top level
                current_block.append(this)
            elif this.strip() == "": # an empty line
                #this was the main comment, or an isolated comment
                current_block = []
            else: # a non-empty line to skip
                current_block = []
        # Warn about YAML keys that never received a comment block entry.
        for key in self._get_expected_sections():
            if key not in self.sections.keys():
                logger.warning("section %s not dealt by the parsing function" % key)
    def _get_specials(self, section):
        """This method extracts data from the docstring
        Lines such as ::
            field_choice__ = ["a", "b"]
        are extracted. Where _choice is a special keyword to be
        found.
        """
        if section not in self.sections.keys():
            logger.warning("%s not found in the yaml " % section)
            return
        comments = self.sections[section]
        specials = {}
        for line in comments.split("\n"):
            if "#############" in line:
                pass
            elif sum([this in line for this in self._specials]):
                # NOTE(review): each pass strips 2 leading characters and
                # re-splits the same ``line`` — correct only while
                # _specials has a single entry; verify before extending it.
                for special in self._specials:
                    line = line[2:]
                    key, value = line.split("=", 1)
                    key = key.strip().rstrip("__")
                    value = value.strip()
                    # SECURITY: eval() executes arbitrary expressions taken
                    # from YAML comments; only use with trusted config files.
                    specials[key] = list(eval(value))
        return specials
| 32.304094 | 84 | 0.56336 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2017 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import re
import ruamel.yaml
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["YamlDocParser"]
class YamlDocParser(object):
    """A simple parser to extract block content to be found in YAML files
    So as to create tooltips automatically in :ref:`sequanix`, one can comment
    YAML configuration file with block comments (see developers guide in
    :ref:`developers` )
    Once read and parsed, all block comments before top-level sections are to
    be found in the dictionary :attr:`sections`.
    .. doctest::
        from sequana import snaketools
        from sequana.iotools import YamlDocParser
        module = snaketools.Module('quality_control')
        r = YamlDocParser(module.config)
        r.sections['fastqc']
    Those lines are removed from the docstring but available as a dictionary
    """
    def __init__(self, filename):
        """.. rubric:: constructor
        :param str filename: the YAML file to parse
        ::
            # main documentation
            # block comment
            section1:
                - item
            # block comment
            section2:
            # a comment
            section3:
        Here, section1 and section2 have block comments but not section3
        """
        self.filename = filename
        # Matches a top-level "key:" at the very start of a line.
        self.regex_section = re.compile("^[a-z,A-Z,_,0-9]+:")
        self._specials = ["choice__"]
        self.sections = {}
        self._read_data()
        self._parse_data()
    def _get_expected_sections(self):
        """Get the top level keys in the YAML file
        :return: list of top level sections' names"""
        with open(self.filename, "r") as fh:
            data = ruamel.yaml.load(fh.read(), ruamel.yaml.RoundTripLoader)
        keys = list(data.keys())
        return keys
    def _read_data(self):
        # Keep the raw lines (newlines included) for _parse_data to scan.
        with open(self.filename, "r") as fh:
            self.data = fh.readlines()
    def _parse_data(self):
        """Parse the YAML file to get the block content (comments)
        before each top-level sections. See doc in the constructor
        Removes all # so that the block of comments can be interpreted as
        a standard docstring in Sequanix
        """
        current_block = []
        current_section = "docstring"
        # if we get a line that starts with #, this is a new comment or
        # part of a block comment. Otherwise, it means the current block
        # comment has ended.
        for this in self.data:
            # Beginning of a new section at top level
            if self.regex_section.findall(this):
                name = self.regex_section.findall(this)[0]
                current_section = name.strip(":")
                self.sections[current_section] = "".join(current_block)
                current_block = []
                current_section = None
            elif this.startswith('#'): # a comment at top level
                current_block.append(this)
            elif this.strip() == "": # an empty line
                #this was the main comment, or an isolated comment
                current_block = []
            else: # a non-empty line to skip
                current_block = []
        # Warn about YAML keys that never received a comment block entry.
        for key in self._get_expected_sections():
            if key not in self.sections.keys():
                logger.warning("section %s not dealt by the parsing function" % key)
    def _block2docstring(self, section):
        """Convert the comment block stored for *section* into plain text:
        separator lines and special ``choice__`` lines are dropped and the
        leading "# " of each remaining line is stripped."""
        if section not in self.sections.keys():
            logger.warning("%s not found in the yaml " % section)
            return
        comments = self.sections[section]
        docstring = []
        for line in comments.split("\n"):
            if "#############" in line:
                pass
            elif sum([this in line for this in self._specials]):
                pass
            else:
                if len(line)<2: # an empty line (to keep)
                    docstring.append("")
                else:
                    docstring.append(line[2:]) # strip the "# "characters
        docstring = "\n".join(docstring).strip()
        return docstring
    def _get_specials(self, section):
        """This method extracts data from the docstring
        Lines such as ::
            field_choice__ = ["a", "b"]
        are extracted. Where _choice is a special keyword to be
        found.
        """
        if section not in self.sections.keys():
            logger.warning("%s not found in the yaml " % section)
            return
        comments = self.sections[section]
        specials = {}
        for line in comments.split("\n"):
            if "#############" in line:
                pass
            elif sum([this in line for this in self._specials]):
                # NOTE(review): each pass strips 2 leading characters and
                # re-splits the same ``line`` — correct only while
                # _specials has a single entry; verify before extending it.
                for special in self._specials:
                    line = line[2:]
                    key, value = line.split("=", 1)
                    key = key.strip().rstrip("__")
                    value = value.strip()
                    # SECURITY: eval() executes arbitrary expressions taken
                    # from YAML comments; only use with trusted config files.
                    specials[key] = list(eval(value))
        return specials
| 773 | 0 | 54 |
fb65b58ba30ff18957a19ea60f05ec0892aff289 | 4,628 | py | Python | examples/websocket/tests.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 1,410 | 2015-01-02T14:55:07.000Z | 2022-03-28T17:22:06.000Z | examples/websocket/tests.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 194 | 2015-01-22T06:18:24.000Z | 2020-10-20T21:21:58.000Z | examples/websocket/tests.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 168 | 2015-01-31T10:29:55.000Z | 2022-03-14T10:22:24.000Z | '''Tests the websocket middleware in pulsar.apps.ws.'''
import unittest
import asyncio
from pulsar.api import send
from pulsar.apps.ws import WebSocket, WS
from pulsar.apps.http import HttpClient
from pulsar.apps.test import run_test_server
from examples.websocket.manage import server
| 33.294964 | 74 | 0.612576 | '''Tests the websocket middleware in pulsar.apps.ws.'''
import unittest
import asyncio
from pulsar.api import send
from pulsar.apps.ws import WebSocket, WS
from pulsar.apps.http import HttpClient
from pulsar.apps.test import run_test_server
from examples.websocket.manage import server
class Echo(WS):
    """WebSocket handler that funnels every received event (messages,
    pings, pongs, close) into an asyncio queue so tests can await them
    in arrival order via :meth:`get`."""
    def __init__(self, loop):
        # NOTE(review): the ``loop`` keyword of asyncio.Queue was removed
        # in Python 3.10 — confirm the supported interpreter range.
        self.queue = asyncio.Queue(loop=loop)
    def get(self):
        """Return an awaitable yielding the next queued event."""
        return self.queue.get()
    def on_message(self, ws, message):
        # Text frames are queued verbatim.
        self.queue.put_nowait(message)
    def on_ping(self, ws, body):
        # Answer with a pong, then record the ping payload.
        ws.pong(body)
        self.queue.put_nowait('PING: %s' % body.decode('utf-8'))
    def on_pong(self, ws, body):
        self.queue.put_nowait('PONG: %s' % body.decode('utf-8'))
    def on_close(self, ws):
        self.queue.put_nowait('CLOSE')
class TestWebSocket(unittest.TestCase):
    """End-to-end tests for the websocket example application: they spin
    up the example server once per class and exercise the handshake,
    ping/pong, close and bad-request paths through a real HTTP client."""
    # Populated by run_test_server in setUpClass.
    app_cfg = None
    concurrency = 'process'
    @classmethod
    async def setUpClass(cls):
        # Start the example server and derive the two websocket endpoints
        # from the first bound address.
        await run_test_server(cls, server)
        addr = cls.app_cfg.addresses[0]
        cls.ws_uri = 'ws://{0}:{1}/data'.format(*addr)
        cls.ws_echo = 'ws://{0}:{1}/echo'.format(*addr)
    @classmethod
    def tearDownClass(cls):
        # Kill the server actor started in setUpClass, if any.
        if cls.app_cfg is not None:
            return send('arbiter', 'kill_actor', cls.app_cfg.name)
    def http(self, **params):
        """Return a fresh HTTP client for one test."""
        return HttpClient(**params)
    def test_hybikey(self):
        # Uses the well-known RFC 6455 example key/accept pair.
        w = WebSocket('/', None)
        v = w.challenge_response('dGhlIHNhbXBsZSBub25jZQ==')
        self.assertEqual(v, "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")
    async def test_bad_requests(self):
        # Wrong method and malformed/missing handshake headers must be
        # rejected with 405/400 respectively.
        c = self.http()
        response = await c.post(self.ws_uri)
        self.assertEqual(response.status_code, 405)
        #
        response = await c.get(self.ws_uri,
                               headers=[('Sec-Websocket-Key', 'x')])
        self.assertEqual(response.status_code, 400)
        #
        response = await c.get(self.ws_uri,
                               headers=[('Sec-Websocket-Key', 'bla')])
        self.assertEqual(response.status_code, 400)
        #
        response = await c.get(self.ws_uri,
                               headers=[('Sec-Websocket-version', 'xxx')])
        self.assertEqual(response.status_code, 400)
    async def test_upgrade(self):
        # A valid handshake returns 101 and an upgraded connection.
        c = self.http()
        handler = Echo(c._loop)
        ws = await c.get(self.ws_echo, websocket_handler=handler)
        response = ws.handshake
        self.assertEqual(response.status_code, 101)
        self.assertEqual(response.headers['upgrade'], 'websocket')
        self.assertEqual(ws.connection, response.connection)
        self.assertEqual(ws.handler, handler)
        #
        self.assertTrue(response.event('post_request').fired())
        self.assertFalse(ws.event('post_request').fired())
        # Send a message to the websocket
        ws.write('Hi there!')
        message = await handler.get()
        self.assertEqual(message, 'Hi there!')
    async def test_ping(self):
        c = self.http()
        handler = Echo(c._loop)
        ws = await c.get(self.ws_echo, websocket_handler=handler)
        #
        # ASK THE SERVER TO SEND A PING FRAME
        ws.write('send ping TESTING PING')
        message = await handler.get()
        self.assertEqual(message, 'PING: TESTING PING')
    async def test_pong(self):
        c = self.http()
        handler = Echo(c._loop)
        ws = await c.get(self.ws_echo, websocket_handler=handler)
        #
        ws.ping('TESTING CLIENT PING')
        message = await handler.get()
        self.assertEqual(message, 'PONG: TESTING CLIENT PING')
    async def test_close(self):
        # Server-initiated close must propagate the 1001 close code.
        c = self.http()
        handler = Echo(c._loop)
        ws = await c.get(self.ws_echo, websocket_handler=handler)
        self.assertEqual(ws.event('post_request').fired(), 0)
        ws.write('send close 1001')
        message = await handler.get()
        self.assertEqual(message, 'CLOSE')
        self.assertTrue(ws.close_reason)
        self.assertEqual(ws.close_reason[0], 1001)
        self.assertTrue(ws.connection.closed)
    async def test_home(self):
        # NOTE(review): self.uri is not assigned in setUpClass here —
        # presumably attached by run_test_server; verify.
        c = self.http()
        response = await c.get(self.uri)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['content-type'],
                         'text/html; charset=utf-8')
    async def test_graph(self):
        c = self.http()
        handler = Echo(c._loop)
        ws = await c.get(self.ws_uri, websocket_handler=handler)
        self.assertEqual(ws.event('post_request').fired(), 0)
        ws.write('data')
        message = await handler.get()
        self.assertTrue(message)
| 3,740 | 390 | 208 |
361a95a9d55113478f6cb1aacfcbe892d796532c | 3,217 | py | Python | app/main/address_ctlr.py | bvbgrad/betterJob | d54962d31eafde428fa7309c8bf5ac238bf6a640 | [
"MIT"
] | null | null | null | app/main/address_ctlr.py | bvbgrad/betterJob | d54962d31eafde428fa7309c8bf5ac238bf6a640 | [
"MIT"
] | null | null | null | app/main/address_ctlr.py | bvbgrad/betterJob | d54962d31eafde428fa7309c8bf5ac238bf6a640 | [
"MIT"
] | null | null | null | """
"""
import app.utils6L.utils6L as utils
import logging
import os
import PySimpleGUI as sg
from app.main.views import view_create_link_address
from app.model import db_session
from app.model.Company import Address, Company
from PySimpleGUI.PySimpleGUI import popup_scrolled
logger_name = os.getenv("LOGGER_NAME")
logger = logging.getLogger(logger_name)
NO_COMPANY_ADDRESS = 'No company address'
@utils.log_wrap
@utils.log_wrap
@utils.log_wrap
@utils.log_wrap
| 31.23301 | 79 | 0.632577 | """
"""
import app.utils6L.utils6L as utils
import logging
import os
import PySimpleGUI as sg
from app.main.views import view_create_link_address
from app.model import db_session
from app.model.Company import Address, Company
from PySimpleGUI.PySimpleGUI import popup_scrolled
logger_name = os.getenv("LOGGER_NAME")
logger = logging.getLogger(logger_name)
NO_COMPANY_ADDRESS = 'No company address'
@utils.log_wrap
def add_new_address():
    """Prompt the user for a street address and persist it.

    Addresses shorter than 3 characters are rejected with a popup;
    cancelling the prompt (None) does nothing.
    """
    logger.info(__name__ + ".add_new_address()")
    text = sg.popup_get_text('Get address', 'Street address')
    if text is not None:
        if len(text) > 2:
            address = Address(street=text)
            with db_session() as db:
                address.add_address(db, address)
        else:
            sg.popup("Addressess must have at least 3 characters")
@utils.log_wrap
def get_address_list():
    """Fetch every address from the database and display them, one per
    line, in a scrolled popup sized to the number of rows."""
    logger.info(__name__ + ".get_address_list()")
    address = Address()
    with db_session() as db:
        address_string = ""
        address_list = address.get_address_list(db)
        number_addresses = len(address_list)
        for address in address_list:
            address_string += f"\t{address}\n"
        popup_scrolled(
            f"There are {number_addresses} addresses:",
            f"{address_string}",
            title="Addresses in the database",
            size=(100, number_addresses))
@utils.log_wrap
def link_address_to_company(company_info):
    """Attach a newly entered address to the single selected company.

    :param company_info: list of selected company names; exactly one
        entry is required, otherwise the user is asked to choose one.
    """
    logger.info(__name__ + ".link_address_to_company()")
    if len(company_info) == 1:
        company_name = company_info[0]
        msg = f"Link a new address to '{company_name}'?"
        response = sg.popup_yes_no(msg)
        if response == "Yes":
            address01 = Address()
            address01 = view_create_link_address(address01)
            # None means the user cancelled the address dialog.
            if address01 is not None:
                company01 = Company()
                with db_session() as db:
                    company01 = company01.get_company_by_name(db, company_name)
                    address01.company_IdFK = company01.company_Id
                    address01.add_address(db, address01)
    else:
        sg.popup("Choose a company")
@utils.log_wrap
def delete_address(window, address):
    """Delete the single selected address from the database.

    :param window: GUI window handle (currently unused, kept for the
        common handler signature).
    :param address: list of selected address strings; exactly one entry
        is required, with fields separated by '*'.

    Fixes: ``address_found`` was initialised *inside* the scan loop, so
    an empty address list raised NameError; the "Not found" log used the
    shadowing loop variable instead of the searched address.
    """
    logger.info(__name__ + f".delete_address({address})")
    if len(address) != 1:
        sg.popup("Please select a single address to delete")
        return
    # address[0] is the candidate address to delete
    # unless it is the default value that indicates there are no addresses
    if address[0] == NO_COMPANY_ADDRESS:
        sg.popup("There is no address to delete")
        return
    # we have a single address to delete
    # create a skeleton address and scan the address list to find its equal
    ap = address[0].split('*')
    address01 = Address(ap[0], ap[1], ap[2], ap[3])
    with db_session() as db:
        addresses = address01.get_address_list(db)
        address_found = False
        matched = None
        for candidate in addresses:
            if address01 == candidate:
                address_found = True
                matched = candidate
                break
        if address_found:
            matched.delete_self(db)
            logger.info(f"Deleted: {matched}")
        else:
            logger.info(f"Not found: {address01}")
| 2,653 | 0 | 88 |
f71a4b33d3f34e4911b2fce2af6caffbdfbb62bf | 1,326 | py | Python | Assessments/count-contained-permutations.py | SaumyaRai2010/algoexpert-data-structures-algorithms | bcafd8d7798661bf86c2d6234221d764c68fc19f | [
"MIT"
] | 2 | 2021-08-17T14:13:01.000Z | 2021-08-17T14:13:16.000Z | Assessments/count-contained-permutations.py | SaumyaRai2010/algoexpert-data-structures-algorithms | bcafd8d7798661bf86c2d6234221d764c68fc19f | [
"MIT"
] | null | null | null | Assessments/count-contained-permutations.py | SaumyaRai2010/algoexpert-data-structures-algorithms | bcafd8d7798661bf86c2d6234221d764c68fc19f | [
"MIT"
] | null | null | null |
# COUNT CONTAINED PERMUTATIONS
# O(M * U + N) time and O(U) space, where M -> length of big string,
# U -> number of unique characters in small string, N -> length
# of small string.
# U is actually a constant since it can't be greater than 26. and
# M > N, so M will dissolve N
# So, modified complexities:
# O(M) time and O(1) space, M -> length of big string | 26 | 68 | 0.684766 |
# COUNT CONTAINED PERMUTATIONS
# O(M * U + N) time and O(U) space, where M -> length of big string,
# U -> number of unique characters in small string, N -> length
# of small string.
# U is actually a constant since it can't be greater than 26. and
# M > N, so M will dissolve N
# So, modified complexities:
# O(M) time and O(1) space, M -> length of big string
def countContainedPermutations(bigString, smallString):
    """Count the substrings of *bigString* that are permutations of
    *smallString*.

    Uses a fixed-size sliding window of letter counts compared with the
    target counts via collections.Counter — replacing the hand-rolled
    dict bookkeeping and the sibling matchCounts helper.

    O(M) time and O(1) extra space (the window never holds more distinct
    letters than the alphabet), where M is len(bigString).
    """
    from collections import Counter
    targetCounts = Counter(smallString)
    windowCounts = Counter()
    windowSize = len(smallString)
    totalCount = 0
    for end, letter in enumerate(bigString):
        windowCounts[letter] += 1
        # Once the window would exceed len(smallString), evict the
        # character falling off the left edge.
        if end >= windowSize:
            outgoing = bigString[end - windowSize]
            windowCounts[outgoing] -= 1
            if windowCounts[outgoing] == 0:
                # Drop zero entries so Counter equality stays exact.
                del windowCounts[outgoing]
        if windowCounts == targetCounts:
            totalCount += 1
    return totalCount
def matchCounts(bigCount, smallCount):
    """Return True when every letter of smallCount appears in bigCount
    with exactly the same count (extra letters in bigCount are ignored)."""
    return all(
        bigCount.get(letter) == expected
        for letter, expected in smallCount.items()
    )
014ea7ee19fe30e5e999c1aec260c631eca49b6a | 372 | py | Python | rblxpy/__init__.py | INfoUpgraders/rblxpy | 481af14bb7a8bc349c635b865afa1c113097b3a7 | [
"MIT"
] | null | null | null | rblxpy/__init__.py | INfoUpgraders/rblxpy | 481af14bb7a8bc349c635b865afa1c113097b3a7 | [
"MIT"
] | null | null | null | rblxpy/__init__.py | INfoUpgraders/rblxpy | 481af14bb7a8bc349c635b865afa1c113097b3a7 | [
"MIT"
] | null | null | null | import urllib.request, json
print(Users.get_user(Users("INfoUpgradersYT")))
| 26.571429 | 116 | 0.669355 | import urllib.request, json
class Users:
    """Minimal client for the Roblox get-by-username users endpoint."""
    def __init__(self, username):
        self.username = username
    def get_user(self):
        """Fetch this user's record and return the decoded JSON dict.

        NOTE(review): performs a blocking, unauthenticated request over
        plain http, and interpolates the username into the URL without
        escaping — confirm it never contains URL metacharacters.
        """
        with urllib.request.urlopen(f"http://api.roblox.com/users/get-by-username?username={self.username}") as url:
            data = json.loads(url.read().decode())
            return data
print(Users.get_user(Users("INfoUpgradersYT")))
| 227 | -9 | 77 |
4ac0d9b1f7062d530a2af91182823f4011c59b45 | 1,996 | py | Python | torch/distributions/pareto.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | torch/distributions/pareto.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | torch/distributions/pareto.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z | from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all
class Pareto(TransformedDistribution):
r"""
Samples from a Pareto Type 1 distribution.
Example::
>>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1
tensor([ 1.5623])
Args:
scale (float or Tensor): Scale parameter of the distribution
alpha (float or Tensor): Shape parameter of the distribution
"""
arg_constraints = {'alpha': constraints.positive, 'scale': constraints.positive}
@property
@property
@constraints.dependent_property
| 36.962963 | 88 | 0.677355 | from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all
class Pareto(TransformedDistribution):
    r"""
    Samples from a Pareto Type 1 distribution.
    Example::
        >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1
        tensor([ 1.5623])
    Args:
        scale (float or Tensor): Scale parameter of the distribution
        alpha (float or Tensor): Shape parameter of the distribution
    """
    arg_constraints = {'alpha': constraints.positive, 'scale': constraints.positive}
    def __init__(self, scale, alpha, validate_args=None):
        self.scale, self.alpha = broadcast_all(scale, alpha)
        # Built as scale * exp(Exponential(alpha)): an Exponential base
        # pushed through exp() and then scaled.
        base_dist = Exponential(self.alpha)
        transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
        super(Pareto, self).__init__(base_dist, transforms, validate_args=validate_args)
    def expand(self, batch_shape, _instance=None):
        """Return a new Pareto with parameters expanded to *batch_shape*."""
        new = self._get_checked_instance(Pareto, _instance)
        new.scale = self.scale.expand(batch_shape)
        new.alpha = self.alpha.expand(batch_shape)
        return super(Pareto, self).expand(batch_shape, _instance=new)
    @property
    def mean(self):
        # mean is inf for alpha <= 1
        # (the clamp makes the denominator zero there, yielding inf).
        a = self.alpha.clamp(min=1)
        return a * self.scale / (a - 1)
    @property
    def variance(self):
        # var is inf for alpha <= 2
        a = self.alpha.clamp(min=2)
        return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))
    @constraints.dependent_property
    def support(self):
        # Support depends on the scale parameter: x >= scale.
        return constraints.greater_than(self.scale)
    def entropy(self):
        """Differential entropy: log(scale / alpha) + 1 + 1 / alpha."""
        return ((self.scale / self.alpha).log() + (1 + self.alpha.reciprocal()))
| 933 | 0 | 159 |
2fefa0cc0491cc014b49bbd1dfae85fb15a3a408 | 1,123 | py | Python | bitmovin/services/outputs/output_service.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | bitmovin/services/outputs/output_service.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | bitmovin/services/outputs/output_service.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | from bitmovin.bitmovin_object import BitmovinObject
from .s3_output_service import S3
from .gcs_output_service import GCS
from .akamai_netstorage_output_service import AkamaiNetStorage
from .azure_output_service import Azure
from .ftp_output_service import FTP
from .sftp_output_service import SFTP
from .generic_s3_output_service import GenericS3
from .local_output_service import Local
from .s3_role_based_output_service import S3RoleBased
| 43.192308 | 78 | 0.779163 | from bitmovin.bitmovin_object import BitmovinObject
from .s3_output_service import S3
from .gcs_output_service import GCS
from .akamai_netstorage_output_service import AkamaiNetStorage
from .azure_output_service import Azure
from .ftp_output_service import FTP
from .sftp_output_service import SFTP
from .generic_s3_output_service import GenericS3
from .local_output_service import Local
from .s3_role_based_output_service import S3RoleBased
class OutputService(BitmovinObject):
def __init__(self, http_client):
super().__init__()
self.http_client = http_client
self.S3 = S3(http_client=self.http_client)
self.GCS = GCS(http_client=self.http_client)
self.AkamaiNetStorage = AkamaiNetStorage(http_client=self.http_client)
self.Azure = Azure(http_client=self.http_client)
self.FTP = FTP(http_client=self.http_client)
self.SFTP = SFTP(http_client=self.http_client)
self.GenericS3 = GenericS3(http_client=self.http_client)
self.Local = Local(http_client=self.http_client)
self.S3RoleBased = S3RoleBased(http_client=self.http_client)
| 616 | 15 | 49 |
5821dd47fba75dd2ea15fba39cbd3efaf72653d2 | 431 | py | Python | Markets/BTCMarkets/btcmarkets_fetch.py | infectiious/Pharaoh | edde36f578fec1f0621da4ef212c3b01ea7b67ed | [
"MIT"
] | null | null | null | Markets/BTCMarkets/btcmarkets_fetch.py | infectiious/Pharaoh | edde36f578fec1f0621da4ef212c3b01ea7b67ed | [
"MIT"
] | null | null | null | Markets/BTCMarkets/btcmarkets_fetch.py | infectiious/Pharaoh | edde36f578fec1f0621da4ef212c3b01ea7b67ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from btcmarkets_api import Market
# One Market handle per supported AUD trading pair on BTC Markets.
BTC = Market("/market/BTC/AUD/tick", "BTC")
LTC = Market("/market/LTC/AUD/tick", "LTC")
ETH = Market("/market/ETH/AUD/tick", "ETH")
ETC = Market("/market/ETC/AUD/tick", "ETC")
XRP = Market("/market/XRP/AUD/tick", "XRP")
BCH = Market("/market/BCH/AUD/tick", "BCH")
# Pull the latest tick data for every pair.
BTC.update_data()
LTC.update_data()
ETH.update_data()
ETC.update_data()
XRP.update_data()
BCH.update_data()
| 25.352941 | 43 | 0.691415 | #!/usr/bin/env python3
from btcmarkets_api import Market
# One Market handle per supported AUD trading pair on BTC Markets.
BTC = Market("/market/BTC/AUD/tick", "BTC")
LTC = Market("/market/LTC/AUD/tick", "LTC")
ETH = Market("/market/ETH/AUD/tick", "ETH")
ETC = Market("/market/ETC/AUD/tick", "ETC")
XRP = Market("/market/XRP/AUD/tick", "XRP")
BCH = Market("/market/BCH/AUD/tick", "BCH")
# Pull the latest tick data for every pair.
BTC.update_data()
LTC.update_data()
ETH.update_data()
ETC.update_data()
XRP.update_data()
BCH.update_data()
| 0 | 0 | 0 |
616c3bf20e5e0eba25e4c7b33ae80a18eea27832 | 701 | py | Python | tests/concordance_test.py | stencila/libdh | 41b0dc826e6a6af3390877736185ed90b52459a2 | [
"Apache-2.0"
] | 2 | 2019-01-20T09:35:06.000Z | 2019-03-29T20:29:59.000Z | tests/concordance_test.py | stencila/libdh | 41b0dc826e6a6af3390877736185ed90b52459a2 | [
"Apache-2.0"
] | 1 | 2018-03-19T22:56:07.000Z | 2018-03-20T03:14:19.000Z | tests/concordance_test.py | stencila/libdh | 41b0dc826e6a6af3390877736185ed90b52459a2 | [
"Apache-2.0"
] | 1 | 2018-04-04T00:08:22.000Z | 2018-04-04T00:08:22.000Z | from funcs.concordance import concordance
def test_concordance_string():
    """
    concordance can be called with a string (e.g. a single cell containing a string)
    """
    grams = concordance('Hello world. Hello, my great world! Hello Alice and Bob.', 'world')
    # Only two grams: the third sentence does not contain "world".
    # Note: the parentheses below do not create tuples — the expected
    # list holds two plain strings.
    assert grams == [
        ('Hello world'),
        ('Hello, my great world')
    ]
def test_concordance_array_string():
    """
    concordance can be called with an array of strings (e.g. a column
    of cells containing strings)
    """
    grams = concordance(['Hello world.', 'Hello, my great world!', 'Hello Alice and Bob.'], 'world')
    # Only two grams: the third element does not contain "world".
    # Note: the parentheses below do not create tuples — the expected
    # list holds two plain strings.
    assert grams == [
        ('Hello world'),
        ('Hello, my great world')
    ]
| 29.208333 | 100 | 0.623395 | from funcs.concordance import concordance
def test_concordance_string():
    """
    concordance can be called with a string (e.g. a single cell containing a string)
    """
    grams = concordance('Hello world. Hello, my great world! Hello Alice and Bob.', 'world')
    # Only two grams: the third sentence does not contain "world".
    # Note: the parentheses below do not create tuples — the expected
    # list holds two plain strings.
    assert grams == [
        ('Hello world'),
        ('Hello, my great world')
    ]
def test_concordance_array_string():
    """
    concordance can be called with an array of strings (e.g. a column
    of cells containing strings)
    """
    grams = concordance(['Hello world.', 'Hello, my great world!', 'Hello Alice and Bob.'], 'world')
    # Only two grams: the third element does not contain "world".
    # Note: the parentheses below do not create tuples — the expected
    # list holds two plain strings.
    assert grams == [
        ('Hello world'),
        ('Hello, my great world')
    ]
| 0 | 0 | 0 |
77342629a5359863a3e4ac211322c8f62eafaf6f | 7,392 | py | Python | prototipo_mouse_expressions.py | AlbertMelo/MouseExpressions | 8e2bc36d8378541d512e04b17908bdefac6090e8 | [
"MIT"
] | 3 | 2019-07-22T18:29:10.000Z | 2019-09-11T12:24:05.000Z | prototipo_mouse_expressions.py | AlbertMelo/MouseExpressions | 8e2bc36d8378541d512e04b17908bdefac6090e8 | [
"MIT"
] | 7 | 2019-12-16T22:20:58.000Z | 2022-02-10T00:50:45.000Z | prototipo_mouse_expressions.py | AlbertMelo/MouseExpressions | 8e2bc36d8378541d512e04b17908bdefac6090e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import cv2 as cv
from keras.preprocessing import image
from keras.models import model_from_json
import click
import pandas as pd
from keras.layers import Input
from keras import models
from keras.models import load_model
import pyautogui
import statistics
from PyQt5 import QtWidgets, QtGui
from configurar import configurarWindow
import sys
import configuracoes as cfg
import camera
import mouse
import teclado
import matplotlib.pyplot as plt
pyautogui.FAILSAFE = False
#Captura um posicao padrao da cabeca para que possa
#fazer o deslocamento do mouse
#ponto de referencia #melhorar
#trata imagem da face e faz a predicao
#Objetivo: Reconhecer expressões faciais e posição da cabeça
# em quadro extraído do vídeo recebido de uma chamada de rotina.
#-----------------------------
#Objetivo: Determinar a partir de informações fornecidas pelo UC 001 se
# ocorreu alguma intenção de ação por parte dos usuários a partir do quadro extraído do vídeo capturado pela webcam.
#Realiza emulacao de comando que está associada a expressao
#-----------------------------
#Objetivo: Identificar a ocorrência de expressões faciais e
# movimentos realizados com a cabeça utilizando imagens de vídeos capturadas pela webcam.
# Script entry point: build the controller and run the capture/command loop.
if __name__== '__main__':
    mouse_expressions().executar()
import numpy as np
import cv2 as cv
from keras.preprocessing import image
from keras.models import model_from_json
import click
import pandas as pd
from keras.layers import Input
from keras import models
from keras.models import load_model
import pyautogui
import statistics
from PyQt5 import QtWidgets, QtGui
from configurar import configurarWindow
import sys
import configuracoes as cfg
import camera
import mouse
import teclado
import matplotlib.pyplot as plt
pyautogui.FAILSAFE = False
class mouse_expressions(object):
    """Drive the mouse pointer and emulate click commands from the user's
    facial expressions and head position, using OpenCV face detection and a
    pre-trained Keras CNN expression classifier fed by the webcam."""
    def __init__(self):
        """Load models and configuration, calibrate the head reference point,
        and build the camera/mouse/keyboard helper objects."""
        # Control constants.
        # commands: [click, double_click, scroll_page_up, scroll_page_down, no_action, right_click, alt_tab_shortcut]
        # emotions = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
        self.emocoes = ["raiva", "desgosto", "medo", "felicidade", "tristeza", "surpresa", "neutro"]
        self.FRAMES_VELOCIDADE = 30
        # Loading the configuration values.
        self.FACES_CALIBRAGEM = 20  # frames sampled during head-position calibration
        self.LOTE_IMAGENS = 15  # batch of frames voted on before emitting a command
        self.SEQUENCIA_EXPRESSOES = []  # per-frame expression ids of the current batch
        self.DIMENSAO = 30  # half-size (pixels) of the neutral head zone drawn on screen
        # Paths of the external configuration files.
        self.PATH_MODEL = "./model/modelo_cnn_major_20_10.h5"
        self.PATH_SETUP = "setup.txt"
        print("Carregando arquivos:\n")
        # Load the OpenCV Haar cascade used to detect the FACE.
        self.face_cascade = cv.CascadeClassifier(cv.data.haarcascades + "haarcascade_frontalface_alt.xml")
        print("- modelos do OpenCV")
        # Load the trained CNN expression model.
        print("- - compilando modelo \n")
        self.model = load_model(self.PATH_MODEL)
        self.model.compile  # NOTE(review): attribute access only — compile() is never called; confirm intent
        print("- arquivo de configurações")
        self.COMANDOS = cfg.lerArquivoConfiguracoes()
        print(self.COMANDOS)
        # Instantiate the camera.
        self.camera = camera.Camera()
        # NOTE(review): this compares the imported `camera` module (always non-None);
        # presumably `self.camera` was meant — confirm against the original intent.
        if camera != None:
            (self.xi_cabeca, self.yi_cabeca) = self._calibra_posicao_inicial_cabeca(camera, self.FACES_CALIBRAGEM)
        # Instantiate the mouse helper, anchored at the calibrated head position.
        self.mouse = mouse.Mouse(
            self.xi_cabeca,
            self.yi_cabeca,
            self.DIMENSAO)
        # Instantiate the keyboard helper.
        self.teclado = teclado.Teclado()
    # Captures a default head position so mouse displacement can be computed
    # relative to it (reference point). TODO: improve this calibration.
    def _calibra_posicao_inicial_cabeca(self, camera, FACES_CALIBRAGEM):
        """Average the detected face position over FACES_CALIBRAGEM frames and
        return the (x, y) reference point; exits the program when no face is
        found. Note: reads frames from self.camera, not the `camera` argument,
        which is only used to stop capture on failure."""
        print("Inicio Calibragem da posicao de cabeca")
        acx = 0  # accumulated x of detected faces
        acy = 0  # accumulated y of detected faces
        xi = -1000
        xy = -1000  # NOTE(review): likely meant `yi`; harmless because yi is set before use or the program exits
        conta_faces = 0
        for i in range(FACES_CALIBRAGEM):
            ref, img = self.camera.lerImagem()
            if ref:
                frame = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                face = self.face_cascade.detectMultiScale(frame, 1.3, 5)  # detects the face(s)
                if len(face)>0:
                    for (xf,yf,hf,wf) in face:
                        acx += xf
                        acy += yf
                        conta_faces += 1
                    #cv.imshow('Calibrando posição cabeça',img)
        if conta_faces>0:
            xi = int(acx/conta_faces)
            yi = int(acy/conta_faces)
        else:
            print("Não foi possível detectar rosto! Erro de calibragem")
            camera.pararCaptura()
            cv.destroyAllWindows()
            exit()
        print("Fim calibragem!")
        print("posicao", xi , yi)
        return (xi, yi)
    # Processes the face image and runs the expression prediction.
    def _classificarExpressao(self, img,frame, x, y, w, h):
        """Annotate `img`, crop/normalize the detected face from `frame`, and
        return the index of the most likely expression (argmax of the CNN)."""
        # annotate the image
        cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)  # rectangle around the face
        # green rectangle: the neutral head zone around the calibrated point
        # (note: `-+ int(...)` below evaluates as minus — mirrors the top edge's `+`)
        cv.rectangle(img,(self.xi_cabeca-self.DIMENSAO,
            self.yi_cabeca-self.DIMENSAO + int(self.DIMENSAO/3)),
            (self.xi_cabeca+self.DIMENSAO,
            self.yi_cabeca+self.DIMENSAO-+ int(self.DIMENSAO/3)),
            (0,255,0),1)
        detected_face = frame[int(y):int(y+h), int(x):int(x+w)]  # crop the detected face
        detected_face = cv.resize(detected_face,(48, 48))  # resize image to 48x48
        # prepare the face image for expression classification
        rosto = image.img_to_array(detected_face)
        rosto = np.expand_dims(rosto, axis = 0)
        rosto /= 255. #pixels are in scale of [0, 255]. normalize all pixels in scale of [0, 1]
        rosto = rosto.reshape(-1, 48, 48, 1)
        # predict the face's expression with the trained model
        expressao = self.model.predict(rosto)
        return np.argmax(expressao)
    #-----------------------------
    # Objective: recognize facial expressions and head position in a frame
    # extracted from the video received via a routine call.
    def _reconhecerExpressoesFaciais(self, img, frame, face, xi_cabeca, yi_cabeca):
        """Classify the expression of each detected face and return the last
        one's expression index. NOTE(review): if `face` is empty, `expressao`
        is unbound — callers guard with len(face)>0; confirm."""
        for (x,y,w,h) in face:  # detect faces in the foreground (future work)
            # detect the user's expression
            expressao = self._classificarExpressao(img, frame, x, y, w, h)
        return(expressao)
    #-----------------------------
    # Objective: determine whether the user intended an action, from the
    # information produced by the recognition step on the captured frame.
    # Emulates the command associated with the expression.
    def _classificarExpressaoAcao(self, expressao):
        """Map the expression index to its configured command id and emulate
        the corresponding mouse action."""
        # array 0: angry, 1:disgust, 2:fear, 3:happy, 4:sad, 5:surprise, 6:neutral
        for i in range(len(self.COMANDOS)):
            if int(self.COMANDOS[i]) == expressao:
                self.mouse.emularComandosMouse(str(i))
    #-----------------------------
    # Objective: identify occurrences of facial expressions and head movements
    # in the webcam video frames.
    def _analisaOcorrenciaAcoesUsuario(self, img, frame, face, xi_cabeca, yi_cabeca):
        """Accumulate per-frame expressions; once LOTE_IMAGENS frames are
        collected, take the statistical mode, emulate its command, and reset
        the batch. Returns the expression shown on screen."""
        moda = self.COMANDOS[4]  # default: the "no action" command
        # mechanical mouse movement (movement *intent* should be added in future work)
        for (x,y,h,w) in face:  # improve: consider only the person in front of the equipment (recognize the user)
            expressao = self._reconhecerExpressoesFaciais(img, frame, face, xi_cabeca, yi_cabeca)
            self.SEQUENCIA_EXPRESSOES.append(expressao)
        if (len(self.SEQUENCIA_EXPRESSOES)>=self.LOTE_IMAGENS):
            try:
                moda = int(statistics.mode(self.SEQUENCIA_EXPRESSOES))
            # NOTE(review): bare except — statistics.mode raises StatisticsError on ties (pre-3.8)
            except:
                moda = int(self.COMANDOS[4])  # default "no action" *** adjust so the default truly performs no action
            expressao = moda
            # Emulate the command.
            self._classificarExpressaoAcao(moda)
            self.SEQUENCIA_EXPRESSOES = []
        return expressao
    def executar(self):
        """Main loop: grab frames, move the pointer from head displacement,
        classify/act on expressions, and quit on Esc."""
        while(True):
            # default command so nothing is executed when no face is seen
            # NOTE(review): assumes COMANDOS[4] is also a valid index into self.emocoes — confirm
            expressao=int(self.COMANDOS[4])  # command 4 is the default "do nothing"
            ref, img = self.camera.lerImagem()
            if ref:  # frame read successfully
                frame = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                face = self.face_cascade.detectMultiScale(frame, 1.3, 5)  # detect faces in the frame
                if len(face)>0:
                    (x, y, h, w) = face[0]
                    # move the mouse pointer
                    self.mouse.move_mouse_peso(self.xi_cabeca,self.yi_cabeca,x, y)
                    # analyze the expression
                    expressao = self._analisaOcorrenciaAcoesUsuario(img, frame, face, self.xi_cabeca,self.yi_cabeca)
                cv.putText(img, self.emocoes[expressao], (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (200, 0, 0), 3, cv.LINE_AA)
                cv.imshow('img',img)
            # end the loop/program
            key = cv.waitKey(1)
            if key==27:  # press Esc to quit
                break
        # release the OpenCV objects
        self.camera.pararCaptura()
        cv.destroyAllWindows()
        exit()
# Script entry point: build the controller and run the capture/command loop.
if __name__== '__main__':
    mouse_expressions().executar()
4c97aa5d3d19d9348f63b93ed4291c1b26df65b9 | 1,496 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/plugins/doc_fragments/vmware_rest_client.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/plugins/doc_fragments/vmware_rest_client.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/plugins/doc_fragments/vmware_rest_client.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
| 34.790698 | 129 | 0.701872 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment listing the connection options
    shared by all VMware REST Client based modules."""

    # Parameters for VMware REST Client based modules
    DOCUMENTATION = r'''
options:
  hostname:
    description:
    - The hostname or IP address of the vSphere vCenter server.
    - If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
    type: str
  username:
    description:
    - The username of the vSphere vCenter server.
    - If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
    type: str
    aliases: [ admin, user ]
  password:
    description:
    - The password of the vSphere vCenter server.
    - If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
    type: str
    aliases: [ pass, pwd ]
  validate_certs:
    description:
    - Allows connection when SSL certificates are not valid.
    - Set to C(no) when certificates are not trusted.
    - If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
    type: bool
    default: yes
  protocol:
    description:
    - The connection to protocol.
    type: str
    choices: [ http, https ]
    default: https
'''
| 0 | 1,253 | 23 |
fa88f585ee7d7db3b7f7a6e6a849089d906b2efb | 1,062 | py | Python | datacollection/utils.py | playfulMIT/kimchi | 66802cc333770932a8c8b1a44ea5d235d916a8f1 | [
"MIT"
] | null | null | null | datacollection/utils.py | playfulMIT/kimchi | 66802cc333770932a8c8b1a44ea5d235d916a8f1 | [
"MIT"
] | 16 | 2019-12-10T19:40:27.000Z | 2022-02-10T11:51:06.000Z | datacollection/utils.py | playfulMIT/kimchi | 66802cc333770932a8c8b1a44ea5d235d916a8f1 | [
"MIT"
] | null | null | null | import json
from .models import URL
| 32.181818 | 76 | 0.658192 | import json
from .models import URL
def get_group(self, data_json):
    """Resolve the URL/group for the current session and return (url, payload).

    Uses the session's stored URL when present, otherwise a default record;
    an explicit "group" key in the payload always overrides it.
    """
    print("get_group start session: " + str(self.customsession.session_key))
    payload = json.loads(data_json["data"])
    if self.customsession.url is not None:
        print("urlpk found: ")
        url = self.customsession.url
        print("url: " + str(url.pk))
    else:
        print("urlpk not found, using default")
        url, _ = URL.objects.get_or_create(name="no-url-or-group-specified")
        self.customsession.url = url
    # A "group" in the payload overrides whatever URL the session had.
    if "group" in payload:
        print("group override")
        url, _ = URL.objects.get_or_create(name=payload["group"])
        self.customsession.url = url
    print(url.name)
    self.customsession.save(update_fields=["url"])
    # Prevent the channels session middleware from marking the session dirty.
    self.scope["session"].accessed = False
    self.scope["session"].modified = False
    print("get_group end session: " + str(self.customsession.session_key))
    return url, payload
| 1,001 | 0 | 23 |
679ad77e84c8222952811ad00bc89e353d4ed471 | 5,617 | py | Python | automol/zmatrix/bmat.py | kevinmooreiii/autochem | 87f50adc09c3f1170459c629697aadd74154c769 | [
"Apache-2.0"
] | null | null | null | automol/zmatrix/bmat.py | kevinmooreiii/autochem | 87f50adc09c3f1170459c629697aadd74154c769 | [
"Apache-2.0"
] | null | null | null | automol/zmatrix/bmat.py | kevinmooreiii/autochem | 87f50adc09c3f1170459c629697aadd74154c769 | [
"Apache-2.0"
] | null | null | null | """
IN PROGRESS: Transliterating Carlo's routine from Fortran.
Form the B-Matrix and C-Matrix used to convert the coordinates.
Calculates all of the derivatives via finite difference.
Define starting xyz geometry.
convention: atom 1 is at 0 0 0
            atom 2 bd 0 0
            atom 3 on xy plane
"""
# import numpy as np
#
# NATOMS = 10 # maybe need, don't know
# INT_COORDS = ''
# DELTAX = 0.01
# DELTAY = 0.01
# DELTAZ = 0.01
#
#
# def compute_bmat(natoms, coords, deltax, deltay, deltaz):
# """ compute the bmatrix by central difference
# where B_ik = dq_i / dx_k
# """
#
# b_mat = np.zeros(3*natoms, 3*natoms)
# for j in range(3):
# for k in range(3):
#
# # perturb x + dx and x - dx
# xpert_xp = 1
# xpert_xn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # perturb y + dy and y - dy
# xpert_yp = 1
# xpert_yn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # perturb z + dz and z - dz
# xpert_zp = 1
# xpert_zn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # Now calculate the jk component C-Matrix
# _calculate_bmat_k_component(b_mat, coords, j, j*k,
# x_pert_pp, x_pert_pn,
# x_pert_np, x_pert_nn)
#
# # now update iangsub1 bmat component (whatever this is)
# b_mat = _update_bmat(bmat, coords)
#
# return b_mat
#
#
# def compute_cmat(natoms, coords, deltax, deltay, deltaz):
# """ compute the bmatrix by central difference
# where C_ijk = d2q_i / (dx_j.dx_k)
# """
#
# c_mat = np.zeros(3*natoms, 3*natoms, 3*natoms)
# for j in range(3):
# for k in range(3):
# # perturb xj + dxj and xk + dxk
# x_pert_pp = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj - dxj and yk + dyk
# x_pert_np = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj + dxj and yk - dyk
# x_pert_pn = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj - dxj and xk - dxk
# x_pert_nn = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # Now calculate the jk component C-Matrix
# _calculate_cmat_k_component(c_mat, coords, j, j*k,
# x_pert_pp, x_pert_pn,
# x_pert_np, x_pert_nn)
#
# return c_mat
#
#
# def _perturb_coordinates(coords, jpert, delta1, kpert=None, delta2=None):
# """ Generate coordinates that have been perturbed
# """
# coords[jpert] += delta1
# coords[kpert] += delta2
# # call update_zmat(natom,natomt,intcoor,bislab,ibconn,
# # $ iaconn,idconn,bname,anname,dname,atname,cooxpp,cooypp,
# # $ coozpp,xintpp,tauopt,ntau,idummy,ilin_fr,aconnt,bconnt,
# # $ dconnt,atomlabel,ifilu)
#
# return coords
#
#
# def _calculate_bmat_k_component(b_mat, j_idx, coords, delta,
# x_pert_p, x_pert_n):
# """ Calculate one nine components of B_ij for given __
# """
#
# for i, coord in enumerate(coords):
# if abs(xpert_p[i] - xpert_np[i]) > 300.0:
# if xpert_n[i] < 0.0:
# xpert_n[i] += 360.0
# elif xpert_n[i] > 0.0:
# xpert_n[i] -= 360.0
# if abs(xpert_p[i] - xpert_n[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord', kind, jind, i)
# b_mat[i, j_idx] = (
# ((xpert_p[i] - xpert_n[i]) / 2.0) * (1.0 / delta)
# )
#
# return b_mat
#
#
# def _calculate_cmat_k_component(c_mat, k_idx, coords, delta1, delta2,
# x_pert_pp, x_pert_pn, x_pert_np, x_pert_nn):
# """ Calculate one nine components of C_ijk for given j
# """
#
# for i, coord in enumerate(coords):
#
# if abs(xpert_pp[i] - xpert_np[i]) > 300.0:
# if xpert_pp[i] < 0.0:
# xpert_pp[i] += 360.0
# elif xpert_pp[i] > 0.0:
# xpert_pp[i] -= 360.0
# if abs(xpert_pp[i] - xpert_np[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# if abs(xpert_np[i] - xpert_np[i]) > 300.0:
# if xpert_pn[i] < 0.0:
# xpert_pn[i] += 360.0
# elif xpert_pn[i] > 0.0:
# xpert_pn[i] -= 360.0
# if abs(xpert_pp[i] - xpert_pn[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# if abs(xpert_np[i] - xpert_nn[i]) > 300.0:
# if xpert_nn[i] < 0.0:
# xpert_nn[i] += 360.0
# elif xpert_nn[i] > 0.0:
# xpert_nn[i] -= 360.0
# if abs(xpert_np[i] - xpert_nn[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# c_mat[i, j_idx, k_idx] = (
# xpert_pp[i] - xpert_np[i] - xpert_pn[i] +
# (xpert_nn[i] / 4.0) * (1.0 / deltax) * (1.0 / deltaz)
# )
#
# return c_mat
#
#
# if __name__ == '__main__':
# b_mat = compute_bmat(NATOMS, COORDS, DELTAX, DELTAY, DELTAZ)
# c_mat = compute_cmat(NATOMS, COORDS, DELTAX, DELTAY, DELTAZ)
| 33.434524 | 79 | 0.513263 | """
IN PROGRESS: Transliterating Carlo's routine from Fortran.
Form the B-Matrix and C-Matrix used to convert the coordinates.
Calculates all of the derivatives via finite difference.
Define starting xyz geometry.
convention: atom 1 is at 0 0 0
            atom 2 bd 0 0
            atom 3 on xy plane
"""
# import numpy as np
#
# NATOMS = 10 # maybe need, don't know
# INT_COORDS = ''
# DELTAX = 0.01
# DELTAY = 0.01
# DELTAZ = 0.01
#
#
# def compute_bmat(natoms, coords, deltax, deltay, deltaz):
# """ compute the bmatrix by central difference
# where B_ik = dq_i / dx_k
# """
#
# b_mat = np.zeros(3*natoms, 3*natoms)
# for j in range(3):
# for k in range(3):
#
# # perturb x + dx and x - dx
# xpert_xp = 1
# xpert_xn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # perturb y + dy and y - dy
# xpert_yp = 1
# xpert_yn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # perturb z + dz and z - dz
# xpert_zp = 1
# xpert_zn = 1
# _perturb_coordinates(coords, jpert, delta)
#
# # Now calculate the jk component C-Matrix
# _calculate_bmat_k_component(b_mat, coords, j, j*k,
# x_pert_pp, x_pert_pn,
# x_pert_np, x_pert_nn)
#
# # now update iangsub1 bmat component (whatever this is)
# b_mat = _update_bmat(bmat, coords)
#
# return b_mat
#
#
# def compute_cmat(natoms, coords, deltax, deltay, deltaz):
# """ compute the bmatrix by central difference
# where C_ijk = d2q_i / (dx_j.dx_k)
# """
#
# c_mat = np.zeros(3*natoms, 3*natoms, 3*natoms)
# for j in range(3):
# for k in range(3):
# # perturb xj + dxj and xk + dxk
# x_pert_pp = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj - dxj and yk + dyk
# x_pert_np = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj + dxj and yk - dyk
# x_pert_pn = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # perturb xj - dxj and xk - dxk
# x_pert_nn = _perturb_coordinates(coords, jpert, kpert, d1, d2)
#
# # Now calculate the jk component C-Matrix
# _calculate_cmat_k_component(c_mat, coords, j, j*k,
# x_pert_pp, x_pert_pn,
# x_pert_np, x_pert_nn)
#
# return c_mat
#
#
# def _perturb_coordinates(coords, jpert, delta1, kpert=None, delta2=None):
# """ Generate coordinates that have been perturbed
# """
# coords[jpert] += delta1
# coords[kpert] += delta2
# # call update_zmat(natom,natomt,intcoor,bislab,ibconn,
# # $ iaconn,idconn,bname,anname,dname,atname,cooxpp,cooypp,
# # $ coozpp,xintpp,tauopt,ntau,idummy,ilin_fr,aconnt,bconnt,
# # $ dconnt,atomlabel,ifilu)
#
# return coords
#
#
# def _calculate_bmat_k_component(b_mat, j_idx, coords, delta,
# x_pert_p, x_pert_n):
# """ Calculate one nine components of B_ij for given __
# """
#
# for i, coord in enumerate(coords):
# if abs(xpert_p[i] - xpert_np[i]) > 300.0:
# if xpert_n[i] < 0.0:
# xpert_n[i] += 360.0
# elif xpert_n[i] > 0.0:
# xpert_n[i] -= 360.0
# if abs(xpert_p[i] - xpert_n[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord', kind, jind, i)
# b_mat[i, j_idx] = (
# ((xpert_p[i] - xpert_n[i]) / 2.0) * (1.0 / delta)
# )
#
# return b_mat
#
#
# def _calculate_cmat_k_component(c_mat, k_idx, coords, delta1, delta2,
# x_pert_pp, x_pert_pn, x_pert_np, x_pert_nn):
# """ Calculate one nine components of C_ijk for given j
# """
#
# for i, coord in enumerate(coords):
#
# if abs(xpert_pp[i] - xpert_np[i]) > 300.0:
# if xpert_pp[i] < 0.0:
# xpert_pp[i] += 360.0
# elif xpert_pp[i] > 0.0:
# xpert_pp[i] -= 360.0
# if abs(xpert_pp[i] - xpert_np[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# if abs(xpert_np[i] - xpert_np[i]) > 300.0:
# if xpert_pn[i] < 0.0:
# xpert_pn[i] += 360.0
# elif xpert_pn[i] > 0.0:
# xpert_pn[i] -= 360.0
# if abs(xpert_pp[i] - xpert_pn[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# if abs(xpert_np[i] - xpert_nn[i]) > 300.0:
# if xpert_nn[i] < 0.0:
# xpert_nn[i] += 360.0
# elif xpert_nn[i] > 0.0:
# xpert_nn[i] -= 360.0
# if abs(xpert_np[i] - xpert_nn[i]) > 300.0:
# raise ValueError(
# 'something did not work here: k, j coord',
# kind, jind, i)
#
# c_mat[i, j_idx, k_idx] = (
# xpert_pp[i] - xpert_np[i] - xpert_pn[i] +
# (xpert_nn[i] / 4.0) * (1.0 / deltax) * (1.0 / deltaz)
# )
#
# return c_mat
#
#
# if __name__ == '__main__':
# b_mat = compute_bmat(NATOMS, COORDS, DELTAX, DELTAY, DELTAZ)
# c_mat = compute_cmat(NATOMS, COORDS, DELTAX, DELTAY, DELTAZ)
| 0 | 0 | 0 |
58eab8d36c5aa9986701a4f86760228903a57b48 | 2,315 | py | Python | src/command_modules/azure-cli-botservice/azure/cli/command_modules/botservice/converged_app.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 1 | 2019-06-21T05:07:59.000Z | 2019-06-21T05:07:59.000Z | src/command_modules/azure-cli-botservice/azure/cli/command_modules/botservice/converged_app.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 3 | 2019-07-12T22:10:38.000Z | 2019-07-12T22:10:49.000Z | src/command_modules/azure-cli-botservice/azure/cli/command_modules/botservice/converged_app.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 1 | 2019-06-21T05:08:09.000Z | 2019-06-21T05:08:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import requests
from knack.util import CLIError
from azure.cli.command_modules.botservice import adal_authenticator
| 45.392157 | 120 | 0.621598 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import requests
from knack.util import CLIError
from azure.cli.command_modules.botservice import adal_authenticator
class ConvergedApp:  # pylint:disable=too-few-public-methods
    """Provisions a converged Microsoft Application via the Bot Framework dev portal."""

    app_provision_api_url = 'https://dev.botframework.com/api/botApp/provisionConvergedApp?name={0}'

    @staticmethod
    def provision(bot_name, verbose=False):
        """Provision a converged app named after the bot.

        :param bot_name: name used for the provisioned application
        :param verbose: when True, surface the raw HTTP status and body on failure
        :return: tuple of (msa_app_id, password)
        :raises CLIError: when the provisioning API does not answer 201
        """
        # Use our authenticator to acquire a user token with a custom audience.
        token = adal_authenticator.AdalAuthenticator.acquire_token()
        access_token = token['accessToken']
        # Prepare headers to call the dev portal converged app provisioning API.
        headers = {'Authorization': 'Bearer {0}'.format(access_token)}
        # Provision the app.
        response = requests.post(
            ConvergedApp.app_provision_api_url.format(bot_name),
            headers=headers
        )
        # TODO: Verbose logging
        # TODO: If any success status below 400 is acceptable, check response.ok instead
        # of exactly 201. See http://docs.python-requests.org/en/master/api/#requests.Response.ok
        if response.status_code not in [201]:
            if not verbose:
                # Fixed typo in the user-facing message: "you application" -> "your application".
                raise CLIError(
                    "Unable to provision Microsoft Application automatically. "
                    "To manually provision a Microsoft Application, go to the Application Registration Portal at "
                    "https://apps.dev.microsoft.com/. Once you manually create your application, "
                    "pass the application Id and password as parameters for bot creation.")
            # Verbose mode: expose the raw status code and response body.
            raise CLIError("%s: %s" % (response.status_code, response.text))
        response_content = json.loads(response.content.decode('utf-8'))
        msa_app_id = response_content['AppId']
        password = response_content['Password']
        return msa_app_id, password
| 1,631 | 186 | 23 |
a29ae9caec88596cb11a22a6a82947490844cecd | 1,325 | py | Python | final_exam_prep/3rd.py | lowrybg/SoftuniPythonFund | ab80c424c35824bbfda5f43f1ba8ba5aa2f4efb5 | [
"MIT"
] | null | null | null | final_exam_prep/3rd.py | lowrybg/SoftuniPythonFund | ab80c424c35824bbfda5f43f1ba8ba5aa2f4efb5 | [
"MIT"
] | null | null | null | final_exam_prep/3rd.py | lowrybg/SoftuniPythonFund | ab80c424c35824bbfda5f43f1ba8ba5aa2f4efb5 | [
"MIT"
] | null | null | null | n = int(input())
pieces = {}
for _ in range(n):
piece, composer, key = input().split("|")
pieces[piece] = {'composer': composer, 'key': key}
data = input()
while not data == "Stop":
command = data.split("|")
if command[0] == "Add":
piece, composer, key = command[1:]
if piece in pieces:
print(f"{piece} is already in the collection!")
else:
pieces[piece] = {'composer': composer, 'key': key}
print(f"{piece} by {composer} in {key} added to the collection!")
elif command[0] == "Remove":
piece = command[1]
if piece in pieces:
del pieces[piece]
print(f"Successfully removed {piece}!")
else:
print(f"Invalid operation! {piece} does not exist in the collection.")
elif command[0] == "ChangeKey":
piece, new_key = command[1:]
if piece in pieces:
pieces[piece]['key'] = new_key
print(f"Changed the key of {piece} to {new_key}!")
else:
print(f"Invalid operation! {piece} does not exist in the collection.")
data = input()
sorted_pieces = sorted(pieces.items(), key=lambda tkvp: (tkvp[0], tkvp[1]['composer']))
for piece, data in sorted_pieces:
print(f"{piece} -> Composer: {data['composer']}, Key: {data['key']}") | 33.974359 | 87 | 0.568302 | n = int(input())
pieces = {}
for _ in range(n):
piece, composer, key = input().split("|")
pieces[piece] = {'composer': composer, 'key': key}
data = input()
while not data == "Stop":
command = data.split("|")
if command[0] == "Add":
piece, composer, key = command[1:]
if piece in pieces:
print(f"{piece} is already in the collection!")
else:
pieces[piece] = {'composer': composer, 'key': key}
print(f"{piece} by {composer} in {key} added to the collection!")
elif command[0] == "Remove":
piece = command[1]
if piece in pieces:
del pieces[piece]
print(f"Successfully removed {piece}!")
else:
print(f"Invalid operation! {piece} does not exist in the collection.")
elif command[0] == "ChangeKey":
piece, new_key = command[1:]
if piece in pieces:
pieces[piece]['key'] = new_key
print(f"Changed the key of {piece} to {new_key}!")
else:
print(f"Invalid operation! {piece} does not exist in the collection.")
data = input()
sorted_pieces = sorted(pieces.items(), key=lambda tkvp: (tkvp[0], tkvp[1]['composer']))
for piece, data in sorted_pieces:
print(f"{piece} -> Composer: {data['composer']}, Key: {data['key']}") | 0 | 0 | 0 |
374bd98516fb14783c4dd99d65e20d161f2986ee | 1,125 | py | Python | plugins/recorded_future/komand_recorded_future/actions/lookup_alert/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/recorded_future/komand_recorded_future/actions/lookup_alert/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/recorded_future/komand_recorded_future/actions/lookup_alert/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import LookupAlertInput, LookupAlertOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_recorded_future.util.api import Endpoint
| 36.290323 | 118 | 0.649778 | import insightconnect_plugin_runtime
from .schema import LookupAlertInput, LookupAlertOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_recorded_future.util.api import Endpoint
class LookupAlert(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="lookup_alert",
description=Component.DESCRIPTION,
input=LookupAlertInput(),
output=LookupAlertOutput(),
)
def run(self, params={}):
try:
return {
Output.ALERT: insightconnect_plugin_runtime.helper.clean(
self.connection.client.make_request(Endpoint.lookup_alert(params.get(Input.ALERT_ID))).get("data")
)
}
except AttributeError as e:
raise PluginException(
cause="Recorded Future returned unexpected response.",
assistance="Please check that the provided input is correct and try again.",
data=e,
)
| 748 | 35 | 76 |
bc85e43a576f203604dcb456c0e57b0aa707256a | 7,132 | py | Python | test/functional/feature_index_prune.py | hernanmarino/bitcoin | 51eebe082d0d5542ba10a470ee44dfcbd1f2d7e7 | [
"MIT"
] | 1 | 2020-01-03T01:06:07.000Z | 2020-01-03T01:06:07.000Z | test/functional/feature_index_prune.py | hernanmarino/bitcoin | 51eebe082d0d5542ba10a470ee44dfcbd1f2d7e7 | [
"MIT"
] | 1 | 2022-02-09T21:05:47.000Z | 2022-02-09T21:05:47.000Z | test/functional/feature_index_prune.py | hernanmarino/bitcoin | 51eebe082d0d5542ba10a470ee44dfcbd1f2d7e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test indices in conjunction with prune."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
# Functional-test entry point: run the index/prune scenario.
if __name__ == '__main__':
    FeatureIndexPruneTest().main()
| 45.717949 | 188 | 0.662227 | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test indices in conjunction with prune."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
class FeatureIndexPruneTest(BitcoinTestFramework):
    def set_test_params(self):
        """Configure four nodes: three pruned nodes with different index
        combinations (filter-only, coinstats-only, both) and one plain node."""
        self.num_nodes = 4
        self.extra_args = [
            ["-fastprune", "-prune=1", "-blockfilterindex=1"],
            ["-fastprune", "-prune=1", "-coinstatsindex=1"],
            ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
            []
        ]
    def sync_index(self, height):
        """Block until every enabled index reports being synced to `height`:
        the filter index on node 0, coinstats on node 1, and both on node 2."""
        expected_filter = {
            'basic block filter index': {'synced': True, 'best_block_height': height},
        }
        self.wait_until(lambda: self.nodes[0].getindexinfo() == expected_filter)
        expected_stats = {
            'coinstatsindex': {'synced': True, 'best_block_height': height}
        }
        self.wait_until(lambda: self.nodes[1].getindexinfo() == expected_stats)
        # Node 2 runs both indexes, so it must report the union of the two.
        expected = {**expected_filter, **expected_stats}
        self.wait_until(lambda: self.nodes[2].getindexinfo() == expected)
def reconnect_nodes(self):
self.connect_nodes(0,1)
self.connect_nodes(0,2)
self.connect_nodes(0,3)
def mine_batches(self, blocks):
n = blocks // 250
for _ in range(n):
self.generate(self.nodes[0], 250)
self.generate(self.nodes[0], blocks % 250)
self.sync_blocks()
    def restart_without_indices(self):
        """Restart nodes 0-2 with pruning still enabled but all indexes
        disabled, then reconnect the topology."""
        for i in range(3):
            self.restart_node(i, extra_args=["-fastprune", "-prune=1"])
        self.reconnect_nodes()
def run_test(self):
filter_nodes = [self.nodes[0], self.nodes[2]]
stats_nodes = [self.nodes[1], self.nodes[2]]
self.log.info("check if we can access blockfilters and coinstats when pruning is enabled but no blocks are actually pruned")
self.sync_index(height=200)
tip = self.nodes[0].getbestblockhash()
for node in filter_nodes:
assert_greater_than(len(node.getblockfilter(tip)['filter']), 0)
for node in stats_nodes:
assert(node.gettxoutsetinfo(hash_type="muhash", hash_or_height=tip)['muhash'])
self.mine_batches(500)
self.sync_index(height=700)
self.log.info("prune some blocks")
for node in self.nodes[:2]:
with node.assert_debug_log(['limited pruning to height 689']):
pruneheight_new = node.pruneblockchain(400)
# the prune heights used here and below are magic numbers that are determined by the
# thresholds at which block files wrap, so they depend on disk serialization and default block file size.
assert_equal(pruneheight_new, 248)
self.log.info("check if we can access the tips blockfilter and coinstats when we have pruned some blocks")
tip = self.nodes[0].getbestblockhash()
for node in filter_nodes:
assert_greater_than(len(node.getblockfilter(tip)['filter']), 0)
for node in stats_nodes:
assert(node.gettxoutsetinfo(hash_type="muhash", hash_or_height=tip)['muhash'])
self.log.info("check if we can access the blockfilter and coinstats of a pruned block")
height_hash = self.nodes[0].getblockhash(2)
for node in filter_nodes:
assert_greater_than(len(node.getblockfilter(height_hash)['filter']), 0)
for node in stats_nodes:
assert(node.gettxoutsetinfo(hash_type="muhash", hash_or_height=height_hash)['muhash'])
# mine and sync index up to a height that will later be the pruneheight
self.generate(self.nodes[0], 51)
self.sync_index(height=751)
self.restart_without_indices()
self.log.info("make sure trying to access the indices throws errors")
for node in filter_nodes:
msg = "Index is not enabled for filtertype basic"
assert_raises_rpc_error(-1, msg, node.getblockfilter, height_hash)
for node in stats_nodes:
msg = "Querying specific block heights requires coinstatsindex"
assert_raises_rpc_error(-8, msg, node.gettxoutsetinfo, "muhash", height_hash)
self.mine_batches(749)
self.log.info("prune exactly up to the indices best blocks while the indices are disabled")
for i in range(3):
pruneheight_2 = self.nodes[i].pruneblockchain(1000)
assert_equal(pruneheight_2, 750)
# Restart the nodes again with the indices activated
self.restart_node(i, extra_args=self.extra_args[i])
self.log.info("make sure that we can continue with the partially synced indices after having pruned up to the index height")
self.sync_index(height=1500)
self.log.info("prune further than the indices best blocks while the indices are disabled")
self.restart_without_indices()
self.mine_batches(1000)
for i in range(3):
pruneheight_3 = self.nodes[i].pruneblockchain(2000)
assert_greater_than(pruneheight_3, pruneheight_2)
self.stop_node(i)
self.log.info("make sure we get an init error when starting the nodes again with the indices")
filter_msg = "Error: basic block filter index best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"
stats_msg = "Error: coinstatsindex best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"
for i, msg in enumerate([filter_msg, stats_msg, filter_msg]):
self.nodes[i].assert_start_raises_init_error(extra_args=self.extra_args[i], expected_msg=msg)
self.log.info("make sure the nodes start again with the indices and an additional -reindex arg")
for i in range(3):
restart_args = self.extra_args[i]+["-reindex"]
self.restart_node(i, extra_args=restart_args)
# The nodes need to be reconnected to the non-pruning node upon restart, otherwise they will be stuck
self.connect_nodes(i, 3)
self.sync_blocks(timeout=300)
for node in self.nodes[:2]:
with node.assert_debug_log(['limited pruning to height 2489']):
pruneheight_new = node.pruneblockchain(2500)
assert_equal(pruneheight_new, 2005)
self.log.info("ensure that prune locks don't prevent indices from failing in a reorg scenario")
with self.nodes[0].assert_debug_log(['basic block filter index prune lock moved back to 2480']):
self.nodes[3].invalidateblock(self.nodes[0].getblockhash(2480))
self.generate(self.nodes[3], 30)
self.sync_blocks()
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    FeatureIndexPruneTest().main()
| 6,423 | 29 | 184 |
25a0afd05def23f2a550381df6805d5015e2da6a | 751 | py | Python | core/cooggerapp/views/utopic/contributor.py | bisguzar/coogger | ba8da2d4dcc7014d5e277ea8c400e312c4721e8f | [
"MIT"
] | null | null | null | core/cooggerapp/views/utopic/contributor.py | bisguzar/coogger | ba8da2d4dcc7014d5e277ea8c400e312c4721e8f | [
"MIT"
] | null | null | null | core/cooggerapp/views/utopic/contributor.py | bisguzar/coogger | ba8da2d4dcc7014d5e277ea8c400e312c4721e8f | [
"MIT"
] | 1 | 2020-06-13T09:36:53.000Z | 2020-06-13T09:36:53.000Z | from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from ...models import Commit, UTopic
from ..utils import paginator
| 37.55 | 85 | 0.739015 | from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from ...models import Commit, UTopic
from ..utils import paginator
class Contributor(TemplateView):
    """Render the contributor list of a user's topic.

    URL kwargs: ``username`` (the topic owner) and ``topic_permlink``.
    """

    template_name = "users/topic/detail/contributors.html"

    def get_context_data(self, username, topic_permlink, **kwargs):
        """Build the template context: topic, its paginated contributors and owner."""
        # Use get_object_or_404 (as already done for the User lookup below) so a
        # missing topic yields an HTTP 404 instead of an unhandled
        # UTopic.DoesNotExist, which would surface as a 500 error.
        utopic = get_object_or_404(UTopic, user__username=username, permlink=topic_permlink)
        queryset = utopic.contributors.all()
        context = super().get_context_data(**kwargs)
        context["current_user"] = get_object_or_404(User, username=username)
        # Paginate the contributors for the template.
        context["queryset"] = paginator(self.request, queryset)
        context["utopic"] = utopic
        return context
| 425 | 97 | 23 |
17787b08cbccb8195aa6bd7bd0c829d41f386969 | 14,814 | py | Python | src/abaqus/Interaction/FluidCavityProperty.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Interaction/FluidCavityProperty.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Interaction/FluidCavityProperty.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
from .ContactProperty import ContactProperty
class FluidCavityProperty(ContactProperty):
    """The FluidCavityProperty object is an interaction property that defines the fluid
    behavior for a surface-based fluid cavity.
    The FluidCavityProperty object is derived from the InteractionProperty object.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import interaction
        mdb.models[name].interactionProperties[name]

    The corresponding analysis keywords are:

    - FLUID BEHAVIOR
    - CAPACITY
    - FLUID BULK MODULUS
    - FLUID DENSITY
    - FLUID EXPANSION
    - MOLECULAR WEIGHT
    """

    def __init__(self, name: str, definition: SymbolicConstant = HYDRAULIC, fluidDensity: float = None,
                 molecularWeight: float = None, useExpansion: Boolean = OFF,
                 expansionTempDep: Boolean = OFF, expansionDependencies: int = 0,
                 referenceTemperature: float = 0, expansionTable: tuple = (),
                 useBulkModulus: Boolean = OFF, bulkModulusTempDep: Boolean = OFF,
                 bulkModulusDependencies: int = 0, bulkModulusTable: tuple = (),
                 useCapacity: Boolean = OFF, capacityType: SymbolicConstant = POLYNOMIAL,
                 capacityTempDep: Boolean = OFF, capacityDependencies: int = 0,
                 capacityTable: tuple = ()):
        """This method creates a FluidCavityProperty object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].FluidCavityProperty

        Parameters
        ----------
        name
            A String specifying the interaction property repository key.
        definition
            A SymbolicConstant specifying the type of fluid cavity property to be defined. Possible
            values are HYDRAULIC and PNEUMATIC. The default value is HYDRAULIC.
        fluidDensity
            None or a Float specifying the reference fluid density. This argument is applicable only
            when *definition*=HYDRAULIC, and is required in that case. The default value is None.
        molecularWeight
            None or a Float specifying the molecular weight of the ideal gas species. This argument
            is applicable only when *definition*=PNEUMATIC, and is required in that case. The
            default value is None.
        useExpansion
            A Boolean specifying whether thermal expansion coefficients will be defined. This
            argument is applicable only when *definition*=HYDRAULIC. The default value is OFF.
        expansionTempDep
            A Boolean specifying whether the thermal fluid expansion data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is OFF.
        expansionDependencies
            An Int specifying the number of field variable dependencies in the thermal fluid
            expansion data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is 0.
        referenceTemperature
            A Float specifying the reference temperature for the coefficient of thermal expansion.
            This argument is applicable only when *definition*=HYDRAULIC, when *useExpansion*=True,
            and when either *expansionTempDep*=True or when *expansionDependencies* is greater than
            0. The default value is 0.0.
        expansionTable
            A sequence of sequences of Floats specifying the thermal expansion coefficients. This
            argument is applicable only when *definition*=HYDRAULIC and when *useExpansion*=True.
            Each sequence contains the following data:

            - The mean coefficient of thermal expansion.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useBulkModulus
            A Boolean specifying whether fluid bulk modulus values will be defined. This argument is
            applicable only when *definition*=HYDRAULIC. The default value is OFF.
        bulkModulusTempDep
            A Boolean specifying whether the fluid bulk modulus data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is OFF.
        bulkModulusDependencies
            An Int specifying the number of field variable dependencies in the fluid bulk modulus
            data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is 0.
        bulkModulusTable
            A sequence of sequences of Floats specifying the fluid bulk modulus values. This
            argument is applicable only when *definition*=HYDRAULIC and when *useBulkModulus*=True.
            Each sequence contains the following data:

            - The fluid bulk modulus.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useCapacity
            A Boolean specifying whether molar heat capacity values will be defined. This argument
            is applicable only when *definition*=PNEUMATIC. The default value is OFF.
        capacityType
            A SymbolicConstant specifying the method to define the molar heat capacity. Possible
            values are POLYNOMIAL and TABULAR. The default value is POLYNOMIAL.
        capacityTempDep
            A Boolean specifying whether the molar heat capacity data will have temperature
            dependency. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is OFF.
        capacityDependencies
            An Int specifying the number of field variable dependencies in the molar heat capacity
            data. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is 0.
        capacityTable
            A sequence of sequences of Floats specifying the molar heat capacity values in the form
            of a polynomial expression. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=POLYNOMIAL. In
            this form, only one sequence is specified and that sequence contains the following data:

            - The first molar heat capacity coefficient.
            - The second molar heat capacity coefficient.
            - The third molar heat capacity coefficient.
            - The fourth molar heat capacity coefficient.
            - The fifth molar heat capacity coefficient.

            Alternatively, the sequence data may specify the molar heat capacity values at constant
            pressure for an ideal gas species. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=TABULAR. Each
            sequence contains the following data:

            - The molar heat capacity at constant pressure.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.

        Returns
        -------
        A FluidCavityProperty object.
        """
        # Only the repository key is forwarded on the Python side; the original
        # trailing `pass` after this call was dead code and has been removed.
        super().__init__(name)

    def setValues(self, definition: SymbolicConstant = HYDRAULIC, fluidDensity: float = None,
                  molecularWeight: float = None, useExpansion: Boolean = OFF,
                  expansionTempDep: Boolean = OFF, expansionDependencies: int = 0,
                  referenceTemperature: float = 0, expansionTable: tuple = (),
                  useBulkModulus: Boolean = OFF, bulkModulusTempDep: Boolean = OFF,
                  bulkModulusDependencies: int = 0, bulkModulusTable: tuple = (),
                  useCapacity: Boolean = OFF, capacityType: SymbolicConstant = POLYNOMIAL,
                  capacityTempDep: Boolean = OFF, capacityDependencies: int = 0,
                  capacityTable: tuple = ()):
        """This method modifies the FluidCavityProperty object.

        Parameters
        ----------
        definition
            A SymbolicConstant specifying the type of fluid cavity property to be defined. Possible
            values are HYDRAULIC and PNEUMATIC. The default value is HYDRAULIC.
        fluidDensity
            None or a Float specifying the reference fluid density. This argument is applicable only
            when *definition*=HYDRAULIC, and is required in that case. The default value is None.
        molecularWeight
            None or a Float specifying the molecular weight of the ideal gas species. This argument
            is applicable only when *definition*=PNEUMATIC, and is required in that case. The
            default value is None.
        useExpansion
            A Boolean specifying whether thermal expansion coefficients will be defined. This
            argument is applicable only when *definition*=HYDRAULIC. The default value is OFF.
        expansionTempDep
            A Boolean specifying whether the thermal fluid expansion data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is OFF.
        expansionDependencies
            An Int specifying the number of field variable dependencies in the thermal fluid
            expansion data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is 0.
        referenceTemperature
            A Float specifying the reference temperature for the coefficient of thermal expansion.
            This argument is applicable only when *definition*=HYDRAULIC, when *useExpansion*=True,
            and when either *expansionTempDep*=True or when *expansionDependencies* is greater than
            0. The default value is 0.0.
        expansionTable
            A sequence of sequences of Floats specifying the thermal expansion coefficients. This
            argument is applicable only when *definition*=HYDRAULIC and when *useExpansion*=True.
            Each sequence contains the following data:

            - The mean coefficient of thermal expansion.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useBulkModulus
            A Boolean specifying whether fluid bulk modulus values will be defined. This argument is
            applicable only when *definition*=HYDRAULIC. The default value is OFF.
        bulkModulusTempDep
            A Boolean specifying whether the fluid bulk modulus data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is OFF.
        bulkModulusDependencies
            An Int specifying the number of field variable dependencies in the fluid bulk modulus
            data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is 0.
        bulkModulusTable
            A sequence of sequences of Floats specifying the fluid bulk modulus values. This
            argument is applicable only when *definition*=HYDRAULIC and when *useBulkModulus*=True.
            Each sequence contains the following data:

            - The fluid bulk modulus.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useCapacity
            A Boolean specifying whether molar heat capacity values will be defined. This argument
            is applicable only when *definition*=PNEUMATIC. The default value is OFF.
        capacityType
            A SymbolicConstant specifying the method to define the molar heat capacity. Possible
            values are POLYNOMIAL and TABULAR. The default value is POLYNOMIAL.
        capacityTempDep
            A Boolean specifying whether the molar heat capacity data will have temperature
            dependency. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is OFF.
        capacityDependencies
            An Int specifying the number of field variable dependencies in the molar heat capacity
            data. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is 0.
        capacityTable
            A sequence of sequences of Floats specifying the molar heat capacity values in the form
            of a polynomial expression. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=POLYNOMIAL. In
            this form, only one sequence is specified and that sequence contains the following data:

            - The first molar heat capacity coefficient.
            - The second molar heat capacity coefficient.
            - The third molar heat capacity coefficient.
            - The fourth molar heat capacity coefficient.
            - The fifth molar heat capacity coefficient.

            Alternatively, the sequence data may specify the molar heat capacity values at constant
            pressure for an ideal gas species. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=TABULAR. Each
            sequence contains the following data:

            - The molar heat capacity at constant pressure.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        """
        # Interface stub with no Python-side behavior: the original body was a
        # bare `pass`; the docstring alone now serves as the body.
| 58.785714 | 103 | 0.660659 | from abaqusConstants import *
from .ContactProperty import ContactProperty
class FluidCavityProperty(ContactProperty):
    """The FluidCavityProperty object is an interaction property that defines the fluid
    behavior for a surface-based fluid cavity.
    The FluidCavityProperty object is derived from the InteractionProperty object.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import interaction
        mdb.models[name].interactionProperties[name]

    The corresponding analysis keywords are:

    - FLUID BEHAVIOR
    - CAPACITY
    - FLUID BULK MODULUS
    - FLUID DENSITY
    - FLUID EXPANSION
    - MOLECULAR WEIGHT
    """

    def __init__(self, name: str, definition: SymbolicConstant = HYDRAULIC, fluidDensity: float = None,
                 molecularWeight: float = None, useExpansion: Boolean = OFF,
                 expansionTempDep: Boolean = OFF, expansionDependencies: int = 0,
                 referenceTemperature: float = 0, expansionTable: tuple = (),
                 useBulkModulus: Boolean = OFF, bulkModulusTempDep: Boolean = OFF,
                 bulkModulusDependencies: int = 0, bulkModulusTable: tuple = (),
                 useCapacity: Boolean = OFF, capacityType: SymbolicConstant = POLYNOMIAL,
                 capacityTempDep: Boolean = OFF, capacityDependencies: int = 0,
                 capacityTable: tuple = ()):
        """This method creates a FluidCavityProperty object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].FluidCavityProperty

        Parameters
        ----------
        name
            A String specifying the interaction property repository key.
        definition
            A SymbolicConstant specifying the type of fluid cavity property to be defined. Possible
            values are HYDRAULIC and PNEUMATIC. The default value is HYDRAULIC.
        fluidDensity
            None or a Float specifying the reference fluid density. This argument is applicable only
            when *definition*=HYDRAULIC, and is required in that case. The default value is None.
        molecularWeight
            None or a Float specifying the molecular weight of the ideal gas species. This argument
            is applicable only when *definition*=PNEUMATIC, and is required in that case. The
            default value is None.
        useExpansion
            A Boolean specifying whether thermal expansion coefficients will be defined. This
            argument is applicable only when *definition*=HYDRAULIC. The default value is OFF.
        expansionTempDep
            A Boolean specifying whether the thermal fluid expansion data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is OFF.
        expansionDependencies
            An Int specifying the number of field variable dependencies in the thermal fluid
            expansion data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is 0.
        referenceTemperature
            A Float specifying the reference temperature for the coefficient of thermal expansion.
            This argument is applicable only when *definition*=HYDRAULIC, when *useExpansion*=True,
            and when either *expansionTempDep*=True or when *expansionDependencies* is greater than
            0. The default value is 0.0.
        expansionTable
            A sequence of sequences of Floats specifying the thermal expansion coefficients. This
            argument is applicable only when *definition*=HYDRAULIC and when *useExpansion*=True.
            Each sequence contains the following data:

            - The mean coefficient of thermal expansion.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useBulkModulus
            A Boolean specifying whether fluid bulk modulus values will be defined. This argument is
            applicable only when *definition*=HYDRAULIC. The default value is OFF.
        bulkModulusTempDep
            A Boolean specifying whether the fluid bulk modulus data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is OFF.
        bulkModulusDependencies
            An Int specifying the number of field variable dependencies in the fluid bulk modulus
            data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is 0.
        bulkModulusTable
            A sequence of sequences of Floats specifying the fluid bulk modulus values. This
            argument is applicable only when *definition*=HYDRAULIC and when *useBulkModulus*=True.
            Each sequence contains the following data:

            - The fluid bulk modulus.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useCapacity
            A Boolean specifying whether molar heat capacity values will be defined. This argument
            is applicable only when *definition*=PNEUMATIC. The default value is OFF.
        capacityType
            A SymbolicConstant specifying the method to define the molar heat capacity. Possible
            values are POLYNOMIAL and TABULAR. The default value is POLYNOMIAL.
        capacityTempDep
            A Boolean specifying whether the molar heat capacity data will have temperature
            dependency. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is OFF.
        capacityDependencies
            An Int specifying the number of field variable dependencies in the molar heat capacity
            data. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is 0.
        capacityTable
            A sequence of sequences of Floats specifying the molar heat capacity values in the form
            of a polynomial expression. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=POLYNOMIAL. In
            this form, only one sequence is specified and that sequence contains the following data:

            - The first molar heat capacity coefficient.
            - The second molar heat capacity coefficient.
            - The third molar heat capacity coefficient.
            - The fourth molar heat capacity coefficient.
            - The fifth molar heat capacity coefficient.

            Alternatively, the sequence data may specify the molar heat capacity values at constant
            pressure for an ideal gas species. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=TABULAR. Each
            sequence contains the following data:

            - The molar heat capacity at constant pressure.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.

        Returns
        -------
        A FluidCavityProperty object.
        """
        # Only the repository key is forwarded on the Python side; the original
        # trailing `pass` after this call was dead code and has been removed.
        super().__init__(name)

    def setValues(self, definition: SymbolicConstant = HYDRAULIC, fluidDensity: float = None,
                  molecularWeight: float = None, useExpansion: Boolean = OFF,
                  expansionTempDep: Boolean = OFF, expansionDependencies: int = 0,
                  referenceTemperature: float = 0, expansionTable: tuple = (),
                  useBulkModulus: Boolean = OFF, bulkModulusTempDep: Boolean = OFF,
                  bulkModulusDependencies: int = 0, bulkModulusTable: tuple = (),
                  useCapacity: Boolean = OFF, capacityType: SymbolicConstant = POLYNOMIAL,
                  capacityTempDep: Boolean = OFF, capacityDependencies: int = 0,
                  capacityTable: tuple = ()):
        """This method modifies the FluidCavityProperty object.

        Parameters
        ----------
        definition
            A SymbolicConstant specifying the type of fluid cavity property to be defined. Possible
            values are HYDRAULIC and PNEUMATIC. The default value is HYDRAULIC.
        fluidDensity
            None or a Float specifying the reference fluid density. This argument is applicable only
            when *definition*=HYDRAULIC, and is required in that case. The default value is None.
        molecularWeight
            None or a Float specifying the molecular weight of the ideal gas species. This argument
            is applicable only when *definition*=PNEUMATIC, and is required in that case. The
            default value is None.
        useExpansion
            A Boolean specifying whether thermal expansion coefficients will be defined. This
            argument is applicable only when *definition*=HYDRAULIC. The default value is OFF.
        expansionTempDep
            A Boolean specifying whether the thermal fluid expansion data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is OFF.
        expansionDependencies
            An Int specifying the number of field variable dependencies in the thermal fluid
            expansion data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useExpansion*=True. The default value is 0.
        referenceTemperature
            A Float specifying the reference temperature for the coefficient of thermal expansion.
            This argument is applicable only when *definition*=HYDRAULIC, when *useExpansion*=True,
            and when either *expansionTempDep*=True or when *expansionDependencies* is greater than
            0. The default value is 0.0.
        expansionTable
            A sequence of sequences of Floats specifying the thermal expansion coefficients. This
            argument is applicable only when *definition*=HYDRAULIC and when *useExpansion*=True.
            Each sequence contains the following data:

            - The mean coefficient of thermal expansion.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useBulkModulus
            A Boolean specifying whether fluid bulk modulus values will be defined. This argument is
            applicable only when *definition*=HYDRAULIC. The default value is OFF.
        bulkModulusTempDep
            A Boolean specifying whether the fluid bulk modulus data will have temperature
            dependency. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is OFF.
        bulkModulusDependencies
            An Int specifying the number of field variable dependencies in the fluid bulk modulus
            data. This argument is applicable only when *definition*=HYDRAULIC and when
            *useBulkModulus*=True. The default value is 0.
        bulkModulusTable
            A sequence of sequences of Floats specifying the fluid bulk modulus values. This
            argument is applicable only when *definition*=HYDRAULIC and when *useBulkModulus*=True.
            Each sequence contains the following data:

            - The fluid bulk modulus.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        useCapacity
            A Boolean specifying whether molar heat capacity values will be defined. This argument
            is applicable only when *definition*=PNEUMATIC. The default value is OFF.
        capacityType
            A SymbolicConstant specifying the method to define the molar heat capacity. Possible
            values are POLYNOMIAL and TABULAR. The default value is POLYNOMIAL.
        capacityTempDep
            A Boolean specifying whether the molar heat capacity data will have temperature
            dependency. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is OFF.
        capacityDependencies
            An Int specifying the number of field variable dependencies in the molar heat capacity
            data. This argument is applicable only when *definition*=PNEUMATIC, when
            *useCapacity*=True, and when *capacityType*=TABULAR. The default value is 0.
        capacityTable
            A sequence of sequences of Floats specifying the molar heat capacity values in the form
            of a polynomial expression. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=POLYNOMIAL. In
            this form, only one sequence is specified and that sequence contains the following data:

            - The first molar heat capacity coefficient.
            - The second molar heat capacity coefficient.
            - The third molar heat capacity coefficient.
            - The fourth molar heat capacity coefficient.
            - The fifth molar heat capacity coefficient.

            Alternatively, the sequence data may specify the molar heat capacity values at constant
            pressure for an ideal gas species. This argument is applicable only when
            *definition*=PNEUMATIC, when *useCapacity*=True, and when *capacityType*=TABULAR. Each
            sequence contains the following data:

            - The molar heat capacity at constant pressure.
            - Temperature, if the data depend on temperature.
            - Value of the first field variable, if the data depend on field variables.
            - Value of the second field variable.
            - Etc.
        """
        # Interface stub with no Python-side behavior: the original body was a
        # bare `pass`; the docstring alone now serves as the body.
| 0 | 0 | 0 |
83599f9c39e0eb9c2eb150ed207890f904271e54 | 437 | py | Python | slice_script.py | harsh52/endoscopy-artifact-detection | 496d89ae0d162b97e616d586c56c8675a7652b4f | [
"MIT"
] | 22 | 2019-07-08T02:34:22.000Z | 2021-11-29T05:25:04.000Z | slice_script.py | harsh52/endoscopy-artifact-detection | 496d89ae0d162b97e616d586c56c8675a7652b4f | [
"MIT"
] | 1 | 2019-12-03T15:30:28.000Z | 2019-12-03T15:30:28.000Z | slice_script.py | harsh52/endoscopy-artifact-detection | 496d89ae0d162b97e616d586c56c8675a7652b4f | [
"MIT"
] | 7 | 2019-08-29T16:34:40.000Z | 2020-12-08T10:23:25.000Z | import re
import csv

# Read only the first line of the log; the original buffered the entire file
# into a list just to use element 0.
with open('output2.txt') as f:
    first_line = f.readline()
# Extract every numeric token (int or float) from that line and keep the
# 1st and 3rd matches (presumably epoch and loss, per the output file name —
# TODO confirm against the producer of output2.txt).
values = re.findall(r"[-+]?\d*\.\d+|\d+", first_line)
row = [values[0], values[2]]
# newline='' is required by the csv module to avoid blank/extra rows on
# Windows; the trailing list.clear() calls of the original were no-ops at
# process exit and have been dropped.
with open('epoch_loss.csv', 'a', newline='') as csv_file:
    csv.writer(csv_file).writerow(row)
| 24.277778 | 90 | 0.606407 | import re
import csv

# Read only the first line of the log; the original buffered the entire file
# into a list just to use element 0.
with open('output2.txt') as f:
    first_line = f.readline()
# Extract every numeric token (int or float) from that line and keep the
# 1st and 3rd matches (presumably epoch and loss, per the output file name —
# TODO confirm against the producer of output2.txt).
values = re.findall(r"[-+]?\d*\.\d+|\d+", first_line)
row = [values[0], values[2]]
# newline='' is required by the csv module to avoid blank/extra rows on
# Windows; the trailing list.clear() calls of the original were no-ops at
# process exit and have been dropped.
with open('epoch_loss.csv', 'a', newline='') as csv_file:
    csv.writer(csv_file).writerow(row)
| 0 | 0 | 0 |
d8beb893a13694f876607ea939fc05d2189e77e6 | 3,947 | py | Python | handlers/acceptmembermulti.py | micjerry/groupservice | 807e5d53533897ac36d9bf1cce30aee09979ea9f | [
"Apache-2.0"
] | 1 | 2015-12-14T08:31:30.000Z | 2015-12-14T08:31:30.000Z | handlers/acceptmembermulti.py | micjerry/groupservice | 807e5d53533897ac36d9bf1cce30aee09979ea9f | [
"Apache-2.0"
] | null | null | null | handlers/acceptmembermulti.py | micjerry/groupservice | 807e5d53533897ac36d9bf1cce30aee09979ea9f | [
"Apache-2.0"
] | null | null | null | import tornado.web
import tornado.gen
import json
import io
import logging
import motor
from bson.objectid import ObjectId
import mickey.userfetcher
from mickey.basehandler import BaseHandler
| 32.089431 | 105 | 0.541677 | import tornado.web
import tornado.gen
import json
import io
import logging
import motor
from bson.objectid import ObjectId
import mickey.userfetcher
from mickey.basehandler import BaseHandler
class AcceptMultimemberHandler(BaseHandler):
    """Lets a group owner accept a batch of invited members in one request.

    POST body: {"groupid": <id>, "members": [{"invite_id": <inviter id>,
    "id": <invitee id>}, ...]}.  New member ids are pushed onto the group's
    "appendings" array in MongoDB and each inviter's invitees are notified.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        """Handle the accept-members request.

        Status codes: 403 for a malformed body or a non-owner caller,
        404 when the group does not exist, 500 when the DB update fails,
        200 on success.
        """
        coll = self.application.db.groups
        token = self.request.headers.get("Authorization", "")
        data = json.loads(self.request.body.decode("utf-8"))
        groupid = data.get("groupid", "")
        members = data.get("members", [])
        # Stashed on self so send_notify() can read them later.
        self._groupid = groupid
        self._groupname = ""
        self._authtoken = token
        logging.info("begin to add members to group %s" % groupid)
        if not groupid or not members:
            # NOTE(review): 403 is used for a malformed body; 400 would be
            # more conventional -- kept as-is.
            logging.error("invalid request")
            self.set_status(403)
            self.finish()
            return
        # Drop duplicate (invite_id, id) pairs while preserving order.
        strip_members = []
        repeat_members = []
        for item in members:
            str_key = item.get("invite_id", "") + "_" + item.get("id", "")
            if str_key in repeat_members:
                continue
            repeat_members.append(str_key)
            strip_members.append(item)
        members = strip_members
        result = yield coll.find_one({"_id":ObjectId(groupid)})
        if not result:
            logging.error("group %s does not exist" % groupid)
            self.set_status(404)
            self.finish()
            return
        # Only the group owner may accept members.
        if result.get("owner", "") != self.p_userid:
            logging.error("%s are not the owner" % self.p_userid)
            self.set_status(403)
            self.finish()
            return;
        self._groupname = result.get("name", "")
        #get exist members
        exist_ids = [x.get("id", "") for x in result.get("members", [])]
        # get members and the receivers
        add_members = list(filter(lambda x: x.get("id", "") not in exist_ids, members))
        # invite_groups maps inviter id -> list of invited ids (for notifications);
        # adddb_members is the flat list written to the database.
        invite_groups = {}
        adddb_members = []
        for item in add_members:
            invite_user = item.get("invite_id", "")
            invited = item.get("id", "")
            adddb_members.append({"id":invited})
            invite_members = invite_groups.get(invite_user, [])
            if not invite_members:
                invite_groups[invite_user] = [invited]
            else:
                # the list returned by get() is the one stored in the dict,
                # so this append mutates invite_groups in place
                invite_members.append(invited)
        if not adddb_members:
            logging.error("no need to add user")
            self.finish()
            return
        # $addToSet with $each keeps "appendings" duplicate-free; the $unset
        # of "garbage" presumably just forces a write -- TODO confirm intent.
        append_result = yield coll.find_and_modify({"_id":ObjectId(groupid)},
                               {
                                "$addToSet":{"appendings":{"$each": adddb_members}},
                                "$unset": {"garbage": 1}
                               })
        if append_result:
            self.set_status(200)
            for key,value in invite_groups.items():
                yield self.send_notify(key, value)
        else:
            self.set_status(500)
            logging.error("add user failed %s" % groupid)
            return
        self.finish()
    @tornado.gen.coroutine
    def send_notify(self, inviteid, members):
        """Publish an "authgroup_invited" notification to *members*.

        *inviteid* is the inviter whose display name is resolved via the
        user service and embedded in the notification payload.
        """
        publish = self.application.publish
        notify = {}
        notify["name"] = "mx.group.authgroup_invited"
        notify["pub_type"] = "any"
        notify["nty_type"] = "device"
        notify["msg_type"] = "other"
        notify["groupid"] = self._groupid
        notify["groupname"] = self._groupname
        notify["userid"] = inviteid
        opter_info = yield mickey.userfetcher.getcontact(inviteid, self._authtoken)
        if opter_info:
            notify["username"] = opter_info.get("name", "")
        else:
            # Best effort: the notification is still sent without the name.
            logging.error("get user info failed %s" % inviteid)
        publish.publish_multi(members, notify)
| 3,570 | 160 | 23 |
43f9de34af9bda3faa21f568aaf1d5e8fc9aee2b | 4,560 | py | Python | code/parser_class.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/parser_class.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/parser_class.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | import os
import re
# Parses a given input file and returns a list of parameters for all structures.
| 53.647059 | 129 | 0.498904 | import os
import re
class Parser:
    """Parser for AFLOW-style two-element structure result files.

    Reads ``data_files/<el0>_<el1>_<lattice>.txt`` relative to the current
    working directory and, for every " Structure PRE " block, extracts the
    lattice vectors, cartesian atomic positions and total energy, plus the
    key=value metadata found on the line after the " aflowlib.out " marker.
    """

    # NOTE(review): defined without `self`, so it must be called as
    # Parser.parse(lattice_type, elements) -- kept as-is for compatibility.
    def parse(lattice_type, elements):
        """Parse the data file for *elements* on *lattice_type*.

        Args:
            lattice_type: lattice name used in the file name (e.g. "fcc").
            elements: two-element sequence of species names.

        Returns:
            Tuple (structures_parameters_list, structDataList): each entry
            of the first list is
            [name, lattice_type, [a, b, c], source_positions, total_energy];
            each entry of the second is a dict of the aflowlib key=value
            pairs plus a derived 'composition_frac' key.
        """
        num_pat = r"[+-]?\d+(?:\.\d+)?"  # signed int or decimal token
        file_name = elements[0] + '_' + elements[1] + '_' + lattice_type + '.txt'
        file_path = os.path.join(os.getcwd(), 'data_files/' + file_name)
        structures_parameters_list = []
        structDataList = []  # one metadata dict per parsed structure
        # Fix: read via a context manager -- the original opened the file
        # and never closed it (leaked handle).
        with open(file_path, "r") as f:
            lines = f.readlines()
        for i in range(len(lines)):
            l = lines[i]
            if " Structure PRE " not in l:
                continue
            # Structure name: between the leading char and two chars before '#'.
            name = l[1:(l.find("#") - 2)]
            # Translation vectors: first number on each line is an index, skip it.
            a = list(map(float, re.findall(num_pat, lines[i + 3])[1:]))
            b = list(map(float, re.findall(num_pat, lines[i + 4])[1:]))
            c = list(map(float, re.findall(num_pat, lines[i + 5])[1:]))
            # Atomic positions start 8 lines after the PRE marker.
            # NOTE: reassigning i only affects this iteration; the for loop
            # resets it next pass, so later lines are re-scanned (and then
            # skipped by the " Structure PRE " test above).
            i += 8
            source_positions = {}
            while " Structure POST " not in lines[i]:
                pos = list(map(float, re.findall(num_pat, lines[i])[1:]))
                element = lines[i].split()[-1]
                # Strip charge/label suffixes such as "_pv" or "+2".
                if element.find("_") != -1:
                    element = element[0:element.find("_")]
                if element.find("+") != -1:
                    element = element[0:element.find("+")]
                # Fractional -> cartesian coordinates: pos = x*a + y*b + z*c.
                x = [comp * pos[0] for comp in a]
                y = [comp * pos[1] for comp in b]
                z = [comp * pos[2] for comp in c]
                pos = [sum(tmp) for tmp in zip(x, y)]
                pos = [sum(tmp) for tmp in zip(pos, z)]
                if element not in source_positions:
                    source_positions[element] = []
                source_positions[element].append(pos)
                i += 1
            # Total energy: second number on the line two past the " DATA " marker.
            while " DATA " not in lines[i]:
                i += 1
            total_energy = list(map(float, re.findall(num_pat, lines[i + 2])))[1]
            structure_parameters = [name, lattice_type, [a, b, c], source_positions,
                                    total_energy]
            structures_parameters_list.append(structure_parameters)
            # Metadata line follows the " aflowlib.out " marker and looks like
            # "<tag>] key1=val1| key2=val2| ...".
            while " aflowlib.out " not in lines[i]:
                i += 1
            outputLine = lines[i + 1].split('] ')[1]
            structData = dict(item.split("=") for item in outputLine.split("| "))
            if int(structData['nspecies']) != 1:
                # Mole fraction of the second species from "x0,x1" stoichiometry.
                tempDict = {'composition_frac': float(structData['stoichiometry'].split(',')[1])}
            else:
                # Single-species structure: 0.0 if it is the first element, else 1.0.
                if structData['species'].strip() == elements[0].strip():
                    tempDict = {'composition_frac': 0.0}
                else:
                    tempDict = {'composition_frac': 1.0}
            structData.update(tempDict)
            structDataList.append(structData)
        return (structures_parameters_list, structDataList)
| 4,414 | -8 | 49 |
c9479db68d58e4ca0978a711caa8fb9f9f3b477b | 2,366 | py | Python | silasdk/ethwallet.py | deepanshutyagi/Sila-Python | d0c0335f3f103129490f7df0ecab73051ec73549 | [
"Apache-2.0"
] | null | null | null | silasdk/ethwallet.py | deepanshutyagi/Sila-Python | d0c0335f3f103129490f7df0ecab73051ec73549 | [
"Apache-2.0"
] | null | null | null | silasdk/ethwallet.py | deepanshutyagi/Sila-Python | d0c0335f3f103129490f7df0ecab73051ec73549 | [
"Apache-2.0"
] | null | null | null | from eth_account import Account
import sha3
import json
| 35.313433 | 113 | 0.546492 | from eth_account import Account
import sha3
import json
class EthWallet():
    """Thin helpers around eth_account: wallet creation, message signing
    and signature recovery.  Messages are hashed as
    keccak256(json.dumps(msg).encode("utf-8")) in all methods."""
    def create(entropy):
        """Generate a fresh Ethereum keypair.

        Not a recommended production key-generation path; *entropy* only
        adds extra randomness to Account.create().

        Returns:
            dict with the hex private key and the checksummed address.
        """
        new_account = Account.create(entropy)
        return {"eth_private_key": new_account.privateKey.hex(),
                "eth_address": new_account.address}
    def signMessage(msg, key=None):
        """Sign *msg* with the given private *key*.

        Returns the hex signature without its "0x" prefix, or the string
        " " when no key is supplied.
        """
        hasher = sha3.keccak_256()
        hasher.update(json.dumps(msg).encode("utf-8"))
        message_hash = hasher.hexdigest()
        if key == None:  # keep original equality-based None check
            return " "
        signature_hex = Account.signHash(message_hash, key).signature.hex()
        return str(signature_hex.replace("0x", ""))
    def verifySignature(msg, sign):
        """Recover the signer of *msg* from signature *sign*.

        Returns the Ethereum address corresponding to the private key the
        message was signed with.
        """
        hasher = sha3.keccak_256()
        hasher.update(json.dumps(msg).encode("utf-8"))
        return Account.recoverHash(hasher.hexdigest(), signature=sign)
| 0 | 2,279 | 23 |
3c41be0847b2540835a2e5b84d62ce2bdcdb833d | 7,601 | py | Python | rpi_pcm_ws281x.py | tvoverbeek/rpi-pcm-ws2811 | 479d7996f3b042cb579858caedd5ef7993d7549c | [
"MIT"
] | 5 | 2016-10-20T09:15:03.000Z | 2021-12-14T19:15:11.000Z | rpi_pcm_ws281x.py | tvoverbeek/rpi-pcm-ws2811 | 479d7996f3b042cb579858caedd5ef7993d7549c | [
"MIT"
] | 3 | 2016-10-21T10:13:33.000Z | 2018-08-11T22:04:45.000Z | rpi_pcm_ws281x.py | tvoverbeek/rpi-pcm-ws2811 | 479d7996f3b042cb579858caedd5ef7993d7549c | [
"MIT"
] | 1 | 2017-03-27T18:00:55.000Z | 2017-03-27T18:00:55.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
_rpi_pcm_ws281x = swig_import_helper()
del swig_import_helper
else:
import _rpi_pcm_ws281x
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
_object = object
_newclass = 1
except AttributeError:
_newclass = 0
WS2811_TARGET_FREQ = _rpi_pcm_ws281x.WS2811_TARGET_FREQ
WS2811_STRIP_RGB = _rpi_pcm_ws281x.WS2811_STRIP_RGB
WS2811_STRIP_RBG = _rpi_pcm_ws281x.WS2811_STRIP_RBG
WS2811_STRIP_GRB = _rpi_pcm_ws281x.WS2811_STRIP_GRB
WS2811_STRIP_GBR = _rpi_pcm_ws281x.WS2811_STRIP_GBR
WS2811_STRIP_BRG = _rpi_pcm_ws281x.WS2811_STRIP_BRG
WS2811_STRIP_BGR = _rpi_pcm_ws281x.WS2811_STRIP_BGR
ws2811_channel_t_swigregister = _rpi_pcm_ws281x.ws2811_channel_t_swigregister
ws2811_channel_t_swigregister(ws2811_channel_t)
ws2811_t_swigregister = _rpi_pcm_ws281x.ws2811_t_swigregister
ws2811_t_swigregister(ws2811_t)
ws2811_init = _rpi_pcm_ws281x.ws2811_init
ws2811_fini = _rpi_pcm_ws281x.ws2811_fini
ws2811_render = _rpi_pcm_ws281x.ws2811_render
ws2811_wait = _rpi_pcm_ws281x.ws2811_wait
ws2811_led_get = _rpi_pcm_ws281x.ws2811_led_get
ws2811_led_set = _rpi_pcm_ws281x.ws2811_led_set
# This file is compatible with both classic and new-style classes.
| 44.976331 | 142 | 0.778187 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        """Locate and load the compiled _rpi_pcm_ws281x extension module.

        Searches this file's directory first via imp.find_module; on
        ImportError falls back to a plain import from sys.path.
        (SWIG-generated; `imp` targets legacy Python and is deprecated.)
        """
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_rpi_pcm_ws281x', [dirname(__file__)])
        except ImportError:
            import _rpi_pcm_ws281x
            return _rpi_pcm_ws281x
        if fp is not None:
            try:
                _mod = imp.load_module('_rpi_pcm_ws281x', fp, pathname, description)
            finally:
                # close the module file handle even if load_module fails
                fp.close()
            return _mod
_rpi_pcm_ws281x = swig_import_helper()
del swig_import_helper
else:
import _rpi_pcm_ws281x
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG attribute setter: route writes through __swig_setmethods__.

    When *static* is truthy, refuse to create attributes that the wrapped
    C struct does not declare.  (SWIG-generated boilerplate.)
    """
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: allows adding new attributes to the proxy object.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """SWIG attribute getter: dispatch reads through __swig_getmethods__."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    """repr showing the proxy class and the underlying SWIG object.

    Falls back to an empty description when no SWIG object is attached
    (bare except is as generated by SWIG).
    """
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
WS2811_TARGET_FREQ = _rpi_pcm_ws281x.WS2811_TARGET_FREQ
WS2811_STRIP_RGB = _rpi_pcm_ws281x.WS2811_STRIP_RGB
WS2811_STRIP_RBG = _rpi_pcm_ws281x.WS2811_STRIP_RBG
WS2811_STRIP_GRB = _rpi_pcm_ws281x.WS2811_STRIP_GRB
WS2811_STRIP_GBR = _rpi_pcm_ws281x.WS2811_STRIP_GBR
WS2811_STRIP_BRG = _rpi_pcm_ws281x.WS2811_STRIP_BRG
WS2811_STRIP_BGR = _rpi_pcm_ws281x.WS2811_STRIP_BGR
class ws2811_channel_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ws2811_channel_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ws2811_channel_t, name)
__repr__ = _swig_repr
__swig_setmethods__["gpionum"] = _rpi_pcm_ws281x.ws2811_channel_t_gpionum_set
__swig_getmethods__["gpionum"] = _rpi_pcm_ws281x.ws2811_channel_t_gpionum_get
if _newclass:gpionum = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_gpionum_get, _rpi_pcm_ws281x.ws2811_channel_t_gpionum_set)
__swig_setmethods__["invert"] = _rpi_pcm_ws281x.ws2811_channel_t_invert_set
__swig_getmethods__["invert"] = _rpi_pcm_ws281x.ws2811_channel_t_invert_get
if _newclass:invert = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_invert_get, _rpi_pcm_ws281x.ws2811_channel_t_invert_set)
__swig_setmethods__["count"] = _rpi_pcm_ws281x.ws2811_channel_t_count_set
__swig_getmethods__["count"] = _rpi_pcm_ws281x.ws2811_channel_t_count_get
if _newclass:count = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_count_get, _rpi_pcm_ws281x.ws2811_channel_t_count_set)
__swig_setmethods__["brightness"] = _rpi_pcm_ws281x.ws2811_channel_t_brightness_set
__swig_getmethods__["brightness"] = _rpi_pcm_ws281x.ws2811_channel_t_brightness_get
if _newclass:brightness = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_brightness_get, _rpi_pcm_ws281x.ws2811_channel_t_brightness_set)
__swig_setmethods__["strip_type"] = _rpi_pcm_ws281x.ws2811_channel_t_strip_type_set
__swig_getmethods__["strip_type"] = _rpi_pcm_ws281x.ws2811_channel_t_strip_type_get
if _newclass:strip_type = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_strip_type_get, _rpi_pcm_ws281x.ws2811_channel_t_strip_type_set)
__swig_setmethods__["leds"] = _rpi_pcm_ws281x.ws2811_channel_t_leds_set
__swig_getmethods__["leds"] = _rpi_pcm_ws281x.ws2811_channel_t_leds_get
if _newclass:leds = _swig_property(_rpi_pcm_ws281x.ws2811_channel_t_leds_get, _rpi_pcm_ws281x.ws2811_channel_t_leds_set)
def __init__(self):
this = _rpi_pcm_ws281x.new_ws2811_channel_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _rpi_pcm_ws281x.delete_ws2811_channel_t
__del__ = lambda self : None;
ws2811_channel_t_swigregister = _rpi_pcm_ws281x.ws2811_channel_t_swigregister
ws2811_channel_t_swigregister(ws2811_channel_t)
class ws2811_t(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ws2811_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ws2811_t, name)
__repr__ = _swig_repr
__swig_setmethods__["device"] = _rpi_pcm_ws281x.ws2811_t_device_set
__swig_getmethods__["device"] = _rpi_pcm_ws281x.ws2811_t_device_get
if _newclass:device = _swig_property(_rpi_pcm_ws281x.ws2811_t_device_get, _rpi_pcm_ws281x.ws2811_t_device_set)
__swig_setmethods__["rpi_hw"] = _rpi_pcm_ws281x.ws2811_t_rpi_hw_set
__swig_getmethods__["rpi_hw"] = _rpi_pcm_ws281x.ws2811_t_rpi_hw_get
if _newclass:rpi_hw = _swig_property(_rpi_pcm_ws281x.ws2811_t_rpi_hw_get, _rpi_pcm_ws281x.ws2811_t_rpi_hw_set)
__swig_setmethods__["freq"] = _rpi_pcm_ws281x.ws2811_t_freq_set
__swig_getmethods__["freq"] = _rpi_pcm_ws281x.ws2811_t_freq_get
if _newclass:freq = _swig_property(_rpi_pcm_ws281x.ws2811_t_freq_get, _rpi_pcm_ws281x.ws2811_t_freq_set)
__swig_setmethods__["dmanum"] = _rpi_pcm_ws281x.ws2811_t_dmanum_set
__swig_getmethods__["dmanum"] = _rpi_pcm_ws281x.ws2811_t_dmanum_get
if _newclass:dmanum = _swig_property(_rpi_pcm_ws281x.ws2811_t_dmanum_get, _rpi_pcm_ws281x.ws2811_t_dmanum_set)
__swig_setmethods__["channel"] = _rpi_pcm_ws281x.ws2811_t_channel_set
__swig_getmethods__["channel"] = _rpi_pcm_ws281x.ws2811_t_channel_get
if _newclass:channel = _swig_property(_rpi_pcm_ws281x.ws2811_t_channel_get, _rpi_pcm_ws281x.ws2811_t_channel_set)
def __init__(self):
this = _rpi_pcm_ws281x.new_ws2811_t()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _rpi_pcm_ws281x.delete_ws2811_t
__del__ = lambda self : None;
ws2811_t_swigregister = _rpi_pcm_ws281x.ws2811_t_swigregister
ws2811_t_swigregister(ws2811_t)
def ws2811_init(*args):
return _rpi_pcm_ws281x.ws2811_init(*args)
ws2811_init = _rpi_pcm_ws281x.ws2811_init
def ws2811_fini(*args):
return _rpi_pcm_ws281x.ws2811_fini(*args)
ws2811_fini = _rpi_pcm_ws281x.ws2811_fini
def ws2811_render(*args):
return _rpi_pcm_ws281x.ws2811_render(*args)
ws2811_render = _rpi_pcm_ws281x.ws2811_render
def ws2811_wait(*args):
return _rpi_pcm_ws281x.ws2811_wait(*args)
ws2811_wait = _rpi_pcm_ws281x.ws2811_wait
def ws2811_led_get(*args):
return _rpi_pcm_ws281x.ws2811_led_get(*args)
ws2811_led_get = _rpi_pcm_ws281x.ws2811_led_get
def ws2811_led_set(*args):
return _rpi_pcm_ws281x.ws2811_led_set(*args)
ws2811_led_set = _rpi_pcm_ws281x.ws2811_led_set
# This file is compatible with both classic and new-style classes.
| 1,943 | 3,831 | 326 |
ec821600b761fbf8efdb41b115e466a61978364e | 16,410 | py | Python | tests/test_pendulum.py | shromonag/active_testing | ca9c8f909f6b0f4e7b1affda6f9333e0d0b6c04b | [
"MIT"
] | 4 | 2019-03-09T12:38:46.000Z | 2021-12-08T15:45:44.000Z | tests/test_pendulum.py | shromonag/active_testing | ca9c8f909f6b0f4e7b1affda6f9333e0d0b6c04b | [
"MIT"
] | null | null | null | tests/test_pendulum.py | shromonag/active_testing | ca9c8f909f6b0f4e7b1affda6f9333e0d0b6c04b | [
"MIT"
] | 3 | 2019-01-09T13:43:06.000Z | 2021-11-30T22:15:28.000Z | '''
Here we consider a controller trained on nearest neighbor for the pendulum
environment in OpenAI Gym. The controller is taken from baselines ppo.
'''
import gym
import numpy as np
from gym import spaces
from baselines import deepq
from baselines.common import set_global_seeds, tf_util as U
import gym, logging
from baselines import logger
import numpy as np
import tensorflow as tf
from baselines.ppo1 import mlp_policy, pposgd_simple
from baselines.ppo1.pposgd_simple import *
U.make_session(num_cpu=1).__enter__()
env= gym.make('Pendulum-v1')
seed = 9699278477418928551
env.seed(seed)
num_timesteps=5e6
gym.logger.setLevel(logging.WARN)
pi = learn_return(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_batch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
gamma=0.99, lam=0.95,
)
from scipy.stats import norm
# ------------------------------------------------------------------------------
from active_testing import pred_node, max_node, min_node, test_module
from active_testing.utils import sample_from
rand_nums = [1161003323,
415998644,
4057120664,
1747557171,
2890879164,
2055758971,
2911473105,
618390143,
691777806,
4168149016,
1809706292,
2771371912,
1956477866,
2141514268,
4025209431]
# Requirement 1: Find the initial configuration that minimizes the reward
# We need only one node for the reward. The reward is a smooth function
# given that the closed loop system is deterministic
bounds = [(-np.pi, np.pi)] # Bounds on theta
bounds.append((-1., 1.)) # Bounds on theta dot
bounds.append((7., 9.)) # Bounds on the speed
bounds.append((1.5, 2.5)) # Bounds on the torque magnitude
smooth_details_r1 = []
random_details_r1 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: traj[1]['reward']/200 )
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0,init_sample = 60,
optimize_restarts=5, exp_weight=10, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -5.),
np.sum(TM.f_acqu.GP.Y < -7.5),
TM.smooth_min_x,TM.smooth_min_val])
# With cost function
np.random.seed(r)
node0 = pred_node(f=lambda traj: traj[1]['reward']/200)
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0, with_random = True, init_sample = 60,
optimize_restarts=5, exp_weight=10,
normalizer=True)
TM.initialize()
TM.run_BO(30)
TM.k = 5
TM.run_BO(40)
TM.k = 2
TM.run_BO(70)
smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -5.),
np.sum(TM.f_acqu.GP.Y < -7.5),
TM.smooth_min_x, TM.smooth_min_val])
random_details_r1.append([np.sum(np.array(TM.random_Y) < -5.),
np.sum(np.array(TM.random_Y) < -7.5),
TM.rand_min_x, TM.rand_min_val])
print(r, smooth_details_r1[-2], smooth_details_r1[-1], random_details_r1[-1])
rand_nums.append(r)
# Requirement 2: Find the initial condition such that the pendulum stabilizes to 0
smooth_details_r2 = []
random_details_r2 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: pred1(traj))
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0,init_sample = 60,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_vals = np.array(TM.f_acqu.find_GP_func())
smooth_details_r2.append([np.sum(smooth_vals < -1.00),
np.sum(smooth_vals < -10.0),
TM.smooth_min_x,TM.smooth_min_val,
TM.smooth_min_loc])
np.random.seed(r)
node0_ns = pred_node(f=lambda traj: pred1(traj))
TM_ns = test_module(bounds=bounds, sut=lambda x0: sut(500, x0, ead=True),
f_tree=node0_ns, init_sample=60, with_smooth=False,
with_ns=True,
optimize_restarts=5, exp_weight=10, normalizer=True)
TM_ns.initialize()
TM_ns.run_BO(30)
TM_ns.k = 5
TM_ns.run_BO(40)
TM_ns.k = 2
TM_ns.run_BO(70)
smooth_details_r2.append([np.sum(TM_ns.ns_GP.Y < -1.00),
np.sum(TM_ns.ns_GP.Y < -10.0),
TM_ns.ns_min_x, TM_ns.ns_min_val,
TM_ns.ns_min_loc])
# With cost function
np.random.seed(r)
node0_rand = pred_node(f=lambda traj: pred1(traj))
TM_rand = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0_rand, with_random = True, with_smooth=False,
init_sample = 60, optimize_restarts=5, exp_weight=10,
cost_model = cost_func, normalizer=True)
TM_rand.initialize()
TM_rand.run_BO(140)
random_details_r2.append([np.sum(np.array(TM_rand.random_Y) < -1.0),
np.sum(np.array(TM_rand.random_Y) < -10.0),
TM_rand.rand_min_x, TM_rand.rand_min_val,
TM_rand.rand_min_loc])
print(r, smooth_details_r2[-2], smooth_details_r2[-1],random_details_r2[-1])
# Requirement 3: Find the initial configuration such that it stabilizies to either
# 0 or to np.pi
smooth_details_r3 = []
ns_details_r3 = []
random_details_r3 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f = lambda traj:pred1(traj))
node1 = pred_node(f = lambda traj:pred2(traj))
node2 = max_node(children=[node0, node1])
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node2,init_sample = 60,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_vals = np.array(TM.f_acqu.find_GP_func())
smooth_details_r3.append([np.sum(smooth_vals < -1.00),
np.sum(smooth_vals < -10.0),
TM.smooth_min_x,TM.smooth_min_val,
TM.smooth_min_loc])
np.random.seed(r)
node0_ns = pred_node(f=lambda traj: pred1(traj))
node1_ns = pred_node(f=lambda traj: pred2(traj))
node2_ns = max_node(children=[node0_ns, node1_ns])
TM_ns = test_module(bounds=bounds, sut=lambda x0: sut(500, x0, ead=True),
f_tree=node2_ns, init_sample=60, with_smooth=False,
with_ns=True,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM_ns.initialize()
TM_ns.run_BO(140)
ns_details_r3.append([np.sum(TM_ns.ns_GP.Y < -1.00),
np.sum(TM_ns.ns_GP.Y < -10.0),
TM_ns.ns_min_x, TM_ns.ns_min_val,
TM_ns.ns_min_loc])
# With cost function
np.random.seed(r)
node0_rand = pred_node(f=lambda traj: pred1(traj))
node1_rand = pred_node(f=lambda traj: pred2(traj))
node2_rand = max_node(children=[node0_rand, node1_rand])
TM_rand = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node2_rand, with_random = True, with_smooth=False,
init_sample = 60, optimize_restarts=5, exp_weight=10,
cost_model = cost_func, normalizer=True)
TM_rand.initialize()
TM_rand.run_BO(140)
random_details_r3.append([np.sum(np.array(TM_rand.random_Y) < -1.0),
np.sum(np.array(TM_rand.random_Y) < -10.0),
TM_rand.rand_min_x, TM_rand.rand_min_val,
TM_rand.rand_min_loc])
print(r, smooth_details_r3[-1], ns_details_r3[-1],random_details_r3[-1])
| 40.220588 | 123 | 0.625168 | '''
Here we consider a controller trained on nearest neighbor for the pendulum
environment in OpenAI Gym. The controller is taken from baselines ppo.
'''
import gym
import numpy as np
from gym import spaces
from baselines import deepq
from baselines.common import set_global_seeds, tf_util as U
import gym, logging
from baselines import logger
import numpy as np
import tensorflow as tf
from baselines.ppo1 import mlp_policy, pposgd_simple
from baselines.ppo1.pposgd_simple import *
def learn_return(env, policy_func, *,
        timesteps_per_batch, # timesteps per actor per update
        clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
        optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
        gamma, lam, # advantage estimation
        max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
        callback=None, # you can do anything in the callback, since it takes locals(), globals()
        schedule='constant' # annealing for stepsize parameters (epsilon and adam)
        ):
    """PPO training loop, adapted from baselines.ppo1.pposgd_simple.learn.

    Trains policy ``pi`` (built by *policy_func*) on *env* with the clipped
    PPO surrogate and returns the trained policy object -- presumably the
    reason for the adaptation, since the baselines original does not return
    it.  Exactly one of max_timesteps / max_episodes / max_iters /
    max_seconds must be positive.
    """
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space) # Construct network for new policy
    oldpi = policy_func("oldpi", ob_space, ac_space) # Network for old policy
    atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
    lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
    clip_param = clip_param * lrmult # Annealed cliping parameter epislon
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = U.mean(kloldnew)
    meanent = U.mean(ent)
    pol_entpen = (-entcoeff) * meanent
    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold
    surr1 = ratio * atarg # surrogate from conservative policy iteration
    surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #
    pol_surr = - U.mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)
    vfloss1 = tf.square(pi.vpred - ret)
    vpredclipped = oldpi.vpred + tf.clip_by_value(pi.vpred - oldpi.vpred, -clip_param, clip_param)
    vfloss2 = tf.square(vpredclipped - ret)
    vf_loss = .5 * U.mean(tf.maximum(vfloss1, vfloss2)) # we do the same clipping-based trust region for the value function
    total_loss = pol_surr + pol_entpen + vf_loss
    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
    loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
    var_list = pi.get_trainable_variables()
    lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])
    adam = MpiAdam(var_list)
    assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
        for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
    # NOTE(review): compute_losses, vpredbefore, lenbuffer and rewbuffer are
    # assigned below but never read here -- presumably leftovers from the
    # baselines original's logging code.
    compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
    U.initialize()
    adam.sync()
    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
    assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break
        elif max_seconds and time.time() - tstart >= max_seconds:
            break
        if schedule == 'constant':
            cur_lrmult = 1.0
        elif schedule == 'linear':
            cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
        else:
            raise NotImplementedError
        logger.log("********** Iteration %i ************"%iters_so_far)
        seg = seg_gen.__next__()
        print(sum(seg['rew']),seg['rew'], len(seg['rew']))
        add_vtarg_and_adv(seg, gamma, lam)
        # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
        ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
        vpredbefore = seg["vpred"] # predicted value function before udpate
        atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
        d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)
        optim_batchsize = optim_batchsize or ob.shape[0]
        if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
        assign_old_eq_new() # set old parameter values to new parameter values
        logger.log("Optimizing...")
        logger.log(fmt_row(13, loss_names))
        # Here we do a bunch of optimization epochs over the data
        for _ in range(optim_epochs):
            losses = [] # list of tuples, each of which gives the loss for a minibatch
            for batch in d.iterate_once(optim_batchsize):
                *newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
                adam.update(g, optim_stepsize * cur_lrmult)
                losses.append(newlosses)
            logger.log(fmt_row(13, np.mean(losses, axis=0)))
        lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
        lens, rews = map(flatten_lists, zip(*listoflrpairs))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far+=1
    return pi
U.make_session(num_cpu=1).__enter__()
env= gym.make('Pendulum-v1')
seed = 9699278477418928551
env.seed(seed)
num_timesteps=5e6
def policy_fn(name, ob_space, ac_space):
    """Factory for an MLP policy with two hidden layers of 64 units."""
    policy = mlp_policy.MlpPolicy(name=name,
                                  ob_space=ob_space,
                                  ac_space=ac_space,
                                  hid_size=64,
                                  num_hid_layers=2)
    return policy
gym.logger.setLevel(logging.WARN)
pi = learn_return(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_batch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
gamma=0.99, lam=0.95,
)
def compute_traj(max_steps, ead=False, **kwargs):
    """Roll out the trained policy ``pi`` in the module-level pendulum env.

    :param max_steps: maximum number of environment steps to take.
    :param ead: "end at done" -- stop the rollout early once the env is done.
    :param kwargs: optional overrides applied before the rollout:
        ``init_state`` (theta, theta_dot), ``max_speed`` and ``max_torque``.
    :return: (list of observations, {'reward': accumulated episode reward}).
    """
    env.reset()
    if 'init_state' in kwargs:
        # Overwrite the random reset state with the requested one.
        ob = kwargs['init_state']
        env.env.state = ob
    if 'max_speed' in kwargs:
        # Changing max_speed also rescales the observation-space bounds.
        env.env.max_speed = kwargs['max_speed']
        high = np.array([1., 1., env.env.max_speed])
        env.env.observation_space = spaces.Box(low=-high, high=high)
    if 'max_torque' in kwargs:
        # Changing max_torque also rescales the 1-D action space.
        env.env.max_torque = kwargs['max_torque']
        env.env.action_space = spaces.Box(low=-env.env.max_torque,
                                          high=env.env.max_torque, shape=(1,))
    traj = []
    reward = 0
    ob = env.env._get_obs()
    for _ in range(max_steps):
        # presumably pi.act(stochastic, ob): False -> deterministic action; confirm signature
        action, vpred = pi.act(False, ob)
        ob, r, done, _ = env.step(action)
        reward += r
        traj.append(ob)
        if done and ead:
            break
    additional_data = {'reward':reward}
    return traj, additional_data
def sut(max_steps, x0, ead=False):
    """System under test: roll out the policy from the sampled configuration x0.

    x0 packs (theta, theta_dot, max_speed, max_torque).
    """
    return compute_traj(
        max_steps,
        ead=ead,
        init_state=x0[0:2],
        max_speed=x0[2],
        max_torque=x0[3],
    )
from scipy.stats import norm
def cost_func(X):
    """Relative sample likelihood for rows of X = (theta, theta_dot, speed, torque).

    Each factor is a Normal density normalized by its value at the prior
    mode, so every factor lies in (0, 1]; returns an (n, 1) column vector.
    """
    theta_rv = norm(np.pi/2., np.pi/2.)
    torque_rv = norm(2, 0.5)
    speed_rv = norm(8, 1)
    columns = X.T
    theta_weight = theta_rv.pdf(np.abs(columns[0])) / theta_rv.pdf(np.pi/2.)
    torque_weight = torque_rv.pdf(columns[3]) / torque_rv.pdf(2)
    speed_weight = speed_rv.pdf(columns[2]) / speed_rv.pdf(8)
    combined = theta_weight * torque_weight * speed_weight
    return combined.reshape(len(combined), 1)
# ------------------------------------------------------------------------------
from active_testing import pred_node, max_node, min_node, test_module
from active_testing.utils import sample_from
rand_nums = [1161003323,
415998644,
4057120664,
1747557171,
2890879164,
2055758971,
2911473105,
618390143,
691777806,
4168149016,
1809706292,
2771371912,
1956477866,
2141514268,
4025209431]
# Requirement 1: Find the initial configuration that minimizes the reward
# We need only one node for the reward. The reward is a smooth function
# given that the closed loop system is deterministic
bounds = [(-np.pi, np.pi)] # Bounds on theta
bounds.append((-1., 1.)) # Bounds on theta dot
bounds.append((7., 9.)) # Bounds on the speed
bounds.append((1.5, 2.5)) # Bounds on the torque magnitude
smooth_details_r1 = []
random_details_r1 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: traj[1]['reward']/200 )
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0,init_sample = 60,
optimize_restarts=5, exp_weight=10, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -5.),
np.sum(TM.f_acqu.GP.Y < -7.5),
TM.smooth_min_x,TM.smooth_min_val])
# With cost function
np.random.seed(r)
node0 = pred_node(f=lambda traj: traj[1]['reward']/200)
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0, with_random = True, init_sample = 60,
optimize_restarts=5, exp_weight=10,
normalizer=True)
TM.initialize()
TM.run_BO(30)
TM.k = 5
TM.run_BO(40)
TM.k = 2
TM.run_BO(70)
smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -5.),
np.sum(TM.f_acqu.GP.Y < -7.5),
TM.smooth_min_x, TM.smooth_min_val])
random_details_r1.append([np.sum(np.array(TM.random_Y) < -5.),
np.sum(np.array(TM.random_Y) < -7.5),
TM.rand_min_x, TM.rand_min_val])
print(r, smooth_details_r1[-2], smooth_details_r1[-1], random_details_r1[-1])
rand_nums.append(r)
# Requirement 2: Find the initial condition such that the pendulum stabilizes to 0
smooth_details_r2 = []
random_details_r2 = []
def pred1(traj, gamma=0.25):
    """Discounted stability score w.r.t. the upright (theta = 0) equilibrium.

    traj is (observations, info); each observation is (cos(theta), sin(theta),
    theta_dot). Returns the negated accumulated deviation (0 is best).
    """
    observations = np.array(traj[0])
    cos_thetas = observations.T[0]
    theta_dots = observations.T[2]
    score = 0
    for cos_theta, theta_dot in zip(cos_thetas, theta_dots):
        score = np.abs(np.arccos(cos_theta))**2 + np.abs(theta_dot)**2 + score*gamma
    return -score
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: pred1(traj))
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0,init_sample = 60,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_vals = np.array(TM.f_acqu.find_GP_func())
smooth_details_r2.append([np.sum(smooth_vals < -1.00),
np.sum(smooth_vals < -10.0),
TM.smooth_min_x,TM.smooth_min_val,
TM.smooth_min_loc])
np.random.seed(r)
node0_ns = pred_node(f=lambda traj: pred1(traj))
TM_ns = test_module(bounds=bounds, sut=lambda x0: sut(500, x0, ead=True),
f_tree=node0_ns, init_sample=60, with_smooth=False,
with_ns=True,
optimize_restarts=5, exp_weight=10, normalizer=True)
TM_ns.initialize()
TM_ns.run_BO(30)
TM_ns.k = 5
TM_ns.run_BO(40)
TM_ns.k = 2
TM_ns.run_BO(70)
smooth_details_r2.append([np.sum(TM_ns.ns_GP.Y < -1.00),
np.sum(TM_ns.ns_GP.Y < -10.0),
TM_ns.ns_min_x, TM_ns.ns_min_val,
TM_ns.ns_min_loc])
# With cost function
np.random.seed(r)
node0_rand = pred_node(f=lambda traj: pred1(traj))
TM_rand = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node0_rand, with_random = True, with_smooth=False,
init_sample = 60, optimize_restarts=5, exp_weight=10,
cost_model = cost_func, normalizer=True)
TM_rand.initialize()
TM_rand.run_BO(140)
random_details_r2.append([np.sum(np.array(TM_rand.random_Y) < -1.0),
np.sum(np.array(TM_rand.random_Y) < -10.0),
TM_rand.rand_min_x, TM_rand.rand_min_val,
TM_rand.rand_min_loc])
print(r, smooth_details_r2[-2], smooth_details_r2[-1],random_details_r2[-1])
# Requirement 3: Find the initial configuration such that it stabilizies to either
# 0 or to np.pi
smooth_details_r3 = []
ns_details_r3 = []
random_details_r3 = []
def pred1(traj, gamma=0.25):
    """Discounted stability score w.r.t. the upright (theta = 0) equilibrium.

    traj is (observations, info); each observation is (cos(theta), sin(theta),
    theta_dot). Returns the negated accumulated deviation (0 is best).
    """
    observations = np.array(traj[0])
    cos_thetas = observations.T[0]
    theta_dots = observations.T[2]
    score = 0
    for cos_theta, theta_dot in zip(cos_thetas, theta_dots):
        score = np.abs(np.arccos(cos_theta))**2 + np.abs(theta_dot)**2 + score*gamma
    return -score
def pred2(traj, gamma=0.25):
    """Discounted stability score w.r.t. the hanging-down (theta = pi) equilibrium.

    Mirror of pred1: deviation is measured from pi instead of 0.
    """
    observations = np.array(traj[0])
    cos_thetas = observations.T[0]
    theta_dots = observations.T[2]
    score = 0
    for cos_theta, theta_dot in zip(cos_thetas, theta_dots):
        score = (np.pi - np.abs(np.arccos(cos_theta)))**2 + np.abs(theta_dot)**2 + score*gamma
    return -score
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f = lambda traj:pred1(traj))
node1 = pred_node(f = lambda traj:pred2(traj))
node2 = max_node(children=[node0, node1])
TM = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node2,init_sample = 60,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM.initialize()
TM.run_BO(140)
smooth_vals = np.array(TM.f_acqu.find_GP_func())
smooth_details_r3.append([np.sum(smooth_vals < -1.00),
np.sum(smooth_vals < -10.0),
TM.smooth_min_x,TM.smooth_min_val,
TM.smooth_min_loc])
np.random.seed(r)
node0_ns = pred_node(f=lambda traj: pred1(traj))
node1_ns = pred_node(f=lambda traj: pred2(traj))
node2_ns = max_node(children=[node0_ns, node1_ns])
TM_ns = test_module(bounds=bounds, sut=lambda x0: sut(500, x0, ead=True),
f_tree=node2_ns, init_sample=60, with_smooth=False,
with_ns=True,
optimize_restarts=5, exp_weight=2, normalizer=True)
TM_ns.initialize()
TM_ns.run_BO(140)
ns_details_r3.append([np.sum(TM_ns.ns_GP.Y < -1.00),
np.sum(TM_ns.ns_GP.Y < -10.0),
TM_ns.ns_min_x, TM_ns.ns_min_val,
TM_ns.ns_min_loc])
# With cost function
np.random.seed(r)
node0_rand = pred_node(f=lambda traj: pred1(traj))
node1_rand = pred_node(f=lambda traj: pred2(traj))
node2_rand = max_node(children=[node0_rand, node1_rand])
TM_rand = test_module(bounds=bounds, sut=lambda x0: sut(500,x0, ead=True),
f_tree = node2_rand, with_random = True, with_smooth=False,
init_sample = 60, optimize_restarts=5, exp_weight=10,
cost_model = cost_func, normalizer=True)
TM_rand.initialize()
TM_rand.run_BO(140)
random_details_r3.append([np.sum(np.array(TM_rand.random_Y) < -1.0),
np.sum(np.array(TM_rand.random_Y) < -10.0),
TM_rand.rand_min_x, TM_rand.rand_min_val,
TM_rand.rand_min_loc])
print(r, smooth_details_r3[-1], ns_details_r3[-1],random_details_r3[-1])
| 8,081 | 0 | 183 |
3bacdb47786b68ea0eb99f183c0ed33e5b4958ed | 1,189 | py | Python | setup.py | vvd170501/pyhton-gforms | 684956e06f4c6d8224ea3eb2f66827d3ec9bec46 | [
"MIT"
] | 3 | 2021-05-28T11:17:30.000Z | 2022-03-13T14:49:24.000Z | setup.py | vvd170501/pyhton-gforms | 684956e06f4c6d8224ea3eb2f66827d3ec9bec46 | [
"MIT"
] | 3 | 2021-05-26T17:19:18.000Z | 2022-03-01T09:46:14.000Z | setup.py | vvd170501/pyhton-gforms | 684956e06f4c6d8224ea3eb2f66827d3ec9bec46 | [
"MIT"
] | 2 | 2021-11-14T17:09:50.000Z | 2021-12-02T20:13:49.000Z | import re
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
readme = f.read()
with open('gforms/__init__.py', encoding='utf-8') as f:
version = re.search(r"__version__ = '(.+)'", f.read()).group(1)
setup(
name='gforms',
description='Google Forms wrapper for Python',
long_description=readme,
long_description_content_type='text/markdown',
author='vvd170501',
url='https://github.com/vvd170501/python-gforms',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=['gforms'],
version=version,
license_files=('LICENSE',),
python_requires='>=3.6',
install_requires=[
'beautifulsoup4',
'requests',
"typing-extensions;python_version<'3.8'",
],
extras_require={
'dev': [
'pytest',
]
},
)
| 25.297872 | 67 | 0.592094 | import re
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as f:
readme = f.read()
with open('gforms/__init__.py', encoding='utf-8') as f:
version = re.search(r"__version__ = '(.+)'", f.read()).group(1)
setup(
name='gforms',
description='Google Forms wrapper for Python',
long_description=readme,
long_description_content_type='text/markdown',
author='vvd170501',
url='https://github.com/vvd170501/python-gforms',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=['gforms'],
version=version,
license_files=('LICENSE',),
python_requires='>=3.6',
install_requires=[
'beautifulsoup4',
'requests',
"typing-extensions;python_version<'3.8'",
],
extras_require={
'dev': [
'pytest',
]
},
)
| 0 | 0 | 0 |
f6e7faf06c12bf23f5e307d5f0d9371d91449b85 | 3,754 | py | Python | seqcluster/libs/annotation.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 33 | 2015-01-26T23:18:01.000Z | 2022-01-07T21:40:49.000Z | seqcluster/libs/annotation.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 44 | 2015-01-21T17:43:42.000Z | 2021-08-25T15:49:18.000Z | seqcluster/libs/annotation.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 18 | 2015-05-18T15:34:32.000Z | 2021-02-10T17:58:24.000Z | import seqcluster.libs.logger as mylog
import os
from seqcluster.libs.classes import annotation, dbannotation
logger = mylog.getLogger("run")
def read_gtf_line(cols, field="name"):
    """Parse one GTF line (already split into columns) into locus information.

    :param cols: the 9 GTF columns
        (chrom, source, feature, start, end, score, strand, frame, attributes).
    :param field: attribute-key suffix used to pick the feature name
        (e.g. "name" matches "gene_name").
    :return: [chrom, start, end, strand, group, name] where group is the
        biotype when annotated, otherwise the raw feature column.
    """
    field = field.lower()
    try:
        group = cols[2]
        attrs = cols[8].split(";")
        # First try the requested *field*, then fall back to gene_id, then "None".
        name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith(field)]
        if not name:
            name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("gene_id")]
        if not name:
            name = ["None"]
        biotype = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("biotype")]
        if biotype:
            # Prefer the annotated biotype over the raw feature column.
            group = biotype[0]
        c = cols[0]
        s = int(cols[3])
        e = int(cols[4])
        st = cols[6]
        return [c, s, e, st, group, name[0]]
    # BUG FIX: was ``except(Exception, e)`` (Python 2 syntax) which raises
    # NameError on Python 3; ``except Exception as e`` is the correct form.
    except Exception as e:
        logger.error(cols)
        logger.error("File is not in correct format")
        logger.error("Expect chr source feature start end . strand attributes")
        logger.error("Attributes are 'gene_name SNCA; gene_id ENSG; '")
        logger.error("The 3rd column is used as type of small RNA (like miRNA)")
        logger.error("at least should contains '; *name NAME; '")
        logger.error(e)
        raise
def _position_in_feature(pos_a, pos_b):
    """Return (distance to 5' end, distance to 3' end, relative strand).

    ``pos_a`` and ``pos_b`` are (start, end, strand) triples; ``strd`` is
    "+" when both features lie on the same strand, "-" otherwise.

    NOTE(review): the final ``else`` pairs only with the *last* ``if``
    (a strand "-" and b strand "-"), so whenever that condition is false
    the ``else`` branch overwrites the lento5/lento3 values computed by
    the earlier strand-specific branches, making them dead code. This
    looks unintended -- confirm before relying on the per-strand formulas.
    """
    strd = "-"
    # substring test; for single-char strands this is equality
    if pos_a[2] in pos_b[2]:
        strd = "+"
    if pos_a[2] in "+" and pos_b[2] in "+":
        lento5 = pos_a[0] - pos_b[1] + 1
        lento3 = pos_a[1] - pos_b[1] + 1
    if pos_a[2] in "+" and pos_b[2] in "-":
        lento5 = pos_a[1] - pos_b[0] + 1
        lento3 = pos_a[0] - pos_b[1] + 1
    if pos_a[2] in "-" and pos_b[2] in "+":
        lento5 = pos_a[0] - pos_b[1] + 1
        lento3 = pos_a[1] - pos_b[0] + 1
    if pos_a[2] in "-" and pos_b[2] in "-":
        lento3 = pos_a[0] - pos_b[0] + 1
        lento5 = pos_a[1] - pos_b[1] + 1
    else:
        lento5 = pos_a[0] - pos_b[0] + 1
        lento3 = pos_a[1] - pos_b[1] + 1
    return lento5, lento3, strd
def anncluster(c, clus_obj, db, type_ann, feature_id="name"):
    """Intersect cluster/loci positions with an annotation file.

    :param c: intersection result exposing ``.features()`` (presumably a
        pybedtools BedTool -- confirm against callers).
    :param clus_obj: object with ``clus`` and ``loci`` dicts; updated in place
        and returned.
    :param db: annotation file path; its basename (sans extension) becomes
        the annotation-database key.
    :param type_ann: "bed" or "gtf".
    :param feature_id: attribute suffix forwarded to ``read_gtf_line``.
    """
    # Column offsets of the query (cluster) side in the intersected line.
    id_sa, id_ea, id_id, id_idl, id_sta = 1, 2, 3, 4, 5
    if type_ann == "bed":
        # Offsets of the annotation side for BED input.
        # NOTE(review): these are only defined for "bed"; a type_ann other
        # than "bed"/"gtf" would hit a NameError below -- confirm inputs.
        id_sb = 7
        id_eb = 8
        id_stb = 11
        id_tag = 9
    ida = 0
    clus_id = clus_obj.clus
    loci_id = clus_obj.loci
    db = os.path.splitext(db)[0]
    logger.debug("Type:%s\n" % type_ann)
    for cols in c.features():
        if type_ann == "gtf":
            # NOTE(review): rebinds *db* per line to the GTF group/biotype.
            cb, sb, eb, stb, db, tag = read_gtf_line(cols[6:], feature_id)
        else:
            sb = int(cols[id_sb])
            eb = int(cols[id_eb])
            stb = cols[id_stb]
            tag = cols[id_tag]
        id = int(cols[id_id])
        idl = int(cols[id_idl])
        if (id in clus_id):
            clus = clus_id[id]
            sa = int(cols[id_sa])
            ea = int(cols[id_ea])
            ida += 1
            lento5, lento3, strd = _position_in_feature([sa, ea, cols[id_sta]], [sb, eb, stb])
            if db in loci_id[idl].db_ann:
                # Extend the existing annotation database for this locus.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = loci_id[idl].db_ann[db]
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            else:
                # First annotation from this database for this locus.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = dbannotation(1)
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            clus_id[id] = clus
    clus_obj.clus = clus_id
    clus_obj.loci = loci_id
    return clus_obj
| 35.084112 | 123 | 0.537294 | import seqcluster.libs.logger as mylog
import os
from seqcluster.libs.classes import annotation, dbannotation
logger = mylog.getLogger("run")
def read_gtf_line(cols, field="name"):
    """Parse one GTF line (already split into columns) into locus information.

    :param cols: the 9 GTF columns
        (chrom, source, feature, start, end, score, strand, frame, attributes).
    :param field: attribute-key suffix used to pick the feature name
        (e.g. "name" matches "gene_name").
    :return: [chrom, start, end, strand, group, name] where group is the
        biotype when annotated, otherwise the raw feature column.
    """
    field = field.lower()
    try:
        group = cols[2]
        attrs = cols[8].split(";")
        # First try the requested *field*, then fall back to gene_id, then "None".
        name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith(field)]
        if not name:
            name = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("gene_id")]
        if not name:
            name = ["None"]
        biotype = [attr.strip().split(" ")[1] for attr in attrs if attr.strip().split(" ")[0].lower().endswith("biotype")]
        if biotype:
            # Prefer the annotated biotype over the raw feature column.
            group = biotype[0]
        c = cols[0]
        s = int(cols[3])
        e = int(cols[4])
        st = cols[6]
        return [c, s, e, st, group, name[0]]
    # BUG FIX: was ``except(Exception, e)`` (Python 2 syntax) which raises
    # NameError on Python 3; ``except Exception as e`` is the correct form.
    except Exception as e:
        logger.error(cols)
        logger.error("File is not in correct format")
        logger.error("Expect chr source feature start end . strand attributes")
        logger.error("Attributes are 'gene_name SNCA; gene_id ENSG; '")
        logger.error("The 3rd column is used as type of small RNA (like miRNA)")
        logger.error("at least should contains '; *name NAME; '")
        logger.error(e)
        raise
def _position_in_feature(pos_a, pos_b):
    """Return (distance to 5' end, distance to 3' end, relative strand).

    ``pos_a`` and ``pos_b`` are (start, end, strand) triples; ``strd`` is
    "+" when both features lie on the same strand, "-" otherwise.

    NOTE(review): the final ``else`` pairs only with the *last* ``if``
    (a strand "-" and b strand "-"), so whenever that condition is false
    the ``else`` branch overwrites the lento5/lento3 values computed by
    the earlier strand-specific branches, making them dead code. This
    looks unintended -- confirm before relying on the per-strand formulas.
    """
    strd = "-"
    # substring test; for single-char strands this is equality
    if pos_a[2] in pos_b[2]:
        strd = "+"
    if pos_a[2] in "+" and pos_b[2] in "+":
        lento5 = pos_a[0] - pos_b[1] + 1
        lento3 = pos_a[1] - pos_b[1] + 1
    if pos_a[2] in "+" and pos_b[2] in "-":
        lento5 = pos_a[1] - pos_b[0] + 1
        lento3 = pos_a[0] - pos_b[1] + 1
    if pos_a[2] in "-" and pos_b[2] in "+":
        lento5 = pos_a[0] - pos_b[1] + 1
        lento3 = pos_a[1] - pos_b[0] + 1
    if pos_a[2] in "-" and pos_b[2] in "-":
        lento3 = pos_a[0] - pos_b[0] + 1
        lento5 = pos_a[1] - pos_b[1] + 1
    else:
        lento5 = pos_a[0] - pos_b[0] + 1
        lento3 = pos_a[1] - pos_b[1] + 1
    return lento5, lento3, strd
def anncluster(c, clus_obj, db, type_ann, feature_id="name"):
    """Intersect cluster/loci positions with an annotation file.

    :param c: intersection result exposing ``.features()`` (presumably a
        pybedtools BedTool -- confirm against callers).
    :param clus_obj: object with ``clus`` and ``loci`` dicts; updated in place
        and returned.
    :param db: annotation file path; its basename (sans extension) becomes
        the annotation-database key.
    :param type_ann: "bed" or "gtf".
    :param feature_id: attribute suffix forwarded to ``read_gtf_line``.
    """
    # Column offsets of the query (cluster) side in the intersected line.
    id_sa, id_ea, id_id, id_idl, id_sta = 1, 2, 3, 4, 5
    if type_ann == "bed":
        # Offsets of the annotation side for BED input.
        # NOTE(review): these are only defined for "bed"; a type_ann other
        # than "bed"/"gtf" would hit a NameError below -- confirm inputs.
        id_sb = 7
        id_eb = 8
        id_stb = 11
        id_tag = 9
    ida = 0
    clus_id = clus_obj.clus
    loci_id = clus_obj.loci
    db = os.path.splitext(db)[0]
    logger.debug("Type:%s\n" % type_ann)
    for cols in c.features():
        if type_ann == "gtf":
            # NOTE(review): rebinds *db* per line to the GTF group/biotype.
            cb, sb, eb, stb, db, tag = read_gtf_line(cols[6:], feature_id)
        else:
            sb = int(cols[id_sb])
            eb = int(cols[id_eb])
            stb = cols[id_stb]
            tag = cols[id_tag]
        id = int(cols[id_id])
        idl = int(cols[id_idl])
        if (id in clus_id):
            clus = clus_id[id]
            sa = int(cols[id_sa])
            ea = int(cols[id_ea])
            ida += 1
            lento5, lento3, strd = _position_in_feature([sa, ea, cols[id_sta]], [sb, eb, stb])
            if db in loci_id[idl].db_ann:
                # Extend the existing annotation database for this locus.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = loci_id[idl].db_ann[db]
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            else:
                # First annotation from this database for this locus.
                ann = annotation(db, tag, strd, lento5, lento3)
                tdb = dbannotation(1)
                tdb.add_db_ann(ida, ann)
                loci_id[idl].add_db(db, tdb)
            clus_id[id] = clus
    clus_obj.clus = clus_id
    clus_obj.loci = loci_id
    return clus_obj
| 0 | 0 | 0 |
286bc6b3c5ed8544085b2f819632a9e139dca2dd | 310 | py | Python | testttt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | testttt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | testttt.py | Srinivassan-Ramamurthy/python_programs | 53b390669c7e88532c67d80b758a9199d6fde8cf | [
"bzip2-1.0.6"
] | null | null | null | pattern()
| 23.846154 | 37 | 0.380645 | def pattern():
a=input('enter 5 letter word ')
for i in range(len(a)):
for j in range(len(a)):
if(j==i):
print(a[i],end='')
elif(j+i==4):
print(a[-1-i],end='')
else:
print(' ',end='')
print()
pattern()
| 278 | 0 | 22 |
ac8b860ac3e67063a748e256dd35316334efdf95 | 4,826 | py | Python | compose/plugin.py | GM-Alex/compose-improved | 585175e50b71b07596b0cba3b7d66ad367c9b2ed | [
"Apache-2.0"
] | 1 | 2017-05-26T15:22:02.000Z | 2017-05-26T15:22:02.000Z | compose/plugin.py | GM-Alex/compose-improved | 585175e50b71b07596b0cba3b7d66ad367c9b2ed | [
"Apache-2.0"
] | null | null | null | compose/plugin.py | GM-Alex/compose-improved | 585175e50b71b07596b0cba3b7d66ad367c9b2ed | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import inspect
import json
import os
import re
from functools import partial
import compose
| 30.352201 | 101 | 0.627849 | from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import inspect
import json
import os
import re
from functools import partial
import compose
class PartialMethod(partial):
def __get__(self, instance, owner):
if instance is None:
return self
return partial(
self.func,
instance,
*(self.args or ()),
**(self.keywords or {})
)
def compose_patch(scope, name):
def wrapper(fnc):
original = getattr(scope, name)
if fnc.__doc__ is None:
fnc.__doc__ = original.__doc__
patched = PartialMethod(fnc, original)
patched.__doc__ = fnc.__doc__
if hasattr(original, '__standalone__'):
patched.__standalone__ = original.__standalone__
setattr(scope, name, patched)
return fnc
return wrapper
def compose_command(standalone=False):
def update_command_doc(original_doc, fnc_name, fnc_doc):
pre_doc = ''
command_regex = r'(\s*)([^ ]+)(\s*)(.*)'
doc_commands = None
for compose_doc_line in original_doc.splitlines():
if doc_commands is not None and re.match(command_regex, compose_doc_line):
command = re.search(command_regex, compose_doc_line)
doc_commands[command.group(2)] = compose_doc_line
if fnc_name not in doc_commands:
space_to_text = len(command.group(2) + command.group(3))
new_command = command.group(1) + fnc_name
new_command += (' ' * (space_to_text - len(fnc_name)))
new_command += fnc_doc.strip(' \t\n\r').splitlines()[0]
doc_commands[fnc_name] = new_command
else:
if re.match(r'\s*Commands:\s*', compose_doc_line):
doc_commands = {}
pre_doc += compose_doc_line + '\n'
doc_commands = collections.OrderedDict(sorted(doc_commands.items()))
return pre_doc + '\n'.join(doc_commands.values())
def wrap(fnc):
def return_fnc(*args, **kargs):
raise PluginCommandError(
"Command function '{}' must not called out of scope.".format(fnc.__name__)
)
# Using __modified_doc__ as fix for http://bugs.python.org/issue12773
if hasattr(compose.cli.main.TopLevelCommand, '__modified_doc__'):
original_doc = compose.cli.main.TopLevelCommand.__modified_doc__
else:
original_doc = compose.cli.main.TopLevelCommand.__doc__
fnc.__standalone__ = standalone
modified_doc = update_command_doc(original_doc, fnc.__name__, fnc.__doc__)
try:
compose.cli.main.TopLevelCommand.__doc__ = modified_doc
except AttributeError:
compose.cli.main.TopLevelCommand.__modified_doc__ = modified_doc
setattr(compose.cli.main.TopLevelCommand, fnc.__name__, fnc)
return return_fnc
return wrap
class PluginError(Exception):
pass
class PluginJsonFileError(PluginError):
pass
class PluginNotImplementError(PluginError):
pass
class PluginCommandError(PluginError):
pass
class Plugin:
required_fields = ['name', 'version']
def __init__(self, plugin_manager, config):
self.plugin_manager = plugin_manager
self.config = config
file = os.path.abspath(inspect.getfile(self.__class__))
self.path = os.path.dirname(file)
self.id = os.path.basename(self.path)
self.name = self.id
self.description = ''
self.version = None
self.config = None
plugin_file = os.path.join(self.path, 'plugin.json')
self.load_plugin_info_from_file(plugin_file)
@staticmethod
def check_required_plugin_file_settings(plugin_info, required_keys):
for required_key in required_keys:
if required_key not in plugin_info:
raise PluginJsonFileError("Missing json attribute '{}'".format(required_key))
return True
def load_plugin_info_from_file(self, file):
if os.path.isfile(file):
with open(file) as f:
plugin_info = json.load(f)
self.check_required_plugin_file_settings(plugin_info, self.required_fields)
self.name = plugin_info['name']
self.description = plugin_info['description'] if 'description' in plugin_info else ''
self.version = plugin_info['version']
else:
raise PluginJsonFileError('JSON plugin file not found')
def install(self):
return True
def uninstall(self):
return True
def update(self, old_version):
return None
def configure(self):
return None
| 4,066 | 350 | 210 |
d7e9adcefd3cab5ccb8d34cc92eaa8b709da5a03 | 3,774 | py | Python | pygsm/tests/test_ase.py | stenczelt/pyGSM | 48e7a710744ec768e2c4a0f4d8dc1f9ffd948ce1 | [
"MIT"
] | null | null | null | pygsm/tests/test_ase.py | stenczelt/pyGSM | 48e7a710744ec768e2c4a0f4d8dc1f9ffd948ce1 | [
"MIT"
] | 2 | 2021-05-29T13:04:31.000Z | 2021-05-30T11:05:41.000Z | pygsm/tests/test_ase.py | stenczelt/pyGSM | 48e7a710744ec768e2c4a0f4d8dc1f9ffd948ce1 | [
"MIT"
] | null | null | null | import numpy as np
from ase.calculators.lj import LennardJones
from ase.units import Bohr, Ha
from pytest import approx, raises
from pygsm.level_of_theories.ase import ASELoT, geom_to_ase, xyz_to_ase
from pygsm.level_of_theories.base_lot import LoTError
xyz_4x4 = [
["H", 1.0, 2.0, 3.0],
["He", 4.0, 5.0, 6.0],
["Li", 7.0, 8.0, 9.0],
["Be", 10.0, 11.0, 12.0],
]
| 28.80916 | 84 | 0.659777 | import numpy as np
from ase.calculators.lj import LennardJones
from ase.units import Bohr, Ha
from pytest import approx, raises
from pygsm.level_of_theories.ase import ASELoT, geom_to_ase, xyz_to_ase
from pygsm.level_of_theories.base_lot import LoTError
xyz_4x4 = [
["H", 1.0, 2.0, 3.0],
["He", 4.0, 5.0, 6.0],
["Li", 7.0, 8.0, 9.0],
["Be", 10.0, 11.0, 12.0],
]
def test_geom_to_ase():
    """geom_to_ase keeps atomic numbers and Cartesian positions intact."""
    atomic_numbers = [1, 2, 3, 4]
    positions = np.arange(12).reshape(4, 3)
    atoms = geom_to_ase(atomic_numbers, positions)
    expected_symbols = ["H", "He", "Li", "Be"]
    assert atoms.get_chemical_symbols() == expected_symbols
    assert atoms.get_positions() == approx(positions)
def test_xyz_to_ase():
    """xyz_to_ase converts [symbol, x, y, z] rows into an equivalent Atoms object."""
    atoms = xyz_to_ase(xyz_4x4)
    assert atoms.get_chemical_symbols() == ["H", "He", "Li", "Be"]
    expected_positions = np.array([row[1:] for row in xyz_4x4])
    assert atoms.get_positions() == approx(expected_positions)
def test_ase_lot_from_string():
    """from_calculator_string resolves the import path and forwards kwargs."""
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones",
        calculator_kwargs=dict(epsilon=1.234),
        geom=xyz_4x4,
    )
    calculator = lot.ase_calculator
    assert isinstance(calculator, LennardJones)
    assert calculator.parameters["epsilon"] == approx(1.234)
def test_ase_lot_error():
    """LoT construction and run() raise descriptive errors on bad input."""
    # Module path does not exist.
    with raises(LoTError, match="ASE-calculator's module is not found.*"):
        _ = ASELoT.from_calculator_string(
            calculator_import="ase.calculators.foo.Dummy", geom=xyz_4x4,
        )
    # Module exists but the class does not.
    with raises(LoTError, match="ASE-calculator's class.*not found in module .*"):
        _ = ASELoT.from_calculator_string(
            calculator_import="ase.calculators.lj.Dummy", geom=xyz_4x4,
        )
    # run not implemented mode
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones", geom=xyz_4x4,
    )
    with raises(
        NotImplementedError,
        match="Run type energgy is not implemented in the ASE calculator interface",
    ):
        # misspelled energy
        lot.run(xyz_4x4, 0, 0, runtype="energgy")
def test_ase_lot_copy():
    """ASELoT.copy shares the calculator instance and preserves all options."""
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones", geom=xyz_4x4,
    )
    clone = ASELoT.copy(lot, dict())
    # we are NOT making a new instance of the calculator
    assert lot.ase_calculator == clone.ase_calculator
    # every option matches between the two objects
    all_keys = set(lot.options.keys()).union(clone.options.keys())
    for key in all_keys:
        assert lot.options[key] == clone.options[key]
def test_ase_lot_copy_update():
    """Options passed to ASELoT.copy override the originals; the rest carry over."""
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones", geom=xyz_4x4,
    )
    clone = ASELoT.copy(lot, dict(ID=1))
    for key in lot.options.keys():
        expected = 1 if key == "ID" else lot.options[key]
        assert clone.options[key] == expected
def test_ase_calculation():
    """Energies/gradients stored by the LoT match a direct ASE reference run.

    The LoT stores values that, multiplied by Ha (and Ha/Bohr), reproduce
    ASE's eV / eV-per-Angstrom results -- presumably atomic units internally;
    confirm against the ASELoT implementation.
    """
    kw = dict(r0=3, rc=10, sigma=3)
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones",
        geom=xyz_4x4,
        calculator_kwargs=kw,
    )
    # ase ref
    calc = LennardJones(**kw)
    atoms = xyz_to_ase(xyz_4x4)
    atoms.calc = calc
    # not doing gradient if not asked for
    # run the calculation
    lot.run(xyz_4x4, 0, 0)
    assert lot._Energies[(0, 0)][0] * Ha == atoms.get_potential_energy()
    # gradient is the negative of the force
    assert lot.Gradients[(0, 0)][0] * Ha / Bohr == approx(-atoms.get_forces())
def test_ase_calculation_nograd():
    """runtype='energy' stores an energy but computes no gradient."""
    # not doing gradient if not asked for
    lot = ASELoT.from_calculator_string(
        calculator_import="ase.calculators.lj.LennardJones", geom=xyz_4x4,
    )
    # run the calculation in energy-only mode
    lot.run(xyz_4x4, 0, 0, runtype="energy")
    key = (0, 0)
    assert key in lot._Energies.keys()
    assert key not in lot.Gradients.keys()
| 3,202 | 0 | 184 |
59d73a2e5b7f78e4793c434d33a541c7dae2d810 | 254 | py | Python | survey/features/page_objects/weights.py | ericazhou7/uSurvey | 1236f33355662957e7e1e769dde1811b910673a5 | [
"BSD-3-Clause"
] | 5 | 2016-08-25T12:48:54.000Z | 2018-08-16T22:49:43.000Z | survey/features/page_objects/weights.py | ericazhou7/uSurvey | 1236f33355662957e7e1e769dde1811b910673a5 | [
"BSD-3-Clause"
] | 2 | 2016-08-11T06:43:56.000Z | 2016-12-08T09:11:36.000Z | survey/features/page_objects/weights.py | ericazhou7/uSurvey | 1236f33355662957e7e1e769dde1811b910673a5 | [
"BSD-3-Clause"
] | 7 | 2016-09-16T11:03:44.000Z | 2020-10-28T22:01:20.000Z | from survey.features.page_objects.base import PageObject
__author__ = 'mnandri'
| 21.166667 | 56 | 0.783465 | from survey.features.page_objects.base import PageObject
__author__ = 'mnandri'
class ListLocationWeightsPage(PageObject):
    """Page object for the location-weights listing page."""
    url = "/locations/weights/"
class ListLocationWeightsErrorLogPage(PageObject):
    """Page object for the location-weights error-log page."""
    url = "/locations/weights/error_logs/"
| 0 | 125 | 46 |
be6fc59741cd65844495a4517390894b6f81b3ef | 11,198 | py | Python | muffin_debugtoolbar/plugin.py | klen/muffin-debugtoolbar | bce845f48ae3f21c859663190f5be3ae69970ccc | [
"MIT"
] | 4 | 2015-06-09T15:22:56.000Z | 2016-11-08T11:49:57.000Z | muffin_debugtoolbar/plugin.py | klen/muffin-debugtoolbar | bce845f48ae3f21c859663190f5be3ae69970ccc | [
"MIT"
] | null | null | null | muffin_debugtoolbar/plugin.py | klen/muffin-debugtoolbar | bce845f48ae3f21c859663190f5be3ae69970ccc | [
"MIT"
] | null | null | null | """Debug Toolbar Plugin."""
import asyncio
import importlib
import ipaddress as ip
import os.path as op
import re
import sys
import uuid
from muffin import (
Response, StaticRoute, HTTPException, HTTPBadRequest, to_coroutine, HTTPForbidden)
from muffin.plugins import BasePlugin, PluginException
from muffin.utils import json
from . import panels, utils
from .tbtools.tbtools import get_traceback
RE_BODY = re.compile(b'<\/body>', re.I)
U_SSE_PAYLOAD = "id: {0}\nevent: new_request\ndata: {1}\n\n"
REDIRECT_CODES = (300, 301, 302, 303, 305, 307, 308)
PLUGIN_ROOT = op.dirname(op.abspath(__file__))
@asyncio.coroutine
def debugtoolbar_middleware_factory(app, handler):
    """Setup Debug middleware wrapping *handler* for the given *app*."""
    dbtb = app.ps.debugtoolbar
    @asyncio.coroutine
    def debugtoolbar_middleware(request):
        """Integrate to application."""
        # Check for debugtoolbar is enabled for the request
        if not dbtb.cfg.enabled or any(map(request.path.startswith, dbtb.cfg.exclude)):
            return (yield from handler(request))
        remote_host, remote_port = request.transport.get_extra_info('peername')
        for host in dbtb.cfg.hosts:
            if ip.ip_address(remote_host) in ip.ip_network(host):
                break
        # for/else: no configured network matched the client IP -> bypass toolbar
        else:
            return (yield from handler(request))
        # Initialize a debugstate for the request
        state = DebugState(app, request)
        dbtb.history[state.id] = state
        context_switcher = state.wrap_handler(handler)
        # Make response
        try:
            response = yield from context_switcher(handler(request))
            state.status = response.status
        except HTTPException as exc:
            # HTTP exceptions are valid responses (4xx/5xx pages).
            response = exc
            state.status = response.status
        except Exception as exc:
            # Store traceback for unhandled exception
            state.status = 500
            if not dbtb.cfg.intercept_exc:
                raise
            tb = get_traceback(
                info=sys.exc_info(), skip=1, show_hidden_frames=False,
                ignore_system_exceptions=True, exc=exc)
            # Keep the traceback and its frames so the debugger views can find them.
            dbtb.exceptions[tb.id] = request['pdbt_tb'] = tb
            for frame in tb.frames:
                dbtb.frames[id(frame)] = frame
            response = Response(text=tb.render_full(request), content_type='text/html')
        # Intercept http redirect codes and display an html page with a link to the target.
        if dbtb.cfg.intercept_redirects and response.status in REDIRECT_CODES \
                and 'Location' in response.headers:
            response = yield from app.ps.jinja2.render(
                'debugtoolbar/redirect.html', response=response)
            response = Response(text=response, content_type='text/html')
        yield from state.process_response(response)
        # Only inject the toolbar markup into HTML responses with a </body> tag.
        if isinstance(response, Response) and response.content_type == 'text/html' and \
                RE_BODY.search(response.body):
            return (yield from dbtb.inject(state, response))
        return response
    return debugtoolbar_middleware
class Plugin(BasePlugin):
    """The plugin implementation."""
    # Muffin plugin name (config section / ``app.ps`` attribute).
    name = 'debugtoolbar'
    # Default configuration; overridable via application config.
    defaults = {
        'enabled': True,
        'hosts': ['127.0.0.1'],  # networks allowed to see the toolbar
        'prefix': '/_debug',  # URL prefix for all toolbar routes
        'intercept_exc': 'debug',  # debug/display/False,
        'intercept_redirects': True,
        'exclude': [],  # request path prefixes the toolbar skips
        # Per-request panels shown for each captured request.
        'panels': [
            panels.HeaderDebugPanel,
            panels.RequestVarsDebugPanel,
            panels.LoggingDebugPanel,
            panels.TracebackDebugPanel,
        ],
        'additional_panels': [],
        # Application-wide panels (not tied to a single request).
        'global_panels': [
            panels.RoutesDebugPanel,
            panels.ConfigurationDebugPanel,
            panels.MiddlewaresDebugPanel,
            panels.VersionsDebugPanel,
        ]
    }
    def setup(self, app):
        """Setup the plugin and prepare application.

        Registers templates, static files and all toolbar routes, and
        initializes the shared history/exception/frame stores.
        """
        super(Plugin, self).setup(app)
        if 'jinja2' not in app.plugins:
            raise PluginException('The plugin requires Muffin-Jinja2 plugin installed.')
        # Normalize the prefix to a single trailing slash and never debug ourselves.
        self.cfg.prefix = self.cfg.prefix.rstrip('/') + '/'
        self.cfg.exclude.append(self.cfg.prefix)
        # Setup debugtoolbar templates
        app.ps.jinja2.cfg.template_folders.append(op.join(PLUGIN_ROOT, 'templates'))
        self.cfg.panels += list(self.cfg.additional_panels)
        panels_ = []
        for panel in self.cfg.panels:
            if isinstance(panel, str):
                # Resolve "module.path:ClassName" strings to panel classes.
                # NOTE(review): eval() resolves the class name from config;
                # config is assumed trusted here.
                mod, _, panel = panel.partition(':')
                mod = importlib.import_module(mod)
                panel = eval(panel or 'DebugPanel', mod.__dict__)
            panels_.append(panel)
        self.cfg.panels = panels_
        # Setup debugtoolbar static files
        app.router.register_route(StaticRoute(
            'debugtoolbar.static',
            self.cfg.prefix + 'static/',
            op.join(PLUGIN_ROOT, 'static')))
        app.register(self.cfg.prefix + 'sse', name='debugtoolbar.sse')(self.sse)
        app.register(
            self.cfg.prefix + 'exception', name='debugtoolbar.exception')(self.exception)
        app.register(
            self.cfg.prefix + 'execute', name='debugtoolbar.execute')(self.execute)
        app.register(
            self.cfg.prefix + 'source', name='debugtoolbar.source')(self.source)
        app.register(
            self.cfg.prefix.rstrip('/'),
            self.cfg.prefix,
            self.cfg.prefix + '{request_id}', name='debugtoolbar.request')(self.view)
        # Shared, bounded stores for captured requests, tracebacks and frames.
        app['debugtoolbar'] = {}
        app['debugtoolbar']['pdbt_token'] = uuid.uuid4().hex
        self.history = app['debugtoolbar']['history'] = utils.History(50)
        self.exceptions = app['debugtoolbar']['exceptions'] = utils.History(50)
        self.frames = app['debugtoolbar']['frames'] = utils.History(100)
    @asyncio.coroutine
    def start(self, app):
        """Start the application: install the middleware and build panels.

        The middleware is inserted first so it wraps every other handler.
        """
        app.middlewares.insert(0, debugtoolbar_middleware_factory)
        self.global_panels = [Panel(self.app) for Panel in self.cfg.global_panels]
    @asyncio.coroutine
    def inject(self, state, response):
        """Inject the Debug Toolbar markup into an HTML response body.

        Renders the toolbar snippet and splices it in just before the
        closing </body> tag of the response.
        """
        html = yield from self.app.ps.jinja2.render(
            'debugtoolbar/inject.html',
            static_path=self.cfg.prefix + 'static',
            toolbar_url=self.cfg.prefix + state.id,
        )
        # Encode with the request charset; RE_BODY matches the closing body tag.
        html = html.encode(state.request.charset or 'utf-8')
        response.body = RE_BODY.sub(html + b'</body>', response.body)
        return response
    @asyncio.coroutine
    def view(self, request):
        """Render the Debug Toolbar page for a stored request.

        Looks up the DebugState by the request_id URL parameter; a missing
        or expired id renders the toolbar with no panels.
        Raises HTTPForbidden when the authorize() hook rejects the request.
        """
        auth = yield from self.authorize(request)
        if not auth:
            raise HTTPForbidden()
        request_id = request.match_info.get('request_id')
        state = self.history.get(request_id, None)
        response = yield from self.app.ps.jinja2.render(
            'debugtoolbar/toolbar.html',
            debugtoolbar=self,
            state=state,
            static_path=self.cfg.prefix + 'static',
            panels=state and state.panels or [],
            global_panels=self.global_panels,
            request=state and state.request or None,
        )
        return Response(text=response, content_type='text/html')
    @asyncio.coroutine
    def authorize(self, request):  # noqa
        """Default authorization: allow everyone.

        Replaced by a user handler via the authorization() decorator.
        """
        return True
    def authorization(self, func):
        """Define a authorization handler.
        ::
            debugtoolbar = muffin_debugtoolbar.Plugin()
            debugtoolbar.setup(app)
            @debugtoolbar.authorization
            def current_user_is_logged(request):
                user = yield from load_session(request)
                return user
        """
        # Replace the default allow-all authorize() with the user handler,
        # coerced to a coroutine so callers can always `yield from` it.
        self.authorize = to_coroutine(func)
        return func
    @asyncio.coroutine
    def sse(self, request):
        """Server-Sent Events endpoint feeding the toolbar's request list.

        Replays the stored request history to the client whenever its
        Last-Event-Id header is behind the newest stored request id;
        otherwise returns an empty event-stream body.
        """
        response = Response(status=200)
        response.content_type = 'text/event-stream'
        response.text = ''
        active_request_id = request.GET.get('request_id')
        client_last_request_id = str(request.headers.get('Last-Event-Id', 0))
        if self.history:
            last_request_id = next(reversed(self.history))
            # Only emit a payload when the client is out of date.
            if not last_request_id == client_last_request_id:
                data = []
                for _id in reversed(self.history):
                    data.append([
                        _id, self.history[_id].json, 'active' if active_request_id == _id else ''])
                if data:
                    response.text = U_SSE_PAYLOAD.format(last_request_id, json.dumps(data))
        return response
@asyncio.coroutine
@asyncio.coroutine
@asyncio.coroutine
class DebugState:

    """Per-request debug state: panels, response status and post-processing."""

    def __init__(self, app, request):
        """Store the request and instantiate the configured panels."""
        self.request = request
        self.status = 200
        # One panel instance per configured panel class, bound to this request.
        self.panels = [Panel(app, request) for Panel in app.ps.debugtoolbar.cfg.panels]

    @property
    def id(self):
        """Return state ID (the object's identity as a string)."""
        return str(id(self))

    @property
    def json(self):
        """Return a JSON-serializable summary of the tracked request."""
        return {'method': self.request.method,
                'path': self.request.path,
                'scheme': 'http',
                'status_code': self.status}

    def wrap_handler(self, handler):
        """Let every panel wrap the request handler; return the switcher.

        BUG FIX: this method was missing although the middleware calls
        state.wrap_handler(handler); restored here.
        """
        context_switcher = utils.ContextSwitcher()
        for panel in self.panels:
            panel.wrap_handler(handler, context_switcher)
        return context_switcher

    @asyncio.coroutine
    def process_response(self, response):
        """Give every panel a chance to post-process the response."""
        for panel in self.panels:
            yield from panel.process_response(response)
| 34.349693 | 99 | 0.610734 | """Debug Toolbar Plugin."""
import asyncio
import importlib
import ipaddress as ip
import os.path as op
import re
import sys
import uuid
from muffin import (
Response, StaticRoute, HTTPException, HTTPBadRequest, to_coroutine, HTTPForbidden)
from muffin.plugins import BasePlugin, PluginException
from muffin.utils import json
from . import panels, utils
from .tbtools.tbtools import get_traceback
RE_BODY = re.compile(b'<\/body>', re.I)
U_SSE_PAYLOAD = "id: {0}\nevent: new_request\ndata: {1}\n\n"
REDIRECT_CODES = (300, 301, 302, 303, 305, 307, 308)
PLUGIN_ROOT = op.dirname(op.abspath(__file__))
@asyncio.coroutine
def debugtoolbar_middleware_factory(app, handler):
    """Build the Debug Toolbar middleware wrapping *handler*.

    The returned coroutine instruments each request with a DebugState,
    captures unhandled exceptions, optionally intercepts redirects, and
    injects the toolbar markup into HTML responses.
    """
    dbtb = app.ps.debugtoolbar

    @asyncio.coroutine
    def debugtoolbar_middleware(request):
        """Integrate to application."""
        # Check for debugtoolbar is enabled for the request
        if not dbtb.cfg.enabled or any(map(request.path.startswith, dbtb.cfg.exclude)):
            return (yield from handler(request))
        # Only serve the toolbar to configured client networks.
        remote_host, remote_port = request.transport.get_extra_info('peername')
        for host in dbtb.cfg.hosts:
            if ip.ip_address(remote_host) in ip.ip_network(host):
                break
        else:
            return (yield from handler(request))
        # Initialize a debugstate for the request
        state = DebugState(app, request)
        dbtb.history[state.id] = state
        context_switcher = state.wrap_handler(handler)
        # Make response
        try:
            response = yield from context_switcher(handler(request))
            state.status = response.status
        except HTTPException as exc:
            # HTTP exceptions are legitimate responses, not failures.
            response = exc
            state.status = response.status
        except Exception as exc:
            # Store traceback for unhandled exception
            state.status = 500
            if not dbtb.cfg.intercept_exc:
                raise
            tb = get_traceback(
                info=sys.exc_info(), skip=1, show_hidden_frames=False,
                ignore_system_exceptions=True, exc=exc)
            # Keep the traceback and its frames so the debugger views can
            # render and evaluate against them later.
            dbtb.exceptions[tb.id] = request['pdbt_tb'] = tb
            for frame in tb.frames:
                dbtb.frames[id(frame)] = frame
            response = Response(text=tb.render_full(request), content_type='text/html')
        # Intercept http redirect codes and display an html page with a link to the target.
        if dbtb.cfg.intercept_redirects and response.status in REDIRECT_CODES \
                and 'Location' in response.headers:
            response = yield from app.ps.jinja2.render(
                'debugtoolbar/redirect.html', response=response)
            response = Response(text=response, content_type='text/html')
        yield from state.process_response(response)
        # Only inject the toolbar into HTML responses that have a </body> tag.
        if isinstance(response, Response) and response.content_type == 'text/html' and \
                RE_BODY.search(response.body):
            return (yield from dbtb.inject(state, response))
        return response

    return debugtoolbar_middleware
class Plugin(BasePlugin):
"""The plugin implementation."""
name = 'debugtoolbar'
defaults = {
'enabled': True,
'hosts': ['127.0.0.1'],
'prefix': '/_debug',
'intercept_exc': 'debug', # debug/display/False,
'intercept_redirects': True,
'exclude': [],
'panels': [
panels.HeaderDebugPanel,
panels.RequestVarsDebugPanel,
panels.LoggingDebugPanel,
panels.TracebackDebugPanel,
],
'additional_panels': [],
'global_panels': [
panels.RoutesDebugPanel,
panels.ConfigurationDebugPanel,
panels.MiddlewaresDebugPanel,
panels.VersionsDebugPanel,
]
}
def setup(self, app):
"""Setup the plugin and prepare application."""
super(Plugin, self).setup(app)
if 'jinja2' not in app.plugins:
raise PluginException('The plugin requires Muffin-Jinja2 plugin installed.')
self.cfg.prefix = self.cfg.prefix.rstrip('/') + '/'
self.cfg.exclude.append(self.cfg.prefix)
# Setup debugtoolbar templates
app.ps.jinja2.cfg.template_folders.append(op.join(PLUGIN_ROOT, 'templates'))
self.cfg.panels += list(self.cfg.additional_panels)
panels_ = []
for panel in self.cfg.panels:
if isinstance(panel, str):
mod, _, panel = panel.partition(':')
mod = importlib.import_module(mod)
panel = eval(panel or 'DebugPanel', mod.__dict__)
panels_.append(panel)
self.cfg.panels = panels_
# Setup debugtoolbar static files
app.router.register_route(StaticRoute(
'debugtoolbar.static',
self.cfg.prefix + 'static/',
op.join(PLUGIN_ROOT, 'static')))
app.register(self.cfg.prefix + 'sse', name='debugtoolbar.sse')(self.sse)
app.register(
self.cfg.prefix + 'exception', name='debugtoolbar.exception')(self.exception)
app.register(
self.cfg.prefix + 'execute', name='debugtoolbar.execute')(self.execute)
app.register(
self.cfg.prefix + 'source', name='debugtoolbar.source')(self.source)
app.register(
self.cfg.prefix.rstrip('/'),
self.cfg.prefix,
self.cfg.prefix + '{request_id}', name='debugtoolbar.request')(self.view)
app['debugtoolbar'] = {}
app['debugtoolbar']['pdbt_token'] = uuid.uuid4().hex
self.history = app['debugtoolbar']['history'] = utils.History(50)
self.exceptions = app['debugtoolbar']['exceptions'] = utils.History(50)
self.frames = app['debugtoolbar']['frames'] = utils.History(100)
@asyncio.coroutine
def start(self, app):
""" Start application. """
app.middlewares.insert(0, debugtoolbar_middleware_factory)
self.global_panels = [Panel(self.app) for Panel in self.cfg.global_panels]
@asyncio.coroutine
def inject(self, state, response):
""" Inject Debug Toolbar code to response body. """
html = yield from self.app.ps.jinja2.render(
'debugtoolbar/inject.html',
static_path=self.cfg.prefix + 'static',
toolbar_url=self.cfg.prefix + state.id,
)
html = html.encode(state.request.charset or 'utf-8')
response.body = RE_BODY.sub(html + b'</body>', response.body)
return response
@asyncio.coroutine
def view(self, request):
""" Debug Toolbar. """
auth = yield from self.authorize(request)
if not auth:
raise HTTPForbidden()
request_id = request.match_info.get('request_id')
state = self.history.get(request_id, None)
response = yield from self.app.ps.jinja2.render(
'debugtoolbar/toolbar.html',
debugtoolbar=self,
state=state,
static_path=self.cfg.prefix + 'static',
panels=state and state.panels or [],
global_panels=self.global_panels,
request=state and state.request or None,
)
return Response(text=response, content_type='text/html')
@asyncio.coroutine
def authorize(self, request): # noqa
"""Default authorization."""
return True
def authorization(self, func):
"""Define a authorization handler.
::
debugtoolbar = muffin_debugtoolbar.Plugin()
debugtoolbar.setup(app)
@debugtoolbar.authorization
def current_user_is_logged(request):
user = yield from load_session(request)
return user
"""
self.authorize = to_coroutine(func)
return func
@asyncio.coroutine
def sse(self, request):
"""SSE."""
response = Response(status=200)
response.content_type = 'text/event-stream'
response.text = ''
active_request_id = request.GET.get('request_id')
client_last_request_id = str(request.headers.get('Last-Event-Id', 0))
if self.history:
last_request_id = next(reversed(self.history))
if not last_request_id == client_last_request_id:
data = []
for _id in reversed(self.history):
data.append([
_id, self.history[_id].json, 'active' if active_request_id == _id else ''])
if data:
response.text = U_SSE_PAYLOAD.format(last_request_id, json.dumps(data))
return response
def validate_pdtb_token(self, request):
token = request.GET.get('token')
if self.exceptions is None:
raise HTTPBadRequest(text='No exception history')
if not token:
raise HTTPBadRequest(text='No token in request')
if token != request.app['debugtoolbar']['pdbt_token']:
raise HTTPBadRequest(text='Bad token in request')
def get_frame(self, request):
frame = int(request.GET.get('frm'), 0)
if not frame or frame not in self.frames:
return HTTPBadRequest()
return self.frames[frame]
@asyncio.coroutine
def exception(self, request):
self.validate_pdtb_token(request)
tb = int(request.GET.get('tb', 0))
if not tb or tb not in self.exceptions:
raise HTTPBadRequest()
tb = self.exceptions[tb]
return Response(text=tb.render_full(request), content_type='text/html')
@asyncio.coroutine
def execute(self, request):
self.validate_pdtb_token(request)
if not self.cfg.intercept_exc == 'debug':
raise HTTPBadRequest()
cmd = request.GET.get('cmd')
if not cmd:
raise HTTPBadRequest()
frame = self.get_frame(request)
result = frame.console.eval(cmd)
return Response(text=result, content_type='text/html')
@asyncio.coroutine
def source(self, request):
self.validate_pdtb_token(request)
frame = self.get_frame(request)
return Response(text=frame.render_source(), content_type='text/html')
class DebugState:

    """Per-request debug state: panels, response status and post-processing."""

    def __init__(self, app, request):
        """Store the request and instantiate the configured panels."""
        self.request = request
        self.status = 200
        # One panel instance per configured panel class, bound to this request.
        self.panels = [Panel(app, request) for Panel in app.ps.debugtoolbar.cfg.panels]

    @property
    def id(self):
        """Return state ID (the object's identity as a string)."""
        return str(id(self))

    @property
    def json(self):
        """Return a JSON-serializable summary of the tracked request."""
        return {'method': self.request.method,
                'path': self.request.path,
                'scheme': 'http',
                'status_code': self.status}

    def wrap_handler(self, handler):
        """Let every panel wrap the request handler; return the switcher."""
        context_switcher = utils.ContextSwitcher()
        for panel in self.panels:
            panel.wrap_handler(handler, context_switcher)
        return context_switcher

    @asyncio.coroutine
    def process_response(self, response):
        """Give every panel a chance to post-process the response."""
        for panel in self.panels:
            yield from panel.process_response(response)
| 1,552 | 0 | 159 |
0c7e1e059eedf931f70c49d679202f99a195fd40 | 8,200 | py | Python | Operations/PH_Walker.py | ClarkLabUVA/hctsa-py | 4382a7e852d21cdfefdac1a4a09ea6e11abd9be1 | [
"MIT"
] | 6 | 2020-08-14T00:16:19.000Z | 2022-01-20T05:49:12.000Z | Operations/PH_Walker.py | fairscape/hctsa-py | 4382a7e852d21cdfefdac1a4a09ea6e11abd9be1 | [
"MIT"
] | null | null | null | Operations/PH_Walker.py | fairscape/hctsa-py | 4382a7e852d21cdfefdac1a4a09ea6e11abd9be1 | [
"MIT"
] | 4 | 2020-08-14T00:22:45.000Z | 2021-02-18T05:31:14.000Z |
import numpy as np
from scipy import stats
import statsmodels.sandbox.stats.runs as runs
# 18/21 output statistics fully implemented from MATLAB, the other three are either from complex helper functions or MATLAB functions that don't transfer well
def PH_Walker(y, walkerRule='prop', walkerParams=np.array([])):
    """
    PH_Walker simulates a hypothetical walker moving through the time domain.

    The hypothetical particle (or 'walker') moves in response to values of the
    time series at each point. Outputs are summaries of the walker's motion
    and comparisons of it to the original time series.

    :param y: the input time series
    :param walkerRule: the kinematic rule by which the walker moves:
        (i) 'prop': narrows the gap between its value and the time series
            by a proportion p. walkerParams = [p]
        (ii) 'biasprop': biased to move more in one direction; narrows the
            gap by p_up when pushed up and p_down when pushed down.
            walkerParams = [pup, pdown]
        (iii) 'momentum': moves as if it has mass m and inertia, with the
            time series acting as a force. walkerParams = [m]
        (iv) 'runningvar': moves with inertia as above, but values are also
            rescaled to match the local variance of the time series.
            walkerParams = [m, wl] (inertial mass, window length)
    :param walkerParams: the parameters for the specified walker, see above
    :return: dict of statistics on the walker's trajectory, comparisons with
        the original series (including an Ansari-Bradley test), and summaries
        of the residuals between walker and series.
    :raises ValueError: for an unknown walkerRule.
    """
    N = len(y)

    # ------------------------------------------------------------------
    # CHECK INPUTS: fill in defaults only when no parameters were given.
    # (BUG FIX: the previous version unconditionally overwrote any
    # user-supplied parameters for the 'runningvar' rule.)
    # ------------------------------------------------------------------
    if len(walkerParams) == 0:
        if walkerRule == 'prop':
            walkerParams = np.array([0.5])
        elif walkerRule == 'biasprop':
            walkerParams = np.array([0.1, 0.2])
        elif walkerRule == 'momentum':
            walkerParams = np.array([2])
        elif walkerRule == 'runningvar':
            walkerParams = [1.5, 50]

    # ------------------------------------------------------------------
    # (1) WALK: simulate the walker's trajectory w.
    # ------------------------------------------------------------------
    w = np.zeros(N)
    if walkerRule == 'prop':
        # Narrow the gap to the previous signal value by proportion p.
        if isinstance(walkerParams, list):
            walkerParams = walkerParams[0]
        p = walkerParams
        w[0] = 0
        for i in range(1, N):
            w[i] = w[i-1] + p*(y[i-1]-w[i-1])
    elif walkerRule == 'biasprop':
        # Biased walker: different proportions for upward/downward pushes.
        pup = walkerParams[0]
        # BUG FIX: pdown previously read walkerParams[0] (same as pup),
        # contradicting the documented [pup, pdown] parameterization.
        pdown = walkerParams[1]
        w[0] = 0
        for i in range(1, N):
            if y[i] > y[i-1]:
                w[i] = w[i-1] + pup*(y[i-1]-w[i-1])
            else:
                w[i] = w[i-1] + pdown*(y[i-1]-w[i-1])
    elif walkerRule == 'momentum':
        # Newtonian walker: inertia from the previous step, with the time
        # series acting as a force; m is the inertial mass.
        m = walkerParams[0]
        w[0] = y[0]
        w[1] = y[1]
        for i in range(2, N):
            w_inert = w[i-1] + (w[i-1]-w[i-2])
            w[i] = w_inert + (y[i] - w_inert)/m  # dissipative term
    elif walkerRule == 'runningvar':
        # As 'momentum', but rescaled to track the series' local std over
        # a trailing window of length wl.
        m = walkerParams[0]
        wl = walkerParams[1]
        w[0] = y[0]
        w[1] = y[1]
        for i in range(2, N):
            w_inert = w[i-1] + (w[i-1]-w[i-2])
            w_mom = w_inert + (y[i] - w_inert)/m
            if i > wl:
                w[i] = w_mom * (np.std(y[(i-wl):i]))/np.std(w[(i-wl):i])
            else:
                w[i] = w_mom
    else:
        # BUG FIX: previously printed a message and went on to compute
        # meaningless statistics on an all-zero trajectory.
        raise ValueError(
            "Unknown method: " + str(walkerRule) + " for simulating walker on the time series")

    # ------------------------------------------------------------------
    # (2) STATISTICS ON THE WALK
    # ------------------------------------------------------------------
    out = {}  # dictionary for storing outputs

    # (i) The walk itself
    out['w_mean'] = np.mean(w)
    out['w_median'] = np.median(w)
    out['w_std'] = np.std(w)
    out['w_ac1'] = CO_AutoCorr(w, 1, method='timedomainstat')
    out['w_ac2'] = CO_AutoCorr(w, 2, method='timedomainstat')
    out['w_tau'] = CO_FirstZero(w, 'ac')
    out['w_min'] = np.min(w)
    out['w_max'] = np.max(w)
    # Proportion of sign changes of the walk itself.
    # BUG FIX: previously dropped the last consecutive pair (off-by-one),
    # inconsistent with sw_propcross below.
    out['propzcross'] = sum(np.multiply(w[:-1], w[1:]) < 0) / (N-1)

    # (ii) Differences between the walk and the signal
    out['sw_meanabsdiff'] = np.mean(np.abs(y-w))
    out['sw_taudiff'] = CO_FirstZero(y, 'ac') - CO_FirstZero(w, 'ac')
    out['sw_stdrat'] = np.std(w)/np.std(y)  # same as w_std for z-scored input
    out['sw_ac1rat'] = out['w_ac1']/CO_AutoCorr(y, 1)
    out['sw_minrat'] = min(w)/min(y)
    out['sw_maxrat'] = max(w)/max(y)
    # Proportion of crossings between walker and signal.
    out['sw_propcross'] = sum(
        np.multiply(w[0:(len(w)-1)] - y[0:(len(y)-1)], w[1:(len(w))]-y[1:(len(y))]) < 0) / (N-1)
    # Ansari-Bradley test comparing the dispersion of the two distributions.
    ansari = stats.ansari(w, y)
    out['sw_ansarib_pval'] = ansari[1]

    # (iii) Residuals between time series and walker
    res = w-y
    # NOTE: key name 'res_acl' (lowercase L) is kept for backward
    # compatibility; it stores the lag-1 autocorrelation of the residuals.
    out['res_acl'] = CO_AutoCorr(res, lag=1)

    return out
| 41.836735 | 216 | 0.517561 |
import numpy as np
from scipy import stats
import statsmodels.sandbox.stats.runs as runs
# 18/21 output statistics fully implemented from MATLAB, the other three are either from complex helper functions or MATLAB functions that don't transfer well
def PH_Walker(y, walkerRule='prop', walkerParams=np.array([])):
"""
PH_Walker simulates a hypothetical walker moving through the time domain
the hypothetical particle (or 'walker') moves in response to values of the time series at each point
Outputs from this operation are summaries of the walkers motion, and comparisons of it to the original time series
:param y: the input time series
:param walkerRule: the kinematic rule by which the walker moves in response to the time series over time
(i) 'prop': the walker narrows the gap between its value and that of the time series by a given proportion p
(ii) 'biasprop': the walker is biased to move more in one direction; when it is being pushed up by the time
series, it narrows the gap by a proportion p_{up}, and when it is being pushed down by the
time series it narrows the gap by a (potentially different) proportion p_{down}. walkerParams = [pup,pdown]
(iii) 'momentum': the walker moves as if it has mass m and inertia
from the previous time step and the time series acts
as a force altering its motion in a classical
Newtonian dynamics framework. [walkerParams = m], the mass.
(iv) 'runningvar': the walker moves with inertia as above, but
its values are also adjusted so as to match the local
variance of time series by a multiplicative factor.
walkerParams = [m,wl], where m is the inertial mass and wl
is the window length.
:param walkerParams: the parameters for the specified walker, explained above
:return: include the mean, spread, maximum, minimum, and autocorrelation of
the walker's trajectory, the number of crossings between the walker and the
original time series, the ratio or difference of some basic summary statistics
between the original time series and the walker, an Ansari-Bradley test
comparing the distributions of the walker and original time series, and
various statistics summarizing properties of the residuals between the
walker's trajectory and the original time series.
"""
# ----------------------------------------------------------------------------------------------------------------------------------
# PRELIMINARIES
#----------------------------------------------------------------------------------------------------------------------------------
N = len(y)
#----------------------------------------------------------------------------------------------------------------------------------
# CHECK INPUTS
#----------------------------------------------------------------------------------------------------------------------------------
if walkerRule == 'runningvar':
walkerParams = [1.5, 50]
if (len(walkerParams) == 0):
if walkerRule == 'prop':
walkerParams = np.array([0.5])
if walkerRule == 'biasprop':
walkerParams = np.array([0.1, 0.2])
if walkerRule == 'momentum':
walkerParams = np.array([2])
if walkerRule == 'runningvar':
walkerParams = [1.5, 50]
#----------------------------------------------------------------------------------------------------------------------------------
# (1) WALK
#----------------------------------------------------------------------------------------------------------------------------------
w = np.zeros(N)
if walkerRule == 'prop':
# walker starts at zero and narrows the gap between its position
# and the time series value at that point by the proportion given
# in walkerParams, to give the value at the subsequent time step
if isinstance(walkerParams,list):
walkerParams = walkerParams[0]
p = walkerParams
w[0] = 0
for i in range(1, N):
w[i] = w[i-1] + p*(y[i-1]-w[i-1])
elif walkerRule == 'biasprop':
# walker is biased in one or the other direction (i.e., prefers to
# go up, or down). Requires a vector of inputs: [p_up, p_down]
pup = walkerParams[0]
pdown = walkerParams[0]
w[0] = 0
for i in range (1, N):
if y[i] > y[i-1]:
w[i] = w[i-1] + pup*(y[i-1]-w[i-1])
else :
w[i] = w[i-1] + pdown*(y[i-1]-w[i-1])
elif walkerRule == 'momentum':
# walker moves as if it had inertia from the previous time step,
# i.e., it 'wants' to move the same amount; the time series acts as
# a force changing its motion
m = walkerParams[0] # inertial mass
w[0] = y[0]
w[1] = y[1]
for i in range(2, N):
w_inert = w[i-1] + (w[i-1]-w[i-2])
w[i] = w_inert + (y[i] - w_inert)/m # dissipative term
#equation of motion (s-s_0 = ut + F/m*t^2)
#where the 'force' is F is the change in the original time series at the point
elif walkerRule == 'runningvar':
m = walkerParams[0]
wl = walkerParams[1]
w[0] = y[0]
w[1] = y[1]
for i in range(2, N):
w_inert = w[i-1] + (w[i-1]-w[i-2])
w_mom = w_inert + (y[i] - w_inert)/m #dissipative term from time series
if i > wl:
w[i] = w_mom * (np.std(y[(i-wl):i]))/np.std(w[(i-wl):i])
else:
w[i] = w_mom
else :
print("Error: Unknown method: " + walkerRule + " for simulating walker on the time series")
#----------------------------------------------------------------------------------------------------------------------------------
# (2) STATISITICS ON THE WALK
#----------------------------------------------------------------------------------------------------------------------------------
out = {} # dictionary for storing variables
# (i) The walk itself -------------------------------------------------------------------------------------------
out['w_mean'] = np.mean(w)
out['w_median'] = np.median(w)
out['w_std'] = np.std(w)
out['w_ac1'] = CO_AutoCorr(w, 1, method='timedomainstat') # this function call in MATLAB uses method='Fourier', but we don't have that case implemented yet in autoCorr, however this seems to output the same thing
out['w_ac2'] = CO_AutoCorr(w, 2, method='timedomainstat')
out['w_tau'] = CO_FirstZero(w, 'ac')
out['w_min'] = np.min(w)
out['w_max'] = np.max(w)
out['propzcross'] = sum( np.multiply( w[0:(len(w)-2)], w[1:(len(w)-1)] ) < 0) / (N-1) # np.multiply performs elementwise multiplication like matlab .*
# differences between the walk at signal
# (ii) Differences between the walk at signal -------------------------------------------------------------------
out['sw_meanabsdiff'] = np.mean(np.abs(y-w))
out['sw_taudiff'] = CO_FirstZero(y, 'ac') - CO_FirstZero(w, 'ac')
out['sw_stdrat'] = np.std(w)/np.std(y) # will be thse same as w_std for z-scored signal
out['sw_ac1rat'] = out['w_ac1']/CO_AutoCorr(y, 1)
out['sw_minrat'] = min(w)/min(y)
out['sw_maxrat'] = max(w)/max(y)
out['sw_propcross'] = sum(np.multiply( w[0:(len(w)-1)] - y[0:(len(y)-1)] , w[1:(len(w))]-y[1:(len(y))]) < 0 )/(N-1) #np.multiply performs elementwise multiplication like matlab .*
ansari = stats.ansari(w, y)
out['sw_ansarib_pval'] = ansari[1]
# r = np.linspace( np.min(np.min(y), np.min(w)), np.max(np.max(y), np.max(w)), 200 )
# dy = stats.gaussian_kde(y, r)
# (iii) looking at residuals between time series and walker
res = w-y
# CLOSEST FUNCTION TO MATLAB RUNSTEST, found in statsmodels.sandbox.stats.runs
# runstest = runs.runstest_2samp(res, groups=2)
# out['res_runstest'] = runstest
out['res_acl'] = CO_AutoCorr(res, lag=1)
return out
| 0 | 0 | 0 |
b15d0d88548bec009f72b3c26a79df2dbb7cf9c5 | 790 | py | Python | modules/crawler/Utilities/helpers.py | kivzcu/heatmap.zcu | 526c4df9c1c299eb1b3e9df6bd2be5578d462405 | [
"MIT"
] | null | null | null | modules/crawler/Utilities/helpers.py | kivzcu/heatmap.zcu | 526c4df9c1c299eb1b3e9df6bd2be5578d462405 | [
"MIT"
] | null | null | null | modules/crawler/Utilities/helpers.py | kivzcu/heatmap.zcu | 526c4df9c1c299eb1b3e9df6bd2be5578d462405 | [
"MIT"
] | null | null | null | from typing import Dict
SKIP = "SKIP"
UNKNOWN = "UNKNOWN!"
def detect_change(first: Dict[str, str], second: Dict[str, str],
                  compareKeys: [str]) -> bool:
    """Report whether two dictionaries differ on any of the given keys.

    A key counts as a change when it is absent from either dictionary, or
    when the two dictionaries map it to different values.

    Args:
        first (Dict[str, str]): First dictionary
        second (Dict[str, str]): Second dictionary
        compareKeys ([type]): Keys to handle comparison

    Returns:
        bool: Is there a change ?
    """
    return any(
        key not in first or key not in second or first[key] != second[key]
        for key in compareKeys
    )
| 26.333333 | 64 | 0.588608 | from typing import Dict
SKIP = "SKIP"
UNKNOWN = "UNKNOWN!"
def should_skip(device: Dict[str, str]) -> bool:
    """Return True when the device lacks usable coordinates.

    A device is skipped when either of its 'x'/'y' coordinates carries the
    module-level SKIP or UNKNOWN sentinel value.
    """
    return device['x'] == SKIP or device['y'] == SKIP or device[
        'x'] == UNKNOWN or device['y'] == UNKNOWN
def detect_change(first: Dict[str, str], second: Dict[str, str],
compareKeys: [str]) -> bool:
"""Detects change between two dictonaries
Args:
first (Dict[str, str]): First dictionary
second (Dict[str, str]): Second dictionary
compareKeys ([type]): Keys to handle comparison
Returns:
bool: Is there a change ?
"""
for key in compareKeys:
if key not in second or key not in first:
return True
if first[key] != second[key]:
return True
return False
| 142 | 0 | 23 |
371b5d6ccd471e0f2f61a35d10fdcaf0c575c962 | 36 | py | Python | src/backend/training/__init__.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | src/backend/training/__init__.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | src/backend/training/__init__.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | [
"MIT"
] | null | null | null | from . helpers import get_timestamp
| 18 | 35 | 0.833333 | from . helpers import get_timestamp
| 0 | 0 | 0 |
5f5d51144aed675324b53158ca76f498ab1e3b36 | 1,197 | py | Python | cogs/cogsmgmt.py | jcoffm/ConciergeBot | fa33a12abebef1d7a6302b3c5ac5ce1e050af7d5 | [
"MIT"
] | null | null | null | cogs/cogsmgmt.py | jcoffm/ConciergeBot | fa33a12abebef1d7a6302b3c5ac5ce1e050af7d5 | [
"MIT"
] | null | null | null | cogs/cogsmgmt.py | jcoffm/ConciergeBot | fa33a12abebef1d7a6302b3c5ac5ce1e050af7d5 | [
"MIT"
] | null | null | null | from discord.ext import commands
| 25.468085 | 62 | 0.606516 | from discord.ext import commands
class CogManagement(commands.Cog):
    """Commands for loading, unloading and reloading bot cogs at runtime."""

    def __init__(self, bot):
        """Remember the bot so its extensions can be managed later."""
        self.bot = bot

    async def _manage_cog(self, ctx, verb, action):
        """Run *action* on the cog named in the message, reporting progress.

        The cog name is taken from the second whitespace-separated token of
        the invoking message (e.g. ``!cogreload music`` -> ``music``).
        The three public commands previously duplicated this body verbatim.
        """
        cog_name = ctx.message.content.split(" ")[1]
        print(f"{verb} cog {cog_name}...")
        await ctx.channel.send(f"{verb} cog {cog_name}...")
        action(f"cogs.{cog_name}")
        print("Done.")
        await ctx.channel.send("Done.")

    @commands.command()
    async def cogreload(self, ctx):
        """Reload an already-loaded cog in place."""
        await self._manage_cog(ctx, "Reloading", self.bot.reload_extension)

    @commands.command()
    async def cogload(self, ctx):
        """Load a cog that is not currently loaded."""
        await self._manage_cog(ctx, "Loading", self.bot.load_extension)

    @commands.command()
    async def cogunload(self, ctx):
        """Unload a currently loaded cog."""
        await self._manage_cog(ctx, "Unloading", self.bot.unload_extension)
def setup(bot):
    """Extension entry point: attach the cog-management cog to the bot."""
    bot.add_cog(CogManagement(bot))
| 924 | 192 | 46 |
5b06d060ba415fd61bd539d1de87749c0e6359bb | 883 | py | Python | collect_pong.py | Loptt/pong-autoencoder | c5123bb2466f644f7513d807044e11a4a17aae22 | [
"MIT"
] | null | null | null | collect_pong.py | Loptt/pong-autoencoder | c5123bb2466f644f7513d807044e11a4a17aae22 | [
"MIT"
] | 1 | 2021-03-19T03:06:46.000Z | 2021-08-02T13:29:05.000Z | collect_pong.py | Loptt/pong-autoencoder | c5123bb2466f644f7513d807044e11a4a17aae22 | [
"MIT"
] | null | null | null | import gym
import numpy as np
from PIL import Image
import sys
# Collect Pong frames as PNG images by running a random-action agent.
# Usage: collect_pong <games> <start_point>
env = gym.make('Pong-v0')
env.reset()
done = False
i = 0  # running image index; starts at <start_point> so runs can be appended
start = 0
if len(sys.argv) < 3:
    print("Usage: collect_pong <games> <start_point>")
    exit()
try:
    games = int(sys.argv[1])
    start = int(sys.argv[2])
    i = start
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # intended target is ValueError from int() -- confirm before narrowing.
    print("Please provide a valid number for games and start point.")
    exit()
for _ in range(games):
    count = 0
    while not done:
        # Random policy: sample an action, collect the rendered observation.
        o, r, done, info = env.step(env.action_space.sample())
        count += 1
        # Ignore first 25 frames of the game, since the games starts after this amount.
        if count < 25:
            continue
        img = Image.fromarray(o)
        # Assumes an existing 'images/' directory -- save() fails otherwise.
        img.save("images/pong_" + str(i) + ".png")
        i += 1
    done = False
    env.reset()
print("Saved {} images.".format(i-start))
print("Total images: {}".format(i))
env.close()
| 21.02381 | 87 | 0.596829 | import gym
import numpy as np
from PIL import Image
import sys
env = gym.make('Pong-v0')
env.reset()
done = False
i = 0
start = 0
if len(sys.argv) < 3:
print("Usage: collect_pong <games> <start_point>")
exit()
try:
games = int(sys.argv[1])
start = int(sys.argv[2])
i = start
except:
print("Please provide a valid number for games and start point.")
exit()
for _ in range(games):
count = 0
while not done:
o, r, done, info = env.step(env.action_space.sample())
count += 1
# Ignore first 25 frames of the game, since the games starts after this amount.
if count < 25:
continue
img = Image.fromarray(o)
img.save("images/pong_" + str(i) + ".png")
i += 1
done = False
env.reset()
print("Saved {} images.".format(i-start))
print("Total images: {}".format(i))
env.close()
| 0 | 0 | 0 |
1177ec39443f0c4afc68b0f4bb00985b22bbdd21 | 887 | py | Python | python/bubblesort.py | gmoise/sandbox | 1ad01904448b19cec5f9ad89e698b59bc11150ed | [
"MIT"
] | null | null | null | python/bubblesort.py | gmoise/sandbox | 1ad01904448b19cec5f9ad89e698b59bc11150ed | [
"MIT"
] | null | null | null | python/bubblesort.py | gmoise/sandbox | 1ad01904448b19cec5f9ad89e698b59bc11150ed | [
"MIT"
import random

# Draw nine pseudo-random integers in [0, 8].
mylist = [random.randrange(0, 9) for somethin in range(1, 10)]
print(mylist)

last_index = len(mylist)
print("length of mylist is:", len(mylist))
print("first element is:", mylist[0])
print("last element is:", mylist[len(mylist) - 1])

# Bubble sort: sweep adjacent pairs repeatedly; stop after the first full
# pass that performs no swaps.
is_mylist_sorted = False
number_of_switches = 0
while not is_mylist_sorted:
    for pos in range(last_index - 1):
        if mylist[pos] > mylist[pos + 1]:
            # Tuple assignment swaps the out-of-order neighbours in place.
            mylist[pos], mylist[pos + 1] = mylist[pos + 1], mylist[pos]
            number_of_switches += 1
    if number_of_switches == 0:
        is_mylist_sorted = True
    else:
        number_of_switches = 0

print("finished")
print("is my list sorted?", is_mylist_sorted)
print("my list", mylist)
| 16.425926 | 48 | 0.620068 | import random
mylist = []
for somethin in range(1,10):
x = random.randrange(0,9)
mylist.append(x)
print(mylist)
last_index=len(mylist)
print ("length of mylist is:",len(mylist))
print ("first element is:",mylist[0])
print ("last element is:",mylist[len(mylist)-1])
#is mylist sorted?
is_mylist_sorted = False
x=0
y=1
intermediate=None
#how many switches?
number_of_switches = 0
#bubble sort
while not is_mylist_sorted:
if mylist[x] > mylist[y]:
intermediate=mylist[x]
mylist[x]=mylist[y]
mylist[y]=intermediate
number_of_switches+=1
x+=1
y+=1
if y==last_index:
x=0
y=1
if number_of_switches==0:
is_mylist_sorted = True
else:
number_of_switches = 0
print("finished")
print("is my list sorted?",is_mylist_sorted)
print("my list",mylist)
| 0 | 0 | 0 |
c793722d0b378ee7db2b64de100d0736b9b47fda | 1,117 | py | Python | setup.py | stacy0416/afs2-datasource | 25d498fc56eb142f2e97ea2d274d534245a4c301 | [
"Apache-2.0"
] | 4 | 2019-07-19T05:40:37.000Z | 2021-03-31T05:49:30.000Z | setup.py | stacy0416/afs2-datasource | 25d498fc56eb142f2e97ea2d274d534245a4c301 | [
"Apache-2.0"
] | 1 | 2019-08-13T18:27:40.000Z | 2019-08-13T18:27:40.000Z | setup.py | stacy0416/afs2-datasource | 25d498fc56eb142f2e97ea2d274d534245a4c301 | [
"Apache-2.0"
import os
import setuptools

try:  # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:  # for pip <= 9.0.3
    from pip.req import parse_requirements

# Runtime dependencies come from requirements.txt next to this file.
requirements_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
# FIX: newer pip returns a generator -- materialize it once before reuse.
install_requires = list(parse_requirements(requirements_path, session='hack'))
# FIX: pip < 20 exposes each parsed entry as `ir.req`; pip >= 20 renamed it
# to the plain string `ir.requirement`. Support both layouts.
install_requires = [
    str(ir.req) if hasattr(ir, 'req') else str(ir.requirement)
    for ir in install_requires
]

with open(os.path.join(os.path.dirname(__file__), 'VERSION'), 'r') as f:
    version = f.read()

with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
    long_description = f.read()

setuptools.setup(
    name='afs2-datasource',
    version=version,
    description='For AFS developer to access Datasource',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='WISE-PaaS/AFS',
    author_email='stacy.yeh@advantech.com.tw',
    packages=setuptools.find_packages(),
    install_requires=install_requires,
    keywords=['AFS'],
    license='Apache License 2.0',
    url='https://github.com/stacy0416/afs2-datasource'
)
# python setup.py bdist_wheel | 33.848485 | 80 | 0.733214 | import os
import setuptools
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
requirements_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
install_requires = parse_requirements(requirements_path, session='hack')
install_requires = [str(ir.req) for ir in install_requires]
with open(os.path.join(os.path.dirname(__file__), 'VERSION'), 'r') as f:
version = f.read()
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
long_description = f.read()
setuptools.setup(
name='afs2-datasource',
version=version,
description='For AFS developer to access Datasource',
long_description=long_description,
long_description_content_type='text/markdown',
author='WISE-PaaS/AFS',
author_email='stacy.yeh@advantech.com.tw',
packages=setuptools.find_packages(),
install_requires=install_requires,
keywords=['AFS'],
license='Apache License 2.0',
url='https://github.com/stacy0416/afs2-datasource'
)
# python setup.py bdist_wheel | 0 | 0 | 0 |
1d241f184661fa37b57913861e9fb9b2387cc4b1 | 5,168 | py | Python | predict.py | jaym096/FIFA-WorldCup-Match-Predictor | 4b5b6fbc91391133f595cba4f650736eea81d2e0 | [
"MIT"
] | null | null | null | predict.py | jaym096/FIFA-WorldCup-Match-Predictor | 4b5b6fbc91391133f595cba4f650736eea81d2e0 | [
"MIT"
] | null | null | null | predict.py | jaym096/FIFA-WorldCup-Match-Predictor | 4b5b6fbc91391133f595cba4f650736eea81d2e0 | [
"MIT"
] | null | null | null | import pandas as pd
#============== First Round ===================#
#===============================================#
#============== Other Rounds ===================#
#===============================================#
| 42.710744 | 124 | 0.614551 | import pandas as pd
#============== First Round ===================#
def predict_firstRound(ranking, final_dataset, logreg):
    """Predicts and prints the outcome of every group-stage fixture.

    Args:
        ranking: DataFrame with 'Team' and 'Position' (FIFA ranking) columns.
        final_dataset: training DataFrame whose columns define the
            dummy-encoded feature layout the model expects (includes the
            'winning_team' target column).
        logreg: fitted classifier exposing predict / predict_proba.
    """
    # Obtained from https://fixturedownload.com/results/fifa-world-cup-2018
    fixtures = pd.read_csv('data/fixtures.csv')
    #List for storing the group stage games
    pred_set = []
    # Add columns with ranking position of each team
    fixtures.insert(1, 'first_position', fixtures['Home Team'].map(ranking.set_index('Team')['Position']))
    fixtures.insert(2, 'second_position', fixtures['Away Team'].map(ranking.set_index('Team')['Position']))
    # We only need the group stage games, so we have to slice the dataset
    fixtures = fixtures.iloc[:48, :]
    # Loop to add teams to new prediction dataset based on the ranking position of each team
    # (the better-ranked side -- lower Position number -- takes the 'home_team' slot).
    for index, row in fixtures.iterrows():
        if row['first_position'] < row['second_position']:
            pred_set.append({'home_team': row['Home Team'], 'away_team': row['Away Team'], 'winning_team': None})
        else:
            pred_set.append({'home_team': row['Away Team'], 'away_team': row['Home Team'], 'winning_team': None})
    pred_set = pd.DataFrame(pred_set)
    # Keep the readable team names; pred_set itself is dummy-encoded below.
    backup_pred_set = pred_set
    # Get dummy variables and drop winning_team column
    pred_set = pd.get_dummies(pred_set, prefix=['home_team', 'away_team'], columns=['home_team', 'away_team'])
    # Add missing columns compared to the model's training dataset
    missing_cols = set(final_dataset.columns) - set(pred_set.columns)
    for c in missing_cols:
        pred_set[c] = 0
    pred_set = pred_set[final_dataset.columns]
    # Remove winning team column
    pred_set = pred_set.drop(['winning_team'], axis=1)
    predictions = logreg.predict(pred_set)
    # NOTE(review): this assumes iloc[:, 1] is the home side and iloc[:, 0]
    # the away side -- the column order of a dict-built DataFrame changed
    # across pandas versions (alphabetical before 0.23, insertion order
    # after). Also, label 1 is printed here as a home win and 2 as an away
    # win, while clean_and_predict treats 2 as the home win; confirm which
    # mapping matches the training-time label encoding.
    for i in range(fixtures.shape[0]):
        print(backup_pred_set.iloc[i, 1] + " and " + backup_pred_set.iloc[i, 0])
        if predictions[i] == 1:
            print("Winner: " + backup_pred_set.iloc[i, 1])
        elif predictions[i] == 0:
            print("Tie")
        elif predictions[i] == 2:
            print("Winner: " + backup_pred_set.iloc[i, 0])
        print('Probability of ' + backup_pred_set.iloc[i, 1] + ' winning: ', '%.3f'%(logreg.predict_proba(pred_set)[i][1]))
        print('Probability of Tie: ', '%.3f'%(logreg.predict_proba(pred_set)[i][0]))
        print('Probability of ' + backup_pred_set.iloc[i, 0] + ' winning: ', '%.3f'%(logreg.predict_proba(pred_set)[i][2]))
        print("")
#===============================================#
#============== Other Rounds ===================#
def clean_and_predict(matches, ranking, final, logreg):
    """Predicts and prints the outcome of knockout-round matches.

    Args:
        matches: list of (team_a, team_b) tuples.
        ranking: DataFrame with 'Team' and 'Position' (FIFA ranking) columns.
        final: training DataFrame whose columns define the dummy-encoded
            feature layout the model expects (includes 'winning_team').
        logreg: fitted classifier exposing predict / predict_proba.
    """
    # Initialization of auxiliary list for data cleaning
    positions = []
    # Loop to retrieve each team's position according to FIFA ranking
    for match in matches:
        positions.append(ranking.loc[ranking['Team'] == match[0],'Position'].iloc[0])
        positions.append(ranking.loc[ranking['Team'] == match[1],'Position'].iloc[0])
    # Creating the DataFrame for prediction
    pred_set = []
    # Initializing iterators for while loop
    i = 0
    j = 0
    # 'i' will be the iterator for the 'positions' list, and 'j' for the list of matches (list of tuples)
    while i < len(positions):
        dict1 = {}
        # If position of first team is better, he will be the 'home' team, and vice-versa
        if positions[i] < positions[i + 1]:
            dict1.update({'home_team': matches[j][0], 'away_team': matches[j][1]})
        else:
            dict1.update({'home_team': matches[j][1], 'away_team': matches[j][0]})
        # Append updated dictionary to the list, that will later be converted into a DataFrame
        pred_set.append(dict1)
        # positions holds two entries (home/away rank) per single match.
        i += 2
        j += 1
    # Convert list into DataFrame
    pred_set = pd.DataFrame(pred_set)
    # Keep the readable team names; pred_set itself is dummy-encoded below.
    backup_pred_set = pred_set
    # Get dummy variables and drop winning_team column
    pred_set = pd.get_dummies(pred_set, prefix=['home_team', 'away_team'], columns=['home_team', 'away_team'])
    # Add missing columns compared to the model's training dataset
    missing_cols2 = set(final.columns) - set(pred_set.columns)
    for c in missing_cols2:
        pred_set[c] = 0
    pred_set = pred_set[final.columns]
    # Remove winning team column
    pred_set = pred_set.drop(['winning_team'], axis=1)
    # Predict!
    # NOTE(review): here label 2 is printed as a win for iloc[:, 1] and 0 for
    # iloc[:, 0] -- the opposite of predict_firstRound's mapping. One of the
    # two is presumably wrong; verify against the training label encoding.
    predictions = logreg.predict(pred_set)
    for i in range(len(pred_set)):
        print(backup_pred_set.iloc[i, 1] + " and " + backup_pred_set.iloc[i, 0])
        if predictions[i] == 2:
            print("Winner: " + backup_pred_set.iloc[i, 1])
        elif predictions[i] == 1:
            print("Tie")
        elif predictions[i] == 0:
            print("Winner: " + backup_pred_set.iloc[i, 0])
        print('Probability of ' + backup_pred_set.iloc[i, 1] + ' winning: ' , '%.3f'%(logreg.predict_proba(pred_set)[i][2]))
        print('Probability of Tie: ', '%.3f'%(logreg.predict_proba(pred_set)[i][1]))
        print('Probability of ' + backup_pred_set.iloc[i, 0] + ' winning: ', '%.3f'%(logreg.predict_proba(pred_set)[i][0]))
        print("")
#===============================================#
| 4,898 | 0 | 44 |
e4f38538428142f303485b2cb2db48de167944ae | 410 | py | Python | Aula_01/exercicio_03.py | Luferat/Curso_Python_Senac | a0710b25e4fedeacb1139c1c8f4e4a00c63dba73 | [
"MIT"
] | 1 | 2021-11-18T19:02:10.000Z | 2021-11-18T19:02:10.000Z | Aula_01/exercicio_03.py | Luferat/Curso_Python_Senac | a0710b25e4fedeacb1139c1c8f4e4a00c63dba73 | [
"MIT"
] | null | null | null | Aula_01/exercicio_03.py | Luferat/Curso_Python_Senac | a0710b25e4fedeacb1139c1c8f4e4a00c63dba73 | [
"MIT"
"""
Exercise 03
Ask the user to type 3 integer values and print their sum.
"""
print('Digite três números inteiros para somá-los:\n')

# Accept a comma as decimal separator ("1,5" -> "1.5"), then truncate to int.
num1 = int(float(input('Primeiro número: ').replace(',', '.')))
num2 = int(float(input('Segundo número: ').replace(',', '.')))
num3 = int(float(input('Terceiro número: ').replace(',', '.')))

# FIX: renamed `sum` to `total` so the builtin sum() is no longer shadowed.
total = num1 + num2 + num3
print(f'_____\nA soma dos valores é: {total}')
| 34.166667 | 71 | 0.658537 | """
Exercício 03
Peça ao usuário para digitar 3 valores inteiros e imprima a soma deles.
"""
print('Digite três números inteiros para somá-los:\n')
num1 = int(float(input('Primeiro número: ').replace(',', '.')))
num2 = int(float(input('Segundo número: ').replace(',', '.')))
num3 = int(float(input('Terceiro número: ').replace(',', '.')))
sum = num1 + num2 + num3
print(f'_____\nA soma dos valores é: {sum}')
| 0 | 0 | 0 |
d5e35b7eef70f9ce665c2f92c0cb10aaae53e7e9 | 4,228 | py | Python | plugins/module_utils/definitions/discovery_network_device.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/module_utils/definitions/discovery_network_device.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/module_utils/definitions/discovery_network_device.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

# Machine-generated Ansible module definition for DNA Center's
# "discovery_network_device" family: the GET operations it exposes, their
# parameters, and the response envelopes. json.loads turns the spec into
# plain dicts/lists once at import time.
module_definition = json.loads(
    """{
    "family": "discovery",
    "name": "discovery_network_device",
    "operations": {
        "get": [
            "get_discovered_network_devices_by_discovery_id",
            "get_discovered_devices_by_range",
            "get_devices_discovered_by_id",
            "get_network_devices_from_discovery"
        ]
    },
    "parameters": {
        "get_devices_discovered_by_id": [
            {
                "name": "id",
                "required": true,
                "type": "string"
            },
            {
                "name": "task_id",
                "required": false,
                "type": "string"
            },
            {
                "artificial": true,
                "name": "count",
                "required": true,
                "type": "boolean"
            }
        ],
        "get_discovered_devices_by_range": [
            {
                "name": "id",
                "required": true,
                "type": "string"
            },
            {
                "name": "records_to_return",
                "required": true,
                "type": "integer"
            },
            {
                "name": "start_index",
                "required": true,
                "type": "integer"
            },
            {
                "name": "task_id",
                "required": false,
                "type": "string"
            }
        ],
        "get_discovered_network_devices_by_discovery_id": [
            {
                "name": "id",
                "required": true,
                "type": "string"
            },
            {
                "name": "task_id",
                "required": false,
                "type": "string"
            }
        ],
        "get_network_devices_from_discovery": [
            {
                "name": "id",
                "required": true,
                "type": "string"
            },
            {
                "name": "cli_status",
                "required": false,
                "type": "string"
            },
            {
                "name": "http_status",
                "required": false,
                "type": "string"
            },
            {
                "name": "ip_address",
                "required": false,
                "type": "string"
            },
            {
                "name": "netconf_status",
                "required": false,
                "type": "string"
            },
            {
                "name": "ping_status",
                "required": false,
                "type": "string"
            },
            {
                "name": "snmp_status",
                "required": false,
                "type": "string"
            },
            {
                "name": "sort_by",
                "required": false,
                "type": "string"
            },
            {
                "name": "sort_order",
                "required": false,
                "type": "string"
            },
            {
                "name": "task_id",
                "required": false,
                "type": "string"
            },
            {
                "artificial": true,
                "name": "summary",
                "required": true,
                "type": "boolean"
            }
        ]
    },
    "responses": {
        "get_devices_discovered_by_id": {
            "properties": [
                "response",
                "version"
            ],
            "type": "object"
        },
        "get_discovered_devices_by_range": {
            "properties": [
                "response",
                "version"
            ],
            "type": "object"
        },
        "get_discovered_network_devices_by_discovery_id": {
            "properties": [
                "response",
                "version"
            ],
            "type": "object"
        },
        "get_network_devices_from_discovery": {
            "properties": [
                "response",
                "version"
            ],
            "type": "object"
        }
    }
}"""
)
| 26.26087 | 66 | 0.346736 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
module_definition = json.loads(
"""{
"family": "discovery",
"name": "discovery_network_device",
"operations": {
"get": [
"get_discovered_network_devices_by_discovery_id",
"get_discovered_devices_by_range",
"get_devices_discovered_by_id",
"get_network_devices_from_discovery"
]
},
"parameters": {
"get_devices_discovered_by_id": [
{
"name": "id",
"required": true,
"type": "string"
},
{
"name": "task_id",
"required": false,
"type": "string"
},
{
"artificial": true,
"name": "count",
"required": true,
"type": "boolean"
}
],
"get_discovered_devices_by_range": [
{
"name": "id",
"required": true,
"type": "string"
},
{
"name": "records_to_return",
"required": true,
"type": "integer"
},
{
"name": "start_index",
"required": true,
"type": "integer"
},
{
"name": "task_id",
"required": false,
"type": "string"
}
],
"get_discovered_network_devices_by_discovery_id": [
{
"name": "id",
"required": true,
"type": "string"
},
{
"name": "task_id",
"required": false,
"type": "string"
}
],
"get_network_devices_from_discovery": [
{
"name": "id",
"required": true,
"type": "string"
},
{
"name": "cli_status",
"required": false,
"type": "string"
},
{
"name": "http_status",
"required": false,
"type": "string"
},
{
"name": "ip_address",
"required": false,
"type": "string"
},
{
"name": "netconf_status",
"required": false,
"type": "string"
},
{
"name": "ping_status",
"required": false,
"type": "string"
},
{
"name": "snmp_status",
"required": false,
"type": "string"
},
{
"name": "sort_by",
"required": false,
"type": "string"
},
{
"name": "sort_order",
"required": false,
"type": "string"
},
{
"name": "task_id",
"required": false,
"type": "string"
},
{
"artificial": true,
"name": "summary",
"required": true,
"type": "boolean"
}
]
},
"responses": {
"get_devices_discovered_by_id": {
"properties": [
"response",
"version"
],
"type": "object"
},
"get_discovered_devices_by_range": {
"properties": [
"response",
"version"
],
"type": "object"
},
"get_discovered_network_devices_by_discovery_id": {
"properties": [
"response",
"version"
],
"type": "object"
},
"get_network_devices_from_discovery": {
"properties": [
"response",
"version"
],
"type": "object"
}
}
}"""
)
| 0 | 0 | 0 |
72b850981a06edc2f84483d0a9fb974636249292 | 1,156 | py | Python | tools/deep_memory_profiler/subcommands/stacktrace.py | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | tools/deep_memory_profiler/subcommands/stacktrace.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | tools/deep_memory_profiler/subcommands/stacktrace.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from lib.bucket import BUCKET_ID
from lib.subcommand import SubCommand
| 27.52381 | 72 | 0.67128 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from lib.bucket import BUCKET_ID
from lib.subcommand import SubCommand
class StacktraceCommand(SubCommand):
  """'stacktrace' subcommand: prints symbolized stacktraces from a dump."""
  def __init__(self):
    super(StacktraceCommand, self).__init__(
        'Usage: %prog stacktrace <dump>')

  def do(self, sys_argv):
    """Runs the subcommand for the dump named on the command line.

    Returns 0 (process exit status) on success.
    """
    _, args = self._parse_args(sys_argv, 1)
    dump_path = args[1]
    (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)
    StacktraceCommand._output(dump, bucket_set, sys.stdout)
    return 0

  @staticmethod
  def _output(dump, bucket_set, out):
    """Outputs a given stacktrace.

    Args:
        bucket_set: A BucketSet object.
        out: A file object to output.
    """
    for line in dump.iter_stacktrace:
      words = line.split()
      bucket = bucket_set.get(int(words[BUCKET_ID]))
      if not bucket:
        # Unknown bucket id: drop the line entirely.
        continue
      # Echo the first BUCKET_ID - 1 columns unchanged, then emit the
      # bucket's symbolized stack frames in place of the raw reference.
      for i in range(0, BUCKET_ID - 1):
        out.write(words[i] + ' ')
      for frame in bucket.symbolized_stackfunction:
        out.write(frame + ' ')
      out.write('\n')
| 300 | 586 | 23 |
431b39143b9b0cf0b40e592c7fd8d42ef1d24d16 | 978 | py | Python | FCFS.py | Aamina27/Amina-Afzal | 90c4b2ab97051bfa6c7ec83aa216ad303b3d1135 | [
"MIT"
] | null | null | null | FCFS.py | Aamina27/Amina-Afzal | 90c4b2ab97051bfa6c7ec83aa216ad303b3d1135 | [
"MIT"
] | null | null | null | FCFS.py | Aamina27/Amina-Afzal | 90c4b2ab97051bfa6c7ec83aa216ad303b3d1135 | [
"MIT"
# First-Come-First-Served (FCFS) scheduling: reads n burst times and prints
# each process's waiting/turnaround time plus the averages.
burst_time=[]
print("Enter the number of process: ")
n=int(input())
print("Enter the burst time of the processes: \n")
burst_time=list(map(int, input().split()))

waiting_time = [0]                  # the first process never waits
turnaround_time = [burst_time[0]]   # turnaround = waiting + burst
for i in range(1, len(burst_time)):
    waiting_time.append(waiting_time[i - 1] + burst_time[i - 1])
    turnaround_time.append(waiting_time[i] + burst_time[i])

# BUG FIX: the original accumulated the averages inside the loop starting at
# i=1, so process 0's turnaround time (turnaround_time[0]) was never counted.
avg_waiting_time = sum(waiting_time) / n
avg_turnaround_time = sum(turnaround_time) / n

print("\n")
print("Process\t Burst Time\t Waiting Time\t Turn Around Time")
for i in range(0, n):
    print(str(i) + "\t\t" + str(burst_time[i]) + "\t\t" + str(waiting_time[i]) + "\t\t" + str(turnaround_time[i]))
print("\n")
print("Average Waiting time is: " + str(avg_waiting_time))
print("Average Turn Arount Time is: " + str(avg_turnaround_time))
print("Enter the number of process: ")
n=int(input())
print("Enter the burst time of the processes: \n")
burst_time=list(map(int, input().split()))
waiting_time=[]
avg_waiting_time=0
turnaround_time=[]
avg_turnaround_time=0
waiting_time.insert(0,0)
turnaround_time.insert(0,burst_time[0])
for i in range(1,len(burst_time)):
waiting_time.insert(i,waiting_time[i-1]+burst_time[i-1])
turnaround_time.insert(i,waiting_time[i]+burst_time[i])
avg_waiting_time+=waiting_time[i]
avg_turnaround_time+=turnaround_time[i]
avg_waiting_time=float(avg_waiting_time)/n
avg_turnaround_time=float(avg_turnaround_time)/n
print("\n")
print("Process\t Burst Time\t Waiting Time\t Turn Around Time")
for i in range(0,n):
print(str(i)+"\t\t"+str(burst_time[i])+"\t\t"+str(waiting_time[i])+"\t\t"+str(turnaround_time[i]))
print("\n")
print("Average Waiting time is: "+str(avg_waiting_time))
print("Average Turn Arount Time is: "+str(avg_turnaround_time)) | 0 | 0 | 0 |
2b08ec7a52d6c60e3308d6a7da8ed66b3992c9ad | 1,299 | py | Python | airflow_sqlcmd_operator/sqlcmd.py | dewes/airflow-sqlcmd-operator | 29177b111d5e87bd9a856388ac0e6254c391f99a | [
"MIT"
] | null | null | null | airflow_sqlcmd_operator/sqlcmd.py | dewes/airflow-sqlcmd-operator | 29177b111d5e87bd9a856388ac0e6254c391f99a | [
"MIT"
] | 1 | 2021-08-06T21:54:02.000Z | 2021-08-06T21:54:02.000Z | airflow_sqlcmd_operator/sqlcmd.py | dewes/airflow-sqlcmd-operator | 29177b111d5e87bd9a856388ac0e6254c391f99a | [
"MIT"
] | null | null | null | import os
from airflow.hooks.base_hook import BaseHook
from airflow.operators.bash_operator import BashOperator
from airflow.utils.decorators import apply_defaults
| 36.083333 | 143 | 0.667436 | import os
from airflow.hooks.base_hook import BaseHook
from airflow.operators.bash_operator import BashOperator
from airflow.utils.decorators import apply_defaults
class SqlcmdOperator(BashOperator):
    """BashOperator that runs a SQL script file against MSSQL via `sqlcmd`.

    Connection details are taken from the Airflow connection identified by
    ``mssql_conn_id`` and injected into the command through Jinja ``params``.
    """

    # Declared templated so Jinja expressions in these fields are rendered.
    template_fields = ("task_id", "bash_command", "sql_command", "sql_folder", "sql_file")

    # Currently works only with fixed sqlcmd binary
    # Must keep a whitespace at the end of the string.
    sql_command = "/opt/mssql-tools/bin/sqlcmd -b -C -S {{ params.host }} -U {{ params.login }} -P {{ params.password }} -i {{ params.file }} "

    @apply_defaults
    def __init__(self, *, mssql_conn_id, sql_folder, sql_file, **kwargs):
        """Args:
            mssql_conn_id: Airflow connection id holding host/login/password.
            sql_folder: directory containing the SQL script.
            sql_file: script file name, joined onto sql_folder.
        """
        # NOTE(review): the connection password is rendered into the bash
        # command via params and may surface in task logs -- confirm this is
        # acceptable for the deployment.
        db = BaseHook.get_connection(mssql_conn_id)
        params = {
            "host": db.host,
            "login": db.login,
            "password": db.password,
            "file": self.sql_script_path(sql_folder, sql_file),
        }
        super(SqlcmdOperator, self).__init__(bash_command=self.sql_command, params=params, **kwargs)
        self.mssql_conn_id = mssql_conn_id
        self.sql_folder = sql_folder
        self.sql_file = sql_file

    def sql_script_path(self, sql_folder, sql_file):
        """Returns the corrected file path with quotation marks."""
        # Single-quote the joined path so spaces survive shell word-splitting.
        path = os.path.join(sql_folder, sql_file)
        return f"'{path}'"
| 507 | 603 | 23 |
bcbe7560b6bd844d2aac187e3e4e713f4dc467a8 | 13,482 | py | Python | benchmarks/code/cifar10.py | nhahn7/SDTF | d9ff23b57f8b16a1be7d2fd49fb319b5a90bc20e | [
"MIT"
] | null | null | null | benchmarks/code/cifar10.py | nhahn7/SDTF | d9ff23b57f8b16a1be7d2fd49fb319b5a90bc20e | [
"MIT"
] | null | null | null | benchmarks/code/cifar10.py | nhahn7/SDTF | d9ff23b57f8b16a1be7d2fd49fb319b5a90bc20e | [
"MIT"
] | null | null | null | """
Author: Haoyin Xu
"""
import time
import psutil
import argparse
import numpy as np
import torchvision.datasets as datasets
from numpy.random import permutation
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from river import tree
from skgarden import MondrianForestClassifier
from sdtf import StreamDecisionForest
def write_result(filename, acc_ls):
    """Writes results to specified text file, one entry per line.

    Args:
        filename: path of the output file (overwritten).
        acc_ls: iterable of values; each is written via str().
    """
    # FIX: use a context manager so the handle is flushed and closed even on
    # error -- the original leaked the open file object.
    with open(filename, "w") as output:
        for acc in acc_ls:
            output.write(str(acc) + "\n")
def prediction(classifier):
    """Generates predictions from model.

    Returns the fraction of test samples (module globals ``X_test`` /
    ``y_test``) whose predicted label matches the truth.
    """
    predictions = classifier.predict(X_test)
    correct = 0
    for predicted, actual in zip(predictions, y_test):
        if predicted == actual:
            correct += 1
    return correct / X_test.shape[0]
def experiment_dt():
    """Runs experiments for Batch Decision Tree.

    After each new batch of 100 samples, refits the tree from scratch on the
    entire prefix seen so far (500 batches). Reads the shuffled module
    globals ``X_r``/``y_r``; train times are per-refit, NOT cumulative.

    Returns:
        Tuple of per-batch lists: (accuracy, train time, test time,
        virtual-memory usage %, swap-memory usage %).
    """
    dt_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    dt = DecisionTreeClassifier()
    for i in range(500):
        # Growing prefix: batch i trains on the first (i + 1) * 100 samples.
        X_t = X_r[: (i + 1) * 100]
        y_t = y_r[: (i + 1) * 100]

        # Train the model
        start_time = time.perf_counter()
        dt.fit(X_t, y_t)
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        # Test the model
        start_time = time.perf_counter()
        dt_l.append(prediction(dt))
        end_time = time.perf_counter()
        test_time_l.append(end_time - start_time)

        # Check memory (system-wide percentages from psutil's named tuples)
        v_m = psutil.virtual_memory()[2]
        v_m_l.append(v_m)
        s_m = psutil.swap_memory()[3]
        s_m_l.append(s_m)

    return dt_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_rf():
    """Runs experiments for Random Forest.

    Same protocol as experiment_dt: refits the forest from scratch on the
    growing data prefix after every 100-sample batch (500 batches). Reads
    the shuffled module globals ``X_r``/``y_r``.

    Returns:
        Tuple of per-batch lists: (accuracy, train time, test time,
        virtual-memory usage %, swap-memory usage %).
    """
    rf_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    rf = RandomForestClassifier()
    for i in range(500):
        # Growing prefix: batch i trains on the first (i + 1) * 100 samples.
        X_t = X_r[: (i + 1) * 100]
        y_t = y_r[: (i + 1) * 100]

        # Train the model
        start_time = time.perf_counter()
        rf.fit(X_t, y_t)
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        # Test the model
        start_time = time.perf_counter()
        rf_l.append(prediction(rf))
        end_time = time.perf_counter()
        test_time_l.append(end_time - start_time)

        # Check memory
        v_m = psutil.virtual_memory()[2]
        v_m_l.append(v_m)
        s_m = psutil.swap_memory()[3]
        s_m_l.append(s_m)

    return rf_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_ht():
    """Runs experiments for Hoeffding Tree.

    Trains a river Hoeffding tree one sample at a time on the shuffled
    module globals ``X_r``/``y_r`` and evaluates on ``X_test``/``y_test``
    after every 100th sample. Train times are folded into cumulative
    per-100-sample totals before returning.

    Returns:
        Tuple of lists: (accuracy per 100 samples, cumulative train time
        per 100 samples, test time, virtual-memory %, swap-memory %).
    """
    ht_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    ht = tree.HoeffdingTreeClassifier(max_size=1000, grace_period=2)
    # river expects a mapping of feature-name -> value per sample.
    # BUG FIX: the original used range(1024) as the key set, silently
    # dropping all but the first 1024 of the 3072 CIFAR-10 features
    # (X_train was reshaped to 32 * 32 * 3 columns).
    n_features = X_train.shape[1]
    for i in range(X_train.shape[0]):
        X_t = dict(zip(range(n_features), X_r[i]))
        y_t = y_r[i]

        start_time = time.perf_counter()
        ht.learn_one(X_t, y_t)
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        if i > 0 and (i + 1) % 100 == 0:
            p_t = 0.0
            start_time = time.perf_counter()
            for j in range(X_test.shape[0]):
                # BUG FIX: convert the test sample to the same dict form
                # used for training instead of passing a raw ndarray.
                y_pred = ht.predict_one(dict(zip(range(n_features), X_test[j])))
                if y_pred == y_test[j]:
                    p_t += 1
            ht_l.append(p_t / X_test.shape[0])
            end_time = time.perf_counter()
            test_time_l.append(end_time - start_time)

            # Check memory
            v_m = psutil.virtual_memory()[2]
            v_m_l.append(v_m)
            s_m = psutil.swap_memory()[3]
            s_m_l.append(s_m)

    # Reformat the per-sample train times into cumulative per-100 totals.
    new_train_time_l = []
    for i in range(1, X_train.shape[0]):
        train_time_l[i] += train_time_l[i - 1]
        if i > 0 and (i + 1) % 100 == 0:
            new_train_time_l.append(train_time_l[i])
    train_time_l = new_train_time_l

    return ht_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_mf():
    """Runs experiments for Mondrian Forest.

    Streams 500 disjoint batches of 100 samples into the forest with
    ``partial_fit``. Train times are prefix-summed at the end, so entry i is
    the total training time after i + 1 batches.

    Returns:
        Tuple of per-batch lists: (accuracy, cumulative train time,
        test time, virtual-memory usage %, swap-memory usage %).
    """
    mf_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    mf = MondrianForestClassifier(n_estimators=10)
    for i in range(500):
        # Disjoint batch i: samples [i * 100, (i + 1) * 100).
        X_t = X_r[i * 100 : (i + 1) * 100]
        y_t = y_r[i * 100 : (i + 1) * 100]

        # Train the model
        start_time = time.perf_counter()
        mf.partial_fit(X_t, y_t)
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        # Test the model
        start_time = time.perf_counter()
        mf_l.append(prediction(mf))
        end_time = time.perf_counter()
        test_time_l.append(end_time - start_time)

        # Check memory
        v_m = psutil.virtual_memory()[2]
        v_m_l.append(v_m)
        s_m = psutil.swap_memory()[3]
        s_m_l.append(s_m)

    # Reformat the train times (in-place prefix sum -> cumulative totals)
    for i in range(1, 500):
        train_time_l[i] += train_time_l[i - 1]

    return mf_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_sdt():
    """Runs experiments for Stream Decision Tree.

    Incrementally fits a single tree with ``partial_fit`` on 500 disjoint
    batches of 100 samples. Train times are prefix-summed at the end.

    NOTE(review): stock scikit-learn DecisionTreeClassifier has no
    partial_fit; this presumably relies on SDTF's patched scikit-learn --
    confirm the environment.

    Returns:
        Tuple of per-batch lists: (accuracy, cumulative train time,
        test time, virtual-memory usage %, swap-memory usage %).
    """
    sdt_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    sdt = DecisionTreeClassifier()
    for i in range(500):
        # Disjoint batch i: samples [i * 100, (i + 1) * 100).
        X_t = X_r[i * 100 : (i + 1) * 100]
        y_t = y_r[i * 100 : (i + 1) * 100]

        # Train the model (all 10 CIFAR-10 classes declared up front)
        start_time = time.perf_counter()
        sdt.partial_fit(X_t, y_t, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        # Test the model
        start_time = time.perf_counter()
        sdt_l.append(prediction(sdt))
        end_time = time.perf_counter()
        test_time_l.append(end_time - start_time)

        # Check memory
        v_m = psutil.virtual_memory()[2]
        v_m_l.append(v_m)
        s_m = psutil.swap_memory()[3]
        s_m_l.append(s_m)

    # Reformat the train times (in-place prefix sum -> cumulative totals)
    for i in range(1, 500):
        train_time_l[i] += train_time_l[i - 1]

    return sdt_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_sdf():
    """Runs experiments for Stream Decision Forest.

    Incrementally fits an sdtf.StreamDecisionForest with ``partial_fit`` on
    500 disjoint batches of 100 samples. Train times are prefix-summed at
    the end, so entry i is the total time after i + 1 batches.

    Returns:
        Tuple of per-batch lists: (accuracy, cumulative train time,
        test time, virtual-memory usage %, swap-memory usage %).
    """
    sdf_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []

    sdf = StreamDecisionForest()
    for i in range(500):
        # Disjoint batch i: samples [i * 100, (i + 1) * 100).
        X_t = X_r[i * 100 : (i + 1) * 100]
        y_t = y_r[i * 100 : (i + 1) * 100]

        # Train the model (all 10 CIFAR-10 classes declared up front)
        start_time = time.perf_counter()
        sdf.partial_fit(X_t, y_t, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)

        # Test the model
        start_time = time.perf_counter()
        sdf_l.append(prediction(sdf))
        end_time = time.perf_counter()
        test_time_l.append(end_time - start_time)

        # Check memory
        v_m = psutil.virtual_memory()[2]
        v_m_l.append(v_m)
        s_m = psutil.swap_memory()[3]
        s_m_l.append(s_m)

    # Reformat the train times (in-place prefix sum -> cumulative totals)
    for i in range(1, 500):
        train_time_l[i] += train_time_l[i - 1]

    return sdf_l, train_time_l, test_time_l, v_m_l, s_m_l
# Prepare CIFAR data

# Normalize
# scale == mean of 0..255 == 127.5, so pixel values are mapped into [-1, 1].
scale = np.mean(np.arange(0, 256))
normalize = lambda x: (x - scale) / scale

# Train data
cifar_trainset = datasets.CIFAR10(root="../", train=True, download=True, transform=None)
X_train = normalize(cifar_trainset.data)
y_train = np.array(cifar_trainset.targets)

# Test data
cifar_testset = datasets.CIFAR10(root="../", train=False, download=True, transform=None)
X_test = normalize(cifar_testset.data)
y_test = np.array(cifar_testset.targets)

# Flatten each 32x32 RGB image into a 3072-dimensional feature vector.
X_train = X_train.reshape(-1, 32 * 32 * 3)
X_test = X_test.reshape(-1, 32 * 32 * 3)
# Parse classifier choices
# Each flag selects one classifier family; -all runs every experiment.
# NOTE(review): the help text for -dt reads "decision forests" -- looks like
# a copy-paste of the -sdf text; presumably it should say "decision trees".
parser = argparse.ArgumentParser()
parser.add_argument("-all", help="all classifiers", required=False, action="store_true")
parser.add_argument("-dt", help="decision forests", required=False, action="store_true")
parser.add_argument("-rf", help="random forests", required=False, action="store_true")
parser.add_argument("-ht", help="hoeffding trees", required=False, action="store_true")
parser.add_argument("-mf", help="mondrian forests", required=False, action="store_true")
parser.add_argument(
    "-sdt", help="stream decision trees", required=False, action="store_true"
)
parser.add_argument(
    "-sdf", help="stream decision forests", required=False, action="store_true"
)
args = parser.parse_args()
# Perform experiments
# The six branches below were near-identical copy-pasted blocks; they are
# collapsed into one data-driven loop. Order is preserved (dt, rf, ht, mf,
# sdt, sdf) and each CLI flag name doubles as its results directory name.
_EXPERIMENTS = [
    ("dt", experiment_dt),
    ("rf", experiment_rf),
    ("ht", experiment_ht),
    ("mf", experiment_mf),
    ("sdt", experiment_sdt),
    ("sdf", experiment_sdf),
]

for _name, _experiment in _EXPERIMENTS:
    if not (args.all or getattr(args, _name)):
        continue

    acc_l = []
    train_t_l = []
    test_t_l = []
    v_m_l = []
    s_m_l = []
    for i in range(10):
        # Fresh shuffle of the training stream for every repetition; the
        # experiment functions read the shuffled X_r / y_r module globals.
        p = permutation(X_train.shape[0])
        X_r = X_train[p]
        y_r = y_train[p]

        acc, train_t, test_t, v_m, s_m = _experiment()
        acc_l.append(acc)
        train_t_l.append(train_t)
        test_t_l.append(test_t)
        v_m_l.append(v_m)
        s_m_l.append(s_m)

    write_result(f"../results/{_name}/cifar10_acc.txt", acc_l)
    write_result(f"../results/{_name}/cifar10_train_t.txt", train_t_l)
    write_result(f"../results/{_name}/cifar10_test_t.txt", test_t_l)
    write_result(f"../results/{_name}/cifar10_v_m.txt", v_m_l)
    write_result(f"../results/{_name}/cifar10_s_m.txt", s_m_l)
| 29.565789 | 88 | 0.61927 | """
Author: Haoyin Xu
"""
import time
import psutil
import argparse
import numpy as np
import torchvision.datasets as datasets
from numpy.random import permutation
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from river import tree
from skgarden import MondrianForestClassifier
from sdtf import StreamDecisionForest
def write_result(filename, acc_ls):
    """Write one result entry per line to a text file.

    Args:
        filename: path of the output file (overwritten if it exists).
        acc_ls: iterable of results; each is written via str() on its own line.
    """
    # A context manager guarantees the handle is flushed and closed even on
    # error; the original opened the file and relied on the GC to close it.
    with open(filename, "w") as output:
        for acc in acc_ls:
            output.write(str(acc) + "\n")
def prediction(classifier):
    """Return the classifier's accuracy on the module-level test set.

    Predicts every row of the global X_test in one call and compares
    element-wise against the global y_test.
    """
    predicted = classifier.predict(X_test)
    n_samples = X_test.shape[0]
    correct = sum(1 for i in range(n_samples) if predicted[i] == y_test[i])
    return correct / n_samples
def experiment_dt():
    """Batch-retraining benchmark for a Decision Tree.

    Refits the tree on a growing prefix of the shuffled training set
    (100 more samples per step, 500 steps), recording per step: test
    accuracy, train and test wall time, and virtual/swap memory usage.
    Reads X_r / y_r and (via prediction) X_test / y_test from module
    globals.

    Returns a 5-tuple of lists:
    (accuracy, train_time, test_time, virtual_mem, swap_mem).
    """
    accuracies = []
    fit_times = []
    predict_times = []
    virtual_mem = []
    swap_mem = []
    model = DecisionTreeClassifier()
    for step in range(500):
        n_seen = (step + 1) * 100
        # Time a full refit on everything seen so far.
        t0 = time.perf_counter()
        model.fit(X_r[:n_seen], y_r[:n_seen])
        fit_times.append(time.perf_counter() - t0)
        # Time evaluation on the held-out test set.
        t0 = time.perf_counter()
        accuracies.append(prediction(model))
        predict_times.append(time.perf_counter() - t0)
        # Memory snapshots: virtual-memory percent, swap percent.
        virtual_mem.append(psutil.virtual_memory()[2])
        swap_mem.append(psutil.swap_memory()[3])
    return accuracies, fit_times, predict_times, virtual_mem, swap_mem
def experiment_rf():
    """Batch-retraining benchmark for a Random Forest.

    Refits the forest on a growing prefix of the shuffled training set
    (100 more samples per step, 500 steps), recording per step: test
    accuracy, train and test wall time, and virtual/swap memory usage.
    Reads X_r / y_r and (via prediction) X_test / y_test from module
    globals.

    Returns a 5-tuple of lists:
    (accuracy, train_time, test_time, virtual_mem, swap_mem).
    """
    accuracies = []
    fit_times = []
    predict_times = []
    virtual_mem = []
    swap_mem = []
    model = RandomForestClassifier()
    for step in range(500):
        n_seen = (step + 1) * 100
        # Time a full refit on everything seen so far.
        t0 = time.perf_counter()
        model.fit(X_r[:n_seen], y_r[:n_seen])
        fit_times.append(time.perf_counter() - t0)
        # Time evaluation on the held-out test set.
        t0 = time.perf_counter()
        accuracies.append(prediction(model))
        predict_times.append(time.perf_counter() - t0)
        # Memory snapshots: virtual-memory percent, swap percent.
        virtual_mem.append(psutil.virtual_memory()[2])
        swap_mem.append(psutil.swap_memory()[3])
    return accuracies, fit_times, predict_times, virtual_mem, swap_mem
def experiment_ht():
    """Runs experiments for Hoeffding Tree.

    Streams the shuffled training set one sample at a time into a river
    HoeffdingTreeClassifier, evaluating on the full test set after every
    100th sample.  Reads X_r / y_r / X_train / X_test / y_test from module
    globals.  Returns (accuracy, cumulative_train_time, test_time,
    virtual_mem, swap_mem), each sampled once per 100-sample checkpoint.
    """
    ht_l = []
    train_time_l = []
    test_time_l = []
    v_m_l = []
    s_m_l = []
    ht = tree.HoeffdingTreeClassifier(max_size=1000, grace_period=2)
    for i in range(X_train.shape[0]):
        X_t = X_r[i]
        y_t = y_r[i]
        # river's learn_one wants a feature dict keyed by feature index.
        # NOTE(review): range(1024) only keys the first 1024 features, but the
        # flattened CIFAR rows have 32*32*3 = 3072 -- dict(zip(...)) silently
        # truncates the rest; confirm this is intentional.
        idx = range(1024)
        X_t = dict(zip(idx, X_t))
        start_time = time.perf_counter()
        ht.learn_one(X_t, y_t)
        end_time = time.perf_counter()
        train_time_l.append(end_time - start_time)
        # Evaluate at every 100th sample (i = 99, 199, ...).
        if i > 0 and (i + 1) % 100 == 0:
            p_t = 0.0
            start_time = time.perf_counter()
            for j in range(X_test.shape[0]):
                y_pred = ht.predict_one(X_test[j])
                if y_pred == y_test[j]:
                    p_t += 1
            ht_l.append(p_t / X_test.shape[0])
            end_time = time.perf_counter()
            test_time_l.append(end_time - start_time)
            # Check memory
            v_m = psutil.virtual_memory()[2]
            v_m_l.append(v_m)
            s_m = psutil.swap_memory()[3]
            s_m_l.append(s_m)
    # Reformat the train times: running prefix sums, then keep only the
    # values at the same 100-sample checkpoints used for accuracy above.
    new_train_time_l = []
    for i in range(1, X_train.shape[0]):
        train_time_l[i] += train_time_l[i - 1]
        if i > 0 and (i + 1) % 100 == 0:
            new_train_time_l.append(train_time_l[i])
    train_time_l = new_train_time_l
    return ht_l, train_time_l, test_time_l, v_m_l, s_m_l
def experiment_mf():
    """Incremental-learning benchmark for a Mondrian Forest.

    Feeds 500 disjoint batches of 100 shuffled samples to a single
    partially-fitted forest, recording after every batch: test accuracy,
    cumulative train time, per-batch test time, and virtual/swap memory
    usage.  Reads X_r / y_r and (via prediction) X_test / y_test from
    module globals.

    Returns a 5-tuple of lists:
    (accuracy, cumulative_train_time, test_time, virtual_mem, swap_mem).
    """
    accuracies = []
    fit_times = []
    predict_times = []
    virtual_mem = []
    swap_mem = []
    model = MondrianForestClassifier(n_estimators=10)
    for batch in range(500):
        lo = batch * 100
        X_batch = X_r[lo : lo + 100]
        y_batch = y_r[lo : lo + 100]
        # Time the incremental update on this batch only.
        t0 = time.perf_counter()
        model.partial_fit(X_batch, y_batch)
        fit_times.append(time.perf_counter() - t0)
        # Time evaluation on the held-out test set.
        t0 = time.perf_counter()
        accuracies.append(prediction(model))
        predict_times.append(time.perf_counter() - t0)
        # Memory snapshots: virtual-memory percent, swap percent.
        virtual_mem.append(psutil.virtual_memory()[2])
        swap_mem.append(psutil.swap_memory()[3])
    # Convert per-batch fit times into a running (cumulative) total.
    for j in range(1, len(fit_times)):
        fit_times[j] += fit_times[j - 1]
    return accuracies, fit_times, predict_times, virtual_mem, swap_mem
def experiment_sdt():
    """Incremental-learning benchmark for a Stream Decision Tree.

    Feeds 500 disjoint batches of 100 shuffled samples to a single
    partially-fitted tree, recording after every batch: test accuracy,
    cumulative train time, per-batch test time, and virtual/swap memory
    usage.  Reads X_r / y_r and (via prediction) X_test / y_test from
    module globals.

    Returns a 5-tuple of lists:
    (accuracy, cumulative_train_time, test_time, virtual_mem, swap_mem).
    """
    accuracies = []
    fit_times = []
    predict_times = []
    virtual_mem = []
    swap_mem = []
    # NOTE(review): vanilla sklearn DecisionTreeClassifier has no
    # partial_fit; presumably a patched build ships with sdtf -- confirm.
    model = DecisionTreeClassifier()
    for batch in range(500):
        lo = batch * 100
        X_batch = X_r[lo : lo + 100]
        y_batch = y_r[lo : lo + 100]
        # Time the incremental update on this batch only.
        t0 = time.perf_counter()
        model.partial_fit(X_batch, y_batch, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        fit_times.append(time.perf_counter() - t0)
        # Time evaluation on the held-out test set.
        t0 = time.perf_counter()
        accuracies.append(prediction(model))
        predict_times.append(time.perf_counter() - t0)
        # Memory snapshots: virtual-memory percent, swap percent.
        virtual_mem.append(psutil.virtual_memory()[2])
        swap_mem.append(psutil.swap_memory()[3])
    # Convert per-batch fit times into a running (cumulative) total.
    for j in range(1, len(fit_times)):
        fit_times[j] += fit_times[j - 1]
    return accuracies, fit_times, predict_times, virtual_mem, swap_mem
def experiment_sdf():
    """Incremental-learning benchmark for a Stream Decision Forest.

    Feeds 500 disjoint batches of 100 shuffled samples to a single
    partially-fitted forest, recording after every batch: test accuracy,
    cumulative train time, per-batch test time, and virtual/swap memory
    usage.  Reads X_r / y_r and (via prediction) X_test / y_test from
    module globals.

    Returns a 5-tuple of lists:
    (accuracy, cumulative_train_time, test_time, virtual_mem, swap_mem).
    """
    accuracies = []
    fit_times = []
    predict_times = []
    virtual_mem = []
    swap_mem = []
    model = StreamDecisionForest()
    for batch in range(500):
        lo = batch * 100
        X_batch = X_r[lo : lo + 100]
        y_batch = y_r[lo : lo + 100]
        # Time the incremental update on this batch only.
        t0 = time.perf_counter()
        model.partial_fit(X_batch, y_batch, classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        fit_times.append(time.perf_counter() - t0)
        # Time evaluation on the held-out test set.
        t0 = time.perf_counter()
        accuracies.append(prediction(model))
        predict_times.append(time.perf_counter() - t0)
        # Memory snapshots: virtual-memory percent, swap percent.
        virtual_mem.append(psutil.virtual_memory()[2])
        swap_mem.append(psutil.swap_memory()[3])
    # Convert per-batch fit times into a running (cumulative) total.
    for j in range(1, len(fit_times)):
        fit_times[j] += fit_times[j - 1]
    return accuracies, fit_times, predict_times, virtual_mem, swap_mem
# Prepare CIFAR data
# Normalize: scale == mean(0..255) == 127.5, so pixels map to [-1, 1].
scale = np.mean(np.arange(0, 256))
normalize = lambda x: (x - scale) / scale
# Train data
cifar_trainset = datasets.CIFAR10(root="../", train=True, download=True, transform=None)
X_train = normalize(cifar_trainset.data)
y_train = np.array(cifar_trainset.targets)
# Test data
cifar_testset = datasets.CIFAR10(root="../", train=False, download=True, transform=None)
X_test = normalize(cifar_testset.data)
y_test = np.array(cifar_testset.targets)
# Flatten each 32x32x3 image into a 3072-element feature row.
X_train = X_train.reshape(-1, 32 * 32 * 3)
X_test = X_test.reshape(-1, 32 * 32 * 3)
# Parse classifier choices (each flag selects one experiment; -all runs all).
parser = argparse.ArgumentParser()
parser.add_argument("-all", help="all classifiers", required=False, action="store_true")
# NOTE(review): the -dt help text says "decision forests" but the flag runs
# the single decision tree -- help strings are runtime text, flagged only.
parser.add_argument("-dt", help="decision forests", required=False, action="store_true")
parser.add_argument("-rf", help="random forests", required=False, action="store_true")
parser.add_argument("-ht", help="hoeffding trees", required=False, action="store_true")
parser.add_argument("-mf", help="mondrian forests", required=False, action="store_true")
parser.add_argument(
    "-sdt", help="stream decision trees", required=False, action="store_true"
)
parser.add_argument(
    "-sdf", help="stream decision forests", required=False, action="store_true"
)
args = parser.parse_args()
# Perform experiments.
# The six drivers were byte-identical 20-line blocks differing only in the
# flag, the result directory, and the experiment function; run them from one
# data-driven loop.  Each driver reshuffles the training set ten times, runs
# the experiment, and dumps the five metric lists under ../results/<name>/.
# The experiment_* functions read X_r / y_r as module-level globals, so the
# shuffled views are assigned at module scope, exactly as before.
for _flag, _name, _experiment in (
    (args.dt, "dt", experiment_dt),
    (args.rf, "rf", experiment_rf),
    (args.ht, "ht", experiment_ht),
    (args.mf, "mf", experiment_mf),
    (args.sdt, "sdt", experiment_sdt),
    (args.sdf, "sdf", experiment_sdf),
):
    if not (args.all or _flag):
        continue
    acc_l = []
    train_t_l = []
    test_t_l = []
    v_m_l = []
    s_m_l = []
    for i in range(10):
        # Fresh shuffle of the training set for every repetition.
        p = permutation(X_train.shape[0])
        X_r = X_train[p]
        y_r = y_train[p]
        acc, train_t, test_t, v_m, s_m = _experiment()
        acc_l.append(acc)
        train_t_l.append(train_t)
        test_t_l.append(test_t)
        v_m_l.append(v_m)
        s_m_l.append(s_m)
    write_result("../results/%s/cifar10_acc.txt" % _name, acc_l)
    write_result("../results/%s/cifar10_train_t.txt" % _name, train_t_l)
    write_result("../results/%s/cifar10_test_t.txt" % _name, test_t_l)
    write_result("../results/%s/cifar10_v_m.txt" % _name, v_m_l)
    write_result("../results/%s/cifar10_s_m.txt" % _name, s_m_l)
| 0 | 0 | 0 |
116ebabd373174e9bc708935a5a25a2354aabd43 | 11,479 | py | Python | src/textwrap.py | pineapplemachine/mwde | e68378b74d1712347351f1d8fe41edd434d7af16 | [
"MIT"
] | 6 | 2018-08-19T18:50:13.000Z | 2021-06-05T01:14:24.000Z | src/textwrap.py | pineapplemachine/mwde | e68378b74d1712347351f1d8fe41edd434d7af16 | [
"MIT"
] | 1 | 2018-12-01T14:18:34.000Z | 2018-12-01T14:59:22.000Z | src/textwrap.py | pineapplemachine/mwde | e68378b74d1712347351f1d8fe41edd434d7af16 | [
"MIT"
] | 1 | 2021-06-05T01:14:31.000Z | 2021-06-05T01:14:31.000Z | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
# Modified by Sophie Kirschner
# https://github.com/python/cpython/blob/master/Lib/textwrap.py
# https://github.com/python/cpython/blob/master/LICENSE
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text.decode("latin-1"))
chunks = list(filter(None, chunks)) # remove empty chunks
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
# Added to consider basic ANSI escape sequences as zero-width
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chucks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = self._get_chunk_length(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and self._get_chunk_length(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
chunks = self._split(text)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
| 41.143369 | 80 | 0.605105 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
# Modified by Sophie Kirschner
# https://github.com/python/cpython/blob/master/Lib/textwrap.py
# https://github.com/python/cpython/blob/master/LICENSE
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True):
        """Configure the wrapper.

        :param width: maximum width of wrapped lines.
        :param initial_indent: string prepended to the first wrapped line.
        :param subsequent_indent: string prepended to every later line.
        :param expand_tabs: accepted for API compatibility (see note below).
        :param break_long_words: break words longer than ``width``.
        :param drop_whitespace: strip leading/trailing whitespace from lines.
        :param break_on_hyphens: allow breaking at hyphens in compound words.
        """
        # NOTE(review): expand_tabs is accepted but never stored or used
        # anywhere in this trimmed fork -- confirm dropping it is intentional.
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens
        # recompile the regexes for Unicode mode -- done in this clumsy way for
        # backwards compatibility because it's rather common to monkey-patch
        # the TextWrapper class' wordsep_re attribute.
        self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
        self.wordsep_simple_re_uni = re.compile(
            self.wordsep_simple_re.pattern, re.U)
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text.decode("latin-1"))
chunks = list(filter(None, chunks)) # remove empty chunks
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
# Added to consider basic ANSI escape sequences as zero-width
def _get_chunk_length(self, chunk):
length = 0
in_escape = False
for i in range(len(chunk)):
if chunk[i] == "\\" and chunk[i + 1] == "\033":
in_escape = True
elif in_escape and chunk[i] == "m":
in_escape = False
elif not in_escape:
length += 1
return length
    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]
        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less. (If 'break_long_words' is false,
        some lines may be longer than this.) Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks. Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        Note: the input list is consumed in place (reversed, then popped).
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chucks.
        chunks.reverse()
        while chunks:
            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0
            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Maximum width for this line.
            width = self.width - len(indent)
            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                # Printable length: _get_chunk_length treats embedded ANSI
                # escape sequences as zero-width.
                l = self._get_chunk_length(chunks[-1])
                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                # Nope, this line is full.
                else:
                    break
            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and self._get_chunk_length(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
chunks = self._split(text)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
| 1,258 | 0 | 53 |
729c2aa69dc495d2e9bbc6997b3e87438b9334a0 | 9,382 | py | Python | nova/objects/network.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 1 | 2019-07-29T10:30:24.000Z | 2019-07-29T10:30:24.000Z | nova/objects/network.py | ljzjohnson/nova | 87e1951a1b8c03b9ecdf8f75610d14690b61f272 | [
"Apache-2.0"
] | 11 | 2017-06-19T01:28:55.000Z | 2017-06-23T02:01:47.000Z | nova/objects/network.py | ljzjohnson/nova | 87e1951a1b8c03b9ecdf8f75610d14690b61f272 | [
"Apache-2.0"
] | 7 | 2015-01-20T10:30:08.000Z | 2020-02-05T10:29:05.000Z | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_utils import versionutils
import nova.conf
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
CONF = nova.conf.CONF
# TODO(berrange): Remove NovaObjectDictCompat
@obj_base.NovaObjectRegistry.register
@obj_base.NovaObjectRegistry.register
| 40.791304 | 78 | 0.649329 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_utils import versionutils
import nova.conf
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
CONF = nova.conf.CONF
# TODO(berrange): Remove NovaObjectDictCompat
@obj_base.NovaObjectRegistry.register
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject,
              obj_base.NovaObjectDictCompat):
    """Versioned object wrapping a single nova-network database record."""
    # Version 1.0: Initial version
    # Version 1.1: Added in_use_on_host()
    # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
    VERSION = '1.2'
    fields = {
        'id': fields.IntegerField(),
        'label': fields.StringField(),
        'injected': fields.BooleanField(),
        'cidr': fields.IPV4NetworkField(nullable=True),
        'cidr_v6': fields.IPV6NetworkField(nullable=True),
        'multi_host': fields.BooleanField(),
        'netmask': fields.IPV4AddressField(nullable=True),
        'gateway': fields.IPV4AddressField(nullable=True),
        'broadcast': fields.IPV4AddressField(nullable=True),
        'netmask_v6': fields.IPV6AddressField(nullable=True),
        'gateway_v6': fields.IPV6AddressField(nullable=True),
        'bridge': fields.StringField(nullable=True),
        'bridge_interface': fields.StringField(nullable=True),
        'dns1': fields.IPAddressField(nullable=True),
        'dns2': fields.IPAddressField(nullable=True),
        'vlan': fields.IntegerField(nullable=True),
        'vpn_public_address': fields.IPAddressField(nullable=True),
        'vpn_public_port': fields.IntegerField(nullable=True),
        'vpn_private_address': fields.IPAddressField(nullable=True),
        'dhcp_start': fields.IPV4AddressField(nullable=True),
        'rxtx_base': fields.IntegerField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'priority': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'mtu': fields.IntegerField(nullable=True),
        'dhcp_server': fields.IPAddressField(nullable=True),
        'enable_dhcp': fields.BooleanField(),
        'share_address': fields.BooleanField(),
    }

    @staticmethod
    def _convert_legacy_ipv6_netmask(netmask):
        """Handle netmask_v6 possibilities from the database.

        Historically, this was stored as just an integral CIDR prefix,
        but in the future it should be stored as an actual netmask.
        Be tolerant of either here.
        """
        try:
            prefix = int(netmask)
            return netaddr.IPNetwork('1::/%i' % prefix).netmask
        except ValueError:
            pass
        try:
            return netaddr.IPNetwork(netmask).netmask
        except netaddr.AddrFormatError:
            raise ValueError(_('IPv6 netmask "%s" must be a netmask '
                               'or integral prefix') % netmask)

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields that peers older than version 1.2 do not know."""
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            if 'mtu' in primitive:
                del primitive['mtu']
            if 'enable_dhcp' in primitive:
                del primitive['enable_dhcp']
            if 'dhcp_server' in primitive:
                del primitive['dhcp_server']
            if 'share_address' in primitive:
                del primitive['share_address']

    @staticmethod
    def _from_db_object(context, network, db_network):
        """Hydrate *network* from a DB row, applying legacy/config fixups."""
        for field in network.fields:
            db_value = db_network[field]
            # BUG FIX: these comparisons previously used "is" on string
            # literals, which only works by accident of CPython string
            # interning; "==" is the correct comparison.
            if field == 'netmask_v6' and db_value is not None:
                # Legacy rows may store a bare prefix length here.
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            if field == 'dhcp_server' and db_value is None:
                db_value = db_network['gateway']
            if field == 'share_address' and CONF.share_dhcp_address:
                db_value = CONF.share_dhcp_address
            network[field] = db_value
        network._context = context
        network.obj_reset_changes()
        return network

    @obj_base.remotable_classmethod
    def get_by_id(cls, context, network_id, project_only='allow_none'):
        """Load one network by integer id."""
        db_network = db.network_get(context, network_id,
                                    project_only=project_only)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_uuid(cls, context, network_uuid):
        """Load one network by uuid."""
        db_network = db.network_get_by_uuid(context, network_uuid)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_cidr(cls, context, cidr):
        """Load one network by its CIDR."""
        db_network = db.network_get_by_cidr(context, cidr)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def associate(cls, context, project_id, network_id=None, force=False):
        """Associate a network with *project_id* at the DB layer."""
        db.network_associate(context, project_id, network_id=network_id,
                             force=force)

    @obj_base.remotable_classmethod
    def disassociate(cls, context, network_id, host=False, project=False):
        """Clear host and/or project association for a network."""
        db.network_disassociate(context, network_id, host, project)

    @obj_base.remotable_classmethod
    def in_use_on_host(cls, context, network_id, host):
        """Return the DB layer's in-use status for this network on *host*."""
        return db.network_in_use_on_host(context, network_id, host)

    def _get_primitive_changes(self):
        """Return obj_get_changes() with IP objects stringified for the DB."""
        changes = {}
        for key, value in self.obj_get_changes().items():
            if isinstance(value, netaddr.IPAddress):
                changes[key] = str(value)
            else:
                changes[key] = value
        return changes

    @obj_base.remotable
    def create(self):
        """Persist a new network row; refuses an already-created object."""
        updates = self._get_primitive_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        db_network = db.network_create_safe(self._context, updates)
        self._from_db_object(self._context, self, db_network)

    @obj_base.remotable
    def destroy(self):
        """Delete the network row and mark this object as deleted."""
        db.network_delete_safe(self._context, self.id)
        self.deleted = True
        self.obj_reset_changes(['deleted'])

    @obj_base.remotable
    def save(self):
        """Write pending changes back to the database."""
        context = self._context
        updates = self._get_primitive_changes()
        if 'netmask_v6' in updates:
            # NOTE(danms): For some reason, historical code stores the
            # IPv6 netmask as just the CIDR mask length, so convert that
            # back here before saving for now.
            updates['netmask_v6'] = netaddr.IPNetwork(
                updates['netmask_v6']).netmask
        set_host = 'host' in updates
        if set_host:
            db.network_set_host(context, self.id, updates.pop('host'))
        if updates:
            db_network = db.network_update(context, self.id, updates)
        elif set_host:
            db_network = db.network_get(context, self.id)
        else:
            db_network = None
        if db_network is not None:
            self._from_db_object(context, self, db_network)
@obj_base.NovaObjectRegistry.register
class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject):
    """Versioned list of Network objects with bulk lookup helpers."""
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_project()
    # Version 1.2: Network <= version 1.2
    VERSION = '1.2'
    fields = {
        'objects': fields.ListOfObjectsField('Network'),
    }

    @obj_base.remotable_classmethod
    def get_all(cls, context, project_only='allow_none'):
        """Return every network visible under *project_only* scoping."""
        rows = db.network_get_all(context, project_only)
        return obj_base.obj_make_list(
            context, cls(context), objects.Network, rows)

    @obj_base.remotable_classmethod
    def get_by_uuids(cls, context, network_uuids, project_only='allow_none'):
        """Return the networks matching *network_uuids*."""
        rows = db.network_get_all_by_uuids(
            context, network_uuids, project_only)
        return obj_base.obj_make_list(
            context, cls(context), objects.Network, rows)

    @obj_base.remotable_classmethod
    def get_by_host(cls, context, host):
        """Return the networks assigned to *host*."""
        rows = db.network_get_all_by_host(context, host)
        return obj_base.obj_make_list(
            context, cls(context), objects.Network, rows)

    @obj_base.remotable_classmethod
    def get_by_project(cls, context, project_id, associate=True):
        """Return the project's networks; *associate* is forwarded to the
        DB layer's project_get_networks call."""
        rows = db.project_get_networks(
            context, project_id, associate=associate)
        return obj_base.obj_make_list(
            context, cls(context), objects.Network, rows)
| 4,617 | 3,724 | 44 |
aa48483bb4d631377a6e0d2824f5c99ecea55f93 | 2,968 | py | Python | ImageData.py | Guwudao/WeChat-Python-demo | 243b0c15f4d6a27271713f38ac5a987472844ce0 | [
"MIT"
] | null | null | null | ImageData.py | Guwudao/WeChat-Python-demo | 243b0c15f4d6a27271713f38ac5a987472844ce0 | [
"MIT"
] | 3 | 2021-12-13T20:41:27.000Z | 2022-03-12T00:22:24.000Z | ImageData.py | Guwudao/WeChat-Python-demo | 243b0c15f4d6a27271713f38ac5a987472844ce0 | [
"MIT"
] | null | null | null | from PIL import Image
from PIL.ExifTags import TAGS
import exifread
import re
import json
def get_exif_data(fname):
    """Get embedded EXIF data from image file.

    Returns a dict mapping decoded EXIF tag names to raw values; returns
    an empty dict when the file cannot be opened.
    """
    ret = {}
    try:
        # Context manager closes the underlying file handle (the
        # original Image.open() leaked it).
        with Image.open(fname) as img:
            if hasattr(img, '_getexif'):
                exifinfo = img._getexif()
                if exifinfo is not None:  # was "!= None"
                    for tag, value in exifinfo.items():
                        decoded = TAGS.get(tag, tag)
                        ret[decoded] = value
    except IOError:
        print('IOERROR ' + fname)
    return ret
if __name__ == '__main__':
fileName = "1 (36).jpg"
# exif = get_exif_data(fileName)
# print(exif)
read() | 39.052632 | 109 | 0.506739 | from PIL import Image
from PIL.ExifTags import TAGS
import exifread
import re
import json
def read(fname='IMG_1956.HEIC'):
    """Print EXIF metadata (GPS fix, camera make/model, timestamp, size)
    for *fname*.

    The filename is now a parameter; the default preserves the original
    hard-coded behaviour for existing callers.
    """
    with open(fname, 'rb') as f:
        try:
            contents = exifread.process_file(f)
            for key in contents:
                if key == "GPS GPSLongitude":
                    print("经度: ", contents[key], contents['GPS GPSLatitudeRef'])
                    print("纬度: ", contents['GPS GPSLatitude'], contents['GPS GPSLongitudeRef'])
                    print("高度基准: ", contents['GPS GPSAltitudeRef'])
                    print("海拔高度: ", contents['GPS GPSAltitude'])
                # str.startswith replaces the original re.match on literal
                # prefixes -- identical semantics, no regex machinery.
                if key.startswith('Image Make'):
                    print('品牌信息: ', contents[key])
                if key.startswith('Image Model'):
                    print('具体型号: ', contents[key])
                if key.startswith('Image DateTime'):
                    print('拍摄时间: ', contents[key])
                if key.startswith('EXIF ExifImageWidth'):
                    print('照片尺寸: ', contents[key], '*', contents['EXIF ExifImageLength'])
                if key.startswith('Image ImageDescription'):
                    print('图像描述: ', contents[key])
            print("-" * 100)
            # BUG FIX: the original called exifread.process_file(f) a
            # second time with the file pointer already consumed, so the
            # detail pass below saw no tags and raised KeyError.  Reuse
            # the tags parsed above instead.
            exif_dict = contents
            for key in exif_dict:
                print("%s: %s" % (key, exif_dict[key]))
            print('拍摄时间:', exif_dict['EXIF DateTimeOriginal'])
            print('照相机制造商:', exif_dict['Image Make'])
            print('照相机型号:', exif_dict['Image Model'])
            print('照片尺寸:', exif_dict['EXIF ExifImageWidth'], exif_dict['EXIF ExifImageLength'])
            # Longitude: degrees + minutes/60 + (seconds numerator /
            # denominator) / 3600; west of Greenwich is negated.
            lon_ref = exif_dict["GPS GPSLongitudeRef"].printable
            lon = exif_dict["GPS GPSLongitude"].printable[1:-1].replace(" ", "").replace("/", ",").split(",")
            lon = float(lon[0]) + float(lon[1]) / 60 + float(lon[2]) / float(lon[3]) / 3600
            if lon_ref != "E":
                lon = lon * (-1)
            # Latitude: same scheme; south of the equator is negated.
            lat_ref = exif_dict["GPS GPSLatitudeRef"].printable
            lat = exif_dict["GPS GPSLatitude"].printable[1:-1].replace(" ", "").replace("/", ",").split(",")
            lat = float(lat[0]) + float(lat[1]) / 60 + float(lat[2]) / float(lat[3]) / 3600
            if lat_ref != "N":
                lat = lat * (-1)
            print('照片的经纬度:', (lat, lon))
        except Exception as e:
            print("error: ", e)
def get_exif_data(fname):
    """Get embedded EXIF data from image file.

    Returns a dict mapping decoded EXIF tag names to raw values; returns
    an empty dict when the file cannot be opened.
    """
    ret = {}
    try:
        # Context manager closes the underlying file handle (the
        # original Image.open() leaked it).
        with Image.open(fname) as img:
            if hasattr(img, '_getexif'):
                exifinfo = img._getexif()
                if exifinfo is not None:  # was "!= None"
                    for tag, value in exifinfo.items():
                        decoded = TAGS.get(tag, tag)
                        ret[decoded] = value
    except IOError:
        print('IOERROR ' + fname)
    return ret
if __name__ == '__main__':
fileName = "1 (36).jpg"
# exif = get_exif_data(fileName)
# print(exif)
read() | 2,413 | 0 | 23 |
f6f4753aa6c34ab70ffbc004f50c721b01c9c100 | 1,788 | py | Python | app/__init__.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | app/__init__.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | app/__init__.py | avb76/barbershop | 975b501b0c53600909910619e248dff627acaa22 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_moment import Moment
from flask_mail import Mail
# from flask_mail_sendgrid import MailSendGrid
from config import Config
from logging.handlers import RotatingFileHandler
import logging
import os
# Flask extension singletons, created unbound and attached to the app
# later inside the application factory (init_app pattern).
db = SQLAlchemy()
migrate = Migrate()
bootstrap = Bootstrap()
login = LoginManager()
moment = Moment()
mail = Mail()
from app import models
| 28.83871 | 74 | 0.709172 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_moment import Moment
from flask_mail import Mail
# from flask_mail_sendgrid import MailSendGrid
from config import Config
from logging.handlers import RotatingFileHandler
import logging
import os
# Flask extension singletons, created unbound and attached to the app
# later inside the application factory (init_app pattern).
db = SQLAlchemy()
migrate = Migrate()
bootstrap = Bootstrap()
login = LoginManager()
moment = Moment()
mail = Mail()
def create_app(config_class=Config):
    """Application factory: build, configure and return the Flask app.

    Extensions declared at module level are bound here via init_app;
    blueprints are imported locally to avoid circular imports.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    db.init_app(app)
    migrate.init_app(app, db)
    bootstrap.init_app(app)
    login.init_app(app)
    # Where @login_required redirects anonymous users, and the flash
    # message shown when that happens.
    login.login_view = 'auth_barber.login'
    login.login_message = 'Please log in to access this page.'
    moment.init_app(app)
    mail.init_app(app)
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.auth_barber import bp as auth_barber_bp
    app.register_blueprint(auth_barber_bp, url_prefix='/auth/barber')
    from app.barber import bp as barber_bp
    app.register_blueprint(barber_bp, url_prefix='/barber')
    # File logging only for real deployments (not debug/test runs).
    if not app.debug and not app.testing:
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/barbershop.log',
                                           maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info('Barbershop startup')
    return app
from app import models
| 1,241 | 0 | 23 |
35e9d55eab18f67cc2fc4818d7246c681b504cad | 1,407 | py | Python | gale/timeseries/test_conversions.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | gale/timeseries/test_conversions.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | gale/timeseries/test_conversions.py | adamrpah/GALE | 94ab2613c5d53ea471f664a75c7d780a2689302f | [
"WTFPL"
] | null | null | null | '''
File: test_conversions.py
Author: Adam Pah
Description:
py.test test ensemble
'''
import pytest
import conversions as conv
class TestConvertTimeseries:
    '''
    Covers the convert_timeseries_to_intervalseries function
    '''
    timeseries = [[0, 2], [2, 3], [5, 3]]

    def test_basic(self):
        '''Plain conversion: x/y pairs become interval/y pairs.'''
        expected = [[0, 2], [1, 3]]
        actual = conv.convert_timeseries_to_intervalseries(self.timeseries)
        assert actual == expected

    def test_yaxis_only(self):
        '''yaxis_only=True drops the interval column, keeping y values.'''
        expected = [2, 3]
        actual = conv.convert_timeseries_to_intervalseries(
            self.timeseries, yaxis_only=True)
        assert actual == expected

    def test_negative_bounds(self):
        '''A negative x value must abort the program with SystemExit.'''
        series = [[0, 2], [-2, 3], [4, 3]]
        with pytest.raises(SystemExit):
            conv.convert_timeseries_to_intervalseries(series, yaxis_only=True)
| 29.93617 | 100 | 0.637527 | '''
File: test_conversions.py
Author: Adam Pah
Description:
py.test test ensemble
'''
import pytest
import conversions as conv
class TestConvertTimeseries:
    '''
    Covers the convert_timeseries_to_intervalseries function
    '''
    timeseries = [[0, 2], [2, 3], [5, 3]]

    def test_basic(self):
        '''Plain conversion: x/y pairs become interval/y pairs.'''
        expected = [[0, 2], [1, 3]]
        actual = conv.convert_timeseries_to_intervalseries(self.timeseries)
        assert actual == expected

    def test_yaxis_only(self):
        '''yaxis_only=True drops the interval column, keeping y values.'''
        expected = [2, 3]
        actual = conv.convert_timeseries_to_intervalseries(
            self.timeseries, yaxis_only=True)
        assert actual == expected

    def test_negative_bounds(self):
        '''A negative x value must abort the program with SystemExit.'''
        series = [[0, 2], [-2, 3], [4, 3]]
        with pytest.raises(SystemExit):
            conv.convert_timeseries_to_intervalseries(series, yaxis_only=True)
| 0 | 0 | 0 |
1c0994f61423ca0f43a923170a867f08704e7dbb | 7,724 | py | Python | amy/extrequests/tests/test_event_requests.py | mattipt/amy | c8c7212fd51d875d22c413781d28e94837a3db2e | [
"MIT"
] | null | null | null | amy/extrequests/tests/test_event_requests.py | mattipt/amy | c8c7212fd51d875d22c413781d28e94837a3db2e | [
"MIT"
] | 1 | 2019-12-13T11:22:47.000Z | 2019-12-13T11:22:47.000Z | amy/extrequests/tests/test_event_requests.py | mattipt/amy | c8c7212fd51d875d22c413781d28e94837a3db2e | [
"MIT"
] | null | null | null | from django.urls import reverse
from extforms.deprecated_forms import SWCEventRequestForm, DCEventRequestForm
from extrequests.models import (
EventRequest,
)
from workshops.models import Event, Organization
from workshops.tests.base import TestBase
| 41.304813 | 78 | 0.617556 | from django.urls import reverse
from extforms.deprecated_forms import SWCEventRequestForm, DCEventRequestForm
from extrequests.models import (
EventRequest,
)
from workshops.models import Event, Organization
from workshops.tests.base import TestBase
class TestSWCEventRequestForm(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
def test_fields_presence(self):
"""Test if the form shows correct fields."""
form = SWCEventRequestForm()
fields_left = set(form.fields.keys())
fields_right = set([
'name', 'email', 'affiliation', 'location', 'country',
'conference', 'preferred_date', 'language', 'workshop_type',
'approx_attendees', 'attendee_domains', 'attendee_domains_other',
'attendee_academic_levels', 'attendee_computing_levels',
'cover_travel_accomodation', 'understand_admin_fee',
'travel_reimbursement', 'travel_reimbursement_other',
'admin_fee_payment', 'comment', 'captcha', 'privacy_consent',
])
self.assertEqual(fields_left, fields_right)
def test_request_form_redirects(self):
self.assertEqual(len(EventRequest.objects.all()), 0)
rv = self.client.get(reverse('swc_workshop_request'))
self.assertRedirects(rv, reverse('workshop_landing'))
self.assertEqual(len(EventRequest.objects.all()), 0)
def test_request_discarded(self):
"""Ensure the request is discarded properly."""
# add a minimal request
er = EventRequest.objects.create(
name='Harry Potter', email='harry@potter.com',
affiliation='Hogwarts', location='United Kingdom',
country='GB', workshop_type='swc',
)
rv = self.client.get(reverse('eventrequest_set_state',
args=[er.pk, 'discarded']))
self.assertEqual(rv.status_code, 302)
er.refresh_from_db()
self.assertEqual(er.state, 'd')
class TestDCEventRequestForm(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
def test_fields_presence(self):
"""Test if the form shows correct fields."""
form = DCEventRequestForm()
fields_left = set(form.fields.keys())
fields_right = set([
'name', 'email', 'affiliation', 'location', 'country',
'conference', 'preferred_date', 'language', 'workshop_type',
'approx_attendees', 'attendee_domains', 'attendee_domains_other',
'data_types', 'data_types_other', 'attendee_academic_levels',
'attendee_data_analysis_level', 'cover_travel_accomodation',
'understand_admin_fee', 'fee_waiver_request',
'travel_reimbursement', 'travel_reimbursement_other',
'comment', 'privacy_consent', 'captcha',
])
self.assertEqual(fields_left, fields_right)
def test_request_form_redirects(self):
self.assertEqual(len(EventRequest.objects.all()), 0)
rv = self.client.get(reverse('dc_workshop_request'))
self.assertRedirects(rv, reverse('workshop_landing'))
self.assertEqual(len(EventRequest.objects.all()), 0)
def test_request_discarded(self):
"""Ensure the request is discarded properly."""
# add a minimal request
er = EventRequest.objects.create(
name='Harry Potter', email='harry@potter.com',
affiliation='Hogwarts', location='United Kingdom',
country='GB', workshop_type='dc',
)
rv = self.client.get(reverse('eventrequest_set_state',
args=[er.pk, 'discarded']))
self.assertEqual(rv.status_code, 302)
er.refresh_from_db()
self.assertEqual(er.state, 'd')
class TestEventRequestsViews(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
self.er1 = EventRequest.objects.create(
state="p", name="Harry Potter", email="harry@potter.com",
affiliation="Hogwarts", location="Scotland", country="GB",
preferred_date="soon",
)
self.er2 = EventRequest.objects.create(
state="d", name="Harry Potter", email="harry@potter.com",
affiliation="Hogwarts", location="Scotland", country="GB",
preferred_date="soon",
)
def test_pending_requests_list(self):
rv = self.client.get(reverse('all_eventrequests'))
self.assertIn(self.er1, rv.context['requests'])
self.assertNotIn(self.er2, rv.context['requests'])
def test_discarded_requests_list(self):
rv = self.client.get(reverse('all_eventrequests') + '?state=d')
self.assertNotIn(self.er1, rv.context['requests'])
self.assertIn(self.er2, rv.context['requests'])
def test_set_state_pending_request_view(self):
rv = self.client.get(reverse('eventrequest_set_state',
args=[self.er1.pk, 'discarded']))
self.assertEqual(rv.status_code, 302)
self.er1.refresh_from_db()
self.assertEqual(self.er1.state, "d")
def test_set_state_discarded_request_view(self):
rv = self.client.get(reverse('eventrequest_set_state',
args=[self.er2.pk, 'discarded']))
self.assertEqual(rv.status_code, 302)
self.er2.refresh_from_db()
self.assertEqual(self.er2.state, "d")
def test_pending_request_accept(self):
rv = self.client.get(reverse('eventrequest_set_state',
args=[self.er1.pk, 'accepted']))
self.assertEqual(rv.status_code, 302)
def test_pending_request_accepted_with_event(self):
"""Ensure a backlink from Event to EventRequest that created the
event exists after ER is accepted."""
data = {
'slug': '2016-06-30-test-event',
'host': Organization.objects.first().pk,
'tags': [1],
'invoice_status': 'unknown',
}
rv = self.client.post(
reverse('eventrequest_accept_event', args=[self.er1.pk]),
data)
self.assertEqual(rv.status_code, 302)
request = Event.objects.get(slug='2016-06-30-test-event').eventrequest
self.assertEqual(request, self.er1)
def test_discarded_request_accepted_with_event(self):
rv = self.client.get(reverse('eventrequest_accept_event',
args=[self.er2.pk]))
self.assertEqual(rv.status_code, 404)
def test_pending_request_discard(self):
rv = self.client.get(reverse('eventrequest_set_state',
args=[self.er1.pk, 'discarded']),
follow=True)
self.assertEqual(rv.status_code, 200)
def test_discarded_request_discard(self):
rv = self.client.get(reverse('eventrequest_set_state',
args=[self.er2.pk, 'discarded']),
follow=True)
self.assertEqual(rv.status_code, 200)
def test_discarded_request_reopened(self):
self.er1.state = "a"
self.er1.save()
rv = self.client.get(
reverse('eventrequest_set_state',
args=[self.er1.pk, 'pending']),
follow=True)
self.er1.refresh_from_db()
self.assertEqual(self.er1.state, "p")
def test_accepted_request_reopened(self):
self.assertEqual(self.er2.state, "d")
rv = self.client.get(
reverse('eventrequest_set_state',
args=[self.er2.pk, 'pending']),
follow=True)
self.er2.refresh_from_db()
self.assertEqual(self.er2.state, "p")
| 3,541 | 3,856 | 69 |
5f3c6e3145e4c5a9194485b8fe52b7dad8e138d8 | 2,449 | py | Python | scripts/dbutil/hads_delete_dups.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | 1 | 2019-10-07T17:01:24.000Z | 2019-10-07T17:01:24.000Z | scripts/dbutil/hads_delete_dups.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | scripts/dbutil/hads_delete_dups.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """
Our HADS database gets loaded up with duplicates, this cleans it up.
called from RUN_MIDNIGHT.sh
"""
from __future__ import print_function
import datetime
import sys
import pytz
from pyiem.util import get_dbconn, utc
def query(sql, args=None):
    """
    Execute *sql* (with optional bind *args*) against the hads database,
    commit, and print the rowcount and elapsed time.
    """
    pgconn = get_dbconn('hads')
    hcursor = pgconn.cursor()
    sts = datetime.datetime.now()
    try:
        hcursor.execute("set work_mem='16GB'")
        hcursor.execute(sql, args if args is not None else [])
        ets = datetime.datetime.now()
        print("%7s [%8.4fs] %s" % (hcursor.rowcount,
                                   (ets - sts).total_seconds(), sql))
    finally:
        # Always release the cursor, even when execute() raises
        # (the original leaked it on failure).
        hcursor.close()
    pgconn.commit()
def workflow(valid):
    ''' Do the work for this date, which is set to 00 UTC

    Deduplicates the monthly raw partition for the 24 hours starting at
    *valid*.  The statement order below is load-bearing: dedupe into a
    scratch table, delete the window, drop the indexes, reinsert, then
    rebuild the indexes.
    '''
    # Delete schoolnet data, since we created it in the first place!
    tbl = "raw%s" % (valid.strftime("%Y_%m"),)
    sql = """DELETE from """ + tbl + """ WHERE station IN
    (SELECT id from stations WHERE network in ('KCCI','KELO','KIMT')
    )"""
    query(sql)
    # make sure our tmp table does not exist
    query("DROP TABLE IF EXISTS tmp")
    # Extract unique obs to special table
    sql = """CREATE table tmp as select distinct * from """+tbl+"""
    WHERE valid BETWEEN %s and %s"""
    args = (valid, valid + datetime.timedelta(hours=24))
    query(sql, args)
    # Delete them all!
    sql = """delete from """+tbl+""" WHERE valid BETWEEN %s and %s"""
    query(sql, args)
    # Drop indexes so the bulk reinsert below is not slowed by them.
    sql = "DROP index IF EXISTS "+tbl+"_idx"
    query(sql)
    sql = "DROP index IF EXISTS "+tbl+"_valid_idx"
    query(sql)
    # Insert from special table
    sql = "INSERT into "+tbl+" SELECT * from tmp"
    query(sql)
    # Recreate the indexes and clean up the scratch table.
    sql = "CREATE index %s_idx on %s(station,valid)" % (tbl, tbl)
    query(sql)
    sql = "CREATE index %s_valid_idx on %s(valid)" % (tbl, tbl)
    query(sql)
    sql = "DROP TABLE IF EXISTS tmp"
    query(sql)
def main(argv):
    """Entry point: dedupe an explicit YYYY MM DD from argv, otherwise
    run for yesterday and for 35 days ago."""
    if len(argv) == 4:
        # Explicit date provided on the command line.
        year, month, day = int(argv[1]), int(argv[2]), int(argv[3])
        workflow(utc(year, month, day))
        return
    midnight = datetime.datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.utc)
    # Run for 'yesterday' and 35 days ago
    for days_back in [1, 35]:
        workflow(midnight - datetime.timedelta(days=days_back))
if __name__ == '__main__':
    # See how we are called
    # Full argv is forwarded so main() can detect a "YYYY MM DD" override.
    main(sys.argv)
| 28.476744 | 78 | 0.601062 | """
Our HADS database gets loaded up with duplicates, this cleans it up.
called from RUN_MIDNIGHT.sh
"""
from __future__ import print_function
import datetime
import sys
import pytz
from pyiem.util import get_dbconn, utc
def query(sql, args=None):
    """
    Execute *sql* (with optional bind *args*) against the hads database,
    commit, and print the rowcount and elapsed time.
    """
    pgconn = get_dbconn('hads')
    hcursor = pgconn.cursor()
    sts = datetime.datetime.now()
    try:
        hcursor.execute("set work_mem='16GB'")
        hcursor.execute(sql, args if args is not None else [])
        ets = datetime.datetime.now()
        print("%7s [%8.4fs] %s" % (hcursor.rowcount,
                                   (ets - sts).total_seconds(), sql))
    finally:
        # Always release the cursor, even when execute() raises
        # (the original leaked it on failure).
        hcursor.close()
    pgconn.commit()
def workflow(valid):
    ''' Do the work for this date, which is set to 00 UTC

    Deduplicates the monthly raw partition for the 24 hours starting at
    *valid*.  The statement order below is load-bearing: dedupe into a
    scratch table, delete the window, drop the indexes, reinsert, then
    rebuild the indexes.
    '''
    # Delete schoolnet data, since we created it in the first place!
    tbl = "raw%s" % (valid.strftime("%Y_%m"),)
    sql = """DELETE from """ + tbl + """ WHERE station IN
    (SELECT id from stations WHERE network in ('KCCI','KELO','KIMT')
    )"""
    query(sql)
    # make sure our tmp table does not exist
    query("DROP TABLE IF EXISTS tmp")
    # Extract unique obs to special table
    sql = """CREATE table tmp as select distinct * from """+tbl+"""
    WHERE valid BETWEEN %s and %s"""
    args = (valid, valid + datetime.timedelta(hours=24))
    query(sql, args)
    # Delete them all!
    sql = """delete from """+tbl+""" WHERE valid BETWEEN %s and %s"""
    query(sql, args)
    # Drop indexes so the bulk reinsert below is not slowed by them.
    sql = "DROP index IF EXISTS "+tbl+"_idx"
    query(sql)
    sql = "DROP index IF EXISTS "+tbl+"_valid_idx"
    query(sql)
    # Insert from special table
    sql = "INSERT into "+tbl+" SELECT * from tmp"
    query(sql)
    # Recreate the indexes and clean up the scratch table.
    sql = "CREATE index %s_idx on %s(station,valid)" % (tbl, tbl)
    query(sql)
    sql = "CREATE index %s_valid_idx on %s(valid)" % (tbl, tbl)
    query(sql)
    sql = "DROP TABLE IF EXISTS tmp"
    query(sql)
def main(argv):
    """Entry point: dedupe an explicit YYYY MM DD from argv, otherwise
    run for yesterday and for 35 days ago."""
    if len(argv) == 4:
        # Explicit date provided on the command line.
        year, month, day = int(argv[1]), int(argv[2]), int(argv[3])
        workflow(utc(year, month, day))
        return
    midnight = datetime.datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.utc)
    # Run for 'yesterday' and 35 days ago
    for days_back in [1, 35]:
        workflow(midnight - datetime.timedelta(days=days_back))
if __name__ == '__main__':
    # See how we are called
    # Full argv is forwarded so main() can detect a "YYYY MM DD" override.
    main(sys.argv)
| 0 | 0 | 0 |
ecc7d9a9b5336ce6466f8857ffbb551a37b659f2 | 929 | py | Python | api/api/video/models.py | dmitryro/django-starter | 59a61500664285cfadad16fa127a85f0cee3b3c1 | [
"MIT"
] | null | null | null | api/api/video/models.py | dmitryro/django-starter | 59a61500664285cfadad16fa127a85f0cee3b3c1 | [
"MIT"
] | 6 | 2020-06-05T22:49:59.000Z | 2021-06-10T18:53:10.000Z | api/api/video/models.py | dmitryro/django-starter | 59a61500664285cfadad16fa127a85f0cee3b3c1 | [
"MIT"
] | null | null | null | from django.db import models
| 38.708333 | 73 | 0.723358 | from django.db import models
class Video(models.Model):
    """Django model holding descriptive metadata for a single video."""
    title = models.CharField(max_length=200, blank=True, null=True)
    # Stored as a free-form string rather than a models.UUIDField.
    uuid = models.CharField(max_length=200, blank=True, null=True)
    description = models.CharField(max_length=200, blank=True, null=True)
    author = models.CharField(max_length=200, blank=True, null=True)
    # 0.0 defaults mean "unknown" rather than NULL for the numeric fields.
    aspect_ratio = models.FloatField(blank=True, default=0.0)
    # Duration unit is not enforced here -- presumably seconds; confirm
    # against whatever writes this field.
    duration = models.FloatField(blank=True, default=0.0)
    # Media location; notably longer limit than the other text fields.
    source = models.CharField(max_length=1200, blank=True, null=True)
    extension = models.CharField(max_length=200, blank=True, null=True)
    # Set automatically when the row is first created.
    time_published = models.DateTimeField(auto_now_add=True)
    time_created = models.DateTimeField(blank=True, null=True)
    # Free-form metadata blob stored as text.
    meta = models.CharField(max_length=1500, blank=True, null=True)
    class Meta:
        verbose_name = 'Video'
        verbose_name_plural = 'Videos'
    def __str__(self):
        """Display name for admin/shell; note title may be None."""
        return self.title
| 23 | 852 | 23 |
417bba34d989778ffd40ffac7f4e3a0415f9798f | 11,193 | py | Python | Filter3x3.py | charlie-jones/Chinese-Character-Recognition | 26ba530effa4d14542ffc76bfbb1dec1795c18b0 | [
"MIT"
] | 2 | 2020-07-26T14:18:08.000Z | 2020-11-07T01:56:31.000Z | Filter3x3.py | charlie-jones/Chinese-Character-Recognition | 26ba530effa4d14542ffc76bfbb1dec1795c18b0 | [
"MIT"
] | null | null | null | Filter3x3.py | charlie-jones/Chinese-Character-Recognition | 26ba530effa4d14542ffc76bfbb1dec1795c18b0 | [
"MIT"
] | null | null | null | import numpy as np
import os
import pickle
#128x128
####################################################
'''
Inputs 128x128 pixel array
Returns label where:
label 0 = 1
label 1 = 2
etc
'''
'''
returns an array of arrays, each one is the data from one image
'''
###########################################
# training Code for class (comment it before running flask app)
#train()
# for filename in os.listdir('[more here]/images'):
# data = readTrainingData(path + filename)
# character = data[6]
# character = np.array(character, dtype='int')
# for i in range(128):
# print()
# for j in range(128):
# if (character[i][j] == 255):
# print('*', end ="")
# else:
# print('7', end ="")
# print()
# print('------------------------------------------------------------')
# print()
# print()
| 35.087774 | 133 | 0.558653 | import numpy as np
import os
import pickle
#128x128
class Filter3x3:
n_filters = 0
PATH_NAME = '' # set this if you want to train it
filters = []
weights = []
biases = []
# generate weights and biases
# self.weights = random.randn(n_inputs, n_nodes) / n_inputs
# self.biases = zeros(n_nodes)
lastInShape = []
lastIn = []
lastTotals = 0
lastPoolIn = []
lastFilterIn = []
'''
Takes 2d matrix of image and transforms it with all the 3x3 filters
Outputs 3d array of transformed images
'''
def filter(self, imageMatrix): # input image 2d array/matrix
imageMatrix = np.subtract(np.divide(imageMatrix, 255), 0.5) # make values between -0.5 and 0.5 #
#imageMatrix = pad(imageMatrix, (1, 1), 'constant') # pad 0s around
self.lastFilterIn = imageMatrix
h, w = imageMatrix.shape
transformedImage = np.zeros((self.n_filters, h-2, w-2)) # same dimension as original image matrix
for k in range(self.n_filters):
for i in range(h-2): # iterates all possible 3x3 regions in the image
for j in range(w-2):
temp3x3 = imageMatrix[i:(i+3), j:(j+3)] #selects 3x3 area using current indexes
transformedImage[k, i, j] = np.sum(temp3x3 * self.filters[k])
return transformedImage
'''
Backward prop for filter
'''
def bpFilter(self, lossGradient, learn_rate):
lossGradientFilters = np.zeros(self.filters.shape)
h, w = self.lastFilterIn.shape
for f in range(self.n_filters):
for i in range(h-2): # iterates all possible size x size regions in the image
for j in range(w-2):
tempSel = self.lastFilterIn[i:(i+3), j:(j+3)]
lossGradientFilters[f] += tempSel * lossGradient[f, i, j]
# Update filters
self.filters -= learn_rate * lossGradientFilters
#1st layer -> return nothing
return None
'''
Cuts down the size of image to get rid of redundant info
'''
def pool(self, imageMatrix): # pool by size of 2
x, h, w = imageMatrix.shape
h = h // 2
w = w // 2
self.lastPoolIn = imageMatrix
transformedImage = np.zeros((self.n_filters, h, w)) # same dimension as original image matrix
for k in range(self.n_filters):
for i in range(h): # iterates all possible size x size regions in the image
for j in range(w):
tempSel = imageMatrix[k, (i * 2):(i * 2 + 2), (j * 2):(j * 2 + 2)]
transformedImage[k, i, j] = np.amax(tempSel) # was amax
return transformedImage
'''
Pooling back prop. reverse of pool()
'''
def bpPool(self, lossGradient):
    """Backprop through the 2x2 max-pool.

    Each output gradient is routed back to the pixel(s) that held the
    maximum in its 2x2 block; ties all receive the gradient, matching
    the forward pass's np.amax selection. Other positions stay zero.
    """
    _, rows, cols = self.lastPoolIn.shape
    out_h, out_w = rows // 2, cols // 2
    upstream = np.zeros(self.lastPoolIn.shape)
    for r in range(out_h):
        for c in range(out_w):
            block = self.lastPoolIn[0:self.n_filters, 2 * r:2 * r + 2, 2 * c:2 * c + 2]
            n_f, bh, bw = block.shape
            maxima = np.amax(block, axis=(1, 2))  # per-channel block max
            for f_idx in range(n_f):
                for br in range(bh):
                    for bc in range(bw):
                        if block[f_idx, br, bc] == maxima[f_idx]:
                            upstream[f_idx, 2 * r + br, 2 * c + bc] = lossGradient[f_idx, r, c]
    return upstream
'''
Calculate the probablity of result
'''
def softmax(self, input):
    """Fully-connected layer followed by a softmax.

    Flattens `input`, applies self.weights and self.biases, and returns a
    1D array of class probabilities (one per output node). The pre-flatten
    shape, flattened input and raw totals are cached for backprop.
    """
    self.lastInShape = input.shape  # before flatten, needed to reshape gradients
    flat = input.flatten()
    self.lastIn = flat              # after flatten
    totals = np.dot(flat, self.weights) + self.biases
    self.lastTotals = totals
    # Subtract the max before exponentiating for numerical stability
    # (softmax is shift-invariant, so the probabilities are unchanged;
    # this avoids the overflow the original code was prone to).
    ex = np.exp(totals - np.max(totals))
    return ex / np.sum(ex, axis=0)
'''
Derive gradient for output
'''
def bpSoftMax(self, lossGradient, learn_rate):
    """Backprop through the softmax + fully-connected layer.

    Only one entry of lossGradient is expected to be nonzero (the true
    label's), so the function returns from inside the loop on the first
    nonzero gradient after one SGD update of weights and biases.
    Returns the loss gradient w.r.t. the layer input, reshaped to the
    pre-flatten shape cached by softmax().
    """
    for i, gradient in enumerate(lossGradient):
        if gradient == 0:
            continue
        # e^totals of the cached raw scores
        # NOTE(review): exponentiates unshifted totals — can overflow for
        # large scores; confirm whether stabilization is needed here.
        t_exp = np.exp(self.lastTotals)
        # Sum of all e^totals
        S = np.sum(t_exp)
        # Gradients of out[i] against totals (softmax Jacobian row i)
        d_out_d_t = -t_exp[i] * t_exp / (S ** 2)
        d_out_d_t[i] = t_exp[i] * (S - t_exp[i]) / (S ** 2)
        # Gradients of totals against weights/biases/input
        d_t_d_w = self.lastIn
        d_t_d_b = 1
        d_t_d_inputs = self.weights
        # Gradients of loss against totals
        d_L_d_t = gradient * d_out_d_t
        # Gradients of loss against weights/biases/input
        d_L_d_w = d_t_d_w[np.newaxis].T @ d_L_d_t[np.newaxis]
        d_L_d_b = d_L_d_t * d_t_d_b
        d_L_d_inputs = d_t_d_inputs @ d_L_d_t
        # Update weights / biases (vanilla SGD step)
        self.weights -= learn_rate * d_L_d_w
        self.biases -= learn_rate * d_L_d_b
        # Return immediately: only one class contributes to the loss.
        return d_L_d_inputs.reshape(self.lastInShape)
# save the weights of the network to a file named weights.txt (so we can save the weights after training the network)
def saveWeights(self):
    """Pickle the fully-connected layer weights to weights.txt."""
    # 'with' guarantees the handle is closed even if pickling raises
    # (the original closed it manually and leaked on error).
    with open("weights.txt", "wb+") as f:
        pickle.dump(self.weights, f)
# read the saved weights from weights.txt and set the network's weights to those
def readWeights(self):
    """Load pickled fully-connected layer weights from weights.txt."""
    # Context manager closes the file even if unpickling raises.
    with open("weights.txt", "rb") as f:
        self.weights = pickle.load(f)
# save biases to biases.txt
def saveBiases(self):
    """Pickle the output-layer biases to biases.txt."""
    # Context manager replaces manual close so the handle cannot leak.
    with open("biases.txt", "wb+") as f:
        pickle.dump(self.biases, f)
# read biases from biases.txt and set network's biases to those
def readBiases(self):
    """Load pickled output-layer biases from biases.txt."""
    # Context manager closes the file even if unpickling raises.
    with open("biases.txt", "rb") as f:
        self.biases = pickle.load(f)
# save biases to biases.txt
def saveFilters(self):
    """Pickle the convolution filters to filters.txt."""
    # Context manager replaces manual close so the handle cannot leak.
    with open("filters.txt", "wb+") as f:
        pickle.dump(self.filters, f)
# read biases from biases.txt and set network's biases to those
def readFilters(self):
    """Load pickled convolution filters from filters.txt.

    Also refreshes self.n_filters from the loaded array's first axis so
    the layer's filter count always matches the data on disk.
    """
    with open("filters.txt", "rb") as f:
        self.filters = pickle.load(f)
    self.n_filters = self.filters.shape[0]
def randWeightsBiases(self, n_filters, n_inputs, n_nodes):
    """Record the network dimensions (currently only the filter count).

    Despite the name, random initialization is disabled here: weights,
    biases and filters are expected to be loaded from disk afterwards
    (see readWeights/readBiases/readFilters). n_inputs and n_nodes are
    accepted for interface compatibility but unused.
    """
    self.n_filters = n_filters
####################################################
'''
Inputs 128x128 pixel array
Returns label where:
label 0 = 1
label 1 = 2
etc
'''
def getCharacter(character, filter):
    """Classify a 128x128 pixel array with the given network.

    The image is binarized in place (pixels > 90 become 255, the rest 0),
    a debug rendering is printed, then the network's filter/pool/softmax
    pipeline produces class probabilities.

    Returns a tuple of the three most probable labels, 1-based:
    (best, second, third).
    """
    # Binarize: bright pixels -> 255, everything else -> 0 (mutates input).
    character[character > 90] = 255
    character[character <= 90] = 0
    # Debug visualization of the binarized glyph.
    for i in range(128):
        print()
        for j in range(128):
            if (character[i][j] == 255):
                print('*', end ="")
            else:
                print('7', end ="")
    out = filter.filter(character)
    out = filter.pool(out)
    out = filter.softmax(out)  # array of probabilities
    print(out)
    # Take the top three indices by repeatedly knocking out the current max.
    # (The unreachable second `return np.argmax(out)` in the original was
    # dead code and has been removed.)
    largest = np.argmax(out)
    out[largest] = -1
    secondlargest = np.argmax(out)
    out[secondlargest] = -1
    thirdlargest = np.argmax(out)
    return (largest + 1, secondlargest + 1, thirdlargest + 1)
'''
returns an array of arrays, each one is the data from one image
'''
def readTrainingData(filename):
    """Parse a training file into a list of 128x128 arrays.

    The file is a sequence of records introduced by the token 'image ',
    each followed by an index token and 128*128 whitespace-separated
    pixel values. The index token is dropped; each record is reshaped
    into a (128, 128) array of strings.
    """
    # 'with' closes the handle — the original opened the file and never
    # closed it (resource leak).
    with open(filename, 'r') as f:
        contents = f.read()
    records = contents.split('image ')
    records.pop(0)  # discard whatever precedes the first 'image ' token
    rtn = []
    for entry in records:
        tokens = entry.split()
        tokens = tokens[1:]  # drop the leading image-index token
        rtn.append(np.reshape(tokens, (128, 128)))  # 1D token list -> 2D array
    return rtn
###########################################
def train():
    """Train the network over all image files, then save the parameters.

    Iterates every file in PATH_NAME + 'images/' (PATH_NAME and Filter3x3
    are defined elsewhere in this file — not visible here), runs a
    forward/backward pass per label, prints running loss/accuracy every
    `step_progress` steps, and pickles the resulting weights, biases and
    filters to disk.
    """
    learning_rate = 0.005  # making too high might cause overflow errors
    num_possible_inputs = 10   # number of output classes
    num_filters = 5
    step_progress = 10         # report loss/accuracy every N samples
    print('started')
    loss = 0
    num_correct = 0
    filter = Filter3x3()
    # FC input size = num_filters * 63 * 63 (pooled 126x126 feature maps).
    filter.randWeightsBiases(num_filters, num_filters * 63 * 63, num_possible_inputs)
    # Resume from previously saved parameters rather than random init.
    filter.readFilters()
    filter.readWeights()
    filter.readBiases()
    i = 1
    labels = [0,1,2,3,4,5,6,7,8,9]
    path = PATH_NAME + 'images/'
    for filename in os.listdir(path):
        np.random.shuffle(labels)  # visit labels in random order per file
        data = readTrainingData(path + filename)
        for label in labels:
            character = data[label]
            # forward pass
            character = np.array(character, dtype='int')
            out = filter.filter(character)
            out = filter.pool(out)
            out = filter.softmax(out)  # array of probabilities
            l = -np.log(out[label])    # cross-entropy loss for the true label
            acc = 1 if np.argmax(out) == label else 0
            loss += l
            num_correct += acc
            print(str(np.argmax(out)+1) + ":" + str(label+1))
            # Seed gradient for softmax backprop: only the true label is nonzero.
            gradient = np.zeros(num_possible_inputs)
            gradient[label] = -1 / out[label]
            # backward pass through the whole network
            gradient = filter.bpSoftMax(gradient, learning_rate)
            gradient = filter.bpPool(gradient)
            gradient = filter.bpFilter(gradient, learning_rate)
            if i > 0 and i % step_progress == 0:
                print(
                    filename + ' : [Step %d] : Average Loss %.3f | Accuracy: %d%%' %
                    (i, loss / step_progress, num_correct / step_progress * 100)
                )
                loss = 0
                num_correct = 0
            i+=1
    # Persist the trained parameters.
    filter.saveWeights()
    filter.saveBiases()
    filter.saveFilters();
    print("done. saved")
# training Code for class (comment it before running flask app)
#train()
# for filename in os.listdir('[more here]/images'):
# data = readTrainingData(path + filename)
# character = data[6]
# character = np.array(character, dtype='int')
# for i in range(128):
# print()
# for j in range(128):
# if (character[i][j] == 255):
# print('*', end ="")
# else:
# print('7', end ="")
# print()
# print('------------------------------------------------------------')
# print()
# print()
| 8,637 | 1,539 | 89 |
9a90caab9991af9886325ebdd19a8ec880dc8732 | 1,381 | py | Python | stalinsort.py | Gagis/stalinsort | b5ad8dad2b76ab5843deb9ba66f9eabb98d34479 | [
"MIT"
] | null | null | null | stalinsort.py | Gagis/stalinsort | b5ad8dad2b76ab5843deb9ba66f9eabb98d34479 | [
"MIT"
] | null | null | null | stalinsort.py | Gagis/stalinsort | b5ad8dad2b76ab5843deb9ba66f9eabb98d34479 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Python implementation of the StalinSort algorithm.
References
----------
- :cite:`mathew` : @mathew@mastodon.social (2018/10/26 04:20:16)
''I came up with a single pass O(n) sort algorithm I call StalinSort. You
iterate down the list of elements checking if they're in order. Any element
which is out of order is eliminated. At the end you have a sorted list.''
"""
def stalinsort(iterable, key=None, ascending=False):
    """Sorts iterable according to the single pass O(n) StalinSort algorithm.

    Parameters
    ----------
    iterable: iterable object
    key: function
        A function of one argument that is used to extract a comparison key
        from each element. Default is None.

    Returns
    -------
    survivors: list
        List of surviving elements of iterable.

    Example
    -------
    >>>from stalinsort import stalinsort
    >>>a = [3, 2, 5, 7, 1, 3]
    >>>stalinsort(a)
    [3, 2, 1]
    """
    ascending = False  # There is only descent under communism.
    # Fix: the original called iterable.apply(key), which only exists on
    # pandas objects and crashed for plain lists; build the key list portably.
    if key is None:
        keys = list(iterable)
    else:
        keys = [key(item) for item in iterable]
    survivors = iterable[:1]  # I prefer to think in terms of survivors.
    if not survivors:
        return survivors
    # Fix: compare in key space. The original compared the last surviving
    # *element* against the next element's *key*, which is wrong whenever
    # a key function is supplied.
    last_key = keys[0]
    for index, victim in enumerate(iterable[1:]):
        if last_key >= keys[index + 1]:
            survivors.append(victim)
            last_key = keys[index + 1]
    return survivors
| 27.62 | 79 | 0.621289 | # -*- coding: utf-8 -*-
"""Python implementation of the StalinSort algorithm.
References
----------
- :cite:`mathew` : @mathew@mastodon.social (2018/10/26 04:20:16)
''I came up with a single pass O(n) sort algorithm I call StalinSort. You
iterate down the list of elements checking if they're in order. Any element
which is out of order is eliminated. At the end you have a sorted list.''
"""
def stalinsort(iterable, key=None, ascending=False):
    """Single-pass O(n) StalinSort: keep each element that does not exceed
    the previously kept one; eliminate the rest.

    Parameters
    ----------
    iterable: iterable object
    key: function
        One-argument key extractor applied via iterable.apply (pandas-style).
        Default is None.

    Returns
    -------
    survivors: list
        The elements that were allowed to remain, e.g.
        stalinsort([3, 2, 5, 7, 1, 3]) -> [3, 2, 1].
    """
    ascending = False  # There is only descent under communism.
    keys = iterable.apply(key) if key is not None else list(iterable)
    survivors = iterable[:1]  # the first element always survives
    for idx, candidate in enumerate(iterable[1:]):
        if survivors[-1] >= keys[idx + 1]:
            survivors.append(candidate)
    return survivors
| 0 | 0 | 0 |
420d28be65691041897d75814dd5308850466971 | 423 | py | Python | examples/pade_agent/run_master.py | bressanmarcos/pade-fmi | e3c7a4a5c60bcd3afc8070465640c5aa8051380a | [
"MIT"
] | null | null | null | examples/pade_agent/run_master.py | bressanmarcos/pade-fmi | e3c7a4a5c60bcd3afc8070465640c5aa8051380a | [
"MIT"
] | null | null | null | examples/pade_agent/run_master.py | bressanmarcos/pade-fmi | e3c7a4a5c60bcd3afc8070465640c5aa8051380a | [
"MIT"
] | 1 | 2020-12-11T04:09:49.000Z | 2020-12-11T04:09:49.000Z | import matplotlib.pyplot as plt
import numpy as np
from pyfmi import load_fmu
# Load the exported FMU and run a co-simulation driven by a cosine input.
model = load_fmu('./PadeSlave.fmu')
# Stimulus: feed 'inputVariable' with u(t) = 5*cos(t).
stimulus = ('inputVariable', lambda t: 5. * np.cos(t))
result = model.simulate(final_time=30, input=stimulus)
# Plot the stimulus alongside the model's response.
plt.plot(result['time'], result['inputVariable'])
plt.plot(result['time'], result['outputVariable'])
plt.legend(['inputVariable', 'outputVariable'])
plt.xlabel('time')
plt.show()
| 26.4375 | 58 | 0.742317 | import matplotlib.pyplot as plt
import numpy as np
from pyfmi import load_fmu
# Load the FMU (Functional Mock-up Unit) exported for co-simulation.
model = load_fmu('./PadeSlave.fmu')
# Drive the model's 'inputVariable' with the signal u(t) = 5*cos(t).
inputs = ('inputVariable', lambda t: 5. * np.cos(t))
simulation = model.simulate(final_time=30, input=inputs)
# Plot the driving input against the model's response.
plt.plot(simulation['time'], simulation['inputVariable'])
plt.plot(simulation['time'], simulation['outputVariable'])
plt.legend(['inputVariable', 'outputVariable'])
plt.xlabel('time')
plt.show()
| 0 | 0 | 0 |
1b433d882adc6a9898daff097cf811d91e1207bd | 40,416 | py | Python | Tools/ResizePic/nnedi3_resample.py | capric98/macOS-Miscellaneous | 0b3a42bfd5aad2a4477c0e220c5c55e56cda2e57 | [
"WTFPL"
] | 1 | 2021-06-21T17:30:22.000Z | 2021-06-21T17:30:22.000Z | Tools/ResizePic/nnedi3_resample.py | capric98/macOS-Miscellaneous | 0b3a42bfd5aad2a4477c0e220c5c55e56cda2e57 | [
"WTFPL"
] | null | null | null | Tools/ResizePic/nnedi3_resample.py | capric98/macOS-Miscellaneous | 0b3a42bfd5aad2a4477c0e220c5c55e56cda2e57 | [
"WTFPL"
] | null | null | null | # Requirements:
# - fmtc
# - nnedi3
# From:
# - https://github.com/mawen1250/VapourSynth-script
# - https://github.com/HomeOfVapourSynthEvolution/mvsfunc
import vapoursynth as vs
import math
## Gamma conversion functions from HAvsFunc-r18
# Convert the luma channel to linear light
# Convert back a clip to gamma-corrected luma
# Apply the inverse sigmoid curve to a clip in linear luminance
# Convert back a clip to linear luminance
## Gamma conversion functions from HAvsFunc-r18 | 39.584721 | 677 | 0.606715 | # Requirements:
# - fmtc
# - nnedi3
# From:
# - https://github.com/mawen1250/VapourSynth-script
# - https://github.com/HomeOfVapourSynthEvolution/mvsfunc
import vapoursynth as vs
import math
def SetColorSpace(clip, ChromaLocation=None, ColorRange=None, Primaries=None, Matrix=None, Transfer=None):
    """Set or delete colorimetry frame properties on a clip.

    For each of the five properties, the argument is interpreted as:
      None  -> leave the property untouched
      False -> delete the property
      True  -> no-op (kept for backward compatibility)
      int   -> set the property (_ChromaLocation, _ColorRange, ...) to it

    ChromaLocation is validated against [0, 5] and ColorRange against
    [0, 1]; Primaries/Matrix/Transfer accept any int (matches the
    original behavior). Raises TypeError/ValueError with the same
    messages as before.
    """
    # Set VS core and function name
    core = vs.core
    funcName = 'SetColorSpace'

    if not isinstance(clip, vs.VideoNode):
        raise TypeError(funcName + ': \"clip\" must be a clip!')

    def _apply(clip, name, value, valid_range=None):
        # One table-driven handler replaces five copy-pasted branches.
        # bool must be tested before int because bool subclasses int.
        if value is None:
            return clip
        if isinstance(value, bool):
            if value is False:
                clip = core.std.SetFrameProp(clip, prop='_' + name, delete=True)
            return clip
        if isinstance(value, int):
            if valid_range is not None and not valid_range[0] <= value <= valid_range[1]:
                raise ValueError(funcName + ': valid range of \"{}\" is [{}, {}]!'.format(name, valid_range[0], valid_range[1]))
            return core.std.SetFrameProp(clip, prop='_' + name, intval=value)
        raise TypeError(funcName + ': \"{}\" must be an int or a bool!'.format(name))

    # Modify frame properties
    clip = _apply(clip, 'ChromaLocation', ChromaLocation, (0, 5))
    clip = _apply(clip, 'ColorRange', ColorRange, (0, 1))
    clip = _apply(clip, 'Primaries', Primaries)
    clip = _apply(clip, 'Matrix', Matrix)
    clip = _apply(clip, 'Transfer', Transfer)

    # Output
    return clip
def _quantization_parameters(sample=None, depth=None, full=None, chroma=None, funcName='_quantization_parameters'):
    """Return the quantization levels for a pixel format.

    Produces a dict with 'floor', 'neutral', 'ceil' and 'range' for the
    given sample type (vs.INTEGER / vs.FLOAT), bit depth, range flag
    (full vs. limited) and plane kind (chroma vs. luma). For limited
    integer range, the classic 8-bit studio levels (16/128/235/240) are
    rescaled to the requested depth.
    """
    if sample is None:
        sample = vs.INTEGER
    if depth is None:
        depth = 8
    elif depth < 1:
        raise ValueError(funcName + ': \"depth\" should not be less than 1!')
    if full is None:
        full = True
    if chroma is None:
        chroma = False

    def _scale(level):
        # Re-quantize an 8-bit level to the target depth.
        return level << (depth - 8) if depth >= 8 else level >> (8 - depth)

    qp = {}
    if sample == vs.INTEGER:
        qp['floor'] = 0 if full else _scale(16)
        qp['ceil'] = (1 << depth) - 1 if full else _scale(240 if chroma else 235)
        qp['neutral'] = _scale(128) if chroma else qp['floor']
    elif sample == vs.FLOAT:
        qp['floor'] = -0.5 if chroma else 0.0
        qp['ceil'] = 0.5 if chroma else 1.0
        qp['neutral'] = 0.0 if chroma else qp['floor']
    else:
        raise ValueError(funcName + ': Unsupported \"sample\" specified!')
    qp['range'] = qp['ceil'] - qp['floor']
    return qp
def _quantization_conversion(clip, depths=None, depthd=None, sample=None, fulls=None, fulld=None, chroma=None,\
    clamp=None, dbitPS=None, mode=None, funcName='_quantization_conversion'):
    """Requantize a clip between depths/ranges with a std.Expr expression.

    Builds per-plane RPN expressions (gain/offset, optional clamp) from
    _quantization_parameters() and applies them via core.std.Expr with a
    registered output format. `mode == 1` scales values by 256 (used by
    Depth() for the sub-8-bit path). Raises TypeError/ValueError for
    invalid arguments.
    """
    # Set VS core and function name
    core = vs.core

    if not isinstance(clip, vs.VideoNode):
        raise TypeError(funcName + ': \"clip\" must be a clip!')

    # Get properties of input clip
    sFormat = clip.format

    sColorFamily = sFormat.color_family
    sIsRGB = sColorFamily == vs.RGB
    sIsYUV = sColorFamily == vs.YUV
    sIsGRAY = sColorFamily == vs.GRAY
    sIsYCOCG = sColorFamily == vs.YCOCG
    if sColorFamily == vs.COMPAT:
        raise ValueError(funcName + ': color family *COMPAT* is not supported!')

    sbitPS = sFormat.bits_per_sample
    sSType = sFormat.sample_type

    if depths is None:
        depths = sbitPS
    elif not isinstance(depths, int):
        raise TypeError(funcName + ': \"depths\" must be an int!')

    if fulls is None:
        # If not set, assume limited range for YUV and Gray input
        fulls = False if sIsYUV or sIsGRAY else True
    elif not isinstance(fulls, int):
        raise TypeError(funcName + ': \"fulls\" must be a bool!')

    if chroma is None:
        chroma = False
    elif not isinstance(chroma, int):
        raise TypeError(funcName + ': \"chroma\" must be a bool!')
    elif not sIsGRAY:
        # chroma flag only makes sense for single-plane (GRAY) input
        chroma = False

    # Get properties of output clip
    if depthd is None:
        pass
    elif not isinstance(depthd, int):
        raise TypeError(funcName + ': \"depthd\" must be an int!')
    if sample is None:
        if depthd is None:
            dSType = sSType
            depthd = depths
        else:
            # NOTE(review): infers the sample type from dbitPS, which can
            # still be None at this point — confirm against upstream mvsfunc
            # whether depthd was intended here.
            dSType = vs.FLOAT if dbitPS >= 32 else vs.INTEGER
    elif not isinstance(sample, int):
        raise TypeError(funcName + ': \"sample\" must be an int!')
    elif sample != vs.INTEGER and sample != vs.FLOAT:
        raise ValueError(funcName + ': \"sample\" must be either 0(vs.INTEGER) or 1(vs.FLOAT)!')
    else:
        dSType = sample
    # NOTE(review): these two checks read dbitPS before its default is
    # assigned below — if dbitPS is None they raise TypeError on comparison.
    if dSType == vs.INTEGER and (dbitPS < 1 or dbitPS > 16):
        raise ValueError(funcName + ': {0}-bit integer output is not supported!'.format(dbitPS))
    if dSType == vs.FLOAT and (dbitPS != 16 and dbitPS != 32):
        raise ValueError(funcName + ': {0}-bit float output is not supported!'.format(dbitPS))

    if fulld is None:
        fulld = fulls
    elif not isinstance(fulld, int):
        raise TypeError(funcName + ': \"fulld\" must be a bool!')

    if clamp is None:
        # Only clamp integer output by default
        clamp = dSType == vs.INTEGER
    elif not isinstance(clamp, int):
        raise TypeError(funcName + ': \"clamp\" must be a bool!')

    if dbitPS is None:
        if depthd < 8:
            dbitPS = 8
        else:
            dbitPS = depthd
    elif not isinstance(dbitPS, int):
        raise TypeError(funcName + ': \"dbitPS\" must be an int!')

    if mode is None:
        mode = 0
    elif not isinstance(mode, int):
        raise TypeError(funcName + ': \"mode\" must be an int!')
    elif depthd >= 8:
        # mode 1 (x256 scaling) only applies to sub-8-bit targets
        mode = 0

    dFormat = core.register_format(sFormat.color_family, dSType, dbitPS, sFormat.subsampling_w, sFormat.subsampling_h)

    # Expression function: builds the RPN string for one plane kind
    def gen_expr(chroma, mode):
        if dSType == vs.INTEGER:
            exprLower = 0
            # NOTE(review): '<<' binds looser than '-', so this evaluates
            # 1 << (bits - 1), not (1 << bits) - 1 — confirm intent; it only
            # affects whether the 'min' clamp op is emitted.
            exprUpper = 1 << (dFormat.bytes_per_sample * 8) - 1
        else:
            exprLower = float('-inf')
            exprUpper = float('inf')

        sQP = _quantization_parameters(sSType, depths, fulls, chroma, funcName)
        dQP = _quantization_parameters(dSType, depthd, fulld, chroma, funcName)

        # Linear mapping: dst = src * gain + offset
        gain = dQP['range'] / sQP['range']
        offset = dQP['neutral' if chroma else 'floor'] - sQP['neutral' if chroma else 'floor'] * gain

        if mode == 1:
            scale = 256
            gain = gain * scale
            offset = offset * scale
        else:
            scale = 1

        if gain != 1 or offset != 0 or clamp:
            expr = " x "
            if gain != 1: expr = expr + " {} * ".format(gain)
            if offset != 0: expr = expr + " {} + ".format(offset)
            if clamp:
                # Only emit clamp ops when the bounds are actually tighter
                # than what the output format can represent.
                if dQP['floor'] * scale > exprLower: expr = expr + " {} max ".format(dQP['floor'] * scale)
                if dQP['ceil'] * scale < exprUpper: expr = expr + " {} min ".format(dQP['ceil'] * scale)
        else:
            expr = ""

        return expr

    # Process: one expression for luma-like planes, one for chroma planes
    Yexpr = gen_expr(False, mode)
    Cexpr = gen_expr(True, mode)
    if sIsYUV or sIsYCOCG:
        expr = [Yexpr, Cexpr]
    elif sIsGRAY and chroma:
        expr = Cexpr
    else:
        expr = Yexpr
    clip = core.std.Expr(clip, expr, format=dFormat.id)

    # Output: record the resulting range in frame props
    clip = SetColorSpace(clip, ColorRange=0 if fulld else 1)
    return clip
def zDepth(clip, sample=None, depth=None, range=None, range_in=None, dither_type=None, cpu_type=None, prefer_props=None):
    """Convert sample type / bit depth through whichever zimg API exists.

    Tries, in order: the built-in resizer (core R29+), the standalone
    zimg plugin's Format(), then its legacy Depth(). Raises
    AttributeError when none of the three interfaces is available.
    (`range` and `format` intentionally mirror the zimg parameter names
    even though they shadow builtins.)
    """
    # Set VS core and function name
    core = vs.core
    funcName = 'zDepth'

    if not isinstance(clip, vs.VideoNode):
        raise TypeError(funcName + ': \"clip\" must be a clip!')

    # Get properties of input clip
    sFormat = clip.format

    # Get properties of output clip: default to the source's
    if sample is None:
        sample = sFormat.sample_type
    elif not isinstance(sample, int):
        raise TypeError(funcName + ': \"sample\" must be an int!')
    if depth is None:
        depth = sFormat.bits_per_sample
    elif not isinstance(depth, int):
        raise TypeError(funcName + ': \"depth\" must be an int!')
    format = core.register_format(sFormat.color_family, sample, depth, sFormat.subsampling_w, sFormat.subsampling_h)

    # Process: pick the newest available zimg entry point
    zimgResize = core.version_number() >= 29
    zimgPlugin = core.get_plugins().__contains__('the.weather.channel')
    if zimgResize:
        return core.resize.Bicubic(clip, format=format.id, range=range, range_in=range_in, dither_type=dither_type, prefer_props=prefer_props)
    if zimgPlugin and core.z.get_functions().__contains__('Format'):
        return core.z.Format(clip, format=format.id, range=range, range_in=range_in, dither_type=dither_type, cpu_type=cpu_type)
    if zimgPlugin and core.z.get_functions().__contains__('Depth'):
        return core.z.Depth(clip, dither=dither_type, sample=sample, depth=depth, fullrange_in=range_in, fullrange_out=range)
    raise AttributeError(funcName + ': Available zimg not found!')
def Depth(input, depth=None, sample=None, fulls=None, fulld=None, \
    dither=None, useZ=None, prefer_props=None, ampo=None, ampn=None, dyn=None, staticnoise=None):
    """Bit-depth / sample-type conversion with range handling and dithering.

    Converts `input` to the requested depth/sample type, choosing between
    zDepth (zimg) and core.fmtc.bitdepth and translating the `dither`
    argument between the two back-ends' conventions. Depths below 8 bits
    are emulated via _quantization_conversion. Returns the converted clip
    (or `input` unchanged when no conversion is needed).
    """
    # Set VS core and function name
    core = vs.core
    funcName = 'Depth'
    clip = input

    if not isinstance(input, vs.VideoNode):
        raise TypeError(funcName + ': \"input\" must be a clip!')

    prefer_props_range = None

    # Get properties of input clip
    sFormat = input.format

    sColorFamily = sFormat.color_family
    sIsRGB = sColorFamily == vs.RGB
    sIsYUV = sColorFamily == vs.YUV
    sIsGRAY = sColorFamily == vs.GRAY
    sIsYCOCG = sColorFamily == vs.YCOCG
    if sColorFamily == vs.COMPAT:
        raise ValueError(funcName + ': color family *COMPAT* is not supported!')

    sbitPS = sFormat.bits_per_sample
    sSType = sFormat.sample_type

    if fulls is None:
        # If not set, assume limited range for YUV and Gray input
        fulls = False if sIsYUV or sIsGRAY else True
    elif not isinstance(fulls, int):
        raise TypeError(funcName + ': \"fulls\" must be a bool!')

    # Get properties of output clip
    lowDepth = False

    if depth is None:
        dbitPS = sbitPS
    elif not isinstance(depth, int):
        raise TypeError(funcName + ': \"depth\" must be an int!')
    else:
        if depth < 8:
            # Sub-8-bit targets are processed at 8 bits with a special path
            dbitPS = 8
            lowDepth = True
        else:
            dbitPS = depth
    if sample is None:
        if depth is None:
            dSType = sSType
            depth = dbitPS
        else:
            dSType = vs.FLOAT if dbitPS >= 32 else vs.INTEGER
    elif not isinstance(sample, int):
        raise TypeError(funcName + ': \"sample\" must be an int!')
    elif sample != vs.INTEGER and sample != vs.FLOAT:
        raise ValueError(funcName + ': \"sample\" must be either 0(vs.INTEGER) or 1(vs.FLOAT)!')
    else:
        dSType = sample
    # When only the sample type changes, pick its natural depth
    if depth is None and sSType != vs.FLOAT and sample == vs.FLOAT:
        dbitPS = 32
    elif depth is None and sSType != vs.INTEGER and sample == vs.INTEGER:
        dbitPS = 16
    if dSType == vs.INTEGER and (dbitPS < 1 or dbitPS > 16):
        raise ValueError(funcName + ': {0}-bit integer output is not supported!'.format(dbitPS))
    if dSType == vs.FLOAT and (dbitPS != 16 and dbitPS != 32):
        raise ValueError(funcName + ': {0}-bit float output is not supported!'.format(dbitPS))

    if fulld is None:
        fulld = fulls
    elif not isinstance(fulld, int):
        raise TypeError(funcName + ': \"fulld\" must be a bool!')

    # Low-depth support: emulate depths below 8 bits
    if lowDepth:
        if dither == "none" or dither == 1:
            # No dithering requested: quantize down then back up and return
            clip = _quantization_conversion(clip, sbitPS, depth, vs.INTEGER, fulls, fulld, False, False, 8, 0, funcName)
            clip = _quantization_conversion(clip, depth, 8, vs.INTEGER, fulld, fulld, False, False, 8, 0, funcName)
            return clip
        else:
            # Pre-scale into 16 bits (mode 1) and fall through to dithering
            full = fulld
            clip = _quantization_conversion(clip, sbitPS, depth, vs.INTEGER, fulls, full, False, False, 16, 1, funcName)
            sSType = vs.INTEGER
            sbitPS = 16
            fulls = False
            fulld = False

    # Whether to use zDepth or fmtc.bitdepth for conversion
    # When 13-,15-bit integer or 16-bit float format is involved, force using zDepth
    if useZ is None:
        useZ = False
    elif not isinstance(useZ, int):
        raise TypeError(funcName + ': \"useZ\" must be a bool!')
    if sSType == vs.INTEGER and (sbitPS == 13 or sbitPS == 15):
        useZ = True
    if dSType == vs.INTEGER and (dbitPS == 13 or dbitPS == 15):
        useZ = True
    if (sSType == vs.FLOAT and sbitPS < 32) or (dSType == vs.FLOAT and dbitPS < 32):
        useZ = True

    if prefer_props is None:
        prefer_props = False
    elif not isinstance(prefer_props, int):
        raise TypeError(funcName + ': \"prefer_props\" must be a bool!')
    if prefer_props_range is None:
        prefer_props_range = prefer_props

    # Dithering type
    if ampn is not None and not isinstance(ampn, float) and not isinstance(ampn, int):
        raise TypeError(funcName + ': \"ampn\" must be an int or a float!')

    if dither is None:
        # Default: no dithering when precision cannot be lost, otherwise
        # error diffusion; spelled as a string for zDepth, an int for fmtc
        if dbitPS == 32 or (dbitPS >= sbitPS and fulld == fulls and fulld == False):
            dither = "none" if useZ else 1
        else:
            dither = "error_diffusion" if useZ else 3
    elif not isinstance(dither, int) and not isinstance(dither, str):
        raise TypeError(funcName + ': \"dither\" must be an int or a str!')
    else:
        if isinstance(dither, str):
            dither = dither.lower()
            if dither != "none" and dither != "ordered" and dither != "random" and dither != "error_diffusion":
                raise ValueError(funcName + ': Unsupported \"dither\" specified!')
        else:
            if dither < 0 or dither > 7:
                raise ValueError(funcName + ': Unsupported \"dither\" specified!')
    # Translate between fmtc's integer dmode and zimg's string dither_type
    if useZ and isinstance(dither, int):
        if dither == 0:
            dither = "ordered"
        elif dither == 1 or dither == 2:
            if ampn is not None and ampn > 0:
                dither = "random"
            else:
                dither = "none"
        else:
            dither = "error_diffusion"
    elif not useZ and isinstance(dither, str):
        if dither == "none":
            dither = 1
        elif dither == "ordered":
            dither = 0
        elif dither == "random":
            if ampn is None:
                dither = 1
                ampn = 1
            elif ampn > 0:
                dither = 1
            else:
                dither = 3
        else:
            dither = 3

    if not useZ:
        if ampo is None:
            # fmtc convention: stronger pattern amplitude for ordered dither
            ampo = 1.5 if dither == 0 else 1
        elif not isinstance(ampo, float) and not isinstance(ampo, int):
            raise TypeError(funcName + ': \"ampo\" must be an int or a float!')

    # Skip processing if not needed
    if dSType == sSType and dbitPS == sbitPS and (sSType == vs.FLOAT or (fulld == fulls and not prefer_props_range)) and not lowDepth:
        return clip

    # Apply conversion
    if useZ:
        clip = zDepth(clip, sample=dSType, depth=dbitPS, range=fulld, range_in=fulls, dither_type=dither, prefer_props=prefer_props_range)
    else:
        clip = core.fmtc.bitdepth(clip, bits=dbitPS, flt=dSType, fulls=fulls, fulld=fulld, dmode=dither, ampo=ampo, ampn=ampn, dyn=dyn, staticnoise=staticnoise)
        clip = SetColorSpace(clip, ColorRange=0 if fulld else 1)

    # Low-depth support: quantize the dithered 16-bit result back to 8 bits
    # (`full` was set in the lowDepth branch above; the other branch returned)
    if lowDepth:
        clip = _quantization_conversion(clip, depth, 8, vs.INTEGER, full, full, False, False, 8, 0, funcName)

    # Output
    return clip
def nnedi3_resample(input, target_width=None, target_height=None, src_left=None, src_top=None, src_width=None, src_height=None, csp=None, mats=None, matd=None, cplaces=None, cplaced=None, fulls=None, fulld=None, curves=None, curved=None, sigmoid=None, scale_thr=None, nsize=None, nns=None, qual=None, etype=None, pscrn=None, opt=None, int16_prescreener=None, int16_predictor=None, exp=None, kernel=None, invks=False, taps=None, invkstaps=3, a1=None, a2=None, chromak_up=None, chromak_up_taps=None, chromak_up_a1=None, chromak_up_a2=None, chromak_down=None, chromak_down_invks=False, chromak_down_invkstaps=3, chromak_down_taps=None, chromak_down_a1=None, chromak_down_a2=None):
core = vs.core
funcName = 'nnedi3_resample'
# Get property about input clip
if not isinstance(input, vs.VideoNode):
raise TypeError(funcName + ': This is not a clip!')
sFormat = input.format
sColorFamily = sFormat.color_family
if sColorFamily == vs.COMPAT:
raise ValueError(funcName + ': Color family *COMPAT* of input clip is not supported!')
sIsGRAY = sColorFamily == vs.GRAY
sIsYUV = sColorFamily == vs.YUV or sColorFamily == vs.YCOCG
sIsRGB = sColorFamily == vs.RGB
sbitPS = sFormat.bits_per_sample
sHSubS = 1 << sFormat.subsampling_w
sVSubS = 1 << sFormat.subsampling_h
sIsSubS = sHSubS > 1 or sVSubS > 1
sPlaneNum = sFormat.num_planes
# Get property about output clip
dFormat = sFormat if csp is None else core.get_format(csp)
dColorFamily = dFormat.color_family
if dColorFamily == vs.COMPAT:
raise ValueError(funcName + ': Color family *COMPAT* of output clip is not supported!')
dIsGRAY = dColorFamily == vs.GRAY
dIsYUV = dColorFamily == vs.YUV or dColorFamily == vs.YCOCG
dIsRGB = dColorFamily == vs.RGB
dbitPS = dFormat.bits_per_sample
dHSubS = 1 << dFormat.subsampling_w
dVSubS = 1 << dFormat.subsampling_h
dIsSubS = dHSubS > 1 or dVSubS > 1
dPlaneNum = dFormat.num_planes
# Parameters of format
SD = input.width <= 1024 and input.height <= 576
HD = input.width <= 2048 and input.height <= 1536
if mats is None:
mats = "601" if SD else "709" if HD else "2020"
else:
mats = mats.lower()
if matd is None:
matd = mats
else:
matd = matd.lower()
# Matrix of output clip makes sense only if dst is not of RGB
if dIsRGB:
matd = mats
# Matrix of input clip makes sense only src is not of GRAY or RGB
if sIsGRAY or sIsRGB:
mats = matd
if cplaces is None:
if sHSubS == 4:
cplaces = 'dv'
else:
cplaces = 'mpeg2'
else:
cplaces = cplaces.lower()
if cplaced is None:
if dHSubS == 4:
cplaced = 'dv'
else:
cplaced = cplaces
else:
cplaced = cplaced.lower()
if fulls is None:
fulls = sColorFamily == vs.YCOCG or sColorFamily == vs.RGB
if fulld is None:
if dColorFamily == sColorFamily:
fulld = fulls
else:
fulld = dColorFamily == vs.YCOCG or dColorFamily == vs.RGB
if curves is None:
curves = 'linear'
else:
curves = curves.lower()
if curved is None:
curved = curves
else:
curved = curved.lower()
if sigmoid is None:
sigmoid = False
# Parameters of scaling
if target_width is None:
target_width = input.width
if target_height is None:
target_height = input.height
if src_left is None:
src_left = 0
if src_top is None:
src_top = 0
if src_width is None:
src_width = input.width
elif src_width <= 0:
src_width = input.width - src_left + src_width
if src_height is None:
src_height = input.height
elif src_height <= 0:
src_height = input.height - src_top + src_height
if scale_thr is None:
scale_thr = 1.125
src_right = src_width - input.width + src_left
src_bottom = src_height - input.height + src_top
hScale = target_width / src_width
vScale = target_height / src_height
# Parameters of nnedi3
if nsize is None:
nsize = 0
if nns is None:
nns = 3
if qual is None:
qual = 2
# Parameters of fmtc.resample
if kernel is None:
if not invks:
kernel = 'spline36'
else:
kernel = 'bilinear'
else:
kernel = kernel.lower()
if chromak_up is None:
chromak_up = 'nnedi3'
else:
chromak_up = chromak_up.lower()
if chromak_up == 'softcubic':
chromak_up = 'bicubic'
if chromak_up_a1 is None:
chromak_up_a1 = 75
chromak_up_a1 = chromak_up_a1 / 100
chromak_up_a2 = 1 - chromak_up_a1
if chromak_down is None:
chromak_down = 'bicubic'
else:
chromak_down = chromak_down.lower()
if chromak_down == 'softcubic':
chromak_down = 'bicubic'
if chromak_down_a1 is None:
chromak_down_a1 = 75
chromak_down_a1 = chromak_down_a1 / 100
chromak_down_a2 = 1 - chromak_down_a1
# Procedure decision
hIsScale = hScale != 1
vIsScale = vScale != 1
isScale = hIsScale or vIsScale
hResample = hIsScale or int(src_left) != src_left or int(src_right) != src_right
vResample = vIsScale or int(src_top) != src_top or int(src_bottom) != src_bottom
resample = hResample or vResample
hReSubS = dHSubS != sHSubS
vReSubS = dVSubS != sVSubS
reSubS = hReSubS or vReSubS
sigmoid = sigmoid and resample
sGammaConv = curves != 'linear'
dGammaConv = curved != 'linear'
gammaConv = (sGammaConv or dGammaConv or sigmoid) and (resample or curved != curves)
scaleInGRAY = sIsGRAY or dIsGRAY
scaleInYUV = not scaleInGRAY and mats == matd and not gammaConv and (reSubS or (sIsYUV and dIsYUV))
scaleInRGB = not scaleInGRAY and not scaleInYUV
# If matrix conversion or gamma correction is applied, scaling will be done in RGB. Otherwise, if at least one of input&output clip is RGB and no chroma subsampling is involved, scaling will be done in RGB.
# Chroma placement relative to the frame center in luma scale
sCLeftAlign = cplaces == 'mpeg2' or cplaces == 'dv'
sHCPlace = 0 if not sCLeftAlign else 0.5 - sHSubS / 2
sVCPlace = 0
dCLeftAlign = cplaced == 'mpeg2' or cplaced == 'dv'
dHCPlace = 0 if not dCLeftAlign else 0.5 - dHSubS / 2
dVCPlace = 0
# Convert depth to 16-bit
last = Depth(input, depth=16, fulls=fulls)
# Color space conversion before scaling
if scaleInGRAY and sIsYUV:
if mats != matd:
last = core.fmtc.matrix(last, mats=mats, matd=matd, fulls=fulls, fulld=fulld, col_fam=vs.GRAY, singleout=0)
last = core.std.ShufflePlanes(last, [0], vs.GRAY)
elif scaleInGRAY and sIsRGB:
# Matrix conversion for output clip of GRAY
last = core.fmtc.matrix(last, mat=matd, fulls=fulls, fulld=fulld, col_fam=vs.GRAY, singleout=0)
fulls = fulld
elif scaleInRGB and sIsYUV:
# Chroma upsampling
if sIsSubS:
if chromak_up == 'nnedi3':
# Separate planes
Y = core.std.ShufflePlanes(last, [0], vs.GRAY)
U = core.std.ShufflePlanes(last, [1], vs.GRAY)
V = core.std.ShufflePlanes(last, [2], vs.GRAY)
# Chroma up-scaling
U = nnedi3_resample_kernel(U, Y.width, Y.height, -sHCPlace / sHSubS, -sVCPlace / sVSubS, None, None, 1, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2)
V = nnedi3_resample_kernel(V, Y.width, Y.height, -sHCPlace / sHSubS, -sVCPlace / sVSubS, None, None, 1, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2)
# Merge planes
last = core.std.ShufflePlanes([Y, U, V], [0, 0, 0], last.format.color_family)
else:
last = core.fmtc.resample(last, kernel=chromak_up, taps=chromak_up_taps, a1=chromak_up_a1, a2=chromak_up_a2, css="444", fulls=fulls, cplaces=cplaces)
# Matrix conversion
if mats == '2020cl':
last = core.fmtc.matrix2020cl(last, fulls)
else:
last = core.fmtc.matrix(last, mat=mats, fulls=fulls, fulld=True, col_fam=vs.RGB, singleout=-1)
fulls = True
elif scaleInYUV and sIsRGB:
# Matrix conversion
if matd == '2020cl':
last = core.fmtc.matrix2020cl(last, fulld)
else:
last = core.fmtc.matrix(last, mat=matd, fulls=fulls, fulld=fulld, col_fam=vs.YUV, singleout=-1)
fulls = fulld
# Scaling
if scaleInGRAY or scaleInRGB:
if gammaConv and sGammaConv:
last = GammaToLinear(last, fulls, fulls, curves, sigmoid=sigmoid)
elif sigmoid:
last = SigmoidInverse(last)
last = nnedi3_resample_kernel(last, target_width, target_height, src_left, src_top, src_width, src_height, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2, invks, invkstaps)
if gammaConv and dGammaConv:
last = LinearToGamma(last, fulls, fulls, curved, sigmoid=sigmoid)
elif sigmoid:
last = SigmoidDirect(last)
elif scaleInYUV:
# Separate planes
Y = core.std.ShufflePlanes(last, [0], vs.GRAY)
U = core.std.ShufflePlanes(last, [1], vs.GRAY)
V = core.std.ShufflePlanes(last, [2], vs.GRAY)
# Scale Y
Y = nnedi3_resample_kernel(Y, target_width, target_height, src_left, src_top, src_width, src_height, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2)
# Scale UV
dCw = target_width // dHSubS
dCh = target_height // dVSubS
dCsx = ((src_left - sHCPlace) * hScale + dHCPlace) / hScale / sHSubS
dCsy = ((src_top - sVCPlace) * vScale + dVCPlace) / vScale / sVSubS
dCsw = src_width / sHSubS
dCsh = src_height / sVSubS
U = nnedi3_resample_kernel(U, dCw, dCh, dCsx, dCsy, dCsw, dCsh, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2)
V = nnedi3_resample_kernel(V, dCw, dCh, dCsx, dCsy, dCsw, dCsh, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2)
# Merge planes
last = core.std.ShufflePlanes([Y, U, V], [0, 0, 0], last.format.color_family)
# Color space conversion after scaling
if scaleInGRAY and dIsYUV:
dCw = target_width // dHSubS
dCh = target_height // dVSubS
last = Depth(last, depth=dbitPS, fulls=fulls, fulld=fulld)
blkUV = core.std.BlankClip(last, dCw, dCh, color=[1 << (dbitPS - 1)])
last = core.std.ShufflePlanes([last, blkUV, blkUV], [0, 0, 0], dColorFamily)
elif scaleInGRAY and dIsRGB:
last = Depth(last, depth=dbitPS, fulls=fulls, fulld=fulld)
last = core.std.ShufflePlanes([last, last, last], [0, 0, 0], dColorFamily)
elif scaleInRGB and dIsYUV:
# Matrix conversion
if matd == '2020cl':
last = core.fmtc.matrix2020cl(last, fulld)
else:
last = core.fmtc.matrix(last, mat=matd, fulls=fulls, fulld=fulld, col_fam=dColorFamily, singleout=-1)
# Chroma subsampling
if dIsSubS:
dCSS = '411' if dHSubS == 4 else '420' if dVSubS == 2 else '422'
last = core.fmtc.resample(last, kernel=chromak_down, taps=chromak_down_taps, a1=chromak_down_a1, a2=chromak_down_a2, css=dCSS, fulls=fulld, cplaced=cplaced, invks=chromak_down_invks, invkstaps=chromak_down_invkstaps, planes=[2,3,3])
last = Depth(last, depth=dbitPS, fulls=fulld)
elif scaleInYUV and dIsRGB:
# Matrix conversion
if mats == '2020cl':
last = core.fmtc.matrix2020cl(last, fulls)
else:
last = core.fmtc.matrix(last, mat=mats, fulls=fulls, fulld=True, col_fam=vs.RGB, singleout=-1)
last = Depth(last, depth=dbitPS, fulls=True, fulld=fulld)
else:
last = Depth(last, depth=dbitPS, fulls=fulls, fulld=fulld)
# Output
return last
def nnedi3_resample_kernel(input, target_width=None, target_height=None, src_left=None, src_top=None, src_width=None, src_height=None, scale_thr=None, nsize=None, nns=None, qual=None, etype=None, pscrn=None, opt=None, int16_prescreener=None, int16_predictor=None, exp=None, kernel=None, taps=None, a1=None, a2=None, invks=False, invkstaps=3):
    """Scale a clip in both directions using the vertical nnedi3 kernel.

    Horizontal work is done by transposing, running
    nnedi3_resample_kernel_vertical, and transposing back; vertical work
    runs the vertical kernel directly. A dimension is only resampled when
    its scale ratio differs from 1 or its crop offsets are fractional.
    Defaults: kernel 'spline36', scale_thr 1.125, nnedi3 nsize=0/nns=3/qual=2.
    """
    core = vs.core
    # Parameters of scaling: default the target size to the source size and
    # interpret non-positive src_width/src_height relative to the frame edge.
    if target_width is None:
        target_width = input.width
    if target_height is None:
        target_height = input.height
    if src_left is None:
        src_left = 0
    if src_top is None:
        src_top = 0
    if src_width is None:
        src_width = input.width
    elif src_width <= 0:
        src_width = input.width - src_left + src_width
    if src_height is None:
        src_height = input.height
    elif src_height <= 0:
        src_height = input.height - src_top + src_height
    if scale_thr is None:
        scale_thr = 1.125
    src_right = src_width - input.width + src_left
    src_bottom = src_height - input.height + src_top
    hScale = target_width / src_width
    vScale = target_height / src_height
    # Parameters of nnedi3
    if nsize is None:
        nsize = 0
    if nns is None:
        nns = 3
    if qual is None:
        qual = 2
    # Parameters of fmtc.resample
    if kernel is None:
        kernel = 'spline36'
    else:
        kernel = kernel.lower()
    # Procedure decision: resample a dimension when scaled or when the crop
    # window has a fractional (sub-pixel) offset on either edge.
    hIsScale = hScale != 1
    vIsScale = vScale != 1
    isScale = hIsScale or vIsScale
    hResample = hIsScale or int(src_left) != src_left or int(src_right) != src_right
    vResample = vIsScale or int(src_top) != src_top or int(src_bottom) != src_bottom
    # NOTE(review): isScale and resample are computed but never used below.
    resample = hResample or vResample
    # Scaling: horizontal pass via transpose, then vertical pass.
    last = input
    if hResample:
        last = core.std.Transpose(last)
        last = nnedi3_resample_kernel_vertical(last, target_width, src_left, src_width, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2, invks, invkstaps)
        last = core.std.Transpose(last)
    if vResample:
        last = nnedi3_resample_kernel_vertical(last, target_height, src_top, src_height, scale_thr, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp, kernel, taps, a1, a2, invks, invkstaps)
    # Output
    return last
def nnedi3_resample_kernel_vertical(input, target_height=None, src_top=None, src_height=None, scale_thr=None, nsize=None, nns=None, qual=None, etype=None, pscrn=None, opt=None, int16_prescreener=None, int16_predictor=None, exp=None, kernel=None, taps=None, a1=None, a2=None, invks=False, invkstaps=3):
    """Vertically scale a clip: nnedi3 power-of-2 doubling, then fmtc.resample.

    nnedi3 doubles the height until the remaining ratio drops below
    scale_thr; fmtc.resample then performs the final fractional scaling and
    compensates the half-pixel center shift each doubling introduces.
    """
    core = vs.core
    # Parameters of scaling: non-positive src_height is relative to the bottom edge.
    if target_height is None:
        target_height = input.height
    if src_top is None:
        src_top = 0
    if src_height is None:
        src_height = input.height
    elif src_height <= 0:
        src_height = input.height - src_top + src_height
    if scale_thr is None:
        scale_thr = 1.125
    scale = target_height / src_height # Total scaling ratio
    eTimes = math.ceil(math.log(scale / scale_thr, 2)) if scale > scale_thr else 0 # Iterative times of nnedi3
    eScale = 1 << eTimes # Scaling ratio of nnedi3
    pScale = scale / eScale # Scaling ratio of fmtc.resample (NOTE(review): unused)
    # Parameters of nnedi3
    if nsize is None:
        nsize = 0
    if nns is None:
        nns = 3
    if qual is None:
        qual = 2
    # Parameters of fmtc.resample
    if kernel is None:
        kernel = 'spline36'
    else:
        kernel = kernel.lower()
    # Skip scaling if not needed
    if scale == 1 and src_top == 0 and src_height == input.height:
        return input
    # Scaling with nnedi3
    last = nnedi3_rpow2_vertical(input, eTimes, 1, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp)
    # Center shift calculation: each nnedi3 doubling shifts the image by half
    # a (source) pixel, corrected during the final resample below.
    vShift = 0.5 if eTimes >= 1 else 0
    # Scaling with fmtc.resample as well as correct center shift
    w = last.width
    h = target_height
    sx = 0
    sy = src_top * eScale - vShift
    sw = last.width
    sh = src_height * eScale
    if h != last.height or sy != 0 or sh != last.height:
        # invks (inverse kernel) is only meaningful when downscaling.
        if h < last.height and invks is True:
            last = core.fmtc.resample(last, w, h, sx, sy, sw, sh, kernel=kernel, taps=taps, a1=a1, a2=a2, invks=True, invkstaps=invkstaps)
        else:
            last = core.fmtc.resample(last, w, h, sx, sy, sw, sh, kernel=kernel, taps=taps, a1=a1, a2=a2)
    # Output
    return last
def nnedi3_rpow2_vertical(input, eTimes=1, field=1, nsize=None, nns=None, qual=None, etype=None, pscrn=None, opt=None, int16_prescreener=None, int16_predictor=None, exp=None):
    """Double the clip height eTimes times with nnedi3.

    The caller-supplied field parity is used only for the first doubling;
    every subsequent pass uses field 0, matching the recursive original.
    """
    core = vs.core
    last = input
    current_field = field
    remaining = eTimes
    while remaining >= 1:
        last = nnedi3_dh(last, current_field, nsize, nns, qual, etype, pscrn, opt, int16_prescreener, int16_predictor, exp)
        remaining -= 1
        current_field = 0
    return last
def nnedi3_dh(input, field=1, nsize=None, nns=None, qual=None, etype=None, pscrn=None, opt=None, int16_prescreener=None, int16_predictor=None, exp=None):
    """Height-doubling wrapper around the nnedi3 plugin (dh=True), passing all tuning knobs through."""
    nnedi3_plugin = vs.core.nnedi3
    return nnedi3_plugin.nnedi3(
        input, field=field, dh=True, nsize=nsize, nns=nns, qual=qual, etype=etype, pscrn=pscrn,
        opt=opt, int16_prescreener=int16_prescreener, int16_predictor=int16_predictor, exp=exp)
## Gamma conversion functions from HAvsFunc-r18
# Convert the luma channel to linear light
def GammaToLinear(src, fulls=True, fulld=True, curve='709', planes=[0, 1, 2], gcor=1., sigmoid=False, thr=0.5, cont=6.5):
    """Convert gamma-encoded luma to linear light; requires a 16-bit clip."""
    is_valid_clip = isinstance(src, vs.VideoNode) and src.format.bits_per_sample == 16
    if not is_valid_clip:
        raise ValueError('GammaToLinear: This is not a 16-bit clip')
    return LinearAndGamma(src, False, fulls, fulld, curve.lower(), planes, gcor, sigmoid, thr, cont)
# Convert back a clip to gamma-corrected luma
def LinearToGamma(src, fulls=True, fulld=True, curve='709', planes=[0, 1, 2], gcor=1., sigmoid=False, thr=0.5, cont=6.5):
    """Convert linear-light luma back to gamma-corrected; requires a 16-bit clip."""
    is_valid_clip = isinstance(src, vs.VideoNode) and src.format.bits_per_sample == 16
    if not is_valid_clip:
        raise ValueError('LinearToGamma: This is not a 16-bit clip')
    return LinearAndGamma(src, True, fulls, fulld, curve.lower(), planes, gcor, sigmoid, thr, cont)
def LinearAndGamma(src, l2g_flag, fulls, fulld, curve, planes, gcor, sigmoid, thr, cont):
    """Apply a gamma<->linear transfer curve to a 16-bit clip via a LUT.

    l2g_flag selects direction: True = linear-to-gamma, False = gamma-to-linear.
    curve selects the transfer constants (srgb / 709 / 601 / 170 / 240 / 2020);
    fulls/fulld select full- vs limited-range scaling on input/output; gcor is
    an extra gamma correction and sigmoid optionally maps through an
    (inverse) sigmoid centered at thr with contrast cont.
    """
    core = vs.core
    if curve == 'srgb':
        c_num = 0
    elif curve in ['709', '601', '170']:
        c_num = 1
    elif curve == '240':
        c_num = 2
    elif curve == '2020':
        c_num = 3
    else:
        raise ValueError('LinearAndGamma: wrong curve value')
    if src.format.color_family == vs.GRAY:
        planes = [0]
    # Transfer-curve constants indexed by c_num:
    #        sRGB     BT-709/601  SMPTE 240M  BT-2020
    k0 = [0.04045, 0.081, 0.0912, 0.08145][c_num]
    phi = [12.92, 4.5, 4.0, 4.5][c_num]
    alpha = [0.055, 0.099, 0.1115, 0.0993][c_num]
    gamma = [2.4, 2.22222, 2.22222, 2.22222][c_num]
    def g2l(x):
        # Gamma-encoded 16-bit code value -> linear, with optional inverse sigmoid.
        expr = x / 65536 if fulls else (x - 4096) / 56064
        if expr <= k0:
            expr /= phi
        else:
            expr = ((expr + alpha) / (1 + alpha)) ** gamma
        if gcor != 1 and expr >= 0:
            expr **= gcor
        if sigmoid:
            x0 = 1 / (1 + math.exp(cont * thr))
            x1 = 1 / (1 + math.exp(cont * (thr - 1)))
            expr = thr - math.log(max(1 / max(expr * (x1 - x0) + x0, 0.000001) - 1, 0.000001)) / cont
        if fulld:
            return min(max(round(expr * 65536), 0), 65535)
        else:
            return min(max(round(expr * 56064 + 4096), 0), 65535)
    # E' = (E <= k0 / phi) ? E * phi : (E ^ (1 / gamma)) * (alpha + 1) - alpha
    def l2g(x):
        # Linear 16-bit code value -> gamma-encoded, with optional forward sigmoid.
        expr = x / 65536 if fulls else (x - 4096) / 56064
        if sigmoid:
            x0 = 1 / (1 + math.exp(cont * thr))
            x1 = 1 / (1 + math.exp(cont * (thr - 1)))
            expr = (1 / (1 + math.exp(cont * (thr - expr))) - x0) / (x1 - x0)
        if gcor != 1 and expr >= 0:
            expr **= gcor
        if expr <= k0 / phi:
            expr *= phi
        else:
            expr = expr ** (1 / gamma) * (alpha + 1) - alpha
        if fulld:
            return min(max(round(expr * 65536), 0), 65535)
        else:
            return min(max(round(expr * 56064 + 4096), 0), 65535)
    return core.std.Lut(src, planes=planes, function=l2g if l2g_flag else g2l)
# Apply the inverse sigmoid curve to a clip in linear luminance
def SigmoidInverse(src, thr=0.5, cont=6.5, planes=[0, 1, 2]):
    """Apply the inverse sigmoid curve to a 16-bit clip in linear luminance."""
    core = vs.core
    if not isinstance(src, vs.VideoNode) or src.format.bits_per_sample != 16:
        raise ValueError('SigmoidInverse: This is not a 16-bit clip')
    if src.format.color_family == vs.GRAY:
        planes = [0]
    # Sigmoid endpoints; constant for the whole LUT, so computed once.
    x0 = 1 / (1 + math.exp(cont * thr))
    x1 = 1 / (1 + math.exp(cont * (thr - 1)))
    def lut_entry(x):
        inner = max(x / 65536 * (x1 - x0) + x0, 0.000001)
        mapped = thr - math.log(max(1 / inner - 1, 0.000001)) / cont
        return min(max(round(mapped * 65536), 0), 65535)
    return core.std.Lut(src, planes=planes, function=lut_entry)
# Convert back a clip to linear luminance
def SigmoidDirect(src, thr=0.5, cont=6.5, planes=[0, 1, 2]):
    """Apply the forward sigmoid curve to a 16-bit clip, converting back to linear luminance."""
    core = vs.core
    if not isinstance(src, vs.VideoNode) or src.format.bits_per_sample != 16:
        raise ValueError('SigmoidDirect: This is not a 16-bit clip')
    if src.format.color_family == vs.GRAY:
        planes = [0]
    # Sigmoid endpoints; constant for the whole LUT, so computed once.
    x0 = 1 / (1 + math.exp(cont * thr))
    x1 = 1 / (1 + math.exp(cont * (thr - 1)))
    def lut_entry(x):
        sig = 1 / (1 + math.exp(cont * (thr - x / 65536)))
        mapped = (sig - x0) / (x1 - x0)
        return min(max(round(mapped * 65536), 0), 65535)
    return core.std.Lut(src, planes=planes, function=lut_entry)
## Gamma conversion functions from HAvsFunc-r18 | 39,577 | 0 | 341 |
330a2345fb075320f63417f0ddce382a5a7b8c85 | 1,243 | py | Python | teamlogic/migrations/0004_auto_20170317_1729.py | SarFootball/backend | 9c51118becf34085dfd9bf8cb1c765631839a43b | [
"Apache-2.0"
] | 11 | 2017-07-05T20:03:00.000Z | 2018-09-19T17:18:26.000Z | teamlogic/migrations/0004_auto_20170317_1729.py | SarFootball/backend | 9c51118becf34085dfd9bf8cb1c765631839a43b | [
"Apache-2.0"
] | 50 | 2016-07-15T16:21:03.000Z | 2018-04-17T11:18:06.000Z | teamlogic/migrations/0004_auto_20170317_1729.py | SarFootball/backend | 9c51118becf34085dfd9bf8cb1c765631839a43b | [
"Apache-2.0"
] | 6 | 2017-06-23T14:53:38.000Z | 2022-01-03T12:38:46.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-17 17:29
from __future__ import unicode_literals
from django.db import migrations, models
| 30.317073 | 106 | 0.583266 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-17 17:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamlogic', '0003_auto_20170316_1536'),
]
operations = [
migrations.AddField(
model_name='match',
name='status',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='match',
name='away_goals',
field=models.ManyToManyField(blank=True, null=True, related_name='away', to='teamlogic.Goal'),
),
migrations.AlterField(
model_name='match',
name='date_time',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='match',
name='home_goals',
field=models.ManyToManyField(blank=True, null=True, related_name='home', to='teamlogic.Goal'),
),
migrations.AlterField(
model_name='player',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='media'),
),
]
| 0 | 1,065 | 23 |
489ac134887d5f6319601625cffa35d5b3daae5d | 9,635 | py | Python | EZ/parameter.py | Alex6022/EZ | c90d13b78b3fb67fd191700965e0827ee40eccbf | [
"MIT"
] | null | null | null | EZ/parameter.py | Alex6022/EZ | c90d13b78b3fb67fd191700965e0827ee40eccbf | [
"MIT"
] | null | null | null | EZ/parameter.py | Alex6022/EZ | c90d13b78b3fb67fd191700965e0827ee40eccbf | [
"MIT"
] | 2 | 2021-01-26T16:57:26.000Z | 2021-03-21T18:04:25.000Z | import numpy as np
import EZ.stderr as stderr
| 24.516539 | 72 | 0.532745 | import numpy as np
import EZ.stderr as stderr
class Parameter():
    """A measured quantity y(x) with uncertainties, plus fitting, plotting and export.

    `y` is a sequence of objects exposing `.value`, `.stderr` and an
    `.operator(name, other)` method (e.g. EZ.stderr.FloatStd) used to
    propagate uncertainties through arithmetic. Arithmetic between two
    Parameters is element-wise over matching x positions.
    """

    def __init__(self, x, y, name="", unit="", scale=1, xlabel=""):
        """Store data and labels, then cache value/stderr arrays and the y-label."""
        self.name = name
        self.unit = unit
        self.scale = scale  # multiplicative factor applied only when plotting/exporting
        self.x = x
        self.y = y
        self.fit_x = None
        self.fit_y = None
        self.model_func = None  # model used by fit(); set by the calc_* helpers
        self.fit_result = None
        self.xlabel = xlabel
        self.update()
        self.build_ylabel()

    def update(self):
        """Rebuild the numpy arrays of values and standard errors from self.y."""
        self.values = list()
        self.stderrs = list()
        for y in self.y:
            self.values.append(y.value)
            if y.stderr is not None:
                self.stderrs.append(y.stderr)
            else:
                # Missing uncertainty is treated as zero.
                self.stderrs.append(0)
        self.values = np.array(self.values)
        self.stderrs = np.array(self.stderrs)

    def fit(self, pars=dict(), range_x=[], print_result=True):
        """Fit self.model_func to the data (restricted to range_x) with lmfit.

        `pars` maps parameter names to keyword arguments for lmfit's
        Parameter.set(). No-op when model_func is unset. The mutable
        defaults are never mutated here.
        """
        if self.model_func is not None:
            idx, min_x, max_x = lim_x(self.x, range_x)
            x = self.x[idx]
            y = self.values[idx]
            model = lmfit.Model(self.model_func, nan_policy='omit')
            params = model.make_params()
            for key in pars:
                params[key].set(**pars[key])
            self.fit_result = model.fit(y, params, x=x)
            self.pars = self.fit_result.params
            # Dense curve used to draw the fitted model.
            self.fit_x = np.linspace(min_x, max_x, 200)
            self.fit_y = self.model_func(
                x=self.fit_x,
                **self.fit_result.params.valuesdict()
            )
            if print_result:
                display_pars(self.fit_result.params)

    def build_ylabel(self):
        """Compose the y-axis label from name and (optional) unit."""
        self.ylabel = f"{self.name}"
        if self.unit != "":
            self.ylabel += f" [{self.unit}]"

    def plot(
        self,
        fig=None,
        range_x=[],
        label="",
        marker=".",
        errorbar=True
    ):
        """Plot the data (with optional error bars) and, if fitted, the model curve."""
        if fig is None:
            fig = plt.figure()
        plt.xlabel(self.xlabel)
        self.build_ylabel()
        plt.ylabel(self.ylabel)
        idx, *dmy = lim_x(self.x, range_x)
        if errorbar:
            l = plt.errorbar(
                self.x[idx],
                self.values[idx] * self.scale,
                yerr=self.stderrs[idx] * self.scale,
                marker=marker,
                label=label
            )
        else:
            l = plt.plot(
                self.x[idx],
                self.values[idx] * self.scale,
                marker=marker,
                label=label
            )
        if self.fit_result is not None:
            # Show data as markers only; the fit supplies the line.
            l[0].set_linestyle("none")
            idx, *dmy = lim_x(self.fit_x, range_x)
            plt.plot(
                self.fit_x[idx],
                self.fit_y[idx] * self.scale,
                color=l[0].get_color()
            )

    def export(self, name, errorbar=True):
        """Write data to <name>.csv, plus fit curve and fit results if available."""
        # build columns names
        self.build_ylabel()
        stderr_label = "stderr"
        if self.unit != "":
            stderr_label += f" [{self.unit}]"
        columns = [self.xlabel, self.ylabel]
        # create data
        data = np.array([
            self.x,
            self.values
        ])
        if errorbar:
            columns.append(stderr_label)
            data = np.vstack((data, self.stderrs))
        # create dataframe
        df = pd.DataFrame(columns=columns, data=data.T)
        df.to_csv(name + ".csv", index=None, header=True)
        # export fit
        if self.fit_x is not None:
            data_fit = np.array([
                self.fit_x,
                self.fit_y
            ])
            df = pd.DataFrame(columns=columns[0:2], data=data_fit.T)
            df.to_csv(name + "_fit.csv", index=None, header=True)
        # export results
        if self.fit_result is not None:
            df_res = display_pars(self.fit_result.params)
            df_res.to_csv(name + "_fit_results.csv", header=True)

    def __add__(self, other):
        return self.operator("add", other)

    def __radd__(self, other):
        return self.operator("radd", other)

    def __sub__(self, other):
        return self.operator("sub", other)

    def __rsub__(self, other):
        return self.operator("rsub", other)

    def __mul__(self, other):
        return self.operator("mul", other)

    def __rmul__(self, other):
        return self.operator("rmul", other)

    def __div__(self, other):
        return self.operator("div", other)

    def __rdiv__(self, other):
        return self.operator("rdiv", other)

    def __truediv__(self, other):
        return self.operator("div", other)

    def __rtruediv__(self, other):
        return self.operator("rdiv", other)

    def __pow__(self, other):
        return self.operator("pow", other)

    def __rpow__(self, other):
        return self.operator("rpow", other)

    def log(self):
        return self.operator("log", other=None)

    def log10(self):
        return self.operator("log10", other=None)

    def exp(self):
        return self.operator("exp", other=None)

    def sin(self):
        return self.operator("sin", other=None)

    def cos(self):
        return self.operator("cos", other=None)

    def tan(self):
        return self.operator("tan", other=None)

    def operator(self, name, other):
        """Apply operation `name` element-wise, delegating error propagation to self.y.

        `other` may be another Parameter (element-wise pairing), a scalar,
        or None for the unary operations (log, exp, sin, ...).
        """
        y_res = list()
        for i in range(len(self.y)):
            if isinstance(other, Parameter):
                y = self.y[i].operator(name, other.y[i])
            else:
                # Scalars and None (unary ops) are forwarded unchanged.
                # BUGFIX: the original only handled int/float here, so the
                # unary methods above (other=None) left `y` unbound and
                # raised NameError.
                y = self.y[i].operator(name, other)
            y_res.append(y)
        result = Parameter(x=self.x, y=y_res, xlabel=self.xlabel)
        result.model_func = self.model_func
        return result
def calc_C(Q, n, R):
    """Derive an effective capacitance Parameter from CPE magnitude Q, exponent n and resistance R."""
    capacitance = (R * Q)**(1 / n) / R
    capacitance.name = "C" + Q.name[1:]
    capacitance.scale = 1e6
    capacitance.unit = r"$\rm\mu$F$\cdot$cm$^{-2}$"
    return capacitance
def calc_MS(C):
    """Build the Mott-Schottky quantity 1/C^2 as a Parameter, wired to the f_MS model."""
    mott_schottky = 1 / C**2
    mott_schottky.name = r"1/%s$^2$" % C.name
    mott_schottky.unit = r"F$^{-2}\cdot$cm$^{4}$"
    mott_schottky.model_func = f_MS
    return mott_schottky
def f_MS(x, E_fb, N, eps, f_r, C_dl):
    """Mott-Schottky model: 1/C^2 versus potential, floored at the double-layer term.

    Where the depletion term would be non-positive, the curve is clamped to
    1/C_dl^2. T is fixed at 0 degC in kelvin (const.zero_Celsius), matching
    the original implementation.
    """
    boltzmann = const.k
    charge = const.e
    vac_permittivity = const.epsilon_0
    temperature = const.zero_Celsius
    floor = 1 / C_dl**2
    denom = charge * eps * vac_permittivity * N * f_r**2
    depletion = -2 * (x - E_fb - boltzmann * temperature / charge) / denom
    return np.where(depletion <= 0, floor, depletion + floor)
def calc_DOS(C):
    """Convert a capacitance Parameter to a density of states (per eV per cm^2), wired to f_DOS."""
    density = C / const.e
    density.name = "DOS"
    density.unit = r"$eV^{-1} \cdot cm^{-2}$"
    density.model_func = f_DOS
    return density
def f_DOS(x, N_SS, E_SS, width, slope_bkg, const_bkg):
    """Gaussian surface-state distribution (area N_SS, center E_SS, sigma width) on a linear background."""
    amplitude = N_SS / (np.sqrt(2 * np.pi) * width)
    gauss_term = amplitude * np.exp(-(x - E_SS)**2 / (2 * width**2))
    background = slope_bkg * x + const_bkg
    return gauss_term + background
def calc_W(MS):
    """Depletion-layer width W(E) in nm derived from a fitted Mott-Schottky Parameter.

    Reads the fitted donor density N (converted cm^-3 -> m^-3), permittivity
    eps and flat-band potential E_fb from MS.pars; W is zero wherever the
    built-in voltage has the wrong sign.
    """
    donor_density = MS.pars["N"] * 1e6
    permittivity = MS.pars["eps"]
    flat_band = MS.pars["E_fb"]
    potentials = np.linspace(np.min(MS.x), np.max(MS.x), 200)
    vac_permittivity = const.epsilon_0
    charge = const.e
    built_in = flat_band - potentials
    depleted = built_in / donor_density > 0
    width_nm = potentials * 0
    width_nm[depleted] = np.sqrt(
        2 * permittivity * vac_permittivity * built_in[depleted] / (charge * donor_density)) * 1e9
    wrapped = [stderr.FloatStd(v, 0) for v in width_nm]
    return Parameter(potentials, wrapped, name="W", unit="nm", scale=1, xlabel=MS.xlabel)
def figure_layout():
    """Create the standard 3-panel impedance figure.

    Returns (fig, ax) where ax[0] is -Im(Z) vs omega, ax[1] is Re(Z) vs
    omega (both log-x, stacked left), and ax[2] is the square Nyquist plot
    spanning the right column.
    """
    axis_labels = {
        "omega": r"$\rm \omega\;[rad \cdot s^{-1}]$",
        "real": r"$\rm Re(Z)\;[k\Omega \cdot cm^{2}]$",
        "imag": r"$\rm -Im(Z)\;[k\Omega \cdot cm^{2}]$"
    }
    width = 18 / 2.54  # 18 cm converted to inches
    fig = plt.figure(figsize=(width, width / 2))
    grid = (2, 2)
    ax = [
        plt.subplot2grid(grid, (0, 0)),
        plt.subplot2grid(grid, (1, 0)),
        plt.subplot2grid(grid, (0, 1), rowspan=2),
    ]
    plt.subplots_adjust(hspace=0)
    plt.subplots_adjust(wspace=0.25)
    ax[0].set_ylabel(axis_labels["imag"])
    ax[1].set_ylabel(axis_labels["real"])
    ax[1].set_xlabel(axis_labels["omega"])
    ax[2].set_ylabel(axis_labels["imag"])
    ax[2].set_xlabel(axis_labels["real"])
    ax[0].set_xscale("log")
    ax[1].set_xscale("log")
    ax[0].set_xticks([])
    ax[2].set_aspect('equal', 'box')
    return fig, ax
def display_pars(pars):
    """Render lmfit parameters as a pandas DataFrame of strings, display it, and return it.

    Numeric attributes are formatted with %.3g; `vary` and any attribute
    that is None (e.g. a missing stderr before/without error estimation)
    are shown via plain str().
    """
    index = list()
    columns = ["value", "min", "max", "stderr", "vary"]
    data = list()
    for name in pars:
        index.append(name)
        values = list()
        for attr in columns:
            attr_value = getattr(pars[name], attr)
            # BUGFIX: original condition was `attr == "vary" or None`, which
            # never matched None values, so f"{None:.3g}" raised TypeError
            # whenever e.g. stderr was unset.
            if attr == "vary" or attr_value is None:
                values.append(f"{attr_value}")
            else:
                values.append(f"{attr_value:.3g}")
        data.append(values)
    df = pd.DataFrame(index=index, columns=columns, data=data)
    display(df)
    return df
def lim_x(x, range_x):
    """Return (mask, lo, hi) restricting x to range_x, or to its full span when range_x is empty."""
    if len(range_x) > 0:
        lo = np.min(range_x)
        hi = np.max(range_x)
    else:
        lo = np.min(x)
        hi = np.max(x)
    mask = (x >= lo) & (x <= hi)
    return mask, lo, hi
def make_ticks(x):
    """Compute 'nice' axis tick positions covering the span of x.

    Targets roughly seven intervals, snaps the step mantissa to 1, 2 or 5,
    rounds the limits to the step's order of magnitude, and squashes
    numerical noise near zero to exactly 0.
    """
    lo = np.min(x)
    hi = np.max(x)
    # Aim for about seven intervals across the span.
    raw_step = (hi - lo) / 7
    # Split the raw step into a power of ten and a mantissa.
    power = int(np.floor(np.log10(abs(raw_step))))
    mantissa = raw_step / 10**power
    # Snap the mantissa to the closest of 1, 2, 5.
    candidates = np.array([1, 2, 5])
    pick = (np.abs(candidates - mantissa)).argmin()
    step_label = candidates[pick] * 10**power
    # Round the limits to the matching decimal place.
    decimals = -(power + 1)
    lo_label = np.round(lo, decimals=decimals)
    hi_label = np.round(hi, decimals=decimals)
    ticks = np.arange(lo_label, hi_label + step_label, step_label)
    # Replace float noise around zero with an exact 0.
    ticks[np.abs(ticks) < 1e-15] = 0
    return ticks
| 8,655 | -3 | 928 |
d26b10ff6669fa3fb71b08771c9e2a65a51f7bb3 | 9,074 | py | Python | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | 2 | 2019-03-30T23:29:10.000Z | 2019-04-05T21:54:21.000Z | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | 3 | 2019-03-29T11:23:17.000Z | 2020-12-28T02:00:17.000Z | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | null | null | null | from PyQt5 import QtGui, QtCore, QtWidgets
from collections import namedtuple
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
HumanFeedback = namedtuple('HumanFeedback', ['feedback_value'])
SavedAction = namedtuple('SavedAction', ['state', 'action', 'logprob'])
SavedActionsWithFeedback = namedtuple('SavedActionsWithFeedback', ['saved_actions', 'final_feedback'])
| 45.144279 | 119 | 0.622548 | from PyQt5 import QtGui, QtCore, QtWidgets
from collections import namedtuple
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
# A single human feedback event; feedback_value is the signed scalar from the UI (+1/-1).
HumanFeedback = namedtuple('HumanFeedback', ['feedback_value'])
# One stored transition: the resulting state, the action taken and its log-probability.
SavedAction = namedtuple('SavedAction', ['state', 'action', 'logprob'])
# A window of saved transitions together with the feedback value assigned to it.
SavedActionsWithFeedback = namedtuple('SavedActionsWithFeedback', ['saved_actions', 'final_feedback'])
def parse_args(parser):
    """Register Deep COACH hyper-parameter flags on the given argparse parser.

    Mutates `parser` in place. Help texts now match the actual defaults
    (the originals claimed 0.00025 for the learning rate and mislabeled
    eligibility_decay as "Learning rate (default:0.01)").
    """
    parser.add_argument('--batch_size', type=int, default=16, help='batch_size (default: 16)')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate (default: 0.001)')
    parser.add_argument('--eligibility_decay', type=float, default=0.35, help='Eligibility trace decay (default: 0.35)')
    parser.add_argument("--coach_window_size", type=int, default=10, help="Number of transitions in a window.")
    parser.add_argument('--entropy_reg', type=float, default=1.5, help='Entropy regularization beta')
    parser.add_argument('--feedback_delay_factor', type=int, default=1, help='COACH Feedback delay factor.')
    parser.add_argument(
        '--ppo_eps',
        type=float,
        default=0.2,
        help='PPO-like clipping of the loss. Negative value turns the ppo clipping off.')
    # NOTE(review): store_true with default=True means no_cuda is always True
    # (CPU-only); confirm this is intentional.
    parser.add_argument('--no_cuda', action='store_true', default=True, help='disables CUDA training')
class DeepCoach():
    """Deep COACH agent: policy-gradient learning driven by human +1/-1 feedback.

    Wires a Qt feedback UI (buttons and +/- keys) to an eligibility-trace
    update over windows of recent transitions, with optional PPO-style
    clipping of the probability ratio.
    """

    def __init__(self, window, args, env):
        """Build the policy network for the env's action space and set up the UI."""
        self.window = window
        self.args = args
        self.env = env
        torch.manual_seed(args.seed)
        self.device = torch.device("cuda" if not args.no_cuda else "cpu")
        if window is not None:
            self.setup_ui(window)
        # Discrete action spaces expose `.n`; otherwise assume continuous.
        PolicyNet = CategoricalPolicyNet if hasattr(self.env.action_space, 'n') else GaussianPolicyNet
        self.policy_net = PolicyNet(env.observation_space.shape[0], env.action_space).to(device=self.device)
        self.optimizer = torch.optim.RMSprop(self.policy_net.parameters(), lr=args.learning_rate)
        self.feedback = None

    def setup_ui(self, window):
        """Install -1/+1 feedback buttons and map keys (numpad -/+, M/P) to feedback."""
        @QtCore.pyqtSlot(QtGui.QKeyEvent)
        def keyPressed(event):
            numpad_mod = int(event.modifiers()) & QtCore.Qt.KeypadModifier
            if (event.key() == QtCore.Qt.Key_Minus and numpad_mod) or event.key() == QtCore.Qt.Key_M:
                self.buttonClicked(-1)
            elif (event.key() == QtCore.Qt.Key_Plus and numpad_mod) or event.key() == QtCore.Qt.Key_P:
                self.buttonClicked(1)
            else:
                print("ERROR: Unknown key: ", event)
        hor = QtWidgets.QHBoxLayout()
        for i in range(-1, 2):
            if i == 0:
                continue
            but = QtWidgets.QPushButton()
            but.setText(str(i))
            # Default-arg binding captures the current i (late-binding fix).
            but.clicked.connect(lambda bla, def_arg=i: self.buttonClicked(def_arg))
            hor.addWidget(but)
        window.feedback_widget.setLayout(hor)
        window.keyPressedSignal.connect(keyPressed)

    def buttonClicked(self, value):
        """Record a feedback event; consumed (and cleared) by the training loop."""
        self.feedback = HumanFeedback(feedback_value=value)

    def to_tensor(self, value):
        """Convert a value to a float tensor on the agent's device."""
        return torch.tensor(value).float().to(device=self.device)

    def select_action(self, state):
        """Run the policy on a numpy state; return (logprob, action as numpy, entropy)."""
        state = torch.from_numpy(state).to(device=self.device).float()
        action, logprob, entropy = self.policy_net(state)
        return logprob, action.detach().cpu().numpy(), entropy

    def update_net(self, savedActionsWithFeedback, current_entropy):
        """One gradient step over feedback windows (eligibility-decayed, optionally PPO-clipped)."""
        if not savedActionsWithFeedback:
            return
        print("training")
        e_losses = []
        for saf in savedActionsWithFeedback:
            final_feedback = saf.final_feedback
            # Walk the window newest-first so the decay exponent n grows with age.
            for n, sa in enumerate(saf.saved_actions[::-1]):
                log_p_old = torch.tensor(sa.logprob).to(self.device)
                log_prob, _, _ = self.select_action(sa.state)
                # Importance ratio between current and stored policy.
                probs_ratio = (log_prob - log_p_old).exp()
                if self.args.ppo_eps > 0:
                    surr1 = final_feedback * probs_ratio
                    surr2 = torch.clamp(probs_ratio, 1.0 - self.args.ppo_eps, 1.0 + self.args.ppo_eps) * final_feedback
                    loss_term = torch.min(surr1, surr2)
                else:
                    loss_term = probs_ratio * final_feedback
                e_loss = (self.args.eligibility_decay**(n)) * loss_term
                e_loss = torch.sum(e_loss, dim=0) # Sum the loss across all actions.
                e_losses.append(e_loss)
        # Maximize feedback-weighted ratio plus entropy bonus (hence the minus).
        loss = -(self.to_tensor(1 /
                                (len(savedActionsWithFeedback))) * torch.stack(e_losses).to(device=self.device).sum() +
                 torch.sum(self.args.entropy_reg * current_entropy, dim=0))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def processFeedback(self, savedActions, buffer):
        """Attach the pending feedback to the recent transition window and buffer it.

        Trims savedActions to the window size (plus delay) in place; the
        last feedback_delay_factor transitions are excluded to account for
        human reaction delay. Always clears self.feedback.
        """
        feedback = self.feedback.feedback_value
        if feedback is not None and len(savedActions) > 0:
            print("Feedback: ", feedback)
            if feedback > 0:
                self.window.viewer.num_pos_feedback += 1
            elif feedback < 0:
                self.window.viewer.num_neg_feedback += 1
            window_size = min(len(savedActions), self.args.coach_window_size)
            del savedActions[:-(window_size + self.args.feedback_delay_factor)]
            window = savedActions[:-self.args.feedback_delay_factor] # Copy the list
            savedActionsWithFeedback = SavedActionsWithFeedback(saved_actions=window, final_feedback=feedback)
            buffer.append(savedActionsWithFeedback)
        self.feedback = None

    def train(self):
        """Interactive training loop: act, render, absorb feedback, update on mini-batches."""
        buffer = []
        running_reward = 10
        for i_episode in range(1, 10000):
            state, ep_reward = self.env.reset(), 0
            savedActions = []
            for t in range(1, 10000): # Don't infinite loop while learning
                logprob, action, entropy = self.select_action(state)
                old_state = state
                state, reward, done, _ = self.env.step(action)
                ep_reward += reward
                savedActions.append(SavedAction(state=state, action=action, logprob=logprob.detach().cpu().numpy()))
                self.window.render(self.env)
                if not self.window.isVisible():
                    break
                if self.feedback:
                    self.processFeedback(savedActions, buffer)
                    # Immediate single-window update on fresh feedback.
                    if len(buffer[-1].saved_actions) > 0 and self.window.trainCheck.isChecked():
                        self.update_net([buffer[-1]], self.select_action(old_state)[2])
                time.sleep(self.window.renderSpin.value())
                # Cap the replay buffer by dropping the oldest entries.
                if len(buffer) > 50:
                    del buffer[:10]
                if len(buffer) >= self.args.batch_size and self.window.trainCheck.isChecked():
                    indicies = random.sample(range(len(buffer)), self.args.batch_size)
                    mini_batch = [buffer[i] for i in indicies]
                    self.update_net(mini_batch, entropy)
                print("Action: {}, Reward: {:.2f}, ep_reward: {:.2f}".format(action, reward, ep_reward))
                if done:
                    break
            if not self.window.isVisible():
                break
            # Exponential moving average of episode reward for progress reporting.
            running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
            print("Running reward %d" % running_reward)
def start(window, args, env):
    """Entry point: build a DeepCoach agent, report its parameter count, train, then close the env."""
    agent = DeepCoach(window, args, env)
    n_params = utils.count_parameters(agent.policy_net)
    print("Number of trainable parameters:", n_params)
    agent.train()
    env.close()
class CategoricalPolicyNet(nn.Module):
    """Small MLP policy for discrete action spaces.

    Maps an observation vector to action logits and returns the greedy
    action, its log-probability under the categorical distribution, and
    the distribution entropy.
    """

    def __init__(self, observation_space_shape, action_space):
        """observation_space_shape: input dimension; action_space must expose `.n`."""
        super(CategoricalPolicyNet, self).__init__()
        action_dim = action_space.n
        self.hidden1 = nn.Linear(observation_space_shape, 16)
        # self.hidden2 = nn.Linear(30, 30)
        self.action_probs = nn.Linear(16, action_dim)

    def forward(self, x):
        # torch.tanh replaces the deprecated torch.nn.functional.tanh.
        x = torch.tanh(self.hidden1(x))
        # x = F.relu(self.hidden2(x))
        logits = self.action_probs(x)
        action = torch.argmax(logits, dim=-1)
        distribution = torch.distributions.Categorical(logits=logits)
        return action, distribution.log_prob(action), distribution.entropy()
class GaussianPolicyNet(nn.Module):
    """Small MLP policy for continuous action spaces.

    Outputs the mean of a diagonal Gaussian; the (state-independent) log
    standard deviation is a learned parameter initialized at -0.5. Returns
    a sampled action, its per-dimension log-probability, and the entropy.
    """

    def __init__(self, observation_space_shape, action_space):
        """observation_space_shape: input dimension; action_space must expose `.shape`."""
        super(GaussianPolicyNet, self).__init__()
        action_dim = action_space.shape[-1]
        self.hidden1 = nn.Linear(observation_space_shape, 16)
        # self.hidden2 = nn.Linear(30, 30)
        self.mu_head = nn.Linear(16, action_dim)
        self.log_std = torch.nn.parameter.Parameter(-0.5 * torch.ones(action_dim))

    def forward(self, x):
        # torch.tanh replaces the deprecated torch.nn.functional.tanh.
        x = torch.tanh(self.hidden1(x))
        # x = F.relu(self.hidden2(x))
        mean = self.mu_head(x)
        std = self.log_std.expand_as(mean).exp()
        distribution = torch.distributions.Normal(mean, std)
        action = torch.normal(mean, std)
        return action, distribution.log_prob(action), distribution.entropy()
| 8,167 | 28 | 439 |
07b7f98b58a60c6fd215e2e961645309aca54a73 | 7,048 | py | Python | BogalSolver.py | NAU-OSS/CS499_Group_7 | 8986ca82410b907b47f6eeca53ae382031442481 | [
"MIT"
] | 1 | 2018-09-27T15:48:10.000Z | 2018-09-27T15:48:10.000Z | BogalSolver.py | NAU-OSS/CS499_Group_7 | 8986ca82410b907b47f6eeca53ae382031442481 | [
"MIT"
] | 3 | 2018-09-25T16:15:13.000Z | 2018-10-04T02:54:51.000Z | BogalSolver.py | NAU-OSS/CS499_Group_7 | 8986ca82410b907b47f6eeca53ae382031442481 | [
"MIT"
] | 5 | 2018-09-25T15:30:40.000Z | 2018-10-08T05:00:38.000Z | import sys, random, string, time
rawBoard = ''
moves = 0
# size -> int
# generate board of size size x size filled with random chars
# @returns none
# textFile -> string
# loads a board from a text file
# @returns board in 2D list form
# board -> 2D array
# prints out the bogal board
# coordinate -> list, board -> 2D list
# @returns list of all possible next positions
# possibleMoves -> 2D list, usedPath -> 2D list
# @returns the list of all legal moves
# Function used for setting up all prefix dictionaries.
# This is not run with my program but was created because I'm lazy and
# didn't want to create the prefix dictionaries by hand.
# board -> 2D list, currPos -> list, path -> 2D list
# boggle board, xy pair current position, path that got to that position
# @returns tuple of the word created and whether it is a real word.
if __name__ == "__main__":
main() | 34.048309 | 112 | 0.588394 | import sys, random, string, time
rawBoard = ''  # raw board text as read from disk (set by loadBoard, printed by printBoard)
moves = 0      # global count of search states examined (incremented by examineState)
def generateBoard(size, outFile='test.txt'):
    """Write a size x size board of random uppercase letters to `outFile`.

    Each row is written as space-separated single characters, one row per
    line. The default path keeps the original hard-coded 'test.txt'.

    :param size: board dimension (rows == columns)
    :param outFile: destination path (new optional parameter)
    :returns: None
    """
    charlist = string.ascii_uppercase
    # Context manager guarantees the handle is closed even on error
    # (the original opened/closed manually and could leak on exception).
    with open(outFile, 'w') as out:
        for _ in range(size):
            out.write(' '.join(random.choices(charlist, k=size)) + "\n")
def loadBoard(textFile):
    """Load a boggle board from a text file.

    The file must contain a square grid of single characters separated by
    single spaces, one row per line. Also stores the raw text in the module
    global ``rawBoard`` (used later by printBoard).

    :param textFile: path of the board file
    :returns: board as a 2-D list of single-character strings
    :raises AssertionError: if the grid is not square
    """
    global rawBoard
    # Context manager closes the handle even if parsing below fails.
    with open(textFile, 'r') as boardFile:
        rawBoard = boardFile.read().strip()
    # Squareness check: an n x n grid has n rows (n-1 newlines) and
    # n*(n-1) spaces, so spaces + rows == n**2 cells in total.
    n1 = rawBoard.count('\n') + 1
    n2 = rawBoard.count(' ') + n1
    assert n1 ** 2 == n2
    # Convert to a 2-D list of chars.
    rows = rawBoard.replace(' ', '').split('\n')
    return [list(row) for row in rows]
# board -> 2D array
# prints out the bogal board
def printBoard(board):
    """Print the board exactly as it appeared in the input file.

    NOTE(review): the `board` argument is ignored; this prints the raw
    text stored in the module global `rawBoard` by loadBoard().
    """
    print(rawBoard)
def possibleMoves(coordinate, board):
    """Return all board positions adjacent to `coordinate`.

    Adjacency is the eight king-move neighbours (orthogonal + diagonal);
    positions outside the board are excluded. The offset order below
    reproduces the original down/up/right/left/diagonals ordering.

    :param coordinate: [row, col] pair
    :param board: square 2-D board (len(board) bounds both axes)
    :returns: list of [row, col] neighbour pairs
    """
    size = len(board)
    row, col = coordinate
    neighbours = []
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1),
                   (-1, 1), (1, 1), (-1, -1), (1, -1)):
        r, c = row + dr, col + dc
        if 0 <= r < size and 0 <= c < size:
            neighbours.append([r, c])
    return neighbours
def legalMoves(possibleMoves, usedPath):
    """Return candidate moves that are not already on the path.

    Both arguments are sequences of [row, col] pairs. The result is a list
    of (row, col) tuples (set arithmetic requires hashable elements), in
    no particular order.
    """
    candidates = {tuple(move) for move in possibleMoves}
    visited = {tuple(step) for step in usedPath}
    return list(candidates - visited)
# Function used for setting up all prefix dictionaries.
# This is not run with my program but was created because I'm lazy and
# didn't want to create the prefix dictionaries by hand.
def prefixDict():
    """Generate the prefix-dictionary files used to prune the board search.

    Reads the word list from 'Dict.txt' and writes one file per prefix
    length (2..5) containing every distinct prefix of that length:
    preDict.txt, preDict3.txt, preDict4.txt, preDict5.txt.

    The original version opened five files and never closed four of them;
    context managers now close everything.
    """
    with open('Dict.txt', 'r') as dictFile:
        words = dictFile.read().split('\n')
    # (prefix length, output file) pairs; length 2 keeps the legacy name.
    outputs = [(2, 'preDict.txt'), (3, 'preDict3.txt'),
               (4, 'preDict4.txt'), (5, 'preDict5.txt')]
    for length, filename in outputs:
        prefixes = {word[:length] for word in words}
        with open(filename, 'w') as out:
            for prefix in prefixes:
                out.write(prefix)
                out.write('\n')
# board -> 2D list, currPos -> list, path -> 2D list
# boggle board, xy pair current position, path that got to that position
# @returns the accumulated `words` list (or None when a prefix check prunes)
def examineState(board, currPos, path, words, dict, preDict):
    """Depth-first search from currPos, collecting dictionary words in `words`.

    `path` is the list of positions visited so far. The caller seeds it with
    the starting position, so currPos is appended a second time on the first
    call; the duplicate letter is dropped by the word[1:] slice below.
    `dict` is the set of valid words; `preDict` holds the prefix sets for
    lengths 2..5 used to prune dead branches early.

    Returns `words` on normal completion, or None when a prefix check fails.
    NOTE(review): callers that extend() the result rely on the *top-level*
    call never hitting a prefix check (its word is only one letter long).
    """
    global moves
    moves += 1  # global counter of states examined, printed by main()
    path = path + [currPos]
    # Rebuild the candidate word from the letters along the path.
    word = ''
    for i in range(0, len(path)):
        curr = path[i]
        word = (word + board[curr[0]][curr[1]]).lower()
    word = word[1:]  # drop the duplicated first letter (see docstring)
    # Prune this branch as soon as the prefix cannot start any word.
    if len(word) == 2:
        if word not in preDict[0]:
            return
    if len(word) == 3:
        if word not in preDict[1]:
            return
    if len(word) == 4:
        if word not in preDict[2]:
            return
    if len(word) == 5:
        if word not in preDict[3]:
            return
    # legalWord = (word, 'no')
    if word in dict:
        # legalWord = (word, 'yes')
        words.append(word)
    # if there is a legal move remaining, take it, otherwise return out of that level of recursion
    if legalMoves(possibleMoves(curr, board), path) != []:
        for i in range(0, len(legalMoves(possibleMoves(currPos, board), path))):
            examineState(board, legalMoves(possibleMoves(currPos, board), path)[i], path, words, dict, preDict)
        return words
    else:
        return words
def classifyWords(words):
    """Group words by length.

    :param words: list of words
    :returns: list of lists where index i holds the words of length i+1.
        At least 15 buckets are returned (the original fixed size), and the
        list grows as needed so words longer than 15 letters no longer
        raise IndexError (the original crashed on them).
    """
    longest = max((len(word) for word in words), default=0)
    classWords = [[] for _ in range(max(15, longest))]
    for word in words:
        classWords[len(word) - 1].append(word)
    return classWords
def main():
    """Load the board and dictionaries, run the search from every start
    square, and print the words found plus timing statistics.

    Reads 'test.txt' (board), 'Dict.txt' (word list) and the four
    preDict*.txt prefix files from the current directory.
    """
    #prefixDict()
    prefixDictionary = [{}, {}, {}, {}, {}]
    # test run
    board = loadBoard('test.txt')
    # Set up dictionaries from text files. Context managers close every
    # handle (the original leaked the four prefix-file handles).
    with open('Dict.txt', 'r') as dictFile:
        dictionary = set(dictFile.read().split('\n'))
    prefix_files = ['preDict.txt', 'preDict3.txt', 'preDict4.txt', 'preDict5.txt']
    for idx, name in enumerate(prefix_files):
        with open(name, 'r') as preDictFile:
            prefixDictionary[idx] = set(preDictFile.read().split('\n'))
    print('OUPUT FROM FLINTSTONE CLASSIC BAM-BAM BASH-IT APPROACH:')
    printBoard(board)
    print("And we're off!")
    print('Running with cleverness ON')
    words = []
    startTime = time.time()
    # NOTE(review): start squares are hard-coded to a 4x4 board; larger
    # boards would only be partially searched.
    for j in range(0, 4):
        for i in range(0, 4):
            words.extend(examineState(board, [i, j], [[i, j]], [], dictionary, prefixDictionary))
    endTime = time.time()
    searchTime = endTime - startTime
    print('All done\n')
    print('Searched total of ' + str(moves) + ' moves in ' + str(searchTime) + ' seconds\n')
    print('Words found: ')
    classifiedWords = classifyWords(words)
    for i in range(0, len(classifiedWords)):
        if classifiedWords[i] != []:
            print(str(i + 1) + ' -letter words: ' + str(classifiedWords[i]))
    print('\nFound ' + str(len(words)) + ' words total, ' + str(len(set(words))) + ' unique.')
    print('Alpha-sorted list of words:')
    print(str(sorted(words)))


if __name__ == "__main__":
    main()
d39b40d76ad390bb6e4fffa2f6e2fbcdc65ce596 | 65 | py | Python | control flow/functions.py | karan1276/learning-python | 9f0791370fbdf9e1d5b94ef874f8eb403d34c22c | [
"MIT"
] | null | null | null | control flow/functions.py | karan1276/learning-python | 9f0791370fbdf9e1d5b94ef874f8eb403d34c22c | [
"MIT"
] | null | null | null | control flow/functions.py | karan1276/learning-python | 9f0791370fbdf9e1d5b94ef874f8eb403d34c22c | [
"MIT"
] | null | null | null |
myFunc("That's neat")
| 13 | 29 | 0.646154 | def myFunc(str = "yo world"):
print str
myFunc("That's neat")
| 20 | 0 | 22 |
6ff315ad8982585ca6bf135699b88f03f7a8eb4a | 2,482 | py | Python | osism/plugins/routeros.py | osism/python-osism | cb4f74501f92fceab1b803d4990ef20335bb7ca1 | [
"Apache-2.0"
] | null | null | null | osism/plugins/routeros.py | osism/python-osism | cb4f74501f92fceab1b803d4990ef20335bb7ca1 | [
"Apache-2.0"
] | 28 | 2022-02-03T16:45:11.000Z | 2022-03-29T13:47:33.000Z | osism/plugins/routeros.py | osism/python-osism | cb4f74501f92fceab1b803d4990ef20335bb7ca1 | [
"Apache-2.0"
] | null | null | null | import logging
import os
from netmiko import ConnectHandler
from paramiko import AutoAddPolicy, SSHClient
from routeros_diff.parser import RouterOSConfig
from scp import SCPClient
| 27.577778 | 94 | 0.698227 | import logging
import os
from netmiko import ConnectHandler
from paramiko import AutoAddPolicy, SSHClient
from routeros_diff.parser import RouterOSConfig
from scp import SCPClient
def get_netmiko_connection(device):
    """Open a Netmiko SSH session to `device` (RouterOS device type)."""
    params = get_parameters(device)
    return ConnectHandler(device_type="mikrotik_routeros", **params)
def get_scp_connection(device):
    """Open an SCP session to `device` over a fresh Paramiko SSH client.

    The caller is responsible for closing the returned SCPClient.
    """
    parameters = get_parameters(device)
    ssh_parameters = {
        "hostname": parameters["host"],
        "username": parameters["username"],
        "password": parameters["password"]
    }
    ssh = SSHClient()
    # NOTE(review): AutoAddPolicy blindly trusts unknown host keys, which
    # permits man-in-the-middle attacks; consider a known-hosts policy.
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    ssh.connect(**ssh_parameters)
    result = SCPClient(ssh.get_transport())
    return result
def get_configuration(device):
    """Fetch the running configuration of `device` as text.

    Exports the config on the device (`/export`), copies the resulting
    .rsc file locally via SCP, reads it, deletes the local copy, and
    returns the configuration text.
    """
    conn = get_netmiko_connection(device)
    conn.send_command(f"/export file={device.name}")
    scp = get_scp_connection(device)
    # NOTE(review): the fixed /tmp path is predictable and can collide
    # between concurrent runs; a tempfile would be safer.
    scp.get(f"/{device.name}.rsc", f"/tmp/{device.name}.rsc")
    scp.close()
    with open(f"/tmp/{device.name}.rsc", 'r') as fp:
        result = fp.read()
    os.remove(f"/tmp/{device.name}.rsc")
    return result
def get_parameters(device):
    """Build the connection-parameter dict from the device's config context.

    Reads deployment_address/user/password out of
    `device.local_context_data` and maps them to the host/username/password
    keys expected by Netmiko and Paramiko.
    """
    # FIXME: use get_context_data() in the future
    context = device.local_context_data
    return {
        'host': context['deployment_address'],
        'username': context['deployment_user'],
        'password': context['deployment_password'],
    }
def deploy(device, current_configuration, last_configuration):
    """Push the difference between two configurations to `device`.

    When `last_configuration` is falsy, the device's live configuration is
    fetched and used as the baseline. Only the computed diff (if any) is
    sent, line by line, as a RouterOS config set over Netmiko.
    """
    if not last_configuration:
        last_configuration = get_configuration(device)
    last = RouterOSConfig.parse(last_configuration)
    current = RouterOSConfig.parse(current_configuration)
    # NOTE(review): this local name shadows the module-level diff() function.
    diff = str(current.diff(last))
    if diff:
        conn = get_netmiko_connection(device)
        conn.send_config_set(diff.split('\n'))
def diff(device, current_configuration, last_configuration):
    """Log the line-by-line configuration diff for `device`.

    When `last_configuration` is falsy, the device's live configuration is
    fetched as the baseline. Diffs larger than 10 lines are rejected with
    an error (a deliberate safety limit, see the NOTE below).
    """
    if not last_configuration:
        last_configuration = get_configuration(device)
    last = RouterOSConfig.parse(last_configuration)
    current = RouterOSConfig.parse(current_configuration)
    # Renamed from `diff` to avoid shadowing this function's own name.
    changes = str(current.diff(last))
    # NOTE: Will be removed later. Is first of all a blocker to avoid making too many changes.
    if len(changes.split('\n')) > 10:
        # Bug fix: the original message was not an f-string (and misspelled
        # "device"), so the device name was never interpolated.
        logging.error(f"Too many changes at once for {device.name}.")
    else:
        for line in changes.split('\n'):
            logging.info(f"diff - {device.name}: {line}")
| 2,157 | 0 | 138 |
31b9775840bb061015540ecd002cded13941b01b | 3,596 | py | Python | tests/test_pytest_pydocstyle.py | henry0312/pytest-docstyle | cc78c65804f87a9fb6386d222a058a1dd593411d | [
"MIT"
] | 9 | 2018-06-02T14:02:26.000Z | 2019-11-28T16:00:02.000Z | tests/test_pytest_pydocstyle.py | henry0312/pytest-docstyle | cc78c65804f87a9fb6386d222a058a1dd593411d | [
"MIT"
] | 9 | 2018-01-15T04:09:48.000Z | 2019-12-11T05:19:59.000Z | tests/test_pytest_pydocstyle.py | henry0312/pytest-docstyle | cc78c65804f87a9fb6386d222a058a1dd593411d | [
"MIT"
] | 3 | 2017-12-27T02:33:30.000Z | 2018-06-09T20:11:45.000Z | import pytest_pydocstyle
# https://docs.pytest.org/en/5.2.2/writing_plugins.html#testing-plugins
pytest_plugins = ["pytester"]
| 25.870504 | 72 | 0.603448 | import pytest_pydocstyle
# https://docs.pytest.org/en/5.2.2/writing_plugins.html#testing-plugins
pytest_plugins = ["pytester"]
def test_option_false(testdir):
    """Without --pydocstyle the 'pydocstyle' option defaults to False."""
    p = testdir.makepyfile("""
        def test_option(request):
            flag = request.config.getoption('pydocstyle')
            assert flag is False
    """)
    # Append a trailing newline; write() returns None, so its result must
    # not be rebound to `p` (the original did `p = p.write(...)`).
    p.write(p.read() + "\n")
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
def test_option_true(testdir):
    """Passing --pydocstyle sets the 'pydocstyle' option to True."""
    p = testdir.makepyfile("""
        def test_option(request):
            flag = request.config.getoption('pydocstyle')
            assert flag is True
    """)
    # write() returns None; do not rebind `p` to its result.
    p.write(p.read() + "\n")
    result = testdir.runpytest('--pydocstyle')
    result.assert_outcomes(passed=1)
def test_ini(testdir):
    """pydocstyle settings from the ini file are honoured by the plugin."""
    testdir.makeini("""
        [pydocstyle]
        convention = numpy
        add-ignore = D100
    """)
    p = testdir.makepyfile(a='''
        def hello():
            """Print hello."""
            print('hello')
    ''')
    # write() returns None; do not rebind `p` to its result.
    p.write(p.read() + "\n")
    result = testdir.runpytest('--pydocstyle')
    result.assert_outcomes(passed=1)
def test_pytest_collect_file(testdir):
    """Plain .py files are collected and fail the D100 check.

    NOTE(review): only two failures are expected, so the .txt file and
    apparently test_d.py itself are not checked -- TODO confirm against
    the plugin's collect hook.
    """
    testdir.tmpdir.ensure('a.py')
    testdir.tmpdir.ensure('b.py')
    testdir.tmpdir.ensure('c.txt')
    testdir.tmpdir.ensure('test_d.py')
    result = testdir.runpytest('--pydocstyle')
    # D100: Missing docstring in public module
    result.assert_outcomes(failed=2)
def test_cache(testdir):
    """A passing file is cached and skipped on the second run; a failing
    file is re-checked every run."""
    # D100: Missing docstring in public module
    testdir.tmpdir.ensure('a.py')
    # The unused `p =` assignment of the original was dropped.
    testdir.makepyfile(b='''\
        """Test."""
        def hello():
            """Print hello."""
            print('hello')
    ''')
    # first run
    result = testdir.runpytest('--pydocstyle')
    result.assert_outcomes(passed=1, failed=1)
    # second run
    result = testdir.runpytest('--pydocstyle')
    result.assert_outcomes(skipped=1, failed=1)
def test_no_cacheprovider(testdir):
    """With the cache provider disabled, both runs re-check the files."""
    # D100: Missing docstring in public module
    testdir.tmpdir.ensure('a.py')
    # The unused `p =` assignment of the original was dropped.
    testdir.makepyfile(b='''\
        """Test."""
        def hello():
            """Print hello."""
            print('hello')
    ''')
    # first run
    result = testdir.runpytest('--pydocstyle', '-p', 'no:cacheprovider')
    result.assert_outcomes(passed=1, failed=1)
    # second run
    result = testdir.runpytest('--pydocstyle', '-p', 'no:cacheprovider')
    result.assert_outcomes(passed=1, failed=1)
def test_strict(testdir):
    """The plugin registers its marker, so --strict-markers still passes."""
    p = testdir.makepyfile(a='''
        """Test strict."""
        def test_blah():
            """Test."""
            pass
    ''')
    # write() returns None; do not rebind `p` to its result.
    p.write(p.read() + "\n")
    result = testdir.runpytest('--strict-markers', '--pydocstyle')
    result.assert_outcomes(passed=1)
def test_nodeid(testdir):
    """The pydocstyle check is reported under a PYDOCSTYLE node id."""
    p = testdir.makepyfile(nodeid='''
        """Test _nodeid."""
        def test_nodeid():
            """Test."""
            pass
    ''')
    # write() returns None; do not rebind `p` to its result.
    p.write(p.read() + "\n")
    result = testdir.runpytest('-m', 'pydocstyle', '--pydocstyle', '-v')
    result.assert_outcomes(passed=1)
    result.stdout.fnmatch_lines(['nodeid.py::PYDOCSTYLE PASSED *'])
class TestItem(object):
    """Unit tests for pytest_pydocstyle.Item (mostly unimplemented stubs)."""
    def test_cache_key(self):
        # The mtime cache is stored under this pytest-cache key.
        assert pytest_pydocstyle.Item.CACHE_KEY == 'pydocstyle/mtimes'
    def test_init(self):
        """Placeholder: not yet implemented."""
        pass
    def test_setup(self):
        """Placeholder: not yet implemented."""
        pass
    def test_runtest(self):
        """Placeholder: not yet implemented."""
        pass
    def test_repr_failure(self):
        """Placeholder: not yet implemented."""
        pass
    def test_reportinfo(self):
        """Placeholder: not yet implemented."""
        pass
class TestPyDocStyleError(object):
    """Unit tests for the plugin's PyDocStyleError exception type."""
    def test_subclass(self):
        # Must be an Exception subclass so it can be raised and reported.
        assert issubclass(pytest_pydocstyle.PyDocStyleError, Exception)
| 3,024 | 15 | 419 |
008790803e7b775bedbd07bf57d31032aeb4bcd3 | 217 | py | Python | db/manage.py | yabirgb/hilos | e3a90aa21c4d6f7e1c0301bf815ceb2a57c156f8 | [
"MIT"
] | 1 | 2020-04-28T08:01:27.000Z | 2020-04-28T08:01:27.000Z | db/manage.py | yabirgb/hilos | e3a90aa21c4d6f7e1c0301bf815ceb2a57c156f8 | [
"MIT"
] | 1 | 2021-06-01T22:03:58.000Z | 2021-06-01T22:03:58.000Z | db/manage.py | yabirgb/hilos | e3a90aa21c4d6f7e1c0301bf815ceb2a57c156f8 | [
"MIT"
] | null | null | null | from peewee import *
import peeweedbevolve
from models_data import Tweet, Branch, calldb
db = calldb()
create_tables()
| 14.466667 | 45 | 0.709677 | from peewee import *
import peeweedbevolve
from models_data import Tweet, Branch, calldb
db = calldb()
def create_tables():
calldb()
db.evolve([Tweet, Branch])
print("Tables created")
create_tables()
| 71 | 0 | 23 |
862dc8bb6d24ecac3d2901e91126649fc31737e3 | 10,931 | py | Python | friend/tests.py | HumphreyLu6/SpongeBook | 9d12e4be0ffc21bb2114df785f4278668a695f39 | [
"Apache-2.0"
] | 1 | 2020-04-01T21:37:49.000Z | 2020-04-01T21:37:49.000Z | friend/tests.py | HumphreyLu6/SpongeBook | 9d12e4be0ffc21bb2114df785f4278668a695f39 | [
"Apache-2.0"
] | 7 | 2020-03-06T00:20:30.000Z | 2020-04-04T21:04:43.000Z | friend/tests.py | HumphreyLu6/SpongeBook | 9d12e4be0ffc21bb2114df785f4278668a695f39 | [
"Apache-2.0"
] | 1 | 2020-07-08T07:29:49.000Z | 2020-07-08T07:29:49.000Z | import json
import base64
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import User
# Create your tests here.
ACCEPT_STATUS = "A"
REJECT_STATUS = "R"
UNFRIEND_STATUS = "R"
| 38.625442 | 87 | 0.519623 | import json
import base64
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import User
# Create your tests here.
# Friend-request status codes used in request payloads.
ACCEPT_STATUS = "A"
REJECT_STATUS = "R"
# NOTE(review): same value as REJECT_STATUS -- unfriending is expressed
# as a reject ("R") in this API. These constants are not referenced by
# the tests below, which use the literals directly.
UNFRIEND_STATUS = "R"
class FriendTestCase(APITestCase):
    """Integration tests for the friend-request API endpoints.

    The heavily duplicated request-body construction of the original
    version is factored into small private helpers; every test issues the
    same HTTP calls, with the same payload values, in the same order.
    """

    def setUp(self):
        """Create three users and their HTTP basic-auth tokens."""
        self.user1 = User.objects.create_user(
            email="user1@email.com", username="user1", password="passqweruser1",
        )
        self.token1 = base64.b64encode(
            bytes("user1@email.com:passqweruser1", "utf-8")
        ).decode("utf-8")
        self.user2 = User.objects.create_user(
            email="user2@email.com", username="user2", password="passqweruser2",
        )
        self.token2 = base64.b64encode(
            bytes("user2@email.com:passqweruser2", "utf-8")
        ).decode("utf-8")
        self.user3 = User.objects.create_user(
            email="user3@email.com", username="user3", password="passqweruser3",
        )
        self.token3 = base64.b64encode(
            bytes("user3@email.com:passqweruser3", "utf-8")
        ).decode("utf-8")

    # ---------- helpers ----------

    def _author_payload(self, user):
        """Serialized author/friend representation used in request bodies."""
        url = f"{user.host}author/{user.id}"
        return {
            "id": url,
            "host": f"{user.host}",
            "displayName": user.username,
            "url": url,
        }

    def _request_body(self, author, friend, request_status=None):
        """Build a friendrequest payload from `author` to `friend`.

        `request_status` ("A" accept / "R" reject-or-unfriend) is only
        included for PATCH calls.
        """
        body = {
            "query": "friendrequest",
            "author": self._author_payload(author),
            "friend": self._author_payload(friend),
        }
        if request_status is not None:
            body["status"] = request_status
        return body

    def _post_friend_request(self, author, friend, token):
        """POST a new friend request and return the response."""
        return self.client.post(
            "/friendrequest",
            self._request_body(author, friend),
            HTTP_AUTHORIZATION=f"Basic {token}",
        )

    def _patch_friend_request(self, author, friend, token, request_status):
        """PATCH an existing friend request (accept/reject/unfriend)."""
        return self.client.patch(
            "/friendrequest",
            self._request_body(author, friend, request_status),
            HTTP_AUTHORIZATION=f"Basic {token}",
        )

    # ---------- tests ----------

    def test_send_get_friend_request(self):
        """A sent request shows up in the recipient's pending list."""
        response = self._post_friend_request(self.user1, self.user2, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # get friend requests for user2
        response = self.client.get(
            f"/author/{self.user2.id}/friendrequests",
            HTTP_AUTHORIZATION=f"Basic {self.token2}",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data["authors"], [f"{self.user1.host}author/{self.user1.id}"]
        )

    def test_accept_friend_request(self):
        """Accepting a pending request returns 200."""
        response = self._post_friend_request(self.user1, self.user2, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # user2 accepts the friend request from user1
        response = self._patch_friend_request(
            self.user1, self.user2, self.token2, "A"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_reject_friend_request(self):
        """Rejecting a pending request returns 204."""
        response = self._post_friend_request(self.user1, self.user2, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # user2 rejects the friend request from user1
        response = self._patch_friend_request(
            self.user1, self.user2, self.token2, "R"
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_unfriend(self):
        """Unfriending (expressed with the same "R" status) returns 204."""
        response = self._post_friend_request(self.user1, self.user2, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # user2 unfriends user1
        response = self._patch_friend_request(
            self.user1, self.user2, self.token2, "R"
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_if_friend(self):
        """The friendship-check endpoint reflects accepted requests only."""
        response = self._post_friend_request(self.user1, self.user2, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response = self._post_friend_request(self.user1, self.user3, self.token1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # user2 accepts; user3 never responds
        response = self._patch_friend_request(
            self.user1, self.user2, self.token2, "A"
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # user1 and user2 are friends
        response = self.client.get(
            f"/author/{self.user1.id}/friends/{self.user2.id}",
            HTTP_AUTHORIZATION=f"Basic {self.token1}",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["friends"], True)
        # user1 and user3 are not friends
        response = self.client.get(
            f"/author/{self.user1.id}/friends/{self.user3.id}",
            HTTP_AUTHORIZATION=f"Basic {self.token1}",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["friends"], False)
| 10,464 | 13 | 184 |
3836be24e6da9b7521e2fa8859eb76a407788328 | 4,926 | py | Python | Crawler/Install Files/eyeD3-0.7.4/src/eyed3/plugins/lameinfo.py | hanvo/MusicCloud | b91b9481df087955fcb09f472308c9ad1a94ba94 | [
"BSD-3-Clause"
] | null | null | null | Crawler/Install Files/eyeD3-0.7.4/src/eyed3/plugins/lameinfo.py | hanvo/MusicCloud | b91b9481df087955fcb09f472308c9ad1a94ba94 | [
"BSD-3-Clause"
] | null | null | null | Crawler/Install Files/eyeD3-0.7.4/src/eyed3/plugins/lameinfo.py | hanvo/MusicCloud | b91b9481df087955fcb09f472308c9ad1a94ba94 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2009 Travis Shirk <travis@pobox.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from __future__ import print_function
import os
from eyed3 import LOCAL_ENCODING as ENCODING
from eyed3.utils import formatSize, formatTime
from eyed3.utils.console import (printMsg, printError, printWarning, boldText,
Fore, HEADER_COLOR)
from eyed3.plugins import LoaderPlugin
| 43.59292 | 80 | 0.556029 | # -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2009 Travis Shirk <travis@pobox.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from __future__ import print_function

import math
import os

from eyed3 import LOCAL_ENCODING as ENCODING
from eyed3.utils import formatSize, formatTime
from eyed3.utils.console import (printMsg, printError, printWarning, boldText,
                                 Fore, HEADER_COLOR)
from eyed3.plugins import LoaderPlugin
class LameInfoPlugin(LoaderPlugin):
    """eyeD3 plugin that prints the LAME/Xing header details of an mp3."""
    NAMES = ["lameinfo", "xing"]
    SUMMARY = u"Outputs lame header (if one exists) for file."
    DESCRIPTION = (
        u"The 'lame' (or xing) header provides extra information about the mp3 "
        "that is useful to players and encoders but not officially part of "
        "the mp3 specification. Variable bit rate mp3s, for example, use this "
        "header.\n\n"
        "For more details see "
        "`here <http://gabriel.mp3-tech.org/mp3infotag.html>`_"
    )

    def printHeader(self, filePath):
        """Print a banner with the file's base name and size."""
        from stat import ST_SIZE
        fileSize = os.stat(filePath)[ST_SIZE]
        size_str = formatSize(fileSize).encode(ENCODING)
        print("\n%s\t%s[ %s ]%s" % (boldText(os.path.basename(filePath),
                                             HEADER_COLOR()),
                                    HEADER_COLOR(), size_str,
                                    Fore.RESET))
        print("-" * 79)

    def handleFile(self, f):
        """Load `f` and print its LAME tag fields, if any."""
        super(LameInfoPlugin, self).handleFile(f)

        self.printHeader(f)
        if not self.audio_file or not self.audio_file.info.lame_tag:
            printMsg('No LAME Tag')
            return

        lt = self.audio_file.info.lame_tag
        if "infotag_crc" not in lt:
            # Tag without a CRC: only the encoder version may be present.
            try:
                printMsg('%s: %s' % ('Encoder Version', lt['encoder_version']))
            except KeyError:
                pass
            return

        values = []

        values.append(('Encoder Version', lt['encoder_version']))
        values.append(('LAME Tag Revision', lt['tag_revision']))
        values.append(('VBR Method', lt['vbr_method']))
        values.append(('Lowpass Filter', lt['lowpass_filter']))

        if "replaygain" in lt:
            try:
                peak = lt['replaygain']['peak_amplitude']
                # Bug fix: `math` was referenced here without ever being
                # imported, raising NameError (which the KeyError handler
                # below does NOT catch). `import math` was added at the
                # top of the file.
                db = 20 * math.log10(peak)
                val = '%.8f (%+.1f dB)' % (peak, db)
                values.append(('Peak Amplitude', val))
            except KeyError:
                pass
            # Renamed from `type` to avoid shadowing the builtin.
            for gain_type in ['radio', 'audiofile']:
                try:
                    gain = lt['replaygain'][gain_type]
                    name = '%s Replay Gain' % gain['name'].capitalize()
                    val = '%s dB (%s)' % (gain['adjustment'], gain['originator'])
                    values.append((name, val))
                except KeyError:
                    pass

        values.append(('Encoding Flags', ' '.join((lt['encoding_flags']))))
        if lt['nogap']:
            values.append(('No Gap', ' and '.join(lt['nogap'])))
        values.append(('ATH Type', lt['ath_type']))
        values.append(('Bitrate (%s)' % lt['bitrate'][1], lt['bitrate'][0]))
        values.append(('Encoder Delay', '%s samples' % lt['encoder_delay']))
        values.append(('Encoder Padding', '%s samples' % lt['encoder_padding']))
        values.append(('Noise Shaping', lt['noise_shaping']))
        values.append(('Stereo Mode', lt['stereo_mode']))
        values.append(('Unwise Settings', lt['unwise_settings']))
        values.append(('Sample Frequency', lt['sample_freq']))
        values.append(('MP3 Gain', '%s (%+.1f dB)' % (lt['mp3_gain'],
                                                      lt['mp3_gain'] * 1.5)))
        values.append(('Preset', lt['preset']))
        values.append(('Surround Info', lt['surround_info']))
        values.append(('Music Length', '%s' % formatSize(lt['music_length'])))
        values.append(('Music CRC-16', '%04X' % lt['music_crc']))
        values.append(('LAME Tag CRC-16', '%04X' % lt['infotag_crc']))

        # Renamed from `format` to avoid shadowing the builtin.
        fmt = '%-20s: %s'
        for v in values:
            printMsg(fmt % (v))
| 3,079 | 556 | 23 |
5ea65d4873a5243d2a44e0d39d0611c949e980c6 | 3,307 | py | Python | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""Clothing_Recommender Project .ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nw0ewNdkx8o3WULAp2ynhHpbq1kVq7YZ
Clean the data and use input
"""
## Import and Organize Data ##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#read clean file (downloaded from Task 1)
df=pd.read_csv('CleanedData.csv', sep=',')
#Pivot table (clothingID, age, rating) - Nan is replaced with 0
train = df.pivot_table(index='Age', columns='ClothingID', values='Rating')
#sort train data
train = train.sort_values('Age', ascending=True)
###Create a greeting
print("Welcome, let us recommend a product for you")
#Take user input
Name =input('Please enter your name: ')
Age = int(input('Please enter your age: '))
CID_user = int(input("Enter Clothing ID: ")) #90
while CID_user not in train.columns:
print('Invalid: No data for ID')
CID_user = int(input("Enter valid Clothing ID: "))
rating_user = float(input("Enter Rating for Clothing ID: ")) #4
##use this later (if user has more than one rating to enter)
#entries = int(input("How many ratings will you enter? "))
#for x in range(entries):
#create array with user data
userArray = pd.DataFrame().reindex_like(train)
userArray.dropna(thresh=1,inplace=True)
userArray.loc[Age,CID_user] = rating_user #enter user data
from sklearn.metrics.pairwise import nan_euclidean_distances
#find euclidean distance between all rows of train and first row of test *ignores nan
distance = np.zeros((0,2)) #create empty array
for index, row in train.iterrows(): #iterate through each row of train
result = float(nan_euclidean_distances([userArray.loc[Age]], [train.loc[index]])) #compute the euclidean distance between two rows, *confirmed it works thru excel
result_array = [index, result] #place age and distance into an array
distance = np.append(distance,[result_array],axis= 0)
#convert array to a dataframe
dfDistance = pd.DataFrame({'Age': distance[:, 0], 'E-Distance': distance[:, 1]})
dfDistance.head()
k= 5
#sort by distance, reset the index
dfDistance = dfDistance.sort_values('E-Distance', ascending=True).head(20)
dfDistance = dfDistance.reset_index(drop=True)
dfDistance.drop(dfDistance[dfDistance.index > k-1].index, inplace=True)
dfDistance.head()
#NOTE: for calculating the predicted rating, could use an IDW Interpolation function shown here https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python
#just using mean of each to test a solution, will come back and try more complex/accurate functions later
#assume k of 5####
k_array = pd.DataFrame().reindex_like(train)
meanArray = pd.DataFrame()
for x in dfDistance['Age']:
k_array = k_array.append([train.loc[x]]) #make array of the k closest ages
meanArray = meanArray.append(k_array.mean(),ignore_index = True).transpose()
meanArray.dropna(axis=0,inplace=True)
meanArray.columns = ["Mean"]
meanArray = meanArray[meanArray.Mean == 5]
recommend = list(meanArray.index.values)
print("recommended ClothingID's are: ")
print(recommend)
#feedback, clothingID (choose top 5), department
#reverse lookup clothingID for department
# feedback (choose first 3)
| 35.180851 | 195 | 0.752646 | # -*- coding: utf-8 -*-
"""Clothing_Recommender Project .ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nw0ewNdkx8o3WULAp2ynhHpbq1kVq7YZ
Clean the data and use input
"""
## Import and Organize Data ##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#read clean file (downloaded from Task 1)
df=pd.read_csv('CleanedData.csv', sep=',')
#Pivot table (clothingID, age, rating) - Nan is replaced with 0
train = df.pivot_table(index='Age', columns='ClothingID', values='Rating')
#sort train data
train = train.sort_values('Age', ascending=True)
###Create a greeting
print("Welcome, let us recommend a product for you")
#Take user input
Name =input('Please enter your name: ')
Age = int(input('Please enter your age: '))
CID_user = int(input("Enter Clothing ID: ")) #90
while CID_user not in train.columns:
print('Invalid: No data for ID')
CID_user = int(input("Enter valid Clothing ID: "))
rating_user = float(input("Enter Rating for Clothing ID: ")) #4
##use this later (if user has more than one rating to enter)
#entries = int(input("How many ratings will you enter? "))
#for x in range(entries):
#create array with user data
userArray = pd.DataFrame().reindex_like(train)
userArray.dropna(thresh=1,inplace=True)
userArray.loc[Age,CID_user] = rating_user #enter user data
from sklearn.metrics.pairwise import nan_euclidean_distances
#find euclidean distance between all rows of train and first row of test *ignores nan
distance = np.zeros((0,2)) #create empty array
for index, row in train.iterrows(): #iterate through each row of train
result = float(nan_euclidean_distances([userArray.loc[Age]], [train.loc[index]])) #compute the euclidean distance between two rows, *confirmed it works thru excel
result_array = [index, result] #place age and distance into an array
distance = np.append(distance,[result_array],axis= 0)
#convert array to a dataframe
dfDistance = pd.DataFrame({'Age': distance[:, 0], 'E-Distance': distance[:, 1]})
dfDistance.head()
k= 5
#sort by distance, reset the index
dfDistance = dfDistance.sort_values('E-Distance', ascending=True).head(20)
dfDistance = dfDistance.reset_index(drop=True)
dfDistance.drop(dfDistance[dfDistance.index > k-1].index, inplace=True)
dfDistance.head()
#NOTE: for calculating the predicted rating, could use an IDW Interpolation function shown here https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python
#just using mean of each to test a solution, will come back and try more complex/accurate functions later
#assume k of 5####
k_array = pd.DataFrame().reindex_like(train)
meanArray = pd.DataFrame()
for x in dfDistance['Age']:
k_array = k_array.append([train.loc[x]]) #make array of the k closest ages
meanArray = meanArray.append(k_array.mean(),ignore_index = True).transpose()
meanArray.dropna(axis=0,inplace=True)
meanArray.columns = ["Mean"]
meanArray = meanArray[meanArray.Mean == 5]
recommend = list(meanArray.index.values)
print("recommended ClothingID's are: ")
print(recommend)
#feedback, clothingID (choose top 5), department
#reverse lookup clothingID for department
# feedback (choose first 3)
| 0 | 0 | 0 |
3d7273fc6bb41c58edc1197e7e7b8b7f7da08c10 | 2,773 | py | Python | src/features/build_rtree.py | mingxiiii/trajectory-search | ac3c751a47be8f6a6cc389d53e5c001c01c4ca8f | [
"FTL"
] | null | null | null | src/features/build_rtree.py | mingxiiii/trajectory-search | ac3c751a47be8f6a6cc389d53e5c001c01c4ca8f | [
"FTL"
] | null | null | null | src/features/build_rtree.py | mingxiiii/trajectory-search | ac3c751a47be8f6a6cc389d53e5c001c01c4ca8f | [
"FTL"
] | null | null | null | from rtree.index import Rtree
from src.features.helper import *
import sys
import logging
import time
if __name__ == '__main__':
    # CLI usage: python build_rtree.py <train_dataset_name> <qgram_size>
    train_data = sys.argv[1]
    q_size = int(sys.argv[2])
    main(train_data, q_size)
| 34.6625 | 89 | 0.647313 | from rtree.index import Rtree
from src.features.helper import *
import sys
import logging
import time
def main(train, qgram_size):
    """Build an R-tree index over the q-grams of a trajectory data set.

    Reads ``./data/processed/<train>.txt``, extracts q-grams of size
    ``qgram_size`` from each trajectory, and writes two artifacts under
    ``./data/interim/<train>/``: a pickled id->key mapping and the R-tree
    index files (``.dat``/``.idx``).

    Parameters
    ----------
    train : str
        Name of the training data set; also used for the log file name and
        the output directory.
    qgram_size : int
        Size of the q-grams extracted from each trajectory.
    """
    # Log to both a per-dataset file under ./log/ and the console.
    logger = logging.getLogger('build_rtree')
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler('./log/%s' % train)
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info('---------------------------- Build R-tree ----------------------------')
    qgram_tag = 'q_%d' % qgram_size
    train_path = './data/processed/%s.txt' % train
    # load_trajectory/build_qgram/build_id_to_key/save_pickle presumably come
    # from the ``src.features.helper`` star import -- TODO confirm.
    data = load_trajectory(train_path)
    logger.info('Load train trajectory: %s' % train_path)
    trajectory, id_list = build_qgram(data, qgram_size)
    id_to_key_dict = build_id_to_key(id_list)
    # order_key_dict = build_order_dict(id_list)
    #save orderId-key mapping
    #key: trajectory id in string, value: encoded key
    rtree_id_dict_path = './data/interim/%s/rtree_id_dict_%s.txt' % (train, qgram_tag)
    save_pickle(id_to_key_dict, rtree_id_dict_path)
    logger.info('Output rtree_id_dict: %s' % rtree_id_dict_path)
    #key: key, value: trajectory id in string
    # filename = '../data/processed/order_key_dict.txt'
    # outfile = open(filename,'wb')
    # pickle.dump(order_key_dict,outfile)
    # outfile.close()
    # R-tree constructor
    # parameter: 'data_full' is the filename of R-tree storage
    # 2 files are created: data_full.dat, data_full.idx
    # return: r-tree index
    rtree_path = './data/interim/%s/my_rtree_%s' % (train, qgram_tag)
    data_idx = Rtree(rtree_path)
    logger.info('Output R-tree: %s' % rtree_path)
    # put all trajectories into r-tree in the form of bounding box
    node_id = 0
    start_time = time.time()
    for key, qgrams in trajectory.items():
        for qgram in qgrams:
            # parameters:
            # 1. node id
            # 2. bounding box(point): (x,y,x,y)
            # 3. data inside each node: trajectory's key from order_dict
            # Coordinates are rounded to 5 decimals before insertion.
            x = np.around(qgram[0], decimals=5)
            y = np.around(qgram[1], decimals=5)
            data_idx.insert(node_id, (x, y, x, y), obj=(id_to_key_dict[key]))
            node_id += 1
    # Deleting the index flushes it to the .dat/.idx files on disk.
    del data_idx
    end_time = time.time()
    logger.info("exec time: "+str(end_time-start_time))
    logger.info('Finished building R-tree')
if __name__ == '__main__':
    # CLI usage: python build_rtree.py <train_dataset_name> <qgram_size>
    train_data = sys.argv[1]
    q_size = int(sys.argv[2])
    main(train_data, q_size)
| 2,530 | 0 | 23 |
d65bca6881e00c4d8e00349db6bc84adc3dfb3d6 | 141 | py | Python | wsgi.py | brettrhenderson/WhiteElephant | e63b7ceae22e7da7b0f5b2b0775308da34b42e3b | [
"MIT"
] | null | null | null | wsgi.py | brettrhenderson/WhiteElephant | e63b7ceae22e7da7b0f5b2b0775308da34b42e3b | [
"MIT"
] | null | null | null | wsgi.py | brettrhenderson/WhiteElephant | e63b7ceae22e7da7b0f5b2b0775308da34b42e3b | [
"MIT"
] | null | null | null | from app import app
import logging

# Configure root logging once at import time; WARNING keeps serving output
# quiet while still surfacing problems.
logging.basicConfig(level=logging.WARNING)

if __name__ == "__main__":
    # Development-only entry point: a production WSGI server imports ``app``
    # directly and never executes this branch.
    app.debug = True
app.run() | 17.625 | 42 | 0.723404 | from app import app
import logging
logging.basicConfig(level=logging.WARNING)
if __name__ == "__main__":
app.debug = True
app.run() | 0 | 0 | 0 |
f7cfaa20a69f2a7b5703c65f15095a00399bcd2b | 653 | py | Python | var/spack/repos/builtin/packages/r-affypdnn/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/r-affypdnn/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-affypdnn/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffypdnn(RPackage):
    """The package contains functions to perform the PDNN method
    described by Li Zhang et al."""

    # Upstream Bioconductor project page.
    homepage = "https://www.bioconductor.org/packages/affypdnn/"
    # Source is fetched from the Bioconductor git mirror at a pinned commit.
    git = "https://git.bioconductor.org/packages/affypdnn.git"

    version('1.50.0', commit='97ff68e9f51f31333c0330435ea23b212b3ed18a')

    # This release only builds against the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.50.0')
    depends_on('r-affy', type=('build', 'run'))
| 32.65 | 73 | 0.709035 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffypdnn(RPackage):
"""The package contains functions to perform the PDNN method
described by Li Zhang et al."""
homepage = "https://www.bioconductor.org/packages/affypdnn/"
git = "https://git.bioconductor.org/packages/affypdnn.git"
version('1.50.0', commit='97ff68e9f51f31333c0330435ea23b212b3ed18a')
depends_on('r@3.4.0:3.4.9', when='@1.50.0')
depends_on('r-affy', type=('build', 'run'))
| 0 | 0 | 0 |
d18a94277219f1495767914e43a9e25e30e24490 | 1,605 | py | Python | modules/population_initialization.py | EmanueleMusumeci/MMPI-2_GeneticAlgorithm_Clustering_no_data | 32a18f080387f296a75c99190abb4b0df1cd4779 | [
"CC0-1.0"
] | null | null | null | modules/population_initialization.py | EmanueleMusumeci/MMPI-2_GeneticAlgorithm_Clustering_no_data | 32a18f080387f296a75c99190abb4b0df1cd4779 | [
"CC0-1.0"
] | null | null | null | modules/population_initialization.py | EmanueleMusumeci/MMPI-2_GeneticAlgorithm_Clustering_no_data | 32a18f080387f296a75c99190abb4b0df1cd4779 | [
"CC0-1.0"
] | null | null | null | #######################################################################
"""
@author: Emanuele Musumeci (https://github.com/EmanueleMusumeci)
PopulationInitializer abstract class and basic initializer that generates
a population of random binary strings of a given length
"""
#######################################################################
import abc
import numpy as np
from numpy import random
#Base abstract class for population initialization methods, that generate a population for the genetic optimization process
#Generate population of random binary strings of a given length
#Generates a single binary individual
#Generates a population of random binary individuals
| 32.1 | 123 | 0.658567 | #######################################################################
"""
@author: Emanuele Musumeci (https://github.com/EmanueleMusumeci)
PopulationInitializer abstract class and basic initializer that generates
a population of random binary strings of a given length
"""
#######################################################################
import abc
import numpy as np
from numpy import random
#Base abstract class for population initialization methods, that generate a population for the genetic optimization process
class PopulationInitializer(metaclass=abc.ABCMeta):
    """Abstract base class for population initializers.

    Concrete subclasses implement :meth:`generate_individual` and
    :meth:`generate_population` to produce the starting population for the
    genetic optimization process.
    """

    def __init__(self, population_size):
        # Number of individuals each generated population will contain.
        self.population_size = population_size

    @abc.abstractmethod
    def generate_population(self):
        """Return a list of ``population_size`` individuals."""
        pass

    @abc.abstractmethod
    def generate_individual(self):
        """Return a single individual (representation is subclass-defined)."""
        pass
#Generate population of random binary strings of a given length
class BinaryPopulationInitializer(PopulationInitializer):
    """Initializer producing random fixed-length binary strings.

    Each individual is a list of ``population_bits`` values, each drawn
    independently from {0, 1} with equal probability.
    """

    def __init__(self, population_size, population_bits):
        super().__init__(population_size)
        # Length (in bits) of every generated individual.
        self.population_bits = population_bits

    def generate_individual(self):
        """Return one random binary individual as a list of 0/1 ints."""
        return [1 if random.random() > 0.5 else 0
                for _ in range(self.population_bits)]

    def generate_population(self):
        """Return ``population_size`` freshly generated binary individuals."""
        return [self.generate_individual()
                for _ in range(self.population_size)]
| 568 | 194 | 123 |
915aa51395b0122e7d71f533efe1d0daf8c758c5 | 14,691 | py | Python | pycondor/dagman.py | karlhornlund/pycondor | 7f0dd59c776697d402515b78b35c5fd6084c2c5c | [
"MIT"
] | 1 | 2020-01-21T10:25:25.000Z | 2020-01-21T10:25:25.000Z | pycondor/dagman.py | karlhornlund/pycondor | 7f0dd59c776697d402515b78b35c5fd6084c2c5c | [
"MIT"
] | null | null | null | pycondor/dagman.py | karlhornlund/pycondor | 7f0dd59c776697d402515b78b35c5fd6084c2c5c | [
"MIT"
] | null | null | null |
import os
import subprocess
from .utils import checkdir, get_condor_version, requires_command
from .basenode import BaseNode
from .job import Job
def _iter_job_args(job):
    """
    Iterates over Job args list. Yields the name (and JobArg) for each node
    to be used when adding job to a Dagman (i.e. the name in the
    'JOB name job_submit_file' line).

    Parameters
    ----------
    job : Job
        Job to iterate over. Note that the submit file for job must be built
        prior to using _iter_job_args.

    Yields
    ------
    node_name : str
        Node name to use in Dagman object.
    job_arg : JobArg namedtuple
        Job argument object (``arg``, ``name``, ``retry`` attributes).

    Raises
    ------
    TypeError
        If ``job`` is not a Job instance.
    ValueError
        If ``job`` has not been built yet.
    """
    if not isinstance(job, Job):
        raise TypeError('Expecting a Job object, got {}'.format(type(job)))
    if not getattr(job, '_built', False):
        raise ValueError('Job {} must be built before adding it '
                         'to a Dagman'.format(job.name))
    # Bug fix (PEP 479): raising StopIteration inside a generator is
    # converted to RuntimeError on Python 3.7+. A plain ``return`` is the
    # correct way to end the generator when the job has no arguments.
    if len(job.args) == 0:
        return
    for idx, job_arg in enumerate(job):
        arg, name, retry = job_arg
        # Named arguments get '<submit_name>_<name>' node names; unnamed
        # ones fall back to the positional '<submit_name>_arg_<idx>' form.
        if name is not None:
            node_name = '{}_{}'.format(job.submit_name, name)
        else:
            node_name = '{}_arg_{}'.format(job.submit_name, idx)
        yield node_name, job_arg
def _get_parent_child_string(node):
    """Constructs the 'Parent ... Child ...' dependency line for ``node``.

    Parent names are taken from ``node.parents``; child names are the
    expanded node names of ``node`` itself (one per argument for a built
    Job with arguments, otherwise the single submit name).
    """
    if not isinstance(node, BaseNode):
        raise ValueError('Expecting a Job or Dagman object, '
                         'got {}'.format(type(node)))

    def _node_names(n):
        # A Job with arguments expands to one DAG node per argument;
        # anything else contributes its single submit name.
        if isinstance(n, Job) and len(n) > 0:
            return ['{}'.format(name) for name, _ in _iter_job_args(n)]
        return ['{}'.format(n.submit_name)]

    parent_names = []
    for parent_node in node.parents:
        parent_names.extend(_node_names(parent_node))

    tokens = ['Parent'] + parent_names + ['Child'] + _node_names(node)
    return ' '.join(tokens)
class Dagman(BaseNode):
"""
Dagman object consisting of a series of Jobs and sub-Dagmans to manage.
Note that the ``submit`` parameter can be explicitly given or configured
by setting the ``PYCONDOR_SUBMIT_DIR`` environment variable. An explicitly
given value for ``submit`` will be used over the environment variable,
while the environment variable will be used over a default value.
Parameters
----------
name : str
Name of the Dagman instance. This will also be the name of the
corresponding error, log, output, and submit files associated with
this Dagman.
submit : str
Path to directory where condor dagman submit files will be written
(defaults to the directory was the Dagman was submitted from).
extra_lines : list or None, optional
List of additional lines to be added to submit file.
.. versionadded:: 0.1.1
dag : Dagman, optional
If specified, Dagman will be added to dag as a subdag
(default is None).
verbose : int, optional
Level of logging verbosity option are 0-warning, 1-info,
2-debugging (default is 0).
Attributes
----------
jobs : list
The list of jobs for this Dagman instance to manage.
parents : list
List of parent Jobs and Dagmans. Ensures that Jobs and Dagmans in the
parents list will complete before this Dagman is submitted to HTCondor.
children : list
List of child Jobs and Dagmans. Ensures that Jobs and Dagmans in the
children list will be submitted only after this Dagman has completed.
"""
def add_job(self, job):
"""Add job to Dagman
Parameters
----------
job : Job
Job to append to Dagman jobs list.
Returns
-------
self : object
Returns self.
"""
self._add_node(job)
return self
def add_subdag(self, dag):
"""Add dag to Dagman
Parameters
----------
dag : Dagman
Subdag to append to Dagman jobs list.
Returns
-------
self : object
Returns self.
"""
self._add_node(dag)
return self
def _get_job_arg_lines(self, job, fancyname):
"""Constructs the lines to be added to a Dagman related to job
"""
if not isinstance(job, Job):
raise TypeError('Expecting a Job object, got {}'.format(type(job)))
if not getattr(job, '_built', False):
raise ValueError('Job {} must be built before adding it '
'to a Dagman'.format(job.name))
job_arg_lines = []
if len(job.args) == 0:
job_line = 'JOB {} {}'.format(job.submit_name, job.submit_file)
job_arg_lines.append(job_line)
else:
for node_name, job_arg in _iter_job_args(job):
# Check that '.' or '+' are not in node_name
if '.' in node_name or '+' in node_name:
self._has_bad_node_names = True
arg, name, retry = job_arg
# Add JOB line with Job submit file
job_line = 'JOB {} {}'.format(node_name, job.submit_file)
job_arg_lines.append(job_line)
# Add job ARGS line for command line arguments
arg_line = 'VARS {} ARGS="{}"'.format(node_name, arg)
job_arg_lines.append(arg_line)
# Define job_name variable if there are arg_names for job
if job._has_arg_names:
if name is not None:
job_name = node_name
else:
job_name = job.submit_name
job_name_line = 'VARS {} job_name="{}"'.format(node_name,
job_name)
job_arg_lines.append(job_name_line)
# Add retry line for Job
if retry is not None:
retry_line = 'Retry {} {}'.format(node_name, retry)
job_arg_lines.append(retry_line)
return job_arg_lines
def build(self, makedirs=True, fancyname=True):
"""Build and saves the submit file for Dagman
Parameters
----------
makedirs : bool, optional
If Job directories (e.g. error, output, log, submit) don't exist,
create them (default is ``True``).
fancyname : bool, optional
Appends the date and unique id number to error, log, output, and
submit files. For example, instead of ``dagname.submit`` the submit
file becomes ``dagname_YYYYMMD_id``. This is useful when running
several Dags/Jobs of the same name (default is ``True``).
Returns
-------
self : object
Returns self.
"""
if getattr(self, '_built', False):
self.logger.warning(
'{} submit file has already been built. '
'Skipping the build process...'.format(self.name))
return self
name = self._get_fancyname() if fancyname else self.name
submit_file = os.path.join(self.submit, '{}.submit'.format(name))
self.submit_file = submit_file
self.submit_name = name
checkdir(self.submit_file, makedirs)
# Build submit files for all nodes in self.nodes
# Note: nodes must be built before the submit file for self is built
for node_index, node in enumerate(self.nodes, start=1):
if isinstance(node, Job):
node._build_from_dag(makedirs, fancyname)
elif isinstance(node, Dagman):
node.build(makedirs, fancyname)
else:
raise TypeError('Nodes must be either a Job or Dagman object')
# Write dag submit file
self.logger.info('Building DAG submission file {}...'.format(
self.submit_file))
lines = []
parent_child_lines = []
for node_index, node in enumerate(self.nodes, start=1):
self.logger.info('Working on {} [{} of {}]'.format(node.name,
node_index, len(self.nodes)))
# Build the BaseNode submit file
if isinstance(node, Job):
# Add Job variables to Dagman submit file
job_arg_lines = self._get_job_arg_lines(node, fancyname)
lines.extend(job_arg_lines)
elif isinstance(node, Dagman):
subdag_string = _get_subdag_string(node)
lines.append(subdag_string)
else:
raise TypeError('Nodes must be either a Job or Dagman object')
# Add parent/child information, if necessary
if node.hasparents():
parent_child_string = _get_parent_child_string(node)
parent_child_lines.append(parent_child_string)
# Add any extra lines to submit file, if specified
if self.extra_lines:
lines.extend(self.extra_lines)
# Write lines to dag submit file
with open(submit_file, 'w') as dag:
dag.writelines('\n'.join(lines + ['\n#Inter-job dependencies'] +
parent_child_lines))
self._built = True
self.logger.info('Dagman submission file for {} successfully '
'built!'.format(self.name))
return self
@requires_command('condor_submit_dag')
def submit_dag(self, submit_options=None):
"""Submits Dagman to condor
Parameters
----------
submit_options : str, optional
Options to be passed to ``condor_submit_dag`` for this Dagman
(see the `condor_submit_dag documentation
<http://research.cs.wisc.edu/htcondor/manual/current/condor_submit_dag.html>`_
for possible options).
Returns
-------
self : object
Returns self.
"""
# Construct condor_submit_dag command
command = 'condor_submit_dag'
if submit_options is not None:
command += ' {}'.format(submit_options)
command += ' {}'.format(self.submit_file)
submit_dag_proc = subprocess.Popen([command],
stdout=subprocess.PIPE,
shell=True)
# Check that there are no illegal node names for newer condor versions
condor_version = get_condor_version()
if condor_version >= (8, 7, 2) and self._has_bad_node_names:
err = ("Found an illegal character (either '+' or '.') in the "
"name for a node in Dagman {}. As of HTCondor version "
"8.7.2, '+' and '.' are prohibited in Dagman node names. "
"This means a '+' or '.' character is in a Job name, "
"Dagman name, or the name for a Job argument.".format(
self.name))
raise RuntimeError(err)
# Execute condor_submit_dag command
out, err = submit_dag_proc.communicate()
print(out)
return self
@requires_command('condor_submit_dag')
def build_submit(self, makedirs=True, fancyname=True, submit_options=None):
"""Calls build and submit sequentially
Parameters
----------
makedirs : bool, optional
If Job directories (e.g. error, output, log, submit) don't exist,
create them (default is ``True``).
fancyname : bool, optional
Appends the date and unique id number to error, log, output, and
submit files. For example, instead of ``dagname.submit`` the submit
file becomes ``dagname_YYYYMMD_id``. This is useful when running
several Dags/Jobs of the same name (default is ``True``).
submit_options : str, optional
Options to be passed to ``condor_submit_dag`` for this Dagman
(see the `condor_submit_dag documentation
<http://research.cs.wisc.edu/htcondor/manual/current/condor_submit_dag.html>`_
for possible options).
Returns
-------
self : object
Returns self.
"""
self.build(makedirs, fancyname)
self.submit_dag(submit_options=submit_options)
return self
| 35.744526 | 90 | 0.57137 |
import os
import subprocess
from .utils import checkdir, get_condor_version, requires_command
from .basenode import BaseNode
from .job import Job
def _get_subdag_string(dagman):
    """Constructs the 'SUBDAG EXTERNAL name submit_file' line used when
    adding ``dagman`` as a sub-DAG of another Dagman's submit file.
    """
    if not isinstance(dagman, Dagman):
        raise TypeError(
            'Expecting a Dagman object, got {}'.format(type(dagman)))
    subdag_string = 'SUBDAG EXTERNAL {} {}'.format(dagman.submit_name,
                                                   dagman.submit_file)
    return subdag_string
def _iter_job_args(job):
    """
    Iterates over Job args list. Yields the name (and JobArg) for each node
    to be used when adding job to a Dagman (i.e. the name in the
    'JOB name job_submit_file' line).

    Parameters
    ----------
    job : Job
        Job to iterate over. Note that the submit file for job must be built
        prior to using _iter_job_args.

    Yields
    ------
    node_name : str
        Node name to use in Dagman object.
    job_arg : JobArg namedtuple
        Job argument object (``arg``, ``name``, ``retry`` attributes).

    Raises
    ------
    TypeError
        If ``job`` is not a Job instance.
    ValueError
        If ``job`` has not been built yet.
    """
    if not isinstance(job, Job):
        raise TypeError('Expecting a Job object, got {}'.format(type(job)))
    if not getattr(job, '_built', False):
        raise ValueError('Job {} must be built before adding it '
                         'to a Dagman'.format(job.name))
    # Bug fix (PEP 479): raising StopIteration inside a generator is
    # converted to RuntimeError on Python 3.7+. A plain ``return`` is the
    # correct way to end the generator when the job has no arguments.
    if len(job.args) == 0:
        return
    for idx, job_arg in enumerate(job):
        arg, name, retry = job_arg
        # Named arguments get '<submit_name>_<name>' node names; unnamed
        # ones fall back to the positional '<submit_name>_arg_<idx>' form.
        if name is not None:
            node_name = '{}_{}'.format(job.submit_name, name)
        else:
            node_name = '{}_arg_{}'.format(job.submit_name, idx)
        yield node_name, job_arg
def _get_parent_child_string(node):
    """Constructs the 'Parent ... Child ...' dependency line for ``node``.

    Parent names are taken from ``node.parents``; child names are the
    expanded node names of ``node`` itself (one per argument for a built
    Job with arguments, otherwise the single submit name).
    """
    if not isinstance(node, BaseNode):
        raise ValueError('Expecting a Job or Dagman object, '
                         'got {}'.format(type(node)))

    def _node_names(n):
        # A Job with arguments expands to one DAG node per argument;
        # anything else contributes its single submit name.
        if isinstance(n, Job) and len(n) > 0:
            return ['{}'.format(name) for name, _ in _iter_job_args(n)]
        return ['{}'.format(n.submit_name)]

    parent_names = []
    for parent_node in node.parents:
        parent_names.extend(_node_names(parent_node))

    tokens = ['Parent'] + parent_names + ['Child'] + _node_names(node)
    return ' '.join(tokens)
class Dagman(BaseNode):
"""
Dagman object consisting of a series of Jobs and sub-Dagmans to manage.
Note that the ``submit`` parameter can be explicitly given or configured
by setting the ``PYCONDOR_SUBMIT_DIR`` environment variable. An explicitly
given value for ``submit`` will be used over the environment variable,
while the environment variable will be used over a default value.
Parameters
----------
name : str
Name of the Dagman instance. This will also be the name of the
corresponding error, log, output, and submit files associated with
this Dagman.
submit : str
Path to directory where condor dagman submit files will be written
(defaults to the directory was the Dagman was submitted from).
extra_lines : list or None, optional
List of additional lines to be added to submit file.
.. versionadded:: 0.1.1
dag : Dagman, optional
If specified, Dagman will be added to dag as a subdag
(default is None).
verbose : int, optional
Level of logging verbosity option are 0-warning, 1-info,
2-debugging (default is 0).
Attributes
----------
jobs : list
The list of jobs for this Dagman instance to manage.
parents : list
List of parent Jobs and Dagmans. Ensures that Jobs and Dagmans in the
parents list will complete before this Dagman is submitted to HTCondor.
children : list
List of child Jobs and Dagmans. Ensures that Jobs and Dagmans in the
children list will be submitted only after this Dagman has completed.
"""
def __init__(self, name, submit=None, extra_lines=None, dag=None,
verbose=0):
super(Dagman, self).__init__(name, submit, extra_lines, dag, verbose)
self.nodes = []
self._has_bad_node_names = False
self.logger.debug('{} initialized'.format(self.name))
def __repr__(self):
nondefaults = ''
for attr in sorted(vars(self)):
if getattr(self, attr) and attr not in ['name', 'nodes', 'logger']:
nondefaults += ', {}={}'.format(attr, getattr(self, attr))
output = 'Dagman(name={}, n_nodes={}{})'.format(self.name,
len(self.nodes),
nondefaults)
return output
def __iter__(self):
return iter(self.nodes)
def __len__(self):
return len(self.nodes)
def __contains__(self, item):
return item in self.nodes
def _hasnode(self, node):
return node in self.nodes
def _add_node(self, node):
# Don't bother adding node if it's already been added
if self._hasnode(node):
return self
if isinstance(node, BaseNode):
self.nodes.append(node)
else:
raise TypeError('Expecting a Job or Dagman. '
'Got an object of type {}'.format(type(node)))
self.logger.debug(
'Added {} to Dagman {}'.format(node.name, self.name))
return self
def add_job(self, job):
"""Add job to Dagman
Parameters
----------
job : Job
Job to append to Dagman jobs list.
Returns
-------
self : object
Returns self.
"""
self._add_node(job)
return self
def add_subdag(self, dag):
"""Add dag to Dagman
Parameters
----------
dag : Dagman
Subdag to append to Dagman jobs list.
Returns
-------
self : object
Returns self.
"""
self._add_node(dag)
return self
def _get_job_arg_lines(self, job, fancyname):
"""Constructs the lines to be added to a Dagman related to job
"""
if not isinstance(job, Job):
raise TypeError('Expecting a Job object, got {}'.format(type(job)))
if not getattr(job, '_built', False):
raise ValueError('Job {} must be built before adding it '
'to a Dagman'.format(job.name))
job_arg_lines = []
if len(job.args) == 0:
job_line = 'JOB {} {}'.format(job.submit_name, job.submit_file)
job_arg_lines.append(job_line)
else:
for node_name, job_arg in _iter_job_args(job):
# Check that '.' or '+' are not in node_name
if '.' in node_name or '+' in node_name:
self._has_bad_node_names = True
arg, name, retry = job_arg
# Add JOB line with Job submit file
job_line = 'JOB {} {}'.format(node_name, job.submit_file)
job_arg_lines.append(job_line)
# Add job ARGS line for command line arguments
arg_line = 'VARS {} ARGS="{}"'.format(node_name, arg)
job_arg_lines.append(arg_line)
# Define job_name variable if there are arg_names for job
if job._has_arg_names:
if name is not None:
job_name = node_name
else:
job_name = job.submit_name
job_name_line = 'VARS {} job_name="{}"'.format(node_name,
job_name)
job_arg_lines.append(job_name_line)
# Add retry line for Job
if retry is not None:
retry_line = 'Retry {} {}'.format(node_name, retry)
job_arg_lines.append(retry_line)
return job_arg_lines
    def build(self, makedirs=True, fancyname=True):
        """Build and saves the submit file for Dagman

        All child nodes (Jobs and sub-Dagmans) are built first, then the
        Dagman submit file itself is written, containing the JOB/VARS lines
        for each node followed by the inter-job (parent/child) dependencies.

        Parameters
        ----------
        makedirs : bool, optional
            If Job directories (e.g. error, output, log, submit) don't exist,
            create them (default is ``True``).
        fancyname : bool, optional
            Appends the date and unique id number to error, log, output, and
            submit files. For example, instead of ``dagname.submit`` the submit
            file becomes ``dagname_YYYYMMD_id``. This is useful when running
            several Dags/Jobs of the same name (default is ``True``).

        Returns
        -------
        self : object
            Returns self.
        """
        # Building is effectively idempotent: a second call is a no-op.
        if getattr(self, '_built', False):
            self.logger.warning(
                '{} submit file has already been built. '
                'Skipping the build process...'.format(self.name))
            return self
        name = self._get_fancyname() if fancyname else self.name
        submit_file = os.path.join(self.submit, '{}.submit'.format(name))
        self.submit_file = submit_file
        self.submit_name = name
        checkdir(self.submit_file, makedirs)
        # Build submit files for all nodes in self.nodes
        # Note: nodes must be built before the submit file for self is built
        for node_index, node in enumerate(self.nodes, start=1):
            if isinstance(node, Job):
                node._build_from_dag(makedirs, fancyname)
            elif isinstance(node, Dagman):
                # Sub-dags are built recursively.
                node.build(makedirs, fancyname)
            else:
                raise TypeError('Nodes must be either a Job or Dagman object')
        # Write dag submit file
        self.logger.info('Building DAG submission file {}...'.format(
            self.submit_file))
        lines = []
        parent_child_lines = []
        for node_index, node in enumerate(self.nodes, start=1):
            self.logger.info('Working on {} [{} of {}]'.format(node.name,
                                                               node_index, len(self.nodes)))
            # Build the BaseNode submit file
            if isinstance(node, Job):
                # Add Job variables to Dagman submit file
                job_arg_lines = self._get_job_arg_lines(node, fancyname)
                lines.extend(job_arg_lines)
            elif isinstance(node, Dagman):
                subdag_string = _get_subdag_string(node)
                lines.append(subdag_string)
            else:
                raise TypeError('Nodes must be either a Job or Dagman object')
            # Add parent/child information, if necessary
            if node.hasparents():
                parent_child_string = _get_parent_child_string(node)
                parent_child_lines.append(parent_child_string)
        # Add any extra lines to submit file, if specified
        if self.extra_lines:
            lines.extend(self.extra_lines)
        # Write lines to dag submit file
        with open(submit_file, 'w') as dag:
            dag.writelines('\n'.join(lines + ['\n#Inter-job dependencies'] +
                                     parent_child_lines))
        self._built = True
        self.logger.info('Dagman submission file for {} successfully '
                         'built!'.format(self.name))
        return self
@requires_command('condor_submit_dag')
def submit_dag(self, submit_options=None):
"""Submits Dagman to condor
Parameters
----------
submit_options : str, optional
Options to be passed to ``condor_submit_dag`` for this Dagman
(see the `condor_submit_dag documentation
<http://research.cs.wisc.edu/htcondor/manual/current/condor_submit_dag.html>`_
for possible options).
Returns
-------
self : object
Returns self.
"""
# Construct condor_submit_dag command
command = 'condor_submit_dag'
if submit_options is not None:
command += ' {}'.format(submit_options)
command += ' {}'.format(self.submit_file)
submit_dag_proc = subprocess.Popen([command],
stdout=subprocess.PIPE,
shell=True)
# Check that there are no illegal node names for newer condor versions
condor_version = get_condor_version()
if condor_version >= (8, 7, 2) and self._has_bad_node_names:
err = ("Found an illegal character (either '+' or '.') in the "
"name for a node in Dagman {}. As of HTCondor version "
"8.7.2, '+' and '.' are prohibited in Dagman node names. "
"This means a '+' or '.' character is in a Job name, "
"Dagman name, or the name for a Job argument.".format(
self.name))
raise RuntimeError(err)
# Execute condor_submit_dag command
out, err = submit_dag_proc.communicate()
print(out)
return self
@requires_command('condor_submit_dag')
def build_submit(self, makedirs=True, fancyname=True, submit_options=None):
"""Calls build and submit sequentially
Parameters
----------
makedirs : bool, optional
If Job directories (e.g. error, output, log, submit) don't exist,
create them (default is ``True``).
fancyname : bool, optional
Appends the date and unique id number to error, log, output, and
submit files. For example, instead of ``dagname.submit`` the submit
file becomes ``dagname_YYYYMMD_id``. This is useful when running
several Dags/Jobs of the same name (default is ``True``).
submit_options : str, optional
Options to be passed to ``condor_submit_dag`` for this Dagman
(see the `condor_submit_dag documentation
<http://research.cs.wisc.edu/htcondor/manual/current/condor_submit_dag.html>`_
for possible options).
Returns
-------
self : object
Returns self.
"""
self.build(makedirs, fancyname)
self.submit_dag(submit_options=submit_options)
return self
| 1,645 | 0 | 211 |
875425da3ed2a31653ab5479cf56429d96c1a09a | 4,123 | py | Python | src/napari_tissuemaps_interface/lazy_array.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | src/napari_tissuemaps_interface/lazy_array.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | src/napari_tissuemaps_interface/lazy_array.py | fractal-napari-plugins-collection/napari_tissuemaps_interface | 4cba72a6193b1853c8534ccecf5fc0ace5202fb3 | [
"BSD-3-Clause"
] | null | null | null | """
Module containing a numpy-like array which supports lazy reading of tiled 2D-image data.
"""
import abc
import dask.array as da
import numpy as np
class LazyArray(abc.ABC):
    """
    An abstract class of a numpy-like array which supports lazy reading of tiled 2D-image data.
    The class represents a custom array container which is compatible with the numpy API.
    For more details please refer to
    https://numpy.org/doc/stable/user/basics.dispatch.html#writing-custom-array-containers.
    The class is compatible with napari's image layer which expects a "numpy-like array" as
    input which supports indexing and can be converted to a numpy array via np.asarray.
    (ref: https://napari.org/tutorials/fundamentals/image.html#image-data-and-numpy-like-arrays)

    Note: the original declared ``__metaclass__ = abc.ABCMeta``, which is Python 2
    syntax and has no effect on Python 3. Deriving from ``abc.ABC`` actually
    enforces the ``@abc.abstractmethod`` contract, so instantiating LazyArray
    directly now raises TypeError, as intended.
    """
    def __init__(self, shape, dtype, tile_size):
        """
        Initialization method.
        :param shape: The (height, width) shape of the underlying array.
        :param dtype: The type of the underlying array.
        :param tile_size: The size of a single (square) tile by which the image is divided.
        """
        assert len(shape) == 2
        self.shape = shape
        self.dtype = dtype
        self.tile_size = tile_size
        self.ndim = 2
    @property
    def size(self):
        """
        The number of elements in the array.
        """
        return self.shape[0] * self.shape[1]
    def __array__(self, dtype=None, **kwargs):
        # pylint: disable=W0613
        """
        Method used e.g. by numpy to obtain a standard numpy.ndarray.
        """
        return np.asarray(self[0:self.shape[0], 0:self.shape[1]])
    def __getitem__(self, idx):
        """
        Method which implements the support for basic slicing.
        It does not support field access nor advanced indexing.
        Moreover, the start and stop of a slice must be positive integers.
        This method is optimized for the napari viewer.
        napari calls self[:] for obtaining the shape, dtype and ndim attributes - not the data.
        To delay reading the underlying data this method does not return a numpy array
        but self when calling self[:].
        To access the underlying data napari calls np.asarray(self).
        """
        if not (
            isinstance(idx, slice) or
            (isinstance(idx, tuple) and all(isinstance(i, slice) for i in idx))
        ):
            raise ValueError("LazyArray only supports indexing by slices!")
        if (
            idx == slice(None, None, None) or
            idx == (slice(None, None, None), slice(None, None, None))
        ):
            return self
        if isinstance(idx, slice) or len(idx) != 2:
            # Fix: a lone (partial) slice used to fail with an obscure
            # "TypeError: object of type 'slice' has no len()"; report it
            # explicitly as an unsupported index instead.
            raise Exception("Unsupported index!")
        (y_min, y_max), (x_min, x_max) = [(i.start, i.stop) for i in idx]
        # Offsets of the requested window relative to the tile grid.
        y_off = y_min - (y_min % self.tile_size)
        x_off = x_min - (x_min % self.tile_size)
        # Fix: the last term used the bitwise '&' instead of logical 'and'.
        assert (y_min >= 0) and (y_max >= 0) and (x_min >= 0) and (x_max >= 0)
        if y_max % self.tile_size == 0:
            max_y_tiles = (y_max // self.tile_size)
        else:
            max_y_tiles = (y_max // self.tile_size) + 1
        if x_max % self.tile_size == 0:
            max_x_tiles = (x_max // self.tile_size)
        else:
            max_x_tiles = (x_max // self.tile_size) + 1
        # Assemble a lazy dask block matrix covering all touched tiles.
        dask_arrays = []
        for y_tile in range(y_min // self.tile_size, max_y_tiles):
            row_tiles = []
            for x_tile in range(x_min // self.tile_size, max_x_tiles):
                row_tiles.append(
                    da.from_delayed(
                        self.read_tile(y_tile, x_tile),
                        # NOTE(review): tile dtype is hard-coded to uint8 and
                        # ignores self.dtype -- confirm non-uint8 sources are
                        # not expected here before changing it.
                        shape=(self.tile_size, self.tile_size), dtype=np.uint8
                    )
                )
            dask_arrays.append(row_tiles)
        y_max = min(y_max, self.shape[0])
        x_max = min(x_max, self.shape[1])
        return da.block(dask_arrays)[y_min-y_off:y_max-y_off, x_min-x_off:x_max-x_off]
    @abc.abstractmethod
    def read_tile(self, y_tile, x_tile):
        """
        Abstract method which reads a tile at the position (y_tile, x_tile).
        The result is handed to dask.array.from_delayed as a
        (tile_size, tile_size) tile.
        """
        return
| 36.486726 | 96 | 0.600049 | """
Module containing a numpy-like array which supports lazy reading of tiled 2D-image data.
"""
import abc
import dask.array as da
import numpy as np
class LazyArray(abc.ABC):
    """
    An abstract class of a numpy-like array which supports lazy reading of tiled 2D-image data.
    The class represents a custom array container which is compatible with the numpy API.
    For more details please refer to
    https://numpy.org/doc/stable/user/basics.dispatch.html#writing-custom-array-containers.
    The class is compatible with napari's image layer which expects a "numpy-like array" as
    input which supports indexing and can be converted to a numpy array via np.asarray.
    (ref: https://napari.org/tutorials/fundamentals/image.html#image-data-and-numpy-like-arrays)

    Note: the original declared ``__metaclass__ = abc.ABCMeta``, which is Python 2
    syntax and has no effect on Python 3. Deriving from ``abc.ABC`` actually
    enforces the ``@abc.abstractmethod`` contract, so instantiating LazyArray
    directly now raises TypeError, as intended.
    """
    def __init__(self, shape, dtype, tile_size):
        """
        Initialization method.
        :param shape: The (height, width) shape of the underlying array.
        :param dtype: The type of the underlying array.
        :param tile_size: The size of a single (square) tile by which the image is divided.
        """
        assert len(shape) == 2
        self.shape = shape
        self.dtype = dtype
        self.tile_size = tile_size
        self.ndim = 2
    @property
    def size(self):
        """
        The number of elements in the array.
        """
        return self.shape[0] * self.shape[1]
    def __array__(self, dtype=None, **kwargs):
        # pylint: disable=W0613
        """
        Method used e.g. by numpy to obtain a standard numpy.ndarray.
        """
        return np.asarray(self[0:self.shape[0], 0:self.shape[1]])
    def __getitem__(self, idx):
        """
        Method which implements the support for basic slicing.
        It does not support field access nor advanced indexing.
        Moreover, the start and stop of a slice must be positive integers.
        This method is optimized for the napari viewer.
        napari calls self[:] for obtaining the shape, dtype and ndim attributes - not the data.
        To delay reading the underlying data this method does not return a numpy array
        but self when calling self[:].
        To access the underlying data napari calls np.asarray(self).
        """
        if not (
            isinstance(idx, slice) or
            (isinstance(idx, tuple) and all(isinstance(i, slice) for i in idx))
        ):
            raise ValueError("LazyArray only supports indexing by slices!")
        if (
            idx == slice(None, None, None) or
            idx == (slice(None, None, None), slice(None, None, None))
        ):
            return self
        if isinstance(idx, slice) or len(idx) != 2:
            # Fix: a lone (partial) slice used to fail with an obscure
            # "TypeError: object of type 'slice' has no len()"; report it
            # explicitly as an unsupported index instead.
            raise Exception("Unsupported index!")
        (y_min, y_max), (x_min, x_max) = [(i.start, i.stop) for i in idx]
        # Offsets of the requested window relative to the tile grid.
        y_off = y_min - (y_min % self.tile_size)
        x_off = x_min - (x_min % self.tile_size)
        # Fix: the last term used the bitwise '&' instead of logical 'and'.
        assert (y_min >= 0) and (y_max >= 0) and (x_min >= 0) and (x_max >= 0)
        if y_max % self.tile_size == 0:
            max_y_tiles = (y_max // self.tile_size)
        else:
            max_y_tiles = (y_max // self.tile_size) + 1
        if x_max % self.tile_size == 0:
            max_x_tiles = (x_max // self.tile_size)
        else:
            max_x_tiles = (x_max // self.tile_size) + 1
        # Assemble a lazy dask block matrix covering all touched tiles.
        dask_arrays = []
        for y_tile in range(y_min // self.tile_size, max_y_tiles):
            row_tiles = []
            for x_tile in range(x_min // self.tile_size, max_x_tiles):
                row_tiles.append(
                    da.from_delayed(
                        self.read_tile(y_tile, x_tile),
                        # NOTE(review): tile dtype is hard-coded to uint8 and
                        # ignores self.dtype -- confirm non-uint8 sources are
                        # not expected here before changing it.
                        shape=(self.tile_size, self.tile_size), dtype=np.uint8
                    )
                )
            dask_arrays.append(row_tiles)
        y_max = min(y_max, self.shape[0])
        x_max = min(x_max, self.shape[1])
        return da.block(dask_arrays)[y_min-y_off:y_max-y_off, x_min-x_off:x_max-x_off]
    @abc.abstractmethod
    def read_tile(self, y_tile, x_tile):
        """
        Abstract method which reads a tile at the position (y_tile, x_tile).
        The result is handed to dask.array.from_delayed as a
        (tile_size, tile_size) tile.
        """
        return
| 0 | 0 | 0 |
dec18f38e4f16ec25a933f6494d2b368ad15e5be | 305 | py | Python | core/management/commands/fail_repeat.py | Suleymanov2006/ansible-manager | 57397c12e49611a5cc6197ecd184222caabf1361 | [
"MIT"
] | 13 | 2017-03-18T06:03:43.000Z | 2020-02-15T03:42:28.000Z | core/management/commands/fail_repeat.py | Suleymanov2006/ansible-manager | 57397c12e49611a5cc6197ecd184222caabf1361 | [
"MIT"
] | 78 | 2017-03-17T18:16:32.000Z | 2018-10-26T11:58:20.000Z | core/management/commands/fail_repeat.py | Suleymanov2006/ansible-manager | 57397c12e49611a5cc6197ecd184222caabf1361 | [
"MIT"
] | 6 | 2017-03-19T13:41:29.000Z | 2019-09-11T21:36:52.000Z | from django.core.management.base import BaseCommand
from core.datatools.fail_repeat import FailRepeater
| 21.785714 | 51 | 0.652459 | from django.core.management.base import BaseCommand
from core.datatools.fail_repeat import FailRepeater
class Command(BaseCommand):
    """Management command that runs the fail-repeat loop."""

    def handle(self, *args, **options):
        """Run the FailRepeater until it finishes or the user hits Ctrl-C."""
        try:
            FailRepeater().run()
        except KeyboardInterrupt:
            # Ctrl-C is the normal way to stop the loop; exit quietly
            # instead of printing a traceback.
            pass
| 143 | 6 | 50 |
985f4df231084ec6e8ef79a8e7a70ed728a9ce86 | 634 | py | Python | epg_grabber/sites/auth/starhubtvplus_auth.py | akmalharith/epg-grabber | ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33 | [
"MIT"
] | 1 | 2022-03-16T00:42:21.000Z | 2022-03-16T00:42:21.000Z | epg_grabber/sites/auth/starhubtvplus_auth.py | akmalharith/epg-grabber | ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33 | [
"MIT"
] | null | null | null | epg_grabber/sites/auth/starhubtvplus_auth.py | akmalharith/epg-grabber | ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33 | [
"MIT"
] | 1 | 2022-03-17T17:16:30.000Z | 2022-03-17T17:16:30.000Z | from typing import Dict
import requests
from config.env import starhubtvplus_app_key, starhubtvplus_client_uuid
| 30.190476 | 92 | 0.656151 | from typing import Dict
import requests
from config.env import starhubtvplus_app_key, starhubtvplus_client_uuid
def get_session() -> Dict[str, str]:
    """Create an Accedo One application session and return auth headers.

    Returns
    -------
    Dict[str, str]
        Headers carrying the session key (``x-application-session``) and the
        composite app key (``x-application-key`` = app key + "_" + client uuid)
        used by the StarHub TV+ grabber.
    """
    # The original wrapped this call in ``except Exception as e: raise(e)``,
    # which re-raised the same exception unchanged; the pointless handler was
    # removed so errors propagate untouched.
    response = requests.get(
        "https://api.one.accedo.tv/session?appKey={application_key}&uuid={uuid}".format(
            application_key=starhubtvplus_app_key,
            uuid=starhubtvplus_client_uuid))
    # NOTE(review): there is no response.raise_for_status() here -- a non-2xx
    # reply surfaces as a JSON/KeyError below; confirm the desired handling.
    output = response.json()
    return {
        "x-application-session": output["sessionKey"],
        "x-application-key": starhubtvplus_app_key +
        "_" +
        starhubtvplus_client_uuid}
| 498 | 0 | 23 |
73d5971c32585318885749af3454e804597d68f8 | 11,142 | py | Python | ID18_U18_ONE_LENS/7keV_UndSource_RectSlit_R200um_MultiMode/run7keV_h.py | srio/paper-transfocators-resources | 917d8b4114056f62c84b295579e55bf5f0b56b6b | [
"MIT"
] | 1 | 2021-03-25T15:34:56.000Z | 2021-03-25T15:34:56.000Z | ID18_U18_ONE_LENS/7keV_UndSource_RectSlit_R200um_MultiMode/run7keV_h.py | srio/paper-transfocators-resources | 917d8b4114056f62c84b295579e55bf5f0b56b6b | [
"MIT"
] | null | null | null | ID18_U18_ONE_LENS/7keV_UndSource_RectSlit_R200um_MultiMode/run7keV_h.py | srio/paper-transfocators-resources | 917d8b4114056f62c84b295579e55bf5f0b56b6b | [
"MIT"
] | null | null | null | #
# Import section
#
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
#
# SOURCE========================
#
#
# BEAMLINE========================
#
#
# MAIN FUNCTION========================
#
#
# MAIN========================
#
# main()
if __name__ == "__main__":
    from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes, Tally
    from oasys.util.oasys_util import get_fwhm
    from srxraylib.plot.gol import plot
    #
    # Scan: for each aperture, sweep the slit-to-screen distance and score
    # the beam profile (FWHM and intensities) into one text file per aperture.
    #
    # size_at_aperture = 565e-6
    # Negative aperture values are a flag: use |aperture| but propagate a
    # single coherent mode; positive values propagate 10 modes (see below).
    APERTURE = [40.3e-6, 85.1e-6, 145e-6, 1000e-6, -40.3e-6, -85.1e-6, -145e-6, -1000e-6] # [ 5000e-6] # [-40.3e-6, -85.1e-6, -145e-6, -1000e-6] #
    DISTANCE = numpy.linspace(10, 50, 50) # numpy.array([18.4]) # # # 31.19 28.4
    number_of_points = 800 # 800
    for aperture in APERTURE:
        # src1, wf = main(aperture=aperture, distance=18.4168, number_of_points=number_of_points)
        filename = "aperture_h_%g.dat" % (1e6 * aperture) # one scored-data file per aperture (um in name)
        f = open(filename, 'w')
        f.write("# S 1 scored data\n")
        f.write("# N 5\n")
        f.write("# L distance fwhm total_intensity on_axis_intensity peak_intensity")
        if aperture < 0:
            aperture *= -1
            nmodes = 1
        else:
            nmodes = 10
        for i,distance in enumerate(DISTANCE):
            tally = main(aperture=aperture, distance=distance, nmodes=nmodes)
            spectral_density = tally.get_spectral_density() # numpy.zeros_like(abscissas)
            abscissas = tally.get_abscissas()
            fwhm, quote, coordinates = get_fwhm(spectral_density, 1e6 * abscissas)
            I = spectral_density
            x = abscissas
            # NOTE(review): this second call overwrites the fwhm computed just
            # above with one based on un-scaled abscissas (x, not 1e6*x) --
            # confirm the intended units of the written "fwhm" column.
            fwhm, quote, coordinates = get_fwhm(I, x)
            intensity_at_center = I[I.size // 2]
            intensity_total = I.sum() * (x[1] - x[0])
            intensity_peak = I.max()
            # plot(1e6 * abscissas, spectral_density,
            #      legend=["From Cross Spectral Density"],
            #      xtitle="x [um]", ytitle="Spectral Density", title="D=%g m,FWHM = %g um, a=%g um" % (distance, fwhm, aperture*1e6))
            f.write("\n %g %g %g %g %g " % (distance, fwhm, intensity_total, intensity_at_center, intensity_peak))
        f.close()
        print("File %s written to disk" % filename)
        # tally.save("aperture_h_%g.dat" % (aperture))
# main()
| 36.29316 | 218 | 0.653114 | #
# Import section
#
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
#
# SOURCE========================
#
def run_source(my_mode_index=0,number_of_points=800, zero_emittance=0):
    """Return the wavefront of coherent mode *my_mode_index* of the undulator.

    The expensive coherent-mode decomposition is computed once, cached in the
    module-global ``coherent_mode_decomposition`` and reused for later modes;
    requesting mode 0 forces a (re)computation of the cache.
    """
    global coherent_mode_decomposition
    try:
        # Mode 0 deliberately triggers the except branch to (re)build the cache.
        if my_mode_index == 0: raise Exception()
        tmp = coherent_mode_decomposition
    except:
        ########## SOURCE ##########
        #
        # create output_wavefront
        #
        #
        from wofryimpl.propagator.util.undulator_coherent_mode_decomposition_1d import \
            UndulatorCoherentModeDecomposition1D
        if zero_emittance:
            sigmaxx=1e-07
            sigmaxpxp=1e-08
        else:
            sigmaxx=2.97321e-05
            sigmaxpxp=4.37237e-06
        # NOTE(review): scan_direction is 'V' although this script scores the
        # horizontal ("_h") profile -- confirm this is intentional.
        coherent_mode_decomposition = UndulatorCoherentModeDecomposition1D(
            electron_energy=6,
            electron_current=0.2,
            undulator_period=0.018,
            undulator_nperiods=138,
            K=1.85108,
            photon_energy=7000,
            abscissas_interval=0.00025,
            number_of_points=number_of_points,
            distance_to_screen=100,
            scan_direction='V',
            sigmaxx=sigmaxx,
            sigmaxpxp=sigmaxpxp,
            useGSMapproximation=False, )
        # make calculation
        coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()
        mode_index = 0
        output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)
    output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)
    return output_wavefront
#
# BEAMLINE========================
#
def run_beamline(output_wavefront,aperture=40e-6, distance=30.0):
    """Propagate *output_wavefront* through the 5-element beamline.

    Layout (OASYS-generated): 36 m drift to a screen, a rectangular slit of
    width *aperture*, a 29 m drift, a Be lens (R=200 um), and a final drift of
    *distance* metres to a wide-open slit where the wavefront is returned.
    """
    ########## OPTICAL SYSTEM ##########
    ########## OPTICAL ELEMENT NUMBER 1 ##########
    input_wavefront = output_wavefront.duplicate()
    from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D
    optical_element = WOScreen1D()
    # drift_before 36 m
    #
    # propagating
    #
    #
    propagation_elements = PropagationElements()
    beamline_element = BeamlineElement(optical_element=optical_element,
                                       coordinates=ElementCoordinates(p=36.000000, q=0.000000,
                                                                      angle_radial=numpy.radians(0.000000),
                                                                      angle_azimuthal=numpy.radians(0.000000)))
    propagation_elements.add_beamline_element(beamline_element)
    propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)
    # self.set_additional_parameters(propagation_parameters)
    #
    propagation_parameters.set_additional_parameters('magnification_x', 8.0)
    propagation_parameters.set_additional_parameters('magnification_N', 1.0)
    #
    propagator = PropagationManager.Instance()
    # The bare except below swallows the "propagator already registered" case
    # on repeated calls; adding the same propagator twice is the only
    # operation attempted inside the try.
    try:
        propagator.add_propagator(Integral1D())
    except:
        pass
    output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
                                                 handler_name='INTEGRAL_1D')
    ########## OPTICAL ELEMENT NUMBER 2 ##########
    input_wavefront = output_wavefront.duplicate()
    from syned.beamline.shape import Rectangle
    boundary_shape = Rectangle(-aperture/2, aperture/2, -aperture/2, aperture/2)
    from wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D
    optical_element = WOSlit1D(boundary_shape=boundary_shape)
    # no drift in this element
    output_wavefront = optical_element.applyOpticalElement(input_wavefront)
    ########## OPTICAL ELEMENT NUMBER 3 ##########
    input_wavefront = output_wavefront.duplicate()
    from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D
    optical_element = WOScreen1D()
    # drift_before 29 m
    #
    # propagating
    #
    #
    propagation_elements = PropagationElements()
    beamline_element = BeamlineElement(optical_element=optical_element,
                                       coordinates=ElementCoordinates(p=29.000000, q=0.000000,
                                                                      angle_radial=numpy.radians(0.000000),
                                                                      angle_azimuthal=numpy.radians(0.000000)))
    propagation_elements.add_beamline_element(beamline_element)
    propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)
    # self.set_additional_parameters(propagation_parameters)
    #
    propagation_parameters.set_additional_parameters('magnification_x', 2.5)
    #
    propagator = PropagationManager.Instance()
    try:
        propagator.add_propagator(FresnelZoom1D())
    except:
        pass
    output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
                                                 handler_name='FRESNEL_ZOOM_1D')
    ########## OPTICAL ELEMENT NUMBER 4 ##########
    input_wavefront = output_wavefront.duplicate()
    from orangecontrib.esrf.wofry.util.lens import WOLens1D
    # Single Be lens, R = 200 um, 1 mm aperture, two curved surfaces.
    optical_element = WOLens1D.create_from_keywords(
        name='',
        shape=1,
        radius=0.0002,
        lens_aperture=0.001,
        wall_thickness=5e-05,
        material='Be',
        number_of_curved_surfaces=2,
        n_lenses=1,
        error_flag=0,
        error_file='<none>',
        error_edge_management=0,
        write_profile_flag=0,
        write_profile='profile1D.dat',
        mis_flag=0,
        xc=0,
        ang_rot=0,
        wt_offset_ffs=0,
        offset_ffs=0,
        tilt_ffs=0,
        wt_offset_bfs=0,
        offset_bfs=0,
        tilt_bfs=0)
    # no drift in this element
    output_wavefront = optical_element.applyOpticalElement(input_wavefront)
    ########## OPTICAL ELEMENT NUMBER 5 ##########
    input_wavefront = output_wavefront.duplicate()
    from syned.beamline.shape import Rectangle
    boundary_shape = Rectangle(-0.5, 0.5, -0.5, 0.5)
    from wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D
    optical_element = WOSlit1D(boundary_shape=boundary_shape)
    # drift_before 18.4 m
    #
    # propagating
    #
    #
    propagation_elements = PropagationElements()
    beamline_element = BeamlineElement(optical_element=optical_element, coordinates=ElementCoordinates(p=distance, q=0.000000, angle_radial=numpy.radians(0.000000), angle_azimuthal=numpy.radians(0.000000)))
    propagation_elements.add_beamline_element(beamline_element)
    propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements = propagation_elements)
    #self.set_additional_parameters(propagation_parameters)
    #
    propagation_parameters.set_additional_parameters('magnification_x', 0.1)
    propagation_parameters.set_additional_parameters('magnification_N', 1.0)
    #
    propagator = PropagationManager.Instance()
    try:
        propagator.add_propagator(Integral1D())
    except:
        pass
    output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters, handler_name='INTEGRAL_1D')
    return output_wavefront
#
# MAIN FUNCTION========================
#
def main(aperture=40e-6, distance=30.0, number_of_points=800,nmodes=10):
    """Propagate the first *nmodes* coherent modes through the beamline.

    Each mode is produced by run_source(), pushed through run_beamline()
    and accumulated in a TallyCoherentModes object, which is returned.
    """
    from srxraylib.plot.gol import plot, plot_image
    from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes
    tally = TallyCoherentModes()
    for mode in range(nmodes):
        print(">>>>>>>>>>>>>>>>>>>>> mode %d of %d" % (mode, nmodes))
        wavefront = run_source(my_mode_index=mode, number_of_points=number_of_points)
        tally.append(run_beamline(wavefront, aperture=aperture, distance=distance))
    # Plotting helpers kept for interactive use:
    # tally.plot_cross_spectral_density(show=1, filename="")
    # tally.plot_spectral_density(show=1, filename="")
    # tally.plot_occupation(show=1, filename="")
    return tally
#
# MAIN========================
#
# main()
if __name__ == "__main__":
    from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes, Tally
    from oasys.util.oasys_util import get_fwhm
    from srxraylib.plot.gol import plot
    #
    # Scan: for each aperture, sweep the slit-to-screen distance and score
    # the beam profile (FWHM and intensities) into one text file per aperture.
    #
    # size_at_aperture = 565e-6
    # Negative aperture values are a flag: use |aperture| but propagate a
    # single coherent mode; positive values propagate 10 modes (see below).
    APERTURE = [40.3e-6, 85.1e-6, 145e-6, 1000e-6, -40.3e-6, -85.1e-6, -145e-6, -1000e-6] # [ 5000e-6] # [-40.3e-6, -85.1e-6, -145e-6, -1000e-6] #
    DISTANCE = numpy.linspace(10, 50, 50) # numpy.array([18.4]) # # # 31.19 28.4
    number_of_points = 800 # 800
    for aperture in APERTURE:
        # src1, wf = main(aperture=aperture, distance=18.4168, number_of_points=number_of_points)
        filename = "aperture_h_%g.dat" % (1e6 * aperture) # one scored-data file per aperture (um in name)
        f = open(filename, 'w')
        f.write("# S 1 scored data\n")
        f.write("# N 5\n")
        f.write("# L distance fwhm total_intensity on_axis_intensity peak_intensity")
        if aperture < 0:
            aperture *= -1
            nmodes = 1
        else:
            nmodes = 10
        for i,distance in enumerate(DISTANCE):
            tally = main(aperture=aperture, distance=distance, nmodes=nmodes)
            spectral_density = tally.get_spectral_density() # numpy.zeros_like(abscissas)
            abscissas = tally.get_abscissas()
            fwhm, quote, coordinates = get_fwhm(spectral_density, 1e6 * abscissas)
            I = spectral_density
            x = abscissas
            # NOTE(review): this second call overwrites the fwhm computed just
            # above with one based on un-scaled abscissas (x, not 1e6*x) --
            # confirm the intended units of the written "fwhm" column.
            fwhm, quote, coordinates = get_fwhm(I, x)
            intensity_at_center = I[I.size // 2]
            intensity_total = I.sum() * (x[1] - x[0])
            intensity_peak = I.max()
            # plot(1e6 * abscissas, spectral_density,
            #      legend=["From Cross Spectral Density"],
            #      xtitle="x [um]", ytitle="Spectral Density", title="D=%g m,FWHM = %g um, a=%g um" % (distance, fwhm, aperture*1e6))
            f.write("\n %g %g %g %g %g " % (distance, fwhm, intensity_total, intensity_at_center, intensity_peak))
        f.close()
        print("File %s written to disk" % filename)
        # tally.save("aperture_h_%g.dat" % (aperture))
# main()
| 7,838 | 0 | 69 |
bfa9580d8d9a5ab354f53be8a0dc88c93072abd5 | 3,302 | py | Python | tests/test_bounding_box_query.py | HamutalCohen3/anyway | d130fc37ed51f838b53a79a641fe2706f2a29c6a | [
"BSD-3-Clause"
] | null | null | null | tests/test_bounding_box_query.py | HamutalCohen3/anyway | d130fc37ed51f838b53a79a641fe2706f2a29c6a | [
"BSD-3-Clause"
] | null | null | null | tests/test_bounding_box_query.py | HamutalCohen3/anyway | d130fc37ed51f838b53a79a641fe2706f2a29c6a | [
"BSD-3-Clause"
] | null | null | null | import unittest
from models import Marker # for Marker.bounding_box_query
import datetime
# This tests year 2014 accidents as this is the current example git data for testing
# Once this changes to another year or to the current year's accidents (as should be) un-comment lines 11,13,15
# and change both 2014 and 2015 to: %s
class TestQueryFilters(unittest.TestCase):
    """Filter tests for ``Marker.bounding_box_query`` (2014 example data).

    Year-rollover snippet preserved from the original docstring:
    # cyear = str(datetime.datetime.now().strftime("%Y"))
    global start_date
    start_date = "01/01/2014" # % cyear
    global end_date
    end_date = "01/01/2015" # % str(int(cyear)-1)
    """
if __name__ == '__main__':
    # unittest.main() discovers every test in this module, runs it, and then
    # raises SystemExit; the TestLoader/TextTestRunner invocation that used to
    # follow it was therefore unreachable dead code and has been removed.
    unittest.main()
| 41.797468 | 113 | 0.640521 | import unittest
from models import Marker # for Marker.bounding_box_query
import datetime
# This tests year 2014 accidents as this is the current example git data for testing
# Once this changes to another year or to the current year's accidents (as should be) un-comment lines 11,13,15
# and change both 2014 and 2015 to: %s
class TestQueryFilters(unittest.TestCase):
    """Exercise the filter arguments of ``Marker.bounding_box_query``.

    The assertions target the 2014 example data set.  Year-rollover snippet
    preserved from the original docstring:
    # cyear = str(datetime.datetime.now().strftime("%Y"))
    # start_date = "01/01/2014" # % cyear
    # end_date = "01/01/2015" # % str(int(cyear)-1)
    """

    def setUp(self):
        # Baseline query: every marker category enabled, small bounding box,
        # two-year date window covering the 2014 example data.
        baseline = {'approx': True, 'show_day': 7, 'show_discussions': True, 'accurate': True, 'surface': 0,
                    'weather': 0, 'district': 0, 'show_markers': True, 'show_fatal': True, 'show_time': 24,
                    'show_intersection': 3, 'show_light': True, 'sw_lat': 32.067363446951944, 'controlmeasure': 0,
                    'start_date': datetime.date(2014, 1, 1), 'ne_lng': 34.79928962966915, 'show_severe': True,
                    'end_date': datetime.date(2016, 1, 1), 'start_time': 25, 'acctype': 0, 'separation': 0,
                    'show_urban': 3, 'show_lane': 3, 'sw_lng': 34.78877537033077, 'zoom': 17, 'show_holiday': 0,
                    'end_time': 25, 'road': 0, 'ne_lat': 32.072427482938345}
        self.query_args = baseline
        self.query = Marker.bounding_box_query(yield_per=50, **baseline)

    def tearDown(self):
        self.query = None

    def _query_with(self, **overrides):
        """Re-run the bounding-box query with the baseline args plus overrides."""
        args = self.query_args.copy()
        args.update(overrides)
        return Marker.bounding_box_query(yield_per=50, **args)

    def test_location_filters(self):
        """Every returned marker lies inside the requested bounding box."""
        box = self.query_args
        for marker in self.query:
            self.assertTrue(box['sw_lat'] <= marker.latitude <= box['ne_lat'])
            self.assertTrue(box['sw_lng'] <= marker.longitude <= box['ne_lng'])

    def test_accurate_filter(self):
        """With approx disabled, only accurately-located markers remain."""
        for marker in self._query_with(approx=False):
            self.assertTrue(marker.locationAccuracy == 1)

    def test_approx_filter(self):
        """With accurate disabled, no accurately-located markers remain."""
        for marker in self._query_with(accurate=False):
            self.assertTrue(marker.locationAccuracy != 1)

    def test_fatal_severity_filter(self):
        """Hiding fatal accidents removes severity-1 markers."""
        for marker in self._query_with(show_fatal=False):
            self.assertTrue(marker.severity != 1)

    def test_severe_severity_filter(self):
        """Hiding severe accidents removes severity-2 markers."""
        for marker in self._query_with(show_severe=False):
            self.assertTrue(marker.severity != 2)

    def test_light_severity_filter(self):
        """Hiding light accidents removes severity-3 markers."""
        for marker in self._query_with(show_light=False):
            self.assertTrue(marker.severity != 3)
if __name__ == '__main__':
    # unittest.main() discovers every test in this module, runs it, and then
    # raises SystemExit; the TestLoader/TextTestRunner invocation that used to
    # follow it was therefore unreachable dead code and has been removed.
    unittest.main()
| 2,318 | 0 | 220 |
ba4934ddd31bff45e2438851f4120f4932853815 | 2,513 | py | Python | service/learner/brains/tensor_nest_test.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 213 | 2021-06-11T01:15:16.000Z | 2022-02-25T16:18:57.000Z | service/learner/brains/tensor_nest_test.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 32 | 2021-06-17T17:58:54.000Z | 2022-02-02T05:58:10.000Z | service/learner/brains/tensor_nest_test.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 28 | 2021-06-17T17:34:21.000Z | 2022-03-24T14:05:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from learner.brains import tensor_nest
import tensorflow as tf
class TensorNestTest(absltest.TestCase):
    """Tests for the tensor_nest module."""

    def test_batch_size_valid_nest(self):
        """batch_size returns the common leading dimension of all leaves."""
        tensors = {
            'a': {
                'b': tf.constant([[1, 2, 3], [4, 5, 6]]),
                'c': tf.constant([[7, 8, 9, 10], [11, 12, 13, 14]]),
            },
        }
        self.assertEqual(2, tensor_nest.batch_size(tensors))

    def test_batch_size_invalid_nest(self):
        """batch_size raises when leaves disagree on the leading dimension."""
        tensors = {
            'a': {
                'b': tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
                'c': tf.constant([[7, 8, 9, 10], [11, 12, 13, 14]]),
            },
        }
        with self.assertRaisesRegex(
                tensor_nest.MismatchedBatchSizeError,
                'Tensors found in nest with mismatched batch sizes: {\'a\'.*}'):
            tensor_nest.batch_size(tensors)

    def test_batch_size_empty_nest(self):
        """batch_size of an empty nest is None."""
        self.assertIsNone(tensor_nest.batch_size({}))

    def test_concatenate_batched(self):
        """Concatenation stacks matching leaves along the batch dimension."""
        parts = [
            {
                'a': {
                    'b': tf.constant([[1, 2], [3, 4]]),
                    'c': tf.constant([[9, 8, 7], [6, 5, 4]]),
                },
            },
            {
                'a': {
                    'b': tf.constant([[5, 6]]),
                    'c': tf.constant([[3, 2, 1]]),
                },
            },
        ]
        want = {
            'a': {
                'b': tf.constant([[1, 2], [3, 4], [5, 6]]),
                'c': tf.constant([[9, 8, 7], [6, 5, 4], [3, 2, 1]]),
            },
        }
        tf.nest.assert_same_structure(
            tensor_nest.concatenate_batched(parts), want,
            expand_composites=True)
if __name__ == '__main__':
    # Run the tests via absl's test runner when executed directly.
    absltest.main()
| 31.810127 | 77 | 0.569439 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from learner.brains import tensor_nest
import tensorflow as tf
class TensorNestTest(absltest.TestCase):
    """Tests for the tensor_nest module."""

    def test_batch_size_valid_nest(self):
        """batch_size returns the common leading dimension of all leaves."""
        tensors = {
            'a': {
                'b': tf.constant([[1, 2, 3], [4, 5, 6]]),
                'c': tf.constant([[7, 8, 9, 10], [11, 12, 13, 14]]),
            },
        }
        self.assertEqual(2, tensor_nest.batch_size(tensors))

    def test_batch_size_invalid_nest(self):
        """batch_size raises when leaves disagree on the leading dimension."""
        tensors = {
            'a': {
                'b': tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
                'c': tf.constant([[7, 8, 9, 10], [11, 12, 13, 14]]),
            },
        }
        with self.assertRaisesRegex(
                tensor_nest.MismatchedBatchSizeError,
                'Tensors found in nest with mismatched batch sizes: {\'a\'.*}'):
            tensor_nest.batch_size(tensors)

    def test_batch_size_empty_nest(self):
        """batch_size of an empty nest is None."""
        self.assertIsNone(tensor_nest.batch_size({}))

    def test_concatenate_batched(self):
        """Concatenation stacks matching leaves along the batch dimension."""
        parts = [
            {
                'a': {
                    'b': tf.constant([[1, 2], [3, 4]]),
                    'c': tf.constant([[9, 8, 7], [6, 5, 4]]),
                },
            },
            {
                'a': {
                    'b': tf.constant([[5, 6]]),
                    'c': tf.constant([[3, 2, 1]]),
                },
            },
        ]
        want = {
            'a': {
                'b': tf.constant([[1, 2], [3, 4], [5, 6]]),
                'c': tf.constant([[9, 8, 7], [6, 5, 4], [3, 2, 1]]),
            },
        }
        tf.nest.assert_same_structure(
            tensor_nest.concatenate_batched(parts), want,
            expand_composites=True)
if __name__ == '__main__':
    # Run the tests via absl's test runner when executed directly.
    absltest.main()
| 0 | 0 | 0 |
2a4e1bcd422d71f49eec11d6fa5a7c3eff0d4a8c | 1,868 | py | Python | ads/exercises/dynamic_programming/longest_palindromic_subsequence.py | Aminul-Momin/Algorithms_and_Data_Structures | cba73b36b73ad92fb34bc34a0e03503f7a137713 | [
"MIT"
] | null | null | null | ads/exercises/dynamic_programming/longest_palindromic_subsequence.py | Aminul-Momin/Algorithms_and_Data_Structures | cba73b36b73ad92fb34bc34a0e03503f7a137713 | [
"MIT"
] | null | null | null | ads/exercises/dynamic_programming/longest_palindromic_subsequence.py | Aminul-Momin/Algorithms_and_Data_Structures | cba73b36b73ad92fb34bc34a0e03503f7a137713 | [
"MIT"
] | null | null | null | """ Longest Palindromic Subsequence
Given a string s, find the longest palindromic subsequence's length in s.
A subsequence is a sequence that can be derived from another sequence by
deleting some or no elements without changing the order of the remaining elements.
- Example 1:
- Input: s = "bbbab"
- Output: 4
- Explanation: One possible longest palindromic subsequence is "bbbb".
- Example 2:
- Input: s = "cbbd"
- Output: 2
- Explanation: One possible longest palindromic subsequence is "bb".
- Constraints:
- 1 <= s.length <= 1000
- s consists only of lowercase English letters.
"""
# A Dynamic Programming based Python
# program for LPS problem Returns the length
# of the longest palindromic subsequence in seq
# Driver program to test above functions
seq = "GEEKS FOR GEEKS"
n = len(seq)  # NOTE(review): n is unused below; lps() measures its input itself
print("The length of the LPS is " + str(lps(seq)))
# This code is contributed by Bhavya Jain | 30.129032 | 82 | 0.619379 | """ Longest Palindromic Subsequence
Given a string s, find the longest palindromic subsequence's length in s.
A subsequence is a sequence that can be derived from another sequence by
deleting some or no elements without changing the order of the remaining elements.
- Example 1:
- Input: s = "bbbab"
- Output: 4
- Explanation: One possible longest palindromic subsequence is "bbbb".
- Example 2:
- Input: s = "cbbd"
- Output: 2
- Explanation: One possible longest palindromic subsequence is "bb".
- Constraints:
- 1 <= s.length <= 1000
- s consists only of lowercase English letters.
"""
# A Dynamic Programming based Python
# program for LPS problem Returns the length
# of the longest palindromic subsequence in seq
def lps(seq):
    """Return the length of the longest palindromic subsequence of seq.

    Classic O(n^2) interval dynamic programme: L[i][j] holds the LPS length
    of the substring seq[i..j] (inclusive), filled shortest-interval first.

    Args:
        seq: input string (may be empty).  The parameter was renamed from
             'str', which shadowed the builtin.

    Returns:
        Length of the longest palindromic subsequence; 0 for the empty string
        (the original indexed L[0][n - 1] unconditionally and crashed on "").
    """
    n = len(seq)
    if n == 0:
        return 0
    # Table of subproblem answers; only the upper triangle is meaningful.
    L = [[0] * n for _ in range(n)]
    # Every single character is a palindrome of length 1.
    for i in range(n):
        L[i][i] = 1
    # cl is the current substring length.
    for cl in range(2, n + 1):
        for i in range(n - cl + 1):
            j = i + cl - 1
            if seq[i] == seq[j] and cl == 2:
                L[i][j] = 2
            elif seq[i] == seq[j]:
                # Matching ends extend the inner LPS by two.
                L[i][j] = L[i + 1][j - 1] + 2
            else:
                # Drop one end or the other, keep the better answer.
                L[i][j] = max(L[i][j - 1], L[i + 1][j])
    return L[0][n - 1]
# Driver program to test above functions
seq = "GEEKS FOR GEEKS"
n = len(seq)  # NOTE(review): n is unused below; lps() measures its input itself
print("The length of the LPS is " + str(lps(seq)))
# This code is contributed by Bhavya Jain
a2067ed33a3a1be96603d79e2e8692eb71708f29 | 179 | py | Python | 23/04/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | null | null | null | 23/04/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | 70 | 2017-06-01T11:02:51.000Z | 2017-06-30T00:35:32.000Z | 23/04/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
] | null | null | null | import statistics
data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
print(statistics.mean(data)) # arithmetic mean
print(statistics.median(data)) # median
print(statistics.variance(data)) # sample variance
| 29.833333 | 47 | 0.698324 | import statistics
data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
print(statistics.mean(data)) # arithmetic mean
print(statistics.median(data)) # median
print(statistics.variance(data)) # sample variance
| 0 | 0 | 0 |
41e91c81754e039394c5dd4c0257dd388ee3fa1c | 134 | py | Python | src/pyhid_usb_relay/exceptions.py | JPEWdev/pyhid-usb-relay | 287f4e6b992634c7273512b92da31ff4daafa058 | [
"MIT"
] | 5 | 2021-09-14T16:52:41.000Z | 2022-02-13T12:28:58.000Z | src/pyhid_usb_relay/exceptions.py | JPEWdev/pyhid-usb-relay | 287f4e6b992634c7273512b92da31ff4daafa058 | [
"MIT"
] | 1 | 2022-03-05T12:14:50.000Z | 2022-03-05T12:14:50.000Z | src/pyhid_usb_relay/exceptions.py | JPEWdev/pyhid-usb-relay | 287f4e6b992634c7273512b92da31ff4daafa058 | [
"MIT"
] | 1 | 2022-01-19T13:21:33.000Z | 2022-01-19T13:21:33.000Z | # Copyright 2021 Joshua Watt <JPEWhacker@gmail.com>
#
# SPDX-License-Identifier: MIT
| 16.75 | 51 | 0.761194 | # Copyright 2021 Joshua Watt <JPEWhacker@gmail.com>
#
# SPDX-License-Identifier: MIT
class DeviceNotFoundError(Exception):
    """Raised when the requested USB relay device cannot be found."""
    pass
| 0 | 25 | 23 |
25854b801aebe4a8a85e849fa0d7329bfed9bddd | 557 | py | Python | blog/entries/models.py | vinothsundararajan/Django_Blog | a897120991293b3268220b429ce3de79537b565b | [
"MIT"
] | 1 | 2020-05-01T12:24:52.000Z | 2020-05-01T12:24:52.000Z | blog/entries/models.py | vinothsundararajan/Django_Blog | a897120991293b3268220b429ce3de79537b565b | [
"MIT"
] | 10 | 2020-05-03T10:31:02.000Z | 2022-03-12T00:26:51.000Z | blog/entries/models.py | vinothsundararajan/Django_Blog | a897120991293b3268220b429ce3de79537b565b | [
"MIT"
] | 1 | 2020-05-03T10:11:32.000Z | 2020-05-03T10:11:32.000Z | from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
| 34.8125 | 94 | 0.748654 | from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class Entry(models.Model):
    # A single blog entry: title, rich-text body, auto timestamp and author.
    entry_title = models.CharField(max_length=50)
    entry_text = RichTextUploadingField()  # CKEditor rich-text field with upload support
    entry_date = models.DateTimeField(auto_now_add=True)  # set once on creation
    entry_author = models.ForeignKey(User, on_delete=models.CASCADE) # import User from django

    class Meta:
        verbose_name_plural = "entries"  # avoid the default "entrys" in the admin

    def __str__(self):
        return f'{self.entry_title}'
| 34 | 336 | 22 |
4c352e9eabe94aab22b090b6cc0b2abc3b67794c | 3,557 | py | Python | wagtail/contrib/redirects/forms.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/contrib/redirects/forms.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/contrib/redirects/forms.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | import os
from django import forms
from django.utils.translation import gettext_lazy as _
from wagtail.admin.widgets import AdminPageChooser
from wagtail.contrib.redirects.models import Redirect
from wagtail.models import Site
| 32.633028 | 93 | 0.623278 | import os
from django import forms
from django.utils.translation import gettext_lazy as _
from wagtail.admin.widgets import AdminPageChooser
from wagtail.contrib.redirects.models import Redirect
from wagtail.models import Site
class RedirectForm(forms.ModelForm):
    # Optional site restriction; the empty choice means "all sites".
    site = forms.ModelChoiceField(
        label=_("From site"),
        queryset=Site.objects.all(),
        required=False,
        empty_label=_("All sites"),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the default widget with the Wagtail admin page chooser.
        self.fields["redirect_page"].widget = AdminPageChooser()

    required_css_class = "required"

    def clean(self):
        """
        The unique_together condition on the model is ignored if site is None, so need to
        check for duplicates manually
        """
        cleaned_data = super().clean()

        if cleaned_data.get("site") is None:
            old_path = cleaned_data.get("old_path")
            if old_path is None:
                # cleaned_data['old_path'] is empty because it has already failed validation,
                # so don't bother with our duplicate test
                return

            old_path = Redirect.normalise_path(old_path)
            duplicates = Redirect.objects.filter(old_path=old_path, site__isnull=True)
            if self.instance.pk:
                # When editing an existing redirect, don't count it as its own duplicate.
                duplicates = duplicates.exclude(id=self.instance.pk)

            if duplicates:
                raise forms.ValidationError(
                    _("A redirect with this path already exists.")
                )

    class Meta:
        model = Redirect
        fields = ("old_path", "site", "is_permanent", "redirect_page", "redirect_link")
class ImportForm(forms.Form):
    # Upload field for the redirects file to be imported.
    import_file = forms.FileField(
        label=_("File to import"),
    )

    def __init__(self, allowed_extensions, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Restrict the browser file picker to the supported extensions via
        # the HTML "accept" attribute (e.g. ".csv,.xlsx").
        accept = ",".join([".{}".format(x) for x in allowed_extensions])
        self.fields["import_file"].widget = forms.FileInput(attrs={"accept": accept})

        # Human-readable help text listing the same extensions, upper-cased.
        uppercased_extensions = [x.upper() for x in allowed_extensions]
        allowed_extensions_text = ", ".join(uppercased_extensions)
        help_text = _("Supported formats: %(supported_formats)s.") % {
            "supported_formats": allowed_extensions_text,
        }
        self.fields["import_file"].help_text = help_text
class ConfirmImportForm(forms.Form):
    # Column indexes (as strings) chosen by the user to map the uploaded
    # file's columns onto the redirect's from/to paths.
    from_index = forms.ChoiceField(
        label=_("From field"),
        choices=(),
    )
    to_index = forms.ChoiceField(
        label=_("To field"),
        choices=(),
    )
    site = forms.ModelChoiceField(
        label=_("From site"),
        queryset=Site.objects.all(),
        required=False,
        empty_label=_("All sites"),
    )
    permanent = forms.BooleanField(initial=True, required=False)
    # State carried between the upload step and the confirm step.
    import_file_name = forms.CharField(widget=forms.HiddenInput())
    original_file_name = forms.CharField(widget=forms.HiddenInput())
    input_format = forms.CharField(widget=forms.HiddenInput())

    def __init__(self, headers, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # One choice per header column; add a blank placeholder when there is
        # more than one column to choose from.
        choices = []
        for i, f in enumerate(headers):
            choices.append([str(i), f])
        if len(headers) > 1:
            choices.insert(0, ("", "---"))

        self.fields["from_index"].choices = choices
        self.fields["to_index"].choices = choices

    def clean_import_file_name(self):
        # Strip any path components so only a bare file name is accepted.
        data = self.cleaned_data["import_file_name"]
        data = os.path.basename(data)
        return data
| 1,161 | 2,095 | 69 |
54aff4a4ff3deac3f93e8a6c1ecd728b45da102b | 11,643 | py | Python | fpi/assignment2/operations.py | rrrichter/ufrgs | 6a1de03afb5af0ff48d0e630a501c1db3ec2591e | [
"MIT"
] | null | null | null | fpi/assignment2/operations.py | rrrichter/ufrgs | 6a1de03afb5af0ff48d0e630a501c1db3ec2591e | [
"MIT"
] | null | null | null | fpi/assignment2/operations.py | rrrichter/ufrgs | 6a1de03afb5af0ff48d0e630a501c1db3ec2591e | [
"MIT"
] | 1 | 2018-06-24T18:19:23.000Z | 2018-06-24T18:19:23.000Z | import numpy as np
import cv2
import copy
from Tkinter import *
from PIL import Image
from PIL import ImageTk
import tkFileDialog
root = Tk()

# Panel/image globals shared with the callback functions.
panelA = None
panelB = None
# BUG FIX: panelC was never initialised at module level, but selectImage()
# reads the global panelC before assigning it, which raised a NameError on
# the very first image load.
panelC = None
img = None
img2 = None
img3 = None

# 3x3 convolution kernel input grid.
# NOTE(review): .grid() returns None, so the *Label/*Btn variables below are
# all None — harmless, since they are never used afterwards.
ConvolutionLabel = Label(root, text="Convolute").grid(row=0,column=0)
Conv00Entry = Entry(root, bd =5)
Conv01Entry = Entry(root, bd =5)
Conv02Entry = Entry(root, bd =5)
Conv10Entry = Entry(root, bd =5)
Conv11Entry = Entry(root, bd =5)
Conv12Entry = Entry(root, bd =5)
Conv20Entry = Entry(root, bd =5)
Conv21Entry = Entry(root, bd =5)
Conv22Entry = Entry(root, bd =5)
Conv00Entry.grid(row=1,column=0)
Conv01Entry.grid(row=1,column=1)
Conv02Entry.grid(row=1,column=2)
Conv10Entry.grid(row=2,column=0)
Conv11Entry.grid(row=2,column=1)
Conv12Entry.grid(row=2,column=2)
Conv20Entry.grid(row=3,column=0)
Conv21Entry.grid(row=3,column=1)
Conv22Entry.grid(row=3,column=2)

# Numeric parameter entries.
brightnessLabel = Label(root, text="Brightness").grid(row=4,column=0)
brightnessEntry = Entry(root, bd =5)
brightnessEntry.grid(row=4,column=1)
contrastLabel = Label(root, text="Contrast").grid(row=5,column=0)
contrastEntry = Entry(root, bd =5)
contrastEntry.grid(row=5,column=1)
zoomOutLabel = Label(root, text="ZoomOut").grid(row=6,column=0)
zoomOutXEntry = Entry(root, bd =5)
zoomOutXEntry.grid(row=6,column=1)
zoomOutYEntry = Entry(root, bd =5)
zoomOutYEntry.grid(row=6,column=2)

# Operation buttons, one per image-processing callback.
selectImageBtn = Button(root, text="Select an image", command=selectImage).grid(row=0,column=3)
horizontalBtn = Button(root, text ="Flip Horizontally", command = flipHorizontal).grid(row=1,column=3)
grayscaleBtn = Button(root, text ="Grayscale", command = grayscale).grid(row=2,column=3)
histogramBtn = Button(root, text ="Generate Histogram", command = generateHistogram).grid(row=3,column=3)
brightnessBtn = Button(root, text ="Change Brightness", command = changeBrightness).grid(row=4,column=3)
contrastBtn = Button(root, text ="Change Contrast", command = changeContrast).grid(row=5,column=3)
negativeBtn = Button(root, text ="Negative", command = negative).grid(row=6,column=3)
equalizeBtn = Button(root, text ="Equalize", command = equalize).grid(row=7,column=3)
zoomOutBtn = Button(root, text ="ZoomOut", command = zoomOut).grid(row=8,column=3)
zoomInBtn = Button(root, text ="ZoomIn", command = zoomIn).grid(row=9,column=3)
rotateClockWiseBtn = Button(root, text ="rotateClockWise", command = rotateClockWise).grid(row=10,column=3)
rotateAntiClockWiseBtn = Button(root, text ="rotateAntiClockWise", command = rotateAntiClockWise).grid(row=11,column=3)
convoluteBtn = Button(root, text ="Convolute", command = convolute).grid(row=12,column=3)
root.mainloop() | 28.962687 | 429 | 0.675341 | import numpy as np
import cv2
import copy
from Tkinter import *
from PIL import Image
from PIL import ImageTk
import tkFileDialog
def selectImage():
    """Prompt for an image file and load it into the three display panels.

    Panel A shows the untouched original (img), panel B the editable working
    copy (img2) and panel C the auxiliary image (img3, e.g. histogram plots).
    """
    global panelA, panelB, panelC
    # BUG FIX: img3 was missing from the global statement, so the assignment
    # below created a local and the module-level img3 stayed None, crashing
    # the first updateImagesInScreen() call.
    global img, img2, img3
    path = tkFileDialog.askopenfilename()
    if len(path) > 0:
        img = loadImage(path)
        img2 = loadImage(path)
        img3 = loadImage(path)
        displayedImage = imgCvToTk(img)
        displayedImage2 = imgCvToTk(img2)
        displayedImage3 = imgCvToTk(img3)
        if panelA is None or panelB is None or panelC is None:
            # First load: create the three panels.  Keeping a reference on
            # .image stops Tk from garbage-collecting the photo objects.
            panelA = Label(image=displayedImage)
            panelA.image = displayedImage
            panelA.grid(row=7, column=0)
            panelB = Label(image=displayedImage2)
            panelB.image = displayedImage2
            panelB.grid(row=7, column=1)
            panelC = Label(image=displayedImage3)
            panelC.image = displayedImage3
            panelC.grid(row=7, column=2)
        else:
            # update the panels in place
            panelA.configure(image=displayedImage)
            panelB.configure(image=displayedImage2)
            panelC.configure(image=displayedImage3)
            panelA.image = displayedImage
            panelB.image = displayedImage2
            panelC.image = displayedImage3
def nothing(x):
    """No-op callback placeholder (e.g. for trackbar/slider callbacks)."""
    pass
def loadImage(address):
    """Read the file at 'address' as a colour (3-channel BGR) OpenCV image."""
    return cv2.imread(address,1)
def updateImagesInScreen():
    """Refresh panels B and C from the global working images img2/img3."""
    global img2, img3
    displayImage2 = imgCvToTk(img2)
    displayImage3 = imgCvToTk(img3)
    panelB.configure(image=displayImage2)
    panelB.image = displayImage2  # keep a reference so Tk doesn't GC the photo
    panelC.configure(image=displayImage3)
    panelC.image = displayImage3
def imgCvToTk(image):
    """Convert a BGR OpenCV image into a Tk PhotoImage for display.

    cv2.cvtColor already allocates a brand-new output array and never touches
    its input, so the deepcopy the original made of 'image' was redundant
    work and has been dropped; the input is still never modified.
    """
    converted = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    converted = Image.fromarray(converted)
    converted = ImageTk.PhotoImage(converted)
    return converted
def newWindow(name):
    """Create a resizable OpenCV window (helper; not referenced in this module)."""
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
def flipHorizontal():
    """Mirror the working image left-to-right and refresh the display."""
    global img2
    img2 = np.fliplr(img2)  # identical to img2[:, ::-1]
    updateImagesInScreen()
def changeBrightness():
    """Add the brightness offset from the UI to every channel, clamped to [0, 255].

    Vectorised replacement for the original per-pixel triple loop: widen to a
    signed integer so the addition cannot wrap, clamp, then narrow back to
    uint8.  Result is element-for-element identical to the old code.
    """
    global img2
    amount = int(brightnessEntry.get())
    img2 = np.clip(img2.astype(np.int32) + amount, 0, 255).astype(np.uint8)
    updateImagesInScreen()
def changeContrast():
    """Multiply every channel by the contrast factor from the UI, clamped to [0, 255].

    Vectorised replacement for the original per-pixel triple loop.  The
    float product is clamped and then truncated back to uint8, which matches
    the truncation the original element-wise uint8 assignment performed.
    """
    global img2
    amount = float(contrastEntry.get())
    img2 = np.clip(img2.astype(np.float64) * amount, 0, 255).astype(np.uint8)
    updateImagesInScreen()
def negative():
    """Invert the working image (255 - value on every channel).

    Vectorised replacement for the original per-pixel triple loop; uint8
    values never exceed 255, so the subtraction cannot underflow.
    """
    global img2
    img2 = 255 - img2
    updateImagesInScreen()
def grayscale():
    """Convert the working image to greyscale (all three channels equal).

    BUG FIX: cv2.imread returns channels in B,G,R order, but the original
    applied the Rec.601 luma weights as if the layout were R,G,B (0.299 on
    blue and 0.114 on red).  The weights now match the BGR layout.  Also
    vectorised; the final uint8 cast truncates exactly like ndarray.fill()
    did in the original.
    """
    global img2
    b = img2[:, :, 0].astype(np.float64)
    g = img2[:, :, 1].astype(np.float64)
    r = img2[:, :, 2].astype(np.float64)
    luma = (0.299 * r + 0.587 * g + 0.114 * b).astype(np.uint8)
    img2 = np.repeat(luma[:, :, np.newaxis], 3, axis=2)
    updateImagesInScreen()
def generateHistogram():
    """Render a 256x256 histogram plot of the (greyscaled) working image into img3.

    Converts img2 to greyscale if needed, counts intensities of channel 0,
    normalises the bar heights to the tallest bin, draws the bars and pushes
    the plot to panel C.
    """
    global img2
    global img3
    # Crude "is it colour?" check: first pixel's B and G channels differ.
    if (img2[0][0][0] != img2[0][0][1]):
        grayscale()
    # Count occurrences of each intensity (channel 0 only; after grayscale()
    # all channels are equal).
    histogramDict = {}
    for i in range(img2.shape[0]):
        for j in range(img2.shape[1]):
            pixel = img2[i][j]
            if pixel[0] in histogramDict:
                histogramDict[pixel[0]] += 1
            else:
                histogramDict[pixel[0]] = 1
    # Make sure every intensity 0..255 has a bin.
    for i in range(0, 256):
        if i not in histogramDict:
            histogramDict[i] = 0
    biggestValue = 0
    for i in range(0, 256):
        if histogramDict[i] > biggestValue:
            biggestValue = histogramDict[i]
    # (Removed a leftover debug print of biggestValue.)
    # Rescale so the tallest bar spans the full plot height.
    for key, value in histogramDict.items():
        histogramDict[key] = (histogramDict[key]/float((img2.shape[0]*img2.shape[1])))*biggestValue
    imgHistogram = np.full((256, 256, 3), 255, np.uint8)
    for i in range(imgHistogram.shape[0]):
        # BUG FIX: the normalised heights are floats; modern numpy rejects
        # float slice indices, so truncate explicitly (same result as the
        # old implicit truncation).
        imgHistogram[255 - int(histogramDict[i]):255, i] = (0, 0, 0)
    img3 = copy.deepcopy(imgHistogram)
    updateImagesInScreen()
def generateCumulativeHistogram(img):
    """Build the normalised cumulative histogram of an image's first channel.

    Args:
        img: H x W x C uint8 array; only channel 0 is histogrammed (the
             caller passes an already-greyscaled image, so channels match).

    Returns:
        dict mapping every intensity 0..255 to the cumulative pixel count,
        scaled so the final entry equals 255.0.
    """
    # Scale factor so the cumulative histogram tops out at 255.
    alpha = 255.0 / (img.shape[0] * img.shape[1])
    histogramDict = {}
    cumHistogramDict = {}
    for i in range(0, 256):
        if i not in histogramDict:
            histogramDict[i] = 0
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            pixel = img[i][j]
            if pixel[0] in histogramDict:
                histogramDict[pixel[0]] += 1
            else:
                histogramDict[pixel[0]] = 1
    cumHistogramDict[0] = alpha * histogramDict[0]
    for i in range(1, 256):
        cumHistogramDict[i] = cumHistogramDict[i - 1] + alpha * histogramDict[i]
    # (Removed a leftover debug print that wrote "oi:" to stdout.)
    return cumHistogramDict
def equalize():
    """Histogram-equalise the working image using its cumulative histogram."""
    global img2
    global img3
    # Side effect: generateHistogram() greyscales img2 if needed and renders
    # the histogram plot into img3.
    generateHistogram()
    imgHistogram = copy.deepcopy(img3)  # NOTE(review): captured but never used below
    cumulativeHistogram = generateCumulativeHistogram(img2)
    # Remap every pixel through the cumulative distribution (classic
    # histogram equalisation); all three channels get the same value.
    for i in range(img2.shape[0]):
        for j in range(img2.shape[1]):
            img2[i][j] = cumulativeHistogram[img2[i][j][0]]
    updateImagesInScreen()
def zoomOut():
    """Shrink the working image by integer factors sX x sY using box averaging.

    Reads the X/Y factors from the UI entries; each output pixel is the mean
    of an sX x sY block of source pixels.  Integer divisions now use '//' so
    the arithmetic keeps working under Python 3 (under the Python 2 this file
    targets, '/' on integers was already floor division — behaviour is
    unchanged there).
    """
    global img2
    sX = int(zoomOutXEntry.get())
    sY = int(zoomOutYEntry.get())
    newImg = np.full((img2.shape[0] // sX, img2.shape[1] // sY, 3), 255, np.uint8)
    for c in range(0, 3):
        i = 0
        while i < img2.shape[0] - sX:
            j = 0
            while j < img2.shape[1] - sY:
                # Average the sX x sY block with top-left corner (i, j).
                total = 0
                for x in range(i, i + sX):
                    for y in range(j, j + sY):
                        total += img2[x, y, c]
                avg = total // (sX * sY)
                newImg[i // sX, j // sY, c] = avg
                j += sY
            i += sX
    img2 = copy.deepcopy(newImg)
    updateImagesInScreen()
def zoomIn():
    """Double the image size: spread pixels on a 2x grid, then interpolate gaps.

    Index arithmetic now uses '//' so the function also runs correctly under
    Python 3; under Python 2 the behaviour is byte-identical.
    """
    global img2
    newImg = np.full((img2.shape[0] * 2, img2.shape[1] * 2, 3), 255, np.uint8)
    # Pass 1: copy each source pixel to every even (row, col) position.
    i = 0
    while i < newImg.shape[0]:
        j = 0
        while j < newImg.shape[1]:
            newImg[i, j, :] = img2[i // 2, j // 2, :]
            j += 2
        i += 2
    # Pass 2: fill odd columns on even rows with the horizontal average of
    # the two neighbouring pixels (int() widens to avoid uint8 overflow).
    i = 0
    while i < newImg.shape[0]:
        j = 1
        while j < newImg.shape[1] - 1:
            newImg[i, j, 0] = (int(newImg[i, j + 1, 0]) + newImg[i, j - 1, 0]) // 2
            newImg[i, j, 1] = (int(newImg[i, j + 1, 1]) + newImg[i, j - 1, 1]) // 2
            newImg[i, j, 2] = (int(newImg[i, j + 1, 2]) + newImg[i, j - 1, 2]) // 2
            j += 2
        i += 2
    # Pass 3: fill every odd row with the vertical average of its neighbours.
    i = 1
    while i < newImg.shape[0] - 1:
        j = 0
        while j < newImg.shape[1]:
            newImg[i, j, 0] = (int(newImg[i + 1, j, 0]) + newImg[i - 1, j, 0]) // 2
            newImg[i, j, 1] = (int(newImg[i + 1, j, 1]) + newImg[i - 1, j, 1]) // 2
            newImg[i, j, 2] = (int(newImg[i + 1, j, 2]) + newImg[i - 1, j, 2]) // 2
            j += 1
        i += 2
    img2 = copy.deepcopy(newImg)
    updateImagesInScreen()
def rotateClockWise():
    """Rotate the working image 90 degrees clockwise (transpose + horizontal flip)."""
    global img2
    global img3
    # Transposed buffer: output has swapped width/height.
    newImg = np.full((img2.shape[1], img2.shape[0], 3), 255, np.uint8)
    for i in range(newImg.shape[0]):
        for j in range(newImg.shape[1]):
            # '+ 0' just yields a temporary copy of the source pixel before
            # assignment; the assignment itself copies anyway.
            newImg[i, j, :] = (img2[j, i, :] + 0)
    img2 = copy.deepcopy(newImg)
    # Transpose followed by a horizontal mirror is a clockwise rotation.
    # flipHorizontal() already refreshes the display once on its own.
    flipHorizontal()
    updateImagesInScreen()
def rotateAntiClockWise():
    """Rotate the working image 90 degrees anti-clockwise (horizontal flip + transpose)."""
    global img2
    global img3
    # Mirror first (this also refreshes the display once), then transpose.
    flipHorizontal()
    newImg = np.full((img2.shape[1], img2.shape[0], 3), 255, np.uint8)
    for i in range(newImg.shape[0]):
        for j in range(newImg.shape[1]):
            # '+ 0' just yields a temporary copy of the source pixel.
            newImg[i, j, :] = (img2[j, i, :] + 0)
    img2 = copy.deepcopy(newImg)
    updateImagesInScreen()
def convolute():
    """Apply the 3x3 kernel from the UI entry grid to the working image.

    Border pixels are left at 255 (the buffer's fill value), matching the
    original.  The kernel rows are applied vertically flipped relative to the
    entry grid, exactly as the original index/entry pairing did.

    PERFORMANCE FIX: the original called float(ConvXXEntry.get()) 27 times
    for every pixel; the kernel is now read from the widgets once, and the
    three identical per-channel expressions are collapsed into one loop.
    """
    global img2
    # k[r][c] corresponds to ConvRCEntry in the UI grid.
    k = [[float(Conv00Entry.get()), float(Conv01Entry.get()), float(Conv02Entry.get())],
         [float(Conv10Entry.get()), float(Conv11Entry.get()), float(Conv12Entry.get())],
         [float(Conv20Entry.get()), float(Conv21Entry.get()), float(Conv22Entry.get())]]
    newImg = np.full((img2.shape[0], img2.shape[1], 3), 255, np.uint8)
    for i in range(1, newImg.shape[0] - 1):
        for j in range(1, newImg.shape[1] - 1):
            for c in range(3):
                # Same source-pixel/kernel-entry pairing as the original
                # (kernel applied with its rows vertically flipped).
                acc = (int(img2[i-1, j-1, c]) * k[2][0] + int(img2[i, j-1, c]) * k[2][1] +
                       int(img2[i+1, j-1, c]) * k[2][2] + int(img2[i-1, j, c]) * k[1][0] +
                       int(img2[i, j, c]) * k[1][1] + int(img2[i+1, j, c]) * k[1][2] +
                       int(img2[i-1, j+1, c]) * k[0][0] + int(img2[i, j+1, c]) * k[0][1] +
                       int(img2[i+1, j+1, c]) * k[0][2])
                if acc < 0:
                    newImg[i, j, c] = 0
                elif acc > 255:
                    newImg[i, j, c] = 255
                else:
                    newImg[i, j, c] = acc  # float -> uint8 truncation, as before
    img2 = copy.deepcopy(newImg)
    updateImagesInScreen()
root = Tk()

# Panel/image globals shared with the callback functions.
panelA = None
panelB = None
# BUG FIX: panelC was never initialised at module level, but selectImage()
# reads the global panelC before assigning it, which raised a NameError on
# the very first image load.
panelC = None
img = None
img2 = None
img3 = None

# 3x3 convolution kernel input grid.
# NOTE(review): .grid() returns None, so the *Label/*Btn variables below are
# all None — harmless, since they are never used afterwards.
ConvolutionLabel = Label(root, text="Convolute").grid(row=0,column=0)
Conv00Entry = Entry(root, bd =5)
Conv01Entry = Entry(root, bd =5)
Conv02Entry = Entry(root, bd =5)
Conv10Entry = Entry(root, bd =5)
Conv11Entry = Entry(root, bd =5)
Conv12Entry = Entry(root, bd =5)
Conv20Entry = Entry(root, bd =5)
Conv21Entry = Entry(root, bd =5)
Conv22Entry = Entry(root, bd =5)
Conv00Entry.grid(row=1,column=0)
Conv01Entry.grid(row=1,column=1)
Conv02Entry.grid(row=1,column=2)
Conv10Entry.grid(row=2,column=0)
Conv11Entry.grid(row=2,column=1)
Conv12Entry.grid(row=2,column=2)
Conv20Entry.grid(row=3,column=0)
Conv21Entry.grid(row=3,column=1)
Conv22Entry.grid(row=3,column=2)

# Numeric parameter entries.
brightnessLabel = Label(root, text="Brightness").grid(row=4,column=0)
brightnessEntry = Entry(root, bd =5)
brightnessEntry.grid(row=4,column=1)
contrastLabel = Label(root, text="Contrast").grid(row=5,column=0)
contrastEntry = Entry(root, bd =5)
contrastEntry.grid(row=5,column=1)
zoomOutLabel = Label(root, text="ZoomOut").grid(row=6,column=0)
zoomOutXEntry = Entry(root, bd =5)
zoomOutXEntry.grid(row=6,column=1)
zoomOutYEntry = Entry(root, bd =5)
zoomOutYEntry.grid(row=6,column=2)

# Operation buttons, one per image-processing callback.
selectImageBtn = Button(root, text="Select an image", command=selectImage).grid(row=0,column=3)
horizontalBtn = Button(root, text ="Flip Horizontally", command = flipHorizontal).grid(row=1,column=3)
grayscaleBtn = Button(root, text ="Grayscale", command = grayscale).grid(row=2,column=3)
histogramBtn = Button(root, text ="Generate Histogram", command = generateHistogram).grid(row=3,column=3)
brightnessBtn = Button(root, text ="Change Brightness", command = changeBrightness).grid(row=4,column=3)
contrastBtn = Button(root, text ="Change Contrast", command = changeContrast).grid(row=5,column=3)
negativeBtn = Button(root, text ="Negative", command = negative).grid(row=6,column=3)
equalizeBtn = Button(root, text ="Equalize", command = equalize).grid(row=7,column=3)
zoomOutBtn = Button(root, text ="ZoomOut", command = zoomOut).grid(row=8,column=3)
zoomInBtn = Button(root, text ="ZoomIn", command = zoomIn).grid(row=9,column=3)
rotateClockWiseBtn = Button(root, text ="rotateClockWise", command = rotateClockWise).grid(row=10,column=3)
rotateAntiClockWiseBtn = Button(root, text ="rotateAntiClockWise", command = rotateAntiClockWise).grid(row=11,column=3)
convoluteBtn = Button(root, text ="Convolute", command = convolute).grid(row=12,column=3)

# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
84afb68b2e52f3070efd8c0ae2d94c912cba6f2a | 59,900 | py | Python | vendor/mari/1.4v1/_ocio_toolbar.py | mjtitchener-fn/OpenColorIO | 00b5362442b9fe954c4b1161fe0cec621fcf1915 | [
"BSD-3-Clause"
] | 628 | 2018-08-11T02:18:36.000Z | 2022-03-31T15:05:23.000Z | src/mari/1.4v1/_ocio_toolbar.py | dictoon/OpenColorIO | 64adcad300adfd166280d2e7b1fb5c3ce7dca482 | [
"BSD-3-Clause"
] | 655 | 2019-04-16T15:15:31.000Z | 2022-03-31T18:05:52.000Z | src/mari/1.4v1/_ocio_toolbar.py | dictoon/OpenColorIO | 64adcad300adfd166280d2e7b1fb5c3ce7dca482 | [
"BSD-3-Clause"
] | 181 | 2018-12-22T15:39:52.000Z | 2022-03-22T09:52:27.000Z | #-------------------------------------------------------------------------------
# Post processing (color management) related Mari scripts
# coding: utf-8
# Copyright (c) 2011 The Foundry Visionmongers Ltd. All Rights Reserved.
#-------------------------------------------------------------------------------
import mari, time, PythonQt, os, math
QtGui = PythonQt.QtGui
QtCore = PythonQt.QtCore
ocio = mari.utils.ocio
##############################################################################################
# Layout limits for the toolbar widgets — presumably maximum widths in
# pixels, judging by the names; confirm against the widget-construction code.
GAIN_GROUP_MAX_WIDTH = 312
FSTOP_MAX_WIDTH = 50
EXPOSURE_MAX_WIDTH = 102
GAIN_MAX_WIDTH = 80
GAMMA_MAX_WIDTH = 200
TOOLBAR_SPACING = 3

# Module-level toolbar singleton; created at import time further below when
# Mari is running and an OCIO default config is available.
toolbar = None
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Widgets:
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Metadata:
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# External Connections:
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Filter:
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Debugging:
#-----------------------------------------------------------------------------------------
##############################################################################################
# The following functions CAN'T be part of the toolbar class as a potential bug in PythonQt
# causes the disconnect function to fail
#-----------------------------------------------------------------------------------------
##############################################################################################
# Module initialisation: only build the toolbar when Mari is actually running
# and the required post-filter API plus a default OCIO config are available.
if mari.app.isRunning():
    if not hasattr(mari.gl_render, 'createPostFilterCollection'):
        # Old Mari build without the post-filter API: report and do nothing.
        ocio.printMessage(ocio.MessageType.ERROR, 'This version of Mari does not support the mari.gl_render.createPostFilterCollection API')
    else:
        if ocio.config_default is not None:
            toolbar = OcioToolBar()
        else:
            # No usable OCIO config: tear down any leftover UI so the user
            # cannot interact with a non-functional colour-space toolbar.
            # Destroy the OCIO post filter collection if present to prevent the user trying to use it.
            filter_collection = mari.gl_render.findPostFilterCollection('Color Space')
            if filter_collection is not None:
                mari.gl_render.deletePostFilterCollection(filter_collection)
            # Destroy the toolbar to prevent the user trying to use it.
            mari.app.deleteToolBar('Color Space')
| 46.796875 | 188 | 0.581953 | #-------------------------------------------------------------------------------
# Post processing (color management) related Mari scripts
# coding: utf-8
# Copyright (c) 2011 The Foundry Visionmongers Ltd. All Rights Reserved.
#-------------------------------------------------------------------------------
import mari, time, PythonQt, os, math
QtGui = PythonQt.QtGui
QtCore = PythonQt.QtCore
ocio = mari.utils.ocio
##############################################################################################
# Pixel-width limits for the toolbar widgets and their containing groups.
GAIN_GROUP_MAX_WIDTH = 312
FSTOP_MAX_WIDTH = 50
EXPOSURE_MAX_WIDTH = 102
GAIN_MAX_WIDTH = 80
GAMMA_MAX_WIDTH = 200
# Spacing in pixels between toolbar items.
TOOLBAR_SPACING = 3
# Module-level singleton; assigned an OcioToolBar at startup when OCIO is usable.
toolbar = None
class OcioToolBar():
#-----------------------------------------------------------------------------------------
def __init__(self):
    """Build the color-management toolbar, its post filters, and all signal wiring.

    Creates the 'Color Space' post filter collection (LUT transform + display
    transform), publishes their metadata, and connects to application, GL and
    project signals so the toolbar tracks application state.
    """
    # Default all members...
    self._config_file_list = mari.FileList(ocio.config_file_list_default)
    self._config = ocio.config_default
    self._lut_file_list = mari.FileList(ocio.lut_file_list_default)
    self._lut_extrapolate = ocio.lut_extrapolate_default
    self._color_space = ocio.color_space_default
    self._display = ocio.display_default
    self._view = ocio.view_default
    self._swizzle = ocio.swizzle_default
    self._gain = ocio.gain_default
    self._gamma = ocio.gamma_default
    # Filter/texture handles and cache ids; populated lazily by the rebuild helpers.
    self._lut_filter = None
    self._lut_filter_cache_id = None
    self._lut_texture_cache_id = None
    self._lut_sampler_name = None
    self._display_filter = None
    self._display_filter_cache_id = None
    self._display_texture_cache_id = None
    self._display_sampler_name = None
    # Widget handles; created in _buildWidgets().
    self._lut_extrapolate_widget = None
    self._color_space_widget = None
    self._display_widget = None
    self._view_widget = None
    self._swizzle_widget = None
    self._fstop_widget = None
    self._fstop_decrement_widget = None
    self._fstop_increment_widget = None
    self._gain_widget = None
    self._exposure_widget = None
    self._gain_reset_widget = None
    self._gamma_widget = None
    self._gamma_reset_widget = None
    self._buildWidgets()
    # Widgets start disabled; they are re-enabled from the GL signal handlers.
    self._toggle_color_management_action.setEnabled(False)
    self._enableWidgets(False)
    # Enable/disable color management.
    mari.gl_render.setPostProcessingEnabled(self.isColorManagementEnabled())
    # *** IMPORTANT *** The post filter collection used to be called 'OpenColorIO' but was renamed to hide the fact
    # we use OpenColorIO from our users. So as a temporary workaround we need to check for the old filter collection
    # on startup and remove it if found.
    delete_filter_collection = mari.gl_render.findPostFilterCollection('OpenColorIO')
    if delete_filter_collection is not None:
        mari.gl_render.deletePostFilterCollection(delete_filter_collection)
    # Create the OCIO post filter collection if not present.
    self._filter_collection = mari.gl_render.findPostFilterCollection('Color Space')
    if self._filter_collection is None:
        self._filter_collection = mari.gl_render.createPostFilterCollection('Color Space')
    else:
        self._filter_collection.clear()
    self._filter_collection.setReadOnly(True)
    self._lut_filter = self._filter_collection.createGLSL('LUT Transform')
    # If the default LUT fails to build, drop it rather than keeping a broken path.
    if not self._lut_file_list.isEmpty() and not self._rebuildLUTFilter(self._lut_file_list.at(0)):
        self._lut_file_list.clear()
    self._display_filter = self._filter_collection.createGLSL('Display Transform')
    self._rebuildDisplayFilter()
    self._buildMetadata()
    # Set the color management filter stack as the current.
    mari.gl_render.setPostFilterCollection(self._filter_collection)
    # Attach ourselves to the applications toolbar created signal so we can rebuild the toolbar when it's been
    # destoyed.
    mari.utils.connect(mari.app.toolBarsCreated, self._toolBarsCreated)
    # Attach ourselves to the appropriate GL signals so we can enable and disable widgets.
    mari.utils.connect(mari.gl_render.postProcessingEnabled, self._postProcessingEnabled)
    mari.utils.connect(mari.gl_render.setCurrentPostFilterCollection, self._setCurrentPostFilterCollection)
    # Attach ourselves to the appropriate project signals so we can load and save settings.
    mari.utils.connect(mari.projects.openedProject, self._openedProject)
    mari.utils.connect(mari.projects.aboutToSaveProject, self._aboutToSaveProject)
    mari.utils.connect(mari.projects.projectClosed, self._closedProject)
    # Update the UI to match the current project, if we have one.
    current_project = mari.projects.current()
    if current_project is not None:
        self._openedProject(current_project)
#-----------------------------------------------------------------------------------------
def isColorManagementEnabled(self):
    """Return True when the color-management toggle action is checked."""
    toggle_action = self._toggle_color_management_action
    return toggle_action.isChecked()
#-----------------------------------------------------------------------------------------
def setLUTPath(self, value, update_metadata = True, force_shader_build = False):
    """Set (or clear, with '') the LUT file and rebuild the LUT filter.

    Returns True on success, False if the LUT failed to build. When
    update_metadata is False the call originated from the metadata system,
    so on failure the metadata is restored to the previous file list.
    """
    # Only act when the path actually changes (empty<->set or different file).
    if (self._lut_file_list.isEmpty() and value != '') or \
       (not self._lut_file_list.isEmpty() and value == '') or \
       (not self._lut_file_list.isEmpty() and value != self._lut_file_list.at(0)) \
       :
        if self._rebuildLUTFilter(value, force_shader_build):
            self._lut_file_list.clear()
            if value != '':
                self._lut_file_list.append(value)
                self._lut_file_list.setPickedFile(value)
                self._clear_lut_action.setEnabled(True)
                self._lut_extrapolate_widget.setEnabled(True)
                self._lut_filter.setEnabled(True)
            else:
                # Cleared: disable the LUT filter and its controls.
                self._clear_lut_action.setEnabled(False)
                self._lut_extrapolate_widget.setEnabled(False)
                self._lut_filter.setEnabled(False)
            if update_metadata:
                # Disconnect first so writing metadata doesn't re-enter via the signal.
                mari.utils.disconnect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
                self._lut_filter.setMetadata('File', self._lut_file_list)
                mari.utils.connect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
        else:
            # If this was a request via the metadata system we will need to put the value back to what it was
            # before.
            if not update_metadata:
                mari.utils.disconnect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
                self._lut_filter.setMetadata('File', self._lut_file_list)
                mari.utils.connect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
            return False
    return True
#-----------------------------------------------------------------------------------------
def resetLUT(self):
    """Restore the LUT to the application default, clearing it entirely on failure."""
    defaults = ocio.lut_file_list_default
    restored = (not defaults.isEmpty()) and self.setLUTPath(defaults.at(0))
    if not restored:
        self.setLUTPath('')
#-----------------------------------------------------------------------------------------
def selectLUT(self):
    """Prompt the user for a LUT file and apply it when a valid file is chosen."""
    current_path = '' if self._lut_file_list.isEmpty() else self._lut_file_list.at(0)
    chosen_path = mari.utils.misc.getOpenFileName(None,
                                                  'Select LUT File',
                                                  current_path,
                                                  ocio.lutFileFilter(),
                                                  None,
                                                  0)
    if os.path.isfile(chosen_path):
        self.setLUTPath(chosen_path)
#-----------------------------------------------------------------------------------------
def setExtrapolateEnabled(self, value, update_widget = True, update_metadata = True):
    """Toggle LUT extrapolation and rebuild the LUT shader to match.

    update_widget / update_metadata control which mirrors of the value are
    refreshed, so the caller that originated the change is not re-updated.
    """
    if value != self._lut_extrapolate:
        self._lut_extrapolate = value
        if update_widget:
            # blockSignals prevents the checkbox toggle from re-entering this method.
            block = self._lut_extrapolate_widget.blockSignals(True)
            self._lut_extrapolate_widget.setChecked(self._lut_extrapolate)
            self._lut_extrapolate_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
            self._lut_filter.setMetadata('Extrapolate', self._lut_extrapolate)
            mari.utils.connect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
        # Extrapolation changes the generated shader, so force a rebuild; fall
        # back to the default LUT if the rebuild fails.
        if not self._rebuildLUTFilter(lut_path = '' if self._lut_file_list.isEmpty() else self._lut_file_list.at(0),
                                      force_shader_build = True):
            self.resetLUT()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed extrapolate to \'%s\'' % self._lut_extrapolate)
#-----------------------------------------------------------------------------------------
def setConfigPath(self, value, update_metadata = True):
    """Load a new OCIO configuration file and refresh widgets, metadata and filter.

    Returns True on success, False if the config failed to load. When
    update_metadata is False the request came from the metadata system and the
    previous config path is written back on failure.
    """
    if self._config_file_list.isEmpty() or value != self._config_file_list.at(0):
        config = ocio.loadConfig(value, True)
        if config is not None:
            self._config_file_list.clear()
            self._config_file_list.append(value)
            self._config_file_list.setPickedFile(value)
            self._config = config
            self._updateDisplayWidgets()
            self._updateDisplayMetadata()
            self._rebuildDisplayFilter()
            ocio.printMessage(ocio.MessageType.DEBUG, 'Changed config to \'%s\'' % self._config_file_list.at(0))
        else:
            # If this was a request via the metadata system we will need to put the value back to what it was
            # before.
            if not update_metadata:
                mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
                self._display_filter.setMetadata('ConfigPath', self._config_file_list)
                mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            return False
    return True
#-----------------------------------------------------------------------------------------
def selectConfig(self):
    """Prompt the user for an OCIO configuration file and load it if valid."""
    current_path = '' if self._config_file_list.isEmpty() else self._config_file_list.at(0)
    chosen_path = mari.utils.misc.getOpenFileName(None,
                                                  'Select Configuration File',
                                                  current_path,
                                                  ocio.configFileFilter(),
                                                  None,
                                                  0)
    if os.path.isfile(chosen_path):
        self.setConfigPath(chosen_path)
#-----------------------------------------------------------------------------------------
def setColorSpace(self, value, update_widget = True, update_metadata = True):
    """Set the input color space and rebuild the display filter.

    update_widget / update_metadata suppress the echo back to whichever mirror
    originated the change (combo box or filter metadata).
    """
    if value != self._color_space:
        self._color_space = value
        if update_widget:
            # blockSignals stops the combo box change from re-entering this method.
            block = self._color_space_widget.blockSignals(True)
            index = self._color_space_widget.findText(self._color_space)
            self._color_space_widget.setCurrentIndex(index)
            self._color_space_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('InputColorSpace', self._color_space)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed input color space to \'%s\'' % self._color_space)
#-----------------------------------------------------------------------------------------
def setDisplay(self, value, update_widget = True, update_metadata = True):
    """Set the display device, reset the view to that display's default, and rebuild.

    update_widget / update_metadata suppress the echo back to whichever mirror
    originated the change (combo box or filter metadata).
    """
    if value != self._display:
        self._display = value
        if update_widget:
            block = self._display_widget.blockSignals(True)
            index = self._display_widget.findText(self._display)
            self._display_widget.setCurrentIndex(index)
            self._display_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('Display', self._display)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        # A new display invalidates the current view; switch to its default view.
        self.setView(self._config.getDefaultView(self._display), update_widget, update_metadata)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed display to \'%s\'' % self._display)
#-----------------------------------------------------------------------------------------
def setView(self, value, update_widget = True, update_metadata = True):
    """Set the view transform and rebuild the display filter.

    update_widget / update_metadata suppress the echo back to whichever mirror
    originated the change (combo box or filter metadata).
    """
    if value != self._view:
        self._view = value
        if update_widget:
            block = self._view_widget.blockSignals(True)
            index = self._view_widget.findText(self._view)
            self._view_widget.setCurrentIndex(index)
            self._view_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('View', self._view)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed view to \'%s\'' % self._view)
#-----------------------------------------------------------------------------------------
def setSwizzle(self, value, update_widget = True, update_metadata = True):
    """Set the channel swizzle (component view) and rebuild the display filter.

    update_widget / update_metadata suppress the echo back to whichever mirror
    originated the change (combo box or filter metadata).
    """
    if value != self._swizzle:
        self._swizzle = value
        if update_widget:
            block = self._swizzle_widget.blockSignals(True)
            index = self._swizzle_widget.findText(self._swizzle)
            self._swizzle_widget.setCurrentIndex(index)
            self._swizzle_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('Swizzle', self._swizzle)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed swizzle to \'%s\'' % self._swizzle)
#-----------------------------------------------------------------------------------------
def setGain(self, value, update_widget = True, update_metadata = True):
    """Set the display gain and rebuild the display filter.

    update_widget refreshes the f-stop/gain/exposure widget trio; update_metadata
    mirrors the value into the display filter metadata.
    """
    if value != self._gain:
        self._gain = value
        if update_widget:
            self._updateGainWidgets()
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('Gain', self._gain)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed gain to \'%s\'' % self._gain)
#-----------------------------------------------------------------------------------------
def setGamma(self, value, update_widget = True, update_metadata = True):
    """Set the display gamma and rebuild the display filter.

    update_widget / update_metadata suppress the echo back to whichever mirror
    originated the change (slider or filter metadata).
    """
    if value != self._gamma:
        self._gamma = value
        if update_widget:
            block = self._gamma_widget.blockSignals(True)
            self._gamma_widget.setValue(self._gamma)
            self._gamma_widget.blockSignals(block)
        if update_metadata:
            mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
            self._display_filter.setMetadata('Gamma', self._gamma)
            mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
        self._rebuildDisplayFilter()
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed gamma to \'%s\'' % self._gamma)
#-----------------------------------------------------------------------------------------
def updateLUTSize(self):
    """Rebuild both filters after the LUT-size preference changed.

    Deletes the cached textures and clears the cache ids so the next rebuild
    regenerates shaders and textures at the new size.
    """
    ocio.printMessage(ocio.MessageType.DEBUG, 'Updating LUT size...')
    # Rebuild the LUT filter.
    if self._lut_sampler_name is not None:
        self._lut_filter.deleteTexture(self._lut_sampler_name)
        self._lut_sampler_name = None
    self._lut_filter_cache_id = None
    self._lut_texture_cache_id = None
    # Fall back to the default LUT if the rebuild fails.
    if not self._rebuildLUTFilter(lut_path = '' if self._lut_file_list.isEmpty() else self._lut_file_list.at(0),
                                  force_shader_build = True):
        self.resetLUT()
    # Rebuild the display filter.
    if self._display_sampler_name is not None:
        self._display_filter.deleteTexture(self._display_sampler_name)
        self._display_sampler_name = None
    self._display_filter_cache_id = None
    self._display_texture_cache_id = None
    self._rebuildDisplayFilter()
#-----------------------------------------------------------------------------------------
def updateFStopCenter(self):
    """Refresh the f-stop readout after the center f-stop preference changed."""
    ocio.printMessage(ocio.MessageType.DEBUG, 'Updating f-stop center...')
    self._updateFStopWidgetText(ocio.convertGainToFStop(self._gain))
#-----------------------------------------------------------------------------------------
# Widgets:
#-----------------------------------------------------------------------------------------
def _buildWidgets(self):
    """Create the toolbar actions and widgets and wire their signals.

    Builds, in order: the four toolbar actions (toggle / select config /
    select LUT / clear LUT), the extrapolate checkbox, the four combo boxes
    (color space, display, view, swizzle), the gain group (f-stop readout,
    gain edit, exposure slider, reset), and the gamma slider with reset.
    """
    action_list = list()
    self._toggle_color_management_action = self._addAction(
        '/Mari/OpenColorIO/&Toggle Color Management',
        'mari.system._ocio_toolbar.toolbar._toggleColorManagement()',
        'ColorManager.png',
        'Toggle on/off color management',
        'Toggle color management')
    self._toggle_color_management_action.setCheckable(True)
    self._toggle_color_management_action.setChecked(ocio.enabled_default)
    action_list.append('/Mari/OpenColorIO/&Toggle Color Management')
    self._select_config_action = self._addAction(
        '/Mari/OpenColorIO/&Select Config',
        'mari.system._ocio_toolbar.toolbar.selectConfig()',
        'LoadColorConfig.png',
        'Select color space configuration file',
        'Select config')
    action_list.append('/Mari/OpenColorIO/&Select Config')
    self._select_lut_action = self._addAction(
        '/Mari/OpenColorIO/&Select LUT',
        'mari.system._ocio_toolbar.toolbar.selectLUT()',
        'LoadLookupTable.png',
        'Select LUT file',
        'Select LUT')
    action_list.append('/Mari/OpenColorIO/&Select LUT')
    self._clear_lut_action = self._addAction(
        '/Mari/OpenColorIO/&Clear LUT',
        'mari.system._ocio_toolbar.toolbar._clearLUT()',
        'ClearLookupTable.png',
        'Clear current LUT',
        'Clear LUT')
    action_list.append('/Mari/OpenColorIO/&Clear LUT')
    # Recreate the toolbar from scratch so stale widgets are never reused.
    mari.app.deleteToolBar('Color Space')
    self._toolbar = mari.app.createToolBar('Color Space', True)
    self._toolbar.addActionList(action_list, False)
    self._toolbar.setLockedSlot(True)
    self._toolbar.setSpacing(TOOLBAR_SPACING)
    self._toolbar.insertSeparator('/Mari/OpenColorIO/&Select LUT')
    # Extrapolate:
    self._toolbar.addWidget(QtGui.QLabel('Extrapolate'))
    self._lut_extrapolate_widget = QtGui.QCheckBox()
    self._lut_extrapolate_widget.setToolTip('Extrapolate if outside LUT range');
    self._lut_extrapolate_widget.setChecked(self._lut_extrapolate)
    self._lut_extrapolate_widget.connect(
        QtCore.SIGNAL('toggled(bool)'),
        lambda value: self.setExtrapolateEnabled(value = value, update_widget = False, update_metadata = True))
    self._toolbar.addWidget(self._lut_extrapolate_widget)
    self._toolbar.addSeparator()
    color_spaces = [color_space.getName() for color_space in self._config.getColorSpaces()]
    # Color-Space:
    self._color_space_widget = self._addComboBox(
        'Input Color Space',
        color_spaces,
        self._color_space,
        ocio.color_space_default,
        lambda value: self.setColorSpace(value = value, update_widget = False, update_metadata = True))
    # Read back the widget's choice in case the requested value wasn't in the list.
    self._color_space = self._color_space_widget.currentText
    # Display:
    self._display_widget = self._addComboBox(
        'Display Device',
        self._config.getDisplays(),
        self._display,
        ocio.display_default,
        lambda value: self.setDisplay(value = value, update_widget = False, update_metadata = True))
    self._display = self._display_widget.currentText
    # View:
    self._view_widget = self._addComboBox(
        'View Transform',
        self._config.getViews(self._display),
        self._view,
        ocio.view_default,
        lambda value: self.setView(value = value, update_widget = False, update_metadata = True))
    self._view = self._view_widget.currentText
    # Swizzle:
    self._swizzle_widget = self._addComboBox(
        'Component',
        ocio.SWIZZLE_TYPES,
        self._swizzle,
        ocio.swizzle_default,
        lambda value: self.setSwizzle(value = value, update_widget = False, update_metadata = True))
    self._swizzle = self._swizzle_widget.currentText
    # Gain Group:
    group_widget, layout = self._addWidgetGroup()
    group_widget.setMaximumWidth(GAIN_GROUP_MAX_WIDTH)
    layout.addWidget(QtGui.QLabel('Gain'))
    # F-Stop:
    subgroup_widget = QtGui.QWidget()
    layout.addWidget(subgroup_widget)
    sublayout = QtGui.QHBoxLayout()
    sublayout.setSpacing(0)
    sublayout.setMargin(0)
    subgroup_widget.setLayout(sublayout)
    # Map the current gain onto the f-stop widget's integer range.
    exposure = ocio.convertGainToExposure(self._gain)
    fstop = ocio.convertExposureToFStop(exposure)
    scale = (exposure - ocio.EXPOSURE_MIN) / ocio.EXPOSURE_DELTA
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.FSTOP_STEP_SIZE))
    widget_value = scale * widget_max
    self._fstop_widget = mari.LineEdit()
    self._fstop_widget.setRange(widget_max)
    self._fstop_widget.setMaximumWidth(FSTOP_MAX_WIDTH)
    self._fstop_widget.setReadOnly(True)
    self._updateFStopWidgetText(fstop)
    self._fstop_widget.setValue(widget_value)
    mari.utils.connect(self._fstop_widget.movedMouse, self._fstopMovedMouse)
    self._fstop_widget.addToLayout(sublayout)
    self._fstop_decrement_widget = self._addSmallButtom(
        sublayout,
        '-',
        'Decrease gain 1/2 stop',
        lambda: self.setGain(ocio.convertExposureToGain(ocio.convertGainToExposure(self._gain) - 0.5)))
    self._fstop_increment_widget = self._addSmallButtom(
        sublayout,
        '+',
        'Increase gain 1/2 stop',
        lambda: self.setGain(ocio.convertExposureToGain(ocio.convertGainToExposure(self._gain) + 0.5)))
    ocio.registerLUTSizeChanged(self.updateLUTSize)
    ocio.registerFStopCenterChanged(self.updateFStopCenter)
    # Gain:
    subgroup_widget = QtGui.QWidget()
    layout.addWidget(subgroup_widget)
    sublayout = QtGui.QHBoxLayout()
    sublayout.setSpacing(3)
    sublayout.setMargin(0)
    subgroup_widget.setLayout(sublayout)
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.EXPOSURE_STEP_SIZE))
    widget_value = scale * widget_max
    self._gain_widget = mari.LineEdit()
    self._gain_widget.setRange(widget_max)
    self._gain_widget.addFloatValidator(ocio.GAIN_MIN, ocio.GAIN_MAX, ocio.GAIN_PRECISION)
    self._gain_widget.setMaximumWidth(GAIN_MAX_WIDTH)
    self._updateGainWidgetText()
    self._gain_widget.setValue(widget_value)
    # Clamp typed-in gain values to the valid range when focus leaves the edit.
    mari.utils.connect(
        self._gain_widget.lostFocus,
        lambda: self.setGain(max(min(float(self._gain_widget.text()), ocio.GAIN_MAX), ocio.GAIN_MIN)))
    mari.utils.connect(self._gain_widget.movedMouse, self._gainMovedMouse)
    self._gain_widget.addToLayout(sublayout)
    # Exposure:
    self._exposure_widget = QtGui.QSlider()
    self._exposure_widget.orientation = 1
    self._exposure_widget.setMaximum(widget_max)
    self._exposure_widget.setValue(widget_value)
    self._exposure_widget.setMinimumWidth(EXPOSURE_MAX_WIDTH)
    self._exposure_widget.setMaximumWidth(EXPOSURE_MAX_WIDTH)
    mari.utils.connect(self._exposure_widget.valueChanged, self._exposureChanged)
    sublayout.addWidget(self._exposure_widget)
    self._gain_reset_widget = self._addSmallButtom(
        layout,
        'R',
        'Reset gain to default',
        lambda: self.setGain(value = ocio.GAIN_RESET, update_widget = True, update_metadata = True))
    # Gamma:
    group_widget, layout = self._addWidgetGroup()
    group_widget.setMaximumWidth(GAMMA_MAX_WIDTH)
    layout.addWidget(QtGui.QLabel('Gamma'))
    self._gamma_widget = mari.FloatSlider()
    self._gamma_widget.setRange(ocio.GAMMA_MIN, ocio.GAMMA_MAX)
    self._gamma_widget.setStepSize(ocio.GAMMA_STEP_SIZE)
    self._gamma_widget.setPrecision(ocio.GAMMA_PRECISION)
    self._gamma_widget.setValue(self._gamma)
    mari.utils.connect(
        self._gamma_widget.valueChanged,
        lambda value: self.setGamma(value = value, update_widget = False, update_metadata = True))
    self._gamma_widget.addToLayout(layout)
    self._gamma_reset_widget = self._addSmallButtom(
        layout,
        'R',
        'Reset gamma to default',
        lambda: self.setGamma(value = ocio.GAMMA_RESET, update_widget = True, update_metadata = True))
#-----------------------------------------------------------------------------------------
def _updateDisplayWidgets(self):
    """Repopulate every display-related widget from the current OCIO config.

    After each combo box refresh the member is read back from the widget, since
    the previous value may not exist in the new config and the widget may have
    fallen back to the default.
    """
    color_spaces = [color_space.getName() for color_space in self._config.getColorSpaces()]
    self._updateComboBox(self._color_space_widget, color_spaces, self._color_space, ocio.color_space_default)
    self._color_space = self._color_space_widget.currentText
    self._updateComboBox(self._display_widget, self._config.getDisplays(), self._display, ocio.display_default)
    self._display = self._display_widget.currentText
    self._updateComboBox(self._view_widget, self._config.getViews(self._display), self._view, ocio.view_default)
    self._view = self._view_widget.currentText
    self._updateComboBox(self._swizzle_widget, ocio.SWIZZLE_TYPES, self._swizzle, ocio.swizzle_default)
    self._swizzle = self._swizzle_widget.currentText
    self._updateGainWidgets()
    self._gamma_widget.setValue(self._gamma)
#-----------------------------------------------------------------------------------------
def _enableWidgets(self, enable):
    """Enable or disable every toolbar control.

    The clear-LUT action and extrapolate checkbox additionally require a LUT
    to be loaded before they are enabled.
    """
    self._select_config_action.setEnabled(enable)
    self._select_lut_action.setEnabled(enable)
    lut_enable = enable and not self._lut_file_list.isEmpty()
    for lut_control in (self._clear_lut_action, self._lut_extrapolate_widget):
        lut_control.setEnabled(lut_enable)
    for control in (self._color_space_widget,
                    self._display_widget,
                    self._view_widget,
                    self._swizzle_widget,
                    self._fstop_widget,
                    self._fstop_decrement_widget,
                    self._fstop_increment_widget,
                    self._gain_widget,
                    self._exposure_widget,
                    self._gain_reset_widget,
                    self._gamma_widget,
                    self._gamma_reset_widget):
        control.setEnabled(enable)
#-----------------------------------------------------------------------------------------
def _addAction(self, identifier, command, icon_filename, tip, whats_this):
    """Find or create the Mari action for *identifier*, configure it, and return it."""
    action = mari.actions.find(identifier)
    if action is None:
        action = mari.actions.create(identifier, command)
    # Icons live in Mari's shared icon resource directory.
    action.setIconPath('%s/%s' % (mari.resources.path(mari.resources.ICONS), icon_filename))
    action.setStatusTip(tip)
    action.setToolTip(tip)
    action.setWhatsThis(whats_this)
    return action
#-----------------------------------------------------------------------------------------
def _addWidgetGroup(self):
    """Append a tightly-packed horizontal group to the toolbar.

    Returns a (container widget, horizontal layout) pair.
    """
    container = QtGui.QWidget()
    self._toolbar.addWidget(container)
    row = QtGui.QHBoxLayout()
    row.setSpacing(1)
    row.setMargin(1)
    container.setLayout(row)
    return (container, row)
#-----------------------------------------------------------------------------------------
def _addComboBox(self, label, items, value, default, value_changed, *args):
    """Add a labelled combo box group to the toolbar and return the combo box.

    *value_changed* is connected to the box's currentIndexChanged signal.
    """
    _, row = self._addWidgetGroup()
    row.addWidget(QtGui.QLabel(label))
    combo = QtGui.QComboBox()
    self._updateComboBox(combo, items, value, default)
    combo.connect(QtCore.SIGNAL('currentIndexChanged(const QString &)'), value_changed)
    row.addWidget(combo)
    return combo
#-----------------------------------------------------------------------------------------
def _updateComboBox(self, widget, items, value, default):
    """Repopulate *widget* with *items*, selecting *value* or falling back to *default*.

    Signals are blocked for the duration so repopulating does not fire
    currentIndexChanged handlers.
    """
    previous_block = widget.blockSignals(True)
    widget.clear()
    for entry in items:
        widget.addItem(entry)
    if value in items:
        widget.setCurrentIndex(items.index(value))
    elif default in items:
        widget.setCurrentIndex(items.index(default))
    widget.blockSignals(previous_block)
#-----------------------------------------------------------------------------------------
def _addSmallButtom(self, layout, label, tool_tip, value_changed, *args):
    """Add a 16x16 push button to *layout*, connect *value_changed* to its
    released signal, and return it."""
    button = QtGui.QPushButton(label)
    button.setToolTip(tool_tip)
    button.setFixedHeight(16)
    button.setFixedWidth(16)
    button.connect(QtCore.SIGNAL('released()'), value_changed)
    layout.addWidget(button)
    return button
#-----------------------------------------------------------------------------------------
def _convertFStopWidgetValueToGain(self, value):
    """Map an f-stop widget position (0..steps) back onto a gain value."""
    steps = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.FSTOP_STEP_SIZE))
    fraction = float(value) / float(steps)
    return ocio.convertExposureToGain(ocio.EXPOSURE_MIN + fraction * ocio.EXPOSURE_DELTA)
#-----------------------------------------------------------------------------------------
def _convertExposureWidgetValueToGain(self, value):
    """Map an exposure slider position (0..steps) back onto a gain value."""
    steps = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.EXPOSURE_STEP_SIZE))
    fraction = float(value) / float(steps)
    return ocio.convertExposureToGain(ocio.EXPOSURE_MIN + fraction * ocio.EXPOSURE_DELTA)
#-----------------------------------------------------------------------------------------
def _updateFStopWidgetText(self, fstop):
    """Show *fstop* in the read-only f-stop widget as 'f/N' or 'f/N.d'.

    Below f/10 the value is truncated (not rounded) to one decimal place and
    the decimal is dropped when it is zero; from f/10 upward only the integer
    part is shown. Signals are blocked while updating the text.
    """
    previous_block = self._fstop_widget.blockSignals(True)
    if fstop < 10.0:
        whole, _, frac = ('%f' % fstop).partition('.')
        # Truncate to a single decimal digit; omit it entirely when zero.
        label = whole if frac[0] == '0' else '%s.%s' % (whole, frac[0])
        self._fstop_widget.setText('f/%s' % label)
    else:
        self._fstop_widget.setText('f/%d' % int(fstop))
    self._fstop_widget.blockSignals(previous_block)
#-----------------------------------------------------------------------------------------
def _updateGainWidgetText(self):
    """Write the current gain into the gain line edit at the configured precision."""
    previous_block = self._gain_widget.blockSignals(True)
    gain_format = '%%.%df' % ocio.GAIN_PRECISION
    self._gain_widget.setText(gain_format % self._gain)
    # Scroll back to the start so the most significant digits stay visible.
    self._gain_widget.home(False)
    self._gain_widget.blockSignals(previous_block)
#-----------------------------------------------------------------------------------------
def _updateGainWidgets(self):
    """Synchronise the f-stop, gain and exposure widgets with the current gain.

    Each widget has a different integer resolution, so the gain is converted
    to a normalised exposure fraction and rescaled per widget. Signals are
    blocked to avoid feedback into the set* methods.
    """
    exposure = ocio.convertGainToExposure(self._gain)
    fstop = ocio.convertExposureToFStop(exposure)
    self._updateFStopWidgetText(fstop)
    scale = (exposure - ocio.EXPOSURE_MIN) / ocio.EXPOSURE_DELTA
    # F-stop widget resolution.
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.FSTOP_STEP_SIZE))
    widget_value = int(round(scale * float(widget_max)))
    block = self._fstop_widget.blockSignals(True)
    self._fstop_widget.setValue(widget_value)
    self._fstop_widget.blockSignals(block)
    self._updateGainWidgetText()
    # Gain edit and exposure slider share the exposure resolution.
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.EXPOSURE_STEP_SIZE))
    widget_value = int(round(scale * float(widget_max)))
    block = self._gain_widget.blockSignals(True)
    self._gain_widget.setValue(widget_value)
    self._gain_widget.blockSignals(block)
    block = self._exposure_widget.blockSignals(True)
    self._exposure_widget.setValue(widget_value)
    self._exposure_widget.blockSignals(block)
#-----------------------------------------------------------------------------------------
def _toggleColorManagement(self):
    """Apply the toggle action's checked state to GL post-processing and the widgets."""
    is_on = self.isColorManagementEnabled()
    mari.gl_render.setPostProcessingEnabled(is_on)
    self._enableWidgets(is_on)
    ocio.printMessage(ocio.MessageType.DEBUG, 'Toggled color management to \'%s\'' % ('on' if is_on else 'off'))
#-----------------------------------------------------------------------------------------
def _clearLUT(self):
    """Action handler: clear the current LUT by setting an empty path."""
    self.setLUTPath('')
    ocio.printMessage(ocio.MessageType.DEBUG, 'Cleared lut')
#-----------------------------------------------------------------------------------------
def _fstopMovedMouse(self, value):
    """Mouse-drag handler for the f-stop widget.

    Converts the widget position to a gain (without echoing back to the
    widgets via setGain) and then manually refreshes the f-stop text, gain
    text and the gain/exposure positions.
    """
    self.setGain(self._convertFStopWidgetValueToGain(float(value)), False)
    exposure = ocio.convertGainToExposure(self._gain)
    fstop = ocio.convertExposureToFStop(exposure)
    self._updateFStopWidgetText(fstop)
    self._updateGainWidgetText()
    # Rescale the exposure fraction to the gain/exposure widget resolution.
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.EXPOSURE_STEP_SIZE))
    scale = (exposure - ocio.EXPOSURE_MIN) / ocio.EXPOSURE_DELTA
    value = int(round(scale * float(widget_max)))
    self._gain_widget.setValue(value)
    value = max(min(value, widget_max), 0)
    self._exposure_widget.setValue(value)
#-----------------------------------------------------------------------------------------
def _gainMovedMouse(self, value):
    """Mouse-drag handler for the gain widget.

    Converts the widget position to a gain (without echoing back to the
    widgets via setGain) and then manually refreshes the gain text, exposure
    slider and the f-stop readout/position.
    """
    self.setGain(self._convertExposureWidgetValueToGain(float(value)), False)
    self._updateGainWidgetText()
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.EXPOSURE_STEP_SIZE))
    value = max(min(value, widget_max), 0)
    self._exposure_widget.setValue(value)
    exposure = ocio.convertGainToExposure(self._gain)
    fstop = ocio.convertExposureToFStop(exposure)
    self._updateFStopWidgetText(fstop)
    # Rescale the exposure fraction to the f-stop widget resolution.
    scale = (exposure - ocio.EXPOSURE_MIN) / ocio.EXPOSURE_DELTA
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.FSTOP_STEP_SIZE))
    value = int(round(scale * float(widget_max)))
    self._fstop_widget.setValue(value)
#-----------------------------------------------------------------------------------------
def _exposureChanged(self, value):
    """Slider handler for the exposure widget.

    Converts the slider position to a gain (updating metadata but not the
    widgets via setGain) and then manually refreshes the gain text/position
    and the f-stop readout/position.
    """
    self.setGain(value = self._convertExposureWidgetValueToGain(float(value)),
                 update_widget = False,
                 update_metadata = True)
    self._updateGainWidgetText()
    # The gain edit shares the slider's resolution, so reuse the raw value.
    self._gain_widget.setValue(value)
    exposure = ocio.convertGainToExposure(self._gain)
    fstop = ocio.convertExposureToFStop(exposure)
    self._updateFStopWidgetText(fstop)
    # Rescale the exposure fraction to the f-stop widget resolution.
    scale = (exposure - ocio.EXPOSURE_MIN) / ocio.EXPOSURE_DELTA
    widget_max = int(math.ceil(ocio.EXPOSURE_DELTA / ocio.FSTOP_STEP_SIZE))
    value = int(round(scale * float(widget_max)))
    self._fstop_widget.setValue(value)
#-----------------------------------------------------------------------------------------
# Metadata:
#-----------------------------------------------------------------------------------------
def _buildMetadata(self):
    """Publish the LUT and display filter settings as editable filter metadata.

    Connects the metadataValueChanged signals, pushes the current values, and
    marks each entry visible/editable with display names, defaults, ranges and
    step sizes where applicable.
    """
    # LUT:
    # ---
    mari.utils.connect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
    self._updateLUTMetadata()
    flags = self._lut_filter.METADATA_VISIBLE | self._lut_filter.METADATA_EDITABLE
    self._lut_filter.setMetadataFlags('File', flags)
    self._lut_filter.setMetadataFlags('Extrapolate', flags)
    # Display:
    # -------
    mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
    self._updateDisplayMetadata()
    self._display_filter.setMetadataDisplayName('ConfigPath', 'Configuration File')
    flags = self._display_filter.METADATA_VISIBLE | self._display_filter.METADATA_EDITABLE
    self._display_filter.setMetadataFlags('ConfigPath', flags)
    self._display_filter.setMetadataDisplayName('InputColorSpace', 'Input Color Space')
    self._display_filter.setMetadataFlags('InputColorSpace', flags)
    self._display_filter.setMetadataDisplayName('Display', 'Display Device')
    self._display_filter.setMetadataFlags('Display', flags)
    self._display_filter.setMetadataDisplayName('View', 'View Transform')
    self._display_filter.setMetadataFlags('View', flags)
    self._display_filter.setMetadataDisplayName('Swizzle', 'Component')
    self._display_filter.setMetadataFlags('Swizzle', flags)
    self._display_filter.setMetadataDefault('Gain', ocio.GAIN_RESET)
    self._display_filter.setMetadataRange('Gain', ocio.GAIN_MIN, ocio.GAIN_MAX)
    self._display_filter.setMetadataStep('Gain', ocio.GAIN_STEP_SIZE)
    self._display_filter.setMetadataFlags('Gain', flags)
    self._display_filter.setMetadataDefault('Gamma', ocio.GAMMA_RESET)
    self._display_filter.setMetadataRange('Gamma', ocio.GAMMA_MIN, ocio.GAMMA_MAX)
    self._display_filter.setMetadataStep('Gamma', ocio.GAMMA_STEP_SIZE)
    self._display_filter.setMetadataFlags('Gamma', flags)
#-----------------------------------------------------------------------------------------
def _updateLUTMetadata(self):
mari.utils.disconnect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
self._lut_filter.setMetadata('File', self._lut_file_list)
self._lut_filter.setMetadata('Extrapolate', self._lut_extrapolate)
mari.utils.connect(self._lut_filter.metadataValueChanged, lutMetadataValueChanged)
#-----------------------------------------------------------------------------------------
def _updateDisplayMetadata(self):
mari.utils.disconnect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
self._display_filter.setMetadata('ConfigPath', self._config_file_list)
color_spaces = [color_space.getName() for color_space in self._config.getColorSpaces()]
self._display_filter.setMetadata('InputColorSpace', self._color_space)
self._display_filter.setMetadataItemList('InputColorSpace', color_spaces)
self._display_filter.setMetadata('Display', self._display)
self._display_filter.setMetadataItemList('Display', self._config.getDisplays())
self._display_filter.setMetadata('View', self._view)
self._display_filter.setMetadataItemList('View', self._config.getViews(self._display))
self._display_filter.setMetadata('Swizzle', self._swizzle)
self._display_filter.setMetadataItemList('Swizzle', ocio.SWIZZLE_TYPES)
self._display_filter.setMetadata('Gain', self._gain)
self._display_filter.setMetadata('Gamma', self._gain)
mari.utils.connect(self._display_filter.metadataValueChanged, displayMetadataValueChanged)
#-----------------------------------------------------------------------------------------
# External Connections:
#-----------------------------------------------------------------------------------------
    def _openedProject(self, project):
        """Restore the color-management settings stored as metadata on *project*.

        Every setting falls back to its ocio.* default when the project does
        not carry it. Afterwards the display filter is rebuilt and the UI
        refreshed.
        """
        ocio.printMessage(ocio.MessageType.DEBUG, 'Loading settings for project \'%s\'' % project.name())
        # Load the settings stored as metadata on the project...
        # General:
        # -------
        self._toggle_color_management_action.setEnabled(True)
        self._toggle_color_management_action.setChecked(project.metadata('ColorEnabled') if project.hasMetadata('ColorEnabled') else ocio.enabled_default)
        # Enable/disable color management (MUST be done after modifications to 'self._toggle_color_management_action'.
        mari.gl_render.setPostProcessingEnabled(self.isColorManagementEnabled())
        filter_collection = None
        if project.hasMetadata('ColorProfile'):
            # *** IMPORTANT *** The post filter collection used to be called 'OpenColorIO' but was renamed to hide the
            # fact we use OpenColorIO from our users. So as a temporary workaround we need to check for the old filter
            # collection correct for it.
            name = project.metadata('ColorProfile')
            if name == 'OpenColorIO':
                name = 'Color Space'
            filter_collection = mari.gl_render.findPostFilterCollection(name)
        # Default the color management filter stack if the working one doesn't exist.
        if filter_collection is None:
            filter_collection = mari.gl_render.findPostFilterCollection(ocio.profile_default)
        mari.gl_render.setPostFilterCollection(filter_collection)
        # LUT:
        # ---
        lut_extrapolate = project.metadata('OcioLutExtrapolate') if project.hasMetadata('OcioLutExtrapolate') else ocio.lut_extrapolate_default
        # A change in extrapolation mode invalidates the compiled LUT shader.
        force_shader_build = lut_extrapolate != self._lut_extrapolate
        self._lut_extrapolate = lut_extrapolate
        self._lut_extrapolate_widget.setChecked(self._lut_extrapolate)
        if project.hasMetadata('OcioLutPath'):
            lut_path = ocio.buildLoadPath(project.metadata('OcioLutPath'))
            if not self.setLUTPath(value = lut_path, update_metadata = True, force_shader_build = force_shader_build):
                self.resetLUT()
        else:
            self.resetLUT()
        # Display:
        # -------
        self._color_space = project.metadata( 'OcioColorSpace') if project.hasMetadata('OcioColorSpace') else ocio.color_space_default
        self._display = project.metadata( 'OcioDisplay') if project.hasMetadata( 'OcioDisplay') else ocio.display_default
        self._view = project.metadata( 'OcioView') if project.hasMetadata( 'OcioView') else ocio.view_default
        self._swizzle = project.metadata( 'OcioSwizzle') if project.hasMetadata( 'OcioSwizzle') else ocio.swizzle_default
        # Clamp the stored gain into the legal [GAIN_MIN, GAIN_MAX] range.
        self._gain = max(min(project.metadata('OcioGain'),
                             ocio.GAIN_MAX),
                         ocio.GAIN_MIN) if project.hasMetadata( 'OcioGain') else ocio.gamma_default if False else ocio.gain_default
        self._gamma = project.metadata( 'OcioGamma') if project.hasMetadata( 'OcioGamma') else ocio.gamma_default
        # Attempt to load a configuration file...
        self._config_file_list.clear()
        self._config = None
        # 1. Environment variable.
        config_path = os.getenv('OCIO')
        if config_path is not None:
            self.setConfigPath(config_path)
        # 2. Project setting.
        if self._config is None and project.hasMetadata('OcioConfigPath'):
            self.setConfigPath(ocio.buildLoadPath(project.metadata('OcioConfigPath')))
        # 3. Use the default if nothing was found.
        if self._config is None:
            self._config_file_list = mari.FileList(ocio.config_file_list_default)
            self._config = ocio.config_default
        self._updateDisplayWidgets()
        self._rebuildDisplayFilter()
        self._enableWidgets(filter_collection.name() == 'Color Space' and self._toggle_color_management_action.isChecked())
        self._updateLUTMetadata()
        self._updateDisplayMetadata()
        self._printLog()
#-----------------------------------------------------------------------------------------
def _aboutToSaveProject(self, project):
ocio.printMessage(ocio.MessageType.DEBUG, 'Saving settings for project \'%s\'' % project.name())
# Store the settings as metadata on the project.
project.setMetadata( 'ColorEnabled', self.isColorManagementEnabled())
filter_collection = mari.gl_render.currentPostFilterCollection()
if filter_collection is not None:
project.setMetadata( 'ColorProfile', filter_collection.name())
project.setMetadata('OcioLutExtrapolate', self._lut_extrapolate)
project.setMetadata( 'OcioLutPath', '' if self._lut_file_list.isEmpty() else ocio.buildSavePath(self._lut_file_list.at(0)))
if os.getenv('OCIO') is None:
project.setMetadata('OcioConfigPath', '' if self._config_file_list.isEmpty() else ocio.buildSavePath(self._config_file_list.at(0)))
project.setMetadata( 'OcioColorSpace', self._color_space)
project.setMetadata( 'OcioDisplay', self._display)
project.setMetadata( 'OcioView', self._view)
project.setMetadata( 'OcioSwizzle', self._swizzle)
project.setMetadata( 'OcioGain', self._gain)
project.setMetadata( 'OcioGamma', self._gamma)
#-----------------------------------------------------------------------------------------
    def _closedProject(self):
        """Disable the color-management toggle and widgets when the project closes."""
        self._toggle_color_management_action.setEnabled(False)
        self._enableWidgets(False)
#-----------------------------------------------------------------------------------------
def _toolBarsCreated(self):
# Things like deleting Mari's configuration file and reseting the layout to the default will destroy the toolbar
# so we need to detect if this is the case and rebuild it!
toolbar = mari.app.findToolBar('Color Space')
if toolbar is None:
ocio.printMessage(ocio.MessageType.DEBUG, 'Rebuilding missing toolbar...')
self._buildWidgets()
#-----------------------------------------------------------------------------------------
def _postProcessingEnabled(self, enabled):
self._toggle_color_management_action.setChecked(enabled)
# Only enable or disable UI if we have a current project.
current_project = mari.projects.current()
if current_project is not None:
self._enableWidgets(enabled)
#-----------------------------------------------------------------------------------------
def _setCurrentPostFilterCollection(self):
# Only enable or disable UI if we have a current project.
current_project = mari.projects.current()
if current_project is not None:
filter_collection = mari.gl_render.currentPostFilterCollection()
if filter_collection is None or filter_collection.name() != 'Color Space':
ocio.printMessage(ocio.MessageType.DEBUG, 'Disabling OpenColorIO')
self._enableWidgets(False)
else:
ocio.printMessage(ocio.MessageType.DEBUG, 'Enabling OpenColorIO')
self._enableWidgets(True)
#-----------------------------------------------------------------------------------------
# Filter:
#-----------------------------------------------------------------------------------------
    def _rebuildLUTFilter(self, lut_path, force_shader_build = False):
        """(Re)build the LUT post filter for *lut_path*.

        An empty path clears the filter (shader snippets and texture).
        Returns True on success, False when the LUT file failed to load
        (the user is notified unless running in terminal mode).
        """
        if lut_path == '':
            # No LUT: strip the shader snippets and drop the sampler texture.
            self._lut_filter.setDefinitionsSnippet('')
            self._lut_filter.setBodySnippet('')
            if self._lut_sampler_name is not None:
                self._lut_filter.deleteTexture(self._lut_sampler_name)
                self._lut_sampler_name = None
            self._lut_filter_cache_id = None
            self._lut_texture_cache_id = None
        else:
            # There is a chance this is a bad file so we need to guard against it.
            try:
                self._lut_filter_cache_id, self._lut_texture_cache_id, self._lut_sampler_name = ocio.buildLUTFilter(
                    self._config,
                    lut_path,
                    self._lut_filter,
                    self._lut_filter_cache_id,
                    self._lut_texture_cache_id,
                    self._lut_extrapolate,
                    force_shader_build)
            except Exception, e:
                message = 'Failed to load LUT file \'%s\' due to \'%s\'' % (lut_path, e)
                ocio.printMessage(ocio.MessageType.ERROR, '%s' % message)
                if not mari.app.inTerminalMode():
                    mari.utils.misc.message(message, 'Color Space', 1024, 2)
                return False
        ocio.printMessage(ocio.MessageType.DEBUG, 'Changed LUT to \'%s\'' % lut_path)
        return True
#-----------------------------------------------------------------------------------------
    def _rebuildDisplayFilter(self):
        """Rebuild the display post filter from the current OCIO settings.

        Assembles a DisplayTransform (input color space, display/view, channel
        swizzle, linear gain, post-display gamma), bakes it into a GLSL filter
        via ocio.buildProcessorFilter, and repaints the current canvas.
        """
        display_transform = ocio.PyOpenColorIO.DisplayTransform()
        display_transform.setInputColorSpaceName(self._color_space)
        if hasattr(display_transform, 'setDisplay'):
            # OCIO 1.0+
            display_transform.setDisplay(self._display)
            display_transform.setView(self._view)
        else:
            # OCIO 0.8.X
            display_color_space = self._config.getDisplayColorSpaceName(self._display, self._view)
            display_transform.setDisplayColorSpaceName(display_color_space)
        # Add the channel swizzle.
        luma_coefs = self._config.getDefaultLumaCoefs()
        mtx, offset = ocio.PyOpenColorIO.MatrixTransform.View(ocio.SWIZZLE_VALUES[self._swizzle], luma_coefs)
        transform = ocio.PyOpenColorIO.MatrixTransform()
        transform.setValue(mtx, offset)
        display_transform.setChannelView(transform)
        # Add the linear gain.
        transform = ocio.PyOpenColorIO.CDLTransform()
        transform.setSlope((self._gain, self._gain, self._gain))
        display_transform.setLinearCC(transform)
        # Add the post-display CC (gamma applied as an exponent of 1/gamma,
        # clamped away from a zero divisor).
        transform = ocio.PyOpenColorIO.ExponentTransform()
        transform.setValue([1.0 / max(1e-6, v) for v in (self._gamma, self._gamma, self._gamma, self._gamma)])
        display_transform.setDisplayCC(transform)
        processor = self._config.getProcessor(display_transform)
        self._display_filter_cache_id, self._display_texture_cache_id, self._display_sampler_name = ocio.buildProcessorFilter(
            processor,
            self._display_filter,
            self._display_filter_cache_id,
            self._display_texture_cache_id)
        current_canvas = mari.canvases.current()
        if current_canvas is not None:
            current_canvas.repaint()
#-----------------------------------------------------------------------------------------
# Debugging:
#-----------------------------------------------------------------------------------------
    def _printLog(self):
        """Dump the complete current configuration (value and default) to the log."""
        ocio.printMessage( ocio.MessageType.INFO, '==============================================================')
        ocio.printMessage( ocio.MessageType.INFO, 'Configuration:')
        ocio.printMessage( ocio.MessageType.INFO, '==============================================================')
        ocio.printMessage( ocio.MessageType.INFO, ' Enabled: %s; Default: %s' % (mari.gl_render.isPostProcessingEnabled(),
                                                                                ocio.enabled_default))
        filter_collection = mari.gl_render.currentPostFilterCollection()
        if filter_collection is not None:
            ocio.printMessage(ocio.MessageType.INFO, ' Profile: %s; Default: %s' % (filter_collection.name(),
                                                                                    ocio.profile_default))
        else:
            ocio.printMessage(ocio.MessageType.INFO, ' Profile: None; Default: %s' % (ocio.profile_default))
        ocio.printMessage( ocio.MessageType.INFO, ' LUT Path: %s; Default: %s' % ('' if self._lut_file_list.isEmpty() else self._lut_file_list.at(0),
                                                                                 '' if ocio.lut_file_list_default.isEmpty() else ocio.lut_file_list_default.at(0)))
        ocio.printMessage( ocio.MessageType.INFO, ' Extrapolate: %s; Default: %s' % (self._lut_extrapolate,
                                                                                    ocio.lut_extrapolate_default))
        ocio.printMessage( ocio.MessageType.INFO, ' Config Path: %s; Default: %s' % ('' if self._config_file_list.isEmpty() else self._config_file_list.at(0),
                                                                                    '' if ocio.config_file_list_default.isEmpty() else ocio.config_file_list_default.at(0)))
        ocio.printMessage( ocio.MessageType.INFO, ' Color Space: %s; Default: %s' % (self._color_space,
                                                                                    ocio.color_space_default))
        ocio.printMessage( ocio.MessageType.INFO, ' Display: %s; Default: %s' % (self._display,
                                                                                ocio.display_default))
        ocio.printMessage( ocio.MessageType.INFO, ' View: %s; Default: %s' % (self._view,
                                                                             ocio.view_default))
        ocio.printMessage( ocio.MessageType.INFO, ' Swizzle: %s; Default: %s' % (self._swizzle,
                                                                                ocio.swizzle_default))
        ocio.printMessage( ocio.MessageType.INFO, ' F-Stop: %f; Default: %f; Center: %f' % (ocio.convertGainToFStop(self._gain),
                                                                                           ocio.convertGainToFStop(ocio.gain_default),
                                                                                           ocio.fstop_center))
        ocio.printMessage( ocio.MessageType.INFO, ' Gain: %f; Default: %f' % (self._gain,
                                                                             ocio.gain_default))
        ocio.printMessage( ocio.MessageType.INFO, ' Gamma: %f; Default: %f' % (self._gamma,
                                                                              ocio.gamma_default))
        ocio.printMessage( ocio.MessageType.INFO, '==============================================================')
##############################################################################################
# The following functions CAN'T be part of the toolbar class as a potential bug in PythonQt
# causes the disconnect function to fail
def lutMetadataValueChanged(name, value):
    """Callback: a LUT filter metadata entry was edited; mirror it on the toolbar."""
    global toolbar
    ocio.printMessage(ocio.MessageType.DEBUG, 'LUT metadata \'%s\' changed to \'%s\'' % (name, value))
    if name == 'File':
        # 'File' carries a file list; an empty list means "no LUT".
        new_path = '' if value.isEmpty() else value.at(0)
        toolbar.setLUTPath(value = new_path,
                           update_metadata = False,
                           force_shader_build = False)
    elif name == 'Extrapolate':
        toolbar.setExtrapolateEnabled(value = value, update_widget = True, update_metadata = False)
#-----------------------------------------------------------------------------------------
def displayMetadataValueChanged(name, value):
    """Callback: a display filter metadata entry was edited; mirror it on the toolbar."""
    global toolbar
    ocio.printMessage(ocio.MessageType.DEBUG, 'Display metadata \'%s\' changed to \'%s\'' % (name, value))
    if name == 'ConfigPath':
        # 'ConfigPath' carries a file list; all other entries are plain values.
        toolbar.setConfigPath(value = '' if value.isEmpty() else value.at(0), update_metadata = False)
        return
    setters = {
        'InputColorSpace': toolbar.setColorSpace,
        'Display': toolbar.setDisplay,
        'View': toolbar.setView,
        'Swizzle': toolbar.setSwizzle,
        'Gain': toolbar.setGain,
        'Gamma': toolbar.setGamma,
    }
    setter = setters.get(name)
    if setter is not None:
        setter(value = value, update_widget = True, update_metadata = False)
##############################################################################################
# Module entry point: build the toolbar only when Mari is running and supports
# post filter collections; otherwise tear down any stale OCIO UI.
if mari.app.isRunning():
    if not hasattr(mari.gl_render, 'createPostFilterCollection'):
        ocio.printMessage(ocio.MessageType.ERROR, 'This version of Mari does not support the mari.gl_render.createPostFilterCollection API')
    else:
        # Only build the toolbar when a default OCIO configuration loaded successfully.
        if ocio.config_default is not None:
            toolbar = OcioToolBar()
        else:
            # Destroy the OCIO post filter collection if present to prevent the user trying to use it.
            filter_collection = mari.gl_render.findPostFilterCollection('Color Space')
            if filter_collection is not None:
                mari.gl_render.deletePostFilterCollection(filter_collection)
            # Destroy the toolbar to prevent the user trying to use it.
            mari.app.deleteToolBar('Color Space')
| 51,709 | -1 | 1,311 |
3adaa59898dd69e5ed1505298901761c27f40382 | 3,367 | py | Python | fixit/common/tests/test_imports.py | dkgi/Fixit | 1a68680dac71b0b332da9ccb0b8b51161f270130 | [
"Apache-2.0"
] | null | null | null | fixit/common/tests/test_imports.py | dkgi/Fixit | 1a68680dac71b0b332da9ccb0b8b51161f270130 | [
"Apache-2.0"
] | null | null | null | fixit/common/tests/test_imports.py | dkgi/Fixit | 1a68680dac71b0b332da9ccb0b8b51161f270130 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from pathlib import Path
from libcst.testing.utils import UnitTest
from fixit.common.config import (
CACHE as CONFIG_CACHE,
get_lint_config,
get_rules_for_path,
)
from fixit.common.utils import (
dedent_with_lstrip,
DuplicateLintRuleNameError,
find_and_import_rule,
import_rule_from_package,
LintRuleNotFoundError,
)
DUMMY_PACKAGE: str = "fixit.common.tests.test_imports_dummy_package"
DUMMY_PACKAGE_PATH: Path = Path(__file__).parent / "test_imports_dummy_package"
DUPLICATE_DUMMY_PATH: Path = (
Path(__file__).parent / "test_imports_dummy_package_with_duplicate_rule"
)
# Using dummy config file, test whether the rule import helpers work as expected.
| 37 | 96 | 0.716068 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from pathlib import Path
from libcst.testing.utils import UnitTest
from fixit.common.config import (
CACHE as CONFIG_CACHE,
get_lint_config,
get_rules_for_path,
)
from fixit.common.utils import (
dedent_with_lstrip,
DuplicateLintRuleNameError,
find_and_import_rule,
import_rule_from_package,
LintRuleNotFoundError,
)
# Dotted import path of the package holding the dummy lint rules used by these tests.
DUMMY_PACKAGE: str = "fixit.common.tests.test_imports_dummy_package"
# Filesystem location of that dummy rule package (it sits next to this test module).
DUMMY_PACKAGE_PATH: Path = Path(__file__).parent / "test_imports_dummy_package"
# Package that deliberately defines a duplicate rule name, for the error-path test.
DUPLICATE_DUMMY_PATH: Path = (
    Path(__file__).parent / "test_imports_dummy_package_with_duplicate_rule"
)
# Using dummy config file, test whether the rule import helpers work as expected.
class ImportsTest(UnitTest):
    """Exercises the rule-import helpers against the dummy rule package."""

    def test_get_rules_from_config(self) -> None:
        discovered = {rule.__module__ for rule in get_rules_for_path(DUMMY_PACKAGE_PATH)}
        self.assertEqual(
            {
                f"{DUMMY_PACKAGE}.dummy_1",
                f"{DUMMY_PACKAGE}.dummy_2",
                f"{DUMMY_PACKAGE}.dummy_3",
            },
            discovered,
        )

    def test_get_rules_from_config_with_duplicate(self) -> None:
        # Two rules sharing one name within a package must be rejected.
        duplicate_path = (
            DUPLICATE_DUMMY_PATH / "subpackage_defining_duplicate_rule" / "dummy.py"
        )
        with self.assertRaises(DuplicateLintRuleNameError):
            get_rules_for_path(duplicate_path)

    def test_import_rule_from_package(self) -> None:
        packages = get_lint_config(DUMMY_PACKAGE_PATH).packages
        self.assertEqual(packages, [DUMMY_PACKAGE])
        # An existing dummy rule is importable and comes from the right module.
        rule = import_rule_from_package(packages[0], "DummyRule2")
        self.assertIsNotNone(rule)
        self.assertEqual(rule.__name__, "DummyRule2")
        self.assertEqual(rule.__module__, f"{DUMMY_PACKAGE}.dummy_2")
        # A rule that does not exist yields None.
        self.assertIsNone(import_rule_from_package(packages[0], "DummyRule1000"))

    def test_find_and_import_rule(self) -> None:
        packages = get_lint_config(DUMMY_PACKAGE_PATH).packages
        # The search returns the first match: DummyRule1 lives in the dummy_1 module.
        found = find_and_import_rule("DummyRule1", packages)
        self.assertEqual(found.__module__, f"{DUMMY_PACKAGE}.dummy_1")
        with self.assertRaises(LintRuleNotFoundError):
            find_and_import_rule("DummyRule1000", packages)
class NestedTest(UnitTest):
    """Checks whether nested rule packages inherit rules from their parent config."""

    def test_nested_rule_no_inherit_does_not_inherit(self) -> None:
        discovered = {
            rule.__module__
            for rule in get_rules_for_path(DUMMY_PACKAGE_PATH / "nested_no_inherit")
        }
        self.assertEqual({f"{DUMMY_PACKAGE}.dummy_2"}, discovered)

    def test_nested_rule_inherit_does_inherit(self) -> None:
        # Drop any cached config so the nested package's own config is re-read.
        CONFIG_CACHE.clear()
        discovered = {
            rule.__module__
            for rule in get_rules_for_path(DUMMY_PACKAGE_PATH / "nested_inherit")
        }
        self.assertEqual(
            {f"{DUMMY_PACKAGE}.dummy_1", f"{DUMMY_PACKAGE}.dummy_3"},
            discovered,
        )
| 2,250 | 13 | 205 |
3bf448d8e67ca350d1128281134e29fbeb3ed855 | 1,603 | py | Python | src/network/addrthread.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 1,583 | 2015-01-01T13:03:20.000Z | 2022-03-31T23:10:00.000Z | src/network/addrthread.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 841 | 2015-01-01T14:51:48.000Z | 2022-03-25T06:45:14.000Z | src/network/addrthread.py | kaue/PyBitmessage | 7b8bf082ff0d569f507d65e087000e4e3d6ccf3f | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 482 | 2015-01-07T00:53:25.000Z | 2022-03-24T15:58:12.000Z | """
Announce addresses as they are received from other hosts
"""
import Queue
import state
from helper_random import randomshuffle
from network.assemble import assemble_addr
from network.connectionpool import BMConnectionPool
from queues import addrQueue
from threads import StoppableThread
class AddrThread(StoppableThread):
"""(Node) address broadcasting thread"""
name = "AddrBroadcaster"
| 32.714286 | 74 | 0.535246 | """
Announce addresses as they are received from other hosts
"""
import Queue
import state
from helper_random import randomshuffle
from network.assemble import assemble_addr
from network.connectionpool import BMConnectionPool
from queues import addrQueue
from threads import StoppableThread
class AddrThread(StoppableThread):
    """(Node) address broadcasting thread"""
    name = "AddrBroadcaster"

    def run(self):
        """Drain queued addresses and relay them to random established peers until shutdown."""
        while not state.shutdown:
            # Pull everything currently waiting on the queue without blocking.
            pending = []
            while True:
                try:
                    pending.append(addrQueue.get(False))
                except Queue.Empty:
                    break
            if pending:
                # Choose peers randomly
                peers = BMConnectionPool().establishedConnections()
                randomshuffle(peers)
                for connection in peers:
                    randomshuffle(pending)
                    # Skip the peer's own address, addresses learned from that
                    # peer, and streams the peer is not subscribed to.
                    relayable = [
                        (stream, peer, seen)
                        for stream, peer, seen, destination in pending
                        if connection.destination not in (peer, destination)
                        and stream in connection.streams
                    ]
                    if relayable:
                        connection.append_write_buf(assemble_addr(relayable))
                addrQueue.iterate()
                for _ in pending:
                    addrQueue.task_done()
            self.stop.wait(1)
| 1,173 | 0 | 27 |
e43a18ae3fda6f1eb82d9a2621cb84585c33fe6f | 25 | py | Python | branch.py | Audrey-Newman/cs3240-labdemo | 9bc1562616969bd6b7c4cfbbeb42766fb98c4d62 | [
"MIT"
] | null | null | null | branch.py | Audrey-Newman/cs3240-labdemo | 9bc1562616969bd6b7c4cfbbeb42766fb98c4d62 | [
"MIT"
] | null | null | null | branch.py | Audrey-Newman/cs3240-labdemo | 9bc1562616969bd6b7c4cfbbeb42766fb98c4d62 | [
"MIT"
] | null | null | null | print("branches are fun") | 25 | 25 | 0.76 | print("branches are fun") | 0 | 0 | 0 |
31947d277ef5f038f0c95f56a0c2e29cb0845520 | 271 | py | Python | _game_chats/templatetags/custom_filters.py | cansarigol/game-chats | 4b58e368cbb68126cfeeea2297c8b9bae5679916 | [
"MIT"
] | 1 | 2018-03-28T10:38:59.000Z | 2018-03-28T10:38:59.000Z | _game_chats/templatetags/custom_filters.py | cansarigol/game-chats | 4b58e368cbb68126cfeeea2297c8b9bae5679916 | [
"MIT"
] | 1 | 2018-02-21T14:11:15.000Z | 2018-02-28T13:54:26.000Z | _game_chats/templatetags/custom_filters.py | cansarigol/game-chats | 4b58e368cbb68126cfeeea2297c8b9bae5679916 | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.filter(name='addcss')
| 22.583333 | 82 | 0.708487 | from django import template
register = template.Library()
@register.filter(name='addcss')
def addcss(field, css_addition):
    """Template filter: render *field* with *css_addition* appended to its CSS classes."""
    existing_classes = field.field.widget.attrs.get('class', '')
    combined = "%s %s" % (existing_classes, css_addition)
    return field.as_widget(attrs={"class": combined})
| 154 | 0 | 22 |
da993499bc9ffd9e1ef582071f0dd064a15e6212 | 1,733 | py | Python | wxbtool/data/variables.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | 3 | 2021-03-14T03:15:40.000Z | 2021-09-30T16:32:52.000Z | wxbtool/data/variables.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | null | null | null | wxbtool/data/variables.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | 1 | 2021-03-14T03:15:40.000Z | 2021-03-14T03:15:40.000Z | # -*- coding: utf-8 -*-
vars2d = [
'2m_temperature',
'10m_u_component_of_wind', '10m_v_component_of_wind',
'total_cloud_cover', 'total_precipitation',
'toa_incident_solar_radiation',
'temperature_850hPa',
]
vars3d = [
'geopotential', 'temperature',
'specific_humidity', 'relative_humidity',
'u_component_of_wind', 'v_component_of_wind',
'vorticity', 'potential_vorticity',
]
codes = {
'geopotential': 'z',
'temperature': 't',
'temperature_850hPa': 't',
'specific_humidity': 'q',
'relative_humidity': 'r',
'u_component_of_wind': 'u',
'v_component_of_wind': 'v',
'vorticity': 'vo',
'potential_vorticity': 'pv',
'2m_temperature': 't2m',
'10m_u_component_of_wind': 'u10',
'10m_v_component_of_wind': 'v10',
'total_cloud_cover': 'tcc',
'total_precipitation': 'tp',
'toa_incident_solar_radiation': 'tisr',
}
code2var = {
'z': 'geopotential',
't': 'temperature',
'q': 'specific_humidity',
'r': 'relative_humidity',
'u': 'u_component_of_wind',
'v': 'v_component_of_wind',
'vo': 'vorticity',
'pv': 'potential_vorticity',
't2m': '2m_temperature',
'u10': '10m_u_component_of_wind',
'v10': '10m_v_component_of_wind',
'tcc': 'total_cloud_cover',
'tp': 'total_precipitation',
'tisr': 'toa_incident_solar_radiation',
}
| 27.951613 | 136 | 0.622043 | # -*- coding: utf-8 -*-
# Surface / single-level (2D) variable names.
# NOTE(review): these look like ERA5/WeatherBench-style dataset names — confirm
# against the data loader that consumes them.
vars2d = [
    '2m_temperature',
    '10m_u_component_of_wind', '10m_v_component_of_wind',
    'total_cloud_cover', 'total_precipitation',
    'toa_incident_solar_radiation',
    'temperature_850hPa',
]
# Multi-pressure-level (3D) variable names.
vars3d = [
    'geopotential', 'temperature',
    'specific_humidity', 'relative_humidity',
    'u_component_of_wind', 'v_component_of_wind',
    'vorticity', 'potential_vorticity',
]
# Long variable name -> short code. Not invertible as-is: both 'temperature'
# and the level-specific alias 'temperature_850hPa' map to 't'.
codes = {
    'geopotential': 'z',
    'temperature': 't',
    'temperature_850hPa': 't',
    'specific_humidity': 'q',
    'relative_humidity': 'r',
    'u_component_of_wind': 'u',
    'v_component_of_wind': 'v',
    'vorticity': 'vo',
    'potential_vorticity': 'pv',
    '2m_temperature': 't2m',
    '10m_u_component_of_wind': 'u10',
    '10m_v_component_of_wind': 'v10',
    'total_cloud_cover': 'tcc',
    'total_precipitation': 'tp',
    'toa_incident_solar_radiation': 'tisr',
}
# Short code -> long variable name (inverse of `codes`, minus the
# 'temperature_850hPa' alias).
code2var = {
    'z': 'geopotential',
    't': 'temperature',
    'q': 'specific_humidity',
    'r': 'relative_humidity',
    'u': 'u_component_of_wind',
    'v': 'v_component_of_wind',
    'vo': 'vorticity',
    'pv': 'potential_vorticity',
    't2m': '2m_temperature',
    'u10': '10m_u_component_of_wind',
    'v10': '10m_v_component_of_wind',
    'tcc': 'total_cloud_cover',
    'tp': 'total_precipitation',
    'tisr': 'toa_incident_solar_radiation',
}
def split_name(composite):
    """Split a composite variable code into its (code, level) parts.

    Surface variables ('t2m', 'u10', 'v10', 'tcc', 'tp', 'tisr') have no level
    and come back unchanged with an empty level string. Otherwise the code
    prefix is two characters for 'vo'/'pv' and one character for everything
    else, with the remainder returned as the level, e.g. 'z500' -> ('z', '500').
    """
    # Surface variables carry no pressure level.
    if composite in ('t2m', 'u10', 'v10', 'tcc', 'tp', 'tisr'):
        return composite, ''
    # Only 'vo' and 'pv' use a two-character code prefix.
    if composite[:2] in ('vo', 'pv'):
        return composite[:2], composite[2:]
    return composite[:1], composite[1:]
| 350 | 0 | 23 |
cc8af49aa90076ada3fd9cdddb4cd4e13405a748 | 1,457 | py | Python | steppy/controllers/base_controller.py | ygravrand/steppy | f1663da562243912afa7c016cf2aa517c20ed937 | [
"BSD-3-Clause"
] | 34 | 2016-12-13T14:46:53.000Z | 2021-05-03T17:24:35.000Z | steppy/controllers/base_controller.py | ygravrand/steppy | f1663da562243912afa7c016cf2aa517c20ed937 | [
"BSD-3-Clause"
] | 3 | 2016-12-15T11:07:08.000Z | 2022-01-23T10:09:04.000Z | steppy/controllers/base_controller.py | ygravrand/steppy | f1663da562243912afa7c016cf2aa517c20ed937 | [
"BSD-3-Clause"
] | 4 | 2017-02-11T14:02:45.000Z | 2018-11-11T16:42:18.000Z | # -*- coding: utf-8 -*-
"""
StepPy
:copyright: (c) 2016-2017 by Yann Gravrand.
:license: BSD, see LICENSE for more details.
"""
from collections import OrderedDict
| 33.883721 | 88 | 0.612903 | # -*- coding: utf-8 -*-
"""
StepPy
:copyright: (c) 2016-2017 by Yann Gravrand.
:license: BSD, see LICENSE for more details.
"""
from collections import OrderedDict
class BaseController(object):
    """Base class for controllers: routes incoming messages through the
    rules chains registered on this instance."""

    def __init__(self, sequencer, console, port_name=''):
        self.sequencer = sequencer
        self.console = console
        self.port_name = port_name
        # name -> (callback, rules chain), kept in registration order
        self.rules_chains = OrderedDict()

    def register(self, name, func, rules_chain):
        """Register a function which will be called when the rules chain matches.

        In:
          - ``name`` -- A name for this registration
          - ``func`` -- A callable which will be given as arguments:
            - ``matched_messages`` -- The list of messages having matched the rules chain
            - ``matched_rules`` -- The list of rules in the rules chain
          - ``rules_chain`` -- The rules chain
        """
        self.rules_chains[name] = (func, rules_chain)

    def handle_message(self, msg):
        """Log *msg* and run it through every registered rules chain."""
        self.console.print_('Msg received: %s' % msg)
        for chain_name, (callback, chain) in self.rules_chains.items():
            matched, _partial, result = chain.run(callback, msg)
            if matched:
                self.console.print_('%s : %s' % (chain_name, result))

    def get_rules_chain_by_name(self, name):
        """Return the rules chain registered under *name*, or None if unknown."""
        entry = self.rules_chains.get(name)
        if entry is not None:
            return entry[1]

    def __repr__(self):
        return self.port_name or 'GENERIC'
| 600 | 656 | 23 |