Dataset schema (39 columns, one row per source file; ⌀ marks columns that may be null):
hexsha: string (length 40)
size: int64 (5 to 2.06M)
ext: string (10 distinct values)
lang: string (1 distinct value)
max_stars_repo_path: string (length 3 to 248)
max_stars_repo_name: string (length 5 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 248)
max_issues_repo_name: string (length 5 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 248)
max_forks_repo_name: string (length 5 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 5 to 2.06M)
avg_line_length: float64 (1 to 1.02M)
max_line_length: int64 (3 to 1.03M)
alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M)
score_classes: float64 (0 to 1)
count_generators: int64 (0 to 651k)
score_generators: float64 (0 to 1)
count_decorators: int64 (0 to 990k)
score_decorators: float64 (0 to 1)
count_async_functions: int64 (0 to 235k)
score_async_functions: float64 (0 to 1)
count_documentation: int64 (0 to 1.04M)
score_documentation: float64 (0 to 1)
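A minimal sketch of how rows with this schema could be filtered, assuming they have already been loaded into a pandas DataFrame named df; the loading step, the helper name select_well_documented, and the 0.5 threshold are illustrative assumptions, not part of the dataset.

import pandas as pd

def select_well_documented(df: pd.DataFrame, min_score: float = 0.5) -> pd.DataFrame:
    # Keep Python rows whose documentation score reaches the (illustrative) threshold,
    # returning only a few identifying columns from the schema above.
    mask = (df["lang"] == "Python") & (df["score_documentation"] >= min_score)
    return df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "score_documentation"]]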
Row 1
hexsha: 12a668f147490b052289202d9372f523023dc419 | size: 3,820 | ext: py | lang: Python
max_stars:  path=yeti/core/model/stix/sro.py | repo=yeti-platform/TibetanBrownBear | head=8ab520bd199a63e404b3a6a5b49a29f277384e8e | licenses=["Apache-2.0"] | count=9 | stars events 2018-01-15T22:44:24.000Z to 2021-05-28T11:13:03.000Z
max_issues: path=yeti/core/model/stix/sro.py | repo=yeti-platform/TibetanBrownBear | head=8ab520bd199a63e404b3a6a5b49a29f277384e8e | licenses=["Apache-2.0"] | count=140 | issues events 2018-01-12T10:07:47.000Z to 2021-08-02T23:03:49.000Z
max_forks:  path=yeti/core/model/stix/sro.py | repo=yeti-platform/TibetanBrownBear | head=8ab520bd199a63e404b3a6a5b49a29f277384e8e | licenses=["Apache-2.0"] | count=11 | forks events 2018-01-16T19:49:35.000Z to 2022-01-18T16:30:34.000Z
content:
"""Detail Yeti's Entity object structure."""
import json
from yeti.core.errors import ValidationError
from .base import StixObject
class StixSRO(StixObject):
def __init__(self, db_from, db_to, attributes):
self._db_from = db_from
self._db_to = db_to
super().__init__(**attributes)
@classmethod
def get(cls, key):
"""Fetches the most recent version of a STIX Relationship given its
STIX ID.
Args:
key: The STIX ID of the Relationship to fetch.
Returns:
A STIX Relationship object.
"""
all_versions = cls.filter({'attributes.id': key})
if not all_versions:
return None
winner = all_versions[0]
for version in all_versions:
if version.modified > winner.modified:
winner = version
return winner
def all_versions(self):
"""Returns all versions of a STIX object given its key.
Returns:
A list of STIX objects.
"""
return super().filter({'attributes.id': self.id}, latest=False)
def dump(self, destination='db'):
"""Dumps an Entity object into its STIX JSON representation.
Args:
destination: Since STIX2 uses IDs as means to identify a single object
we need to transform the object depending on whether it is being
sent to the database or to a web client.
Returns:
The Entity's JSON representation in dictionary form.
"""
attributes = json.loads(self._stix_object.serialize())
if destination == 'db':
return {
'id': None,
'_from': self._db_from,
'_to': self._db_to,
'attributes': attributes
}
return attributes
@classmethod
def load_stix(cls, args):
"""Translate information from the backend into a valid STIX definition.
Will instantiate a STIX object from that definition.
Args:
args: The dictionary to use to create the STIX object.
            strict: Unused, kept to be consistent with the overridden method.
Returns:
            The corresponding STIX object.
Raises:
ValidationError: If a STIX object could not be instantiated from the
serialized data.
"""
if isinstance(args, list):
return [cls.load_stix(item) for item in args]
subclass = cls.get_final_datatype(args['attributes'])
db_id = args.pop('_id', None)
db_from = args.pop('_from')
db_to = args.pop('_to')
args.pop('_rev', None)
stix_rel = args['attributes']
try:
obj = subclass(db_from, db_to, stix_rel)
if db_id:
obj._arango_id = db_id # pylint: disable=protected-access
except Exception as err:
raise ValidationError(str(err))
return obj
@property
def type(self):
return self._stix_object.type
@property
def id(self):
return self._stix_object.id
@property
def created_by_ref(self):
return self._stix_object.created_by_ref
@property
def created(self):
return self._stix_object.created
@property
def modified(self):
return self._stix_object.modified
@property
def revoked(self):
return self._stix_object.revoked
@property
def labels(self):
return self._stix_object.labels
@property
def external_references(self):
return self._stix_object.external_references
@property
def object_marking_refs(self):
return self._stix_object.object_marking_refs
@property
def granular_markings(self):
return self._stix_object.granular_markings
avg_line_length: 28.296296 | max_line_length: 80 | alphanum_fraction: 0.606545
count_classes: 3,686 | score_classes: 0.964921 | count_generators: 0 | score_generators: 0 | count_decorators: 2,475 | score_decorators: 0.647906 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,401 | score_documentation: 0.366754
Row 2
hexsha: 12a754908091d00ea075e8ffe5d6a23ed6d1b3e0 | size: 4,761 | ext: py | lang: Python
max_stars:  path=netforce_mfg/netforce_mfg/models/barcode_qc.py | repo=nfco/netforce | head=35252eecd0a6633ab9d82162e9e3ff57d4da029a | licenses=["MIT"] | count=27 | stars events 2015-09-30T23:53:30.000Z to 2021-06-07T04:56:25.000Z
max_issues: path=netforce_mfg/netforce_mfg/models/barcode_qc.py | repo=nfco/netforce | head=35252eecd0a6633ab9d82162e9e3ff57d4da029a | licenses=["MIT"] | count=191 | issues events 2015-10-08T11:46:30.000Z to 2019-11-14T02:24:36.000Z
max_forks:  path=netforce_mfg/netforce_mfg/models/barcode_qc.py | repo=nfco/netforce | head=35252eecd0a6633ab9d82162e9e3ff57d4da029a | licenses=["MIT"] | count=32 | forks events 2015-10-01T03:59:43.000Z to 2022-01-13T07:31:05.000Z
content:
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
class BarcodeQC(Model):
_name = "barcode.qc"
_transient = True
_fields = {
"production_id": fields.Many2One("production.order", "Production Order"),
"test_id": fields.Many2One("qc.test", "QC Test"),
"sample_qty": fields.Decimal("Sampling Qty", scale=6),
"min_value": fields.Decimal("Min Value", function="_get_related", function_context={"path": "test_id.min_value"}),
"max_value": fields.Decimal("Max Value", function="_get_related", function_context={"path": "test_id.max_value"}),
"value": fields.Char("Value"),
"result": fields.Selection([["yes", "Pass"], ["no", "Not Pass"], ["na", "N/A"]], "Result"),
"lines": fields.One2Many("barcode.qc.line", "barcode_id", "Lines"),
}
def fill_qc_tests(self, ids, context={}):
obj = self.browse(ids)[0]
prod_order = obj.production_id
if not prod_order:
raise Exception("Please select production order")
for qc_test in prod_order.qc_tests:
vals = {
"barcode_id": obj.id,
"test_id": qc_test.test_id.id,
"sample_qty": qc_test.sample_qty,
"value": qc_test.value,
"min_value": qc_test.min_value,
"max_value": qc_test.max_value,
"result": qc_test.result,
"prod_qc_id": qc_test.id
}
get_model("barcode.qc.line").create(vals)
def validate(self, ids, context={}):
obj = self.browse(ids)[0]
prod_order = obj.production_id
if not prod_order:
raise Exception("Plesae select production order")
prod_order.write({"qc_tests": [("delete_all",)]})
for line in obj.lines:
vals = {
"order_id": prod_order.id,
"test_id": line.test_id.id,
"sample_qty": line.sample_qty,
"value": line.value,
"min_value": line.min_value,
"max_value": line.max_value,
"result": line.result,
}
get_model("production.qc").create(vals)
obj.write({
"production_id": None,
"test_id": None,
"sample_qty": None,
"value": None,
"result": None,
"lines": [("delete_all",)],
})
return {
"flash": "QC result recorded successfully for production order %s" % obj.production_id.number,
"focus_field": "production_id",
}
def onchange_qc_value(self, context={}):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
try:
value = float(line.get("value"))
except:
return
min_value = line.get("min_value")
max_value = line.get("max_value")
if min_value and value < min_value:
line["result"] = "no"
elif max_value and value > max_value:
line["result"] = "no"
else:
line["result"] = "yes"
return data
def onchange_qc_test(self, context={}):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
test_id = line.get("test_id")
if not test_id:
return
test = get_model("qc.test").browse(test_id)
line["min_value"] = test.min_value
line["max_value"] = test.max_value
self.onchange_qc_value(context)
return data
BarcodeQC.register()
avg_line_length: 40.008403 | max_line_length: 122 | alphanum_fraction: 0.603025
count_classes: 3,538 | score_classes: 0.743121 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,990 | score_documentation: 0.417979
Row 3
hexsha: 12a82679ea427e2384e89df55cbadd443f41af9e | size: 4,739 | ext: py | lang: Python
max_stars:  path=src/data/domain.py | repo=AlexMoreo/pydci | head=44f8fe1ce95da45709061cbe19fa6f462c1f2164 | licenses=["BSD-3-Clause"] | count=7 | stars events 2018-10-21T17:34:08.000Z to 2021-05-17T11:37:56.000Z
max_issues: path=src/data/domain.py | repo=AlexMoreo/pydci | head=44f8fe1ce95da45709061cbe19fa6f462c1f2164 | licenses=["BSD-3-Clause"] | count=null | issues events=null
max_forks:  path=src/data/domain.py | repo=AlexMoreo/pydci | head=44f8fe1ce95da45709061cbe19fa6f462c1f2164 | licenses=["BSD-3-Clause"] | count=4 | forks events 2018-11-22T10:30:07.000Z to 2021-03-20T10:07:57.000Z
content:
import pickle
from scipy.sparse import lil_matrix
import numpy as np
class Domain:
"""
    Defines a domain, composed of a labelled set and an unlabeled set. All sets share a common vocabulary.
The domain is also characterized by its name and language.
"""
def __init__(self, X, y, U, vocabulary, domain, language='en'):
"""
:param X: the document collection
:param y: the document labels
:param U: the unlabeled collection
:param vocabulary: the feature space of X and U
:param domain: a descriptive name of the domain
:param language: a descriptive name of the language
"""
self.X = X
self.y = y
self.U = U
self.V=vocabulary if isinstance(vocabulary, Vocabulary) else Vocabulary(vocabulary)
self.domain = domain
self.language = language
def name(self):
return '{}_{}'.format(self.language,self.domain)
def dump(self, path):
pickle.dump(self, open(path, 'wb'), pickle.HIGHEST_PROTOCOL)
def show(self):
print('domain: '+self.domain)
print('language: ' + self.language)
print('|V|={}'.format(len(self.V)))
print('|X|={} (prev={})'.format(self.X.shape[0], self.y.mean()))
print('|U|={}'.format(self.U.shape[0]))
@classmethod
def load(cls, path):
domain = pickle.load(open(path, 'rb'))
assert isinstance(domain, Domain), 'wrong pickle'
return domain
class Vocabulary:
"""
A bidirectional dictionary words->id and id->words
"""
def __init__(self, word2idx_dict):
self._word2idx = word2idx_dict
self._idx2word = {idx:word for word, idx in word2idx_dict.items()}
def word2idx(self, word):
if word in self._word2idx:
return self._word2idx[word]
return None
def idx2word(self, idx):
if idx in self._idx2word:
return self._idx2word[idx]
return None
def __len__(self):
return len(self._word2idx)
def term_set(self):
return set(self._word2idx.keys())
def index_list(self):
return sorted(self._idx2word.keys())
def __contains__(self, word):
return word in self._word2idx
class WordOracle:
"""
    An oracle that, given a source term, returns the target translation, or vice versa.
As defined by Prettenhofer, Peter, and Benno Stein. "Cross-language text classification using structural
correspondence learning." Proceedings of the 48th annual meeting of the association for computational linguistics.
Association for Computational Linguistics, 2010.
"""
def __init__(self, dictionary, source, target, analyzer=None):
self.source = source
self.target = target
self.s2t_dict = {_preproc(analyzer, s) : _preproc(analyzer, t) for s, t in dictionary.items()} if analyzer else dictionary
self.t2s_dict = {v:k for k,v in dictionary.items()}
def source2target(self, word):
if word in self.s2t_dict.keys():
return self.s2t_dict[word]
return None
def target2source(self, word):
if word in self.t2s_dict.keys():
return self.t2s_dict[word]
return None
def _preproc(analyzer, str):
return analyzer(str)[0] if analyzer(str) else 'null__'
def pack_domains(source, target, pivots_source, pivots_target):
dX = {source.name(): source.X, target.name(): target.X}
dU = {source.name(): source.U, target.name(): target.U}
dP = {source.name(): pivots_source, target.name(): pivots_target}
dV = {source.name(): source.V, target.name(): target.V}
return dX, dU, dP, dV
def unify_feat_space(source, target):
"""
Given a source and a target domain, returns two new versions of them in which the feature spaces are common, by
    trivially juxtaposing the two vocabularies
:param source: the source domain
:param target: the target domain
:return: a new version of the source and the target domains where the feature space is common
"""
word_set = source.V.term_set().union(target.V.term_set())
word2idx = {w:i for i,w in enumerate(word_set)}
Vshared = Vocabulary(word2idx)
def reindexDomain(domain, sharedV):
V = domain.V
nD=domain.X.shape[0]
nF=len(sharedV)
newX = lil_matrix((nD,nF))
domainIndexes = np.array(V.index_list())
sharedIndexes = np.array([sharedV.word2idx(w) for w in [V.idx2word(i) for i in domainIndexes]])
newX[:,sharedIndexes]=domain.X[:,domainIndexes]
return Domain(newX.tocsr(),domain.y,None,sharedV,domain.domain+'_shared',domain.language)
return reindexDomain(source, Vshared), reindexDomain(target, Vshared)
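# --- Usage sketch (added for illustration; not part of the original module) ---
# Two toy domains over disjoint vocabularies are merged into one shared feature
# space via unify_feat_space(); the toy matrices, labels and names are assumptions.
if __name__ == '__main__':
    from scipy.sparse import csr_matrix
    Vs = Vocabulary({'good': 0, 'bad': 1})
    Vt = Vocabulary({'bueno': 0, 'malo': 1})
    source = Domain(csr_matrix(np.eye(2)), np.array([1, 0]), None, Vs, 'books', 'en')
    target = Domain(csr_matrix(np.eye(2)), np.array([1, 0]), None, Vt, 'books', 'es')
    s_shared, t_shared = unify_feat_space(source, target)
    # the shared feature space is the union of both vocabularies (4 terms here)
    assert s_shared.X.shape[1] == len(s_shared.V) == 4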
avg_line_length: 34.845588 | max_line_length: 130 | alphanum_fraction: 0.650559
count_classes: 3,158 | score_classes: 0.666385 | count_generators: 0 | score_generators: 0 | count_decorators: 164 | score_decorators: 0.034606 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,387 | score_documentation: 0.292678
Row 4
hexsha: 12a832b1e6427f5514100a7f00be3d2042f2ed0f | size: 207 | ext: py | lang: Python
max_stars:  path=LeetCode_1304.py | repo=xulu199705/LeetCode | head=9a654a10117a93f9ad9728d6b86eb3713185545e | licenses=["MIT"] | count=null | stars events=null
max_issues: path=LeetCode_1304.py | repo=xulu199705/LeetCode | head=9a654a10117a93f9ad9728d6b86eb3713185545e | licenses=["MIT"] | count=null | issues events=null
max_forks:  path=LeetCode_1304.py | repo=xulu199705/LeetCode | head=9a654a10117a93f9ad9728d6b86eb3713185545e | licenses=["MIT"] | count=null | forks events=null
content:
from typing import List
class Solution:
def sumZero(self, n: int) -> List[int]:
ans = [x for x in range(-(n//2), n//2 + 1)]
if n % 2 == 0:
ans.remove(0)
return ans
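# Worked examples (added for illustration; not part of the original solution):
#   Solution().sumZero(5) -> [-2, -1, 0, 1, 2]  (odd n keeps the 0 from the symmetric range)
#   Solution().sumZero(4) -> [-2, -1, 1, 2]     (even n: the 0 is removed, leaving exactly n values that sum to 0)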
avg_line_length: 18.818182 | max_line_length: 51 | alphanum_fraction: 0.492754
count_classes: 181 | score_classes: 0.874396 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0
Row 5
hexsha: 12a8abd596e75426da116460419af8dc9c55b01d | size: 1,506 | ext: py | lang: Python
max_stars:  path=models/universal_sentence_encoder_multilingual_qa/v1/utils.py | repo=rhangelxs/russian_embeddings | head=64821cdff03ff97752b6c80621bedf9e2227a0ba | licenses=["MIT"] | count=null | stars events=null
max_issues: path=models/universal_sentence_encoder_multilingual_qa/v1/utils.py | repo=rhangelxs/russian_embeddings | head=64821cdff03ff97752b6c80621bedf9e2227a0ba | licenses=["MIT"] | count=5 | issues events 2020-09-26T00:18:44.000Z to 2022-02-10T00:22:42.000Z
max_forks:  path=models/universal_sentence_encoder_multilingual_qa/v1/utils.py | repo=rhangelxs/russian_embeddings | head=64821cdff03ff97752b6c80621bedf9e2227a0ba | licenses=["MIT"] | count=null | forks events=null
content:
import numpy
import tensorflow as tf
import tensorflow_hub as hub
import tf_sentencepiece
class EmbeddingWrapper:
def __init__(self):
module_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/1"
# Set up graph.
g = tf.Graph()
with g.as_default():
self.module = hub.Module(module_url) # load tfhub module
self.question = tf.placeholder(dtype=tf.string, shape=[None]) # question
self.response = tf.placeholder(dtype=tf.string, shape=[None]) # response
self.response_context = tf.placeholder(
dtype=tf.string, shape=[None]) # response context
self.question_embedding = self.module(
self.question, signature="question_encoder", as_dict=True)
self.response_embedding = self.module(
inputs={
"input": self.response,
"context": self.response_context
},
signature="response_encoder",
as_dict=True)
init_op = tf.group(
[tf.global_variables_initializer(),
tf.tables_initializer()])
g.finalize()
# Initialize session.
session = tf.Session(graph=g)
session.run(init_op)
self.session = session
def str2vec(self, string):
result = self.session.run(self.question_embedding, feed_dict={self.question: [string]})['outputs'][0]
return result
avg_line_length: 36.731707 | max_line_length: 109 | alphanum_fraction: 0.592961
count_classes: 1,412 | score_classes: 0.937583 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 225 | score_documentation: 0.149402
Row 6
hexsha: 12a970b715888d87283271740bd7a109a0ea7f3e | size: 921 | ext: py | lang: Python
max_stars:  path=jade/extensions/demo/create_merge_pred_gdp.py | repo=jgu2/jade | head=e643830be89a7df74a82065400b2e82f6b181ec8 | licenses=["BSD-3-Clause"] | count=15 | stars events 2021-05-15T21:58:26.000Z to 2022-03-17T08:26:48.000Z
max_issues: path=jade/extensions/demo/create_merge_pred_gdp.py | repo=jgu2/jade | head=e643830be89a7df74a82065400b2e82f6b181ec8 | licenses=["BSD-3-Clause"] | count=22 | issues events 2021-02-04T20:02:33.000Z to 2021-09-14T13:29:30.000Z
max_forks:  path=jade/extensions/demo/create_merge_pred_gdp.py | repo=jgu2/jade | head=e643830be89a7df74a82065400b2e82f6b181ec8 | licenses=["BSD-3-Clause"] | count=3 | forks events 2021-01-11T15:11:31.000Z to 2021-06-07T17:36:51.000Z
content:
#!/usr/bin/env python
"""Creates the JADE configuration for stage 2 of the demo pipeline."""
import os
import sys
from jade.models import PipelineConfig
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import load_data
PRED_GDP_COMMANDS_FILE = "pred_gdp_commands.txt"
def main():
config = PipelineConfig(**load_data(os.environ["JADE_PIPELINE_STATUS_FILE"]))
cur_stage = config.stages[-1]
cur_stage_output = cur_stage.path
previous_stage = config.stages[-2]
previous_stage_output = previous_stage.path
script = "jade/extensions/demo/merge_pred_gdp.py"
with open(PRED_GDP_COMMANDS_FILE, "w") as f_out:
cmd = f"python {script} run {previous_stage_output} {cur_stage_output}"
f_out.write(cmd + "\n")
cmd = "jade config create pred_gdp_commands.txt -c config-stage2.json"
sys.exit(run_command(cmd))
if __name__ == "__main__":
main()
avg_line_length: 27.909091 | max_line_length: 81 | alphanum_fraction: 0.733985
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 327 | score_documentation: 0.355049
Row 7
hexsha: 12aa4d4698103b11546cfe0e6f724650c7f1a730 | size: 3,165 | ext: py | lang: Python
max_stars:  path=hamal/hamal/conf/identity.py | repo=JackDan9/hamal | head=965be9db066209300c52f0cf17d251290d8901b7 | licenses=["MIT"] | count=3 | stars events 2020-06-12T13:03:46.000Z to 2020-08-06T11:25:46.000Z
max_issues: path=hamal/hamal/conf/identity.py | repo=JackDan9/hamal | head=965be9db066209300c52f0cf17d251290d8901b7 | licenses=["MIT"] | count=null | issues events=null
max_forks:  path=hamal/hamal/conf/identity.py | repo=JackDan9/hamal | head=965be9db066209300c52f0cf17d251290d8901b7 | licenses=["MIT"] | count=null | forks events=null
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import passlib.utils
from hamal.conf import utils
max_password_length = cfg.IntOpt(
'max_password_length',
default=4096,
max=passlib.utils.MAX_PASSWORD_SIZE,
help=utils.fmt("""
Maximum allowed length for user passwords. Decrease this value to improve
performance. Changing this value does not affect existing passwords.
"""))
password_hash_algorithm = cfg.StrOpt(
'password_hash_algorithm',
choices=['bcrypt', 'scrypt', 'pbkdf2_sha512'],
default='bcrypt',
help=utils.fmt("""
The password hashing algorithm to use for passwords stored within hamal.
"""))
password_hash_rounds = cfg.IntOpt(
'password_hash_rounds',
help=utils.fmt("""
This option represents a trade off between security and performance. Higher
values lead to slower performance, but higher security. Changing this option
will only affect newly created passwords as existing password hashes already
have a fixed number of rounds applied, so it is safe to tune this option in a
running cluster.
The default for bcrypt is 12, must be between 4 and 31, inclusive.
The default for scrypt is 16, must be within `range(1,32)`.
The default for pbkdf2_sha512 is 60000, must be within `range(1,1<<32)`.
WARNING: If using scrypt, increasing this value increases BOTH time AND
memory requirements to hash a password.
"""))
salt_bytesize = cfg.IntOpt(
'salt_bytesize',
min=0,
max=96,
help=utils.fmt("""
Number of bytes to use in scrypt and pbkdf2_sha512 hashing salt.
Default for scrypt is 16 bytes.
Default for pbkdf2_sha512 is 16 bytes.
Limited to a maximum of 96 bytes due to the size of the column used to store
password hashes.
"""))
scrypt_block_size = cfg.IntOpt(
'scrypt_block_size',
help=utils.fmt("""
Optional block size to pass to scrypt hash function (the `r` parameter).
Useful for tuning scrypt to optimal performance for your CPU architecture.
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 8.
"""))
scrypt_parallelism = cfg.IntOpt(
'scrypt_parallelism',
help=utils.fmt("""
Optional parallelism to pass to scrypt hash function (the `p` parameter).
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 1.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
max_password_length,
password_hash_algorithm,
password_hash_rounds,
scrypt_block_size,
    scrypt_parallelism,
salt_bytesize
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
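# --- Usage sketch (added for illustration; not part of the original module) ---
# Registering the options above on a fresh ConfigOpts object and reading a default back.
if __name__ == '__main__':
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])  # parse an empty argument list so the registered defaults become readable
    print(getattr(conf, GROUP_NAME).max_password_length)  # -> 4096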
avg_line_length: 30.728155 | max_line_length: 77 | alphanum_fraction: 0.749447
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,257 | score_documentation: 0.713112
Row 8
hexsha: 12aab253143e67156c54f44e65c0b36caa2ab283 | size: 2,631 | ext: py | lang: Python
max_stars:  path=fact/time.py | repo=mackaiver/slowREST | head=8ae07d8657164abe83f071216b6e9d00a57ae705 | licenses=["MIT"] | count=1 | stars events 2015-03-03T08:07:52.000Z to 2015-03-03T08:07:52.000Z
max_issues: path=fact/time.py | repo=mackaiver/slowREST | head=8ae07d8657164abe83f071216b6e9d00a57ae705 | licenses=["MIT"] | count=null | issues events=null
max_forks:  path=fact/time.py | repo=mackaiver/slowREST | head=8ae07d8657164abe83f071216b6e9d00a57ae705 | licenses=["MIT"] | count=null | forks events=null
content:
from __future__ import print_function
__author__ = 'dneise, mnoethe'
""" This file contains some functions to deal with FACT modified modified julian date
The time used most of the time in FACT is the number of days since 01.01.1970
So this time is related to unix time, since it has the same offset
(unix time is the number of seconds since 01.01.1970 00:00:00)
but it is also related to "the" Modified Julian Date (MJD),
which is used by astronomers
in the sense, that it also counts days.
According to http://en.wikipedia.org/wiki/Julian_day,
there is quite a large number of
somehow modified julian dates, of which the MJD is only one.
So it might be okay, to introduce a new modification,
going by the name of FACT Julian Date (FJD).
"""
import time
import calendar
from datetime import datetime
import logging
import dateutil
import dateutil.parser
import dateutil.tz  # dateutil.tz is used below; import it explicitly rather than relying on dateutil.parser pulling it in
OFFSET = (datetime(1970, 1, 1) - datetime(1, 1, 1)).days
def fjd(datetime_inst):
""" convert datetime instance to FJD
"""
if datetime_inst.tzinfo is None:
logging.warning("datetime instance is not aware of its timezone."
" Result possibly wrong!")
return calendar.timegm(datetime_inst.utctimetuple()) / (24.*3600.)
def iso2dt(iso_time_string):
""" parse ISO time string to timezone aware datetime instance
example
2015-01-23T08:08+01:00
"""
datetime_inst = dateutil.parser.parse(iso_time_string)
# make aware at any cost!
if datetime_inst.tzinfo is None:
print("ISO time string did not contain timezone info. I assume UTC!")
datetime_inst = datetime_inst.replace(tzinfo=dateutil.tz.tzutc())
return datetime_inst
def run2dt(run_string):
""" parse typical FACT run file path string to datetime instance (UTC)
example
first you do this:
"/path/to/file/20141231.more_text" --> "20141231"
then call
run2dt("20141231")
"""
format_ = "%Y%m%d"
datetime_inst = datetime.strptime(run_string, format_)
datetime_inst = datetime_inst.replace(tzinfo=dateutil.tz.tzutc())
return datetime_inst
def facttime(time_string):
""" conver time-string with format %Y%m%d %H:%M to fact time
"""
return calendar.timegm(time.strptime(
time_string, "%Y%m%d %H:%M")) / (24.*3600.)
def to_datetime(fact_julian_date):
""" convert facttime to datetime instance
"""
unix_time = fact_julian_date*24*3600
datetime_inst = datetime.utcfromtimestamp(unix_time)
return datetime_inst
def datestr(datetime_inst):
""" make iso time string from datetime instance
"""
return datetime_inst.isoformat("T")
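# --- Worked example (added for illustration; not part of the original module) ---
if __name__ == '__main__':
    noon_after_epoch = datetime(1970, 1, 2, 12, 0, tzinfo=dateutil.tz.tzutc())
    print(fjd(noon_after_epoch))       # 1.5 -> one and a half days since 1970-01-01 00:00 UTC
    print(to_datetime(1.5))            # 1970-01-02 12:00:00 (naive datetime in UTC)
    print(facttime("19700102 12:00"))  # 1.5, the same value parsed from a "%Y%m%d %H:%M" string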
avg_line_length: 28.912088 | max_line_length: 85 | alphanum_fraction: 0.708476
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,411 | score_documentation: 0.536298
Row 9
hexsha: 12aabf7a6ed3903e5b3fb7b076bf621fe0068180 | size: 1,318 | ext: py | lang: Python
max_stars:  path=nipype/interfaces/ants/tests/test_auto_ImageMath.py | repo=TRO-HIT/nipype | head=c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | licenses=["Apache-2.0"] | count=null | stars events=null
max_issues: path=nipype/interfaces/ants/tests/test_auto_ImageMath.py | repo=TRO-HIT/nipype | head=c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | licenses=["Apache-2.0"] | count=null | issues events=null
max_forks:  path=nipype/interfaces/ants/tests/test_auto_ImageMath.py | repo=TRO-HIT/nipype | head=c453eac5d7efdd4e19a9bcc8a7f3d800026cc125 | licenses=["Apache-2.0"] | count=1 | forks events 2020-12-16T16:36:48.000Z to 2020-12-16T16:36:48.000Z
content:
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..utils import ImageMath
def test_ImageMath_inputs():
input_map = dict(
args=dict(argstr="%s",),
copy_header=dict(usedefault=True,),
dimension=dict(argstr="%d", position=1, usedefault=True,),
environ=dict(nohash=True, usedefault=True,),
num_threads=dict(nohash=True, usedefault=True,),
op1=dict(argstr="%s", extensions=None, mandatory=True, position=-2,),
op2=dict(argstr="%s", position=-1,),
operation=dict(argstr="%s", mandatory=True, position=3,),
output_image=dict(
argstr="%s",
extensions=None,
keep_extension=True,
name_source=["op1"],
name_template="%s_maths",
position=2,
),
)
inputs = ImageMath.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ImageMath_outputs():
output_map = dict(output_image=dict(extensions=None,),)
outputs = ImageMath.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
avg_line_length: 34.684211 | max_line_length: 77 | alphanum_fraction: 0.618361
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 92 | score_documentation: 0.069803
Row 10
hexsha: 12b0b747a8e429f2bfcdc96202c017eb8b47dbba | size: 72,049 | ext: py | lang: Python
max_stars:  path=tests/chainerx_tests/unit_tests/routines_tests/test_math.py | repo=tkerola/chainer | head=572f6eef2c3f1470911ac08332c2b5c3440edf44 | licenses=["MIT"] | count=null | stars events=null
max_issues: path=tests/chainerx_tests/unit_tests/routines_tests/test_math.py | repo=tkerola/chainer | head=572f6eef2c3f1470911ac08332c2b5c3440edf44 | licenses=["MIT"] | count=null | issues events=null
max_forks:  path=tests/chainerx_tests/unit_tests/routines_tests/test_math.py | repo=tkerola/chainer | head=572f6eef2c3f1470911ac08332c2b5c3440edf44 | licenses=["MIT"] | count=null | forks events=null
content:
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
_in_out_dtypes_arithmetic_invalid = [
(('bool_', 'bool_'), 'bool_'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float32'), 'float32'),
(('bool_', 'float64'), 'float64'),
(('int8', 'bool_'), 'int8'),
(('int16', 'bool_'), 'int16'),
(('int32', 'bool_'), 'int32'),
(('int64', 'bool_'), 'int64'),
(('uint8', 'bool_'), 'uint8'),
(('float16', 'bool_'), 'float16'),
(('float32', 'bool_'), 'float32'),
(('float64', 'bool_'), 'float64'),
]
_in_out_dtypes_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_arithmetic_invalid
]
_in_out_dtypes_inplace_arithmetic_invalid = [
((t1, t2), t3) for (t1, t2), t3 in _in_out_dtypes_arithmetic
if (numpy.dtype(t1).kind != 'f' and numpy.dtype(t2).kind == 'f')
] + _in_out_dtypes_arithmetic_invalid
_in_out_dtypes_inplace_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_inplace_arithmetic_invalid
]
_in_out_dtypes_array_int_scalar = [
# Int scalar.
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
]
_in_out_dtypes_int_array_float_scalar = [
# Int arrays and float scalars.
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
(('int8',), numpy.float32, 'float32'),
(('int64',), numpy.float16, 'float32'),
(('uint8',), numpy.float64, 'float32'),
]
_in_out_dtypes_float_array_float_scalar = [
    # Float arrays and float scalars.
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
(('float64',), float, 'float64'),
(('float16',), numpy.float64, 'float16'),
(('float64',), numpy.float16, 'float64'),
]
_in_out_dtypes_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_float_arithmetic_scalar = (
_in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_float_arithmetic_scalar = (
_in_out_dtypes_float_array_float_scalar)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'input': [float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestNegative(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
if self.is_module:
return xp.negative(a)
else:
return -a
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DtypeError, TypeError))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_negative_invalid_bool(xp, device, is_module):
x = xp.array([True, False], dtype='bool_')
if is_module:
xp.negative(x)
else:
-x
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAdd(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.add(a, b)
else:
return a + b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_add_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a + b
else:
chainerx.add(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIAdd(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a += b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_iadd_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a += b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAddScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a + scalar
else:
return scalar + a
else:
if self.is_scalar_rhs:
return xp.add(a, scalar)
else:
return xp.add(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIAddScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a += scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSub(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.subtract(a, b)
else:
return a - b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_sub_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a - b
else:
chainerx.subtract(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestISub(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a -= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_isub_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a -= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSubScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a - scalar
else:
return scalar - a
else:
if self.is_scalar_rhs:
return xp.subtract(a, scalar)
else:
return xp.subtract(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestISubScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a -= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMul(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.multiply(a, b)
else:
return a * b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic + [
((t, 'bool_'), t) for t in chainerx.testing.all_dtypes
],
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIMul(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a *= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
],
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMulScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a * scalar
else:
return scalar * a
else:
if self.is_scalar_rhs:
return xp.multiply(a, scalar)
else:
return xp.multiply(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_inplace_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
]),
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIMulScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a *= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*chainer.testing.product({
'lhs,rhs': [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [3] * 9),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [-3] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[1.2] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[-1.2] * 9),
],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'is_module': [True, False],
}))
class TestFloorDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
a = numpy.array(self.lhs).astype(in_dtype1)
b = numpy.array(self.rhs).astype(in_dtype2)
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.floor_divide(a, b)
else:
return a // b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(chainer.testing.product_dict(
chainer.testing.product({
'array': [
([]),
([0, 1, 2, 3, 100, 101, 102, 103]),
([-0, -1, -2, -3, -4, -100, -101, -102, -103]),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4]),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4]),
([-0.61, -0.6, -0.59, 0.59, 0.6, 0.61]),
],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
}),
chainer.testing.product({
'scalar_value': [-3, 3, -1.2, 1.2, 0],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
})
# Special values
+ chainer.testing.product({
'scalar_value': [float('inf'), -float('inf'), float('nan')],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
})
)))
class TestFloorDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
# TODO(imanishi): Remove this.
if in_dtype == 'uint8' and self.scalar_value < 0:
self.skip_forward_test = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
a = numpy.array(self.array).astype(in_dtype)
return a,
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.floor_divide(a, scalar)
else:
return xp.floor_divide(scalar, a)
else:
if self.is_scalar_rhs:
return a // scalar
else:
return scalar // a
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_floordiv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a // b
else:
chainerx.floor_divide(a, b)
# TODO(imanishi): Support and test zero division and mixed dtypes.
# TODO(imanishi): Support and test chainerx.Scalar // chainerx.ndarray.
# TODO(imanishi): Support and test bool dtype.
@chainerx.testing.numpy_chainerx_array_equal(float16_rtol=1e-3)
@pytest.mark.parametrize('lhs,rhs', [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [3] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [-3] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [1.2] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [-1.2] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], 3),
([-1, -2, -3, -4, -100, -101, -102, -103], 3),
([0, 1, 2, 3, 100, 101, 102, 103], -3),
([-1, -2, -3, -4, -100, -101, -102, -103], -3),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], 1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], 1.2),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], -1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], -1.2),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_ifloordiv(xp, lhs, rhs, device, numeric_dtype):
if numpy.array(lhs).dtype.kind != numpy.dtype(numeric_dtype).kind:
return chainerx.testing.ignore()
lhs = xp.array(lhs).astype(numeric_dtype)
if isinstance(rhs, (list, tuple)):
rhs = xp.array(rhs).astype(numeric_dtype)
lhs //= rhs
return lhs
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_ifloordiv_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a //= b
_in_out_dtypes_inplace_truediv = [
(('float32', 'int16'), 'float32'),
(('float64', 'uint8'), 'float64'),
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float16', 'float64'), 'float64'),
]
_in_out_dtypes_truediv = _in_out_dtypes_inplace_truediv + [
(('int8', 'int8'), 'float32'),
(('int16', 'int16'), 'float32'),
(('int32', 'int32'), 'float32'),
(('int64', 'int64'), 'float32'),
(('uint8', 'uint8'), 'float32'),
(('int8', 'int32'), 'float32'),
(('uint8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int32', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
]
_in_out_dtypes_inplace_truediv_scalar = [
(('int8',), int, 'float32'),
(('int16',), int, 'float32'),
(('int32',), int, 'float32'),
(('int64',), int, 'float32'),
(('uint8',), int, 'float32'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
]
_in_out_dtypes_truediv_scalar = _in_out_dtypes_inplace_truediv_scalar + [
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
# Avoid (-0.3, 0.3) interval
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.divide(a, b)
else:
return a / b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_truediv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a / b
else:
chainerx.true_divide(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestITrueDivide(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
a /= b
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [True, False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def generate_inputs(self):
# Do not divide by small number to avoid ridiculously large outputs.
if not self.is_scalar_rhs and self.input == 'random':
in_dtype, = self.in_dtypes
low = -5 if numpy.dtype(in_dtype).kind != 'u' else 2
high = 5
x = array_utils.uniform(self.shape, in_dtype, low=low, high=high)
x[(-1 < x) & (x < 0)] = -2
x[(0 <= x) & (x < 1)] = 2
return x,
return super().generate_inputs()
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.divide(a, scalar)
else:
return xp.divide(scalar, a)
else:
if self.is_scalar_rhs:
return a / scalar
else:
return scalar / a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestITrueDivideScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a /= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('in_dtypes,out_dtype', [
(('bool_',), 'int64'),
(('int8',), 'int64'),
(('int16',), 'int64'),
(('int32',), 'int64'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
# TODO(niboshi): Unsigned integer dtypes should result in uint64.
# Currently chainerx returns int64.
(('uint8',), 'int64'),
])
@chainer.testing.parameterize_pytest('shape,axis', [
((), None),
((), ()),
((2,), None),
((2,), ()),
((2,), 0),
((2,), (0,)),
((2,), (-1,)),
((2, 3), None),
((2, 3), ()),
((2, 3), 0),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (-1,)),
((2, 3), (-2,)),
((2, 3), (0, 1)),
((2, 3), (-2, -1)),
((1, 3), None), # sum over 1-dim axis
((0, 3), None), # sum over 0-dim axis
# Sum over axes that are in the middle or apart
((2, 3, 4), (1,)),
((2, 3, 4), (0, 2)),
# Sum over axes that are apart and/or unsorted
((2, 3), (1, 0)),
((2, 3, 4), (2, 0)),
((2, 3, 4), (2, 0, 1)),
((2, 3, 4), (-2, 2, 0)),
])
@chainer.testing.parameterize_pytest('keepdims', [True, False])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def func(self, xp, a):
if self.is_module:
return xp.sum(a, axis=self.axis, keepdims=self.keepdims)
else:
return a.sum(axis=self.axis, keepdims=self.keepdims)
@op_utils.op_test(['native:0'])
class TestSumStability(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.full(2 ** 20, 0.1, dtype=numpy.float32),
def forward_xp(self, inputs, xp):
x, = inputs
if xp is chainerx:
return x.sum(),
else:
return (x[0] * x.size).astype(x.dtype),
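# Why the stability check above matters (illustrative, assuming a NumPy-style
# pairwise summation as the reference): accumulating 2 ** 20 float32 copies of
# 0.1 one element at a time drifts noticeably, while a pairwise/tree reduction
# stays close to 0.1 * x.size. A minimal sketch:
#
#     x = numpy.full(2 ** 20, 0.1, dtype=numpy.float32)
#     acc = numpy.float32(0)
#     for v in x:
#         acc += v          # naive left-to-right accumulation loses precision
#     # acc differs visibly from x.sum() and from numpy.float32(0.1) * x.size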
@op_utils.op_test(['native:0'])
@chainer.testing.parameterize_pytest('size', list(range(1024)))
class TestSumEachSize(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.arange(self.size, dtype=numpy.int32) + 1,
def forward_xp(self, inputs, xp):
x, = inputs
return x.sum(),
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
# ((), 0), # TODO(sonots): Fix compatibility
((), 1),
((), (1,)),
((2,), 2),
((2,), (2,)),
((2,), (-2,)),
((2, 3,), (-3,)),
((2, 3,), (-3, -4)),
((2, 3,), (0, 0)),
((2, 3,), (-1, -1)),
((2, 3,), (0, 1, 1)),
((2, 3,), (0, -2)),
])
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
xp.sum(a, axis=axis, keepdims=keepdims)
else:
a.sum(axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_float_arithmetic_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMinimumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.minimum(a, scalar)
else:
return xp.minimum(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [0, 1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_float_arithmetic_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMaximumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.maximum(a, scalar)
else:
return xp.maximum(scalar, a)
def _create_dummy_array_for_dot(xp, shape, dtype):
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog10(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log10(a)
_logsumexp_params = [
((2,), 0),
((2,), -1),
((2, 3), None),
((2, 3), 0),
((2, 3), 1),
((2, 3), -2),
((2, 3), (0, 1)),
((2, 3), (-2, 1)),
((1, 2, 3), None),
    ((1, 2, 3), (1,)),
((1, 2, 3), (1, 0)),
((1, 2, 3), (0, 1, 2)),
]
_invalid_logsumexp_params = [
# Axis out of bounds
((2,), 1),
((2,), -2),
((2,), (0, 1)),
((2, 3), (0, 1, 2)),
# Duplicate axes
((2,), (0, 0)),
((2, 3), (0, 0)),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
class TestLogSumExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
# TODO(imanishi): Support device implementation and remove this.
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
keepdims = self.keepdims
if xp is chainerx:
return chainerx.logsumexp(x, axis=axis, keepdims=keepdims),
x = x.astype(self.out_dtype)
return numpy.log(numpy.exp(x).sum(axis=axis, keepdims=keepdims)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
# TODO(hvy): Should not overflow for large numbers, add tests
def test_logsumexp_invalid(device, a_shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
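# Note on the overflow TODO above: a numerically robust reference for
# logsumexp would apply the standard max-shift trick, e.g. (a sketch, not what
# TestLogSumExp.forward_xp does today):
#
#     m = x.max(axis=axis, keepdims=True)
#     numpy.log(numpy.exp(x - m).sum(axis=axis, keepdims=True)) + m
#
# which equals log(sum(exp(x))) but avoids overflow for large inputs.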
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestLogSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.log_softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return x - numpy.log(numpy.exp(x).sum(axis=axis, keepdims=True)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
def test_log_softmax_invalid(device, a_shape, axis, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
return chainerx.log_softmax(a, axis=axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSquaredDifference(op_utils.OpTest):
def setup(self):
x1_dtype, x2_dtype = self.in_dtypes
if x1_dtype == 'float16' or x2_dtype == 'float16':
self.check_forward_options.update({'atol': 3e-3, 'rtol': 3e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 1e-2, 'rtol': 5e-2})
def generate_inputs(self):
shape = self.shape
x1_dtype, x2_dtype = self.in_dtypes
x1 = array_utils.uniform(shape, x1_dtype)
x2 = array_utils.uniform(shape, x2_dtype)
return x1, x2
def forward_chainerx(self, inputs):
x1, x2 = inputs
y = chainerx.squared_difference(x1, x2)
return y,
def forward_expected(self, inputs):
x1, x2 = inputs
y = numpy.asarray(
numpy.square(numpy.subtract(x1, x2))).astype(x1.dtype)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Differentiable
chainer.testing.product({
'input': [
numpy.asarray(0.),
numpy.asarray(-1.),
numpy.asarray(1.),
numpy.asarray(10.),
numpy.full((), 2.),
numpy.full((0,), 2.),
numpy.full((2, 3), 2.)
]})
+
# Nondifferentiable
chainer.testing.product({
'input': [
numpy.asarray(float('inf')),
numpy.asarray(float('nan')),
],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
@pytest.mark.parametrize('contiguous', [None, 'C'])
class TestSigmoid(op_utils.NumpyOpTest):
def setup(self, contiguous, float_dtype):
self.dtype = float_dtype
self.contiguous = contiguous
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
if float_dtype == 'float16':
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
return self.input,
def forward_xp(self, inputs, xp):
if xp is numpy:
return 1 / (1 + numpy.exp(-inputs[0])),
return xp.sigmoid(inputs[0]),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return numpy.exp(x) / (numpy.exp(x).sum(axis=axis, keepdims=True)),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [-2, 0, 2],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSquare(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.square(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSqrt(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.sqrt(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_numeric_dtypes_two_arrays,
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [True, False],
})
# Special values (integers forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.signed_integral_dtypes)),
'input_lhs': [-2, -1, 0, 1, 2, 5],
'input_rhs': [0, 1, 2, 5],
'is_module': [False],
})
# Special values (floats forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'input_rhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special values (floats backward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-3.0, -1.2, 1.2, 3],
'input_rhs': [-3.0, -1.2, 0.0, 1.2, 3.0],
'is_module': [False],
})
))
class TestPower(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
in_dtype1, in_dtype2 = self.in_dtypes
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def func(self, xp, a, b):
if self.is_module:
y = xp.power(a, b)
else:
y = a ** b
return y
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'scalar_value': [
-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestPowerScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
y = xp.power(a, scalar)
else:
y = xp.power(scalar, a)
else:
if self.is_scalar_rhs:
y = a ** scalar
else:
y = scalar ** a
return y
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('is_bool_rhs', [True, False])
@pytest.mark.parametrize('is_bool_primitive', [True, False])
@pytest.mark.parametrize('is_module', [True, False])
def test_power_invalid_bool_dtype(
device, dtype, is_bool_rhs, is_bool_primitive, is_module):
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, dtype))
if is_bool_primitive:
b = True
else:
b = chainerx.array(array_utils.uniform(shape, 'bool'))
with pytest.raises(chainerx.DtypeError):
if is_module:
if is_bool_rhs:
chainerx.power(a, b)
else:
chainerx.power(b, a)
else:
if is_bool_rhs:
a ** b
else:
b ** a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': ['random'],
'contiguous': [None, 'C'],
})
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAbs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a):
assert chainerx.abs is chainerx.absolute
return xp.abs(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [-2.5, -1.5, -0.1, 0.1, 1.5, 2.5],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestFabs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.fabs(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(-10.6),
numpy.asarray(0.),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 0),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), -1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', [
(('int8',), 'int8'),
(('int16',), 'int16'),
(('int32',), 'int32'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
])
def test_sign(xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
return xp.sign(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), 1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', math_utils.in_out_dtypes_math_functions)
@pytest.mark.parametrize('func', [
lambda xp, a: xp.ceil(a),
lambda xp, a: xp.floor(a)
])
def test_rounding_routines(func, xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
a = func(xp, a)
a = dtype_utils.cast_if_numpy_array(xp, a, out_dtype)
return a
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isnan(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isnan(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isinf(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isinf(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isfinite(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isfinite(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMaximum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a, b):
return xp.maximum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_maximum_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
chainerx.maximum(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMinimum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a, b):
return xp.minimum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_minimum_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
chainerx.minimum(a, b)
| 32.914116
| 79
| 0.579106
| 14,651
| 0.203348
| 0
| 0
| 66,284
| 0.919985
| 0
| 0
| 15,876
| 0.22035
|
12b0f94ae97150323ed0af8a6fe2aba3cc7d3f40
| 445
|
py
|
Python
|
7.py
|
flpcan/project_euler
|
2cabb0a51c70b0b6e145328f3e3c55de41ac2854
|
[
"CC0-1.0"
] | null | null | null |
7.py
|
flpcan/project_euler
|
2cabb0a51c70b0b6e145328f3e3c55de41ac2854
|
[
"CC0-1.0"
] | null | null | null |
7.py
|
flpcan/project_euler
|
2cabb0a51c70b0b6e145328f3e3c55de41ac2854
|
[
"CC0-1.0"
] | null | null | null |
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10 001st prime number?
# Trial division by the primes found so far, up to sqrt(n); the original
# range(2, 100) could never reach the 10,001st prime.
def is_prime(n, known_primes):
    for p in known_primes:
        if p * p > n:
            return True
        if n % p == 0:
            return False
    return True

primes = []
i = 2
while len(primes) < 10001:
    if is_prime(i, primes):
        primes.append(i)
    i += 1
print(primes[-1], "Len:", len(primes))
| 18.541667
| 102
| 0.546067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.438202
|
12b14a676fba1294e88631fcf085323cedbf845c
| 5,707
|
py
|
Python
|
src/plot_scripts/plot_sigcomm_bars_cellular.py
|
zxxia/RL-CC
|
d3d3be0097d69ee07b06363ad531cf2479029d74
|
[
"Apache-2.0"
] | null | null | null |
src/plot_scripts/plot_sigcomm_bars_cellular.py
|
zxxia/RL-CC
|
d3d3be0097d69ee07b06363ad531cf2479029d74
|
[
"Apache-2.0"
] | null | null | null |
src/plot_scripts/plot_sigcomm_bars_cellular.py
|
zxxia/RL-CC
|
d3d3be0097d69ee07b06363ad531cf2479029d74
|
[
"Apache-2.0"
] | null | null | null |
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
SAVE_ROOT = '../../figs_sigcomm22'
plt.style.use('seaborn-deep')
plt.rcParams['font.family'] = 'Arial'
# plt.rcParams['font.size'] = 42
# plt.rcParams['axes.labelsize'] = 42
# plt.rcParams['legend.fontsize'] = 42
# plt.rcParams['figure.figsize'] = (11, 9)
plt.rcParams['font.size'] = 36
plt.rcParams['axes.labelsize'] = 36
plt.rcParams['axes.titlesize'] = 36
plt.rcParams['legend.fontsize'] = 36
plt.rcParams['svg.fonttype'] = 'none'
HATCHES = ['/', '\\', 'x', 'o', '.', 'O', '-', '*', '+']
WIDTH = 0.3
bbr_reward, bbr_tput, bbr_tail_lat, bbr_loss = 118.07, 5.23, 517.02, 0.05
copa_reward, copa_tput, copa_tail_lat, copa_loss = 255.84, 4.58, 333.47, 0.01
cubic_reward, cubic_tput, cubic_tail_lat, cubic_loss = 69.75, 5.40, 858.46, 0.02
vivace_reward, vivace_tput, vivace_tail_lat, vivace_loss = -404.59, 4.04, 864.41, 0.21
vivace_latency_reward, vivace_latency_tput, vivace_latency_tail_lat, vivace_latency_loss = -422.16, 4.40, 888.76, 0.22
vivace_loss_reward = -616.31 #5.04 941.72 0.32
genet_reward = 252.28
genet_reward_err = 6.46
genet_tput, genet_tail_lat, genet_loss = 5.02, 251.02, 0.02
udr1_reward = 142.31
udr1_reward_err = 23.78 #
udr1_tput, udr1_tail_lat, udr1_loss = 4.59, 418.87, 0.03
udr2_reward = 187.61
udr2_reward_err = 5.03 #
udr2_tput, udr2_tail_lat, udr2_loss = 4.74, 408.95, 0.01
udr3_reward = 203.96
udr3_reward_err = 4.05 # 4.74 386.01 0.01
udr3_tput, udr3_tail_lat, udr3_loss = 4.74, 386.01, 0.01
real_reward = 171.61
real_reward_err = 3.18 # 5.01 459.23 0.02
cl1_reward = 206.56
cl1_reward_err = 3.07 # 4.88 413.40 0.01
cl2_reward = 211.89
cl2_reward_err = 4.05 # 4.82 419.74 0.00
column_wid = 0.7
capsize_wid = 8
eline_wid = 2
def cellular_bars():
plt.figure(figsize=(9,5))
ax = plt.gca()
# plt.bar([1, 2], [bbr_reward, cubic_reward])
bars = plt.bar([1, 2, 3, 4],
[udr1_reward, udr2_reward, udr3_reward, real_reward],
yerr=[udr1_reward_err, udr2_reward_err, udr3_reward_err, real_reward_err],
color='C0', width=column_wid, error_kw=dict( lw=eline_wid, capsize=capsize_wid))
for bar, pat in zip(bars, HATCHES):
bar.set_hatch(pat)
plt.bar([5], [genet_reward], yerr=[genet_reward_err], color='C2',
width=column_wid, error_kw=dict( lw=eline_wid, capsize=capsize_wid))
# plt.title('Ethernet')
ax = plt.gca()
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels(['RL1', 'RL2', 'RL3', 'RL-real', 'Genet'], rotation=20)
ax.set_ylabel('Test reward')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False) # ticks along the top edge are off
# plt.tight_layout()
svg_file = os.path.join(SAVE_ROOT, 'evaluation_generalization_test_cellular.svg')
pdf_file = os.path.join(SAVE_ROOT, 'evaluation_generalization_test_cellular.pdf')
plt.savefig(svg_file, bbox_inches='tight')
os.system("inkscape {} --export-pdf={}".format(svg_file, pdf_file))
os.system("pdfcrop --margins 1 {} {}".format(pdf_file, pdf_file))
def cc_scatter():
plt.rcParams['font.size'] = 32
plt.rcParams['axes.labelsize'] = 32
plt.rcParams['axes.titlesize'] = 32
plt.rcParams['legend.fontsize'] = 32
fig, ax = plt.subplots(figsize=(9, 5))
msize = 200
ax.scatter([bbr_tail_lat], [bbr_tput], marker='d', s=msize, color='C0',
label='BBR')
ax.annotate('BBR', (bbr_tail_lat+102, bbr_tput+0.01))
ax.scatter([copa_tail_lat], [copa_tput], marker='>', s=msize, color='C1',
label='Copa')
ax.annotate('Copa', (copa_tail_lat, copa_tput+0.01))
ax.scatter([cubic_tail_lat], [cubic_tput], marker='v', s=msize,
color='darkorange', label='Cubic')
ax.annotate('Cubic', (cubic_tail_lat+50, cubic_tput-0.12))
ax.scatter([vivace_latency_tail_lat], [vivace_latency_tput], marker='^',
s=msize, color='C3', label='Vivace')
ax.annotate('Vivace', (vivace_latency_tail_lat, vivace_latency_tput))
ax.scatter([udr1_tail_lat], [udr1_tput], marker='<', s=msize, color='C4',
label='RL1')
ax.annotate('RL1', (udr1_tail_lat+27, udr1_tput-0.13))
ax.scatter([udr2_tail_lat], [udr2_tput], marker='p', s=msize, color='C5',
label='RL2')
ax.annotate('RL2', (udr2_tail_lat+100, udr2_tput+0.02))
ax.scatter([udr3_tail_lat], [udr3_tput], marker='s', s=msize,
color='indigo', label='RL3')
ax.annotate('RL3', (udr3_tail_lat-13, udr3_tput+0.02))
ax.scatter([genet_tail_lat], [genet_tput], s=msize, color='C2',
label='Genet')
ax.annotate('Genet', (genet_tail_lat+60, genet_tput+0.05))
ax.set_ylabel('Throughput (Mbps)')
ax.set_xlabel('90th percentile latency (ms)')
ax.invert_xaxis()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# fig.legend(bbox_to_anchor=(0, 1.02, 1, 0.14), ncol=4, loc="upper center",
# borderaxespad=0, borderpad=0.2, columnspacing=0.01, handletextpad=0.001)
#
svg_file = os.path.join(SAVE_ROOT, 'evaluation_cc_scatter_cellular.svg')
pdf_file = os.path.join(SAVE_ROOT, 'evaluation_cc_scatter_cellular.pdf')
fig.savefig(svg_file, bbox_inches='tight')
os.system("inkscape {} --export-pdf={}".format(svg_file, pdf_file))
os.system("pdfcrop --margins 1 {} {}".format(pdf_file, pdf_file))
if __name__ == '__main__':
cellular_bars()
# cc_scatter()
| 41.963235
| 118
| 0.65884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,487
| 0.260557
|
12b1527e01e27cdb3f79857b70a9797275320e0e
| 1,372
|
py
|
Python
|
spacy/lang/th/__init__.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 1
|
2019-05-17T02:43:33.000Z
|
2019-05-17T02:43:33.000Z
|
spacy/lang/th/__init__.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 49
|
2021-10-01T10:15:30.000Z
|
2021-12-27T14:36:05.000Z
|
spacy/lang/th/__init__.py
|
snosrap/spaCy
|
3f68bbcfec44ef55d101e6db742d353b72652129
|
[
"MIT"
] | 1
|
2019-10-01T08:27:20.000Z
|
2019-10-01T08:27:20.000Z
|
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ...language import Language, BaseDefaults
from ...tokens import Doc
from ...util import DummyTokenizer, registry, load_config_from_str
from ...vocab import Vocab
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.th.ThaiTokenizer"
"""
@registry.tokenizers("spacy.th.ThaiTokenizer")
def create_thai_tokenizer():
def thai_tokenizer_factory(nlp):
return ThaiTokenizer(nlp.vocab)
return thai_tokenizer_factory
class ThaiTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab) -> None:
try:
from pythainlp.tokenize import word_tokenize
except ImportError:
raise ImportError(
"The Thai tokenizer requires the PyThaiNLP library: "
"https://github.com/PyThaiNLP/pythainlp"
) from None
self.word_tokenize = word_tokenize
self.vocab = vocab
def __call__(self, text: str) -> Doc:
words = list(self.word_tokenize(text))
spaces = [False] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
class ThaiDefaults(BaseDefaults):
config = load_config_from_str(DEFAULT_CONFIG)
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Thai(Language):
lang = "th"
Defaults = ThaiDefaults
__all__ = ["Thai"]
| 24.945455
| 69
| 0.68586
| 828
| 0.603499
| 0
| 0
| 187
| 0.136297
| 0
| 0
| 196
| 0.142857
|
12b18047e85021cd05074093d60424bfe744046f
| 167
|
py
|
Python
|
src/setup/__init__.py
|
ScottDay/DFN-Maintenance-GUI-Backend
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 2
|
2017-03-31T00:57:35.000Z
|
2017-08-04T10:38:28.000Z
|
src/setup/__init__.py
|
CPedersen3245/Desert-Fireball-Maintainence-GUI
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 10
|
2017-03-29T04:13:14.000Z
|
2017-08-14T06:14:52.000Z
|
src/setup/__init__.py
|
ScottDay/DFN-Maintenance-GUI-Backend
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 4
|
2017-12-23T03:16:00.000Z
|
2018-06-20T07:15:50.000Z
|
from .args import args
from .extensions import extensions
from .logger import logger
from .routes import routes
__all__ = ['args', 'extensions', 'logger', 'routes']
| 20.875
| 52
| 0.748503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.203593
|
12b22d55acd96929800d8872484a4576f43f6f08
| 6,223
|
py
|
Python
|
cloudrunner_server/plugins/clouds/docker_host.py
|
ttrifonov/cloudrunner-server
|
3b2426c8d9987e78425899010b534afc7734d8d4
|
[
"Apache-2.0"
] | 2
|
2016-03-31T08:45:29.000Z
|
2021-04-28T15:18:45.000Z
|
cloudrunner_server/plugins/clouds/docker_host.py
|
ttrifonov/cloudrunner-server
|
3b2426c8d9987e78425899010b534afc7734d8d4
|
[
"Apache-2.0"
] | null | null | null |
cloudrunner_server/plugins/clouds/docker_host.py
|
ttrifonov/cloudrunner-server
|
3b2426c8d9987e78425899010b534afc7734d8d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
import json
import os
import requests
import tempfile
from cloudrunner import VAR_DIR
from .base import BaseCloudProvider, CR_SERVER
HEADERS = {'Content-Type': 'application/json'}
class Docker(BaseCloudProvider):
def __init__(self, profile, log=None):
super(Docker, self).__init__(profile, log)
prefix = "%s-%s" % (self.profile.owner.org.name, self.profile.id)
self._path = os.path.join(VAR_DIR, "tmp", "creds", prefix)
if ":" in self.profile.username:
self.server_address = self.profile.username
else:
self.server_address = "%s:2376" % self.profile.username
try:
os.makedirs(self._path)
except:
pass
_, self._cert_path = tempfile.mkstemp(dir=self._path,
suffix='pem',
text=True)
_, self._key_path = tempfile.mkstemp(dir=self._path,
suffix='pem',
text=True)
with open(self._cert_path, 'w') as f:
f.write(self.profile.password)
with open(self._key_path, 'w') as f:
f.write(self.profile.arguments)
def _cleanup(self):
os.unlink(self._cert_path)
os.unlink(self._key_path)
def create_machine(self, name, image=None, server=CR_SERVER,
ports=None, privileged=None,
volumes=None, **kwargs):
self.log.info("Registering Docker machine [%s::%s] for [%s] at [%s]" %
(name, image, CR_SERVER, self.server_address))
priv = privileged in ['1', 'true', 'True', True]
# cmd = PROVISION % dict(server=server,
# name=name,
# api_key=self.api_key)
exposed_ports, port_bindings = {}, {}
_ports = [p.strip() for p in ports.split(",") if p.strip()]
for port in _ports:
cont_port, _, host_port = port.partition(":")
exposed = "%s/tcp" % cont_port
exposed_ports[exposed] = {}
            if host_port:
port_bindings[exposed] = [{
'HostPort': host_port
}]
else:
port_bindings[exposed] = [{
'HostPort': None
}]
volumes_desc, binds = {}, []
_volumes = [v.strip() for v in volumes.split(",") if v.strip()]
for _vol in _volumes:
mnt_host, _, mnt_cont = _vol.partition(":")
if not mnt_cont:
mnt_cont = mnt_host
mnt_host = ''
volumes_desc[mnt_cont] = {}
if mnt_host:
binds.append("%s:%s" % (mnt_host, mnt_cont))
else:
binds.append("%s:%s" % (mnt_cont, mnt_cont))
env = ["SERVER_ID=%s" % CR_SERVER, "ORG_ID=%s" % self.api_key]
create_data = dict(Hostname=name, Image=image, Env=env,
ExposedPorts=exposed_ports,
Volumes=volumes_desc,
Privileged=priv,
Tty=True,
OpenStdin=True,)
# Cmd=[cmd],
# Entrypoint=['/bin/curl'])
create_url = "https://%s/containers/create" % self.server_address
try:
server_ids = []
res = requests.post(create_url, data=json.dumps(create_data),
cert=(self._cert_path,
self._key_path),
headers=HEADERS,
verify=False)
if res.status_code >= 300:
self.log.error("FAILURE %s(%s)" %
(res.status_code, res.content))
return self.FAIL, [], {}
start_data = dict(PortBindings=port_bindings,
Binds=binds,
Privileged=priv,
Detach=False,
Tty=False)
server_id = res.json()['Id']
self.log.info("Started docker instance %s" % server_id)
server_ids.append(server_id)
start_url = "https://%s/containers/%s/start" % (
self.server_address,
server_id)
res = requests.post(start_url, data=json.dumps(start_data),
cert=(self._cert_path,
self._key_path),
headers=HEADERS,
verify=False)
meta = dict(server_address=self.server_address)
except Exception, ex:
self.log.exception(ex)
raise
finally:
self._cleanup()
return self.OK, server_ids, meta
def delete_machine(self, server_ids, **kwargs):
ret = self.OK
for server_id in server_ids:
try:
delete_url = "https://%s/containers/%s?force=true" % (
self.server_address, server_id)
res = requests.delete(delete_url, cert=(self._cert_path,
self._key_path),
headers=HEADERS,
verify=False)
if res.status_code >= 300:
self.log.error("FAILURE %s(%s)" %
(res.status_code, res.content))
ret = self.FAIL
except Exception, ex:
self.log.error(ex)
return ret
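# Illustrative sketch (not used by the provider): how the comma-separated
# ``ports`` argument of create_machine is expected to expand into the Docker
# API payload. The helper mirrors the parsing above, assuming entries of the
# form "<container_port>[:<host_port>]".
def _example_port_bindings(ports):
    exposed_ports, port_bindings = {}, {}
    for port in (p.strip() for p in ports.split(',') if p.strip()):
        cont_port, _, host_port = port.partition(':')
        exposed = '%s/tcp' % cont_port
        exposed_ports[exposed] = {}
        port_bindings[exposed] = [{'HostPort': host_port or None}]
    return exposed_ports, port_bindings
# _example_port_bindings('80:8080,443') ->
#     ({'80/tcp': {}, '443/tcp': {}},
#      {'80/tcp': [{'HostPort': '8080'}], '443/tcp': [{'HostPort': None}]})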
| 39.138365
| 78
| 0.472602
| 5,560
| 0.89346
| 0
| 0
| 0
| 0
| 0
| 0
| 1,015
| 0.163105
|
12b2fe22c669ef8f586778fb7af3dd29059295d7
| 4,702
|
py
|
Python
|
scope/client_util/job_runner_check.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | 1
|
2017-11-10T17:23:11.000Z
|
2017-11-10T17:23:11.000Z
|
scope/client_util/job_runner_check.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | 5
|
2018-08-01T03:05:35.000Z
|
2018-11-29T22:11:25.000Z
|
scope/client_util/job_runner_check.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | 3
|
2016-05-25T18:58:35.000Z
|
2018-11-29T23:40:45.000Z
|
# -*- coding: utf-8 -*-
# This code is licensed under the MIT License (see LICENSE file for details)
import platform
import datetime
import sys
import pathlib
import subprocess
import time
from .. import scope_job_runner
from ..config import scope_configuration
def main():
if len(sys.argv) == 2 and sys.argv[1] == '--install':
install_systemd_units()
else:
check_job_runner()
TIMER_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Timer]
OnBootSec=15min
OnUnitActiveSec=45min
[Install]
WantedBy=timers.target
'''
SERVICE_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Service]
ExecStart={executable}
'''
def install_systemd_units():
base_unit = pathlib.Path('/etc/systemd/system/job_runner_check')
timer_file = base_unit.with_suffix('.timer')
timer_file.write_text(TIMER_UNIT)
timer_file.chmod(0o644)
service_file = base_unit.with_suffix('.service')
service_file.write_text(SERVICE_UNIT.format(executable=sys.argv[0]))
service_file.chmod(0o644)
subprocess.run(['systemctl', 'enable', timer_file.name], check=True)
subprocess.run(['systemctl', 'start', timer_file.name], check=True)
print(f'systemd units installed. Run systemctl status {timer_file.name} or {base_unit.name} to check.')
ERROR_SUBJECT = '{host}: scope job pending but scope_job_runner is inactive.'
ERROR_MESSAGE = '''One or more of your jobs is overdue on {host}, but the scope job runner daemon is not running.
These jobs will not be run until the command `scope_job_runner start` is executed on that machine.
Time: {time}
Queued Jobs:
{jobs}
'''
ALL_CLEAR_SUBJECT = '{host}: scope_job_runner was reactivated.'
ALL_CLEAR_MESSAGE = '''One or more of your jobs on {host} was stalled due to an inactive job runner.
The job runner has now been restarted and your jobs will be run as planned.
Time: {time}
Queued Jobs:
{jobs}
'''
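# Rendering sketch with hypothetical values (this is what send_email() below
# does with the real host, time and job blurbs):
#
#     ERROR_SUBJECT.format(host='scope-1')
#     # -> 'scope-1: scope job pending but scope_job_runner is inactive.'
#     ERROR_MESSAGE.format(host='scope-1', time='2024-01-01 12:00:00',
#                          jobs='<output of runner.format_job_blurb(job)>')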
def check_job_runner():
runner = scope_job_runner.JobRunner()
problem_file = scope_configuration.CONFIG_DIR / '.jobs_queued_but_runner_inactive'
overdue_jobs, to_email = get_overdue_jobs(runner)
if len(overdue_jobs) == 0:
return
if runner.is_running():
if problem_file.exists():
# job runner was restarted; problem is cleared.
# Alert previous email recipients that things are good now
print('Previous error, but now job runner is running.')
send_email(to_email, runner, overdue_jobs, ALL_CLEAR_SUBJECT, ALL_CLEAR_MESSAGE, 'all-clear')
# Remove the problem-file flag
problem_file.unlink()
else: # job runner is not running.
print('Jobs queued but job runner is not running.')
previously_emailed = set()
if problem_file.exists():
# this error was previously detected
previously_emailed.update(problem_file.read_text().split('\n'))
to_email -= previously_emailed
if to_email:
# we have not alerted some people about the queued jobs
send_email(to_email, runner, overdue_jobs, ERROR_SUBJECT, ERROR_MESSAGE, 'alert')
problem_file.write_text('\n'.join(to_email | previously_emailed))
else:
print('No alert emailed: all relevant parties have already been emailed.')
def get_overdue_jobs(runner):
# Get overdue jobs that anyone cares about (e.g. that aren't system checks and have
# emails attached).
now = time.time()
exec_dir = pathlib.Path(sys.argv[0]).parent
overdue_jobs = []
to_email = set()
for job in runner.jobs.get_jobs():
if ( job.exec_file.parent != exec_dir and # job is user-provided, not like incubator_check
job.status == scope_job_runner.STATUS_QUEUED and # and is active
job.next_run_time is not None and # and is scheduled to run again
job.next_run_time < now and # and is overdue
job.alert_emails ): # and has a non-empty, non-None list of people to alert
overdue_jobs.append(job)
to_email.update(job.alert_emails)
return overdue_jobs, to_email
def send_email(to_email, runner, jobs, subject_template, body_template, email_type):
host = platform.node().split('.')[0]
now = datetime.datetime.now().isoformat(sep=' ', timespec='seconds')
subject = subject_template.format(host=host)
job_blurbs = '\n'.join(runner.format_job_blurb(job) for job in jobs)
message = body_template.format(host=host, time=now, jobs=job_blurbs)
print('Emailing {} about the following jobs:\n{}'.format(email_type, job_blurbs))
runner.send_error_email(sorted(to_email), subject, message)
| 38.227642
| 113
| 0.701616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,912
| 0.406635
|
12b402f977b10f55535c5a3654e5fda7b7dcf072
| 2,222
|
py
|
Python
|
toffy/json_utils.py
|
angelolab/toffy
|
4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6
|
[
"Apache-2.0"
] | null | null | null |
toffy/json_utils.py
|
angelolab/toffy
|
4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6
|
[
"Apache-2.0"
] | 46
|
2022-01-26T18:21:21.000Z
|
2022-03-30T19:19:12.000Z
|
toffy/json_utils.py
|
angelolab/creed-helper
|
4d6c50fe0dfbf1568ee3f9db2182a04dc9ac85c6
|
[
"Apache-2.0"
] | null | null | null |
import copy
import json
import os
from ark.utils import io_utils
def rename_missing_fovs(fov_data):
"""Identify FOVs that are missing the 'name' key and create one with value placeholder_{n}
Args:
fov_data (dict): the FOV run JSON data
Returns:
dict: a copy of the run JSON data with placeholder names for FOVs that lack one
"""
copy_fov_data = copy.deepcopy(fov_data)
# count of FOVs that are missing the 'name' key
missing_count = 0
# iterate over each FOV and add a placeholder name if necessary
for fov in copy_fov_data['fovs']:
if 'name' not in fov.keys():
missing_count += 1
fov['name'] = f'placeholder_{missing_count}'
return copy_fov_data
def rename_duplicate_fovs(tma_fovs):
"""Identify and rename duplicate FOV names in `fov_list`
For a given FOV name, the subsequent duplicates get renamed `{FOV}_duplicate{n}`
Args:
tma_fovs (dict):
The TMA run JSON, should contain a `'fovs'` key defining the list of FOVs
Returns:
dict:
The same run JSON with the FOVs renamed to account for duplicates
"""
# used for identifying the number of times each FOV was found
fov_count = {}
# iterate over each FOV
for fov in tma_fovs['fovs']:
if fov['name'] not in fov_count:
fov_count[fov['name']] = 0
fov_count[fov['name']] += 1
if fov_count[fov['name']] > 1:
fov['name'] = '%s_duplicate%d' % (fov['name'], fov_count[fov['name']] - 1)
return tma_fovs
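# Example (hypothetical run JSON; only the 'name' keys matter here):
#
#     run = {'fovs': [{'name': 'R1C1'}, {'name': 'R1C1'}, {'name': 'R1C2'}]}
#     rename_duplicate_fovs(run)['fovs']
#     # -> names become ['R1C1', 'R1C1_duplicate1', 'R1C2']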
def list_moly_fovs(bin_file_dir):
"""Lists all of the FOVs in a directory which are moly FOVs
Args:
bin_file_dir (str): path to bin files
Returns:
list: list of FOVs which are moly FOVs"""
json_files = io_utils.list_files(bin_file_dir, '.json')
moly_fovs = []
for file in json_files:
json_path = os.path.join(bin_file_dir, file)
with open(json_path, 'r') as jp:
json_file = json.load(jp)
if json_file.get('standardTarget', "") == "Molybdenum Foil":
moly_name = file.split('.json')[0]
moly_fovs.append(moly_name)
return moly_fovs
| 26.771084
| 94
| 0.629613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,176
| 0.529253
|
12b6971b8aff245d6004cadaa44e2d26223997e6
| 545
|
py
|
Python
|
app/plugins/task/upload.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | 1
|
2020-06-22T21:25:52.000Z
|
2020-06-22T21:25:52.000Z
|
app/plugins/task/upload.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | 1
|
2020-05-21T02:46:24.000Z
|
2020-05-25T07:19:23.000Z
|
app/plugins/task/upload.py
|
venturiscm/hcp
|
74ad18180822301274daa9218d7bd9fbdb7807f7
|
[
"Apache-2.0"
] | null | null | null |
from systems.plugins.index import BaseProvider
import os
class Provider(BaseProvider('task', 'upload')):
def execute(self, results, params):
file_path = self.get_path(self.field_file)
if not os.path.exists(file_path):
self.command.error("Upload task provider file {} does not exist".format(file_path))
ssh = self._get_ssh()
ssh.upload(file_path, self.field_remote_path,
mode = self.field_mode,
owner = self.field_owner,
group = self.field_group
)
| 27.25
| 95
| 0.640367
| 484
| 0.888073
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.108257
|
12b73e722a7a33f56b3403eba3f5dbfb5e5538e6
| 2,955
|
py
|
Python
|
win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py
|
takkii/dotfile
|
7daf848c718ee10603a68a6e37a1002a827ec72f
|
[
"MIT"
] | 1
|
2018-10-11T21:31:43.000Z
|
2018-10-11T21:31:43.000Z
|
win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py
|
takkii/dotfile
|
7daf848c718ee10603a68a6e37a1002a827ec72f
|
[
"MIT"
] | null | null | null |
win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py
|
takkii/dotfile
|
7daf848c718ee10603a68a6e37a1002a827ec72f
|
[
"MIT"
] | null | null | null |
# ============================================================================
# FILE: outline.py
# AUTHOR: Yasumasa Tamura (tamura.yasumasa _at_ gmail.com)
# License: MIT license
# ============================================================================
from .base import Base
from subprocess import check_output, CalledProcessError
from denite.util import parse_tagline
import re
import tempfile
OUTLINE_HIGHLIGHT_SYNTAX = [
{'name': 'Name', 'link': 'Identifier', 're': '\S\+\%(\s\+\[\)\@='},
{'name': 'Type', 'link': 'Type', 're': '\[.\{-}\]'},
{'name': 'Ref', 'link': 'Comment', 're': '\s\s.\+'}
]
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'outline'
self.kind = 'file'
self.vars = {
'command': ['ctags'],
'options': [],
'file_opt': '-o',
'ignore_types': [],
'encoding': 'utf-8'
}
def on_init(self, context):
context['__path'] = context['args'][0] if len(
context['args']) > 0 else self.vim.current.buffer.name
def highlight(self):
for syn in OUTLINE_HIGHLIGHT_SYNTAX:
self.vim.command(
'syntax match {0}_{1} /{2}/ contained containedin={0}'.format(
self.syntax_name, syn['name'], syn['re']))
self.vim.command(
'highlight default link {0}_{1} {2}'.format(
self.syntax_name, syn['name'], syn['link']))
def gather_candidates(self, context):
with tempfile.NamedTemporaryFile(
mode='w', encoding=self.vars['encoding']) as tf:
args = []
args += self.vars['command']
args += self.vars['options']
args += [self.vars['file_opt'], tf.name]
args += [context['__path']]
self.print_message(context, args)
tf.close()
try:
check_output(args).decode(self.vars['encoding'], 'replace')
except CalledProcessError:
return []
candidates = []
with open(tf.name, encoding=self.vars['encoding'],
errors='replace') as f:
for line in f:
if re.match('!', line) or not line:
continue
info = parse_tagline(line.rstrip(), tf.name)
candidate = {
'word': info['name'],
'action__path': info['file'],
}
fmt = '{name} [{type}] {file} {ref}'
candidate['abbr'] = fmt.format(**info)
if info['line']:
candidate['action__line'] = info['line']
else:
candidate['action__pattern'] = info['pattern']
candidates.append(candidate)
return candidates
| 35.60241
| 78
| 0.457868
| 2,322
| 0.785787
| 0
| 0
| 0
| 0
| 0
| 0
| 813
| 0.275127
|
12b887c446ea424a4bd8fd55a07bceb06b1c0206
| 1,656
|
py
|
Python
|
test.py
|
Tweetsched/tweetsched-publisher
|
c639670fc9658251a02b8946b34dfae3f3145a72
|
[
"MIT"
] | 1
|
2018-08-28T14:04:15.000Z
|
2018-08-28T14:04:15.000Z
|
test.py
|
Tweetsched/tweetsched-publisher
|
c639670fc9658251a02b8946b34dfae3f3145a72
|
[
"MIT"
] | null | null | null |
test.py
|
Tweetsched/tweetsched-publisher
|
c639670fc9658251a02b8946b34dfae3f3145a72
|
[
"MIT"
] | null | null | null |
from base64 import b64encode
from app import app
import unittest
from mock import patch
import os
import json
from twython import Twython
class TestApp(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
os.environ['SERVICE_KEY'] = 'test-key'
os.environ['SERVICE_PASS'] = 'test-secret'
os.environ['APP_KEY'] = 'test-key'
os.environ['APP_SECRET'] = 'test-secret'
os.environ['OAUTH_TOKEN'] = 'test-oauth-token'
os.environ['OAUTH_TOKEN_SECRET'] = 'test-oauth-token-secret'
@patch('app.Twython.update_status')
def test_publish_tweet(self, update_status_mock):
update_status_mock.return_value = True
auth = (os.environ['SERVICE_KEY'] + ':' + os.environ['SERVICE_PASS']).encode('utf-8')
headers = {
'Authorization': 'Basic ' + b64encode(auth).decode()
}
rv = self.app.post('/api/v1/tweets',
data = json.dumps(dict(id = 3, message = 'test tweet', profileId = '1')),
content_type = 'application/json',
headers = headers)
self.assertEqual(rv.status_code, 200)
self.assertEqual(update_status_mock.call_count, 1)
update_status_mock.assert_called_once()
def test_404(self):
auth = (os.environ['SERVICE_KEY'] + ':' + os.environ['SERVICE_PASS']).encode('utf-8')
headers = {
'Authorization': 'Basic ' + b64encode(auth).decode()
}
rv = self.app.get('/i-am-not-found', headers=headers)
self.assertEqual(rv.status_code, 404)
if __name__ == '__main__':
unittest.main()
| 35.234043
| 100
| 0.607488
| 1,468
| 0.886473
| 0
| 0
| 734
| 0.443237
| 0
| 0
| 393
| 0.237319
|
12b904baad9cd10c3b5e703a970ce798e635e1b7
| 372
|
py
|
Python
|
Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py
|
darioGerussi/exercises
|
414a3867d4db9449e402c58efd993153f55b91eb
|
[
"MIT"
] | 1
|
2022-03-31T01:57:55.000Z
|
2022-03-31T01:57:55.000Z
|
Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py
|
darioGerussi/exercises
|
414a3867d4db9449e402c58efd993153f55b91eb
|
[
"MIT"
] | null | null | null |
Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py
|
darioGerussi/exercises
|
414a3867d4db9449e402c58efd993153f55b91eb
|
[
"MIT"
] | null | null | null |
# Converts a given temperature from Celsius to Fahrenheit
# Prompt user for Celsius temperature
degreesCelsius = float(input('\nEnter the temperature in Celsius: '))
# Calculate and display the converted
# temperature in Fahrenheit
degreesFahrenheit = ((9.0 / 5.0) * degreesCelsius) + 32
print('Fahrenheit equivalent: ', format(degreesFahrenheit, ',.1f'), '\n', sep='')
| 37.2
| 81
| 0.744624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.626344
|
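The conversion in tempCoverter.py above is a one-liner; as a small sketch, here is the same formula wrapped in a reusable, testable function (the names are illustrative, not part of the exercise file).

def celsius_to_fahrenheit(degrees_celsius: float) -> float:
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return (9.0 / 5.0) * degrees_celsius + 32

assert celsius_to_fahrenheit(0.0) == 32.0
assert celsius_to_fahrenheit(100.0) == 212.0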
12b9be88a391697f2894a2c7dcc4147754edbf99
| 1,227
|
py
|
Python
|
website/models/post.py
|
LKKTGB/lkkpomia
|
0a814ed6d28757e07d6392ca27c914e68f0b3bda
|
[
"MIT"
] | null | null | null |
website/models/post.py
|
LKKTGB/lkkpomia
|
0a814ed6d28757e07d6392ca27c914e68f0b3bda
|
[
"MIT"
] | 5
|
2020-04-26T09:03:33.000Z
|
2022-02-02T13:00:39.000Z
|
website/models/post.py
|
LKKTGB/lkkpomia
|
0a814ed6d28757e07d6392ca27c914e68f0b3bda
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
class Post(models.Model):
title = models.CharField(_('post_title'), max_length=100)
body = models.TextField(_('post_body'))
tags = TaggableManager(_('post_tags'), help_text=_('post_tags_help_text'))
create_time = models.DateTimeField(_('post_create_time'), auto_now_add=True)
update_time = models.DateTimeField(_('post_update_time'), auto_now=True)
class Meta:
verbose_name = _('post')
verbose_name_plural = _('posts')
@staticmethod
def autocomplete_search_fields():
return ('id__iexact', 'title__icontains',)
def __str__(self):
return self.title
@property
def cover_url(self):
soup = BeautifulSoup(self.body, 'html.parser')
tags = soup.findAll('img')
return tags[0]['src'] if tags else None
@property
def summary(self):
soup = BeautifulSoup(self.body, 'html.parser')
for br in soup.find_all("br"):
br.replace_with("\n")
ps = [t for t in soup.findAll('p') if t.text.strip()]
return ps[0].text if ps else None
| 32.289474
| 80
| 0.667482
| 1,065
| 0.867971
| 0
| 0
| 538
| 0.438468
| 0
| 0
| 181
| 0.147514
|
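The cover_url and summary properties of the Post model above are plain BeautifulSoup lookups; the following standalone sketch runs the same extraction on a sample body without needing a configured Django project (only bs4 is assumed to be installed).

from bs4 import BeautifulSoup

body = '<p>  </p><p>First paragraph.<br>Second line.</p><p><img src="/media/cover.jpg"></p>'
soup = BeautifulSoup(body, 'html.parser')

# cover_url: src of the first <img>, if any
imgs = soup.findAll('img')
cover_url = imgs[0]['src'] if imgs else None

# summary: text of the first non-empty <p>, with <br> turned into newlines
for br in soup.find_all('br'):
    br.replace_with('\n')
ps = [t for t in soup.findAll('p') if t.text.strip()]
summary = ps[0].text if ps else None

print(cover_url)  # /media/cover.jpg
print(summary)    # First paragraph. / Second line. (printed on two lines)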
12ba24dffd7a4983b46d43a9846f2ca9b1d6059e
| 4,214
|
py
|
Python
|
tests/sentry/api/serializers/test_alert_rule.py
|
kinghuang/sentry
|
5c22673994a62f54a782d1c595852986ccc51ae9
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
tests/sentry/api/serializers/test_alert_rule.py
|
kinghuang/sentry
|
5c22673994a62f54a782d1c595852986ccc51ae9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/serializers/test_alert_rule.py
|
kinghuang/sentry
|
5c22673994a62f54a782d1c595852986ccc51ae9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.alert_rule import DetailedAlertRuleSerializer
from sentry.incidents.logic import create_alert_rule, create_alert_rule_trigger
from sentry.incidents.models import AlertRuleThresholdType
from sentry.snuba.models import QueryAggregations
from sentry.testutils import TestCase
class BaseAlertRuleSerializerTest(object):
def assert_alert_rule_serialized(self, alert_rule, result):
assert result["id"] == six.text_type(alert_rule.id)
assert result["organizationId"] == six.text_type(alert_rule.organization_id)
assert result["name"] == alert_rule.name
assert result["thresholdType"] == alert_rule.threshold_type
assert result["dataset"] == alert_rule.dataset
assert result["query"] == alert_rule.query
assert result["aggregation"] == alert_rule.aggregation
assert result["timeWindow"] == alert_rule.time_window
assert result["resolution"] == alert_rule.resolution
assert result["alertThreshold"] == alert_rule.alert_threshold
assert result["resolveThreshold"] == alert_rule.resolve_threshold
assert result["thresholdPeriod"] == alert_rule.threshold_period
assert result["includeAllProjects"] == alert_rule.include_all_projects
assert result["dateModified"] == alert_rule.date_modified
assert result["dateAdded"] == alert_rule.date_added
class AlertRuleSerializerTest(BaseAlertRuleSerializerTest, TestCase):
def test_simple(self):
alert_rule = create_alert_rule(
self.organization,
[self.project],
"hello",
AlertRuleThresholdType.ABOVE,
"level:error",
QueryAggregations.TOTAL,
10,
1000,
400,
1,
)
result = serialize(alert_rule)
self.assert_alert_rule_serialized(alert_rule, result)
def test_triggers(self):
alert_rule = self.create_alert_rule()
other_alert_rule = self.create_alert_rule()
trigger = create_alert_rule_trigger(alert_rule, "test", AlertRuleThresholdType.ABOVE, 1000)
result = serialize([alert_rule, other_alert_rule])
assert result[0]["triggers"] == [serialize(trigger)]
assert result[1]["triggers"] == []
class DetailedAlertRuleSerializerTest(BaseAlertRuleSerializerTest, TestCase):
def test_simple(self):
projects = [self.project, self.create_project()]
alert_rule = self.create_alert_rule(projects=projects)
result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer())
self.assert_alert_rule_serialized(alert_rule, result)
assert sorted(result["projects"]) == sorted([p.slug for p in projects])
assert result["excludedProjects"] == []
def test_excluded_projects(self):
projects = [self.project]
excluded = [self.create_project()]
alert_rule = self.create_alert_rule(
projects=[], include_all_projects=True, excluded_projects=excluded
)
result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer())
self.assert_alert_rule_serialized(alert_rule, result)
assert result["projects"] == [p.slug for p in projects]
assert result["excludedProjects"] == [p.slug for p in excluded]
alert_rule = self.create_alert_rule(projects=projects, include_all_projects=False)
result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer())
self.assert_alert_rule_serialized(alert_rule, result)
assert result["projects"] == [p.slug for p in projects]
assert result["excludedProjects"] == []
def test_triggers(self):
alert_rule = self.create_alert_rule()
other_alert_rule = self.create_alert_rule()
trigger = create_alert_rule_trigger(alert_rule, "test", AlertRuleThresholdType.ABOVE, 1000)
result = serialize([alert_rule, other_alert_rule], serializer=DetailedAlertRuleSerializer())
assert result[0]["triggers"] == [serialize(trigger)]
assert result[1]["triggers"] == []
| 45.311828
| 100
| 0.701709
| 3,775
| 0.895823
| 0
| 0
| 0
| 0
| 0
| 0
| 369
| 0.087565
|
12bae8e939e905a92184b3c60e3fd70c58c999c2
| 1,003
|
py
|
Python
|
mys/cli/subparsers/test.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
mys/cli/subparsers/test.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
mys/cli/subparsers/test.py
|
nsauzede/mys
|
5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267
|
[
"MIT"
] | null | null | null |
import os
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_optimize_argument
from ..utils import add_verbose_argument
from ..utils import build_prepare
from ..utils import run
def do_test(_parser, args, _mys_config):
build_prepare(args.verbose, args.optimize, args.no_ccache)
command = [
'make', '-f', 'build/Makefile', 'test', 'TEST=yes'
]
if os.getenv('MAKEFLAGS') is None:
command += ['-j', str(args.jobs)]
if args.debug:
command += ['TRANSPILE_DEBUG=--debug']
run(command, 'Building tests', args.verbose)
run(['./build/test'], 'Running tests', args.verbose)
def add_subparser(subparsers):
subparser = subparsers.add_parser(
'test',
description='Build and run tests.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_optimize_argument(subparser, 'debug')
add_no_ccache_argument(subparser)
subparser.set_defaults(func=do_test)
| 27.108108
| 62
| 0.698903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 162
| 0.161515
|
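The add_subparser wiring above is standard argparse; the sketch below shows how such a subcommand plugs into a top-level parser. The helper and callback here are simplified stand-ins, not the real mys utilities (the real do_test also receives the parser and the mys config).

import argparse

def add_verbose_argument(subparser):  # stand-in for the mys helper of the same name
    subparser.add_argument('-v', '--verbose', action='store_true')

def do_test(args):  # stand-in callback
    print('would build and run tests, verbose =', args.verbose)

parser = argparse.ArgumentParser(prog='mys')
subparsers = parser.add_subparsers(dest='command', required=True)
subparser = subparsers.add_parser('test', description='Build and run tests.')
add_verbose_argument(subparser)
subparser.set_defaults(func=do_test)

args = parser.parse_args(['test', '--verbose'])
args.func(args)  # prints: would build and run tests, verbose = True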
12bc9ffc8a5d1fd39d7381b5bb5f4a16fad4749b
| 14,579
|
py
|
Python
|
plugins/modules/nsxt_transport_node_collections.py
|
madhukark/ansible-for-nsxt
|
f75c698e24073305a968ce2f70739fee77a14bb2
|
[
"BSD-2-Clause"
] | null | null | null |
plugins/modules/nsxt_transport_node_collections.py
|
madhukark/ansible-for-nsxt
|
f75c698e24073305a968ce2f70739fee77a14bb2
|
[
"BSD-2-Clause"
] | null | null | null |
plugins/modules/nsxt_transport_node_collections.py
|
madhukark/ansible-for-nsxt
|
f75c698e24073305a968ce2f70739fee77a14bb2
|
[
"BSD-2-Clause"
] | 1
|
2021-12-03T08:26:09.000Z
|
2021-12-03T08:26:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_transport_node_collections
short_description: Create transport node collection by attaching Transport Node Profile to cluster.
description: When transport node collection is created the hosts which are part
of compute collection will be prepared automatically i.e. NSX Manager
attempts to install the NSX components on hosts. Transport nodes for these
hosts are created using the configuration specified in transport node
profile.
version_added: "2.7"
author: Rahul Raghuvanshi
options:
hostname:
description: Deployed NSX manager hostname.
required: true
type: str
username:
description: The username to authenticate with the NSX manager.
required: true
type: str
password:
description: The password to authenticate with the NSX manager.
required: true
type: str
cluster_name:
        description: Cluster Name
required: false
type: str
compute_manager_name:
        description: Compute Manager Name
required: false
type: str
description:
description: Description
required: true
type: str
display_name:
description: Display name
required: true
type: str
resource_type:
description: "Must be set to the value TransportNodeCollection"
required: true
type: str
state:
choices:
- present
- absent
description: "State can be either 'present' or 'absent'.
'present' is used to create or update resource.
'absent' is used to delete resource."
required: true
transport_node_profile_name:
description: Transport Node Profile Names
required: true
type: str
'''
EXAMPLES = '''
- name: Create transport node collection
nsxt_transport_node_collections:
hostname: "{{hostname}}"
username: "{{username}}"
password: "{{password}}"
validate_certs: False
display_name: "TNC1"
resource_type: "TransportNodeCollection"
description: "Transport Node Collections 1"
compute_manager_name: "VC1"
cluster_name: "cl1"
transport_node_profile_name: "TNP1"
state: present
'''
RETURN = '''# '''
import json, time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils._text import to_native
import ssl
import socket
import hashlib
def get_transport_node_collections_params(args=None):
args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
for key in args_to_remove:
args.pop(key, None)
for key, value in args.copy().items():
if value == None:
args.pop(key, None)
return args
def get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs):
try:
(rc, resp) = request(manager_url+ '/transport-node-collections', headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing transport-node-collections. Error [%s]' % (to_native(err)))
return resp
def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True):
try:
(rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err)))
for result in resp['results']:
if result.__contains__('display_name') and result['display_name'] == display_name:
return result['id']
if exit_if_not_found:
module.fail_json(msg='No id exist with display name %s' % display_name)
def get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name):
transport_node_collections = get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs)
for transport_node_collection in transport_node_collections['results']:
if transport_node_collection.__contains__('display_name') and transport_node_collection['display_name'] == display_name:
return transport_node_collection
return None
def wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs):
try:
while True:
(rc, resp) = request(manager_url+ '/transport-node-collections/%s'% id, headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
time.sleep(10)
except Exception as err:
time.sleep(5)
return
def get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_name):
try:
return get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
"/transport-node-profiles", transport_node_profile_name)
except Exception as err:
module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (transport_node_profile_name, to_native(err)))
def get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs, manager_name, cluster_name):
try:
(rc, resp) = request(manager_url+ '/fabric/compute-collections', headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
compute_manager_id = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
"/fabric/compute-managers", manager_name)
except Exception as err:
module.fail_json(msg='Error accessing compute collection id for manager %s, cluster %s. Error [%s]' % (manager_name, cluster_name, to_native(err)))
for result in resp['results']:
if result.__contains__('display_name') and result['display_name'] == cluster_name and \
result['origin_id'] == compute_manager_id:
return result['external_id']
module.fail_json(msg='No compute collection id exist with cluster name %s for compute manager %s' % (cluster_name, manager_name))
def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_params ):
compute_manager_name = transport_node_collection_params.pop('compute_manager_name', None)
compute_cluster_name = transport_node_collection_params.pop('cluster_name', None)
compute_collection_id = get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs,
compute_manager_name, compute_cluster_name)
transport_node_collection_params['compute_collection_id'] = compute_collection_id
transport_node_profile_name = transport_node_collection_params.pop('transport_node_profile_name', None)
transport_node_profile_id = get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs,
transport_node_profile_name)
transport_node_collection_params['transport_node_profile_id'] = transport_node_profile_id
return transport_node_collection_params
def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids):
existing_tnc = get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids['display_name'])
if existing_tnc is None:
return False
if existing_tnc['compute_collection_id'] == transport_node_collection_with_ids['compute_collection_id'] and \
existing_tnc['transport_node_profile_id'] != transport_node_collection_with_ids['transport_node_profile_id']:
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(display_name=dict(required=True, type='str'),
description=dict(required=True, type='str'),
resource_type=dict(required=True, type='str'),
transport_node_profile_name=dict(required=True, type='str'),
compute_manager_name=dict(required=False, type='str'),
cluster_name=dict(required=False, type='str'),
state=dict(required=True, choices=['present', 'absent']))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
transport_node_collections_params = get_transport_node_collections_params(module.params.copy())
state = module.params['state']
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
display_name = module.params['display_name']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
transport_node_collections_dict = get_transport_node_collection_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name)
transport_node_collection_id, revision = None, None
if transport_node_collections_dict:
transport_node_collection_id = transport_node_collections_dict['id']
revision = transport_node_collections_dict['_revision']
if state == 'present':
body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collections_params)
updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body)
headers = dict(Accept="application/json")
headers['Content-Type'] = 'application/json'
if not updated:
# add the transport_node_collections
request_data = json.dumps(transport_node_collections_params)
if module.check_mode:
module.exit_json(changed=True, debug_out=str(request_data), id='12345')
try:
if transport_node_collection_id:
module.exit_json(changed=False, id=transport_node_collection_id,
message="transport-node-collection with display_name %s already exist on cluster %s." % (module.params['display_name'], module.params['cluster_name']))
(rc, resp) = request(manager_url+ '/transport-node-collections', data=request_data, headers=headers, method='POST',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg="Failed to add transport_node_collections. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
module.exit_json(changed=True, id=resp["id"], body= str(resp), message="transport-node-collection created for cluster %s." % module.params['cluster_name'])
else:
if module.check_mode:
module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=transport_node_collection_id)
transport_node_collections_params['_revision'] = revision # update current revision
request_data = json.dumps(transport_node_collections_params)
id = transport_node_collection_id
try:
(rc, resp) = request(manager_url+ '/transport-node-collections/%s' % id, data=request_data, headers=headers, method='PUT',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg="Failed to update transport_node_collections with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err)))
module.exit_json(changed=True, id=resp["id"], body= str(resp), message="transport-node-collection with Compute collection fabric template id %s updated." % id)
elif state == 'absent':
# delete the array
id = transport_node_collection_id
if id is None:
module.exit_json(changed=False, msg='No transport-node-collection exist with display_name %s' % display_name)
if module.check_mode:
module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=id)
try:
(rc, resp) = request(manager_url + "/transport-node-collections/%s" % id, method='DELETE',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs)
except Exception as err:
module.fail_json(msg="Failed to delete transport-node-collection with name %s. Error[%s]." % (display_name, to_native(err)))
wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs)
module.exit_json(changed=True, id=id, message="transport-node-collection with name %s deleted." % display_name)
if __name__ == '__main__':
main()
| 52.442446
| 183
| 0.721243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,240
| 0.359421
|
12bedc5672fe578c7205936e96d0685f45374da0
| 16,945
|
py
|
Python
|
training/loss.py
|
drboog/Lafite
|
10e109b9f46646ab793e0a5f38386af3012e9636
|
[
"MIT"
] | 45
|
2022-03-10T23:49:44.000Z
|
2022-03-31T21:47:45.000Z
|
training/loss.py
|
drboog/Lafite
|
10e109b9f46646ab793e0a5f38386af3012e9636
|
[
"MIT"
] | 7
|
2022-03-13T15:13:18.000Z
|
2022-03-31T16:57:38.000Z
|
training/loss.py
|
drboog/Lafite
|
10e109b9f46646ab793e0a5f38386af3012e9636
|
[
"MIT"
] | 8
|
2022-03-10T23:49:29.000Z
|
2022-03-31T18:20:17.000Z
|
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
import torch.nn.functional as F
import torchvision.transforms as T
import clip
import dnnlib
import random
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, real_features): # to be overridden by subclass
raise NotImplementedError()
class Model(torch.nn.Module):
def __init__(self, device):
super(Model, self).__init__()
self.linear1 = torch.nn.Linear(512, 1024)
self.linear2 = torch.nn.Linear(1024, 1024)
self.linear3 = torch.nn.Linear(1024, 1024)
self.linear4 = torch.nn.Linear(1024, 512)
self.linear5 = torch.nn.Linear(512, 1024)
self.linear6 = torch.nn.Linear(1024, 1024)
self.linear7 = torch.nn.Linear(1024, 1024)
self.linear8 = torch.nn.Linear(1024, 512)
self.device = device
def forward(self, x):
mu = F.leaky_relu(self.linear1(x))
mu = F.leaky_relu(self.linear2(mu))
mu = F.leaky_relu(self.linear3(mu))
mu = self.linear4(mu)
std = F.leaky_relu(self.linear5(x))
std = F.leaky_relu(self.linear6(std))
std = F.leaky_relu(self.linear7(std))
std = self.linear8(std)
return mu + std.exp()*(torch.randn(mu.shape).to(self.device))
def loss(self, real, fake, temp=0.1, lam=0.5):
sim = torch.cosine_similarity(real.unsqueeze(1), fake.unsqueeze(0), dim=-1)
if temp > 0.:
sim = torch.exp(sim/temp)
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return -(lam*torch.log(sim1) + (1.-lam)*torch.log(sim2))
elif lam == 0:
return -torch.log(sim2)
else:
return -torch.log(sim1)
else:
return -torch.diagonal(sim)
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
def __init__(self, device, G_mapping, G_synthesis, G_mani, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2):
super().__init__()
self.device = device
self.G_mapping = G_mapping
self.G_synthesis = G_synthesis
self.G_mani = G_mani
self.D = D
self.augment_pipe = augment_pipe
self.style_mixing_prob = style_mixing_prob
self.r1_gamma = r1_gamma
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_weight = pl_weight
self.pl_mean = torch.zeros([], device=device)
clip_model, _ = clip.load("ViT-B/32", device=device) # Load CLIP model here
self.clip_model = clip_model.eval()
self.mapper = Model(device)
self.mapper.load_state_dict(torch.load('./implicit.0.001.64.True.0.0.pth', map_location='cpu')) # path to the noise mapping network
self.mapper.to(device)
def run_G(self, z, c, sync, txt_fts=None, ):
with misc.ddp_sync(self.G_mapping, sync):
ws = self.G_mapping(z, c)
if self.style_mixing_prob > 0:
new_ws = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = new_ws[:, cutoff:]
with misc.ddp_sync(self.G_synthesis, sync):
img = self.G_synthesis(ws, fts=txt_fts)
return img, ws
def run_D(self, img, c, sync, fts=None):
if self.augment_pipe is not None:
img = self.augment_pipe(img)
with misc.ddp_sync(self.D, sync):
logits, d_fts = self.D(img, c, fts=fts)
return logits, d_fts
def normalize(self):
return T.Compose([
T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def full_preprocess(self, img, mode='bicubic', ratio=0.5):
full_size = img.shape[-2]
if full_size < 224:
pad_1 = torch.randint(0, 224-full_size, ())
pad_2 = torch.randint(0, 224-full_size, ())
m = torch.nn.ConstantPad2d((pad_1, 224-full_size-pad_1, pad_2, 224-full_size-pad_2), 1.)
reshaped_img = m(img)
else:
cut_size = torch.randint(int(ratio*full_size), full_size, ())
left = torch.randint(0, full_size-cut_size, ())
top = torch.randint(0, full_size-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def custom_preprocess(self, img, ind, cut_num, mode='bicubic'): # more to be implemented here
full_size = img.shape[-2]
grid = np.sqrt(cut_num)
most_right = min(int((ind%grid + 1)*full_size/grid), full_size)
most_bottom = min(int((ind//grid + 1)*full_size/grid), full_size)
cut_size = torch.randint(int(full_size//(grid+1)), int(min(min(full_size//2, most_right), most_bottom)), ()) # TODO: tune this later
left = torch.randint(0, most_right-cut_size, ())
top = torch.randint(0, most_bottom-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def contra_loss(self, temp, mat1, mat2, lam):
sim = torch.cosine_similarity(mat1.unsqueeze(1), mat2.unsqueeze(0), dim=-1)
if temp > 0.:
sim = torch.exp(sim/temp) # This implementation is incorrect, it should be sim=sim/temp.
# However, this incorrect implementation can reproduce our results with provided hyper-parameters.
# If you want to use the correct implementation, please manually revise it.
# The correct implementation should lead to better results, but don't use our provided hyper-parameters, you need to carefully tune lam, temp, itd, itc and other hyper-parameters
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return lam*torch.log(sim1) + (1.-lam)*torch.log(sim2)
elif lam == 0:
return torch.log(sim2)
else:
return torch.log(sim1)
else:
return torch.diagonal(sim)
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, img_fts, txt_fts, lam, temp, gather, d_use_fts, itd, itc, iid, iic, mixing_prob=0.):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
do_Gmain = (phase in ['Gmain', 'Gboth'])
do_Dmain = (phase in ['Dmain', 'Dboth'])
do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
# augmentation
aug_level_1 = 0.1
aug_level_2 = 0.75
# print(torch.cosine_similarity(img_fts, txt_fts, dim=-1))
mixing_prob = mixing_prob # probability to use img_fts instead of txt_fts
random_noise = torch.randn(txt_fts.shape).to(img_fts.device)# + torch.randn((1, 512)).to(img_fts.device)
random_noise = random_noise/random_noise.norm(dim=-1, keepdim=True)
txt_fts_ = txt_fts*(1-aug_level_1) + random_noise*aug_level_1
txt_fts_ = txt_fts_/txt_fts_.norm(dim=-1, keepdim=True)
if txt_fts.shape[-1] == img_fts.shape[-1]:
# # Gaussian purterbation
img_fts_ = img_fts*(1-aug_level_2) + random_noise*aug_level_2
# learned generation
# with torch.no_grad():
# normed_real_full_img = self.full_preprocess(real_img, ratio=0.99)
# img_fts_real_full_ = self.clip_model.encode_image(normed_real_full_img).float()
# img_fts_real_full_ = img_fts_real_full_/img_fts_real_full_.norm(dim=-1, keepdim=True)
# # img_fts_real_full_ = img_fts
# img_fts_ = self.mapper(img_fts_real_full_) + img_fts_real_full_
img_fts_ = img_fts_/img_fts_.norm(dim=-1, keepdim=True)
if mixing_prob > 0.99:
txt_fts_ = img_fts_
elif mixing_prob < 0.01:
txt_fts_ = txt_fts_
else:
txt_fts_ = torch.where(torch.rand([txt_fts_.shape[0], 1], device=txt_fts_.device) < mixing_prob, img_fts_, txt_fts_)
img_img_d = iid # discriminator
img_img_c = iic # clip
img_txt_d = itd # discriminator
img_txt_c = itc # clip
temp = temp
lam = lam
def gather_tensor(input_tensor, gather_or_not):
if gather_or_not:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
output_tensor = [torch.zeros_like(input_tensor) for _ in range(world_size)]
torch.distributed.all_gather(output_tensor, input_tensor)
output_tensor[rank] = input_tensor
# # print(torch.cat(output_tensor).size())
return torch.cat(output_tensor)
else:
return input_tensor
txt_fts_all = gather_tensor(txt_fts_, gather)
# Gmain: Maximize logits for generated images.
if do_Gmain:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=(sync and not do_Gpl)) # May get synced by Gpl.
gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_)
gen_d_fts_all = gather_tensor(gen_d_fts, gather)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
normed_gen_full_img = self.full_preprocess(gen_img)
img_fts_gen_full = self.clip_model.encode_image(normed_gen_full_img)
img_fts_gen_full = img_fts_gen_full/img_fts_gen_full.norm(dim=-1, keepdim=True)
img_fts_gen_full_all = gather_tensor(img_fts_gen_full, gather)
img_fts_all = gather_tensor(img_fts, gather)
if img_txt_c > 0.:
clip_loss_img_txt = self.contra_loss(temp, img_fts_gen_full_all, txt_fts_all, lam)
loss_Gmain = loss_Gmain - img_txt_c*clip_loss_img_txt.mean()
if img_img_c > 0.:
clip_loss_img_img = self.contra_loss(temp, img_fts_gen_full_all, img_fts_all, lam)
loss_Gmain = loss_Gmain - img_img_c*clip_loss_img_img.mean()
if img_txt_d > 0.:
loss_Gmain = loss_Gmain - img_txt_d*self.contra_loss(temp, gen_d_fts_all, txt_fts_all, lam).mean()
if img_img_d > 0.:
with torch.no_grad():
_, g_real_d_fts = self.run_D(real_img.detach(), real_c, sync=False, fts=txt_fts_)
g_real_d_fts_all = gather_tensor(g_real_d_fts, gather)
loss_Gmain = loss_Gmain - img_img_d*self.contra_loss(temp, g_real_d_fts_all, gen_d_fts_all, lam).mean()
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
loss_Gmain.mean().mul(gain).backward()
# Gpl: Apply path length regularization.
if do_Gpl:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
txt_fts_0 = txt_fts_[:batch_size]
txt_fts_0.requires_grad_()
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], txt_fts=txt_fts_0, sync=sync)
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
if d_use_fts:
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws, txt_fts_0], create_graph=True, only_inputs=True)[0]
else:
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
(gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if do_Dmain:
with torch.autograd.profiler.record_function('Dgen_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=False)
gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_) # Gets synced by loss_Dreal.
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
with torch.autograd.profiler.record_function('Dgen_backward'):
loss_Dgen.mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if do_Dmain or do_Dr1:
name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
real_logits, real_d_fts = self.run_D(real_img_tmp, real_c, sync=sync, fts=txt_fts_)
training_stats.report('Loss/scores/real', real_logits)
training_stats.report('Loss/signs/real', real_logits.sign())
loss_Dreal = 0
if do_Dmain:
loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
if img_txt_d > 0.:
real_d_fts_all = gather_tensor(real_d_fts, gather)
loss_Dreal = loss_Dreal - img_txt_d*self.contra_loss(temp, real_d_fts_all, txt_fts_all, lam).mean()
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
loss_Dr1 = 0
if do_Dr1:
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
with torch.autograd.profiler.record_function(name + '_backward'):
(real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# ----------------------------------------------------------------------------
| 50.281899
| 190
| 0.5928
| 16,450
| 0.970788
| 0
| 0
| 0
| 0
| 0
| 0
| 2,456
| 0.14494
|
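The comments inside contra_loss above note that the released code exponentiates the similarities (sim = exp(sim/temp)) where the intended formulation divides by the temperature before the softmax. The following standalone sketch is one reading of that corrected variant; as the authors warn, it is not what reproduces the published numbers and would need its own hyper-parameter tuning.

import torch
import torch.nn.functional as F

def contra_loss_corrected(mat1, mat2, temp=0.5, lam=0.5):
    # Pairwise cosine similarities, scaled by the temperature before the softmax.
    sim = torch.cosine_similarity(mat1.unsqueeze(1), mat2.unsqueeze(0), dim=-1)
    if temp > 0.:
        sim = sim / temp
        sim1 = torch.diagonal(F.softmax(sim, dim=1))
        sim2 = torch.diagonal(F.softmax(sim, dim=0))
        if 0. < lam < 1.:
            return lam * torch.log(sim1) + (1. - lam) * torch.log(sim2)
        elif lam == 0:
            return torch.log(sim2)
        return torch.log(sim1)
    return torch.diagonal(sim)

# Usage mirrors the original: the caller subtracts the mean, e.g.
# loss = loss - weight * contra_loss_corrected(fts_a, fts_b, temp, lam).mean()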
12bfd9fea84125596f1417fe60855b47416a33a6
| 4,203
|
py
|
Python
|
lib/oitool/fetchoi.py
|
stockalgo/oichart
|
962c373b34fcef09cc58abcf6e252dd746d413a1
|
[
"MIT"
] | 8
|
2021-02-05T21:54:26.000Z
|
2022-03-26T19:44:42.000Z
|
lib/oitool/fetchoi.py
|
stockalgo/oichart
|
962c373b34fcef09cc58abcf6e252dd746d413a1
|
[
"MIT"
] | 3
|
2021-03-15T18:41:12.000Z
|
2021-12-18T09:23:47.000Z
|
lib/oitool/fetchoi.py
|
stockalgo/oichart
|
962c373b34fcef09cc58abcf6e252dd746d413a1
|
[
"MIT"
] | 5
|
2021-03-16T12:28:37.000Z
|
2021-12-17T17:35:16.000Z
|
import time
import logging
from bandl.nse_data import NseData
from influxdb import InfluxDBClient
class FetchOI:
def __init__(self,source=None,influxdb_client=None,database="oitool",log_path=None,logLevel='info'):
"""[summary]
:param source: stock broker
:type source: string, optional
:param influxdb_client: influxdb client object, defaults to None
:type influxdb_client: object, optional
:param database: name of databse, defaults to "oitool"
:type database: str, optional
:param log_path: log file path, defaults to None
:type log_path: str, optional
:param logLevel: log level, defaults to 'info'
:type logLevel: str, optional
:raises Exception: database error/ bandl error
"""
try:
if not influxdb_client:
self.client = InfluxDBClient(database=database)
else:
self.client = influxdb_client
self.client.create_database(database)
if not source:
self.feeder = NseData()
else:
raise("Sources will be supported in future release")
# setting logs
if not log_path:
log_path = "OItool_logs_"+ time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
log_file = logging.FileHandler(log_path, 'a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_file.setFormatter(formatter)
log = logging.getLogger() # root logger
log.addHandler(log_file) # set the new handler
numeric_log_level = getattr(logging, logLevel.upper(), 10)
log.setLevel(numeric_log_level)
except Exception as err:
logging.error('%s raised an error', str(err))
raise Exception("Error occurred in OItool initialization: ",str(err))
def subscribe(self,symbol,level=10):
""" To register ticker for data
:param symbol: ticker to fetch option data
:type symbol: string
:param level: number of option strike from ATM
:type symbols: interger
"""
self.symbol = symbol
self.level = level
def create_influx_data(self,price,option_data,measurement):
tags = {"price":price}
fields = {}
for keys in option_data:
fields[str(keys)+" ce"] = option_data[keys]["CE"]["openInterest"]
fields[str(keys)+" pe"] = option_data[keys]["PE"]["openInterest"]
logging.info(fields)
influx_json = [{"measurement": measurement,"tags":tags,"fields":fields}]
return influx_json
    def get_option_data(self, symbol, strikes=None, expiry_date=None):
return self.feeder.get_option_data(symbol=symbol,strikes=strikes,expiry_date=expiry_date)
def start(self,interval=90,runtime=21600):
"""To start fetching data into influxdb
:param interval: wait between data capture, defaults to 90 Seconds
:type interval: int, optional
:param runtime: runtime for script, defaults to 21600
:type runtime: int, optional
:raises Exception: InfluxDb error/ bandl error
"""
if not self.symbol:
raise Exception ("Symbol not subscribed.")
starttime = time.time()
strikes = self.feeder.get_oc_strike_prices(self.symbol,level=self.level)
prev_dict = None
while(True):
try:
price,oc_data = self.feeder.get_option_data(self.symbol,strikes=strikes)
if prev_dict == oc_data:
time.sleep(15)
continue
else:
prev_dict = oc_data
formated_data = self.create_influx_data(price,option_data=oc_data,measurement=self.symbol)
self.client.write_points(formated_data)
except Exception as exc:
logging.debug(str(exc))
print("Error Occurred,Don't worry. We try again. Error: ",str(exc))
timenow = time.time()
if(timenow - starttime >= runtime):
break
time.sleep(interval)
| 39.280374
| 106
| 0.602665
| 4,103
| 0.976207
| 0
| 0
| 0
| 0
| 0
| 0
| 1,516
| 0.360695
|
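A hedged usage sketch for the FetchOI class above; the import path is a guess from the repository layout, and a reachable local InfluxDB instance plus the bandl package are assumed.

from lib.oitool.fetchoi import FetchOI

fetcher = FetchOI(database='oitool', logLevel='debug')
fetcher.subscribe('NIFTY', level=10)       # 10 option strikes either side of ATM
fetcher.start(interval=90, runtime=21600)  # poll every 90 seconds for about 6 hours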
12c0367fe0f1278ce33a6a9b512ae1509254147d
| 1,667
|
py
|
Python
|
notebooks/HelperFunctions/RunModel.py
|
hh2110/continual-ml-stocks
|
2a2baa330cd418b3cfb7eda8464c6b5b67bc608f
|
[
"CC0-1.0"
] | null | null | null |
notebooks/HelperFunctions/RunModel.py
|
hh2110/continual-ml-stocks
|
2a2baa330cd418b3cfb7eda8464c6b5b67bc608f
|
[
"CC0-1.0"
] | null | null | null |
notebooks/HelperFunctions/RunModel.py
|
hh2110/continual-ml-stocks
|
2a2baa330cd418b3cfb7eda8464c6b5b67bc608f
|
[
"CC0-1.0"
] | null | null | null |
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def do_ml(merged_df, test_size, ml_model, **kwargs):
train_data = merged_df.drop(
columns=[
"lagged_poc",
"price_date",
"label_id",
# "Low",
# "High",
# "Open",
# "Close",
# "Adj Close",
# "positive_poc",
"negative_poc",
]
)
target = merged_df[["lagged_poc"]]
X_train, X_test, y_train, y_test = train_test_split(
np.array(train_data), np.array(target), test_size=test_size, random_state=1
)
model = ml_model(**kwargs)
# Fit on training data
model.fit(X_train, np.ravel(y_train))
# Actual class predictions
predictions = model.predict(X_test)
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
accuracy_score = metrics.accuracy_score(y_test, predictions)
# feature importance
plot_feature_importance(model, train_data)
return confusion_matrix, accuracy_score
def plot_feature_importance(model, train_data):
featureImportances = model.feature_importances_
fiDF = pd.DataFrame()
fiDF["fi"] = featureImportances
fiDF["f"] = train_data.columns
fiDF = fiDF.sort_values("fi", ascending=False)
fiDF.head()
nf = 50
plt.rcParams.update({"font.size": 8})
plt.figure(figsize=(8, 4))
plt.plot(fiDF.f.iloc[0:nf], fiDF.fi.iloc[0:nf])
plt.xticks(rotation=90)
plt.show()
| 27.783333
| 83
| 0.652669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.130174
|
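A hedged usage sketch for do_ml above on a small synthetic frame. The column names mirror exactly what the function drops; RandomForestClassifier is just one model that exposes feature_importances_, and the import assumes RunModel.py is on the path. Note that plot_feature_importance will open a matplotlib window.

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from RunModel import do_ml  # assumes RunModel.py is importable

rng = np.random.default_rng(0)
n = 200
merged_df = pd.DataFrame({
    'lagged_poc': rng.integers(0, 2, n),                   # target label
    'price_date': pd.date_range('2020-01-01', periods=n),  # dropped by do_ml
    'label_id': np.arange(n),                              # dropped by do_ml
    'negative_poc': rng.integers(0, 2, n),                 # dropped by do_ml
    'feature_a': rng.normal(size=n),
    'feature_b': rng.normal(size=n),
})

cm, acc = do_ml(merged_df, test_size=0.25, ml_model=RandomForestClassifier,
                n_estimators=100, random_state=1)
print(cm)
print(acc)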
12c19863b8bc11caf71dfdd9f3bff254268754da
| 7,299
|
py
|
Python
|
tools/build_defs/pkg/make_rpm.py
|
jpieper-tri/bazel
|
eef80048e2c59e3be974144ce9cd90b9f90294fb
|
[
"Apache-2.0"
] | 1
|
2018-03-27T17:18:20.000Z
|
2018-03-27T17:18:20.000Z
|
tools/build_defs/pkg/make_rpm.py
|
Corroler/bazel
|
073ea095a6c6a826ccdbbce1b213de47115e701a
|
[
"Apache-2.0"
] | 2
|
2018-11-06T01:01:16.000Z
|
2019-04-10T02:25:49.000Z
|
tools/build_defs/pkg/make_rpm.py
|
Corroler/bazel
|
073ea095a6c6a826ccdbbce1b213de47115e701a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple cross-platform helper to create an RPM package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fileinput
import os
import re
import shutil
import subprocess
import sys
from tempfile import mkdtemp
# pylint: disable=g-direct-third-party-import
from third_party.py import gflags
gflags.DEFINE_string('name', '', 'The name of the software being packaged.')
gflags.DEFINE_string('version', '',
'The version of the software being packaged.')
gflags.DEFINE_string('release', '',
'The release of the software being packaged.')
gflags.DEFINE_string('arch', '',
'The CPU architecture of the software being packaged.')
gflags.DEFINE_string('spec_file', '',
'The file containing the RPM specification.')
gflags.DEFINE_string('out_file', '',
'The destination to save the resulting RPM file to.')
# Setup to safely create a temporary directory and clean it up when done.
@contextlib.contextmanager
def Cd(newdir, cleanup=lambda: True):
"""Change the current working directory.
This will run the provided cleanup function when the context exits and the
previous working directory is restored.
Args:
newdir: The directory to change to. This must already exist.
cleanup: An optional cleanup function to be executed when the context exits.
Yields:
Nothing.
"""
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
cleanup()
@contextlib.contextmanager
def Tempdir():
"""Create a new temporary directory and change to it.
The temporary directory will be removed when the context exits.
Yields:
The full path of the temporary directory.
"""
dirpath = mkdtemp()
def Cleanup():
shutil.rmtree(dirpath)
with Cd(dirpath, Cleanup):
yield dirpath
def GetFlagValue(flagvalue, strip=True):
if flagvalue:
if flagvalue[0] == '@':
with open(flagvalue[1:], 'r') as f:
flagvalue = f.read()
if strip:
return flagvalue.strip()
return flagvalue
WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE)
def FindOutputFile(log):
"""Find the written file from the log information."""
m = WROTE_FILE_RE.search(log)
if m:
return m.group('rpm_path')
return None
def CopyAndRewrite(input_file, output_file, replacements=None):
"""Copies the given file and optionally rewrites with replacements.
Args:
input_file: The file to copy.
output_file: The file to write to.
replacements: A dictionary of replacements.
Keys are prefixes scan for, values are the replacements to write after
the prefix.
"""
with open(output_file, 'w') as output:
for line in fileinput.input(input_file):
if replacements:
for prefix, text in replacements.items():
if line.startswith(prefix):
line = prefix + ' ' + text + '\n'
break
output.write(line)
def Which(program):
def IsExe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ['PATH'].split(os.pathsep):
filename = os.path.join(path, program)
if IsExe(filename):
return filename
return None
class NoRpmbuildFound(Exception):
pass
def FindRpmbuild():
path = Which('rpmbuild')
if path:
return path
else:
raise NoRpmbuildFound()
class RpmBuilder(object):
"""A helper class to manage building the RPM file."""
SOURCE_DIR = 'SOURCES'
BUILD_DIR = 'BUILD'
TEMP_DIR = 'TMP'
DIRS = [SOURCE_DIR, BUILD_DIR, TEMP_DIR]
def __init__(self, name, version, release, arch):
self.name = name
self.version = GetFlagValue(version)
self.release = GetFlagValue(release)
self.arch = arch
self.files = []
self.rpmbuild_path = FindRpmbuild()
self.rpm_path = None
def AddFiles(self, files):
"""Add a set of files to the current RPM."""
self.files += files
def SetupWorkdir(self, spec_file, original_dir):
"""Create the needed structure in the workdir."""
# Create directory structure.
for name in RpmBuilder.DIRS:
if not os.path.exists(name):
os.makedirs(name, 0o777)
# Copy the files.
for f in self.files:
dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f))
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0o777)
shutil.copy(os.path.join(original_dir, f), dst_dir)
# Copy the spec file, updating with the correct version.
spec_origin = os.path.join(original_dir, spec_file)
self.spec_file = os.path.basename(spec_file)
replacements = {}
if self.version:
replacements['Version:'] = self.version
if self.release:
replacements['Release:'] = self.release
CopyAndRewrite(spec_origin, self.spec_file, replacements)
def CallRpmBuild(self, dirname):
"""Call rpmbuild with the correct arguments."""
args = [
self.rpmbuild_path,
'--define',
'_topdir %s' % dirname,
'--define',
'_tmppath %s/TMP' % dirname,
'--bb',
self.spec_file,
]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = p.communicate()[0]
if p.returncode == 0:
# Find the created file.
self.rpm_path = FindOutputFile(output)
if p.returncode != 0 or not self.rpm_path:
print('Error calling rpmbuild:')
print(output)
# Return the status.
return p.returncode
def SaveResult(self, out_file):
"""Save the result RPM out of the temporary working directory."""
if self.rpm_path:
shutil.copy(self.rpm_path, out_file)
print('Saved RPM file to %s' % out_file)
else:
print('No RPM file created.')
def Build(self, spec_file, out_file):
"""Build the RPM described by the spec_file."""
print('Building RPM for %s at %s' % (self.name, out_file))
original_dir = os.getcwd()
spec_file = os.path.join(original_dir, spec_file)
out_file = os.path.join(original_dir, out_file)
with Tempdir() as dirname:
self.SetupWorkdir(spec_file, original_dir)
status = self.CallRpmBuild(dirname)
self.SaveResult(out_file)
return status
def main(argv=()):
try:
builder = RpmBuilder(FLAGS.name, FLAGS.version, FLAGS.release, FLAGS.arch)
builder.AddFiles(argv[1:])
return builder.Build(FLAGS.spec_file, FLAGS.out_file)
except NoRpmbuildFound:
print('ERROR: rpmbuild is required but is not present in PATH')
return 1
if __name__ == '__main__':
FLAGS = gflags.FLAGS
main(FLAGS(sys.argv))
| 27.43985
| 80
| 0.68023
| 2,863
| 0.392246
| 829
| 0.113577
| 883
| 0.120975
| 0
| 0
| 2,770
| 0.379504
|
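GetFlagValue above implements an '@file' indirection: a flag value starting with '@' is read from the named file and, by default, stripped. The sketch below copies that small helper verbatim so it can be exercised without the module's gflags dependency.

def GetFlagValue(flagvalue, strip=True):  # copied from make_rpm.py for illustration
    if flagvalue:
        if flagvalue[0] == '@':
            with open(flagvalue[1:], 'r') as f:
                flagvalue = f.read()
        if strip:
            return flagvalue.strip()
    return flagvalue

with open('version.txt', 'w') as f:
    f.write('1.2.3\n')

assert GetFlagValue('1.2.3') == '1.2.3'          # plain value passes through
assert GetFlagValue('@version.txt') == '1.2.3'   # '@' prefix reads and strips the file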
12c1f75f883cd400635b90784e88c06bdf2c4be4
| 2,739
|
py
|
Python
|
data/datasets/gb_100.py
|
CharleyZhao123/graceful-few-shot
|
fae8170158a7a39ead7da40fecd787fea4abcf1a
|
[
"MIT"
] | 1
|
2021-08-11T12:56:29.000Z
|
2021-08-11T12:56:29.000Z
|
data/datasets/gb_100.py
|
CharleyZhao123/graceful-few-shot
|
fae8170158a7a39ead7da40fecd787fea4abcf1a
|
[
"MIT"
] | null | null | null |
data/datasets/gb_100.py
|
CharleyZhao123/graceful-few-shot
|
fae8170158a7a39ead7da40fecd787fea4abcf1a
|
[
"MIT"
] | null | null | null |
import os
import pickle
import random
from torch.utils.data import Dataset
from .datasets import dataset_register
default_split = {
'train': 0.7,
'val': 0.3,
}
@dataset_register('gb-100')
class GB100(Dataset):
def __init__(self, root_path, split='train', split_method='novel', **kwargs):
data_file_name = 'gb_dataset.pickle'
with open(os.path.join(root_path, data_file_name), 'rb') as f:
pack = pickle.load(f, encoding='latin1')
        # Image tensors already run through the default preprocessing [Resize, ToTensor, normalize]; they can be fed straight into the network
default_data = pack['data']
feature = pack['feature']
imgname = pack['imgname']
origin_label = pack['origin_label']
logits = pack['logits']
gb_label = pack['gb_label']
        # Split the data
g_index = []
b_index = []
for i, l in enumerate(gb_label):
if l == 1.0:
g_index.append(i)
else:
b_index.append(i)
if split_method == 'random':
            # Randomly sample and split the data
random.seed(0)
train_g_index = random.sample(g_index, int(
len(g_index)*default_split['train']))
val_g_index = list(set(g_index).difference(set(train_g_index)))
random.seed(1)
train_b_index = random.sample(b_index, int(
len(b_index)*default_split['train']))
val_b_index = list(set(b_index).difference(set(train_b_index)))
train_index = train_g_index + train_b_index
val_index = val_g_index + val_b_index
else:
            # The first n classes form the training set, the remaining 64-n classes form the validation set
t_class_num = int(default_split['train'] * 64) # n
v_class_num = 64 - t_class_num
train_g_index = g_index[:100*t_class_num]
val_g_index = g_index[100*t_class_num:]
train_b_index = b_index[:100*t_class_num]
val_b_index = b_index[100*t_class_num:]
train_index = train_g_index + train_b_index
val_index = val_g_index + val_b_index
if split == 'train':
self.index_list = train_index
else:
self.index_list = val_index
self.data = default_data
self.feature = feature
self.gb_label = gb_label
def __len__(self):
return len(self.index_list)
def __getitem__(self, i):
index = self.index_list[i]
return self.data[index], self.feature[index], int(self.gb_label[index])
if __name__ == '__main__':
gb_100 = GB100(
root_path='/space1/zhaoqing/dataset/fsl/gb-100', split='val', split_method='novel')
print(len(gb_100))
# random
# val 3840
# train 8960
# novel
# val 4000
# train 8800
| 28.831579
| 91
| 0.588536
| 2,379
| 0.839153
| 0
| 0
| 2,407
| 0.84903
| 0
| 0
| 484
| 0.170723
|
12c2d9d6cce98782d3ab5c1e821708313828e9f6
| 594
|
py
|
Python
|
examples/analyze-outdated.py
|
duzvik/project-freta
|
6c96b5d9af98380d695f0ad1c1636021793f30d2
|
[
"CC-BY-4.0",
"MIT"
] | 67
|
2020-07-06T20:18:05.000Z
|
2022-03-27T15:00:16.000Z
|
examples/analyze-outdated.py
|
hhfdserth/project-freta
|
b552267f87a4f5e4796ece6865232853d62f227c
|
[
"CC-BY-4.0",
"MIT"
] | 2
|
2020-07-06T23:35:47.000Z
|
2020-07-14T15:22:47.000Z
|
examples/analyze-outdated.py
|
hhfdserth/project-freta
|
b552267f87a4f5e4796ece6865232853d62f227c
|
[
"CC-BY-4.0",
"MIT"
] | 21
|
2020-04-07T22:37:52.000Z
|
2021-11-10T08:27:38.000Z
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
# Re-analyze all images that don't have latest version of the analysis available
from freta.api import Freta
def main():
freta = Freta()
versions = freta.versions()
for image in freta.image.list():
if (
image["state"] == "Report available"
and image["analysis_version"] != versions["analysis"]
):
print("redoing %s" % image["image_id"])
freta.image.analyze(image["image_id"])
if __name__ == "__main__":
main()
| 23.76
| 80
| 0.616162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.452862
|
12c342b7aef5ffeb0a48559a00dc029a6ad70253
| 4,041
|
py
|
Python
|
utils/utils_fit.py
|
bubbliiiing/faster-rcnn-keras
|
aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6
|
[
"MIT"
] | 282
|
2020-02-25T00:19:28.000Z
|
2022-03-20T08:14:20.000Z
|
utils/utils_fit.py
|
codertcm/faster-rcnn-keras
|
aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6
|
[
"MIT"
] | 46
|
2020-02-24T13:17:40.000Z
|
2022-03-12T00:59:15.000Z
|
utils/utils_fit.py
|
codertcm/faster-rcnn-keras
|
aa1eb5e974785646b9fd86bfd269f2b6c12ec0e6
|
[
"MIT"
] | 123
|
2020-02-23T09:28:36.000Z
|
2022-03-16T01:43:46.000Z
|
import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_loss = 0
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration)
rpn_cls_loss += loss_class[1]
rpn_loc_loss += loss_class[2]
roi_cls_loss += loss_class[3]
roi_loc_loss += loss_class[4]
total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss
pbar.set_postfix(**{'total' : total_loss / (iteration + 1),
'rpn_cls' : rpn_cls_loss / (iteration + 1),
'rpn_loc' : rpn_loc_loss / (iteration + 1),
'roi_cls' : roi_cls_loss / (iteration + 1),
'roi_loc' : roi_loc_loss / (iteration + 1),
'lr' : K.get_value(model_rpn.optimizer.lr)})
pbar.update(1)
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
val_loss += loss_class[0]
pbar.set_postfix(**{'total' : val_loss / (iteration + 1)})
pbar.update(1)
logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val}
loss_history.on_epoch_end([], logs)
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
| 44.406593
| 153
| 0.554318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 310
| 0.076714
|
12c35e34c837e4d87b7e6155a3d32986c86a463f
| 88
|
py
|
Python
|
__init__.py
|
sbalen/TrafficSignsDataset
|
39ae40a0d307ee83af57f70eed43c38bc5d25233
|
[
"Apache-2.0"
] | 1
|
2021-05-05T14:23:34.000Z
|
2021-05-05T14:23:34.000Z
|
__init__.py
|
sbalen/TrafficSignsDataset
|
39ae40a0d307ee83af57f70eed43c38bc5d25233
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
sbalen/TrafficSignsDataset
|
39ae40a0d307ee83af57f70eed43c38bc5d25233
|
[
"Apache-2.0"
] | null | null | null |
"""TrafficSignDataset dataset."""
from .TrafficSignsDataset import Trafficsignsdataset
| 22
| 52
| 0.829545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.375
|
12c3f8688909dadef43a9224619f1323d1d373b9
| 972
|
py
|
Python
|
exercicios-Python/ex042.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
exercicios-Python/ex042.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
exercicios-Python/ex042.py
|
pedrosimoes-programmer/exercicios-python
|
150de037496d63d76086678d87425a8ccfc74573
|
[
"MIT"
] | null | null | null |
# Redo CHALLENGE 035 on triangles, adding a feature that shows which type of triangle will be formed:
# - EQUILATERAL: all sides equal
# - ISOSCELES: two sides equal, one different
# - SCALENE: all sides different
print('-' * 20, 'Programa Analisador de Triângulos', '-' * 20)
seg1 = float(input('Digite o valor do primeiro segmento: '))
seg2 = float(input('Digite o valor do segundo segmento: '))
seg3 = float(input('Digite o valor do terceiro segmento: '))
if seg1 < seg2 + seg3 and seg2 < seg1 + seg3 and seg3 < seg1 + seg2:
    if seg1 == seg2 == seg3:  # all three segments equal
print('Os segmentos PODEM formar um triângulo do tipo EQUILÁTERO!')
elif seg1 != seg2 != seg3 != seg1:
print('Os segmentos acima PODEM formar um triângulo do tipo ESCALENO!')
else:
print('Os segmentos acima PODEM formar um triângulo do tipo ISÓSCELES!')
else:
print('Os segmentos NÃO PODEM formar um triângulo!')
| 54
| 108
| 0.700617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 679
| 0.688641
|
12c5579947927013c8506c4aecdbaabf5a5bd1d2
| 319
|
py
|
Python
|
tests/test_extension.py
|
PeterWurmsdobler/mopidy-vfd
|
8ae067d37b8670da2a0b9e876257c09ceb222be7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_extension.py
|
PeterWurmsdobler/mopidy-vfd
|
8ae067d37b8670da2a0b9e876257c09ceb222be7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_extension.py
|
PeterWurmsdobler/mopidy-vfd
|
8ae067d37b8670da2a0b9e876257c09ceb222be7
|
[
"Apache-2.0"
] | null | null | null |
from mopidy_vfd import Extension
def test_get_default_config():
ext = Extension()
config = ext.get_default_config()
assert "[vfd]" in config
assert "enabled = true" in config
def test_get_config_schema():
ext = Extension()
schema = ext.get_config_schema()
assert "display" in schema
| 16.789474
| 37
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.100313
|
12c5c79e7f95fd34c8892f6f44952e889b0051d1
| 111
|
py
|
Python
|
backend/venv/src/api/ordercampproduct/apps.py
|
AkashSDas/camps_for_champs
|
1bf7e51905b5b3efc47f94ffcfde7167dace4475
|
[
"MIT"
] | null | null | null |
backend/venv/src/api/ordercampproduct/apps.py
|
AkashSDas/camps_for_champs
|
1bf7e51905b5b3efc47f94ffcfde7167dace4475
|
[
"MIT"
] | null | null | null |
backend/venv/src/api/ordercampproduct/apps.py
|
AkashSDas/camps_for_champs
|
1bf7e51905b5b3efc47f94ffcfde7167dace4475
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class OrdercampproductConfig(AppConfig):
name = 'api.ordercampproduct'
| 18.5
| 40
| 0.792793
| 74
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.198198
|
12c65927c0458f39714e96cf3347972f4ddf2a65
| 691
|
py
|
Python
|
onnx_tf/handlers/backend/identity.py
|
ZemingZhao/onnx-tensorflow
|
9ab9b934c2c8494b6309d20f15acabcb3abd126d
|
[
"Apache-2.0"
] | null | null | null |
onnx_tf/handlers/backend/identity.py
|
ZemingZhao/onnx-tensorflow
|
9ab9b934c2c8494b6309d20f15acabcb3abd126d
|
[
"Apache-2.0"
] | null | null | null |
onnx_tf/handlers/backend/identity.py
|
ZemingZhao/onnx-tensorflow
|
9ab9b934c2c8494b6309d20f15acabcb3abd126d
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("Identity")
@tf_func(tf.identity)
class Identity(BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
@classmethod
def version_13(cls, node, **kwargs):
return [cls.make_tensor_from_onnx_node(node, **kwargs)]
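  # version 14 may receive a sequence input (a list/tuple of tensors), which is handled with tf.identity_n.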
@classmethod
def version_14(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
if isinstance(x, (list, tuple)):
return [tf.identity_n(x)]
else:
return [tf.identity(x)]
| 25.592593
| 59
| 0.726483
| 470
| 0.680174
| 0
| 0
| 513
| 0.742402
| 0
| 0
| 23
| 0.033285
|
12c759447ac7e05d73a693a7af973c9ec776f540
| 42,699
|
py
|
Python
|
raiden/tests/integration/api/test_restapi.py
|
litexnetwork/raiden
|
b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9
|
[
"MIT"
] | 1
|
2018-11-26T01:40:37.000Z
|
2018-11-26T01:40:37.000Z
|
raiden/tests/integration/api/test_restapi.py
|
litexnetwork/raiden
|
b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9
|
[
"MIT"
] | null | null | null |
raiden/tests/integration/api/test_restapi.py
|
litexnetwork/raiden
|
b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
import time
import logging
import pytest
import grequests
from flask import url_for
from eth_utils import (
to_checksum_address,
to_canonical_address,
is_checksum_address,
)
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
MAX_TOKENS_DEPLOY,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
)
from raiden.api.v1.encoding import (
AddressField,
HexAddressConverter,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden.tests.utils import assert_dicts_are_equal
from raiden.tests.utils.client import burn_all_eth
from raiden.tests.utils.smartcontracts import deploy_contract_web3
# pylint: disable=too-many-locals,unused-argument,too-many-lines
def assert_no_content_response(response):
assert(
response is not None and
response.text == '' and
response.status_code == HTTPStatus.NO_CONTENT
)
def assert_response_with_code(response, status_code):
assert (
response is not None and
response.status_code == status_code
)
def assert_response_with_error(response, status_code):
assert (
response is not None and
response.status_code == status_code and
'errors' in response.json() and
response.json()['errors'] != ''
)
def assert_proper_response(response, status_code=HTTPStatus.OK):
assert (
response is not None and
response.status_code == status_code and
response.headers['Content-Type'] == 'application/json'
)
def api_url_for(api_backend, endpoint, **kwargs):
api_server, _ = api_backend
    # url_for() normally expects a binary address; the conversion below is left commented out here
for key, val in kwargs.items():
if isinstance(val, str) and val.startswith('0x'):
pass
#kwargs[key] = to_canonical_address(val)
with api_server.flask_app.app_context():
return url_for('v1_resources.{}'.format(endpoint), **kwargs)
def test_hex_converter():
converter = HexAddressConverter(map=None)
# invalid hex data
with pytest.raises(Exception):
converter.to_python('-')
# invalid address, too short
with pytest.raises(Exception):
converter.to_python('0x1234')
# missing prefix 0x
with pytest.raises(Exception):
converter.to_python('414d72a6f6e28f4950117696081450d63d56c354')
address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
assert converter.to_python('0x414D72a6f6E28F4950117696081450d63D56C354') == address
def test_address_field():
# pylint: disable=protected-access
field = AddressField()
attr = 'test'
data = object()
# invalid hex data
with pytest.raises(Exception):
field._deserialize('-', attr, data)
# invalid address, too short
with pytest.raises(Exception):
field._deserialize('0x1234', attr, data)
# missing prefix 0x
with pytest.raises(Exception):
field._deserialize('414d72a6f6e28f4950117696081450d63d56c354', attr, data)
address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
assert field._deserialize('0x414D72a6f6E28F4950117696081450d63D56C354', attr, data) == address
def test_url_with_invalid_address(rest_api_port_number, api_backend):
""" Addresses require the leading 0x in the urls. """
url_without_prefix = (
'http://localhost:{port}/api/1/'
'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8'
).format(port=rest_api_port_number)
request = grequests.patch(
url_without_prefix,
json=dict(state='CHANNEL_STATE_SETTLED'),
)
response = request.send().response
assert_response_with_code(response, HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_without_prefix(api_backend):
""" Addresses require leading 0x in the payload. """
invalid_address = '61c808d82a3ac53231750dadc13c777b59310bd9'
channel_data_obj = {
'partner_address': invalid_address,
'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
'settle_timeout': 10,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_chars(api_backend):
""" Addresses cannot have invalid characters in it. """
invalid_address = '0x61c808d82a3ac53231750dadc13c777b59310bdg' # g at the end is invalid
channel_data_obj = {
'partner_address': invalid_address,
'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
'settle_timeout': 10,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_length(api_backend):
""" Encoded addresses must have the right length. """
    invalid_address = '0x61c808d82a3ac53231750dadc13c777b59310b'  # address is too short
channel_data_obj = {
'partner_address': invalid_address,
'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
'settle_timeout': 10,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_not_eip55(api_backend):
""" Provided addresses must be EIP55 encoded. """
invalid_address = '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69'
channel_data_obj = {
'partner_address': invalid_address,
'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
'settle_timeout': 90,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_query_our_address(api_backend):
request = grequests.get(
api_url_for(api_backend, 'addressresource'),
)
response = request.send().response
assert_proper_response(response)
api_server, _ = api_backend
our_address = api_server.rest_api.raiden_api.address
assert response.json() == {'our_address': to_checksum_address(our_address)}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_get_channel_list(
api_backend,
token_addresses,
reveal_timeout,
):
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
request = grequests.get(
api_url_for(
api_backend,
'channelsresource',
),
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
assert response.json() == []
# let's create a new channel
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
request = grequests.get(
api_url_for(
api_backend,
'channelsresource',
),
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
channel_info = response.json()[0]
assert channel_info['partner_address'] == partner_address
assert channel_info['token_address'] == to_checksum_address(token_address)
assert 'token_network_identifier' in channel_info
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_status_channel_nonexistant(
api_backend,
token_addresses,
):
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
request = grequests.get(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
)
response = request.send().response
assert_proper_response(response, HTTPStatus.NOT_FOUND)
assert response.json()['errors'] == (
"Channel with partner '{}' for token '{}' could not be found.".format(
to_checksum_address(partner_address),
to_checksum_address(token_address),
)
)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_and_deposit_channel(
api_backend,
token_addresses,
reveal_timeout,
):
# let's create a new channel
first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': first_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = 0
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
assert_dicts_are_equal(response, expected_response)
token_network_identifier = response['token_network_identifier']
# now let's open a channel and make a deposit too
second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
balance = 100
channel_data_obj = {
'partner_address': second_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'balance': balance,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = balance
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = token_network_identifier
assert_dicts_are_equal(response, expected_response)
# let's deposit on the first channel
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=first_partner_address,
),
json={'total_deposit': balance},
)
response = request.send().response
assert_proper_response(response)
response = response.json()
expected_response = {
'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
'partner_address': first_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'state': CHANNEL_STATE_OPENED,
'balance': balance,
'token_network_identifier': token_network_identifier,
}
assert_dicts_are_equal(response, expected_response)
# let's try querying for the second channel
request = grequests.get(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=second_partner_address,
),
)
response = request.send().response
assert_proper_response(response)
response = response.json()
expected_response = {
'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
'partner_address': second_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'state': CHANNEL_STATE_OPENED,
'balance': balance,
'token_network_identifier': token_network_identifier,
}
assert_dicts_are_equal(response, expected_response)
# finally let's burn all eth and try to open another channel
api_server, _ = api_backend
burn_all_eth(api_server.rest_api.raiden_api.raiden)
channel_data_obj = {
'partner_address': '0xf3AF96F89b3d7CdcBE0C083690A28185Feb0b3CE',
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'balance': 1,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
response = response.json()
assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_close_and_settle_channel(
api_backend,
token_addresses,
reveal_timeout,
):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
balance = 0
assert_proper_response(response, status_code=HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = balance
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['reveal_timeout'] = reveal_timeout
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
assert_dicts_are_equal(response, expected_response)
token_network_identifier = response['token_network_identifier']
# let's close the channel
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json={'state': CHANNEL_STATE_CLOSED},
)
response = request.send().response
assert_proper_response(response)
expected_response = {
'token_network_identifier': token_network_identifier,
'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'state': CHANNEL_STATE_CLOSED,
'balance': balance,
}
assert_dicts_are_equal(response.json(), expected_response)
def test_api_close_insufficient_eth(
api_backend,
token_addresses,
reveal_timeout,
):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
balance = 0
assert_proper_response(response, status_code=HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = balance
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['reveal_timeout'] = reveal_timeout
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
assert_dicts_are_equal(response, expected_response)
# let's burn all eth and try to close the channel
api_server, _ = api_backend
burn_all_eth(api_server.rest_api.raiden_api.raiden)
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json={'state': CHANNEL_STATE_CLOSED},
)
response = request.send().response
assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
response = response.json()
assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_channel_invalid_input(
api_backend,
token_addresses,
reveal_timeout,
):
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = TEST_SETTLE_TIMEOUT_MIN - 1
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
channel_data_obj['settle_timeout'] = TEST_SETTLE_TIMEOUT_MAX + 1
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_state_change_errors(
api_backend,
token_addresses,
reveal_timeout,
):
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
# let's try to set a random state
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json=dict(state='inlimbo'),
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
# let's try to set both new state and balance
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json=dict(state=CHANNEL_STATE_CLOSED, total_deposit=200),
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.CONFLICT)
# let's try to patch with no arguments
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
# ok now let's close and settle for real
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json=dict(state=CHANNEL_STATE_CLOSED),
)
response = request.send().response
assert_proper_response(response)
# let's try to deposit to a settled channel
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json=dict(total_deposit=500),
)
response = request.send().response
assert_response_with_error(response, HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_api_tokens(api_backend, blockchain_services, token_addresses):
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address1 = token_addresses[0]
token_address2 = token_addresses[1]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address1),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address2),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
# and now let's get the token list
request = grequests.get(
api_url_for(
api_backend,
'tokensresource',
),
)
response = request.send().response
assert_proper_response(response)
response = response.json()
expected_response = [
to_checksum_address(token_address1),
to_checksum_address(token_address2),
]
assert set(response) == set(expected_response)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_query_partners_by_token(api_backend, blockchain_services, token_addresses):
first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': first_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
response = response.json()
channel_data_obj['partner_address'] = second_partner_address
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
response = response.json()
# and a channel for another token
channel_data_obj['partner_address'] = '0xb07937AbA15304FBBB0Bf6454a9377a76E3dD39E'
channel_data_obj['token_address'] = to_checksum_address(token_address)
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
# and now let's query our partners per token for the first token
request = grequests.get(
api_url_for(
api_backend,
'partnersresourcebytokenaddress',
token_address=to_checksum_address(token_address),
),
)
response = request.send().response
assert_proper_response(response)
response = response.json()
expected_response = [
{
'partner_address': first_partner_address,
'channel': '/api/1/channels/{}/{}'.format(
to_checksum_address(token_address),
to_checksum_address(first_partner_address),
),
}, {
'partner_address': second_partner_address,
'channel': '/api/1/channels/{}/{}'.format(
to_checksum_address(token_address),
to_checksum_address(second_partner_address),
),
},
]
assert all(r in response for r in expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_transfers(api_backend, raiden_network, token_addresses):
_, app1 = raiden_network
amount = 200
identifier = 42
token_address = token_addresses[0]
target_address = app1.raiden.address
api_server, _ = api_backend
our_address = api_server.rest_api.raiden_api.address
transfer = {
'initiator_address': to_checksum_address(our_address),
'target_address': to_checksum_address(target_address),
'token_address': to_checksum_address(token_address),
'amount': amount,
'identifier': identifier,
}
request = grequests.post(
api_url_for(
api_backend,
'transfertotargetresource',
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={'amount': amount, 'identifier': identifier},
)
response = request.send().response
assert_proper_response(response)
response = response.json()
assert response == transfer
#demo
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_crosstransactiontry(api_backend, raiden_network, token_addresses):
_, app1 = raiden_network
raiden = _.raiden
sendETH_amount = 101
    sendBTC_amount = 2
receiveBTC_address = "1JnC15WwDVcC3QbQRUY6ChqRLucLpTGaJN"
token_address = token_addresses[0]
target_address = app1.raiden.address
api_server, _ = api_backend
our_address = api_server.rest_api.raiden_api.address
crosstransaction = {
'initiator_address': to_checksum_address(our_address),
'target_address': to_checksum_address(target_address),
'token_address': to_checksum_address(token_address),
'sendETH_amount': sendETH_amount,
'sendBTC_amount': sendBTC_amount,
'receiveBTC_address':receiveBTC_address,
}
request = grequests.post(
api_url_for(
api_backend,
'crosstransactiontry',
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
),
json={'initiator_address': to_checksum_address(our_address), 'sendETH_amount': sendETH_amount,'sendBTC_amount':sendBTC_amount,'receiveBTC_address':receiveBTC_address},
)
response = request.send().response
time.sleep(10)
hash_r = raiden.wal.storage.get_all_crosstransaction()[0][9]
test_api_crosstransation_hash(api_backend,raiden_network,token_address,hash_r)
assert_proper_response(response)
response = response.json()
assert response == crosstransaction
#demo
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_getcrosstransation(api_backend, raiden_network, token_addresses):
_, app1 = raiden_network
api_server, _ = api_backend
raiden = app1.raiden
test_api_crosstransactiontry(api_backend,raiden_network,token_addresses)
request = grequests.get(
api_url_for(
api_backend,
'getcrosstransaction',
)
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
logging.debug(response)
assert response.json() != []
#test getcrosstransation_by_id
cross_id = response.json()[0]['crossid']
test_api_getcrosstransation_by_id(api_backend,raiden_network,token_addresses,cross_id)
def test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses,cross_id):
_, app1 = raiden_network
api_server, _ = api_backend
cross_id = cross_id
request = grequests.get(
api_url_for(
api_backend,
'getcrosstransactionbyid',
cross_id = cross_id,
)
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
assert response.json() != []
def test_api_crosstransation_hash(api_backend, raiden_network, token_addresses,hash_r):
_, app1 = raiden_network
api_server, _ = api_backend
hash_r = str(hash_r)
request = grequests.get(
api_url_for(
api_backend,
'recivehashresource',
hash_r = hash_r,
)
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
assert response.json() == 'hash_r is ok'
@pytest.mark.parametrize('number_of_tokens', [0])
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_register_token(api_backend, token_amount, token_addresses, raiden_network):
app0 = raiden_network[0]
new_token_address = deploy_contract_web3(
CONTRACT_HUMAN_STANDARD_TOKEN,
app0.raiden.chain.client,
num_confirmations=None,
constructor_arguments=(
token_amount,
2,
'raiden',
'Rd',
),
)
other_token_address = deploy_contract_web3(
CONTRACT_HUMAN_STANDARD_TOKEN,
app0.raiden.chain.client,
num_confirmations=None,
constructor_arguments=(
token_amount,
2,
'raiden',
'Rd',
),
)
register_request = grequests.put(api_url_for(
api_backend,
'registertokenresource',
token_address=to_checksum_address(new_token_address),
))
register_response = register_request.send().response
assert_proper_response(register_response, status_code=HTTPStatus.CREATED)
response_json = register_response.json()
assert 'token_network_address' in response_json
assert is_checksum_address(response_json['token_network_address'])
# now try to reregister it and get the error
conflict_request = grequests.put(api_url_for(
api_backend,
'registertokenresource',
token_address=to_checksum_address(new_token_address),
))
conflict_response = conflict_request.send().response
assert_response_with_error(conflict_response, HTTPStatus.CONFLICT)
# Burn all the eth and then make sure we get the appropriate API error
burn_all_eth(app0.raiden)
poor_request = grequests.put(api_url_for(
api_backend,
'registertokenresource',
token_address=to_checksum_address(other_token_address),
))
poor_response = poor_request.send().response
assert_response_with_error(poor_response, HTTPStatus.PAYMENT_REQUIRED)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_get_connection_managers_info(api_backend, token_addresses):
# check that there are no registered tokens
request = grequests.get(
api_url_for(api_backend, 'connectionsinforesource'),
)
response = request.send().response
result = response.json()
assert len(result) == 0
funds = 100
token_address1 = to_checksum_address(token_addresses[0])
connect_data_obj = {
'funds': funds,
}
request = grequests.put(
api_url_for(
api_backend,
'connectionsresource',
token_address=token_address1,
),
json=connect_data_obj,
)
response = request.send().response
assert_no_content_response(response)
# check that there now is one registered channel manager
request = grequests.get(
api_url_for(api_backend, 'connectionsinforesource'),
)
response = request.send().response
result = response.json()
assert isinstance(result, dict) and len(result.keys()) == 1
assert token_address1 in result
assert isinstance(result[token_address1], dict)
assert set(result[token_address1].keys()) == {'funds', 'sum_deposits', 'channels'}
funds = 100
token_address2 = to_checksum_address(token_addresses[1])
connect_data_obj = {
'funds': funds,
}
request = grequests.put(
api_url_for(
api_backend,
'connectionsresource',
token_address=token_address2,
),
json=connect_data_obj,
)
response = request.send().response
assert_no_content_response(response)
# check that there now are two registered channel managers
request = grequests.get(
api_url_for(api_backend, 'connectionsinforesource'),
)
response = request.send().response
result = response.json()
assert isinstance(result, dict) and len(result.keys()) == 2
assert token_address2 in result
assert isinstance(result[token_address2], dict)
assert set(result[token_address2].keys()) == {'funds', 'sum_deposits', 'channels'}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_connect_insufficient_eth(api_backend, token_addresses):
# Burn all eth and then try to connect to a token network
api_server, _ = api_backend
burn_all_eth(api_server.rest_api.raiden_api.raiden)
funds = 100
token_address1 = to_checksum_address(token_addresses[0])
connect_data_obj = {
'funds': funds,
}
request = grequests.put(
api_url_for(
api_backend,
'connectionsresource',
token_address=token_address1,
),
json=connect_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
response = response.json()
assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_network_events(api_backend, token_addresses):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CREATED)
request = grequests.get(
api_url_for(
api_backend,
'networkeventsresource',
from_block=0,
),
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events(api_backend, token_addresses):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CREATED)
request = grequests.get(
api_url_for(
api_backend,
'tokeneventsresource',
token_address=token_address,
from_block=0,
),
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_channel_events(api_backend, token_addresses):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.CREATED)
request = grequests.get(
api_url_for(
api_backend,
'channeleventsresource',
partner_address=partner_address,
token_address=token_address,
from_block=0,
),
)
response = request.send().response
assert_proper_response(response, status_code=HTTPStatus.OK)
assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events_errors_for_unregistered_token(api_backend):
request = grequests.get(
api_url_for(
api_backend,
'tokeneventsresource',
token_address='0x61C808D82A3Ac53231750daDc13c777b59310bD9',
from_block=5,
to_block=20,
),
)
response = request.send().response
assert_response_with_error(response, status_code=HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('deposit', [50000])
def test_api_deposit_limit(
api_backend,
token_addresses,
reveal_timeout,
):
# let's create a new channel and deposit exactly the limit amount
first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
balance_working = MAX_TOKENS_DEPLOY * (10 ** 2) # token has two digits
channel_data_obj = {
'partner_address': first_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'balance': balance_working,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = balance_working
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
assert_dicts_are_equal(response, expected_response)
# now let's open a channel and deposit a bit more than the limit
second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
balance_failing = balance_working + 1 # token has two digits
channel_data_obj = {
'partner_address': second_partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
'reveal_timeout': reveal_timeout,
'balance': balance_failing,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
assert_proper_response(response, HTTPStatus.CONFLICT)
response = response.json()
assert response['errors'] == 'The deposit of 10001 is bigger than the current limit of 10000'
| 32.769762
| 176
| 0.686035
| 0
| 0
| 0
| 0
| 36,123
| 0.845992
| 0
| 0
| 8,777
| 0.205555
|
12c7cbd02b14e09531a4f5ea52a53834f3434799
| 6,946
|
py
|
Python
|
contents/MyExperiment/Exp3_test/cluster_env.py
|
Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master
|
011594083410f9b2f8e16eb5deed26e730ed849e
|
[
"MIT"
] | null | null | null |
contents/MyExperiment/Exp3_test/cluster_env.py
|
Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master
|
011594083410f9b2f8e16eb5deed26e730ed849e
|
[
"MIT"
] | null | null | null |
contents/MyExperiment/Exp3_test/cluster_env.py
|
Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master
|
011594083410f9b2f8e16eb5deed26e730ed849e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import random
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
class Cluster(tk.Tk, object):
def __init__(self, state_init, server_attribute):
super(Cluster, self).__init__()
self.action_space = np.array([[0,0],[0,1],[0,2],[0,3],
[1,0],[1,1],[1,2],[1,3],
[2,0],[2,1],[2,2],[2,3],
[3,0],[3,1],[3,2],[3,3],
[4,0],[4,1],[4,2],[4,3],
[5,0],[5,1],[5,2],[5,3],
[6,0],[6,1],[6,2],[6,3],
[7,0],[7,1],[7,2],[7,3],
[8,0],[8,1],[8,2],[8,3],
[9,0],[9,1],[9,2],[9,3],
[10,0],[10,1],[10,2],[10,3],
[11,0],[11,1],[11,2],[11,3]])
self.n_actions = len(self.action_space)
self.cost_matrix = pd.DataFrame(np.array([[0,1,5,12],
[1,0,4,2],
[5,4,0,3],
[12,2,3,0]]),
columns=[0, 1, 2, 3])
self.server_attribute = server_attribute
self.QSs = self.read_file()
self.state_init = state_init
self.cost_init = self.cost_init()
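    # Move one query to a new server, recompute that query's cost, and return the new state, the updated
    # per-query costs, the reward, the total cost, and a flag that is True when the new placement costs more than before.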
def step(self, action, state, costs):
s = state.copy()
        # action_real = [query index, index of the server the query is moved to]
action_real = self.action_space[action]
q = action_real[0]
index_server = action_real[1]
s.iloc[q, :] = 0
s.iloc[q, index_server] = 1
cost_new = self.cost_caculate(q, index_server)
if cost_new > costs[q]:
is_better = True
else:
is_better = False
# costs[action_real[0]] = cost_new
costs[q] = cost_new
cost_all = self.cost_all(costs)
reward = self.reward(cost_all, s)
s_ = s
return s_, costs, reward, cost_all, is_better
    # Termination condition: the chosen action no longer changes the state after execution, or the state reaches some situation such as load imbalance
def is_finish(self):
# TODO
return True
# read the file and store in an array[query,[server1,server2,......]]
def read_file(self):
server_attribute = self.server_attribute
with open("D:\SynologyDrive\Reinforcement-learning-with-tensorflow-master\contents\MyExperiment\Exp3_test\QueryAttribute_test",'r') as f:
content = f.readlines()
QSs = []
for item in content:
QS = []
item = item.strip("\n")
q = item.split(",")[0]
targetAttribute = item.split(",")[1:]
targetAttribute = list(map(int, targetAttribute))
servers = []
for attribute in targetAttribute:
server = server_attribute[server_attribute.loc[:, attribute] == 1].index[0]
servers.append(server)
QS.append(int(q))
QS.append(servers)
QSs.append(QS)
return QSs
    # compute the initial costs array based on the initial state matrix; every element represents the total cost of one query
def cost_init(self):
state_init = self.state_init
# print(len(state_init))
states = self.state_array(state_init)
# print(len(states))
costs = []
# print(len(state_init))
for i in range(len(state_init)):
index_server = states[i][1]
cost = self.cost_caculate(i, index_server)
costs.append(cost)
return costs
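    # Sum the communication costs between the candidate server and every server holding an attribute required by query q.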
def cost_caculate(self,q,index_server):
cost = 0
for j in range(len(self.QSs[q][1])):
target_server = self.QSs[q][1][j]
cost += self.cost_matrix.iloc[index_server, target_server]
return cost
    # (the random initial state matrix itself is built by the module-level state_init() helper below)
    # sum the per-query costs into the total cost of the current placement
def cost_all(self, costs):
cost_all = 0
for i in range(len(costs)):
cost_all += costs[i]
return cost_all
def reward(self, cost_all, state):
list = []
for i in state.columns:
list.append(state[i].sum())
load_weight_var = np.var(list)
reward = (len(state)/cost_all) * self.function(1.1, load_weight_var)
return reward
def function(self, a, x):
y = 100/(a**x)
return y
# transform the state matrix into array
def state_array(self, state):
states = []
for i in range(len(state)):
for j in range(len(state.columns)):
state_arr = []
if state.iloc[i, j] == 1:
state_arr.append(i)
state_arr.append(j)
states.append(state_arr)
return states
def state_init():
init_state = pd.DataFrame(np.zeros(327*8).reshape(327, 8), columns=[0, 1, 2, 3, 4, 5, 6, 7])
for i in range(len(init_state)):
j = random.randint(0, 7)
init_state.iloc[i][j] = 1
return init_state
# if __name__ == '__main__':
# server_attribute = pd.DataFrame(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
# 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
# 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
# 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
# 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]).
# reshape(8, 24),
# columns=np.arange(24))
# env = Cluster(state_init(), server_attribute)
# Qss = env.QSs
# print(Qss)
# for i in range(len(Qss)):
# q = i
# for j in range(len(server_attribute)):
# index_server = j
# print(env.cost_init)
# print("The reward of initial state is:")
# print(env.reward(env.cost_all(env.cost_init), env.state_init))
# print(env.state_init)
# actions=list(range(env.n_actions))
# print(actions)
# env.after(100, update)
# env.mainloop()
| 37.144385
| 145
| 0.460121
| 5,036
| 0.713517
| 0
| 0
| 0
| 0
| 0
| 0
| 2,269
| 0.321479
|
12c7d079f923030d66c22a1b6cf6b9b674f39635
| 2,589
|
py
|
Python
|
libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py
|
Kardyne/libensemble
|
566c8f5daafe2ad4deebc13198a1e131e4ce6542
|
[
"BSD-2-Clause"
] | null | null | null |
libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py
|
Kardyne/libensemble
|
566c8f5daafe2ad4deebc13198a1e131e4ce6542
|
[
"BSD-2-Clause"
] | null | null | null |
libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py
|
Kardyne/libensemble
|
566c8f5daafe2ad4deebc13198a1e131e4ce6542
|
[
"BSD-2-Clause"
] | null | null | null |
# """
# Runs libEnsemble on the 6-hump camel problem. Documented here:
# https://www.sfu.ca/~ssurjano/camel6.html
#
# Execute via the following command:
# mpiexec -np 4 python3 test_6-hump_camel_elapsed_time_abort.py
# The number of concurrent evaluations of the objective function will be 4-1=3.
# """
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from mpi4py import MPI # for libE communicator
import sys, os # for adding to path
import numpy as np
# Import libEnsemble main
from libensemble.libE import libE
# Import sim_func
from libensemble.sim_funcs.six_hump_camel import six_hump_camel
# Import gen_func
from libensemble.gen_funcs.uniform_sampling import uniform_random_sample
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
script_name = os.path.splitext(os.path.basename(__file__))[0]
#State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': six_hump_camel, # This is the function whose output is being minimized
'in': ['x'], # These keys will be given to the above function
'out': [('f',float), # This is the output from the function being minimized
],
'pause_time': 2,
# 'save_every_k': 10
}
# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {'gen_f': uniform_random_sample,
'in': ['sim_id'],
'out': [('x',float,2),
],
'lb': np.array([-3,-2]),
'ub': np.array([ 3, 2]),
'gen_batch_size': 5,
'num_active_gens': 1,
'batch_mode': False,
# 'save_every_k': 10
}
# Tell libEnsemble when to stop
exit_criteria = {'elapsed_wallclock_time': 1}
np.random.seed(1)
persis_info = {}
for i in range(MPI.COMM_WORLD.Get_size()):
persis_info[i] = {'rand_stream': np.random.RandomState(i)}
# Perform the run
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info)
if MPI.COMM_WORLD.Get_rank() == 0:
eprint(flag)
eprint(H)
assert flag == 2
short_name = script_name.split("test_", 1).pop()
filename = short_name + '_results_History_length=' + str(len(H)) + '_evals=' + str(sum(H['returned'])) + '_ranks=' + str(MPI.COMM_WORLD.Get_size())
print("\n\n\nRun completed.\nSaving results to file: " + filename)
# if flag == 2:
# print("\n\n\nKilling COMM_WORLD")
# MPI.COMM_WORLD.Abort()
| 34.52
| 151
| 0.653148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,166
| 0.450367
|
12c8a53eac5c028a5e825aaa86f201c528a2f671
| 1,329
|
py
|
Python
|
do_like_javac/tools/graphtools.py
|
zcai1/do-like-javac
|
3eb4a43521ae181a9b777a589e477b0c6ab7cb6e
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 1
|
2020-10-10T20:24:08.000Z
|
2020-10-10T20:24:08.000Z
|
do_like_javac/tools/graphtools.py
|
zcai1/do-like-javac
|
3eb4a43521ae181a9b777a589e477b0c6ab7cb6e
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 13
|
2019-06-20T23:16:15.000Z
|
2022-03-26T21:19:20.000Z
|
do_like_javac/tools/graphtools.py
|
zcai1/do-like-javac
|
3eb4a43521ae181a9b777a589e477b0c6ab7cb6e
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 5
|
2016-09-23T00:52:12.000Z
|
2021-09-08T01:24:36.000Z
|
import os
import argparse
from . import common
argparser = argparse.ArgumentParser(add_help=False)
graph_group = argparser.add_argument_group('graphtool arguments')
graph_group.add_argument('--graph-jar', metavar='<graphtool-jar>',
action='store',default=None, dest='graph_jar',
help='Path to prog2dfg.jar or apilearner.jar')
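# Run the external graph tool (prog2dfg.jar or apilearner.jar) once per javac command, writing its dot output under <output-directory>/dot/.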
def run(args, javac_commands, jars):
if not args.graph_jar:
print("Could not run graph tool: missing arg --graph-jar")
return
tool_command = ["java", "-jar", args.graph_jar]
dot_dir = os.path.join(args.output_directory, "dot")
if not os.path.isdir(dot_dir):
os.makedirs(dot_dir)
for jc in javac_commands:
java_files = jc['java_files']
java_files_file = os.path.join(os.getcwd(), '__java_file_names.txt')
class_dir = common.class_directory(jc)
with open(java_files_file, 'w') as f:
for s in java_files:
f.write(s)
f.write("\n")
current_outdir = os.path.join(dot_dir,
class_dir.replace(os.getcwd(),'').replace(os.sep,"_"))
cmd = tool_command + ["-o", current_outdir,
"-j", class_dir,
"-all",
"-source", java_files_file]
common.run_cmd(cmd, args, 'graphtools')
| 30.906977
| 88
| 0.611738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 259
| 0.194883
|
12c8ff9bf299511a1712cec875fde79e159c64f4
| 507
|
py
|
Python
|
boss_grabbing/pipelines.py
|
shansb/boss_grabbing
|
20aabd6b2062099eb287d7586dcf619648569ba2
|
[
"MIT"
] | null | null | null |
boss_grabbing/pipelines.py
|
shansb/boss_grabbing
|
20aabd6b2062099eb287d7586dcf619648569ba2
|
[
"MIT"
] | null | null | null |
boss_grabbing/pipelines.py
|
shansb/boss_grabbing
|
20aabd6b2062099eb287d7586dcf619648569ba2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from boss_grabbing.sqlite import Sqlite
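# Deduplicate scraped items by URL: only insert into SQLite when the URL has not been stored yet.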
class BossGrabbingPipeline(object):
def process_item(self, item, spider):
print("process")
count = Sqlite.select_db(item['url'])[0][0]
print("count:" + str(count))
if count == 0:
Sqlite.insert_db(item)
return item
| 25.35
| 65
| 0.65286
| 270
| 0.532544
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.412229
|
12c9169d04a1b953b055c11fb6f8b67fa66071ff
| 344
|
py
|
Python
|
core/jobs/urls.py
|
InKyrNet/inkyrnet
|
fdb5c8def9b74049c4b48f2fccf5d52b040a4435
|
[
"MIT"
] | null | null | null |
core/jobs/urls.py
|
InKyrNet/inkyrnet
|
fdb5c8def9b74049c4b48f2fccf5d52b040a4435
|
[
"MIT"
] | 4
|
2021-06-04T21:36:18.000Z
|
2021-09-22T17:44:09.000Z
|
core/jobs/urls.py
|
InKyrNet/inkyrnet
|
fdb5c8def9b74049c4b48f2fccf5d52b040a4435
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *
from django_filters.views import FilterView
app_name = 'jobs'
urlpatterns = [
path('', FilterView.as_view(filterset_class=JobFilter,
template_name='jobs/job_list.html'), name='index'),
path('companies/', CompanyListView.as_view(), name='companies'),
]
| 28.666667
| 83
| 0.674419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.168605
|
12c9326e60a2f14e4ff7c33d36e504ccc28441b7
| 2,010
|
py
|
Python
|
src/compas/datastructures/mesh/transformations_numpy.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | 2
|
2021-03-17T18:14:22.000Z
|
2021-09-19T13:50:02.000Z
|
src/compas/datastructures/mesh/transformations_numpy.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/mesh/transformations_numpy.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import transform_points_numpy
__all__ = [
'mesh_transform_numpy',
'mesh_transformed_numpy',
]
def mesh_transform_numpy(mesh, transformation):
"""Transform a mesh.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh.
transformation : compas.geometry.Transformation
The transformation.
Notes
-----
The mesh is modified in-place.
Examples
--------
>>> mesh = Mesh.from_obj(compas.get('cube.obj'))
>>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
>>> tmesh = mesh.copy()
>>> mesh_transform(tmesh, T)
"""
vertices = list(mesh.vertices())
xyz = [mesh.vertex_coordinates(vertex) for vertex in vertices]
xyz[:] = transform_points_numpy(xyz, transformation)
for index, vertex in enumerate(vertices):
mesh.vertex_attributes(vertex, 'xyz', xyz[index])
def mesh_transformed_numpy(mesh, transformation):
"""Transform a copy of ``mesh``.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh.
transformation : compas.geometry.Transformation
The transformation.
Returns
-------
Mesh
A transformed independent copy of ``mesh``.
Notes
-----
The original mesh is not modified.
Instead a transformed independent copy is returned.
Examples
--------
>>> mesh = Mesh.from_obj(compas.get('cube.obj'))
>>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
>>> tmesh = mesh_transformed(mesh, T)
"""
mesh_copy = mesh.copy()
mesh_transform_numpy(mesh_copy, transformation)
return mesh_copy
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(globs=globals())
| 24.216867
| 80
| 0.595025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,268
| 0.630846
|
12c93b56f0fe4bfd1cf140c773e7ff17f7dd5689
| 17,860
|
py
|
Python
|
selfdrive/car/gm/carcontroller.py
|
CTyrell/openpilot
|
1ef27823882eed575266983175f106af1e293082
|
[
"MIT"
] | null | null | null |
selfdrive/car/gm/carcontroller.py
|
CTyrell/openpilot
|
1ef27823882eed575266983175f106af1e293082
|
[
"MIT"
] | null | null | null |
selfdrive/car/gm/carcontroller.py
|
CTyrell/openpilot
|
1ef27823882eed575266983175f106af1e293082
|
[
"MIT"
] | null | null | null |
from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import interp
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.gm import gmcan
from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
def __init__(self, dbc_name, CP, VM):
self.start_time = 0.
self.apply_steer_last = 0
self.lka_steering_cmd_counter_last = -1
self.lka_icon_status_last = (False, False)
self.steer_rate_limited = False
self.fcw_count = 0
self.params = CarControllerParams()
self.packer_pt = CANPacker(DBC[CP.carFingerprint]['pt'])
self.packer_obj = CANPacker(DBC[CP.carFingerprint]['radar'])
self.packer_ch = CANPacker(DBC[CP.carFingerprint]['chassis'])
self.debug_logging = False
self.debug_log_time_step = 0.333
self.last_debug_log_t = 0.
if self.debug_logging:
with open("/data/openpilot/coast_debug.csv","w") as f:
f.write(",".join([
"t",
"long plan",
"d (m/s)",
"v",
"vEgo",
"v_cruise",
"v (mph)",
"vEgo (mph)",
"v_cruise (mph)",
"ttc",
"coast gas lockout",
"coast brake lockout",
"gas in",
"brake in",
"one-pedal",
"coasting enabled",
"no f brakes",
"gas out",
"brake out"]) + "\n")
def update(self, enabled, CS, frame, actuators,
hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert):
P = self.params
# Send CAN commands.
can_sends = []
# Steering (50Hz)
# Avoid GM EPS faults when transmitting messages too close together: skip this transmit if we just received the
# next Panda loopback confirmation in the current CS frame.
if CS.lka_steering_cmd_counter != self.lka_steering_cmd_counter_last:
self.lka_steering_cmd_counter_last = CS.lka_steering_cmd_counter
elif (frame % P.STEER_STEP) == 0:
lkas_enabled = (enabled or CS.pause_long_on_gas_press) and CS.lkMode and not (CS.out.steerWarning or CS.out.steerError) and CS.out.vEgo > P.MIN_STEER_SPEED and CS.lane_change_steer_factor > 0.
if lkas_enabled:
new_steer = int(round(actuators.steer * P.STEER_MAX * CS.lane_change_steer_factor))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
self.steer_rate_limited = new_steer != apply_steer
else:
apply_steer = 0
self.apply_steer_last = apply_steer
# GM EPS faults on any gap in received message counters. To handle transient OP/Panda safety sync issues at the
# moment of disengaging, increment the counter based on the last message known to pass Panda safety checks.
idx = (CS.lka_steering_cmd_counter + 1) % 4
can_sends.append(gmcan.create_steering_control(self.packer_pt, CanBus.POWERTRAIN, apply_steer, idx, lkas_enabled))
# Gas/regen prep
if not enabled or CS.pause_long_on_gas_press:
# Stock ECU sends max regen when not enabled.
apply_gas = P.MAX_ACC_REGEN
apply_brake = 0
else:
apply_gas = interp(actuators.accel, P.GAS_LOOKUP_BP, P.GAS_LOOKUP_V)
apply_brake = interp(actuators.accel, P.BRAKE_LOOKUP_BP, P.BRAKE_LOOKUP_V)
t = sec_since_boot()
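# Compute lead-vehicle "lockout" factors: 1.0 keeps the stock gas/brake behavior when a lead is close, slow or approaching; 0.0 allows full coasting / one-pedal behavior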
v_rel = CS.coasting_lead_v - CS.vEgo
ttc = min(-CS.coasting_lead_d / v_rel if (CS.coasting_lead_d > 0. and v_rel < 0.) else 100.,100.)
d_time = CS.coasting_lead_d / CS.vEgo if (CS.coasting_lead_d > 0. and CS.vEgo > 0. and CS.tr > 0.) else 10.
if CS.coasting_lead_d > 0. and (ttc < CS.lead_ttc_long_gas_lockout_bp[-1] \
or v_rel < CS.lead_v_rel_long_gas_lockout_bp[-1] \
or CS.coasting_lead_v < CS.lead_v_long_gas_lockout_bp[-1] \
or d_time < CS.tr * CS.lead_tr_long_gas_lockout_bp[-1]\
or CS.coasting_lead_d < CS.lead_d_long_gas_lockout_bp[-1]):
lead_long_gas_lockout_factor = max([
interp(v_rel, CS.lead_v_rel_long_gas_lockout_bp, CS.lead_v_rel_long_gas_lockout_v),
interp(CS.coasting_lead_v, CS.lead_v_long_gas_lockout_bp, CS.lead_v_long_gas_lockout_v),
interp(ttc, CS.lead_ttc_long_gas_lockout_bp, CS.lead_ttc_long_gas_lockout_v),
interp(d_time / CS.tr, CS.lead_tr_long_gas_lockout_bp, CS.lead_tr_long_gas_lockout_v),
interp(CS.coasting_lead_d, CS.lead_d_long_gas_lockout_bp, CS.lead_d_long_gas_lockout_v)])
if CS.coasting_lead_d > 0. and (ttc < CS.lead_ttc_long_brake_lockout_bp[-1] \
or v_rel < CS.lead_v_rel_long_brake_lockout_bp[-1] \
or CS.coasting_lead_v < CS.lead_v_long_brake_lockout_bp[-1] \
or d_time < CS.tr * CS.lead_tr_long_brake_lockout_bp[-1]\
or CS.coasting_lead_d < CS.lead_d_long_brake_lockout_bp[-1]):
lead_long_brake_lockout_factor = max([
interp(v_rel, CS.lead_v_rel_long_brake_lockout_bp, CS.lead_v_rel_long_brake_lockout_v),
interp(CS.coasting_lead_v, CS.lead_v_long_brake_lockout_bp, CS.lead_v_long_brake_lockout_v),
interp(ttc, CS.lead_ttc_long_brake_lockout_bp, CS.lead_ttc_long_brake_lockout_v),
interp(d_time / CS.tr, CS.lead_tr_long_brake_lockout_bp, CS.lead_tr_long_brake_lockout_v),
interp(CS.coasting_lead_d, CS.lead_d_long_brake_lockout_bp, CS.lead_d_long_brake_lockout_v)])
else:
lead_long_brake_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
else:
lead_long_gas_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
lead_long_brake_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
# debug logging
do_log = self.debug_logging and (t - self.last_debug_log_t > self.debug_log_time_step)
if do_log:
self.last_debug_log_t = t
f = open("/data/openpilot/coast_debug.csv","a")
f.write(",".join([f"{i:.1f}" if i == float else str(i) for i in [
t - CS.sessionInitTime,
CS.coasting_long_plan,
CS.coasting_lead_d,
CS.coasting_lead_v,
CS.vEgo,
CS.v_cruise_kph * CV.KPH_TO_MS,
CS.coasting_lead_v * CV.MS_TO_MPH,
CS.vEgo * CV.MS_TO_MPH,
CS.v_cruise_kph * CV.KPH_TO_MPH,
ttc,
lead_long_gas_lockout_factor,
lead_long_brake_lockout_factor,
int(apply_gas),
int(apply_brake),
(CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active),
CS.coasting_enabled,
CS.no_friction_braking]]) + ",")
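# One-pedal / coast-one-pedal driving: blend gas toward max regen and add a friction-brake amount derived from speed, steering angle, pitch and the selected one-pedal brake mode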
if (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active):
if not CS.one_pedal_mode_active and CS.gear_shifter_ev == 4 and CS.one_pedal_dl_coasting_enabled and CS.vEgo > 0.05:
apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.ZERO_GAS ) * (1. - lead_long_gas_lockout_factor)
else:
apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.MAX_ACC_REGEN) * (1. - lead_long_gas_lockout_factor)
time_since_brake = t - CS.one_pedal_mode_last_gas_press_t
if CS.one_pedal_mode_active:
if abs(CS.angle_steers) > CS.one_pedal_angle_steers_cutoff_bp[0]:
one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode])
one_pedal_apply_brake_minus1 = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[max(0,CS.one_pedal_brake_mode-1)], CS.one_pedal_mode_stop_apply_brake_v[max(0,CS.one_pedal_brake_mode-1)])
one_pedal_apply_brake = interp(abs(CS.angle_steers), CS.one_pedal_angle_steers_cutoff_bp, [one_pedal_apply_brake, one_pedal_apply_brake_minus1])
else:
one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode])
one_pedal_apply_brake *= interp(CS.pitch, CS.one_pedal_pitch_brake_adjust_bp, CS.one_pedal_pitch_brake_adjust_v[CS.one_pedal_brake_mode])
one_pedal_apply_brake = min(one_pedal_apply_brake, float(P.BRAKE_LOOKUP_V[0]))
one_pedal_apply_brake *= interp(time_since_brake, CS.one_pedal_mode_ramp_time_bp, CS.one_pedal_mode_ramp_time_v) if CS.one_pedal_brake_mode < 2 else 1.
else:
one_pedal_apply_brake = 0.
# ramp braking
if CS.one_pedal_mode_active_last and time_since_brake > CS.one_pedal_mode_ramp_time_bp[-1]:
if CS.one_pedal_mode_apply_brake != one_pedal_apply_brake:
if CS.one_pedal_mode_ramp_mode_last != CS.one_pedal_brake_mode:
# brake mode changed, so need to calculate new step based on the old and new modes
old_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_mode_ramp_mode_last], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_mode_ramp_mode_last])
CS.one_pedal_mode_ramp_time_step = (one_pedal_apply_brake - old_apply_brake) / (CS.one_pedal_mode_ramp_duration * (2. if CS.one_pedal_mode_apply_brake > one_pedal_apply_brake else 1.))
if CS.one_pedal_mode_apply_brake < one_pedal_apply_brake:
if CS.one_pedal_mode_ramp_time_step < 0.:
CS.one_pedal_mode_ramp_time_step *= -1.
CS.one_pedal_mode_apply_brake = max(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last))
else:
if CS.one_pedal_mode_ramp_time_step > 0.:
CS.one_pedal_mode_ramp_time_step *= -1.
CS.one_pedal_mode_apply_brake = min(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last))
one_pedal_apply_brake = CS.one_pedal_mode_apply_brake
else:
CS.one_pedal_mode_apply_brake = one_pedal_apply_brake
CS.one_pedal_mode_active_last = True
CS.one_pedal_mode_ramp_t_last = t
CS.one_pedal_mode_ramp_mode_last = CS.one_pedal_brake_mode
if CS.one_pedal_mode_op_braking_allowed and CS.coasting_long_plan not in ['cruise', 'limit']:
apply_brake = max(one_pedal_apply_brake, apply_brake * lead_long_brake_lockout_factor)
else:
apply_brake = one_pedal_apply_brake
elif CS.coasting_enabled and lead_long_brake_lockout_factor < 1.:
if CS.coasting_long_plan in ['cruise', 'limit'] and apply_gas < P.ZERO_GAS or apply_brake > 0.:
check_speed_ms = (CS.speed_limit if CS.speed_limit_active and CS.speed_limit < CS.v_cruise_kph else CS.v_cruise_kph) * CV.KPH_TO_MS
if apply_brake > 0.:
coasting_over_speed_vEgo_BP = [
interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[0]),
interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[1])
]
over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0. and CS.coasting_brake_over_speed_enabled) else 0.
over_speed_brake = apply_brake * over_speed_factor
apply_brake = max([apply_brake * lead_long_brake_lockout_factor, over_speed_brake])
if apply_gas < P.ZERO_GAS and lead_long_gas_lockout_factor < 1.:
coasting_over_speed_vEgo_BP = [
interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[0]),
interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[1])
]
over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0 and CS.coasting_brake_over_speed_enabled) else 0.
coast_apply_gas = int(round(float(P.ZERO_GAS) - over_speed_factor * (P.ZERO_GAS - apply_gas)))
apply_gas = apply_gas * lead_long_gas_lockout_factor + coast_apply_gas * (1. - lead_long_gas_lockout_factor)
elif CS.no_friction_braking and lead_long_brake_lockout_factor < 1.:
if CS.coasting_long_plan in ['cruise', 'limit'] and apply_brake > 0.:
apply_brake *= lead_long_brake_lockout_factor
apply_gas = int(round(apply_gas))
apply_brake = int(round(apply_brake))
CS.one_pedal_mode_active_last = CS.one_pedal_mode_active
if do_log:
f.write(",".join([str(i) for i in [
apply_gas,
apply_brake]]) + "\n")
f.close()
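# Drive the on-screen brake indicator: values 51-100 reflect friction braking, 0-50 reflect regen (scaled from HV battery wattage)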
if CS.showBrakeIndicator:
CS.apply_brake_percent = 0.
if CS.vEgo > 0.1:
if CS.out.cruiseState.enabled:
if not CS.pause_long_on_gas_press:
if apply_brake > 1:
CS.apply_brake_percent = interp(apply_brake, [float(P.BRAKE_LOOKUP_V[-1]), float(P.BRAKE_LOOKUP_V[0])], [51., 100.])
elif (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active):
CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
elif apply_gas < P.ZERO_GAS:
CS.apply_brake_percent = interp(apply_gas, [float(P.GAS_LOOKUP_V[0]), float(P.GAS_LOOKUP_V[1])], [51., 0.])
else:
CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
elif CS.is_ev and CS.out.brake == 0.:
CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
elif CS.out.brake > 0.:
CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.])
elif CS.out.brake > 0.:
CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.])
# Gas/regen and brakes - all at 25Hz
if (frame % 4) == 0:
idx = (frame // 4) % 4
if CS.cruiseMain and not enabled and CS.autoHold and CS.autoHoldActive and not CS.out.gasPressed and CS.out.gearShifter in ['drive','low'] and CS.out.vEgo < 0.02 and not CS.regenPaddlePressed:
# Auto Hold State
car_stopping = apply_gas < P.ZERO_GAS
standstill = CS.pcm_acc_status == AccState.STANDSTILL
at_full_stop = standstill and car_stopping
near_stop = (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping
can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, near_stop, at_full_stop))
CS.autoHoldActivated = True
else:
if CS.pause_long_on_gas_press:
at_full_stop = False
near_stop = False
car_stopping = False
standstill = False
else:
car_stopping = apply_gas < P.ZERO_GAS
standstill = CS.pcm_acc_status == AccState.STANDSTILL
at_full_stop = enabled and standstill and car_stopping
near_stop = enabled and (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping
can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, near_stop, at_full_stop))
CS.autoHoldActivated = False
# Auto-resume from full stop by resetting ACC control
acc_enabled = enabled
if standstill and not car_stopping:
acc_enabled = False
can_sends.append(gmcan.create_gas_regen_command(self.packer_pt, CanBus.POWERTRAIN, apply_gas, idx, acc_enabled, at_full_stop))
# Send dashboard UI commands (ACC status), 25hz
if (frame % 4) == 0:
send_fcw = hud_alert == VisualAlert.fcw
follow_level = CS.get_follow_level()
can_sends.append(gmcan.create_acc_dashboard_command(self.packer_pt, CanBus.POWERTRAIN, enabled,
hud_v_cruise * CV.MS_TO_KPH, hud_show_car, follow_level, send_fcw))
# Radar needs to know current speed and yaw rate (50hz),
# and that ADAS is alive (10hz)
time_and_headlights_step = 10
tt = frame * DT_CTRL
if frame % time_and_headlights_step == 0:
idx = (frame // time_and_headlights_step) % 4
can_sends.append(gmcan.create_adas_time_status(CanBus.OBSTACLE, int((tt - self.start_time) * 60), idx))
can_sends.append(gmcan.create_adas_headlights_status(self.packer_obj, CanBus.OBSTACLE))
speed_and_accelerometer_step = 2
if frame % speed_and_accelerometer_step == 0:
idx = (frame // speed_and_accelerometer_step) % 4
can_sends.append(gmcan.create_adas_steering_status(CanBus.OBSTACLE, idx))
can_sends.append(gmcan.create_adas_accelerometer_speed_status(CanBus.OBSTACLE, CS.out.vEgo, idx))
if frame % P.ADAS_KEEPALIVE_STEP == 0:
can_sends += gmcan.create_adas_keepalive(CanBus.POWERTRAIN)
# Show green icon when LKA torque is applied, and
# alarming orange icon when approaching torque limit.
# If not sent again, LKA icon disappears in about 5 seconds.
# Conveniently, sending camera message periodically also works as a keepalive.
lka_active = CS.lkas_status == 1
lka_critical = lka_active and abs(actuators.steer) > 0.9
lka_icon_status = (lka_active, lka_critical)
if frame % P.CAMERA_KEEPALIVE_STEP == 0 or lka_icon_status != self.lka_icon_status_last:
steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw]
can_sends.append(gmcan.create_lka_icon_command(CanBus.SW_GMLAN, lka_active, lka_critical, steer_alert))
self.lka_icon_status_last = lka_icon_status
return can_sends
| 54.45122
| 203
| 0.693729
| 17,407
| 0.974636
| 0
| 0
| 0
| 0
| 0
| 0
| 1,718
| 0.096193
|
12ca7aec9c936b7e376b5d6d2ed2e6e550f43708
| 8,570
|
py
|
Python
|
src/rprblender/__init__.py
|
ralic/RadeonProRenderBlenderAddon
|
310c650d4230289ac5d5407cc24a13b4c7ce0a90
|
[
"Apache-2.0"
] | 1
|
2021-03-29T05:55:49.000Z
|
2021-03-29T05:55:49.000Z
|
src/rprblender/__init__.py
|
ralic/RadeonProRenderBlenderAddon
|
310c650d4230289ac5d5407cc24a13b4c7ce0a90
|
[
"Apache-2.0"
] | 1
|
2021-04-03T09:39:28.000Z
|
2021-04-03T09:39:28.000Z
|
src/rprblender/__init__.py
|
isabella232/RadeonProRenderBlenderAddon
|
ff4ede164c1e1e909f182be709422bc8c8878b1c
|
[
"Apache-2.0"
] | null | null | null |
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import traceback
import bpy
bl_info = {
"name": "Radeon ProRender",
"author": "AMD",
"version": (3, 1, 0),
"blender": (2, 80, 0),
"location": "Info header, render engine menu",
"description": "Radeon ProRender rendering plugin for Blender 2.8x",
"warning": "",
"tracker_url": "",
"wiki_url": "",
"category": "Render"
}
version_build = ""
from .utils import logging, version_updater
from .utils import install_libs
from .engine.engine import Engine
from . import (
nodes,
properties,
ui,
operators,
material_library,
)
from .engine.render_engine import RenderEngine
from .engine.render_engine_2 import RenderEngine2
from .engine.preview_engine import PreviewEngine
from .engine.viewport_engine import ViewportEngine
from .engine.viewport_engine_2 import ViewportEngine2
from .engine.animation_engine import AnimationEngine, AnimationEngine2
from .engine.render_engine_hybrid import RenderEngine as RenderEngineHybrid
from .engine.viewport_engine_hybrid import ViewportEngine as ViewportEngineHybrid
from .engine.animation_engine_hybrid import AnimationEngine as AnimationEngineHybrid
log = logging.Log(tag='init')
log("Loading RPR addon {}".format(bl_info['version']))
render_engine_cls = {
'FULL': RenderEngine,
'HIGH': RenderEngineHybrid,
'MEDIUM': RenderEngineHybrid,
'LOW': RenderEngineHybrid,
'FULL2': RenderEngine2,
}
animation_engine_cls = {
'FULL': AnimationEngine,
'HIGH': AnimationEngineHybrid,
'MEDIUM': AnimationEngineHybrid,
'LOW': AnimationEngineHybrid,
'FULL2': AnimationEngine2,
}
viewport_engine_cls = {
'FULL': ViewportEngine,
'HIGH': ViewportEngineHybrid,
'MEDIUM': ViewportEngineHybrid,
'LOW': ViewportEngineHybrid,
'FULL2': ViewportEngine2,
}
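# The scene's render_quality setting selects the engine implementation: 'FULL'/'FULL2' map to the native RPR engines, 'HIGH'/'MEDIUM'/'LOW' map to the Hybrid engines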
class RPREngine(bpy.types.RenderEngine):
"""
Main class of Radeon ProRender render engine for Blender v2.80+
"""
bl_idname = "RPR"
bl_label = "Radeon ProRender"
bl_use_preview = True
bl_use_shading_nodes = True
bl_use_shading_nodes_custom = False
bl_info = "Radeon ProRender rendering plugin"
engine: Engine = None
def __del__(self):
if isinstance(self.engine, ViewportEngine):
self.engine.stop_render()
log('__del__', self.as_pointer())
# final render
def update(self, data, depsgraph):
""" Called for final render """
log('update', self.as_pointer())
# TODO: We create a separate Engine for every view layer. We should improve this by implementing sync_update()
try:
if self.is_preview:
engine_cls = PreviewEngine
elif self.is_animation:
engine_cls = animation_engine_cls[depsgraph.scene.rpr.render_quality]
else:
engine_cls = render_engine_cls[depsgraph.scene.rpr.render_quality]
self.engine = engine_cls(self)
self.engine.sync(depsgraph)
except Exception as e:
log.error(e, 'EXCEPTION:', traceback.format_exc())
self.error_set(f"ERROR | {e}. Please see log for more details.")
def render(self, depsgraph):
""" Called with final render and preview """
log("render", self.as_pointer())
try:
self.engine.render()
except Exception as e:
log.error(e, 'EXCEPTION:', traceback.format_exc())
self.error_set(f"ERROR | {e}. Please see log for more details.")
# This has to be called at the end of render due to a possible memory leak (RPRBLND-1635)
# Important to call it in this function, not in __del__()
self.engine.stop_render()
# viewport render
def view_update(self, context, depsgraph):
""" Called when data is updated for viewport """
log('view_update', self.as_pointer())
try:
# if there is no engine set, create it and do the initial sync
engine_cls = viewport_engine_cls[depsgraph.scene.rpr.render_quality]
if self.engine and type(self.engine) == engine_cls:
self.engine.sync_update(context, depsgraph)
return
if self.engine:
self.engine.stop_render()
self.engine = engine_cls(self)
self.engine.sync(context, depsgraph)
except Exception as e:
log.error(e, 'EXCEPTION:', traceback.format_exc())
def view_draw(self, context, depsgraph):
""" called when viewport is to be drawn """
log('view_draw', self.as_pointer())
try:
self.engine.draw(context)
except Exception as e:
log.error(e, 'EXCEPTION:', traceback.format_exc())
# view layer AOVs
def update_render_passes(self, render_scene=None, render_layer=None):
"""
Update 'Render Layers' compositor node with active render passes info.
Called by Blender.
"""
aovs = properties.view_layer.RPR_ViewLayerProperites.aovs_info
cryptomatte_aovs = properties.view_layer.RPR_ViewLayerProperites.cryptomatte_aovs_info
scene = render_scene if render_scene else bpy.context.scene
layer = render_layer if render_scene else bpy.context.view_layer
def do_register_pass(aov):
pass_channel = aov['channel']
pass_name = aov['name']
pass_channels_size = len(pass_channel)
# convert from channel to blender type
blender_type = 'VALUE'
if pass_channel in ('RGB', 'RGBA'):
blender_type = 'COLOR'
elif pass_channel in {'XYZ', 'UVA'}:
blender_type = 'VECTOR'
self.register_pass(scene, layer,
pass_name, pass_channels_size, pass_channel, blender_type)
for index, enabled in enumerate(layer.rpr.enable_aovs):
if enabled:
do_register_pass(aovs[index])
if layer.rpr.crytomatte_aov_material:
for i in range(3):
do_register_pass(cryptomatte_aovs[i])
if layer.rpr.crytomatte_aov_object:
for i in range(3,6):
do_register_pass(cryptomatte_aovs[i])
@bpy.app.handlers.persistent
def on_version_update(*args, **kwargs):
""" On scene loading update old RPR data to current version """
log("on_version_update")
addon_version = bl_info['version']
if version_updater.is_scene_from_2_79(addon_version):
version_updater.update_2_79_scene()
@bpy.app.handlers.persistent
def on_save_pre(*args, **kwargs):
""" Handler on saving a blend file (before) """
log("on_save_pre")
# Save current plugin version in scene
bpy.context.scene.rpr.saved_addon_version = bl_info['version']
@bpy.app.handlers.persistent
def on_load_pre(*args, **kwargs):
""" Handler on loading a blend file (before) """
log("on_load_pre")
utils.clear_temp_dir()
def register():
""" Register all addon classes in Blender """
log("register")
install_libs.ensure_boto3()
bpy.utils.register_class(RPREngine)
material_library.register()
properties.register()
operators.register()
nodes.register()
ui.register()
bpy.app.handlers.save_pre.append(on_save_pre)
bpy.app.handlers.load_pre.append(on_load_pre)
bpy.app.handlers.version_update.append(on_version_update)
def unregister():
""" Unregister all addon classes from Blender """
log("unregister")
bpy.app.handlers.version_update.remove(on_version_update)
bpy.app.handlers.load_pre.remove(on_load_pre)
bpy.app.handlers.save_pre.remove(on_save_pre)
ui.unregister()
nodes.unregister()
operators.unregister()
properties.unregister()
material_library.unregister()
bpy.utils.unregister_class(RPREngine)
| 31.391941
| 116
| 0.655076
| 4,429
| 0.516803
| 0
| 0
| 721
| 0.084131
| 0
| 0
| 2,521
| 0.294166
|
12cc0f45c792a01e3a5bd5c42c13138e07ace531
| 1,561
|
py
|
Python
|
plot_metric_err_vs_dim.py
|
wchen459/design_embeddings_jmd_2016
|
30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f
|
[
"MIT"
] | 9
|
2017-07-13T19:17:48.000Z
|
2022-03-17T02:19:06.000Z
|
plot_metric_err_vs_dim.py
|
wchen459/design_embeddings_jmd_2016
|
30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f
|
[
"MIT"
] | null | null | null |
plot_metric_err_vs_dim.py
|
wchen459/design_embeddings_jmd_2016
|
30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f
|
[
"MIT"
] | 2
|
2018-08-31T22:46:03.000Z
|
2020-06-19T16:17:38.000Z
|
"""
Plots reconstruction error vs semantic space dimensionality
Usage: python plot_metric_err_vs_dim.py
Author(s): Wei Chen (wchen459@umd.edu)
"""
import matplotlib.pyplot as plt
import numpy as np
plt.rc("font", size=18)
examples = ['glass', 'sf_linear', 'sf_s_nonlinear', 'sf_v_nonlinear']
titles = {'glass': 'Glass',
'sf_linear': 'Superformula (linear)',
'sf_s_nonlinear': 'Superformula (slightly nonlinear)',
'sf_v_nonlinear': 'Superformula (very nonlinear)'}
n = len(examples)
x = range(1, 6)
for i in range(n):
plt.figure()
plt.xticks(np.arange(min(x), max(x)+1, dtype=int))
plt.xlabel('Semantic space dimensionality')
plt.ylabel('Reconstruction error')
plt.xlim(0.5, 5.5)
errs = np.zeros((3,5))
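# rows: PCA, kernel PCA, autoencoder; columns: semantic space dimensionality 1-5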
for j in x:
# Read reconstruction errors in rec_err.txt
txtfile = open('./results/'+examples[i]+'/n_samples = 115/n_control_points = 20/semantic_dim = '
+str(j)+'/rec_err.txt', 'r')
k = 0
for line in txtfile:
errs[k, j-1] = float(line)
k += 1
line_pca, = plt.plot(x, errs[0], '-ob', label='PCA')
line_kpca, = plt.plot(x, errs[1], '-vg', label='Kernel PCA')
line_ae, = plt.plot(x, errs[2], '-sr', label='Autoencoder')
plt.legend(handles=[line_pca, line_kpca, line_ae], fontsize=16)
plt.title(titles[examples[i]])
fig_name = 'err_vs_dim_'+examples[i]+'.png'
plt.tight_layout()
plt.savefig('./results/'+fig_name, dpi=300)
print(fig_name + ' saved!')
| 31.22
| 104
| 0.606022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 611
| 0.391416
|
12cc8345dd761da772a7145052f730ec8abb45f7
| 621
|
py
|
Python
|
tools/pot/openvino/tools/pot/graph/gpu_patterns.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/pot/openvino/tools/pot/graph/gpu_patterns.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/pot/openvino/tools/pot/graph/gpu_patterns.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \
check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern
def get_gpu_ignored_patterns():
return {
'blocks': [(pattern, check_fused_scale_shift_patterns) for pattern in get_fused_scale_shift_patterns()] +
[(pattern, check_fused_op_const_patterns) for pattern in get_fused_op_const_pattern()],
'activations': [get_clamp_mult_const_pattern()],
'inputs': []
}
| 41.4
| 113
| 0.756844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.175523
|
12ccbde3bf71864760496c1e1f0963111fba9314
| 638
|
py
|
Python
|
test/environments/instances/8x8/gen.py
|
Multi-Agent-Research-Group/hog2
|
544d7c0e933fd69025944a0a3abcf9a40e59f0be
|
[
"MIT"
] | 5
|
2020-08-03T09:43:26.000Z
|
2022-01-11T08:28:30.000Z
|
test/environments/instances/8x8/gen.py
|
Multi-Agent-Research-Group/hog2
|
544d7c0e933fd69025944a0a3abcf9a40e59f0be
|
[
"MIT"
] | null | null | null |
test/environments/instances/8x8/gen.py
|
Multi-Agent-Research-Group/hog2
|
544d7c0e933fd69025944a0a3abcf9a40e59f0be
|
[
"MIT"
] | 7
|
2017-07-31T13:01:28.000Z
|
2021-05-16T10:15:49.000Z
|
#!/usr/bin/python
import random
import os
import errno
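# For each of 100 random 8x8 instances, draw 50 unique start cells and 50 unique goal cells, then write agent files of 21-49 start/goal pairs into per-size directories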
for i in range(100):
s=set()
g=set()
while len(s) < 50:
s.add((random.randint(0,7),random.randint(0,7)))
while len(g) < 50:
g.add((random.randint(0,7),random.randint(0,7)))
start=list(s)
goal=list(g)
for size in range(21,50):
if not os.path.exists("./%d"%size):
try:
os.makedirs("./%d"%size)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open("./%d/%d.csv"%(size,i), "w") as f:
for j in range(size):
f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
| 22.785714
| 80
| 0.548589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.094044
|
12ccd738c589b9032a098324390886166233073c
| 2,308
|
py
|
Python
|
pose_recognition_from_camera_demo.py
|
amazingchow/capture-dance-using-mediapipe
|
1963d461b4e047308da78b1bb88b9ed1f2c3c7d1
|
[
"MIT"
] | null | null | null |
pose_recognition_from_camera_demo.py
|
amazingchow/capture-dance-using-mediapipe
|
1963d461b4e047308da78b1bb88b9ed1f2c3c7d1
|
[
"MIT"
] | null | null | null |
pose_recognition_from_camera_demo.py
|
amazingchow/capture-dance-using-mediapipe
|
1963d461b4e047308da78b1bb88b9ed1f2c3c7d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import cv2 as cv
import mediapipe as mp
import sys
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_device", type=int, default=0)
parser.add_argument("--video_file", type=str, default="")
args = parser.parse_args()
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
cap = object()
if args.video_file != "":
cap = cv.VideoCapture(args.video_file)
else:
cap = cv.VideoCapture(args.video_device)
if not cap.isOpened():
print("Cannot open camera device-0")
sys.exit(-1)
else:
print("Video <width: {}, height: {}, fps: {}>".format(
cap.get(cv.CAP_PROP_FRAME_WIDTH),
cap.get(cv.CAP_PROP_FRAME_HEIGHT),
cap.get(cv.CAP_PROP_FPS)
))
fps = int(cap.get(cv.CAP_PROP_FPS))
frame_idx = 0
while 1:
ret, frame = cap.read()
if not ret:
print("Cannot receive frame, exiting ...")
break
frame_idx += 1
st = time.time()
# flip the frame horizontally for a later selfie-view display
frame = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB)
# to improve performance, optionally mark the frame as not writeable to pass by reference
frame.flags.writeable = False
results = pose.process(frame)
frame.flags.writeable = True
frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)
# draw the pose annotation on the frame
mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
ed = time.time()
print("Used {:.3f} secs to process frame-{:05}".format(ed - st, frame_idx))
gap = 1000//fps - int(1000 * (ed - st))
if gap < 5:
gap = 5
cv.imshow("pose_recognition_from_camera_demo", frame)
if cv.waitKey(gap) & 0xFF == 27:
break
cap.release()
cv.destroyAllWindows()
| 35.507692
| 101
| 0.561958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 436
| 0.188908
|
12ce678d7b9581bc7d8e71fefb2ce7346256d86f
| 1,901
|
py
|
Python
|
reference/data_dict_export.py
|
TBody/atomic1D
|
fcab88f3b303468f23ac75b847c76244593f4b7f
|
[
"MIT"
] | 1
|
2019-05-18T22:32:21.000Z
|
2019-05-18T22:32:21.000Z
|
reference/data_dict_export.py
|
TBody/atomic1D
|
fcab88f3b303468f23ac75b847c76244593f4b7f
|
[
"MIT"
] | null | null | null |
reference/data_dict_export.py
|
TBody/atomic1D
|
fcab88f3b303468f23ac75b847c76244593f4b7f
|
[
"MIT"
] | null | null | null |
# Program name: atomic1D/reference/build_json.py
# Author: Thomas Body
# Author email: tajb500@york.ac.uk
# Date of creation: 14 July 2017
#
#
# Makes data_dict and copies it into a .json file 'sd1d-case-05.json'
filename = 'sd1d-case-05'
from boutdata.collect import collect
data_dict = {}
# Normalisation factor for temperature - T * Tnorm returns in eV
data_dict["Tnorm"] = collect("Tnorm")
# Normalisation factor for density - N * Nnorm returns in m^-3
data_dict["Nnorm"] = collect("Nnorm")
# Plasma pressure (normalised). Pe = 2 Ne Te => P/Ne = Te (and assume Ti=Te)
data_dict["P"] = collect("P")
# Electron density (normalised)
data_dict["Ne"] = collect("Ne")
# Neutral density (normalised)
data_dict["Nn"] = collect("Nn")
# Help for user
data_dict["help"] = "Contains outputs from Boutprojects/SD1D/case-05 example. Created with data_dict_export.py - stored in Github.com/TBody/atomic1D/reference"
from copy import deepcopy
import numpy as np
import json
# Need to 'jsonify' the numpy arrays (i.e. convert to nested lists) so that they can be stored in plain-text
# Deep-copy data to a new dictionary and then edit that one (i.e. break the data pointer association - keep data_dict unchanged in case you want to run a copy-verify on it)
data_dict_jsonified = deepcopy(data_dict)
numpy_ndarrays = []
for key, element in data_dict.items():
if type(element) == np.ndarray:
# Store which keys correspond to numpy.ndarray, so that you can de-jsonify the arrays when reading
numpy_ndarrays.append(key)
data_dict_jsonified[key] = data_dict_jsonified[key].tolist()
data_dict_jsonified['numpy_ndarrays'] = numpy_ndarrays
# Encode help
# >> data_dict['help'] = 'help string'
# <<Use original filename, except with .json instead of .dat extension>>
with open('{}.json'.format(filename),'w') as fp:
json.dump(data_dict_jsonified, fp, sort_keys=True, indent=4)
| 36.557692
| 172
| 0.730668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,224
| 0.643872
|
12cf323ab36261eee5e0ca79f3a3c93c62ed377b
| 3,300
|
py
|
Python
|
wordDocComposite.py
|
flyonok/image2text
|
0c16e6bf35eb486e6ff28e9e402a18bea6bd338c
|
[
"Apache-1.1"
] | null | null | null |
wordDocComposite.py
|
flyonok/image2text
|
0c16e6bf35eb486e6ff28e9e402a18bea6bd338c
|
[
"Apache-1.1"
] | null | null | null |
wordDocComposite.py
|
flyonok/image2text
|
0c16e6bf35eb486e6ff28e9e402a18bea6bd338c
|
[
"Apache-1.1"
] | null | null | null |
from docx import Document
def CompositeTwoDocs(srcDocFullName, dstDocFullName, compositeName):
'''
srcDocFullName: source document that contains the content to copy from
dstDocFullName: target document; after execution the matching template content in it is replaced
compositeName: name of the part to composite, e.g. front side (正面) or back side (背面)
return: True on success, False on failure
'''
try:
srcDoc = Document(srcDocFullName)
dstDoc = Document(dstDocFullName)
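# Both documents are expected to share the same template layout: a 'Heading 2' section whose text contains compositeName, with 'Heading 3' labels grouping the paragraphs beneath it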
srcParasMap = {} # Heading 2 => [paras list]
dstParasMap = {} # Heading 2 => [paras list]
firstPage = False
secondPage = False
currentLabelStyleContent = None # content of the current label-style (Heading 3) heading
# Collect the relevant paragraphs from the source document
for srcPara in srcDoc.paragraphs:
if (srcPara.style.name.find('Heading 2') >= 0 and srcPara.text.find(compositeName) >= 0):
print('find {0}'.format(srcPara))
firstPage = True
elif (srcPara.style.name.find('Heading 2') >= 0 and firstPage):
secondPage = True
break
else:
if (firstPage and not secondPage):
if (srcPara.style.name.find('Heading 3') >= 0):
srcParasMap[srcPara.text] = []
currentLabelStyleContent = srcPara.text
else:
if currentLabelStyleContent is None:
raise ValueError('Invalid Word template document!')
srcParasMap[currentLabelStyleContent].append(srcPara)
firstPage = False
secondPage = False
currentLabelStyleContent = None # content of the current label-style (Heading 3) heading
# Collect the relevant paragraphs from the target document
for dstPara in dstDoc.paragraphs:
if (dstPara.style.name.find('Heading 2') >= 0 and dstPara.text.find(compositeName) >= 0):
print('find {0}'.format(dstPara))
firstPage = True
elif (dstPara.style.name.find('Heading 2') >= 0 and firstPage):
secondPage = True
break
else:
if (firstPage and not secondPage):
if (dstPara.style.name.find('Heading 3') >= 0):
dstParasMap[dstPara.text] = []
currentLabelStyleContent = dstPara.text
else:
if currentLabelStyleContent is None:
raise ValueError('Invalid Word template document!')
dstParasMap[currentLabelStyleContent].append(dstPara)
# Start compositing: copy matching paragraphs from source to target
for key, dstParas in dstParasMap.items():
srcParas = srcParasMap[key]
if len(srcParas) <= 0:
print('Source document has no content for item --{0}--'.format(key))
continue
else:
for index, item in enumerate(dstParas):
if (index < len(srcParas)):
dstParas[index].text = srcParas[index].text
else:
print('Index --{1}-- in {0} already exceeds the source document length --{2}'.format(key, index, len(srcParas)))
dstDoc.save(dstDocFullName)
except Exception as e:
print('An error occurred...')
print(e)
return False
return True
if __name__ == '__main__':
srcDocFullName = r'D:\秒秒学人工智能平台\2020年8月\名片-111\名片-111.docx'
dstDocFullName = r'D:\秒秒学人工智能平台\2020年8月\名片-456\名片-456.docx'
CompositeTwoDocs(srcDocFullName, dstDocFullName, '正面')
| 40.740741
| 101
| 0.538485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 889
| 0.243162
|
12d0afe950ed445eb9f7e907ee14e9a851acd904
| 4,853
|
py
|
Python
|
app/cover.py
|
mrwiwi/tydom2mqtt
|
293322033b67521bb981af1c8c2245ca9af6c646
|
[
"MIT"
] | 26
|
2020-04-07T17:58:24.000Z
|
2022-02-12T16:28:44.000Z
|
app/cover.py
|
mrwiwi/tydom2mqtt
|
293322033b67521bb981af1c8c2245ca9af6c646
|
[
"MIT"
] | 19
|
2020-03-25T09:46:46.000Z
|
2021-11-29T09:55:57.000Z
|
app/cover.py
|
mrwiwi/tydom2mqtt
|
293322033b67521bb981af1c8c2245ca9af6c646
|
[
"MIT"
] | 26
|
2020-04-27T21:40:12.000Z
|
2022-01-06T14:44:22.000Z
|
import json
import time
from datetime import datetime
from sensors import sensor
cover_command_topic = "cover/tydom/{id}/set_positionCmd"
cover_config_topic = "homeassistant/cover/tydom/{id}/config"
cover_position_topic = "cover/tydom/{id}/current_position"
cover_set_postion_topic = "cover/tydom/{id}/set_position"
cover_attributes_topic = "cover/tydom/{id}/attributes"
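# MQTT topics per cover: the config topic follows the Home Assistant MQTT-discovery convention; the others carry position state, position/command input and JSON attributes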
class Cover:
def __init__(self, tydom_attributes, set_position=None, mqtt=None):
self.attributes = tydom_attributes
self.device_id = self.attributes['device_id']
self.endpoint_id = self.attributes['endpoint_id']
self.id = self.attributes['id']
self.name = self.attributes['cover_name']
self.current_position = self.attributes['position']
self.set_position = set_position
self.mqtt = mqtt
# def id(self):
# return self.id
# def name(self):
# return self.name
# def current_position(self):
# return self.current_position
# def set_position(self):
# return self.set_position
# def attributes(self):
# return self.attributes
async def setup(self):
self.device = {}
self.device['manufacturer'] = 'Delta Dore'
self.device['model'] = 'Volet'
self.device['name'] = self.name
self.device['identifiers'] = self.id
self.config_topic = cover_config_topic.format(id=self.id)
self.config = {}
self.config['name'] = self.name
self.config['unique_id'] = self.id
# self.config['attributes'] = self.attributes
self.config['command_topic'] = cover_command_topic.format(id=self.id)
self.config['set_position_topic'] = cover_set_postion_topic.format(
id=self.id)
self.config['position_topic'] = cover_position_topic.format(id=self.id)
self.config['json_attributes_topic'] = cover_attributes_topic.format(
id=self.id)
self.config['payload_open'] = "UP"
self.config['payload_close'] = "DOWN"
self.config['payload_stop'] = "STOP"
self.config['retain'] = 'false'
self.config['device'] = self.device
# print(self.config)
if (self.mqtt is not None):
self.mqtt.mqtt_client.publish(
self.config_topic, json.dumps(
self.config), qos=0)
# setup_pub = '(self.config_topic, json.dumps(self.config), qos=0)'
# return(setup_pub)
async def update(self):
await self.setup()
try:
await self.update_sensors()
except Exception as e:
print("Cover sensors Error :")
print(e)
self.position_topic = cover_position_topic.format(
id=self.id, current_position=self.current_position)
if (self.mqtt is not None):
self.mqtt.mqtt_client.publish(
self.position_topic,
self.current_position,
qos=0,
retain=True)
# self.mqtt.mqtt_client.publish('homeassistant/sensor/tydom/last_update', str(datetime.fromtimestamp(time.time())), qos=1, retain=True)
self.mqtt.mqtt_client.publish(
self.config['json_attributes_topic'], self.attributes, qos=0)
print(
"Cover created / updated : ",
self.name,
self.id,
self.current_position)
# update_pub = '(self.position_topic, self.current_position, qos=0, retain=True)'
# return(update_pub)
async def update_sensors(self):
# print('test sensors !')
for i, j in self.attributes.items():
# sensor_name = "tydom_alarm_sensor_"+i
# print("name "+sensor_name, "elem_name "+i, "attributes_topic_from_device ",self.config['json_attributes_topic'], "mqtt",self.mqtt)
if i not in ('device_type', 'id'):
new_sensor = None
new_sensor = sensor(
elem_name=i,
tydom_attributes_payload=self.attributes,
attributes_topic_from_device=self.config['json_attributes_topic'],
mqtt=self.mqtt)
await new_sensor.update()
# def __init__(self, name, elem_name, tydom_attributes_payload,
# attributes_topic_from_device, mqtt=None):
async def put_position(tydom_client, device_id, cover_id, position):
print(cover_id, 'position', position)
if not (position == ''):
await tydom_client.put_devices_data(device_id, cover_id, 'position', position)
async def put_positionCmd(tydom_client, device_id, cover_id, positionCmd):
print(cover_id, 'positionCmd', positionCmd)
if not (positionCmd == ''):
await tydom_client.put_devices_data(device_id, cover_id, 'positionCmd', positionCmd)
| 37.914063
| 147
| 0.622708
| 4,478
| 0.922728
| 0
| 0
| 0
| 0
| 3,578
| 0.737276
| 1,546
| 0.318566
|
12d29fab22f07b19b231bdfe08bc053825594e45
| 56,823
|
py
|
Python
|
edx/config/lms/docker_run.py
|
openfun/learning-analytics-playground
|
dca80d89ca781d9060bd69927af4aa1462cc53ef
|
[
"MIT"
] | 1
|
2021-12-13T09:05:59.000Z
|
2021-12-13T09:05:59.000Z
|
edx/config/lms/docker_run.py
|
openfun/learning-analytics-playground
|
dca80d89ca781d9060bd69927af4aa1462cc53ef
|
[
"MIT"
] | 3
|
2021-05-18T08:26:51.000Z
|
2022-03-14T10:34:36.000Z
|
edx/config/lms/docker_run.py
|
openfun/learning-analytics-playground
|
dca80d89ca781d9060bd69927af4aa1462cc53ef
|
[
"MIT"
] | 1
|
2021-06-03T14:21:56.000Z
|
2021-06-03T14:21:56.000Z
|
"""
This is the default template for our main set of servers. This does NOT
cover the content machines, which use content.py
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import datetime
import dateutil
from glob import glob
import json
import os
from path import Path as path
import pkgutil
import platform
from django.utils.translation import ugettext_lazy
from django.conf import global_settings
from celery_redis_sentinel import register
from openedx.core.lib.logsettings import get_logger_config
from path import Path as path
from xmodule.modulestore.modulestore_settings import (
convert_module_store_setting_if_needed,
update_module_store_settings,
)
from ..common import *
from .utils import Configuration, prefer_fun_video
# Load custom configuration parameters from yaml files
config = Configuration(os.path.dirname(__file__))
# edX has now started using "settings.ENV_TOKENS" and "settings.AUTH_TOKENS" everywhere in the
# project, not just in the settings. Let's make sure our settings still work in this case
ENV_TOKENS = config
AUTH_TOKENS = config
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = config("SERVICE_VARIANT", default=None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(config("CONFIG_ROOT", default=ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is used, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
################################ ALWAYS THE SAME ##############################
RELEASE = config("RELEASE", default=None)
DEBUG = False
DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["debug"] = False
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
###################################### CELERY ################################
CELERY_ALWAYS_EAGER = config("CELERY_ALWAYS_EAGER", default=False, formatter=bool)
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = config(
"CELERY_RESULT_BACKEND", default="djcelery.backends.cache:CacheBackend"
)
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 60.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Celery queues
DEFAULT_PRIORITY_QUEUE = config(
"DEFAULT_PRIORITY_QUEUE", default="edx.lms.core.default"
)
HIGH_PRIORITY_QUEUE = config("HIGH_PRIORITY_QUEUE", default="edx.lms.core.high")
LOW_PRIORITY_QUEUE = config("LOW_PRIORITY_QUEUE", default="edx.lms.core.low")
HIGH_MEM_QUEUE = config("HIGH_MEM_QUEUE", default="edx.lms.core.high_mem")
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = config(
"CELERY_QUEUES",
default={
DEFAULT_PRIORITY_QUEUE: {},
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
},
formatter=json.loads,
)
CELERY_ROUTES = "lms.celery.Router"
# Force accepted content to "json" only. If we also accept pickle-serialized
# messages, the worker will crash when it's running with a privileged user (even
# if it's not the root user but a user belonging to the root group, which is our
# case with OpenShift).
CELERY_ACCEPT_CONTENT = ["json"]
CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
STATIC_ROOT_BASE = path("/edx/app/edxapp/staticfiles")
STATIC_ROOT = STATIC_ROOT_BASE
STATIC_URL = "/static/"
STATICFILES_STORAGE = config(
"STATICFILES_STORAGE", default="lms.envs.fun.storage.CDNProductionStorage"
)
CDN_BASE_URL = config("CDN_BASE_URL", default=None)
MEDIA_ROOT = path("/edx/var/edxapp/media/")
MEDIA_URL = "/media/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = config(
"DEFAULT_COURSE_ABOUT_IMAGE_URL", default=DEFAULT_COURSE_ABOUT_IMAGE_URL
)
PLATFORM_NAME = config("PLATFORM_NAME", default=PLATFORM_NAME)
# For displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default
PLATFORM_TWITTER_ACCOUNT = config(
"PLATFORM_TWITTER_ACCOUNT", default=PLATFORM_TWITTER_ACCOUNT
)
PLATFORM_FACEBOOK_ACCOUNT = config(
"PLATFORM_FACEBOOK_ACCOUNT", default=PLATFORM_FACEBOOK_ACCOUNT
)
SOCIAL_SHARING_SETTINGS = config(
"SOCIAL_SHARING_SETTINGS", default=SOCIAL_SHARING_SETTINGS, formatter=json.loads
)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = config(
"SOCIAL_MEDIA_FOOTER_URLS", default=SOCIAL_MEDIA_FOOTER_URLS, formatter=json.loads
)
CC_MERCHANT_NAME = config("CC_MERCHANT_NAME", default=PLATFORM_NAME)
EMAIL_BACKEND = config(
"EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
EMAIL_FILE_PATH = config("EMAIL_FILE_PATH", default=None)
EMAIL_HOST = config("EMAIL_HOST", default="localhost")
EMAIL_PORT = config("EMAIL_PORT", default=25) # django default is 25
EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False) # django default is False
HTTPS = config("HTTPS", default=HTTPS)
SESSION_COOKIE_DOMAIN = config("SESSION_COOKIE_DOMAIN", default=None)
SESSION_COOKIE_HTTPONLY = config(
"SESSION_COOKIE_HTTPONLY", default=True, formatter=bool
)
SESSION_COOKIE_SECURE = config(
"SESSION_COOKIE_SECURE", default=SESSION_COOKIE_SECURE, formatter=bool
)
SESSION_ENGINE = config("SESSION_ENGINE", default="redis_sessions.session")
SESSION_SAVE_EVERY_REQUEST = config(
"SESSION_SAVE_EVERY_REQUEST", default=SESSION_SAVE_EVERY_REQUEST, formatter=bool
)
# Configuration to use session with redis
# To use redis, change SESSION_ENGINE to "redis_sessions.session"
SESSION_REDIS_HOST = config("SESSION_REDIS_HOST", default="redis")
SESSION_REDIS_PORT = config("SESSION_REDIS_PORT", default=6379, formatter=int)
SESSION_REDIS_DB = config("SESSION_REDIS_DB", default=1, formatter=int)
SESSION_REDIS_PASSWORD = config("SESSION_REDIS_PASSWORD", default=None)
SESSION_REDIS_PREFIX = config("SESSION_REDIS_PREFIX", default="session")
SESSION_REDIS_SOCKET_TIMEOUT = config(
"SESSION_REDIS_SOCKET_TIMEOUT", default=1, formatter=int
)
SESSION_REDIS_RETRY_ON_TIMEOUT = config(
"SESSION_REDIS_RETRY_ON_TIMEOUT", default=False, formatter=bool
)
SESSION_REDIS = config(
"SESSION_REDIS",
default={
"host": SESSION_REDIS_HOST,
"port": SESSION_REDIS_PORT,
"db": SESSION_REDIS_DB, # db 0 is used for Celery Broker
"password": SESSION_REDIS_PASSWORD,
"prefix": SESSION_REDIS_PREFIX,
"socket_timeout": SESSION_REDIS_SOCKET_TIMEOUT,
"retry_on_timeout": SESSION_REDIS_RETRY_ON_TIMEOUT,
},
formatter=json.loads,
)
SESSION_REDIS_SENTINEL_LIST = config(
"SESSION_REDIS_SENTINEL_LIST", default=None, formatter=json.loads
)
SESSION_REDIS_SENTINEL_MASTER_ALIAS = config(
"SESSION_REDIS_SENTINEL_MASTER_ALIAS", default=None
)
REGISTRATION_EXTRA_FIELDS = config(
"REGISTRATION_EXTRA_FIELDS", default=REGISTRATION_EXTRA_FIELDS, formatter=json.loads
)
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = config(
"EDXMKTG_LOGGED_IN_COOKIE_NAME", default=EDXMKTG_LOGGED_IN_COOKIE_NAME
)
EDXMKTG_USER_INFO_COOKIE_NAME = config(
"EDXMKTG_USER_INFO_COOKIE_NAME", default=EDXMKTG_USER_INFO_COOKIE_NAME
)
# Override feature by feature by whatever is being redefined in the settings.yaml file
CONFIG_FEATURES = config("FEATURES", default={}, formatter=json.loads)
FEATURES.update(CONFIG_FEATURES)
LMS_BASE = config("LMS_BASE", default="localhost:8072")
CMS_BASE = config("CMS_BASE", default="localhost:8082")
LMS_ROOT_URL = config("LMS_ROOT_URL", default="http://{:s}".format(LMS_BASE))
LMS_INTERNAL_ROOT_URL = config("LMS_INTERNAL_ROOT_URL", default=LMS_ROOT_URL)
SITE_NAME = config("SITE_NAME", default=LMS_BASE)
ALLOWED_HOSTS = config(
"ALLOWED_HOSTS", default=[LMS_BASE.split(":")[0]], formatter=json.loads
)
if FEATURES.get("PREVIEW_LMS_BASE"):
ALLOWED_HOSTS.append(FEATURES["PREVIEW_LMS_BASE"])
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if config("SESSION_COOKIE_NAME", default=None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this
# being a str()
SESSION_COOKIE_NAME = str(config("SESSION_COOKIE_NAME"))
CACHE_REDIS_HOST = config("CACHE_REDIS_HOST", default="redis")
CACHE_REDIS_PORT = config("CACHE_REDIS_PORT", default=6379, formatter=int)
CACHE_REDIS_DB = config("CACHE_REDIS_DB", default=1, formatter=int)
CACHE_REDIS_BACKEND = config(
"CACHE_REDIS_BACKEND", default="django_redis.cache.RedisCache"
)
CACHE_REDIS_URI = "redis://{}:{}/{}".format(
CACHE_REDIS_HOST, CACHE_REDIS_PORT, CACHE_REDIS_DB
)
CACHE_REDIS_CLIENT = config(
"CACHE_REDIS_CLIENT", default="django_redis.client.DefaultClient"
)
CACHES_DEFAULT_CONFIG = {
"BACKEND": CACHE_REDIS_BACKEND,
"LOCATION": CACHE_REDIS_URI,
"OPTIONS": {"CLIENT_CLASS": CACHE_REDIS_CLIENT},
}
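# Sentinel-aware Redis backends expect LOCATION as a list of (host, port) sentinel pairs, with the service name and target db passed through OPTIONS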
if "Sentinel" in CACHE_REDIS_BACKEND:
CACHES_DEFAULT_CONFIG["LOCATION"] = [(CACHE_REDIS_HOST, CACHE_REDIS_PORT)]
CACHES_DEFAULT_CONFIG["OPTIONS"]["SENTINEL_SERVICE_NAME"] = config(
"CACHE_REDIS_SENTINEL_SERVICE_NAME", default="mymaster"
)
CACHES_DEFAULT_CONFIG["OPTIONS"]["REDIS_CLIENT_KWARGS"] = {"db": CACHE_REDIS_DB}
CACHES = config(
"CACHES",
default={
"default": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "default"}),
"general": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "general"}),
"celery": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "celery"}),
"mongo_metadata_inheritance": dict(
CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "mongo_metadata_inheritance"}
),
"openassessment_submissions": dict(
CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "openassessment_submissions"}
),
"loc_cache": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "edx_location_mem_cache",
},
# Cache backend used by Django 1.8 storage backend while processing static files
"staticfiles": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "edx_location_mem_cache",
},
},
formatter=json.loads,
)
# Email overrides
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default=DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = config(
"DEFAULT_FEEDBACK_EMAIL", default=DEFAULT_FEEDBACK_EMAIL
)
ADMINS = config("ADMINS", default=ADMINS, formatter=json.loads)
SERVER_EMAIL = config("SERVER_EMAIL", default=SERVER_EMAIL)
TECH_SUPPORT_EMAIL = config("TECH_SUPPORT_EMAIL", default=TECH_SUPPORT_EMAIL)
CONTACT_EMAIL = config("CONTACT_EMAIL", default=CONTACT_EMAIL)
BUGS_EMAIL = config("BUGS_EMAIL", default=BUGS_EMAIL)
PAYMENT_SUPPORT_EMAIL = config("PAYMENT_SUPPORT_EMAIL", default=PAYMENT_SUPPORT_EMAIL)
FINANCE_EMAIL = config("FINANCE_EMAIL", default=FINANCE_EMAIL)
UNIVERSITY_EMAIL = config("UNIVERSITY_EMAIL", default=UNIVERSITY_EMAIL)
PRESS_EMAIL = config("PRESS_EMAIL", default=PRESS_EMAIL)
# Currency
PAID_COURSE_REGISTRATION_CURRENCY = config(
"PAID_COURSE_REGISTRATION_CURRENCY", default=["EUR", u"\N{euro sign}"]
)
# Payment Report Settings
PAYMENT_REPORT_GENERATOR_GROUP = config(
"PAYMENT_REPORT_GENERATOR_GROUP", default=PAYMENT_REPORT_GENERATOR_GROUP
)
# Bulk Email overrides
BULK_EMAIL_DEFAULT_FROM_EMAIL = config(
"BULK_EMAIL_DEFAULT_FROM_EMAIL", default=BULK_EMAIL_DEFAULT_FROM_EMAIL
)
BULK_EMAIL_EMAILS_PER_TASK = config(
"BULK_EMAIL_EMAILS_PER_TASK", default=BULK_EMAIL_EMAILS_PER_TASK, formatter=int
)
BULK_EMAIL_DEFAULT_RETRY_DELAY = config(
"BULK_EMAIL_DEFAULT_RETRY_DELAY",
default=BULK_EMAIL_DEFAULT_RETRY_DELAY,
formatter=int,
)
BULK_EMAIL_MAX_RETRIES = config(
"BULK_EMAIL_MAX_RETRIES", default=BULK_EMAIL_MAX_RETRIES, formatter=int
)
BULK_EMAIL_INFINITE_RETRY_CAP = config(
"BULK_EMAIL_INFINITE_RETRY_CAP",
default=BULK_EMAIL_INFINITE_RETRY_CAP,
formatter=int,
)
BULK_EMAIL_LOG_SENT_EMAILS = config(
"BULK_EMAIL_LOG_SENT_EMAILS", default=BULK_EMAIL_LOG_SENT_EMAILS, formatter=bool
)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = config(
"BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS",
default=BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS,
formatter=int,
)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
# We have to reset the value here, since we have changed the value of the queue name.
BULK_EMAIL_ROUTING_KEY = config("BULK_EMAIL_ROUTING_KEY", default=HIGH_PRIORITY_QUEUE)
# We can run smaller jobs on the low priority queue. See note above for why
# we have to reset the value here.
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# Theme overrides
THEME_NAME = config("THEME_NAME", default=None)
COMPREHENSIVE_THEME_DIR = path(
config("COMPREHENSIVE_THEME_DIR", default=COMPREHENSIVE_THEME_DIR)
)
# Marketing link overrides
MKTG_URL_LINK_MAP = config("MKTG_URL_LINK_MAP", default={}, formatter=json.loads)
SUPPORT_SITE_LINK = config("SUPPORT_SITE_LINK", default=SUPPORT_SITE_LINK)
# Mobile store URL overrides
MOBILE_STORE_URLS = config("MOBILE_STORE_URLS", default=MOBILE_STORE_URLS)
# Timezone overrides
TIME_ZONE = config("TIME_ZONE", default=TIME_ZONE)
# Translation overrides
LANGUAGES = config("LANGUAGES", default=LANGUAGES, formatter=json.loads)
LANGUAGE_DICT = dict(LANGUAGES)
LANGUAGE_CODE = config("LANGUAGE_CODE", default=LANGUAGE_CODE)
USE_I18N = config("USE_I18N", default=USE_I18N)
# Additional installed apps
for app in config("ADDL_INSTALLED_APPS", default=[], formatter=json.loads):
INSTALLED_APPS.append(app)
WIKI_ENABLED = config("WIKI_ENABLED", default=WIKI_ENABLED, formatter=bool)
local_loglevel = config("LOCAL_LOGLEVEL", default="INFO")
# Configure Logging
LOG_DIR = config("LOG_DIR", default=path("/edx/var/logs/edx"), formatter=path)
DATA_DIR = config("DATA_DIR", default=path("/edx/app/edxapp/data"), formatter=path)
# Default format for syslog logging
standard_format = "%(asctime)s %(levelname)s %(process)d [%(name)s] %(filename)s:%(lineno)d - %(message)s"
syslog_format = (
"[variant:lms][%(name)s][env:sandbox] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] - %(message)s"
).format(hostname=platform.node().split(".")[0])
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"local": {
"formatter": "syslog_format",
"class": "logging.StreamHandler",
"level": "INFO",
},
"tracking": {
"formatter": "raw",
"class": "logging.StreamHandler",
"level": "DEBUG",
},
"console": {
"formatter": "standard",
"class": "logging.StreamHandler",
"level": "INFO",
},
},
"formatters": {
"raw": {"format": "%(message)s"},
"syslog_format": {"format": syslog_format},
"standard": {"format": standard_format},
},
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"loggers": {
"": {"level": "INFO", "propagate": False, "handlers": ["console", "local"]},
"tracking": {"level": "DEBUG", "propagate": False, "handlers": ["tracking"]},
},
}
SENTRY_DSN = config("SENTRY_DSN", default=None)
if SENTRY_DSN:
LOGGING["loggers"][""]["handlers"].append("sentry")
LOGGING["handlers"]["sentry"] = {
"class": "raven.handlers.logging.SentryHandler",
"dsn": SENTRY_DSN,
"level": "ERROR",
"environment": "production",
"release": RELEASE,
}
COURSE_LISTINGS = config("COURSE_LISTINGS", default={}, formatter=json.loads)
SUBDOMAIN_BRANDING = config("SUBDOMAIN_BRANDING", default={}, formatter=json.loads)
VIRTUAL_UNIVERSITIES = config("VIRTUAL_UNIVERSITIES", default=[])
META_UNIVERSITIES = config("META_UNIVERSITIES", default={}, formatter=json.loads)
COMMENTS_SERVICE_URL = config("COMMENTS_SERVICE_URL", default="")
COMMENTS_SERVICE_KEY = config("COMMENTS_SERVICE_KEY", default="")
CERT_NAME_SHORT = config("CERT_NAME_SHORT", default=CERT_NAME_SHORT)
CERT_NAME_LONG = config("CERT_NAME_LONG", default=CERT_NAME_LONG)
CERT_QUEUE = config("CERT_QUEUE", default="test-pull")
ZENDESK_URL = config("ZENDESK_URL", default=None)
FEEDBACK_SUBMISSION_EMAIL = config("FEEDBACK_SUBMISSION_EMAIL", default=None)
MKTG_URLS = config("MKTG_URLS", default=MKTG_URLS, formatter=json.loads)
# Badgr API
BADGR_API_TOKEN = config("BADGR_API_TOKEN", default=BADGR_API_TOKEN)
BADGR_BASE_URL = config("BADGR_BASE_URL", default=BADGR_BASE_URL)
BADGR_ISSUER_SLUG = config("BADGR_ISSUER_SLUG", default=BADGR_ISSUER_SLUG)
# git repo loading environment
GIT_REPO_DIR = config(
"GIT_REPO_DIR", default=path("/edx/var/edxapp/course_repos"), formatter=path
)
GIT_IMPORT_STATIC = config("GIT_IMPORT_STATIC", default=True)
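# The loop below merges per-key overrides from the CODE_JAIL setting into the
# default CODE_JAIL dict: nested dicts are updated key by key (one level deep),
# while non-dict values simply replace the defaults.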
for name, value in config("CODE_JAIL", default={}, formatter=json.loads).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
COURSES_WITH_UNSAFE_CODE = config(
"COURSES_WITH_UNSAFE_CODE", default=[], formatter=json.loads
)
ASSET_IGNORE_REGEX = config("ASSET_IGNORE_REGEX", default=ASSET_IGNORE_REGEX)
# Event Tracking
TRACKING_IGNORE_URL_PATTERNS = config(
"TRACKING_IGNORE_URL_PATTERNS",
default=TRACKING_IGNORE_URL_PATTERNS,
formatter=json.loads,
)
# SSL external authentication settings
SSL_AUTH_EMAIL_DOMAIN = config("SSL_AUTH_EMAIL_DOMAIN", default="MIT.EDU")
SSL_AUTH_DN_FORMAT_STRING = config("SSL_AUTH_DN_FORMAT_STRING", default=None)
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = config(
"CAS_EXTRA_LOGIN_PARAMS", default=None, formatter=json.loads
)
if FEATURES.get("AUTH_USE_CAS"):
CAS_SERVER_URL = config("CAS_SERVER_URL", default=None)
INSTALLED_APPS.append("django_cas")
MIDDLEWARE_CLASSES.append("django_cas.middleware.CASMiddleware")
CAS_ATTRIBUTE_CALLBACK = config(
"CAS_ATTRIBUTE_CALLBACK", default=None, formatter=json.loads
)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK["module"]),
CAS_ATTRIBUTE_CALLBACK["function"],
)
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = config("VIDEO_CDN_URL", default={}, formatter=json.loads)
# Branded footer
FOOTER_OPENEDX_URL = config("FOOTER_OPENEDX_URL", default=FOOTER_OPENEDX_URL)
FOOTER_OPENEDX_LOGO_IMAGE = config(
"FOOTER_OPENEDX_LOGO_IMAGE", default=FOOTER_OPENEDX_LOGO_IMAGE
)
FOOTER_ORGANIZATION_IMAGE = config(
"FOOTER_ORGANIZATION_IMAGE", default=FOOTER_ORGANIZATION_IMAGE
)
FOOTER_CACHE_TIMEOUT = config(
"FOOTER_CACHE_TIMEOUT", default=FOOTER_CACHE_TIMEOUT, formatter=int
)
FOOTER_BROWSER_CACHE_MAX_AGE = config(
"FOOTER_BROWSER_CACHE_MAX_AGE", default=FOOTER_BROWSER_CACHE_MAX_AGE, formatter=int
)
# Credit notifications settings
NOTIFICATION_EMAIL_CSS = config(
"NOTIFICATION_EMAIL_CSS", default=NOTIFICATION_EMAIL_CSS
)
NOTIFICATION_EMAIL_EDX_LOGO = config(
"NOTIFICATION_EMAIL_EDX_LOGO", default=NOTIFICATION_EMAIL_EDX_LOGO
)
############# CORS headers for cross-domain requests #################
if FEATURES.get("ENABLE_CORS_HEADERS") or FEATURES.get(
"ENABLE_CROSS_DOMAIN_CSRF_COOKIE"
):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = config(
"CORS_ORIGIN_WHITELIST", default=(), formatter=json.loads
)
CORS_ORIGIN_ALLOW_ALL = config(
"CORS_ORIGIN_ALLOW_ALL", default=False, formatter=bool
)
CORS_ALLOW_INSECURE = config("CORS_ALLOW_INSECURE", default=False, formatter=bool)
# If setting a cross-domain cookie, it's really important to choose
# a name for the cookie that is DIFFERENT than the cookies used
# by each subdomain. For example, suppose the applications
# at these subdomains are configured to use the following cookie names:
#
# 1) foo.example.com --> "csrftoken"
# 2) baz.example.com --> "csrftoken"
# 3) bar.example.com --> "csrftoken"
#
# For the cross-domain version of the CSRF cookie, you need to choose
# a name DIFFERENT than "csrftoken"; otherwise, the new token configured
# for ".example.com" could conflict with the other cookies,
# non-deterministically causing 403 responses.
#
# Because of the way Django stores cookies, the cookie name MUST
    # be a `str`, not unicode. Otherwise, `TypeError`s will be raised
# when Django tries to call the unicode `translate()` method with the wrong
# number of parameters.
CROSS_DOMAIN_CSRF_COOKIE_NAME = str(config("CROSS_DOMAIN_CSRF_COOKIE_NAME"))
# When setting the domain for the "cross-domain" version of the CSRF
# cookie, you should choose something like: ".example.com"
# (note the leading dot), where both the referer and the host
# are subdomains of "example.com".
#
# Browser security rules require that
# the cookie domain matches the domain of the server; otherwise
# the cookie won't get set. And once the cookie gets set, the client
# needs to be on a domain that matches the cookie domain, otherwise
# the client won't be able to read the cookie.
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = config("CROSS_DOMAIN_CSRF_COOKIE_DOMAIN")
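    # Illustrative values only (not from the original configuration): a deployment
    # spanning lms.example.com and studio.example.com might, for instance, use:
    #   CROSS_DOMAIN_CSRF_COOKIE_NAME = "prod-example-csrftoken"
    #   CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = ".example.com"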
# Field overrides. To use the IDDE feature, add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider'.
FIELD_OVERRIDE_PROVIDERS = tuple(
config("FIELD_OVERRIDE_PROVIDERS", default=[], formatter=json.loads)
)
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
############### XBlock filesystem field config ##########
DJFS = config(
"DJFS",
default={
"directory_root": "/edx/var/edxapp/django-pyfs/static/django-pyfs",
"type": "osfs",
"url_root": "/static/django-pyfs",
},
formatter=json.loads,
)
############### Module Store Items ##########
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = config(
"HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS", default={}, formatter=json.loads
)
# PREVIEW DOMAIN must be present in HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS for the preview to show draft changes
if "PREVIEW_LMS_BASE" in FEATURES and FEATURES["PREVIEW_LMS_BASE"] != "":
PREVIEW_DOMAIN = FEATURES["PREVIEW_LMS_BASE"].split(":")[0]
# update dictionary with preview domain regex
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS.update({PREVIEW_DOMAIN: "draft-preferred"})
############### Mixed Related(Secure/Not-Secure) Items ##########
LMS_SEGMENT_KEY = config("LMS_SEGMENT_KEY", default=None)
CC_PROCESSOR_NAME = config("CC_PROCESSOR_NAME", default=CC_PROCESSOR_NAME)
CC_PROCESSOR = config("CC_PROCESSOR", default=CC_PROCESSOR)
SECRET_KEY = config("SECRET_KEY", default="ThisisAnExampleKeyForDevPurposeOnly")
# Authentication backends
# - behind a proxy, use: "lms.envs.fun.backends.ProxyRateLimitModelBackend"
# - for LTI provider, add: "lti_provider.users.LtiBackend"
# - for CAS, add: "django_cas.backends.CASBackend"
AUTHENTICATION_BACKENDS = config(
"AUTHENTICATION_BACKENDS",
default=("lms.envs.fun.backends.ProxyRateLimitModelBackend",),
)
DEFAULT_FILE_STORAGE = config(
"DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage"
)
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = config(
"FILE_UPLOAD_STORAGE_BUCKET_NAME", default="uploads"
)
FILE_UPLOAD_STORAGE_PREFIX = config(
"FILE_UPLOAD_STORAGE_PREFIX", default=FILE_UPLOAD_STORAGE_PREFIX
)
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
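# Hypothetical usage sketch (not part of this settings file; model and filter are
# made up, only the helper name comes from the comment above):
#   from util.query import use_read_replica_if_available
#   rows = use_read_replica_if_available(CourseEnrollment.objects.filter(course_id=course_key))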
DATABASE_ENGINE = config("DATABASE_ENGINE", default="django.db.backends.mysql")
DATABASE_HOST = config("DATABASE_HOST", default="mysql")
DATABASE_PORT = config("DATABASE_PORT", default=3306, formatter=int)
DATABASE_NAME = config("DATABASE_NAME", default="edxapp")
DATABASE_USER = config("DATABASE_USER", default="edxapp_user")
DATABASE_PASSWORD = config("DATABASE_PASSWORD", default="password")
DATABASES = config(
"DATABASES",
default={
"default": {
"ENGINE": DATABASE_ENGINE,
"HOST": DATABASE_HOST,
"PORT": DATABASE_PORT,
"NAME": DATABASE_NAME,
"USER": DATABASE_USER,
"PASSWORD": DATABASE_PASSWORD,
}
},
formatter=json.loads,
)
# Enable automatic transaction management on all databases
# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
# This needs to be true for all databases
for database_name in DATABASES:
DATABASES[database_name]["ATOMIC_REQUESTS"] = True
XQUEUE_INTERFACE = config(
"XQUEUE_INTERFACE",
default={"url": None, "basic_auth": None, "django_auth": None},
formatter=json.loads,
)
# Configure the MODULESTORE
MODULESTORE = convert_module_store_setting_if_needed(
config("MODULESTORE", default=MODULESTORE, formatter=json.loads)
)
MONGODB_PASSWORD = config("MONGODB_PASSWORD", default="")
MONGODB_HOST = config("MONGODB_HOST", default="mongodb")
MONGODB_PORT = config("MONGODB_PORT", default=27017, formatter=int)
MONGODB_NAME = config("MONGODB_NAME", default="edxapp")
MONGODB_USER = config("MONGODB_USER", default=None)
MONGODB_SSL = config("MONGODB_SSL", default=False, formatter=bool)
MONGODB_REPLICASET = config("MONGODB_REPLICASET", default=None)
# Accepted read_preference values can be found here: https://github.com/mongodb/mongo-python-driver/blob/2.9.1/pymongo/read_preferences.py#L54
MONGODB_READ_PREFERENCE = config("MONGODB_READ_PREFERENCE", default="PRIMARY")
DOC_STORE_CONFIG = config(
"DOC_STORE_CONFIG",
default={
"collection": "modulestore",
"host": MONGODB_HOST,
"port": MONGODB_PORT,
"db": MONGODB_NAME,
"user": MONGODB_USER,
"password": MONGODB_PASSWORD,
"ssl": MONGODB_SSL,
"replicaSet": MONGODB_REPLICASET,
"read_preference": MONGODB_READ_PREFERENCE,
},
formatter=json.loads,
)
update_module_store_settings(MODULESTORE, doc_store_settings=DOC_STORE_CONFIG)
MONGODB_LOG = config("MONGODB_LOG", default={}, formatter=json.loads)
CONTENTSTORE = config(
"CONTENTSTORE",
default={
"DOC_STORE_CONFIG": DOC_STORE_CONFIG,
"ENGINE": "xmodule.contentstore.mongo.MongoContentStore",
},
formatter=json.loads,
)
EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="") # django default is ''
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="") # django default is ''
# Datadog for events!
DATADOG = config("DATADOG", default={}, formatter=json.loads)
# TODO: deprecated (compatibility with previous settings)
DATADOG_API = config("DATADOG_API", default=None)
# Analytics dashboard server
ANALYTICS_SERVER_URL = config("ANALYTICS_SERVER_URL", default=None)
ANALYTICS_API_KEY = config("ANALYTICS_API_KEY", default="")
# Analytics data source
ANALYTICS_DATA_URL = config("ANALYTICS_DATA_URL", default=ANALYTICS_DATA_URL)
ANALYTICS_DATA_TOKEN = config("ANALYTICS_DATA_TOKEN", default=ANALYTICS_DATA_TOKEN)
# Analytics Dashboard
# When set, this setting adds a link in the instructor dashboard to the analytics insights service
ANALYTICS_DASHBOARD_URL = config(
"ANALYTICS_DASHBOARD_URL", default=False, formatter=bool
)
ANALYTICS_DASHBOARD_NAME = config(
"ANALYTICS_DASHBOARD_NAME", default=PLATFORM_NAME + " Insights"
)
# Mailchimp New User List
MAILCHIMP_NEW_USER_LIST_ID = config("MAILCHIMP_NEW_USER_LIST_ID", default=None)
# Zendesk
ZENDESK_USER = config("ZENDESK_USER", default=None)
ZENDESK_API_KEY = config("ZENDESK_API_KEY", default=None)
# API Key for inbound requests from Notifier service
EDX_API_KEY = config("EDX_API_KEY", default=None)
# Celery Broker
# For redis sentinel use the redis-sentinel transport
CELERY_BROKER_TRANSPORT = config("CELERY_BROKER_TRANSPORT", default="redis")
CELERY_BROKER_USER = config("CELERY_BROKER_USER", default="")
CELERY_BROKER_PASSWORD = config("CELERY_BROKER_PASSWORD", default="")
CELERY_BROKER_HOST = config("CELERY_BROKER_HOST", default="redis")
CELERY_BROKER_PORT = config("CELERY_BROKER_PORT", default=6379, formatter=int)
CELERY_BROKER_VHOST = config("CELERY_BROKER_VHOST", default=0, formatter=int)
if CELERY_BROKER_TRANSPORT == "redis-sentinel":
# register redis sentinel schema in celery
register()
BROKER_URL = "{transport}://{user}:{password}@{host}:{port}/{vhost}".format(
transport=CELERY_BROKER_TRANSPORT,
user=CELERY_BROKER_USER,
password=CELERY_BROKER_PASSWORD,
host=CELERY_BROKER_HOST,
port=CELERY_BROKER_PORT,
vhost=CELERY_BROKER_VHOST,
)
# To use redis-sentinel, refer to the documentation here
# https://celery-redis-sentinel.readthedocs.io/en/latest/
BROKER_TRANSPORT_OPTIONS = config(
"BROKER_TRANSPORT_OPTIONS", default={}, formatter=json.loads
)
# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = config(
"STUDENT_FILEUPLOAD_MAX_SIZE", default=STUDENT_FILEUPLOAD_MAX_SIZE, formatter=int
)
# Event tracking
TRACKING_BACKENDS.update(config("TRACKING_BACKENDS", default={}, formatter=json.loads))
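# The nested update below only extends the "backends" dict inside the existing
# "tracking_logs" engine options, so the default tracking log backend is kept
# and deployment-specific backends are added alongside it.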
EVENT_TRACKING_BACKENDS["tracking_logs"]["OPTIONS"]["backends"].update(
config("EVENT_TRACKING_BACKENDS", default={}, formatter=json.loads)
)
EVENT_TRACKING_BACKENDS["segmentio"]["OPTIONS"]["processors"][0]["OPTIONS"][
"whitelist"
].extend(
config("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", default=[], formatter=json.loads)
)
TRACKING_SEGMENTIO_WEBHOOK_SECRET = config(
"TRACKING_SEGMENTIO_WEBHOOK_SECRET", default=TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = config(
"TRACKING_SEGMENTIO_ALLOWED_TYPES",
default=TRACKING_SEGMENTIO_ALLOWED_TYPES,
formatter=json.loads,
)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = config(
"TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
default=TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES,
formatter=json.loads,
)
TRACKING_SEGMENTIO_SOURCE_MAP = config(
"TRACKING_SEGMENTIO_SOURCE_MAP",
default=TRACKING_SEGMENTIO_SOURCE_MAP,
formatter=json.loads,
)
# Student identity verification settings
VERIFY_STUDENT = config("VERIFY_STUDENT", default=VERIFY_STUDENT, formatter=json.loads)
# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = config(
"GRADES_DOWNLOAD_ROUTING_KEY", default=HIGH_MEM_QUEUE
)
GRADES_DOWNLOAD = config(
"GRADES_DOWNLOAD", default=GRADES_DOWNLOAD, formatter=json.loads
)
GRADES_DOWNLOAD = config("GRADES_DOWNLOAD", default=GRADES_DOWNLOAD)
# financial reports
FINANCIAL_REPORTS = config(
"FINANCIAL_REPORTS", default=FINANCIAL_REPORTS, formatter=json.loads
)
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = config(
"MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", default=5, formatter=int
)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = config(
"MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", default=15 * 60, formatter=int
)
MICROSITE_CONFIGURATION = config(
"MICROSITE_CONFIGURATION", default={}, formatter=json.loads
)
MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default=""))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = config("PASSWORD_MIN_LENGTH", default=12, formatter=int)
PASSWORD_MAX_LENGTH = config("PASSWORD_MAX_LENGTH", default=None, formatter=int)
PASSWORD_COMPLEXITY = config(
"PASSWORD_COMPLEXITY",
default={"UPPER": 1, "LOWER": 1, "DIGITS": 1},
formatter=json.loads,
)
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = config(
"PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD",
default=PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD,
formatter=int,
)
PASSWORD_DICTIONARY = config("PASSWORD_DICTIONARY", default=[], formatter=json.loads)
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = config(
"SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", default=None, formatter=int
)
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = config(
"TIME_ZONE_DISPLAYED_FOR_DEADLINES", default=TIME_ZONE_DISPLAYED_FOR_DEADLINES
)
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = config("X_FRAME_OPTIONS", default=X_FRAME_OPTIONS)
##### Third-party auth options ################################################
if FEATURES.get("ENABLE_THIRD_PARTY_AUTH"):
# The reduced session expiry time during the third party login pipeline. (Value in seconds)
SOCIAL_AUTH_PIPELINE_TIMEOUT = config("SOCIAL_AUTH_PIPELINE_TIMEOUT", default=600)
# The SAML private/public key values do not need the delimiter lines (such as
# "-----BEGIN PRIVATE KEY-----", default="-----END PRIVATE KEY-----" etc.) but they may be included
# if you want (though it's easier to format the key values as JSON without the delimiters).
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = config(
"SOCIAL_AUTH_SAML_SP_PRIVATE_KEY", default=""
)
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = config(
"SOCIAL_AUTH_SAML_SP_PUBLIC_CERT", default=""
)
SOCIAL_AUTH_OAUTH_SECRETS = config(
"SOCIAL_AUTH_OAUTH_SECRETS", default={}, formatter=json.loads
)
SOCIAL_AUTH_LTI_CONSUMER_SECRETS = config(
"SOCIAL_AUTH_LTI_CONSUMER_SECRETS", default={}, formatter=json.loads
)
# third_party_auth config moved to ConfigurationModels. This is for data migration only:
THIRD_PARTY_AUTH_OLD_CONFIG = config("THIRD_PARTY_AUTH", default=None)
if (
config("THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int)
is not None
):
CELERYBEAT_SCHEDULE["refresh-saml-metadata"] = {
"task": "third_party_auth.fetch_saml_metadata",
"schedule": datetime.timedelta(
hours=config(
"THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS",
default=24,
formatter=int,
)
),
}
# The following can be used to integrate a custom login form with third_party_auth.
# It should be a dict where the key is a word passed via ?auth_entry=, and the value is a
# dict with an arbitrary 'secret_key' and a 'url'.
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = config(
"THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS", default={}, formatter=json.loads
)
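    # A hypothetical example of the expected shape (entry name, key and URL are made up):
    #   THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = {
    #       "my-entry": {"secret_key": "some-secret", "url": "/auth/custom-login/"},
    #   }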
##### OAUTH2 Provider ##############
if FEATURES.get("ENABLE_OAUTH2_PROVIDER"):
OAUTH_OIDC_ISSUER = config("OAUTH_OIDC_ISSUER", default=None)
OAUTH_ENFORCE_SECURE = config("OAUTH_ENFORCE_SECURE", default=True, formatter=bool)
OAUTH_ENFORCE_CLIENT_SECURE = config(
"OAUTH_ENFORCE_CLIENT_SECURE", default=True, formatter=bool
)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = config(
"ADVANCED_SECURITY_CONFIG", default={}, formatter=json.loads
)
##### GOOGLE ANALYTICS IDS #####
GOOGLE_ANALYTICS_ACCOUNT = config("GOOGLE_ANALYTICS_ACCOUNT", default=None)
GOOGLE_ANALYTICS_LINKEDIN = config("GOOGLE_ANALYTICS_LINKEDIN", default=None)
##### OPTIMIZELY PROJECT ID #####
OPTIMIZELY_PROJECT_ID = config("OPTIMIZELY_PROJECT_ID", default=OPTIMIZELY_PROJECT_ID)
#### Course Registration Code length ####
REGISTRATION_CODE_LENGTH = config("REGISTRATION_CODE_LENGTH", default=8, formatter=int)
# REGISTRATION CODES DISPLAY INFORMATION
INVOICE_CORP_ADDRESS = config("INVOICE_CORP_ADDRESS", default=INVOICE_CORP_ADDRESS)
INVOICE_PAYMENT_INSTRUCTIONS = config(
"INVOICE_PAYMENT_INSTRUCTIONS", default=INVOICE_PAYMENT_INSTRUCTIONS
)
# Which access.py permission names to check;
# We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = config(
"COURSE_CATALOG_VISIBILITY_PERMISSION", default=COURSE_CATALOG_VISIBILITY_PERMISSION
)
COURSE_ABOUT_VISIBILITY_PERMISSION = config(
"COURSE_ABOUT_VISIBILITY_PERMISSION", default=COURSE_ABOUT_VISIBILITY_PERMISSION
)
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = config(
"ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT", default=60, formatter=int
)
# PDF RECEIPT/INVOICE OVERRIDES
PDF_RECEIPT_TAX_ID = config("PDF_RECEIPT_TAX_ID", default=PDF_RECEIPT_TAX_ID)
PDF_RECEIPT_FOOTER_TEXT = config(
"PDF_RECEIPT_FOOTER_TEXT", default=PDF_RECEIPT_FOOTER_TEXT
)
PDF_RECEIPT_DISCLAIMER_TEXT = config(
"PDF_RECEIPT_DISCLAIMER_TEXT", default=PDF_RECEIPT_DISCLAIMER_TEXT
)
PDF_RECEIPT_BILLING_ADDRESS = config(
"PDF_RECEIPT_BILLING_ADDRESS", default=PDF_RECEIPT_BILLING_ADDRESS
)
PDF_RECEIPT_TERMS_AND_CONDITIONS = config(
"PDF_RECEIPT_TERMS_AND_CONDITIONS", default=PDF_RECEIPT_TERMS_AND_CONDITIONS
)
PDF_RECEIPT_TAX_ID_LABEL = config(
"PDF_RECEIPT_TAX_ID_LABEL", default=PDF_RECEIPT_TAX_ID_LABEL
)
PDF_RECEIPT_LOGO_PATH = config("PDF_RECEIPT_LOGO_PATH", default=PDF_RECEIPT_LOGO_PATH)
PDF_RECEIPT_COBRAND_LOGO_PATH = config(
"PDF_RECEIPT_COBRAND_LOGO_PATH", default=PDF_RECEIPT_COBRAND_LOGO_PATH
)
PDF_RECEIPT_LOGO_HEIGHT_MM = config(
"PDF_RECEIPT_LOGO_HEIGHT_MM", default=PDF_RECEIPT_LOGO_HEIGHT_MM, formatter=int
)
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = config(
"PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM",
default=PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM,
formatter=int,
)
if (
FEATURES.get("ENABLE_COURSEWARE_SEARCH")
or FEATURES.get("ENABLE_DASHBOARD_SEARCH")
or FEATURES.get("ENABLE_COURSE_DISCOVERY")
or FEATURES.get("ENABLE_TEAMS")
):
# Use ElasticSearch as the search engine herein
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
ELASTIC_SEARCH_CONFIG = config(
"ELASTIC_SEARCH_CONFIG", default=[{}], formatter=json.loads
)
# Facebook app
FACEBOOK_API_VERSION = config("FACEBOOK_API_VERSION", default=None)
FACEBOOK_APP_SECRET = config("FACEBOOK_APP_SECRET", default=None)
FACEBOOK_APP_ID = config("FACEBOOK_APP_ID", default=None)
XBLOCK_SETTINGS = config("XBLOCK_SETTINGS", default={}, formatter=json.loads)
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get(
"LICENSING", False
)
XBLOCK_SETTINGS.setdefault("VideoModule", {})["YOUTUBE_API_KEY"] = config(
"YOUTUBE_API_KEY", default=YOUTUBE_API_KEY
)
##### CDN EXPERIMENT/MONITORING FLAGS #####
CDN_VIDEO_URLS = config("CDN_VIDEO_URLS", default=CDN_VIDEO_URLS)
ONLOAD_BEACON_SAMPLE_RATE = config(
"ONLOAD_BEACON_SAMPLE_RATE", default=ONLOAD_BEACON_SAMPLE_RATE
)
##### ECOMMERCE API CONFIGURATION SETTINGS #####
ECOMMERCE_PUBLIC_URL_ROOT = config(
"ECOMMERCE_PUBLIC_URL_ROOT", default=ECOMMERCE_PUBLIC_URL_ROOT
)
ECOMMERCE_API_URL = config("ECOMMERCE_API_URL", default=ECOMMERCE_API_URL)
ECOMMERCE_API_TIMEOUT = config(
"ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT, formatter=int
)
ECOMMERCE_SERVICE_WORKER_USERNAME = config(
"ECOMMERCE_SERVICE_WORKER_USERNAME", default=ECOMMERCE_SERVICE_WORKER_USERNAME
)
ECOMMERCE_API_TIMEOUT = config("ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT)
ECOMMERCE_API_SIGNING_KEY = config(
"ECOMMERCE_API_SIGNING_KEY", default=ECOMMERCE_API_SIGNING_KEY
)
##### Custom Courses for EdX #####
if FEATURES.get("CUSTOM_COURSES_EDX"):
INSTALLED_APPS += ("lms.djangoapps.ccx",)
FIELD_OVERRIDE_PROVIDERS += (
"lms.djangoapps.ccx.overrides.CustomCoursesForEdxOverrideProvider",
)
CCX_MAX_STUDENTS_ALLOWED = config(
"CCX_MAX_STUDENTS_ALLOWED", default=CCX_MAX_STUDENTS_ALLOWED
)
##### Individual Due Date Extensions #####
if FEATURES.get("INDIVIDUAL_DUE_DATES"):
FIELD_OVERRIDE_PROVIDERS += (
"courseware.student_field_overrides.IndividualStudentOverrideProvider",
)
##### Self-Paced Course Due Dates #####
FIELD_OVERRIDE_PROVIDERS += (
"courseware.self_paced_overrides.SelfPacedDateOverrideProvider",
)
# PROFILE IMAGE CONFIG
PROFILE_IMAGE_BACKEND = config("PROFILE_IMAGE_BACKEND", default=PROFILE_IMAGE_BACKEND)
PROFILE_IMAGE_SECRET_KEY = config(
"PROFILE_IMAGE_SECRET_KEY", default=PROFILE_IMAGE_SECRET_KEY
)
PROFILE_IMAGE_MAX_BYTES = config(
"PROFILE_IMAGE_MAX_BYTES", default=PROFILE_IMAGE_MAX_BYTES, formatter=int
)
PROFILE_IMAGE_MIN_BYTES = config(
"PROFILE_IMAGE_MIN_BYTES", default=PROFILE_IMAGE_MIN_BYTES, formatter=int
)
PROFILE_IMAGE_DEFAULT_FILENAME = "images/profiles/default"
# EdxNotes config
EDXNOTES_PUBLIC_API = config("EDXNOTES_PUBLIC_API", default=EDXNOTES_PUBLIC_API)
EDXNOTES_INTERNAL_API = config("EDXNOTES_INTERNAL_API", default=EDXNOTES_INTERNAL_API)
##### Credit Provider Integration #####
CREDIT_PROVIDER_SECRET_KEYS = config(
"CREDIT_PROVIDER_SECRET_KEYS", default={}, formatter=json.loads
)
##################### LTI Provider #####################
if FEATURES.get("ENABLE_LTI_PROVIDER"):
INSTALLED_APPS += ("lti_provider",)
LTI_USER_EMAIL_DOMAIN = config("LTI_USER_EMAIL_DOMAIN", default="lti.example.com")
# For more info on this, see the notes in common.py
LTI_AGGREGATE_SCORE_PASSBACK_DELAY = config(
"LTI_AGGREGATE_SCORE_PASSBACK_DELAY", default=LTI_AGGREGATE_SCORE_PASSBACK_DELAY
)
##################### Credit Provider help link ####################
CREDIT_HELP_LINK_URL = config("CREDIT_HELP_LINK_URL", default=CREDIT_HELP_LINK_URL)
#### JWT configuration ####
JWT_ISSUER = config("JWT_ISSUER", default=JWT_ISSUER)
JWT_EXPIRATION = config("JWT_EXPIRATION", default=JWT_EXPIRATION)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = config(
"PROCTORING_BACKEND_PROVIDER", default=PROCTORING_BACKEND_PROVIDER
)
PROCTORING_SETTINGS = config(
"PROCTORING_SETTINGS", default=PROCTORING_SETTINGS, formatter=json.loads
)
################# MICROSITE ####################
MICROSITE_CONFIGURATION = config(
"MICROSITE_CONFIGURATION", default={}, formatter=json.loads
)
MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default=""))
# Cutoff date for granting audit certificates
if config("AUDIT_CERT_CUTOFF_DATE", default=None):
AUDIT_CERT_CUTOFF_DATE = dateutil.parser.parse(
config("AUDIT_CERT_CUTOFF_DATE", default=AUDIT_CERT_CUTOFF_DATE)
)
################ CONFIGURABLE LTI CONSUMER ###############
# Add just the standard LTI consumer by default, forcing it to open in a new window
# and asking the user before sending email and username:
LTI_XBLOCK_CONFIGURATIONS = config(
"LTI_XBLOCK_CONFIGURATIONS",
default=[
{
"display_name": "LTI consumer",
"pattern": ".*",
"hidden_fields": [
"ask_to_send_email",
"ask_to_send_username",
"new_window",
],
"defaults": {
"ask_to_send_email": True,
"ask_to_send_username": True,
"launch_target": "new_window",
},
}
],
formatter=json.loads,
)
LTI_XBLOCK_SECRETS = config("LTI_XBLOCK_SECRETS", default={}, formatter=json.loads)
################################ FUN stuff ################################
SITE_VARIANT = "lms"
# Environment's name displayed in FUN's backoffice
ENVIRONMENT = config("ENVIRONMENT", default="no set")
BASE_ROOT = path("/edx/app/edxapp/")
# Fun-apps configuration
INSTALLED_APPS += (
"backoffice",
"bootstrapform",
"ckeditor",
"course_dashboard",
"course_pages",
"courses_api",
"courses",
"easy_thumbnails",
"edx_gea",
"forum_contributors",
"fun_api",
"fun_certificates",
"fun_instructor",
"fun",
"funsite",
"haystack",
"masquerade",
"newsfeed",
"password_container",
"payment_api",
"payment",
"pure_pagination",
"raven.contrib.django.raven_compat",
"rest_framework.authtoken",
"teachers",
"universities",
"videoproviders",
)
ROOT_URLCONF = "fun.lms.urls"
# Related Richie platform url
PLATFORM_RICHIE_URL = config("PLATFORM_RICHIE_URL", default=None)
# Haystack configuration (default is minimal working configuration)
HAYSTACK_CONNECTIONS = config(
"HAYSTACK_CONNECTIONS",
default={
"default": {"ENGINE": "courses.search_indexes.ConfigurableElasticSearchEngine"}
},
formatter=json.loads,
)
CKEDITOR_UPLOAD_PATH = "./"
CKEDITOR_CONFIGS = {
"default": {
"toolbar": [
[
"Undo",
"Redo",
"-",
"Bold",
"Italic",
"Underline",
"-",
"Link",
"Unlink",
"Anchor",
"-",
"Format",
"-",
"SpellChecker",
"Scayt",
"-",
"Maximize",
],
[
"HorizontalRule",
"-",
"Table",
"-",
"BulletedList",
"NumberedList",
"-",
"Cut",
"Copy",
"Paste",
"PasteText",
"PasteFromWord",
"-",
"SpecialChar",
"-",
"Source",
],
],
"toolbarCanCollapse": False,
"entities": False,
"width": 955,
"uiColor": "#9AB8F3",
},
"news": {
# Redefine path where the news images/files are uploaded. This would
# better be done at runtime with the 'reverse' function, but
# unfortunately there is no way around defining this in the settings
# file.
"filebrowserUploadUrl": "/news/ckeditor/upload/",
"filebrowserBrowseUrl": "/news/ckeditor/browse/",
"toolbar_Full": [
[
"Styles",
"Format",
"Bold",
"Italic",
"Underline",
"Strike",
"SpellChecker",
"Undo",
"Redo",
],
["Image", "Flash", "Table", "HorizontalRule"],
["NumberedList", "BulletedList", "Blockquote", "TextColor", "BGColor"],
["Smiley", "SpecialChar"],
["Source"],
],
},
}
# ### FUN-APPS SETTINGS ###
# This is the dist-packages path where all fun-apps are installed
FUN_BASE_ROOT = path(os.path.dirname(pkgutil.get_loader("funsite").filename))
SHARED_ROOT = DATA_DIR / "shared"
# Add FUN applications' template directories to the MAKO template finder before edX's own
MAKO_TEMPLATES["main"] = [
# overrides template in edx-platform/lms/templates
FUN_BASE_ROOT / "funsite/templates/lms",
FUN_BASE_ROOT / "funsite/templates",
FUN_BASE_ROOT / "course_pages/templates",
FUN_BASE_ROOT / "payment/templates",
FUN_BASE_ROOT / "course_dashboard/templates",
FUN_BASE_ROOT / "newsfeed/templates",
FUN_BASE_ROOT / "fun_certificates/templates",
] + MAKO_TEMPLATES["main"]
# JS static override
DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms")
FUN_SMALL_LOGO_RELATIVE_PATH = "funsite/images/logos/funmooc173.png"
FUN_BIG_LOGO_RELATIVE_PATH = "funsite/images/logos/funmoocfp.png"
FAVICON_PATH = "fun/images/favicon.ico"
# Locale paths
# Here we rewrite LOCALE_PATHS to give precedence to our applications over edx-platform's,
# then we add xblocks which provide translations as there is no native mechanism to handle this
# See Xblock i18n: http://www.libremente.eu/2017/12/06/edx-translation/
LOCALIZED_FUN_APPS = [
"backoffice",
"course_dashboard",
"course_pages",
"courses",
"fun_api",
"fun_certificates",
"funsite",
"newsfeed",
"payment",
"universities",
"videoproviders",
]
LOCALE_PATHS = [FUN_BASE_ROOT / app / "locale" for app in LOCALIZED_FUN_APPS]
LOCALE_PATHS.append(REPO_ROOT / "conf/locale") # edx-platform locales
LOCALE_PATHS.append(path(pkgutil.get_loader("proctor_exam").filename) / "locale")
# -- Certificates
CERTIFICATES_DIRECTORY_NAME = "attestations"
FUN_LOGO_PATH = FUN_BASE_ROOT / "funsite/static" / FUN_BIG_LOGO_RELATIVE_PATH
FUN_ATTESTATION_LOGO_PATH = (
FUN_BASE_ROOT / "funsite/static" / "funsite/images/logos/funmoocattest.png"
)
STUDENT_NAME_FOR_TEST_CERTIFICATE = "Test User"
# Videofront subtitles cache
CACHES["video_subtitles"] = {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"KEY_PREFIX": "video_subtitles",
"LOCATION": DATA_DIR / "video_subtitles_cache",
}
# Course image thumbnails
FUN_THUMBNAIL_OPTIONS = {
"small": {"size": (270, 152), "crop": "smart"},
"big": {"size": (337, 191), "crop": "smart"},
"about": {"size": (730, 412), "crop": "scale"},
"facebook": {
"size": (600, 315),
"crop": "smart",
}, # https://developers.facebook.com/docs/sharing/best-practices
}
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_EXTENSION = "png"
##### ORA2 ######
ORA2_FILEUPLOAD_BACKEND = "swift"
ORA2_SWIFT_KEY = config("ORA2_SWIFT_KEY", default="")
ORA2_SWIFT_URL = config("ORA2_SWIFT_URL", default="")
# Prefix for uploads of example-based assessment AI classifiers
# This can be used to separate uploads for different environments
ORA2_FILE_PREFIX = config("ORA2_FILE_PREFIX", default=ORA2_FILE_PREFIX)
# Profile image upload
PROFILE_IMAGE_BACKEND = {
"class": "storages.backends.overwrite.OverwriteStorage",
"options": {
"location": os.path.join(MEDIA_ROOT, "profile-images/"),
"base_url": os.path.join(MEDIA_URL, "profile-images/"),
},
}
ENABLE_ADWAYS_FOR_COURSES = config(
"ENABLE_ADWAYS_FOR_COURSES", default=[], formatter=json.loads
)
# Add our v3 CSS and JS files to assets compilation pipeline to make them available in courseware.
# On the FUN v3 frontend, which does not use edX's templates, those files are loaded
# by funsite/templates/funsite/parts/base.html and css/lms-main.css
PIPELINE_CSS["style-vendor"]["source_filenames"].append("fun/css/cookie-banner.css")
PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/header.css")
PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/footer.css")
# can't find any common group
for group in ["base_vendor", "main_vendor"]:
PIPELINE_JS[group]["source_filenames"].append("funsite/js/header.js")
PIPELINE_JS[group]["source_filenames"].append("fun/js/cookie-banner.js")
# Glowbl
GLOWBL_LTI_ENDPOINT = config(
"GLOWBL_LTI_ENDPOINT", default="http://ltiapps.net/test/tp.php"
)
GLOWBL_LTI_KEY = config("GLOWBL_LTI_KEY", default="jisc.ac.uk")
GLOWBL_LTI_SECRET = config("GLOWBL_LTI_SECRET", default="secret")
GLOWBL_LTI_ID = config("GLOWBL_LTI_ID", default="testtoolconsumer")
GLOWBL_LAUNCH_URL = config(
"GLOWBL_LAUNCH_URL", default="http://ltiapps.net/test/tp.php"
)
GLOWBL_COLL_OPT = config("GLOWBL_COLL_OPT", default="FunMoocJdR")
DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms")
DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["context_processors"].append(
"fun.context_processor.fun_settings"
)
TEMPLATES = [DEFAULT_TEMPLATE_ENGINE]
# This forces Edx Studio to use our own video provider XBlock for the default button
FUN_DEFAULT_VIDEO_PLAYER = "libcast_xblock"
MIDDLEWARE_CLASSES += (
"fun.middleware.LegalAcceptance",
"backoffice.middleware.PathLimitedMasqueradeMiddleware",
)
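# LazyChoicesSorter defers sorting until iteration time: choices are sorted by
# their display label (second tuple element) only when the iterable is consumed,
# so lazily translated labels are resolved in the active language.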
class LazyChoicesSorter(object):
def __init__(self, choices):
self.choices = choices
def __iter__(self):
for choice in sorted(self.choices, key=lambda peer: peer[1]):
yield choice
# These are the allowed subtitle languages; we have the same list on the Videofront server.
# We remove 2 deprecated Chinese language codes which do not exist on Django 1.10 VideoFront.
SUBTITLE_SUPPORTED_LANGUAGES = LazyChoicesSorter(
(code, ugettext_lazy(lang))
for code, lang in global_settings.LANGUAGES
if code not in ("zh-cn", "zh-tw")
)
ANONYMIZATION_KEY = config("ANONYMIZATION_KEY", default="")
RAVEN_CONFIG = config("RAVEN_CONFIG", default={"dsn": ""}, formatter=json.loads)
ELASTICSEARCH_INDEX_SETTINGS = {
"settings": {
"analysis": {
"filter": {
"elision": {
"type": "elision",
"articles": ["l", "m", "t", "qu", "n", "s", "j", "d"],
}
},
"analyzer": {
"custom_french_analyzer": {
"tokenizer": "letter",
"filter": [
"asciifolding",
"lowercase",
"french_stem",
"elision",
"stop",
"word_delimiter",
],
}
},
}
}
}
FUN_MKTG_URLS = config("FUN_MKTG_URLS", default={}, formatter=json.loads)
# Default visibility of student's profile to other students
ACCOUNT_VISIBILITY_CONFIGURATION["default_visibility"] = "private"
# A user is verified if they have an approved SoftwareSecurePhotoVerification entry;
# this setting will create a dummy SoftwareSecurePhotoVerification for the user in
# the Paybox success callback view. At this point, we think it's better to create a
# dummy one than to remove the verification process in edX.
FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION = config(
"FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION", default=False, formatter=bool
)
ECOMMERCE_NOTIFICATION_URL = config("ECOMMERCE_NOTIFICATION_URL", default=None)
PAYMENT_ADMIN = "paybox@fun-mooc.fr"
# List of pattern definitions to automatically add verified users to a cohort
# If value is [] this feature is disabled
# Otherwise this setting is a list of
# tuple values (r"<course id regex>", "<cohort name>").
# e.g: if you want to enable this feature for a particular course you can set
# this setting to
# [
# (r"<course id>", "cohort name"),
# ]
VERIFIED_COHORTS = config("VERIFIED_COHORTS", default=[])
# Force Edx to use `libcast_xblock` as default video player
# in the studio (big green button) and if any xblock is called `video`
XBLOCK_SELECT_FUNCTION = prefer_fun_video
if "sentry" in LOGGING.get("handlers"):
LOGGING["handlers"]["sentry"]["environment"] = "development"
# Configure a GELF handler to ship logs to the Graylog server
LOGGING["loggers"][""]["handlers"].append("gelf")
LOGGING["loggers"]["tracking"]["handlers"].append("gelf")
LOGGING["handlers"]["gelf"] = {
"level": "DEBUG",
"class": "djehouty.libgelf.handlers.GELFTCPSocketHandler",
"host": "graylog",
"port": 12201,
"null_character": True,
}
DEBUG = True
REQUIRE_DEBUG = True
EMAIL_BACKEND = config(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
PIPELINE_ENABLED = False
STATICFILES_STORAGE = "openedx.core.storage.DevelopmentStorage"
ALLOWED_HOSTS = ["*"]
FEATURES["AUTOMATIC_AUTH_FOR_TESTING"] = True
# ORA2 fileupload
ORA2_FILEUPLOAD_BACKEND = "filesystem"
ORA2_FILEUPLOAD_ROOT = os.path.join(SHARED_ROOT, "openassessment_submissions")
ORA2_FILEUPLOAD_CACHE_ROOT = os.path.join(
SHARED_ROOT, "openassessment_submissions_cache"
)
AUTHENTICATION_BACKENDS = config(
"AUTHENTICATION_BACKENDS",
default=["django.contrib.auth.backends.ModelBackend"],
formatter=json.loads
)
| 36.239158 | 140 | 0.724671 | 216 | 0.003801 | 114 | 0.002006 | 0 | 0 | 0 | 0 | 27,099 | 0.476902 |
12d2af7e340f2c0b16013db0e187eff0a983f2ec | 14,028 | py | Python | stashboard/handlers/site.py | kelnos/stashboard | 5f92ed14b8cf17f4b1be8441005b187e97ca74b8 | ["MIT"] | 1 | 2015-02-24T23:30:06.000Z | 2015-02-24T23:30:06.000Z | stashboard/handlers/site.py | ratchetio/stashboard | f8e4e6d175f48701a154e4baca10de2a4a577ab4 | ["MIT"] | null | null | null | stashboard/handlers/site.py | ratchetio/stashboard | f8e4e6d175f48701a154e4baca10de2a4a577ab4 | ["MIT"] | null | null | null |
# The MIT License
#
# Copyright (c) 2008 William T. Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__author__ = 'Kyle Conroy'
import datetime
import calendar
import logging
import os
import re
import string
import urllib
import urlparse
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from datetime import date, timedelta
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import simplejson as json
from time import mktime
from models import List, Status, Service, Event, Profile
import xml.etree.ElementTree as et
from utils import authorized
from wsgiref.handlers import format_date_time
def default_template_data():
data = {
"title": settings.SITE_NAME,
"report_url": settings.REPORT_URL,
"twitter_handle": settings.TWITTER_HANDLE,
}
user = users.get_current_user()
if user is not None:
data["user"] = user
data["logout_url"] = users.create_logout_url("/")
data["admin"] = users.is_current_user_admin()
return data
def get_past_days(num):
date = datetime.date.today()
dates = []
for i in range(1, num + 1):
dates.append(date - datetime.timedelta(days=i))
return dates
class BaseHandler(webapp.RequestHandler):
def render(self, template_values, filename):
self.response.out.write(render_to_string(filename, template_values))
def retrieve(self, key):
""" Helper for loading data from memcache """
all_pages = memcache.get("__all_pages__")
if all_pages is None:
all_pages = {}
item = memcache.get(key) if all_pages.has_key(key) else None
if item is not None:
return item
else:
item = self.data()
if not memcache.set(key, item):
logging.error("Memcache set failed on %s" % key)
else:
all_pages[key] = 1
if not memcache.set("__all_pages__", all_pages):
logging.error("Memcache set failed on __all_pages__")
return item
def not_found(self):
self.error(404)
self.render(default_template_data(), "404.html")
class NotFoundHandler(BaseHandler):
def get(self):
self.error(404)
self.render(default_template_data(), "404.html")
class UnauthorizedHandler(webapp.RequestHandler):
def get(self):
self.error(403)
self.render(default_template_data(), "404.html")
class RootHandler(BaseHandler):
def data(self):
services = []
default_status = Status.get_default()
for service in Service.all().order("list").order("name").fetch(100):
event = service.current_event()
if event is not None:
status = event.status
else:
status = default_status
today = date.today() + timedelta(days=1)
current, = service.history(1, default_status, start=today)
has_issues = (current["information"] and
status.key() == default_status.key())
service_dict = {
"slug": service.slug,
"name": service.name,
"url": service.url(),
"status": status,
"has_issues": has_issues,
"history": service.history(5, default_status),
}
services.append(service_dict)
return {
"days": get_past_days(5),
"statuses": Status.all().fetch(100),
"services": services,
}
def get(self):
td = default_template_data()
td.update(self.retrieve("frontpage"))
#td.update(self.data())
self.render(td, 'index.html')
class ListHandler(BaseHandler):
list = None
def data(self):
services = []
default_status = Status.get_default()
query = Service.all().filter("list =", self.list).order("name")
for service in query.fetch(100):
event = service.current_event()
if event is not None:
status = event.status
else:
status = default_status
today = date.today() + timedelta(days=1)
current, = service.history(1, default_status, start=today)
has_issues = (current["information"] and
status.key() == default_status.key())
service_dict = {
"slug": service.slug,
"name": service.name,
"url": service.url(),
"status": status,
"has_issues": has_issues,
"history": service.history(5, default_status),
}
services.append(service_dict)
return {
"days": get_past_days(5),
"statuses": Status.all().fetch(100),
"services": services,
}
def get(self, list_slug):
self.list = List.get_by_slug(list_slug)
if self.list is None:
self.not_found()
return
td = default_template_data()
td.update(self.retrieve("list"+list_slug))
#td.update(self.data())
self.render(td, 'index.html')
class ListListHandler(BaseHandler):
lists = []
statuses = []
def data(self):
services = []
default_status = Status.get_default()
lists = []
for list in self.lists:
l = List.get_by_slug(list)
if l is not None:
lists.append(l)
for service in Service.all().filter("list IN", lists).order("name").fetch(100):
event = service.current_event()
if event is not None:
status = event.status
else:
status = default_status
if len(self.statuses) and not status.slug in self.statuses: continue
today = date.today() + timedelta(days=1)
current, = service.history(1, default_status, start=today)
has_issues = (current["information"] and
status.key() == default_status.key())
service_dict = {
"slug": service.slug,
"name": service.name,
"url": service.url(),
"status": status,
"has_issues": has_issues,
"history": service.history(5, default_status),
}
services.append(service_dict)
return {
"days": get_past_days(5),
"statuses": Status.all().fetch(100),
"services": services,
}
def get(self):
self.lists = self.request.get_all('filter')
self.lists.sort()
self.statuses = self.request.get_all('status')
self.statuses.sort()
td = default_template_data()
td.update(self.retrieve("list"+"_".join(self.statuses)+"_".join(self.lists)))
#td.update(self.data())
self.render(td, 'index.html')
class ListSummaryHandler(BaseHandler):
def data(self):
lists = {}
default_status = Status.get_default()
for service in Service.all().order("list").fetch(100):
event = service.current_event()
if event is not None:
status = event.status
else:
status = default_status
if service.list and not lists.has_key(service.list.slug) or \
lists[service.list.slug]["status"].name < status.name:
lists[service.list.slug] = {"list": service.list, "status": status}
return { "lists": lists.items() }
def get(self):
td = default_template_data()
td.update(self.retrieve("summary"))
#td.update(self.data())
self.render(td, 'summary.html')
class ServiceHandler(BaseHandler):
def get(self, service_slug, year=None, month=None, day=None):
service = Service.get_by_slug(service_slug)
if not service:
self.not_found()
return
try:
if day:
start_date = date(int(year), int(month), int(day))
end_date = start_date + timedelta(days=1)
elif month:
start_date = date(int(year), int(month), 1)
days = calendar.monthrange(start_date.year,
start_date.month)[1]
end_date = start_date + timedelta(days=days)
elif year:
start_date = date(int(year), 1, 1)
end_date = start_date + timedelta(days=365)
else:
start_date = None
end_date = None
except ValueError:
            self.not_found()
return
events = service.events
if start_date and end_date:
events.filter('start >= ', start_date).filter('start <', end_date)
td = default_template_data()
td["service"] = service
td["events"] = events.order("-start").fetch(500)
self.render(td, 'service.html')
class BaseDocumentationHandler(BaseHandler):
def get(self):
td = default_template_data()
td["selected"] = "overview"
self.render(td, 'publicdoc/index.html')
class DocumentationHandler(BaseHandler):
pages = [
"events",
"services",
"service-lists",
"status-images",
"statuses",
"status-images",
]
def get(self, page):
td = default_template_data()
if page not in self.pages:
self.render({}, '404.html')
return
td["selected"] = page
self.render(td, "publicdoc/%s.html" % page)
class CredentialsRedirectHandler(BaseHandler):
def get(self):
self.redirect("/admin/credentials")
class RSSHandler(BaseHandler):
""" Feed of the last settings.RSS_NUM_EVENTS_TO_FETCH events """
def get(self):
self.response.headers['Content-Type'] = "application/rss+xml; charset=utf-8"
host = self.request.headers.get('host', 'nohost')
base_url = self.request.scheme + "://" + host
events = []
query = Event.all().order("-start")
# Filter query by requested services, if specified in the 'service' URL parameter.
service_list = []
for service_arg in self.request.get_all('services'):
service_list.extend(service_arg.split(','))
service_list = map(lambda serv_slug: Service.get_by_slug(serv_slug), service_list)
# filter out any non-existent services
service_list = filter(lambda service: not service is None, service_list)
service_string = 'all services'
if len(service_list) > 0:
query.filter('service IN', service_list)
if len(service_list) == 1:
service_string = 'the %s service' % service_list[0].name
elif len(service_list) == 2:
service_string = 'the %s and %s services' % (service_list[0].name, service_list[1].name)
else:
service_string = 'the %s, and %s services' % (', '.join([service.name for service in service_list[:-1]]), service_list[-1].name)
# Create the root 'rss' element
rss_xml = et.Element('rss')
rss_xml.set('version', '2.0')
# Create the channel element and its metadata elements
channel = et.SubElement(rss_xml, 'channel')
title = et.SubElement(channel, 'title')
title.text = '%s Service Events' % settings.SITE_NAME
description = et.SubElement(channel, 'description')
description.text = 'This feed shows the last %d events on %s on %s.' % (settings.RSS_NUM_EVENTS_TO_FETCH, service_string, settings.SITE_NAME)
link = et.SubElement(channel, 'link')
link.text = base_url
# Create each of the feed events.
item_subelements = {
'title': lambda(event): '[%s - %s] %s' % (event.service.name, event.status.name, unicode(event.message)),
'description': lambda(event): '%s' % unicode(event.message),
'link': lambda(event): '%s/services/%s' % (base_url, event.service.slug),
'category': lambda(event): event.service.name,
'pubDate': lambda(event): format_date_time(mktime(event.start.timetuple())),
'guid': lambda(event): '%s/api/v1/services/%s/events/%s' % (base_url, event.service.slug, unicode(event.key()))
}
for event in query.fetch(settings.RSS_NUM_EVENTS_TO_FETCH):
item = et.SubElement(channel, 'item')
for tag, text_func in item_subelements.iteritems():
subelement = et.SubElement(item, tag)
subelement.text = text_func(event)
self.response.out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.response.out.write(et.tostring(rss_xml))
| 32.852459 | 149 | 0.590961 | 11,642 | 0.829912 | 0 | 0 | 0 | 0 | 0 | 0 | 2,874 | 0.204876 |
12d4213a1f0e884d767b82004cfca76be19c9038 | 1,375 | py | Python | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | ["MIT"] | null | null | null | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | ["MIT"] | 3 | 2019-12-26T16:57:34.000Z | 2021-06-01T23:08:35.000Z | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | ["MIT"] | null | null | null |
from wtforms import Form, StringField, PasswordField, DecimalField, IntegerField, SelectField, validators
from wtforms.fields.html5 import DateField
def strip_filter(x): return x.strip() if x else None
class RegistrationForm(Form):
username = StringField('Username', [validators.Length(
min=1, max=255)], filters=[strip_filter])
password = PasswordField('Password', [validators.Length(min=3, max=255)])
class CreateCourseForm(Form):
coursename = StringField(
'Course Name', [validators.Length(min=1, max=255)], filters=[strip_filter])
courselocation = StringField(
'Course Location', [validators.Length(min=1, max=255)], filters=[strip_filter])
rating = DecimalField('Rating', [validators.NumberRange(
min=50, max=150, message="Must Be Between 50-150")], places=2)
slope = IntegerField('Slope', [validators.NumberRange(
min=55, max=155, message="Must Be Between 55-155")])
courseimage = StringField('Course Image URL', [validators.Length(
min=1, max=255), validators.URL(message="Must Be a URL")], filters=[strip_filter])
class CreateScoreForm(Form):
course = SelectField('Course Name', choices=[('oops', 'oops')])
rounddate = DateField('Round Date', format='%Y-%m-%d')
score = IntegerField('Round Score')
attest = SelectField('Attesting Golfer', choices=[('oops', 'oops')])
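    # Note: the ('oops', 'oops') choices above look like placeholders; presumably
    # the view repopulates them at runtime (e.g. form.course.choices = [...]), which
    # is the usual WTForms pattern for database-driven SelectFields.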
| 42.96875 | 105 | 0.696 | 1,162 | 0.845091 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.171636 |
12d68f272974ae7982471fbca3af702e552c3c1f | 597 | py | Python | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | ["MIT"] | null | null | null | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | ["MIT"] | null | null | null | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 08:32:03 2021
@author: User
"""
import numpy as np
import matplotlib.pyplot as plt
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(a)
print(a[0])
print(a.ndim)  # tells you the number of axes (dimensions) of the array
print(a.shape)  # gives a tuple of integers indicating the number of elements along each axis
print(a.size)
#%%
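# np.newaxis inserts a new leading axis: `a` has shape (3, 4), so `vec_fila`
# below has shape (1, 3, 4), i.e. the same data viewed as a single "row" block.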
vec_fila = a[np.newaxis, :]
print(vec_fila.shape, a.shape)
#%%
print(a.sum())
print(a.min())
print(a.max())
#%%
print(a)
print(a.max(axis=1))
print(a.max(axis=0))
#%%
print(np.random.random(3))
| 22.111111 | 98 | 0.649916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.38861 |
12d6fdba24bc3c779da8bc89c659942cc66fb630 | 9,284 | py | Python | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | ["MIT"] | null | null | null | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | ["MIT"] | 6 | 2019-08-14T18:54:23.000Z | 2019-09-19T22:10:42.000Z | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | ["MIT"] | null | null | null |
"""Correlation functions for matter and halos.
"""
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper, _handle_gsl_error
import numpy as np
def xi_nfw_at_r(r, M, c, Omega_m, delta=200):
"""NFW halo profile correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h
c (float): Concentration
Omega_m (float): Omega_matter, matter fraction of the density
delta (int; optional): Overdensity, default is 200
Returns:
float or array like: NFW halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_nfw(r.cast(), len(r), M, c, delta,
Omega_m, xi.cast())
return xi.finish()
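# Minimal usage sketch for xi_nfw_at_r (illustrative values, not from the source):
#   import numpy as np
#   r = np.logspace(-2, 2, 50)                     # Mpc/h comoving
#   xi = xi_nfw_at_r(r, M=1e14, c=5, Omega_m=0.3)  # NFW correlation profile at r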
def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.):
"""Einasto halo profile.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h; not used if rhos is specified
conc (float): Concentration
alpha (float): Profile exponent
om (float): Omega_matter, matter fraction of the density
delta (int): Overdensity, default is 200
rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional
Returns:
float or array like: Einasto halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos,
conc, alpha, delta, om, xi.cast())
return xi.finish()
def xi_mm_at_r(r, k, P, N=500, step=0.005, exact=False):
"""Matter-matter correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
k (array like): Wavenumbers of power spectrum in h/Mpc comoving
P (array like): Matter power spectrum in (Mpc/h)^3 comoving
N (int; optional): Quadrature step count, default is 500
step (float; optional): Quadrature step size, default is 5e-3
exact (boolean): Use the slow, exact calculation; default is False
Returns:
float or array like: Matter-matter correlation function
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
if not exact:
rc = cluster_toolkit._lib.calc_xi_mm(r.cast(), len(r), k.cast(),
P.cast(), len(k), xi.cast(),
N, step)
_handle_gsl_error(rc, xi_mm_at_r)
else:
if r.arr.max() > 1e3:
raise Exception("max(r) cannot be >1e3 for numerical stability.")
rc = cluster_toolkit._lib.calc_xi_mm_exact(r.cast(), len(r),
k.cast(), P.cast(),
len(k), xi.cast())
_handle_gsl_error(rc, xi_mm_at_r)
return xi.finish()
def xi_2halo(bias, xi_mm):
"""2-halo term in halo-matter correlation function
Args:
bias (float): Halo bias
xi_mm (float or array like): Matter-matter correlation function
Returns:
float or array like: 2-halo term in halo-matter correlation function
"""
xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_mm)
cluster_toolkit._lib.calc_xi_2halo(len(xi_mm), bias, xi_mm.cast(),
xi.cast())
return xi.finish()
def xi_hm(xi_1halo, xi_2halo, combination="max"):
"""Halo-matter correlation function
Note: at the moment you can combine the 1-halo and 2-halo terms by either taking the max of the two or the sum of the two. The 'combination' field must be set to either 'max' (default) or 'sum'.
Args:
xi_1halo (float or array like): 1-halo term
xi_2halo (float or array like, same size as xi_1halo): 2-halo term
combination (string; optional): specifies how the 1-halo and 2-halo terms are combined, default is 'max' which takes the max of the two
Returns:
float or array like: Halo-matter correlation function
"""
if combination == "max":
switch = 0
elif combination == 'sum':
switch = 1
else:
raise Exception("Combinations other than maximum not implemented yet")
xi_1halo = _ArrayWrapper(xi_1halo, allow_multidim=True)
xi_2halo = _ArrayWrapper(xi_2halo, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_1halo)
cluster_toolkit._lib.calc_xi_hm(len(xi_1halo), xi_1halo.cast(),
xi_2halo.cast(), xi.cast(), switch)
return xi.finish()
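# Sketch of how the pieces combine (hypothetical inputs): a 1-halo term from a
# profile function and the biased matter term from xi_2halo are merged here, e.g.
#   xi_full = xi_hm(xi_nfw_at_r(r, 1e14, 5, 0.3), xi_2halo(2.0, xi_mm_at_r(r, k, P)))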
def xi_DK(r, M, conc, be, se, k, P, om, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
        om (float): Omega_matter, matter density fraction
delta (float): overdensity of matter. Optional, default is 200
rhos (float): Einasto density. Optional, default is compute from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, xi.cast())
return xi.finish()
def xi_DK_appendix1(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile, first form from the appendix, eq. A3.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
om (float): matter density fraction Omega_m
bias (float): halo bias
xi_mm (float or array like): matter correlation function at r
delta (float): overdensity of matter. Optional, default is 200
rhos (float): Einasto density. Optional, default is computed from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK_app1(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast())
return xi.finish()
def xi_DK_appendix2(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile, second form from the appendix, eq. A4.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
om (float): matter density fraction Omega_m
bias (float): halo bias
xi_mm (float or array like): matter correlation function at r
delta (float): overdensity of matter. Optional, default is 200
rhos (float): Einasto density. Optional, default is computed from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k)
P = _ArrayWrapper(P)
xi_mm = _ArrayWrapper(xi_mm)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK_app2(r.cast(), len(r), M, rhos, conc, be,
se, alpha, beta, gamma, delta,
k.cast(), P.cast(), len(k), om, bias,
xi_mm.cast(), xi.cast())
return xi.finish()
| 40.190476
| 198
| 0.62193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,474
| 0.589617
|
12d736e2a136d27d71bf7901bba9c44692b70118
| 1,500
|
py
|
Python
|
Tools/scripts/rgrep.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2015-05-21T23:47:54.000Z
|
2015-05-21T23:47:54.000Z
|
Tools/scripts/rgrep.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2015-10-29T20:51:31.000Z
|
2015-10-29T20:51:31.000Z
|
Tools/scripts/rgrep.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
#! /usr/bin/env python
"""Reverse grep.
Usage: rgrep [-i] pattern file
"""
import sys
import re
import getopt
def main():
bufsize = 64*1024
reflags = 0
opts, args = getopt.getopt(sys.argv[1:], "i")
for o, a in opts:
if o == '-i':
reflags = reflags | re.IGNORECASE
if len(args) < 2:
usage("not enough arguments")
if len(args) > 2:
usage("exactly one file argument required")
pattern, filename = args
try:
prog = re.compile(pattern, reflags)
except re.error as msg:
usage("error in regular expression: %s" % str(msg))
try:
f = open(filename)
except IOError as msg:
usage("can't open %s: %s" % (repr(filename), str(msg)), 1)
f.seek(0, 2)
pos = f.tell()
leftover = None
while pos > 0:
size = min(pos, bufsize)
pos = pos - size
f.seek(pos)
buffer = f.read(size)
lines = buffer.split("\n")
del buffer
if leftover is None:
if not lines[-1]:
del lines[-1]
else:
lines[-1] = lines[-1] + leftover
if pos > 0:
leftover = lines[0]
del lines[0]
else:
leftover = None
lines.reverse()
for line in lines:
if prog.search(line):
print(line)
def usage(msg, code=2):
sys.stdout = sys.stderr
print(msg)
print(__doc__)
sys.exit(code)
if __name__ == '__main__':
main()
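# Hedged usage note (not part of the original script): run from a shell it
# prints matching lines starting from the end of the file, e.g.
#
#   python rgrep.py -i "traceback" server.log
#
# where the pattern and the log file name are placeholders.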
| 23.076923
| 66
| 0.519333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.136667
|
12d758ba9b3d6c5825fba951fa8141e8f0dd86e9
| 5,161
|
py
|
Python
|
licel_format_parser/main.py
|
IFAEControl/lidar-cli
|
02480ecd932cad1e11a04d866eb2eafc214f678d
|
[
"BSD-3-Clause"
] | null | null | null |
licel_format_parser/main.py
|
IFAEControl/lidar-cli
|
02480ecd932cad1e11a04d866eb2eafc214f678d
|
[
"BSD-3-Clause"
] | null | null | null |
licel_format_parser/main.py
|
IFAEControl/lidar-cli
|
02480ecd932cad1e11a04d866eb2eafc214f678d
|
[
"BSD-3-Clause"
] | null | null | null |
import struct
f = open("c0610400.102200", 'rb')
class DateTime:
def __init__(self):
line = f.readline().strip()
self._letter = chr(line[0])
self._year = line[1:3].decode("utf-8")
self._month = int(chr(line[3]), 16)
self._day = line[4:6].decode("utf-8")
self._hour = line[6:8].decode("utf-8")
self._minute = line[9:11].decode("utf-8")
self._second = line[11:13].decode("utf-8")
self._millis = line[13:15].decode("utf-8")
def __str__(self):
date = "{}/{}/{} ".format(self._day, self._month, self._year)
time = "{}:{}:{}.{}".format(self._hour, self._minute, self._second, self._millis)
return "{} {}".format(date, time)
class Location:
def __init__(self):
line = f.readline().strip()
self._location = line[0:9].strip().decode("utf-8")
self._start_time = line[9:28].decode("utf-8")
self._stop_time = line[29:48].decode("utf-8")
self._higt_asl = line[49:53].decode("utf-8")
self._longitude = line[54:60].decode("utf-8")
self._latitude = line[61:67].decode("utf-8")
self._zenith_angle = line[68:70].decode("utf-8")
def __str__(self):
return "Location: {}\nStart: {}\nEnd: {}\nAltitude: {}, longitude: {}, latitude: {}, zenith_angle: {}".format(
self._location, self._start_time, self._stop_time, self._higt_asl, self._longitude, self._latitude,
self._zenith_angle
)
class LaserData:
def __init__(self):
line = f.readline().strip()
self.shots_laser_1 = line[0:7].decode("utf-8")
# XXX: Official documentation (22 february 2019) states that there should be 5 numbers but official licel app
# only returns 4
self.pulse_freq_1 = line[8:12].decode("utf-8")
self.shots_laser_2 = line[13:20].decode("utf-8")
# XXX: Official documentation (22 february 2019) states that there should be 5 numbers but official licel app
# only returns 4
self.pulse_freq_2 = line[21:24].decode("utf-8")
self.datasets_num = int(line[26:28].decode("utf-8"))
self.undocumented_laser_3 = line[29:36].decode("utf-8")
self.undocumented_freq_3 = line[37:41].decode("utf-8")
def __str__(self):
return str(self.datasets_num)
class DatasetDescription:
def __init__(self):
line = f.readline().strip()
self._active = bool(int(chr(line[0])))
self._analog = False
self._phontocounting = False
tmp = bool(int(chr(line[2])))
if tmp:
self._phontocounting = True
else:
self._analog = True
self._laser = int(chr(line[4]))
self._bins = line[6:11].decode("utf-8")
self._one = line[12]
self._pmt_voltage = line[14:18].decode("utf-8")
# XXX: Docs say two digits before the dot. But there is only one.
self._binwith = line[19:23].decode("utf-8")
self._wavelength = line[24:29].decode("utf-8")
self._polarisation = None
tmp = chr(line[31])
if tmp == 'o':
self._polarisation = "No"
elif tmp == 's':
self._polarisation = "Perpendicular"
elif tmp == "i":
self._polarisation = "parallel"
self._adc_bits = line[43:45].decode("utf-8")
self._number_of_shots = line[46:52].decode("utf-8")
self._analog_range_or_disc = line[53:58].decode("utf-8")
# XXX: According to the documentation BT = analog but in our samples from the official software BT = photon
# we only read the TR number
self._tr = int(chr(line[-1]))
def __str__(self):
print(self._tr)
return "Active: {}, analog: {}, photoncounting: {}, " \
"laser: {}, bins: {}".format(self._active, self._analog,
self._phontocounting, self._laser,
self._bins)
def read_dataset(file):
ch = file.read(1)
buf = []
while True:
if chr(ch[0]) == '\n' and chr(buf[-1]) == '\r':
break
buf.append(ch[0])
ch = file.read(1)
buf.append(ch[0])
return bytes(buf)
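# Hedged illustration (not in the original file): read_dataset accumulates raw
# bytes until it hits the b"\r\n" terminator and returns the buffer with that
# terminator still attached, which is why callers below slice off the last two
# bytes:
#
#   payload = read_dataset(f)[:-2]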
class Data:
def __init__(self):
# \r\n
f.readline()
# Actual dataset, without \r\n
# Read three consecutive datasets (dropping the trailing b"\r\n" from each);
# only the last one read is kept in `line` for the conversion below
line = read_dataset(f)[:-2]
line = read_dataset(f)[:-2]
line = read_dataset(f)[:-2]
int_array = [x[0] for x in struct.iter_unpack('<I', line)]
converted = [(x/58)*(500/(2**16-1)) for x in int_array]
print(converted)
class Header:
def __init__(self):
self._date_time = DateTime()
self._location = Location()
self._laser_data = LaserData()
self._datasets_descriptions = []
for _ in range(self._laser_data.datasets_num):
self._datasets_descriptions.append(DatasetDescription())
self._data = Data()
def __str__(self):
print(self._laser_data)
for i in self._datasets_descriptions:
print(i)
return "{}\n{}".format(self._date_time, self._location)
h = Header()
print(h)
| 33.083333
| 118
| 0.571207
| 4,822
| 0.934315
| 0
| 0
| 0
| 0
| 0
| 0
| 948
| 0.183685
|
12d85c3f8e0b325f0104a7462f8c848f6627e0a1
| 7,073
|
py
|
Python
|
built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modified optimizer_v2 implementation enabling XLA across variable updates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
class OptimizerV2Modified(optimizer_v2.OptimizerV2):
"""This is a subclass optimizer that performs variable updates in
Distribution Strategy replica context. OptimizerV2 base class is currently
under refactoring and will have better support of this.
Please refer to optimizer_v2.OptimizerV2 for more details regarding the APIs.
"""
def __init__(self, name, use_experimental_compile=False, **kwargs):
"""Create a new Optimizer.
Args:
name: Optional name prefix for variables and ops created by the optimizer.
use_experimental_compile: when set to True, use experimental_compile on
the _distributed_apply function.
"""
super(OptimizerV2Modified, self).__init__(name=name, **kwargs)
self.use_experimental_compile = use_experimental_compile
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
"""Apply gradients to variables.
Only the last two lines are different from optimizer_v2.OptimizerV2.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name passed
to the `Optimizer` constructor.
experimental_aggregate_gradients: Whether to sum gradients from different
replicas in the presence of `tf.distribute.Strategy`. If False, it's
user responsibility to aggregate the gradients. Default to True.
Returns:
An `Operation` that applies the specified gradients. The `iterations`
will be automatically increased by 1.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If called in cross-replica context.
"""
# pylint: disable=protected-access
grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
# pylint: enable=protected-access
var_list = [v for (_, v) in grads_and_vars]
with ops.name_scope_v2(self._name):
# Create iteration if necessary.
with ops.init_scope():
self._create_all_weights(var_list)
if not grads_and_vars:
# Distribution strategy does not support reducing an empty list of
# gradients
return control_flow_ops.no_op()
if distribute_ctx.in_cross_replica_context():
raise RuntimeError(
"`apply_gradients() cannot be called in cross-replica context. "
"Use `tf.distribute.Strategy.run` to enter replica "
"context.")
strategy = distribute_ctx.get_strategy()
if (not experimental_aggregate_gradients and strategy and isinstance(
strategy.extended,
parameter_server_strategy.ParameterServerStrategyExtended)):
raise NotImplementedError(
"`experimental_aggregate_gradients=False is not supported for "
"ParameterServerStrategy and CentralStorageStrategy")
apply_state = self._prepare(var_list)
if experimental_aggregate_gradients:
grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars)
grads_and_vars = self._aggregate_gradients(grads_and_vars)
grads_and_vars = self._transform_gradients(grads_and_vars)
self._distributed_apply(None, grads_and_vars, name, apply_state)
return self._iterations.assign_add(1, read_value=False)
def _distributed_apply_org(self, distribution, grads_and_vars, name, apply_state):
"""`apply_gradients` using a `DistributionStrategy`.
This is the _distributed_apply function in optimizer_v2,
returning a list of ops.
"""
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
apply_kwargs = {}
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
if "apply_state" in self._sparse_apply_args:
apply_kwargs["apply_state"] = apply_state
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices, **apply_kwargs)
if "apply_state" in self._dense_apply_args:
apply_kwargs["apply_state"] = apply_state
update_op = self._resource_apply_dense(grad, var, **apply_kwargs)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with ops.name_scope(name or self._name, skip_on_eager=True):
for grad, var in grads_and_vars:
update_ops.append(apply_grad_to_update_var(var, grad))
return control_flow_ops.group(*update_ops)
def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):
if self.use_experimental_compile:
self._distributed_apply_compile(distribution, grads_and_vars, name,
apply_state)
else:
self._distributed_apply_org(distribution, grads_and_vars, name,
apply_state)
#@tf.function(experimental_compile=True)
def _distributed_apply_compile(self, distribution, grads_and_vars, name,
apply_state):
"""This is a warpper, to return a tensor, making tf.func() happy."""
self._distributed_apply_org(distribution, grads_and_vars,
name, apply_state)
return tf.ones((), dtype=tf.bool)
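# Hedged usage sketch (not part of the original module): a concrete optimizer
# is expected to subclass OptimizerV2Modified exactly as it would subclass
# optimizer_v2.OptimizerV2. The class below is a hypothetical illustration
# only, not something shipped with this repository; `grads` and `model` are
# placeholders for gradients and a Keras model computed elsewhere:
#
#   class MySGD(OptimizerV2Modified):
#       def __init__(self, learning_rate=0.01, **kwargs):
#           super(MySGD, self).__init__("MySGD", **kwargs)
#           self._set_hyper("learning_rate", learning_rate)
#
#       def _resource_apply_dense(self, grad, var, apply_state=None):
#           lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
#           return var.assign_sub(lr * grad)
#
#   opt = MySGD(use_experimental_compile=False)
#   opt.apply_gradients(zip(grads, model.trainable_variables))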
| 42.10119
| 88
| 0.713276
| 5,626
| 0.795419
| 0
| 0
| 0
| 0
| 0
| 0
| 2,979
| 0.421179
|
12d9793b66d488d4aab6750551143953a771ab71
| 4,828
|
py
|
Python
|
src/data/utils.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/data/utils.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | null | null | null |
src/data/utils.py
|
behavioral-data/multiverse
|
82b7265de0aa3e9d229ce9f3f86b8b48435ca365
|
[
"MIT"
] | 1
|
2021-08-19T15:21:50.000Z
|
2021-08-19T15:21:50.000Z
|
import os
import errno
import requests
import glob
import json
from tqdm import tqdm
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def download_file(url, out_path,file_mode = "wb"):
response = requests.get(url)
if response:
out_file = open(out_path,file_mode)
out_file.write(response.content)
out_file.close()
return response.status_code == requests.codes.ok
def version_path_to_components(path):
slug_path, version_file = os.path.split(path)
version_id = version_file.split(".")[0]
comp_path, slug_id = os.path.split(slug_path)
comp_name = os.path.basename(comp_path)
return {"version_id" : version_id,
"slug_id" : slug_id,
"comp_name" : comp_name}
class CompetitionReader(object):
def __init__(self, path, python_only=False):
self.path = path
self.slug_ids = [os.path.basename(x) for x in glob.glob(os.path.join(self.path, "*"))]
self.comp_name = os.path.basename(self.path)
self.python_only = python_only
def apply_to_slugs(self,fn):
# Applies a function fn to a list of dicts of notebooks
for slug_id in self.slug_ids:
versions = self.load_slug_versions(slug_id)
yield fn(versions)
def load_slug_versions(self,slug_id):
versions = []
for path in glob.glob(os.path.join(self.path,slug_id,"*.json")):
with open(path) as version_file:
filename = os.path.basename(path)
version_id = os.path.splitext(filename)[0]
try:
version = json.load(version_file)
if not isinstance(version,dict):
continue
except json.decoder.JSONDecodeError:
continue
if self.python_only:
try:
if not version["metadata"]["language_info"]["name"] == "python":
continue
except KeyError:
continue
version["version_id"] = version_id
version["path"] = path
versions.append(version)
return versions
def write_jsonl(open_file, data, mode = "a"):
for datum in data:
open_file.write(json.dumps(datum))
open_file.write("\n")
def load_jsonl(path):
lines = []
with open(path, 'r', encoding='utf-8') as f:
for line in f:
loaded_line = json.loads(line)
lines.append(loaded_line)
return lines
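# Hedged usage sketch (not in the original module); "records.jsonl" is a
# placeholder path:
#
#   records = [{"id": 1}, {"id": 2}]
#   with open("records.jsonl", "a", encoding="utf-8") as f:
#       write_jsonl(f, records)
#   assert load_jsonl("records.jsonl")[-1] == {"id": 2}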
def is_git_line(line):
return len(line) >0 and line[0] in ["+","-"]
def remove_git_chars(line):
if is_git_line(line):
return line[1:]
else:
return line
class KaggleDiffsReader():
def __init__(self,diff_path):
self.diff_path = diff_path
self.diffs = []
with open(self.diff_path, 'r', encoding='utf-8') as f:
for line in tqdm(f, desc="Loading Diffs"):
diff_line = json.loads(line)
orig, new = self.split_orig_and_new(diff_line)
diff = {
"metadata":diff_line["metadata"],
"orig":orig,
"new":new,
"cell_diff":diff_line["celll_diff"]
}
self.diffs.append(diff)
def __len__(self):
return len(self.diffs)
def __getitem__(self,i):
return self.diffs[i]
def remove_git_chars(self,line):
if line[0] in ["+","-"]:
return line[1:]
else:
return line
def split_orig_and_new(self,diff):
#TODO: Current preserves the plus or minus
lines = diff["cell_diff"].split("\n")
orig = [self.remove_git_chars(x) for x in lines if len(x)>0 and x[0] != "+" ]
new = [self.remove_git_chars(x) for x in lines if len(x)>0 and x[0] != "-"]
return "\n".join(orig), "\n".join(new)
def split_orig_and_new(diff):
lines = diff.split("\n")
orig = [remove_git_chars(x) for x in lines if len(x)>0 and x[0] != "+" ]
new = [remove_git_chars(x) for x in lines if len(x)>0 and x[0] != "-"]
return "\n".join(orig), "\n".join(new)
def get_inserted_and_removed(diff, as_string = False):
lines = diff.split("\n")
inserted = [remove_git_chars(x) for x in lines if len(x)>0 and x[0] == "+" ]
removed = [remove_git_chars(x) for x in lines if len(x)>0 and x[0] == "-"]
if as_string:
return "\n".join(inserted), "\n".join(removed)
else:
return inserted, removed
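# Hedged worked example (not in the original module): for a cell diff such as
# "-old_line\n+new_line\n context", split_orig_and_new returns
# ("old_line\n context", "new_line\n context"), while get_inserted_and_removed
# returns (["new_line"], ["old_line"]) -- i.e. the added and deleted lines with
# their +/- markers stripped.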
| 31.555556
| 94
| 0.570008
| 2,709
| 0.561102
| 217
| 0.044946
| 0
| 0
| 0
| 0
| 377
| 0.078086
|
12d99705dd6d38a5113e0f5059a5a16ef3ce2532
| 231
|
py
|
Python
|
LeetCode/Product and Sum/Subtract_Product_And_Sum.py
|
GSri30/Competetive_programming
|
0dc1681500a80b6f0979d0dc9f749357ee07bcb8
|
[
"MIT"
] | 22
|
2020-01-03T17:32:00.000Z
|
2021-11-07T09:31:44.000Z
|
LeetCode/Product and Sum/Subtract_Product_And_Sum.py
|
GSri30/Competetive_programming
|
0dc1681500a80b6f0979d0dc9f749357ee07bcb8
|
[
"MIT"
] | 10
|
2020-09-30T09:41:18.000Z
|
2020-10-11T11:25:09.000Z
|
LeetCode/Product and Sum/Subtract_Product_And_Sum.py
|
GSri30/Competetive_programming
|
0dc1681500a80b6f0979d0dc9f749357ee07bcb8
|
[
"MIT"
] | 25
|
2019-10-14T19:25:01.000Z
|
2021-05-26T08:12:20.000Z
|
class Solution:
def subtractProductAndSum(self, n: int) -> int:
x = n
add = 0
mul = 1
while x > 0 :
add += x%10
mul *= x%10
x = x//10
return mul - add
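# Hedged worked example (not part of the original solution): for n = 234 the
# digit product is 2 * 3 * 4 = 24 and the digit sum is 2 + 3 + 4 = 9, so
# Solution().subtractProductAndSum(234) returns 24 - 9 = 15.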
| 21
| 51
| 0.402597
| 230
| 0.995671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
12da373705e611aa87f9b708815df70bbd6ae325
| 14,870
|
py
|
Python
|
jocular/calibrator.py
|
MartinCooke/jocular
|
635816d4ef6aa6ea75187137e25386dad2d551e9
|
[
"MIT"
] | 6
|
2021-03-21T16:46:44.000Z
|
2021-11-27T14:07:06.000Z
|
jocular/calibrator.py
|
MartinCooke/jocular
|
635816d4ef6aa6ea75187137e25386dad2d551e9
|
[
"MIT"
] | null | null | null |
jocular/calibrator.py
|
MartinCooke/jocular
|
635816d4ef6aa6ea75187137e25386dad2d551e9
|
[
"MIT"
] | null | null | null |
''' Handles calibration library and calibration of subs.
'''
import os.path
import numpy as np
from scipy.stats import trimboth
from kivy.app import App
from loguru import logger
from kivy.properties import BooleanProperty, DictProperty, NumericProperty
from kivy.core.window import Window
from jocular.table import Table
from jocular.utils import make_unique_filename
from jocular.component import Component
from jocular.settingsmanager import Settings
from jocular.image import Image, save_image, fits_in_dir
date_time_format = '%d %b %y %H:%M'
class Calibrator(Component, Settings):
save_settings = ['apply_dark', 'apply_flat', 'apply_bias']
masters = DictProperty({})
apply_flat = BooleanProperty(False)
apply_dark = BooleanProperty(False)
apply_bias = BooleanProperty(False)
use_l_filter = BooleanProperty(True)
exposure_tol = NumericProperty(5)
temperature_tol = NumericProperty(5)
dark_days_tol = NumericProperty(1)
flat_days_tol = NumericProperty(60)
tab_name = 'Calibration'
configurables = [
('use_l_filter', {'name': 'use light flat?', 'switch': '',
'help': 'If there is no flat for the given filter, use a light flat if it exists'}),
('exposure_tol', {'name': 'exposure tolerance', 'float': (0, 30, 1),
'fmt': '{:.0f} seconds',
'help': 'When selecting a dark, select those within this exposure tolerance'}),
('temperature_tol', {'name': 'temperature tolerance', 'float': (0, 40, 1),
'fmt': '{:.0f} degrees',
'help': 'When selecting a dark, restrict to those within this temperature tolerance'}),
('dark_days_tol', {'name': 'dark age tolerance', 'float': (0, 300, 1),
'fmt': '{:.0f} days',
'help': 'Maximum age of darks to use if no temperature was specified'}),
('flat_days_tol', {'name': 'flat age tolerance', 'float': (0, 300, 1),
'fmt': '{:.0f} days',
'help': 'Maximum age of flats to use'}),
]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.app = App.get_running_app()
self.calibration_dir = self.app.get_path('calibration')
self.masters = {} # map from name to FITs Image instance
self.library = {} # map from name to calibration table info
''' construct above dicts from calibration FITs in calibration directory
'''
for f in fits_in_dir(self.calibration_dir):
path = os.path.join(self.calibration_dir, f)
try:
s = Image(path)
if s.is_master:
self.add_to_library(s)
except Exception as e:
logger.warning('Calibrator: unable to parse calibration {:} ({:})'.format(f, e))
def on_new_object(self, *args):
n_masters = len(self.library)
if n_masters > 0:
self.info('{:d} masters'.format(n_masters))
else:
self.info('no masters')
def add_to_library(self, m):
''' called on initialisation and when we save a new master
'''
# keys are full names so they can be reliably deleted
self.masters[m.fullname] = m
self.library[m.fullname] = {
'name': m.name,
'type': m.sub_type,
'exposure': str(m.exposure) if m.exposure is not None else '???',
'temperature': str(m.temperature) if m.temperature is not None else '???',
'filter': m.filter,
'created': m.create_time.strftime(date_time_format),
'shape_str': m.shape_str,
'age': m.age,
'nsubs': m.nsubs if m.nsubs is not None else 0
}
def create_master(self, sub_type=None, exposure=None, temperature=None, filt=None):
''' Called by ObjectIO to save an existing stack captured by Jocular as a calibration master
'''
logger.info('save master type {:} expo {:} temp {:} filt {:}'.format(
sub_type, exposure, temperature, filt))
stacker = Component.get('Stacker')
# force the use of method that the user has chosen or set up by default for this type of calib
master = stacker.get_stack(filt, calibration=True)
''' Apply bad pixel mapping to calibration frames
If dark, find hot pixels in master and remove, otherwise use existing BPM
NB not fully tested
'''
bpm = Component.get('BadPixelMap')
if sub_type == 'dark':
master = bpm.do_bpm(master, bpm.find_hot_pixels(master))
logger.debug('created BPM from darks and applied it')
else:
master = bpm.do_bpm(master)
logger.debug('applied BPM to master')
''' Flats were divided thru by their robust mean to account for level differences
but then scaled to 50% to enable B/W controls; so multiply by 2
'''
if sub_type == 'flat':
master = 2 * master
self.save_master(data=master, exposure=exposure, filt=filt, temperature=temperature,
sub_type=sub_type, nsubs=stacker.get_selected_sub_count())
# add to notes field of current DSO
Component.get('Notes').notes = 'exposure {:} filter {:} temperature {:}'.format(exposure, filt, temperature)
def save_master(self, data=None, exposure=None, filt=None, temperature=None, sub_type=None, nsubs=None):
''' Save master and add to library to make it available immediately. Called both by
create_master above and by the Watched camera for any alien master subs. The difference is
that create_master above does BPM/flat handling etc so only applies to natively-captured
calibration masters.
'''
logger.info('new master type {:} expo {:} temp {:} filt {:} nsubs {:}'.format(
sub_type, exposure, temperature, filt, nsubs))
name = 'master{:}.fit'.format(sub_type)
path = make_unique_filename(os.path.join(self.calibration_dir, name))
save_image(data=data, path=path, exposure=exposure, filt=filt, temperature=temperature,
sub_type='master ' + sub_type, nsubs=nsubs)
self.add_to_library(Image(path))
def calibrate(self, sub):
# Given a light sub, apply calibration. Fails silently if no suitable calibration masters.
sub.calibrations = set({})
if not self.library:
self.info('no library')
return
if not (self.apply_dark or self.apply_bias or self.apply_flat):
self.info('none')
return
# get all masters (check speed, but should be quick)
dark = self.get_dark(sub)
flat = self.get_flat(sub)
bias = self.get_bias(sub)
logger.debug('D {:} F {:} B {:}'.format(dark, flat, bias))
D = self.get_master(dark)
# if D is not None:
# print('{:} min {:} max {:} median {:} mean {:}'.format(dark, np.min(D), np.max(D), np.median(D), np.mean(D)))
F = self.get_master(flat)
# if F is not None:
# print('{:} min {:} max {:} median {:} mean {:}'.format(flat, np.min(F), np.max(F), np.median(F), np.mean(F)))
B = self.get_master(bias)
# if B is not None:
# print('{:} min {:} max {:} median {:} mean {:}'.format(bias, np.min(B), np.max(B), np.median(B), np.mean(B)))
im = sub.get_image()
if self.apply_dark and self.apply_flat:
if dark is not None and flat is not None:
im = (im - D) / F
sub.calibrations = {'dark', 'flat'}
elif dark is not None:
im = im - D
sub.calibrations = {'dark'}
elif flat is not None:
if bias is not None:
sub.calibrations = {'flat', 'bias'}
im = (im - B) / F
else:
sub.calibrations = {'flat'}
im = im / F # inadvisable, but we allow it
elif self.apply_dark:
if dark is not None:
im = im - D
sub.calibrations = {'dark'}
elif self.apply_flat:
if flat is not None:
if bias is not None:
sub.calibrations = {'flat', 'bias'}
im = (im - B) / F
else:
sub.calibrations = {'flat'}
im = im / F
elif self.apply_bias:
if bias is not None:
sub.calibrations = {'bias'}
im = im - B
# limit
im[im < 0] = 0
im[im > 1] = 1
sub.image = im
applied = ' '.join(list(sub.calibrations))
if applied:
self.info(applied)
else:
self.info('none suitable')
def get_dark(self, sub):
# Find suitable dark for this sub given its parameters
if sub.exposure is None:
return None
# choose darks that are the right shape with exposure within tolerance
darks = {k: v for k, v in self.masters.items()
if v.shape == sub.shape and
v.sub_type == 'dark' and
v.exposure is not None and
abs(v.exposure - sub.exposure) < self.exposure_tol}
temperature = Component.get('Session').temperature
if temperature is not None:
# we know temperature, select those with temperatures and within tolerance
darks = [k for k, v in darks.items() if
v.temperature is not None and abs(v.temperature - temperature) < self.temperature_tol]
else:
# find those within date tolerance (set to 1 to get darks in current session)
darks = [k for k, v in darks.items() if v.age < self.dark_days_tol]
# if we have darks, return name of first one
return darks[0] if len(darks) > 0 else None
def get_bias(self, sub):
# get the most recent bias
bias = {k: v.age for k, v in self.masters.items()
if v.shape == sub.shape and v.sub_type == 'bias' }
return min(bias, key=bias.get) if len(bias) > 0 else None
def get_flat(self, sub):
# flats of right shape
flats = {k:v for k, v in self.masters.items()
if v.shape == sub.shape and v.sub_type == 'flat'}
# flat in required filter
if sub.filter is not None:
flats_in_filt = {k: v for k, v in flats.items() if v.filter is not None and v.filter == sub.filter}
else:
flats_in_filt = {}
# if we have none and can use L filter, use these
if (len(flats_in_filt) == 0) and self.use_l_filter:
flats_in_filt = {k:v for k, v in flats.items() if v.filter == 'L'}
# do we have any now? if not, return
if len(flats_in_filt) == 0:
return None
# find any within day tolerance, noting that this compares the date of the flat with
# the date of the sub (i.e. not necessarily the current date)
flats = {k: abs(v.create_time - sub.create_time).days for k,v in flats_in_filt.items()}
flats = {k: v for k, v in flats.items() if v <= self.flat_days_tol}
# find most recent if there is a choice
for k in sorted(flats, key=flats.get):
return k
return None
def get_master(self, name):
if name is None:
return None
# Retrieve image (NB loaded on demand, so effectively a cache)
return self.masters[name].get_image()
def _most_subs(self, cands):
c = {k: cands[k]['nsubs'] for k in cands.keys()}
return max(c, key=c.get)
def calibrate_flat(self, sub):
''' Perform calibrations on the flat, which include subtracting the bias if
available, and rescaling so the mean intensity is .5 (because outlier rejection
methods used to combine flat subs work best with normalised frames due to changing
light levels; the value of .5 is so that we can use B & W controls; we rescale to
a mean of 1 when saving since this is what a good flat needs for dividing)
'''
im = sub.get_image()
# subtract bias if available
bias = self.get_bias(sub)
if bias is not None:
#print('subtracting bias')
im = im - self.get_master(bias)
# normalise by mean of image in central 3rd zone
perc = 75 # retain central 75% of points when computing mean
w, h = im.shape
w1, w2 = int(w / 3), int(2 * w / 3)
h1, h2 = int(h / 3), int(2 * h / 3)
imr = im[h1: h2, w1: w2]
robust_mean = np.mean(trimboth(np.sort(imr.ravel(), axis=0),
(100 - perc)/100, axis=0), axis=0)
sub.image = .5 * im / robust_mean
def build_calibrations(self):
''' Construct table from library
'''
return Table(
size=Window.size,
data=self.library,
name='Calibration masters',
description='Calibration masters',
cols={
'Name': {'w': 300, 'align': 'left', 'field': 'name'},
'Type': {'w': 60, 'field': 'type', 'align': 'left'},
'Exposure': {'w': 80, 'field': 'exposure'},
'Temp. C': {'w': 80, 'field': 'temperature', 'type': str},
'Filter': {'w': 80, 'field': 'filter'},
'Created': {'w': 180, 'field': 'created', 'sort': {'DateFormat': date_time_format}},
'Size': {'w': 110, 'field': 'shape_str'},
'Age': {'w': 50, 'field': 'age', 'type': int},
'Subs': {'w': 50, 'field': 'nsubs', 'type': int}
},
actions={'move to delete dir': self.move_to_delete_folder},
on_hide_method=self.app.table_hiding
)
def show_calibration_table(self, *args):
''' Called when user clicks 'library' on GUI
'''
if not hasattr(self, 'calibration_table'):
self.calibration_table = self.build_calibrations()
self.app.showing = 'calibration'
# check for redraw
if self.calibration_table not in self.app.gui.children:
self.app.gui.add_widget(self.calibration_table, index=0)
self.calibration_table.show()
def move_to_delete_folder(self, *args):
objio = Component.get('ObjectIO')
for nm in self.calibration_table.selected:
if nm in self.library:
objio.delete_file(os.path.join(self.calibration_dir, nm))
del self.library[nm]
del self.masters[nm]
logger.info('deleted {:} calibration masters'.format(len(self.calibration_table.selected)))
self.calibration_table.update()
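# Hedged numeric sketch of Calibrator.calibrate_flat (not in the original
# source): with perc = 75, scipy's trimboth removes (100 - perc) / 100 = 25%
# of the sorted central-zone pixels from each tail before averaging, so a flat
# whose trimmed mean comes out at 0.8 would be stored as roughly
# sub.image = 0.5 * im / 0.8, i.e. normalised to a mean near 0.5.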
| 39.028871
| 123
| 0.571688
| 14,325
| 0.962831
| 0
| 0
| 0
| 0
| 0
| 0
| 5,037
| 0.338554
|
12dbd5bf3d381ee625187e0ae26efd79aef7f23a
| 1,128
|
py
|
Python
|
test/office_schema.py
|
chrismaille/marshmallow-pynamodb
|
1e799041ff1053a6aa67ce72729e7262cb0f746f
|
[
"MIT"
] | 3
|
2020-05-17T15:04:27.000Z
|
2021-08-12T14:27:15.000Z
|
test/office_schema.py
|
chrismaille/marshmallow-pynamodb
|
1e799041ff1053a6aa67ce72729e7262cb0f746f
|
[
"MIT"
] | 2
|
2020-05-06T00:11:49.000Z
|
2022-02-23T11:45:54.000Z
|
test/office_schema.py
|
chrismaille/marshmallow-pynamodb
|
1e799041ff1053a6aa67ce72729e7262cb0f746f
|
[
"MIT"
] | 1
|
2020-04-30T19:34:22.000Z
|
2020-04-30T19:34:22.000Z
|
from test.office_model import Headquarters, Office
from marshmallow import fields
from pynamodb.attributes import DiscriminatorAttribute
from marshmallow_pynamodb import ModelSchema
class OfficeSchema(ModelSchema):
"""Office Schema for PynamoDB Office Model.
We are overriding PynamoDB
NumberSetAttribute and UnicodeSetAttribute fields
to maintain list order
"""
numbers = fields.List(fields.Integer)
departments = fields.List(fields.String)
security_number = fields.Str(allow_none=True)
cls = DiscriminatorAttribute()
class Meta:
"""Schema Model Meta Class."""
model = Office
class HQSchema(OfficeSchema):
"""Model Schema with parent Schemas field Introspection.
Fields are introspected using
parent marshmallow ModelSchemas. (ex.: OfficeSchema Schema)
"""
class Meta:
model = Headquarters
class HeadquartersSchema(ModelSchema):
"""Model Schema with parent Models field Introspection.
Fields are introspected using
parent PynamoDB Models. (ex.: Office Model)
"""
class Meta:
model = Headquarters
| 22.117647
| 63
| 0.721631
| 935
| 0.828901
| 0
| 0
| 0
| 0
| 0
| 0
| 506
| 0.448582
|
12ddf9c1d17cbd9db7aea277570f0278393c93a6
| 1,599
|
py
|
Python
|
energy_demand/initalisations/initialisations.py
|
willu47/energy_demand
|
59a2712f353f47e3dc237479cc6cc46666b7d0f1
|
[
"MIT"
] | null | null | null |
energy_demand/initalisations/initialisations.py
|
willu47/energy_demand
|
59a2712f353f47e3dc237479cc6cc46666b7d0f1
|
[
"MIT"
] | null | null | null |
energy_demand/initalisations/initialisations.py
|
willu47/energy_demand
|
59a2712f353f47e3dc237479cc6cc46666b7d0f1
|
[
"MIT"
] | null | null | null |
"""Helper initialising functions
"""
#pylint: disable=I0011, C0321, C0301, C0103, C0325, R0902, R0913, no-member, E0213
def init_fuel_tech_p_by(all_enduses_with_fuels, nr_of_fueltypes):
"""Helper function to define stocks for all enduse and fueltype
Parameters
----------
all_enduses_with_fuels : dict
Provided fuels
nr_of_fueltypes : int
Nr of fueltypes
Returns
-------
fuel_tech_p_by : dict
"""
fuel_tech_p_by = {}
for enduse in all_enduses_with_fuels:
fuel_tech_p_by[enduse] = dict.fromkeys(range(nr_of_fueltypes), {})
return fuel_tech_p_by
def dict_zero(first_level_keys):
"""Initialise a dictionary with one level
Parameters
----------
first_level_keys : list
First level data
Returns
-------
one_level_dict : dict
dictionary
"""
one_level_dict = dict.fromkeys(first_level_keys, 0) # set zero as argument
return one_level_dict
def service_type_tech_by_p(lu_fueltypes, fuel_tech_p_by):
"""Initialise dict and fill with zeros
Parameters
----------
lu_fueltypes : dict
Look-up dictionary
fuel_tech_p_by : dict
Fuel fraction per technology for base year
Return
-------
service_fueltype_tech_by_p : dict
Fraction of service per fueltype and technology for base year
"""
service_fueltype_tech_by_p = {}
for fueltype_int in lu_fueltypes.values():
service_fueltype_tech_by_p[fueltype_int] = dict.fromkeys(fuel_tech_p_by[fueltype_int].keys(), 0)
return service_fueltype_tech_by_p
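# Hedged usage sketch (not in the original module); the enduse names and the
# fueltype count below are placeholders:
#
#   fuel_tech_p_by = init_fuel_tech_p_by(['heating', 'lighting'], 3)
#   # -> {'heating': {0: {}, 1: {}, 2: {}}, 'lighting': {0: {}, 1: {}, 2: {}}}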
| 24.984375
| 104
| 0.676048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 942
| 0.589118
|
12df0714eb5fa8ab8f6068ed158fd58746d6bc32
| 37
|
py
|
Python
|
npd_well_decoder/__init__.py
|
fmell/npd-well-name-decoder
|
a44ec28a6ef3b32ba38751eeffff479008b53e2d
|
[
"MIT"
] | null | null | null |
npd_well_decoder/__init__.py
|
fmell/npd-well-name-decoder
|
a44ec28a6ef3b32ba38751eeffff479008b53e2d
|
[
"MIT"
] | null | null | null |
npd_well_decoder/__init__.py
|
fmell/npd-well-name-decoder
|
a44ec28a6ef3b32ba38751eeffff479008b53e2d
|
[
"MIT"
] | null | null | null |
from .npd import parse_wellbore_name
| 18.5
| 36
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
12e000a4e8578ea58e111e55e0187884ea14b784
| 26,842
|
py
|
Python
|
Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py
|
jickieduan/python27
|
c752b552396bbed68d8555080d475718cea2edd0
|
[
"bzip2-1.0.6"
] | 5
|
2019-03-11T14:30:31.000Z
|
2021-12-04T14:11:54.000Z
|
Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py
|
jickieduan/python27
|
c752b552396bbed68d8555080d475718cea2edd0
|
[
"bzip2-1.0.6"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py
|
jickieduan/python27
|
c752b552396bbed68d8555080d475718cea2edd0
|
[
"bzip2-1.0.6"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
###############################################################################
# Name: util.py #
# Purpose: Misc utility functions used through out Editra #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
This file contains various helper functions and utilities that the program uses.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: util.py 72623 2012-10-06 19:33:06Z CJP $"
__revision__ = "$Revision: 72623 $"
#--------------------------------------------------------------------------#
# Imports
import os
import sys
import mimetypes
import encodings
import codecs
import urllib2
import wx
# Editra Libraries
import ed_glob
import ed_event
import ed_crypt
import dev_tool
import syntax.syntax as syntax
import syntax.synglob as synglob
import ebmlib
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class DropTargetFT(wx.PyDropTarget):
"""Drop target capable of accepting dropped files and text
@todo: has some issues with the clipboard on windows under certain
conditions. They are not fatal but need fixing.
"""
def __init__(self, window, textcallback=None, filecallback=None):
"""Initializes the Drop target
@param window: window to receive drop objects
@keyword textcallback: Callback for when text is dropped
@keyword filecallback: Callback for when file(s) are dropped
"""
super(DropTargetFT, self).__init__()
# Attributes
self.window = window
self._data = dict(data=None, fdata=None, tdata=None,
tcallb=textcallback, fcallb=filecallback)
self._tmp = None
self._lastp = None
# Setup
self.InitObjects()
def CreateDragString(self, txt):
"""Creates a bitmap of the text that is being dragged
@todo: possibly set colors to match highlighting of text
@todo: generalize this to be usable by other widgets besides stc
"""
if not isinstance(self.window, wx.stc.StyledTextCtrl):
return
stc = self.window
txt = txt.split(stc.GetEOLChar())
longest = (0, 0)
for line in txt:
ext = stc.GetTextExtent(line)
if ext[0] > longest[0]:
longest = ext
cords = [ (0, x * longest[1]) for x in range(len(txt)) ]
try:
mdc = wx.MemoryDC(wx.EmptyBitmap(longest[0] + 5,
longest[1] * len(txt), 32))
mdc.SetBackgroundMode(wx.TRANSPARENT)
mdc.SetTextForeground(stc.GetDefaultForeColour())
mdc.SetFont(stc.GetDefaultFont())
mdc.DrawTextList(txt, cords)
self._tmp = wx.DragImage(mdc.GetAsBitmap())
except wx.PyAssertionError, msg:
Log("[droptargetft][err] %s" % str(msg))
def InitObjects(self):
"""Initializes the text and file data objects
@postcondition: all data objects are initialized
"""
self._data['data'] = wx.DataObjectComposite()
self._data['tdata'] = wx.TextDataObject()
self._data['fdata'] = wx.FileDataObject()
self._data['data'].Add(self._data['tdata'], True)
self._data['data'].Add(self._data['fdata'], False)
self.SetDataObject(self._data['data'])
def OnEnter(self, x_cord, y_cord, drag_result):
"""Called when a drag starts
@param x_cord: x cord of enter point
@param y_cord: y cord of enter point
@param drag_result: wxDrag value
@return: result of drop object entering window
"""
# GetData seems to happen automatically on msw, calling it again
# causes this to fail the first time.
if wx.Platform in ['__WXGTK__', '__WXMSW__']:
return wx.DragCopy
if wx.Platform == '__WXMAC__':
try:
self.GetData()
except wx.PyAssertionError:
return wx.DragError
self._lastp = (x_cord, y_cord)
files = self._data['fdata'].GetFilenames()
text = self._data['tdata'].GetText()
if len(files):
self.window.SetCursor(wx.StockCursor(wx.CURSOR_COPY_ARROW))
else:
self.CreateDragString(text)
return drag_result
def OnDrop(self, x_cord=0, y_cord=0):
"""Gets the drop cords
@keyword x_cord: x cord of drop object
@keyword y_cord: y cord of drop object
@todo: implement snapback when drop is out of range
"""
self._tmp = None
self._lastp = None
return True
def OnDragOver(self, x_cord, y_cord, drag_result):
"""Called when the cursor is moved during a drag action
@param x_cord: x cord of mouse
@param y_cord: y cord of mouse
@param drag_result: Drag result value
@return: result of drag over
@todo: For some reason the caret position changes which can be seen
by the brackets getting highlighted. However the actual caret
is not moved.
"""
stc = self.window
if self._tmp is None:
if hasattr(stc, 'DoDragOver'):
val = stc.DoDragOver(x_cord, y_cord, drag_result)
self.ScrollBuffer(stc, x_cord, y_cord)
drag_result = wx.DragCopy
else:
# A drag image was created
if hasattr(stc, 'DoDragOver'):
point = wx.Point(x_cord, y_cord)
self._tmp.BeginDrag(point - self._lastp, stc)
self._tmp.Hide()
stc.DoDragOver(x_cord, y_cord, drag_result)
self._tmp.Move(point)
self._tmp.Show()
self._tmp.RedrawImage(self._lastp, point, True, True)
self._lastp = point
self.ScrollBuffer(stc, x_cord, y_cord)
drag_result = wx.DragCopy
return drag_result
def OnData(self, x_cord, y_cord, drag_result):
"""Gets and processes the dropped data
@param x_cord: x coordinate
@param y_cord: y coordinate
@param drag_result: wx Drag result value
@postcondition: dropped data is processed
"""
self.window.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
if self.window.HasCapture():
self.window.ReleaseMouse()
try:
data = self.GetData()
except wx.PyAssertionError:
wx.PostEvent(self.window.GetTopLevelParent(), \
ed_event.StatusEvent(ed_event.edEVT_STATUS, -1,
_("Unable to accept dropped file "
"or text")))
data = False
drag_result = wx.DragCancel
if data:
files = self._data['fdata'].GetFilenames()
text = self._data['tdata'].GetText()
if len(files) > 0 and self._data['fcallb'] is not None:
self._data['fcallb'](files)
elif len(text) > 0:
if self._data['tcallb'] is not None:
self._data['tcallb'](text)
elif hasattr(self.window, 'DoDropText'):
self.window.DoDropText(x_cord, y_cord, text)
self.InitObjects()
return drag_result
def OnLeave(self):
"""Handles the event of when the drag object leaves the window
@postcondition: Cursor is set back to normal state
"""
self.window.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
if self.window.HasCapture():
self.window.ReleaseMouse()
if self._tmp is not None:
try:
self._tmp.EndDrag()
except wx.PyAssertionError, msg:
Log("[droptargetft][err] %s" % str(msg))
@staticmethod
def ScrollBuffer(stc, x_cord, y_cord):
"""Scroll the buffer as the dragged text is moved towards the
ends.
@param stc: StyledTextCtrl
@param x_cord: int (x position)
@param y_cord: int (y position)
@note: currently does not work on wxMac
"""
try:
cline = stc.PositionFromPoint(wx.Point(x_cord, y_cord))
if cline != wx.stc.STC_INVALID_POSITION:
cline = stc.LineFromPosition(cline)
fline = stc.GetFirstVisibleLine()
lline = stc.GetLastVisibleLine()
if (cline - fline) < 2:
stc.ScrollLines(-1)
elif lline - cline < 2:
stc.ScrollLines(1)
else:
pass
except wx.PyAssertionError, msg:
Log("[droptargetft][err] ScrollBuffer: %s" % msg)
#---- End FileDropTarget ----#
class EdClipboard(ebmlib.CycleCache):
"""Local clipboard object
@todo: make into a singleton
"""
def GetNext(self):
"""Get the next item in the cache"""
# Initialize the clipboard if it hasn't been loaded yet and
# there is something in the system clipboard
if self.GetCurrentSize() == 0:
txt = GetClipboardText()
if txt is not None:
self.Put(txt)
return super(EdClipboard, self).GetNext()
def IsAtIndex(self, txt):
"""Is the passed in phrase at the current cycle index in the
cache. Used to check if index should be reset or to continue in
the cycle.
@param txt: selected text
"""
pre = self.PeekPrev()
next = self.PeekNext()
if txt in (pre, next):
return True
else:
return False
def Put(self, txt):
"""Put some text in the clipboard
@param txt: Text to put in the system clipboard
"""
pre = self.PeekPrev()
next = self.PeekNext()
if len(txt) and txt not in (pre, next):
self.PutItem(txt)
#---- Misc Common Function Library ----#
# Used for holding the primary selection on mac/msw
FAKE_CLIPBOARD = None
def GetClipboardText(primary=False):
"""Get the primary selection from the clipboard if there is one
@return: str or None
"""
if primary and wx.Platform == '__WXGTK__':
wx.TheClipboard.UsePrimarySelection(True)
elif primary:
# Fake the primary selection on mac/msw
global FAKE_CLIPBOARD
return FAKE_CLIPBOARD
else:
pass
text_obj = wx.TextDataObject()
rtxt = None
if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():
if wx.TheClipboard.GetData(text_obj):
rtxt = text_obj.GetText()
wx.TheClipboard.Close()
if primary and wx.Platform == '__WXGTK__':
wx.TheClipboard.UsePrimarySelection(False)
return rtxt
def SetClipboardText(txt, primary=False):
"""Copies text to the clipboard
@param txt: text to put in clipboard
@keyword primary: Set txt as primary selection (x11)
"""
# Check if using primary selection
if primary and wx.Platform == '__WXGTK__':
wx.TheClipboard.UsePrimarySelection(True)
elif primary:
# Fake the primary selection on mac/msw
global FAKE_CLIPBOARD
FAKE_CLIPBOARD = txt
return True
else:
pass
data_o = wx.TextDataObject()
data_o.SetText(txt)
if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():
wx.TheClipboard.SetData(data_o)
wx.TheClipboard.Close()
if primary and wx.Platform == '__WXGTK__':
wx.TheClipboard.UsePrimarySelection(False)
return True
else:
return False
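# Hedged usage sketch (not part of the original module): inside a running wx
# application with an accessible system clipboard the round trip is simply
#
#   SetClipboardText(u"hello")
#   assert GetClipboardText() == u"hello"
#
# while primary=True falls back to the module-level FAKE_CLIPBOARD on
# platforms without an X11 primary selection.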
def FilterFiles(file_list):
"""Filters a list of paths and returns a list of paths
that can probably be opened in the editor.
@param file_list: list of files/folders to filter for good files in
"""
good = list()
checker = ebmlib.FileTypeChecker()
for path in file_list:
if not checker.IsBinary(path):
good.append(path)
return good
def GetFileType(fname):
"""Get what the type of the file is as Editra sees it
in a formatted string.
@param fname: file path
@return: string (formatted/translated filetype)
"""
if os.path.isdir(fname):
return _("Folder")
eguess = syntax.GetTypeFromExt(fname.split('.')[-1])
if eguess == synglob.LANG_TXT and fname.split('.')[-1] == 'txt':
return _("Text Document")
elif eguess == synglob.LANG_TXT:
mtype = mimetypes.guess_type(fname)[0]
if mtype is not None:
return mtype
else:
return _("Unknown")
else:
return _("%s Source File") % eguess
def GetFileReader(file_name, enc='utf-8'):
"""Returns a file stream reader object for reading the
supplied file name. It returns a file reader using the encoding
(enc) which defaults to utf-8. If lookup of the reader fails on
the host system it will return an ascii reader.
If there is an error in creating the file reader the function
will return a negative number.
@param file_name: name of file to get a reader for
@keyword enc: encoding to use for reading the file
@return file reader, or int if error.
"""
try:
file_h = file(file_name, "rb")
except (IOError, OSError):
dev_tool.DEBUGP("[file_reader] Failed to open file %s" % file_name)
return -1
try:
reader = codecs.getreader(enc)(file_h)
except (LookupError, IndexError, ValueError):
dev_tool.DEBUGP('[file_reader] Failed to get %s Reader' % enc)
reader = file_h
return reader
def GetFileWriter(file_name, enc='utf-8'):
"""Returns a file stream writer object for reading the
supplied file name. It returns a file writer in the supplied
encoding if the host system supports it other wise it will return
an ascii reader. The default will try and return a utf-8 reader.
If there is an error in creating the file reader the function
will return a negative number.
@param file_name: path of file to get writer for
@keyword enc: encoding to write text to file with
"""
try:
file_h = open(file_name, "wb")
except IOError:
dev_tool.DEBUGP("[file_writer][err] Failed to open file %s" % file_name)
return -1
try:
writer = codecs.getwriter(enc)(file_h)
except (LookupError, IndexError, ValueError):
dev_tool.DEBUGP('[file_writer][err] Failed to get %s Writer' % enc)
writer = file_h
return writer
# TODO: DEPRECATED - remove once callers migrate to ebmlib
GetFileManagerCmd = ebmlib.GetFileManagerCmd
def GetUserConfigBase():
"""Get the base user configuration directory path"""
cbase = ed_glob.CONFIG['CONFIG_BASE']
if cbase is None:
cbase = wx.StandardPaths_Get().GetUserDataDir()
if wx.Platform == '__WXGTK__':
if u'.config' not in cbase and not os.path.exists(cbase):
# If no existing configuration return xdg config path
base, cfgdir = os.path.split(cbase)
tmp_path = os.path.join(base, '.config')
if os.path.exists(tmp_path):
cbase = os.path.join(tmp_path, cfgdir.lstrip(u'.'))
return cbase + os.sep
def HasConfigDir(loc=u""):
""" Checks if the user has a config directory and returns True
if the config directory exists or False if it does not.
@return: whether config dir in question exists on an expected path
"""
cbase = GetUserConfigBase()
to_check = os.path.join(cbase, loc)
return os.path.exists(to_check)
def MakeConfigDir(name):
"""Makes a user config directory
@param name: name of config directory to make in user config dir
"""
cbase = GetUserConfigBase()
try:
os.mkdir(cbase + name)
except (OSError, IOError):
pass
def RepairConfigState(path):
"""Repair the state of profile path, updating and creating it
it does not exist.
@param path: path of profile
"""
if os.path.isabs(path) and os.path.exists(path):
return path
else:
# Need to fix some stuff up
CreateConfigDir()
import profiler
return profiler.Profile_Get("MYPROFILE")
def CreateConfigDir():
""" Creates the user config directory its default sub
directories and any of the default config files.
@postcondition: all default configuration files/folders are created
"""
#---- Resolve Paths ----#
config_dir = GetUserConfigBase()
profile_dir = os.path.join(config_dir, u"profiles")
dest_file = os.path.join(profile_dir, u"default.ppb")
ext_cfg = [u"cache", u"styles", u"plugins"]
#---- Create Directories ----#
if not os.path.exists(config_dir):
os.mkdir(config_dir)
if not os.path.exists(profile_dir):
os.mkdir(profile_dir)
for cfg in ext_cfg:
if not HasConfigDir(cfg):
MakeConfigDir(cfg)
import profiler
profiler.TheProfile.LoadDefaults()
profiler.Profile_Set("MYPROFILE", dest_file)
profiler.TheProfile.Write(dest_file)
profiler.UpdateProfileLoader()
def ResolvConfigDir(config_dir, sys_only=False):
"""Checks for a user config directory and if it is not
found it then resolves the absolute path of the executables
directory from the relative execution path. This is then used
to find the location of the specified directory as it relates
to the executable directory, and returns that path as a
string.
@param config_dir: name of config directory to resolve
@keyword sys_only: only get paths of system config directory or user one
@note: This method is probably much more complex than it needs to be but
the code has proven itself.
"""
# Try to get a User config directory
if not sys_only:
user_config = GetUserConfigBase()
user_config = os.path.join(user_config, config_dir)
if os.path.exists(user_config):
return user_config + os.sep
# Check if the system install path has already been resolved once before
if ed_glob.CONFIG['INSTALL_DIR'] != u"":
tmp = os.path.join(ed_glob.CONFIG['INSTALL_DIR'], config_dir)
tmp = os.path.normpath(tmp) + os.sep
if os.path.exists(tmp):
return tmp
else:
del tmp
# The following lines are used only when Editra is being run as a
# source package. If the found path does not exist then Editra is
# running as a built package.
if not hasattr(sys, 'frozen'):
path = __file__
if not ebmlib.IsUnicode(path):
path = path.decode(sys.getfilesystemencoding())
path = os.sep.join(path.split(os.sep)[:-2])
path = path + os.sep + config_dir + os.sep
if os.path.exists(path):
if not ebmlib.IsUnicode(path):
path = unicode(path, sys.getfilesystemencoding())
return path
# If we get here we need to do some platform dependent lookup
# to find everything.
path = sys.argv[0]
if not ebmlib.IsUnicode(path):
path = unicode(path, sys.getfilesystemencoding())
# If it is a link get the real path
if os.path.islink(path):
path = os.path.realpath(path)
# Tokenize path
pieces = path.split(os.sep)
if wx.Platform == u'__WXMSW__':
# On Windows the exe is in same dir as config directories
pro_path = os.sep.join(pieces[:-1])
if os.path.isabs(pro_path):
pass
elif pro_path == u"":
pro_path = os.getcwd()
pieces = pro_path.split(os.sep)
pro_path = os.sep.join(pieces[:-1])
else:
pro_path = os.path.abspath(pro_path)
elif wx.Platform == u'__WXMAC__':
# On OS X the config directories are in the applet under Resources
stdpath = wx.StandardPaths_Get()
pro_path = stdpath.GetResourcesDir()
pro_path = os.path.join(pro_path, config_dir)
else:
pro_path = os.sep.join(pieces[:-2])
if pro_path.startswith(os.sep):
pass
elif pro_path == u"":
pro_path = os.getcwd()
pieces = pro_path.split(os.sep)
if pieces[-1] not in [ed_glob.PROG_NAME.lower(), ed_glob.PROG_NAME]:
pro_path = os.sep.join(pieces[:-1])
else:
pro_path = os.path.abspath(pro_path)
if wx.Platform != u'__WXMAC__':
pro_path = pro_path + os.sep + config_dir + os.sep
path = os.path.normpath(pro_path) + os.sep
# Make sure path is unicode
if not ebmlib.IsUnicode(path):
path = unicode(path, sys.getdefaultencoding())
return path
def GetResources(resource):
"""Returns a list of resource directories from a given toplevel config dir
@param resource: config directory name
@return: list of resource directory that exist under the given resource path
"""
rec_dir = ResolvConfigDir(resource)
if os.path.exists(rec_dir):
rec_lst = [ rec.title() for rec in os.listdir(rec_dir)
if os.path.isdir(rec_dir + rec) and rec[0] != u"." ]
return rec_lst
else:
return -1
def GetResourceFiles(resource, trim=True, get_all=False,
suffix=None, title=True):
"""Gets a list of resource files from a directory and trims the
file extensions from the names if trim is set to True (default).
If the get_all parameter is set to True the function will return
a set of unique items by looking up both the user and system level
files and combining them, the default behavior returns the user
level files if they exist or the system level files if the
user ones do not exist.
@param resource: name of config directory to look in (i.e. cache)
@keyword trim: trim file extensions or not
@keyword get_all: get a set of both system/user files or just user level
@keyword suffix: Get files that have the specified suffix or all (default)
@keyword title: Title-case the results
"""
rec_dir = ResolvConfigDir(resource)
if get_all:
rec_dir2 = ResolvConfigDir(resource, True)
rec_list = list()
if not os.path.exists(rec_dir):
return -1
else:
recs = os.listdir(rec_dir)
if get_all and os.path.exists(rec_dir2):
recs.extend(os.listdir(rec_dir2))
for rec in recs:
if os.path.isfile(rec_dir + rec) or \
(get_all and os.path.isfile(rec_dir2 + rec)):
# If a suffix was specified only keep files that match
if suffix is not None:
if not rec.endswith(suffix):
continue
# Trim the last part of an extension if one exists
if trim:
rec = ".".join(rec.split(u".")[:-1]).strip()
# Make the resource name a title if requested
if title and len(rec):
rec = rec[0].upper() + rec[1:]
if len(rec):
rec_list.append(rec)
rec_list.sort()
return list(set(rec_list))
def GetAllEncodings():
"""Get all encodings found on the system
@return: list of strings
"""
elist = encodings.aliases.aliases.values()
elist = list(set(elist))
elist.sort()
elist = [ enc for enc in elist if not enc.endswith('codec') ]
return elist
def Log(msg, *args):
"""Push the message to the apps log
@param msg: message string to log
@param args: optional positional arguments to use as printf-style
formatting for the message.
"""
try:
wx.GetApp().GetLog()(msg, args)
except:
pass
def GetProxyOpener(proxy_set):
"""Get a urlopener for use with a proxy
@param proxy_set: proxy settings to use
"""
Log("[util][info] Making proxy opener with %s" % str(proxy_set))
proxy_info = dict(proxy_set)
auth_str = "%(uname)s:%(passwd)s@%(url)s"
url = proxy_info['url']
if url.startswith('http://'):
auth_str = "http://" + auth_str
proxy_info['url'] = url.replace('http://', '')
else:
pass
if len(proxy_info.get('port', '')):
auth_str = auth_str + ":%(port)s"
proxy_info['passwd'] = ed_crypt.Decrypt(proxy_info['passwd'],
proxy_info['pid'])
Log("[util][info] Formatted proxy request: %s" % \
(auth_str.replace('%(passwd)s', '****') % proxy_info))
proxy = urllib2.ProxyHandler({"http" : auth_str % proxy_info})
opener = urllib2.build_opener(proxy, urllib2.HTTPHandler)
return opener
#---- GUI helper functions ----#
def SetWindowIcon(window):
"""Sets the given windows icon to be the programs
application icon.
@param window: window to set app icon for
"""
try:
if wx.Platform == "__WXMSW__":
ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.ico"
window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_ICO))
else:
ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png"
window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_PNG))
finally:
pass
#-----------------------------------------------------------------------------#
class IntValidator(wx.PyValidator):
"""A Generic integer validator"""
def __init__(self, min_=0, max_=0):
"""Initialize the validator
@keyword min_: min value to accept
@keyword max_: max value to accept
"""
wx.PyValidator.__init__(self)
self._min = min_
self._max = max_
# Event management
self.Bind(wx.EVT_CHAR, self.OnChar)
def Clone(self):
"""Clones the current validator
@return: clone of this object
"""
return IntValidator(self._min, self._max)
def Validate(self, win):
"""Validate an window value
@param win: window to validate
"""
val = win.GetValue()
return val.isdigit()
def OnChar(self, event):
"""Process values as they are entered into the control
@param event: event that called this handler
"""
key = event.GetKeyCode()
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or \
key > 255 or chr(key) in '0123456789':
event.Skip()
return
if not wx.Validator_IsSilent():
wx.Bell()
return
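A small usage sketch for the validator defined above; the field name, default value, and range are illustrative assumptions rather than Editra code, and a running wx.App plus a parent window are presumed.
# Usage sketch (assumption, not Editra code): attach the validator to a text
# field so only digits are accepted, e.g. for a port number entry.
def MakePortField(parent):
    """Create a digits-only text control; assumes a running wx.App"""
    return wx.TextCtrl(parent, value=u"8080", validator=IntValidator(0, 65535))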
| 33.891414
| 80
| 0.595671
| 10,199
| 0.379964
| 0
| 0
| 910
| 0.033902
| 0
| 0
| 10,834
| 0.403621
|
12e061c5c6e2f04c0f2228f70f6bcd0e8dd58774
| 1,105
|
py
|
Python
|
genrl/environments/vec_env/utils.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 390
|
2020-05-03T17:34:02.000Z
|
2022-03-05T11:29:07.000Z
|
genrl/environments/vec_env/utils.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 306
|
2020-05-03T05:53:53.000Z
|
2022-03-12T00:27:28.000Z
|
genrl/environments/vec_env/utils.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 64
|
2020-05-05T20:23:30.000Z
|
2022-03-30T08:43:10.000Z
|
from typing import Tuple
import torch
class RunningMeanStd:
"""
Utility class to compute a running mean and variance
:param epsilon: Small number to prevent division by zero for calculations
:param shape: Shape of the RMS object
:type epsilon: float
:type shape: Tuple
"""
def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
self.mean = torch.zeros(shape).double()
self.var = torch.ones(shape).double()
self.count = epsilon
def update(self, batch: torch.Tensor):
batch_mean = torch.mean(batch, axis=0)
batch_var = torch.var(batch, axis=0)
batch_count = batch.shape[0]
total_count = self.count + batch_count
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / total_count
M2 = (
self.var * self.count
+ batch_var * batch_count
+ (delta ** 2) * self.count * batch_count / total_count
)
self.mean = new_mean
self.var = M2 / (total_count - 1)
self.count = total_count
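A brief usage sketch, not part of the original file: it feeds a few batches of synthetic returns through the running estimator and uses the result for normalisation; the batch sizes and distribution are arbitrary assumptions.
if __name__ == "__main__":
    # Illustrative only: maintain running statistics over batches and use them
    # to normalise a new sample (torch is imported at the top of this module).
    rms = RunningMeanStd(shape=())
    for _ in range(10):
        returns = torch.randn(128).double() * 5.0 + 2.0
        rms.update(returns)
    sample = torch.randn(128).double() * 5.0 + 2.0
    normalised = (sample - rms.mean) / torch.sqrt(rms.var + 1e-8)
    print(rms.mean.item(), rms.var.item(), normalised.std().item())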
| 28.333333
| 77
| 0.611765
| 1,063
| 0.961991
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.227149
|
12e064fd8ee7774d0bfca223891f1c72e7cca90f
| 2,752
|
py
|
Python
|
releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py
|
sumitneup/pota
|
a1d7a59b5ca29813d8b7f3fa77cca0a47404b785
|
[
"MIT"
] | null | null | null |
releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py
|
sumitneup/pota
|
a1d7a59b5ca29813d8b7f3fa77cca0a47404b785
|
[
"MIT"
] | null | null | null |
releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py
|
sumitneup/pota
|
a1d7a59b5ca29813d8b7f3fa77cca0a47404b785
|
[
"MIT"
] | null | null | null |
import mtoa.ui.ae.templates as templates
import pymel.core as pm
import maya.cmds as cmds
import mtoa.ui.ae.utils as aeUtils
class aiPotaTemplate(templates.AttributeTemplate):
"""
def filenameEditBokeh(self, mData) :
attr = self.nodeAttr('aiBokehEXRPath')
cmds.setAttr(attr,mData,type="string")
def LoadFilenameButtonPushBokeh(self, *args):
basicFilter = 'All Files (*.*)'
ret = cmds.fileDialog2(fileFilter=basicFilter, dialogStyle=2, cap='Select sample_bokeh file location',fm=0)
if ret is not None and len(ret):
self.filenameEditBokeh(ret[0])
cmds.textFieldButtonGrp("filenameBokehGrp", edit=True, text=ret[0])
def filenameNewBokeh(self, nodeName):
path = cmds.textFieldButtonGrp("filenameBokehGrp", label="Bokeh AOV EXR path", changeCommand=self.filenameEditBokeh, width=300)
cmds.textFieldButtonGrp(path, edit=True, text=cmds.getAttr(nodeName))
cmds.textFieldButtonGrp(path, edit=True, buttonLabel="...",
buttonCommand=self.LoadFilenameButtonPushBokeh)
def filenameReplaceBokeh(self, nodeName):
cmds.textFieldButtonGrp("filenameBokehGrp", edit=True, text=cmds.getAttr(nodeName) )
"""
def setup(self):
self.beginLayout("Polynomial Optics", collapse=False)
self.addControl("aiLensModel", label="Lens Model")
self.addControl("aiSensorWidth", label="Sensor Width (mm)")
self.addControl("aiWavelength", label="Wavelength (nm)")
self.addControl("aiDof", label="Enable depth of field")
self.addControl("aiFstop", label="F-stop")
self.addControl("aiFocalDistance", label="Focus distance (cm)")
self.addControl("aiExtraSensorShift", label="Extra Sensor shift (mm)")
self.addControl("aiVignettingRetries", label="Vignetting retries")
self.addControl("aiApertureBlades", label="Aperture blades")
self.addControl("aiProperRayDerivatives", label="Proper Ray Derivatives")
# add these in the aovshader template instead
# self.suppress('normalCamera')
# self.suppress('hardwareColor')
self.endLayout()
"""
self.addSeparator()
self.addSeparator()
self.addSeparator()
self.addSeparator()
self.addSeparator()
self.addSeparator()
self.beginLayout("AOV shader", collapse=False)
self.addControl("aiBackwardSamples", label="Backwards samples")
self.addControl("aiMinimumRgb", label="Minimum RGB")
self.addCustom("aiBokehEXRPath", self.filenameNewBokeh, self.filenameReplaceBokeh)
self.endLayout()
"""
templates.registerTranslatorUI(aiPotaTemplate, "camera", "pota")
| 39.884058
| 135
| 0.673328
| 2,558
| 0.929506
| 0
| 0
| 0
| 0
| 0
| 0
| 2,013
| 0.731468
|
12e101d3d1c0a3624036d3fc55bbec2095eca800
| 2,690
|
py
|
Python
|
tests/test_user.py
|
munniomer/Send-IT-Api-v1
|
17041c987638c7e47c7c2ebed29bf7e2b5156bed
|
[
"CNRI-Python",
"OML"
] | null | null | null |
tests/test_user.py
|
munniomer/Send-IT-Api-v1
|
17041c987638c7e47c7c2ebed29bf7e2b5156bed
|
[
"CNRI-Python",
"OML"
] | null | null | null |
tests/test_user.py
|
munniomer/Send-IT-Api-v1
|
17041c987638c7e47c7c2ebed29bf7e2b5156bed
|
[
"CNRI-Python",
"OML"
] | 1
|
2019-02-05T07:44:19.000Z
|
2019-02-05T07:44:19.000Z
|
import unittest
from app import create_app
import json
from tests.basetest import BaseTest
class TestUSer(BaseTest):
"""User tests class"""
def test_user_registration(self):
"tests if new user can register"
respon = self.client.post("/api/v1/user/register", json=self.new_user)
self.assertEqual(respon.status_code, 201)
def test_if_name_city_valid(self):
"""Tests if names and city are valid"""
respon = self.client.post(
"/api/v1/user/register", json=self.new_user1, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('PLease check if your fname, lname or city is empty or contains numbers',
str(respon.data))
def test_if_email_valid(self):
"""Tests if email is valid"""
respon = self.client.post(
"/api/v1/user/register", json=self.new_user2, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('Please enter a valid emai',
str(respon.data))
def test_if_email_exist(self):
"""Tests if email is valid"""
self.client.post(
"/api/v1/user/register", json=self.new_user6, content_type='application/json')
respon = self.client.post(
"/api/v1/user/register", json=self.new_user6, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('That email exists. use a unique email',
str(respon.data))
def test_if_phone_valid(self):
"""Tests if email is exists"""
respon = self.client.post(
"/api/v1/user/register", json=self.new_user3, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('Please enter a valid phone number ',
str(respon.data))
def test_if_password_valid(self):
"""Tests if passwords are empty or less than 3"""
respon = self.client.post(
"/api/v1/user/register", json=self.new_user4, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('Please check if your password or confirm password are empty or less than 3',
str(respon.data))
def test_if_password_match(self):
"""Tests if passwords match"""
respon = self.client.post(
"/api/v1/user/register", json=self.new_user5, content_type='application/json')
self.assertEqual(respon.status_code, 400)
self.assertIn('confirm password does not match password',
str(respon.data))
| 42.03125
| 99
| 0.637546
| 2,596
| 0.965056
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.320446
|
12e130d67ccfe9de3b3564473e4a39882ddb1111
| 583
|
py
|
Python
|
authors/apps/profiles/migrations/0023_auto_20190124_1222.py
|
andela/ah-django-unchained
|
a4e5f6cd11fdc0b9422020693ac1200b849cf0f3
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/profiles/migrations/0023_auto_20190124_1222.py
|
andela/ah-django-unchained
|
a4e5f6cd11fdc0b9422020693ac1200b849cf0f3
|
[
"BSD-3-Clause"
] | 26
|
2019-01-07T14:22:05.000Z
|
2019-02-28T17:11:48.000Z
|
authors/apps/profiles/migrations/0023_auto_20190124_1222.py
|
andela/ah-django-unchained
|
a4e5f6cd11fdc0b9422020693ac1200b849cf0f3
|
[
"BSD-3-Clause"
] | 3
|
2019-09-19T22:16:09.000Z
|
2019-10-16T21:16:16.000Z
|
# Generated by Django 2.1.4 on 2019-01-24 12:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0022_auto_20190123_1211'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='first_name',
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='userprofile',
name='last_name',
field=models.CharField(blank=True, max_length=100),
),
]
| 24.291667
| 63
| 0.595197
| 490
| 0.84048
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.2247
|
12e2d80d29d4efd869955ca94be7cd962776dc80
| 811
|
py
|
Python
|
Algorithm/ShellSort/pyShellSort.py
|
commanderHR1/algorithms
|
d077364e8b08ae2b7b93bc01a73f622421086365
|
[
"MIT"
] | 1
|
2020-07-17T20:49:55.000Z
|
2020-07-17T20:49:55.000Z
|
Algorithm/ShellSort/pyShellSort.py
|
commanderHR1/algorithms
|
d077364e8b08ae2b7b93bc01a73f622421086365
|
[
"MIT"
] | null | null | null |
Algorithm/ShellSort/pyShellSort.py
|
commanderHR1/algorithms
|
d077364e8b08ae2b7b93bc01a73f622421086365
|
[
"MIT"
] | null | null | null |
# Implementation of Shell Sort algorithm in Python
def shellSort(arr):
interval = 1
# Initializes interval
while (interval < (len(arr) // 3)):
interval = (interval * 3) + 1
while (interval > 0):
for i in range(interval, len(arr)):
# Select val to be inserted
val = arr[i]
j = i
# Shift element right
while ((j > interval - 1) and (arr[j - interval] >= val)):
arr[j] = arr[j - interval]
j -= interval
# Insert val at hole position
arr[j] = val
# Reduce the interval; integer division keeps it usable with range()
interval = (interval - 1) // 3
l = [4, 1, 2, 5, 3]
print("Initial list: " + str(l))
shellSort(l)
print("Sorted list: " + str(l))
| 26.16129
| 70
| 0.477189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.246609
|
12e5018fbac310b4e1d16e7744a8549158b1a76a
| 1,943
|
py
|
Python
|
photos/models.py
|
benjaminbills/galleria
|
4c89f265a2f4f853a5685828d5bc505b51b9bb74
|
[
"MIT"
] | null | null | null |
photos/models.py
|
benjaminbills/galleria
|
4c89f265a2f4f853a5685828d5bc505b51b9bb74
|
[
"MIT"
] | null | null | null |
photos/models.py
|
benjaminbills/galleria
|
4c89f265a2f4f853a5685828d5bc505b51b9bb74
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Image(models.Model):
name = models.CharField(max_length=255)
posted_date = models.DateTimeField(auto_now_add=True)
image_description = models.CharField(max_length=500, default='DEFAULT VALUE')
image = models.ImageField(upload_to='images/', blank=True)
location = models.ForeignKey('Location', on_delete=models.CASCADE, default=0)
category = models.ForeignKey('Category', on_delete=models.CASCADE, default=0)
def __str__(self):
return self.name
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def update_image(cls,image_id):
image=cls.objects.filter(pk=image_id)
image.update()
return image
@classmethod
def search_by_category(cls,search_term):
images = cls.objects.filter(category__name__contains=search_term)
return images
@classmethod
def search_by_location(cls,search_term):
images = cls.objects.filter(location__name__contains=search_term)
return images
@classmethod
def get_image_by_id(cls,id):
image=cls.objects.get(id=id)
return image
class Location(models.Model):
name = models.CharField(max_length=255)
#magic method
def __str__(self):
return self.name
def save_location(self):
self.save()
def delete_location(self):
self.delete()
@classmethod
def update_location(cls, id, name):
location = cls.objects.filter(pk=id).update(name=name)
return location
class Category(models.Model):
name = models.CharField(max_length=255)
#magic method
def __str__(self):
return self.name
def save_category(self):
self.save()
def delete_category(self):
self.delete()
@classmethod
def update_category(cls, id, name):
category = cls.objects.filter(pk=id).update(name=name)
return category
| 26.986111
| 79
| 0.691199
| 1,879
| 0.967061
| 0
| 0
| 799
| 0.41122
| 0
| 0
| 96
| 0.049408
|
12e533fd59ecf8d6a32514514fcb290ff13e6ec1
| 1,322
|
py
|
Python
|
main.py
|
kramrm/gcf-alerting-discord
|
c73d88520a783f9c4d12099bb8e21f03a950eebc
|
[
"MIT"
] | null | null | null |
main.py
|
kramrm/gcf-alerting-discord
|
c73d88520a783f9c4d12099bb8e21f03a950eebc
|
[
"MIT"
] | null | null | null |
main.py
|
kramrm/gcf-alerting-discord
|
c73d88520a783f9c4d12099bb8e21f03a950eebc
|
[
"MIT"
] | null | null | null |
import base64
import json
from webhook import post_webhook
from datetime import datetime
def hello_pubsub(event, context):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
pubsub_message = base64.b64decode(event['data']).decode('utf-8')
#post_webhook(message=f'{pubsub_message}', timestamp='now', status='status', title='title')
message = json.loads(pubsub_message)
message = message['incident']
#post_webhook(message, timestamp, status, title='Monitoring'):
null = None
status = 'Status'
log_message = ''
title = 'Monitoring Alert'
status = message['state'].title()
timestamp = datetime.utcfromtimestamp(message["started_at"]).isoformat()
log_message += f'Started: {timestamp} UTC'
color = 16772608
if message['ended_at'] is not None:
timestamp = datetime.utcfromtimestamp(message["ended_at"]).isoformat()
log_message += f'\nEnded: {timestamp} UTC'
color = 65297
title = message['policy_name']
log_message += f'\n{message["summary"]}'
log_message += f'\n[Monitor Event]({message["url"]})'
post_webhook(message=log_message, timestamp=timestamp, status=status, title=title, color=color)
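A local smoke-test sketch, not part of the deployed function: it builds the kind of Pub/Sub envelope the handler decodes, using made-up incident values; it only runs where webhook.post_webhook is importable.
if __name__ == "__main__":
    # Illustrative payload only; field values are invented, and the call still
    # posts through webhook.post_webhook, so run it only inside this package.
    import time
    incident = {
        "incident": {
            "state": "open",
            "started_at": int(time.time()),
            "ended_at": None,
            "policy_name": "High CPU",
            "summary": "CPU utilization above 90% for 5 minutes",
            "url": "https://console.cloud.google.com/monitoring",
        }
    }
    event = {"data": base64.b64encode(json.dumps(incident).encode("utf-8"))}
    hello_pubsub(event, context=None)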
| 38.882353
| 99
| 0.683812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.419818
|
12e58dae1b7214722dcef0c29dfe11fbbf4c0b51
| 358
|
py
|
Python
|
libzyre.py
|
brettviren/wafit
|
39e9f2748c095dc4c3421a5de0f10f300d8da30b
|
[
"BSD-3-Clause"
] | null | null | null |
libzyre.py
|
brettviren/wafit
|
39e9f2748c095dc4c3421a5de0f10f300d8da30b
|
[
"BSD-3-Clause"
] | null | null | null |
libzyre.py
|
brettviren/wafit
|
39e9f2748c095dc4c3421a5de0f10f300d8da30b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env waf
'''
This is a wafit tool for using zyre
'''
import util
def options(opt):
opt.load("libczmq")
util.generic_options(opt, "libzyre", libs=False)
def configure(cfg):
cfg.load("libczmq")
util.generic_configure_incs(cfg, "libzyre", "zyre.h", "libczmq")
util.generic_configure_libs(cfg, "libzyre", "zyre", "libczmq")
| 21.058824
| 68
| 0.667598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.385475
|
12e5fe65e4d8ed7a4606ea760b1a56fc1a8485e1
| 6,226
|
py
|
Python
|
scripts/run-gmm.py
|
vr100/nfl-kaggle
|
74386b672ef4bb894bdf943df866855c4b555ede
|
[
"MIT"
] | null | null | null |
scripts/run-gmm.py
|
vr100/nfl-kaggle
|
74386b672ef4bb894bdf943df866855c4b555ede
|
[
"MIT"
] | null | null | null |
scripts/run-gmm.py
|
vr100/nfl-kaggle
|
74386b672ef4bb894bdf943df866855c4b555ede
|
[
"MIT"
] | null | null | null |
import argparse, os, fnmatch, json, joblib
import pandas as pd
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_rand_score
# Reference paper - https://arxiv.org/abs/1906.11373
# "Unsupervised Methods for Identifying Pass Coverage Among Defensive Backs with NFL Player Tracking Data"
STATS_PREFIX = "week"
SKIP_COLS_KEY = "global_skip_cols"
ONLY_CLOSEST_KEY = "only_closest"
CLOSE_TO_BR_KEY = "close_to_br"
SELECT_GROUP_KEY = "select_group_by"
GROUP_BY = ["gameId", "playId"]
MAX_COL = "closest_frames"
def run_gmm_for_g_and_k(file_data, g, k, skip_cols, only_closest, close_to_br):
file_count = len(file_data)
data = pd.DataFrame()
for j in range(file_count):
if j == k:
continue
data = data.append(file_data[j], ignore_index=True)
if only_closest == 1:
data = data.loc[data.groupby(GROUP_BY)[MAX_COL].idxmax()].reset_index(
drop=True)
elif len(close_to_br) != 0:
data = data[data[CLOSE_TO_BR_KEY].isin(close_to_br)]
x = data.drop(skip_cols, axis = 1).dropna()
gmm = GaussianMixture(n_components=g,
covariance_type="full", max_iter=1000)
gmm = gmm.fit(x)
x_k = file_data[k].drop(skip_cols, axis = 1).dropna()
gmm_k = GaussianMixture(n_components=g,
covariance_type="full", max_iter=1000)
gmm_k = gmm_k.fit(x_k)
# predict clusters for the held-out week k with both models
y = gmm.predict(x_k)
y_k = gmm_k.predict(x_k)
ari = adjusted_rand_score(y, y_k)
# return the computed ari and gmm (skipping k)
return (ari, gmm)
def run_gmm_for_group_count(file_data, group_count, config):
print("Running gmm for group count {}".format(group_count))
ari = []
gmm = []
file_count = len(file_data)
for k in range(file_count):
# print("Running gmm by leaving out index {}".format(k))
(ari_k, gmm_k) = run_gmm_for_g_and_k(file_data, group_count, k,
config[SKIP_COLS_KEY], config[ONLY_CLOSEST_KEY],
config[CLOSE_TO_BR_KEY])
ari.append(ari_k)
gmm.append(gmm_k)
ari_max_index = ari.index(max(ari))
ari_max = ari[ari_max_index]
gmm_max = gmm[ari_max_index]
ari_sum = sum(ari)
result = {
"lowo_index": ari_max_index,
"max_ari": ari_max,
"total_ari": ari_sum,
"gmm": gmm_max
}
return result
def run_gmm_feature_influence(file_data, group_count, skip_lowo, config):
print("Running gmm for group {}, skipping lowo index: {}".format(
group_count, skip_lowo))
if len(file_data) == 0:
return
global_skip_cols = config[SKIP_COLS_KEY]
cols = set(file_data[0].columns) - set(global_skip_cols)
result = {}
for c in cols:
print("Skipping feature {}".format(c))
skip_cols = global_skip_cols + [c]
ari_c, gmm_c = run_gmm_for_g_and_k(file_data, group_count, skip_lowo,
skip_cols, config[ONLY_CLOSEST_KEY], config[CLOSE_TO_BR_KEY])
result[c] = {
"ari": ari_c,
"gmm": gmm_c
}
return result
def save_results(output_folder, gmms, selected_g, influence_aris, config):
groups = sorted(gmms.keys())
gmm_result = {}
for g in groups:
gmm_result[g] = {k: gmms[g][k] for k in gmms[g].keys() - {"gmm"}}
selected_result = { **gmm_result[selected_g] }
selected_result["group_count"] = selected_g
selected_result["selection_key"] = config[SELECT_GROUP_KEY]
if config[ONLY_CLOSEST_KEY] == 1:
selected_result[ONLY_CLOSEST_KEY] = config[ONLY_CLOSEST_KEY]
else:
selected_result[CLOSE_TO_BR_KEY] = config[CLOSE_TO_BR_KEY]
influence_result = {
"group_count": selected_g,
"lowo_index": selected_result["lowo_index"],
"ari_with_all_features": selected_result["max_ari"]
}
feature_result = {}
influences = {}
ari_with_all = selected_result["max_ari"]
for feature in influence_aris:
ari = influence_aris[feature]["ari"]
influences[feature] = {
"influence": ari_with_all - ari,
"ari": ari
}
feature_result = dict(sorted(influences.items(),
key=lambda item: item[1]["influence"], reverse=True))
influence_result["feature_data"] = feature_result
output = {
"group_data": gmm_result,
"selected_group": selected_result,
"feature_influence": influence_result
}
output_path = os.path.join(output_folder, "results.json")
json_data = json.dumps(output, indent=2)
with open(output_path, "w") as output_file:
output_file.write(json_data)
print("Result saved to {}".format(output_path))
output_path = os.path.join(output_folder, "config.json")
json_data = json.dumps(config, indent=2)
with open(output_path, "w") as output_file:
output_file.write(json_data)
print("Config saved to {}".format(output_path))
selected_gmm = gmms[selected_g]["gmm"]
gmm_path = os.path.join(output_folder, "gmm.joblib")
joblib.dump(selected_gmm, gmm_path)
print("GMM model saved to {}".format(gmm_path))
def run_gmm(data_folder, output_folder, config):
stats_files = fnmatch.filter(os.listdir(data_folder), "{}*.csv".format(
STATS_PREFIX))
file_data = []
for sf in stats_files:
print("Working on file {} ...".format(sf))
input_file = os.path.join(data_folder, sf)
stats_data = pd.read_csv(input_file)
file_data.append(stats_data)
gmm_groups = {}
for g in range(config["group_min"], config["group_max"] + 1):
result = run_gmm_for_group_count(file_data, g, config)
gmm_groups[g] = result
group_key = config[SELECT_GROUP_KEY]
selected_group = max(gmm_groups, key= lambda x: gmm_groups[x][group_key])
gmm_influence_result = run_gmm_feature_influence(file_data, selected_group,
gmm_groups[selected_group]["lowo_index"], config)
save_results(output_folder, gmm_groups, selected_group,
gmm_influence_result, config)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path", type=str, help="specifies the folder containing data files",
required=True)
parser.add_argument(
"--config_path", type=str, help="specifies the json config file",
required=True)
parser.add_argument(
"--output_path", type=str, help="specifies the output folder path",
required=True)
return vars(parser.parse_args())
def main():
args = parse_args()
print("Args: {}".format(args))
data_path = os.path.abspath(args["data_path"])
config_path = os.path.abspath(args["config_path"])
output_path = os.path.abspath(args["output_path"])
with open(config_path) as f:
config = json.load(f)
print("Config: {}".format(config))
run_gmm(data_path, output_path, config)
main()
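For orientation, a sketch of the config file the script expects, inferred from the keys read above; the group bounds, selection key, and skipped columns are illustrative assumptions, not a file shipped with the project.
# Illustrative config writer (assumption inferred from the keys read above).
import json

example_config = {
    "group_min": 2,                  # smallest number of GMM components to try
    "group_max": 8,                  # largest number of GMM components to try
    "select_group_by": "total_ari",  # or "max_ari"; key used to pick the group count
    "global_skip_cols": ["gameId", "playId", "closest_frames"],
    "only_closest": 1,               # keep only the row with max closest_frames per play
    "close_to_br": [],               # used only when only_closest != 1
}

with open("config.json", "w") as handle:
    json.dump(example_config, handle, indent=2)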
| 32.092784
| 106
| 0.734661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,166
| 0.187279
|
12e658cebecc095f8910cdb95d0ccbd190f22eff
| 106
|
py
|
Python
|
module01/classes/class06b.py
|
LauroHBrant/python-course
|
2154181ca4b684b0d1fa635706bcb1647a753bc3
|
[
"MIT"
] | 2
|
2021-01-07T23:59:36.000Z
|
2021-01-18T00:23:52.000Z
|
module01/classes/class06b.py
|
LauroHBrant/python-course
|
2154181ca4b684b0d1fa635706bcb1647a753bc3
|
[
"MIT"
] | null | null | null |
module01/classes/class06b.py
|
LauroHBrant/python-course
|
2154181ca4b684b0d1fa635706bcb1647a753bc3
|
[
"MIT"
] | null | null | null |
from style import blue, none
n = input(f'Type {blue}something{none}: ')
print(f'{blue}{n.isnumeric()}')
| 17.666667
| 42
| 0.669811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.518868
|
12e805833151bd1898679d1e39b89a2e7fde7f1c
| 2,600
|
py
|
Python
|
custom_components/panasonic_cc/__init__.py
|
shyne99/panasonic_cc
|
ec7912e4067ebd0c08ea2a16c123c50d69a2fca6
|
[
"MIT"
] | null | null | null |
custom_components/panasonic_cc/__init__.py
|
shyne99/panasonic_cc
|
ec7912e4067ebd0c08ea2a16c123c50d69a2fca6
|
[
"MIT"
] | null | null | null |
custom_components/panasonic_cc/__init__.py
|
shyne99/panasonic_cc
|
ec7912e4067ebd0c08ea2a16c123c50d69a2fca6
|
[
"MIT"
] | null | null | null |
"""Platform for the Panasonic Comfort Cloud."""
from datetime import timedelta
import logging
from typing import Any, Dict
import asyncio
from async_timeout import timeout
import voluptuous as vol
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD)
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers import discovery
from .const import TIMEOUT
from .panasonic import PanasonicApiDevice
_LOGGER = logging.getLogger(__name__)
DOMAIN = "panasonic_cc"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PANASONIC_DEVICES = "panasonic_devices"
COMPONENT_TYPES = ["climate", "sensor", "switch"]
def setup(hass, config):
pass
async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
"""Set up the Garo Wallbox component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Establish connection with Comfort Cloud."""
import pcomfortcloud
conf = entry.data
if PANASONIC_DEVICES not in hass.data:
hass.data[PANASONIC_DEVICES] = []
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
api = pcomfortcloud.Session(username, password, verifySsl=False)
devices = await hass.async_add_executor_job(api.get_devices)
for device in devices:
try:
api_device = PanasonicApiDevice(hass, api, device)
await api_device.update()
hass.data[PANASONIC_DEVICES].append(api_device)
except Exception as e:
_LOGGER.warning(f"Failed to setup device: {device['name']} ({e})")
if hass.data[PANASONIC_DEVICES]:
for component in COMPONENT_TYPES:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await asyncio.wait(
[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in COMPONENT_TYPES
]
)
hass.data.pop(PANASONIC_DEVICES)
return True
| 27.368421
| 83
| 0.699231
| 0
| 0
| 0
| 0
| 0
| 0
| 1,485
| 0.571154
| 268
| 0.103077
|
12e82d4517d5644cd0b40eba9d476a8a70aa842c
| 5,806
|
py
|
Python
|
django/bossingest/test/test_ingest_manager.py
|
jhuapl-boss/boss
|
c2e26d272bd7b8d54abdc2948193163537e31291
|
[
"Apache-2.0"
] | 20
|
2016-05-16T21:08:13.000Z
|
2021-11-16T11:50:19.000Z
|
django/bossingest/test/test_ingest_manager.py
|
jhuapl-boss/boss
|
c2e26d272bd7b8d54abdc2948193163537e31291
|
[
"Apache-2.0"
] | 31
|
2016-10-28T17:51:11.000Z
|
2022-02-10T08:07:31.000Z
|
django/bossingest/test/test_ingest_manager.py
|
jhuapl-boss/boss
|
c2e26d272bd7b8d54abdc2948193163537e31291
|
[
"Apache-2.0"
] | 12
|
2016-10-28T17:47:01.000Z
|
2021-05-18T23:47:06.000Z
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest.mock import patch, MagicMock
from bossingest.ingest_manager import IngestManager
from bossingest.models import IngestJob
from bossingest.test.setup import SetupTests
from bosscore.test.setup_db import SetupTestDB
from bosscore.error import ErrorCodes
from bosscore.lookup import LookUpKey
import bossutils.aws
from django.contrib.auth.models import User
from ndingest.ndqueue.uploadqueue import UploadQueue
from rest_framework.test import APITestCase
class BossIngestManagerTest(APITestCase):
def setUp(self):
"""
Initialize the database
:return:
"""
dbsetup = SetupTestDB()
self.user = dbsetup.create_super_user(username='testuser', email='test@test.com', password='testuser')
dbsetup.set_user(self.user)
self.client.force_login(self.user)
dbsetup.insert_ingest_test_data()
setup = SetupTests()
# Get the config_data for v1 schema
config_data = setup.get_ingest_config_data_dict()
self.example_config_data = config_data
self.volumetric_config_data = setup.get_ingest_config_data_dict_volumetric()
# Unit under test.
self.ingest_mgr = IngestManager()
def test_validate_ingest(self):
"""Method to test validation method"""
#Validate schema and config file
response = self.ingest_mgr.validate_config_file(self.example_config_data)
assert (response is True)
#Validate properties
response = self.ingest_mgr.validate_properties()
assert (response is True)
def test_validate_config_file(self):
"""Method to test validation of a config file"""
self.ingest_mgr.validate_config_file(self.example_config_data)
assert(self.ingest_mgr.config is not None)
assert (self.ingest_mgr.config.config_data is not None)
def test_validate_properties(self):
"""Methos to test validation of properties of the config data"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
assert (self.ingest_mgr.collection.name == 'my_col_1')
assert (self.ingest_mgr.experiment.name == 'my_exp_1')
assert (self.ingest_mgr.channel.name == 'my_ch_1')
def test_create_ingest_job(self):
"""Method to test creation of a ingest job from a config_data dict"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
assert (job.id is not None)
assert (job.ingest_type == IngestJob.TILE_INGEST)
assert (job.tile_size_x == 512)
assert (job.tile_size_y == 512)
assert (job.tile_size_z == 1)
assert (job.tile_size_t == 1)
def test_create_ingest_job_volumetric(self):
self.ingest_mgr.validate_config_file(self.volumetric_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
assert (job.id is not None)
assert (job.ingest_type == IngestJob.VOLUMETRIC_INGEST)
assert (job.tile_size_x == 1024)
assert (job.tile_size_y == 1024)
assert (job.tile_size_z == 64)
assert (job.tile_size_t == 1)
def test_generate_upload_queue_args_tile_job(self):
"""Ensure ingest_type set properly"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr._generate_upload_queue_args(job)
assert actual['ingest_type'] == IngestJob.TILE_INGEST
assert actual['z_chunk_size'] == 16
def test_generate_upload_queue_args_volumetric_job(self):
"""Ensure ingest_type set properly"""
self.ingest_mgr.validate_config_file(self.volumetric_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr._generate_upload_queue_args(job)
assert actual['ingest_type'] == IngestJob.VOLUMETRIC_INGEST
assert actual['z_chunk_size'] == 64
assert actual['ingest_queue'] is None
def test_tile_bucket_name(self):
""" Test get tile bucket name"""
tile_bucket_name = self.ingest_mgr.get_tile_bucket()
assert(tile_bucket_name is not None)
def test_get_resource_data(self):
"""Run the method and ensure keys set"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr.get_resource_data(job.id)
self.assertIn('boss_key', actual)
self.assertIn('lookup_key', actual)
self.assertIn('channel', actual)
self.assertIn('experiment', actual)
self.assertIn('coord_frame', actual)
| 40.887324
| 110
| 0.707544
| 4,706
| 0.810541
| 0
| 0
| 0
| 0
| 0
| 0
| 1,335
| 0.229935
|
12e8353d99830242965335f0aba978e3cb0ab443
| 5,505
|
py
|
Python
|
sanic_devtools/log.py
|
yunstanford/sanic-devtools
|
9e8a6d011db025d53ddd6012b5542dc18825d4b0
|
[
"MIT"
] | 12
|
2019-09-06T05:14:46.000Z
|
2022-02-17T09:26:38.000Z
|
sanic_devtools/log.py
|
yunstanford/sanic-devtools
|
9e8a6d011db025d53ddd6012b5542dc18825d4b0
|
[
"MIT"
] | null | null | null |
sanic_devtools/log.py
|
yunstanford/sanic-devtools
|
9e8a6d011db025d53ddd6012b5542dc18825d4b0
|
[
"MIT"
] | 1
|
2019-09-10T03:57:21.000Z
|
2019-09-10T03:57:21.000Z
|
import json
import logging
import logging.config
import platform
import re
import traceback
from io import StringIO
import pygments
from devtools import pformat
from devtools.ansi import isatty, sformat
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3TracebackLexer
rs_dft_logger = logging.getLogger('sdev.server.dft')
rs_aux_logger = logging.getLogger('sdev.server.aux')
tools_logger = logging.getLogger('sdev.tools')
main_logger = logging.getLogger('sdev.main')
LOG_FORMATS = {
logging.DEBUG: sformat.dim,
logging.INFO: sformat.green,
logging.WARN: sformat.yellow,
}
pyg_lexer = Python3TracebackLexer()
pyg_formatter = Terminal256Formatter(style='vim')
split_log = re.compile(r'^(\[.*?\])')
class HighlightStreamHandler(logging.StreamHandler):
def setFormatter(self, fmt):
self.formatter = fmt
self.formatter.stream_is_tty = isatty(self.stream) and platform.system().lower() != 'windows'
class DefaultFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None, style='%'):
super().__init__(fmt, datefmt, style)
self.stream_is_tty = False
def format(self, record):
msg = super().format(record)
if not self.stream_is_tty:
return msg
m = split_log.match(msg)
log_color = LOG_FORMATS.get(record.levelno, sformat.red)
if m:
time = sformat(m.groups()[0], sformat.magenta)
return time + sformat(msg[m.end():], log_color)
else:
return sformat(msg, log_color)
class AccessFormatter(logging.Formatter):
"""
Used to log sanic_access and sanic_server
"""
def __init__(self, fmt=None, datefmt=None, style='%'):
super().__init__(fmt, datefmt, style)
self.stream_is_tty = False
def formatMessage(self, record):
msg = super().formatMessage(record)
if msg[0] != '{':
return msg
# json from AccessLogger
obj = json.loads(msg)
if self.stream_is_tty:
# in future we can do clever things about colouring the message based on status code
msg = '{} {} {}'.format(
sformat(obj['time'], sformat.magenta),
sformat(obj['prefix'], sformat.blue),
sformat(obj['msg'], sformat.dim if obj['dim'] else sformat.reset),
)
else:
msg = '{time} {prefix} {msg}'.format(**obj)
details = getattr(record, 'details', None)
if details:
msg = 'details: {}\n{}'.format(pformat(details, highlight=self.stream_is_tty), msg)
return msg
def formatException(self, ei):
sio = StringIO()
traceback.print_exception(*ei, file=sio)
stack = sio.getvalue()
sio.close()
if self.stream_is_tty and pyg_lexer:
return pygments.highlight(stack, lexer=pyg_lexer, formatter=pyg_formatter).rstrip('\n')
else:
return stack
def log_config(verbose: bool) -> dict:
"""
Setup default config. for dictConfig.
:param verbose: level: DEBUG if True, INFO if False
:return: dict suitable for ``logging.config.dictConfig``
"""
log_level = 'DEBUG' if verbose else 'INFO'
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
'class': 'sanic_devtools.log.DefaultFormatter',
},
'no_ts': {
'format': '%(message)s',
'class': 'sanic_devtools.log.DefaultFormatter',
},
'sanic': {
'format': '%(message)s',
'class': 'sanic_devtools.log.AccessFormatter',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'sanic_devtools.log.HighlightStreamHandler',
'formatter': 'default'
},
'no_ts': {
'level': log_level,
'class': 'sanic_devtools.log.HighlightStreamHandler',
'formatter': 'no_ts'
},
'sanic_access': {
'level': log_level,
'class': 'sanic_devtools.log.HighlightStreamHandler',
'formatter': 'sanic'
},
'sanic_server': {
'class': 'sanic_devtools.log.HighlightStreamHandler',
'formatter': 'sanic'
},
},
'loggers': {
rs_dft_logger.name: {
'handlers': ['default'],
'level': log_level,
},
rs_aux_logger.name: {
'handlers': ['default'],
'level': log_level,
},
tools_logger.name: {
'handlers': ['default'],
'level': log_level,
},
main_logger.name: {
'handlers': ['no_ts'],
'level': log_level,
},
'sanic.access': {
'handlers': ['sanic_access'],
'level': log_level,
'propagate': False,
},
'sanic.server': {
'handlers': ['sanic_server'],
'level': log_level,
},
},
}
def setup_logging(verbose):
config = log_config(verbose)
logging.config.dictConfig(config)
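A minimal usage sketch, not part of the module: configure logging and emit messages through the loggers defined above; the message text is illustrative.
if __name__ == "__main__":
    # Illustrative only: wire up the dictConfig above, then log through the
    # module-level loggers it configures.
    setup_logging(verbose=True)
    main_logger.info("dev server starting")
    tools_logger.debug("watching for file changes")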
| 31.820809
| 101
| 0.547502
| 2,221
| 0.403451
| 0
| 0
| 0
| 0
| 0
| 0
| 1,386
| 0.251771
|
12e86cadd6eb11b7a84bc77642dccfd6d3f1bfb4
| 1,893
|
py
|
Python
|
rest_fhir/mixins/conditional_read.py
|
weynelucas/django-rest-fhir
|
560a0aadd0cfa43b6dc58f995c86015f6eefb768
|
[
"MIT"
] | 2
|
2021-05-07T12:16:27.000Z
|
2021-12-16T20:45:36.000Z
|
rest_fhir/mixins/conditional_read.py
|
weynelucas/django-rest-fhir
|
560a0aadd0cfa43b6dc58f995c86015f6eefb768
|
[
"MIT"
] | 3
|
2021-05-10T19:40:33.000Z
|
2021-06-27T14:24:47.000Z
|
rest_fhir/mixins/conditional_read.py
|
weynelucas/django-rest-fhir
|
560a0aadd0cfa43b6dc58f995c86015f6eefb768
|
[
"MIT"
] | 1
|
2021-08-09T22:00:22.000Z
|
2021-08-09T22:00:22.000Z
|
import calendar
from typing import Union
import dateutil.parser
from rest_framework import status
from rest_framework.response import Response
from django.utils.cache import get_conditional_response
from django.utils.http import http_date
from ..models import Resource, ResourceVersion
FhirResource = Union[Resource, ResourceVersion]
class ConditionalReadMixin:
def conditional_read(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
res_data = serializer.data
# Test If-Modified-Since and If-None-Match preconditions
# https://www.hl7.org/fhir/http.html#cread
etag, last_modified = self.get_conditional_args(res_data)
response = get_conditional_response(request, etag, last_modified)
if response is not None:
return response
# Set relevant headers on the response if the request method is safe
headers = self.get_conditional_headers(res_data)
return Response(
data=res_data,
status=status.HTTP_200_OK,
headers=headers,
)
def etag_func(self, data) -> str:
return 'W/"%s"' % data['meta']['versionId']
def last_modified_func(self, data) -> str:
dt = dateutil.parser.parse(data['meta']['lastUpdated'])
return calendar.timegm(dt.utctimetuple())
def get_conditional_args(self, data: dict):
etag = self.etag_func(data)
last_modified = self.last_modified_func(data)
return (
etag,
last_modified,
)
def get_conditional_headers(self, data):
etag, last_modified = self.get_conditional_args(data)
headers = dict()
if etag:
headers['ETag'] = etag
if last_modified:
headers['Last-Modified'] = http_date(last_modified)
return headers
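A sketch of how the mixin might be wired into a DRF retrieve endpoint; this is an assumption rather than code from the package, the view name is hypothetical, and it presumes an already configured Django project supplying queryset and serializer_class.
# Hypothetical wiring (assumption, not from this package): GenericAPIView
# supplies the get_object()/get_serializer() used by conditional_read().
from rest_framework import generics


class ResourceReadView(ConditionalReadMixin, generics.RetrieveAPIView):
    # queryset and serializer_class are left to the host project.
    def get(self, request, *args, **kwargs):
        return self.conditional_read(request, *args, **kwargs)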
| 29.578125
| 73
| 0.663497
| 1,552
| 0.819863
| 0
| 0
| 0
| 0
| 0
| 0
| 226
| 0.119387
|
12e90bbcd25c813026449118e104295e2d5b4d7b
| 803
|
py
|
Python
|
code_week27_1026_111/sort_colors.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week27_1026_111/sort_colors.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week27_1026_111/sort_colors.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
'''
Given an array with n elements colored red, white, or blue, sort them in place
so that elements of the same color are adjacent, in the order red, white, blue.
Here, the integers 0, 1, and 2 represent red, white, and blue respectively.
Note:
You may not use the library's sort function to solve this problem.
Example:
Input: [2,0,2,1,1,0]
Output: [0,0,1,1,2,2]
Follow up:
An intuitive solution is a two-pass algorithm using counting sort:
first count the number of 0s, 1s, and 2s, then rewrite the array in 0, 1, 2 order.
Could you come up with a one-pass algorithm using only constant space?
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/sort-colors
'''
from typing import List


class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n = len(nums)
ptr = 0
for i in range(n):
if nums[i] == 0:
nums[i],nums[ptr] = nums[ptr],nums[i]
ptr += 1
for i in range(ptr,n):
if nums[i] == 1:
nums[i],nums[ptr] = nums[ptr],nums[i]
ptr +=1
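A quick driver, not part of the original solution, exercising the example from the problem statement above.
if __name__ == "__main__":
    # Uses the example from the problem statement; expected output [0, 0, 1, 1, 2, 2].
    nums = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(nums)
    print(nums)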
| 21.131579
| 65
| 0.554172
| 463
| 0.37982
| 0
| 0
| 0
| 0
| 0
| 0
| 830
| 0.680886
|
12ea0884e04ad5410800ee3a274f85dcb7596112
| 363
|
py
|
Python
|
solutions/lowest_common_ancestor_deepest_leaves/__main__.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
solutions/lowest_common_ancestor_deepest_leaves/__main__.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
solutions/lowest_common_ancestor_deepest_leaves/__main__.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
from .solution import lcaDeepestLeaves
from ..utils import TreeNode
print('Enter tree, e.g. [2,3,1,3,1,null,1]:', end=' ')
nodes = [int(node) if node != 'null' else None for node in input().strip().split(',')]
root = TreeNode.fromList(nodes)
lowestCommonAncestor = lcaDeepestLeaves(root)
print(f'The lowest common ancestor is: {lowestCommonAncestor.toList()}')
| 36.3
| 86
| 0.721763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.316804
|
12ea961825e76ebc83c3a72ff0731af4a86af12d
| 2,472
|
py
|
Python
|
code/python3/index_values_with_geo.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 47
|
2015-01-20T15:38:41.000Z
|
2022-02-15T21:03:50.000Z
|
code/python3/index_values_with_geo.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 16
|
2015-06-09T16:12:50.000Z
|
2020-02-05T06:40:18.000Z
|
code/python3/index_values_with_geo.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 56
|
2015-01-20T15:38:44.000Z
|
2022-03-03T18:13:39.000Z
|
#!/usr/bin/env python
import json
from support import parse_states
import sys
import xapian
def index(datapath, dbpath):
# Create or open the database we're going to be writing to.
db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
# Set up a TermGenerator that we'll use in indexing.
termgenerator = xapian.TermGenerator()
termgenerator.set_stemmer(xapian.Stem("en"))
for fields in parse_states(datapath):
# 'fields' is a dictionary mapping from field name to value.
# Pick out the fields we're going to index.
name = fields.get('name', u'')
description = fields.get('description', u'')
motto = fields.get('motto', u'')
admitted = fields.get('admitted', None)
population = fields.get('population', None)
order = fields.get('order', u'')
# We make a document and tell the term generator to use this.
doc = xapian.Document()
termgenerator.set_document(doc)
# index each field with a suitable prefix
termgenerator.index_text(name, 1, 'S')
termgenerator.index_text(description, 1, 'XD')
termgenerator.index_text(motto, 1, 'XM')
# Index fields without prefixes for general search.
termgenerator.index_text(name)
termgenerator.increase_termpos()
termgenerator.index_text(description)
termgenerator.increase_termpos()
termgenerator.index_text(motto)
# Add document values.
if admitted is not None:
doc.add_value(1, xapian.sortable_serialise(int(admitted[:4])))
doc.add_value(2, admitted) # YYYYMMDD
if population is not None:
doc.add_value(3, xapian.sortable_serialise(int(population)))
### Start of example code.
midlat = fields['midlat']
midlon = fields['midlon']
if midlat and midlon:
doc.add_value(4, "%f,%f" % (float(midlat), float(midlon)))
### End of example code.
# Store all the fields for display purposes.
doc.set_data(json.dumps(fields))
# We use the order to ensure each object ends up in the
# database only once no matter how many times we run the
# indexer.
idterm = u"Q" + order
doc.add_boolean_term(idterm)
db.replace_document(idterm, doc)
if len(sys.argv) != 3:
print("Usage: %s DATAPATH DBPATH" % sys.argv[0])
sys.exit(1)
index(datapath = sys.argv[1], dbpath = sys.argv[2])
| 35.314286
| 74
| 0.644013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 771
| 0.311893
|
12eab71a1efede1b96f0100790956e17f9d9393a
| 1,265
|
py
|
Python
|
logger.py
|
drewstone/dynamic-governanceq
|
924317800db7bca6308ff912b16c7b834ab30e32
|
[
"MIT"
] | null | null | null |
logger.py
|
drewstone/dynamic-governanceq
|
924317800db7bca6308ff912b16c7b834ab30e32
|
[
"MIT"
] | null | null | null |
logger.py
|
drewstone/dynamic-governanceq
|
924317800db7bca6308ff912b16c7b834ab30e32
|
[
"MIT"
] | null | null | null |
import constants
def init(mode, gov, agents):
if mode == constants.DEBUG_LOGGING or mode == constants.LOG_INIT:
print("Agents = {}".format(
list(map(lambda agent: agent.capacity, agents))))
print("Starting param: {}".format(gov.param))
def round(mode, round, gov, throughput):
if mode == constants.DEBUG_LOGGING or mode == constants.LOG_ROUND:
print("\nRound {} | OLD_P = {}, NEW_P = {}, TPS = {}, RULE = {}\n"
.format(round,
gov.prev_param,
gov.param,
throughput,
gov.decision_type))
def dropout(mode, active, inactive):
if mode == constants.DEBUG_LOGGING or mode == constants.LOG_DROPOUT:
print("Active agents: {}".format(
list(map(lambda a: a.capacity, active))))
print("Inactive agents: {}".format(
list(map(lambda a: a.capacity, inactive))))
def payments(mode, payments):
if mode == constants.DEBUG_LOGGING or mode == constants.LOG_PAYMENTS:
if payments:
payment_logs = list(map(lambda p: "Param {} => {}"
.format(p[1], p[0]), payments))
print("\t\t\tPayments\n" + "\n".join(payment_logs))
| 36.142857
| 74
| 0.554941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.135178
|
12ec838b4e6e3d1f8f2bea5549297c2e3c075ade
| 2,484
|
py
|
Python
|
test/core/bad_ssl/gen_build_yaml.py
|
Akrog/grpc
|
14800b0c1acc2d10d4fd0826731ecae2cb448143
|
[
"Apache-2.0"
] | 3
|
2020-10-07T14:20:21.000Z
|
2021-10-08T14:49:17.000Z
|
test/core/bad_ssl/gen_build_yaml.py
|
Akrog/grpc
|
14800b0c1acc2d10d4fd0826731ecae2cb448143
|
[
"Apache-2.0"
] | 1
|
2021-03-04T02:33:56.000Z
|
2021-03-04T02:33:56.000Z
|
test/core/bad_ssl/gen_build_yaml.py
|
Akrog/grpc
|
14800b0c1acc2d10d4fd0826731ecae2cb448143
|
[
"Apache-2.0"
] | 5
|
2021-02-19T09:46:00.000Z
|
2022-03-13T17:33:34.000Z
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import collections
import yaml
TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost')
default_test_options = TestOptions(False, 1.0)
# maps test names to options
BAD_CLIENT_TESTS = {
'cert': default_test_options._replace(cpu_cost=0.1),
# Disabling this test because it does not link correctly as written
# 'alpn': default_test_options._replace(cpu_cost=0.1),
}
def main():
json = {
'#':
'generated with test/bad_ssl/gen_build_json.py',
'libs': [{
'name': 'bad_ssl_test_server',
'build': 'private',
'language': 'c',
'src': ['test/core/bad_ssl/server_common.cc'],
'headers': ['test/core/bad_ssl/server_common.h'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['grpc_test_util', 'grpc', 'gpr']
}],
'targets': [{
'name': 'bad_ssl_%s_server' % t,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/bad_ssl/servers/%s.cc' % t],
'vs_proj_dir': 'test/bad_ssl',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['bad_ssl_test_server', 'grpc_test_util', 'grpc', 'gpr']
} for t in sorted(BAD_CLIENT_TESTS.keys())] + [{
'name': 'bad_ssl_%s_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c',
'src': ['test/core/bad_ssl/bad_ssl_test.cc'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['grpc_test_util', 'grpc', 'gpr']
} for t in sorted(BAD_CLIENT_TESTS.keys())]
}
print yaml.dump(json)
if __name__ == '__main__':
main()
| 35.485714
| 76
| 0.595813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,530
| 0.615942
|
12ed7f2619866ebbd758994ab5e6290f518e72e4
| 6,608
|
py
|
Python
|
tests/test_providers.py
|
thejoeejoee/django-allauth-cas
|
5db34b546eb32524a3a1a4b90f411e370ac7ad9b
|
[
"MIT"
] | null | null | null |
tests/test_providers.py
|
thejoeejoee/django-allauth-cas
|
5db34b546eb32524a3a1a4b90f411e370ac7ad9b
|
[
"MIT"
] | null | null | null |
tests/test_providers.py
|
thejoeejoee/django-allauth-cas
|
5db34b546eb32524a3a1a4b90f411e370ac7ad9b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from six.moves.urllib.parse import urlencode
from django.contrib import messages
from django.contrib.messages.api import get_messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.messages.storage.base import Message
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import RequestFactory, TestCase, override_settings
from allauth.socialaccount.providers import registry
from allauth_cas.views import AuthAction
from .example.provider import ExampleCASProvider
class CASProviderTests(TestCase):
def setUp(self):
self.request = self._get_request()
self.provider = ExampleCASProvider(self.request)
def _get_request(self):
request = RequestFactory().get('/test/')
SessionMiddleware(lambda: None).process_request(request)
MessageMiddleware(lambda: None).process_request(request)
return request
def test_register(self):
"""
Example CAS provider is registered as social account provider.
"""
self.assertIsInstance(registry.by_id('theid'), ExampleCASProvider)
def test_get_login_url(self):
url = self.provider.get_login_url(self.request)
self.assertEqual('/accounts/theid/login/', url)
url_with_qs = self.provider.get_login_url(
self.request,
next='/path?quéry=string&two=whoam%C3%AF',
)
self.assertEqual(
url_with_qs,
'/accounts/theid/login/?next=%2Fpath%3Fqu%C3%A9ry%3Dstring%26two%3'
'Dwhoam%25C3%25AF'
)
def test_get_callback_url(self):
url = self.provider.get_callback_url(self.request)
self.assertEqual('/accounts/theid/login/callback/', url)
url_with_qs = self.provider.get_callback_url(
self.request,
next='/path?quéry=string&two=whoam%C3%AF',
)
self.assertEqual(
url_with_qs,
'/accounts/theid/login/callback/?next=%2Fpath%3Fqu%C3%A9ry%3Dstrin'
'g%26two%3Dwhoam%25C3%25AF'
)
def test_get_logout_url(self):
url = self.provider.get_logout_url(self.request)
self.assertEqual('/accounts/theid/logout/', url)
url_with_qs = self.provider.get_logout_url(
self.request,
next='/path?quéry=string&two=whoam%C3%AF',
)
self.assertEqual(
url_with_qs,
'/accounts/theid/logout/?next=%2Fpath%3Fqu%C3%A9ry%3Dstring%26two%'
'3Dwhoam%25C3%25AF'
)
@override_settings(SOCIALACCOUNT_PROVIDERS={
'theid': {
'AUTH_PARAMS': {'key': 'value'},
},
})
def test_get_auth_params(self):
action = AuthAction.AUTHENTICATE
auth_params = self.provider.get_auth_params(self.request, action)
self.assertDictEqual(auth_params, {
'key': 'value',
})
@override_settings(SOCIALACCOUNT_PROVIDERS={
'theid': {
'AUTH_PARAMS': {'key': 'value'},
},
})
def test_get_auth_params_with_dynamic(self):
factory = RequestFactory()
request = factory.get(
'/test/?auth_params=next%3Dtwo%253Dwhoam%2525C3%2525AF%2526qu%2525'
'C3%2525A9ry%253Dstring'
)
request.session = {}
action = AuthAction.AUTHENTICATE
auth_params = self.provider.get_auth_params(request, action)
self.assertDictEqual(auth_params, {
'key': 'value',
'next': 'two=whoam%C3%AF&qu%C3%A9ry=string',
})
def test_add_message_suggest_caslogout(self):
expected_msg_base_str = (
"To logout of The Provider, please close your browser, or visit "
"this <a href=\"/accounts/theid/logout/?{}\">link</a>."
)
# Defaults.
req1 = self.request
self.provider.add_message_suggest_caslogout(req1)
expected_msg1 = Message(
messages.INFO,
expected_msg_base_str.format(urlencode({'next': '/test/'})),
)
self.assertIn(expected_msg1, get_messages(req1))
# Custom arguments.
req2 = self._get_request()
self.provider.add_message_suggest_caslogout(
req2, next_page='/redir/', level=messages.WARNING)
expected_msg2 = Message(
messages.WARNING,
expected_msg_base_str.format(urlencode({'next': '/redir/'})),
)
self.assertIn(expected_msg2, get_messages(req2))
def test_message_suggest_caslogout_on_logout(self):
self.assertFalse(
self.provider.message_suggest_caslogout_on_logout(self.request))
with override_settings(SOCIALACCOUNT_PROVIDERS={
'theid': {'MESSAGE_SUGGEST_CASLOGOUT_ON_LOGOUT': True},
}):
self.assertTrue(
self.provider
.message_suggest_caslogout_on_logout(self.request)
)
@override_settings(SOCIALACCOUNT_PROVIDERS={
'theid': {
'MESSAGE_SUGGEST_CASLOGOUT_ON_LOGOUT_LEVEL': messages.WARNING,
},
})
def test_message_suggest_caslogout_on_logout_level(self):
self.assertEqual(messages.WARNING, (
self.provider
.message_suggest_caslogout_on_logout_level(self.request)
))
def test_extract_uid(self):
response = 'useRName', {}
uid = self.provider.extract_uid(response)
self.assertEqual('useRName', uid)
def test_extract_common_fields(self):
response = 'useRName', {}
common_fields = self.provider.extract_common_fields(response)
self.assertDictEqual(common_fields, {
'username': 'useRName',
'first_name': None,
'last_name': None,
'name': None,
'email': None,
})
def test_extract_common_fields_with_extra(self):
response = 'useRName', {'username': 'user', 'email': 'user@mail.net'}
common_fields = self.provider.extract_common_fields(response)
self.assertDictEqual(common_fields, {
'username': 'user',
'first_name': None,
'last_name': None,
'name': None,
'email': 'user@mail.net',
})
def test_extract_extra_data(self):
response = 'useRName', {'user_attr': 'thevalue', 'another': 'value'}
extra_data = self.provider.extract_extra_data(response)
self.assertDictEqual(extra_data, {
'user_attr': 'thevalue',
'another': 'value',
'uid': 'useRName',
})
| 32.875622
| 79
| 0.623033
| 6,048
| 0.914839
| 0
| 0
| 1,381
| 0.208894
| 0
| 0
| 1,384
| 0.209348
|
12ed9629940a31dc96db1b6d58b951b990da8233
| 3,723
|
py
|
Python
|
infoblox_netmri/api/remote/models/device_password_log_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/device_password_log_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/device_password_log_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DevicePasswordLogRemote(RemoteModel):
"""
    This table lists the entries of DevicePasswordLog.
| ``DevicePwLogID:`` The internal NetMRI identifier for the device password log.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device from which device password log table information was collected.
| ``attribute type:`` number
| ``DevicePwLogTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``DevicePwLogProtocol:`` The protocol of the device password log.
| ``attribute type:`` string
| ``DevicePwLogPassword:`` The password of the device password log.
| ``attribute type:`` string
    |  ``DevicePwLogSNMPAuthProto:`` The SNMP authentication protocol of the device password log.
| ``attribute type:`` string
| ``DevicePwLogSNMPPrivProto:`` The SNMP private password protocol of the device password log.
| ``attribute type:`` string
| ``DevicePwLogStatus:`` The status of the device password log.
| ``attribute type:`` string
| ``DevicePwLogPasswordSecure:`` The password of the device password log.
| ``attribute type:`` string
| ``DevicePwLogUsernameSecure:`` The username of the device password log.
| ``attribute type:`` string
    |  ``DevicePwLogEnablePasswordSecure:`` The enable password of the device password log.
| ``attribute type:`` string
    |  ``DevicePwLogSNMPAuthPWSecure:`` The SNMP authentication password of the device password log.
| ``attribute type:`` string
| ``DevicePwLogSNMPPrivPWSecure:`` The SNMP private password of the device password log.
| ``attribute type:`` string
| ``SecureVersion:`` The encryption version of the username and passwords.
| ``attribute type:`` number
"""
properties = ("DevicePwLogID",
"DataSourceID",
"DeviceID",
"DevicePwLogTimestamp",
"DevicePwLogProtocol",
"DevicePwLogPassword",
"DevicePwLogSNMPAuthProto",
"DevicePwLogSNMPPrivProto",
"DevicePwLogStatus",
"DevicePwLogPasswordSecure",
"DevicePwLogUsernameSecure",
"DevicePwLogEnablePasswordSecure",
"DevicePwLogSNMPAuthPWSecure",
"DevicePwLogSNMPPrivPWSecure",
"SecureVersion",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DevicePwLogID": self.DevicePwLogID })
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DevicePwLogID": self.DevicePwLogID })
@property
@check_api_availability
def infradevice(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.infradevice(**{"DevicePwLogID": self.DevicePwLogID })
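# Hedged usage sketch (not part of the original module): the docstring above describes
# the columns of the device password log, and the `properties` tuple mirrors those
# column names. Assuming RemoteModel exposes the collected values as attributes named
# after the columns (as the other generated remote models in this package do), a record
# can be flattened into a plain dict; `log_record` is a hypothetical instance returned
# by a broker query.
def device_pw_log_to_dict(log_record):
    """Illustrative helper: map the documented columns onto a plain dict."""
    return {name: getattr(log_record, name, None)
            for name in DevicePasswordLogRemote.properties}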
| 31.285714
| 130
| 0.608649
| 3,615
| 0.970991
| 0
| 0
| 771
| 0.207091
| 0
| 0
| 2,738
| 0.735428
|
12ee13303b7604822dba3ba0cf7479d1d2caaf67
| 4,477
|
py
|
Python
|
selenium_utils/element.py
|
defactto/selenium-utils
|
d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42
|
[
"Apache-2.0"
] | 7
|
2016-08-24T20:29:47.000Z
|
2020-01-29T13:59:03.000Z
|
selenium_utils/element.py
|
defactto/selenium-utils
|
d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42
|
[
"Apache-2.0"
] | null | null | null |
selenium_utils/element.py
|
defactto/selenium-utils
|
d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42
|
[
"Apache-2.0"
] | 1
|
2020-01-06T18:41:15.000Z
|
2020-01-06T18:41:15.000Z
|
import logging
import time
from selenium.common import exceptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common import action_chains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium_utils import exception
logger = logging.getLogger(__name__)
def hover_over_element(driver: WebDriver, element):
"""Moves the mouse pointer to the element and hovers"""
action_chains.ActionChains(driver).move_to_element(element).perform()
def wait_until_stops_moving(element, wait_seconds=1):
"""Waits until the element stops moving
Args:
selenium.webdriver.remote.webelement.WebElement
"""
prev_location = None
timer_begin = time.time()
while prev_location != element.location:
prev_location = element.location
time.sleep(0.1)
if time.time() - timer_begin > wait_seconds:
raise exception.ElementMovingTimeout
def get_when_visible(driver: WebDriver, locator, wait_seconds=1):
"""
Args:
driver (base.CustomDriver)
locator (tuple)
Returns:
selenium.webdriver.remote.webelement.WebElement
"""
return WebDriverWait(
driver,
wait_seconds) \
.until(EC.presence_of_element_located(locator))
def wait_until_condition(driver: WebDriver, condition, wait_seconds=1):
"""Wait until given expected condition is met"""
WebDriverWait(
driver,
wait_seconds).until(condition)
def wait_until_not_present(driver: WebDriver, locator):
"""Wait until no element(-s) for locator given are present in the DOM."""
wait_until_condition(driver, lambda d: len(d.find_elements(*locator)) == 0)
def get_when_all_visible(driver: WebDriver, locator, wait_seconds=1):
"""Return WebElements by locator when all of them are visible.
Args:
locator (tuple)
Returns:
selenium.webdriver.remote.webelement.WebElements
"""
return WebDriverWait(
driver,
wait_seconds) \
.until(EC.visibility_of_any_elements_located(locator))
def get_when_clickable(driver: WebDriver, locator, wait_seconds=1):
"""
Args:
driver (base.CustomDriver)
locator (tuple)
Returns:
selenium.webdriver.remote.webelement.WebElement
"""
return WebDriverWait(
driver,
wait_seconds) \
.until(EC.element_to_be_clickable(locator))
def get_when_invisible(driver: WebDriver, locator, wait_seconds=1):
"""
Args:
driver (base.CustomDriver)
locator (tuple)
Returns:
selenium.webdriver.remote.webelement.WebElement
"""
return WebDriverWait(
driver,
wait_seconds) \
.until(EC.invisibility_of_element_located(locator))
def wait_for_element_text(driver: WebDriver, locator, text, wait_seconds=1):
"""
Args:
driver (base.CustomDriver)
locator (tuple)
text (str)
"""
return WebDriverWait(
driver,
wait_seconds) \
.until(EC.text_to_be_present_in_element(locator, text))
def is_value_in_attr(element, attr="class", value="active"):
"""Checks if the attribute value is present for given attribute
Args:
element (selenium.webdriver.remote.webelement.WebElement)
attr (basestring): attribute name e.g. "class"
value (basestring): value in the class attribute that
indicates the element is now active/opened
Returns:
bool
"""
attributes = element.get_attribute(attr)
return value in attributes.split()
def click_on_staleable_element(driver: WebDriver, el_locator, wait_seconds=1):
"""Clicks an element that can be modified between the time we find it and when we click on it"""
time_start = time.time()
while time.time() - time_start < wait_seconds:
try:
driver.find_element(*el_locator).click()
break
except exceptions.StaleElementReferenceException as e:
logger.error(str(e))
time.sleep(0.1)
else:
raise exception.ElementNotFound(el_locator)
def scroll_into_view(driver: WebDriver, element, offset_pixels=0):
"""Scrolls page to element using JS"""
driver.execute_script("return arguments[0].scrollIntoView();", element)
# compensate for the header
driver.execute_script("window.scrollBy(0, -{});".format(offset_pixels))
return element
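# Hedged usage sketch (not part of the original module): how these helpers are
# typically combined. The URL and the (By.ID, "submit") locator are made-up values,
# and webdriver.Chrome() assumes a local chromedriver is available. The function is
# illustrative only and is never called.
def example_wait_scroll_click():
  from selenium import webdriver
  from selenium.webdriver.common.by import By
  driver = webdriver.Chrome()
  driver.get("https://example.com")
  locator = (By.ID, "submit")
  # Wait for the element, bring it into view, then click it, retrying if the DOM
  # re-renders the element underneath us.
  element = get_when_visible(driver, locator, wait_seconds=5)
  scroll_into_view(driver, element, offset_pixels=50)
  click_on_staleable_element(driver, locator, wait_seconds=5)
  driver.quit()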
| 29.071429
| 100
| 0.691311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,574
| 0.351575
|
12ee5dcab405211321c77a37855a79013c17587c
| 1,421
|
py
|
Python
|
modules/iib_applications.py
|
satbel/ib-metrics-pyclient
|
1670df55684a7182884fcfc777fde5ae44095f8f
|
[
"MIT"
] | null | null | null |
modules/iib_applications.py
|
satbel/ib-metrics-pyclient
|
1670df55684a7182884fcfc777fde5ae44095f8f
|
[
"MIT"
] | null | null | null |
modules/iib_applications.py
|
satbel/ib-metrics-pyclient
|
1670df55684a7182884fcfc777fde5ae44095f8f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Various functions for ib applications."""
from modules.iib_api import get_status
def get_metric_name(metric_label):
"""Returns pushgateway formatted metric name."""
return 'ib_application_{0}'.format(metric_label)
def get_metric_annotation():
"""Returns dictionary with annotations 'HELP' and 'TYPE' for metrics."""
annotations = {
'status': '# HELP {0} Current status of IB application.\n\
# TYPE {0} gauge\n'.format(get_metric_name('status'))}
return annotations
def format_applications(applications, broker_name):
"""Returns string with all metrics for all applications which ready to push to pushgateway."""
metrics_annotation = get_metric_annotation()
app_metric_data = str()
for app in applications:
app_list = app.split()
egname, app_name, status = app_list[6], app_list[2], app_list[8].replace(".","")
template_string = 'egname="{0}", brokername="{1}", appname="{2}"'.format(
egname.replace("'", ""),
broker_name,
app_name.replace("'", ""))
app_metric = '{0}{{{1}}} {2}\n'.format(
get_metric_name(metric_label='status'),
template_string,
get_status(status=status))
app_metric_data += app_metric
app_metric_data = '{0}{1}'.format(
metrics_annotation['status'],
app_metric_data)
return app_metric_data
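# Hedged usage sketch (not part of the original module): format_applications() expects
# each entry to be a whitespace-separated line in which token 2 is the application
# name, token 6 is the execution group name and token 8 is the status (trailing dot
# stripped), i.e. the shape of an IIB "list applications" style message. The sample
# line and broker name below are made up purely to illustrate that parsing.
if __name__ == '__main__':
    sample_applications = [
        "BIP1276I: Application 'MyApp' on execution group 'default' is running."
    ]
    print(format_applications(sample_applications, broker_name='BROKER1'))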
| 36.435897
| 98
| 0.64532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 489
| 0.344124
|
12f0e1426999717b706caac8906a3500e72dc344
| 1,366
|
py
|
Python
|
clock.py
|
hcjk/kitchen-bot
|
5122101ed840b6bdf0b56d3c154de083cb793eda
|
[
"MIT"
] | null | null | null |
clock.py
|
hcjk/kitchen-bot
|
5122101ed840b6bdf0b56d3c154de083cb793eda
|
[
"MIT"
] | null | null | null |
clock.py
|
hcjk/kitchen-bot
|
5122101ed840b6bdf0b56d3c154de083cb793eda
|
[
"MIT"
] | 1
|
2019-06-10T01:25:49.000Z
|
2019-06-10T01:25:49.000Z
|
import os
import requests
import psycopg2
import db_lib as db
from app import send_message, log
from apscheduler.schedulers.blocking import BlockingScheduler
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
def kitchen_reminder():
# fetch current status
status = db.getStatus(conn)
# if notify is disabled, no operation needed
if status == "DISABLED":
log("kitchen_reminder trigger; bot NOTIFY_STATUS is disabled")
return "ok", 200
currentBoyNum = db.getBoyNum(conn)
# if first day has passed
if currentBoyNum == 1:
# increment day
currentBoy = db.getBoy(conn)
db.changeDay(conn, currentBoy)
# if second day has passed
elif currentBoyNum == 2:
        # pass responsibility
currentBoy = db.getBoy(conn)
nextBoy = db.getNextBoy(conn)
db.updateBoy(conn, currentBoy, nextBoy)
# send message to new kitchen boy
msg = "{}, it is your kitchen day!".format(db.getNickname(conn, nextBoy))
send_message(msg, [nextBoy])
else:
log("Error: getBoyNum() returned an unexpected value: {}".format(currentBoyNum))
return "ok", 200
def rent_reminder():
msg = "Don't forget to pay rent!"
send_message(msg, db.getAll(conn))
return "ok", 200
sched = BlockingScheduler()
sched.add_job(kitchen_reminder, 'cron', hour=0, minute=0)
sched.add_job(rent_reminder, 'cron', day=1)
sched.start()
| 23.964912
| 82
| 0.732064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 408
| 0.298682
|
12f18b28f2f44fef548bff40a3b625b2e4be86b9
| 2,940
|
py
|
Python
|
ucf_sub_catkin_ros/src/sub_states/src/qual/test.py
|
RoboticsClubatUCF/RoboSub
|
47304c620f963a8762db57a7ed248d1df90190fb
|
[
"MIT"
] | null | null | null |
ucf_sub_catkin_ros/src/sub_states/src/qual/test.py
|
RoboticsClubatUCF/RoboSub
|
47304c620f963a8762db57a7ed248d1df90190fb
|
[
"MIT"
] | 19
|
2016-09-16T19:52:57.000Z
|
2018-04-14T18:16:17.000Z
|
ucf_sub_catkin_ros/src/sub_states/src/qual/test.py
|
RoboticsClubatUCF/RoboSub
|
47304c620f963a8762db57a7ed248d1df90190fb
|
[
"MIT"
] | 8
|
2016-01-06T20:56:45.000Z
|
2017-02-26T02:49:17.000Z
|
#!/usr/bin/env python
import rospy
import smach
import gate
import pole
class SubStates:
def __init__(self):
rospy.loginfo("State Machine has started.")
self.gate = smach.StateMachine(outcomes=['preempted', 'POLE', 'GATE'])
self.pole = smach.StateMachine(outcomes=['preempted', 'GATE', 'POLE'])
self.tasks = smach.StateMachine(outcomes=['POLE', 'GATE', 'preempted', self.gate, self.pole])
with self.tasks:
smach.StateMachine.add('Start', self.pole, transitions={'POLE':self.pole, 'GATE':self.gate})
with self.gate:
smach.StateMachine.add('LOCATE', gate.locate(),
transitions={'preempted':'preempted',
'success': 'ALIGN',
'failure': 'LOCATE'})
smach.StateMachine.add('ALIGN', gate.align(),
transitions={'preempted':'preempted',
'success': 'THROUGH',
'failure': 'LOCATE'})
smach.StateMachine.add('THROUGH', gate.through(),
transitions={'preempted':'preempted',
'success': 'POLE',
'failure':'LOCATE'})
with self.pole:
smach.StateMachine.add('LOCATE', pole.locate(),
transitions={'preempted':'preempted',
'success': 'ALIGN',
'failure': 'LOCATE'})
smach.StateMachine.add('ALIGN', pole.align(),
transitions={'preempted':'preempted',
'success': 'DRIFT',
'failure': 'LOCATE'})
smach.StateMachine.add('DRIFT', pole.drift(),
transitions={'preempted':'preempted',
'success': 'GATE',
'failure': 'LOCATE'})
if __name__ == '__main__':
rospy.init_node('hippo_sm')
sm = SubStates()
outcome = sm.tasks.execute()
rospy.spin()
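# Hedged sketch (not part of the original script): the gate/pole sub-machines above
# assume that gate.locate(), gate.align(), etc. are smach.State subclasses whose
# execute() returns one of the outcomes named in the transitions ('success',
# 'failure' or 'preempted'). A minimal state of that shape looks like this; it is
# illustrative only and is not wired into the machine.
class ExampleLocate(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=['success', 'failure', 'preempted'])
    def execute(self, userdata):
        # A real state would run perception / control here.
        return 'success'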
| 49.830508
| 116
| 0.343197
| 2,731
| 0.928912
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.181293
|
12f1cb8ec52078b29ac8f9e2d9706191446e64ae
| 3,271
|
py
|
Python
|
networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py
|
vinicius-marinho/GloboNetworkAPI
|
94651d3b4dd180769bc40ec966814f3427ccfb5b
|
[
"Apache-2.0"
] | 73
|
2015-04-13T17:56:11.000Z
|
2022-03-24T06:13:07.000Z
|
networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | 99
|
2015-04-03T01:04:46.000Z
|
2021-10-03T23:24:48.000Z
|
networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py
|
shildenbrand/GloboNetworkAPI
|
515d5e961456cee657c08c275faa1b69b7452719
|
[
"Apache-2.0"
] | 64
|
2015-08-05T21:26:29.000Z
|
2022-03-22T01:06:28.000Z
|
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.plugins.SDN.ODL.flows.acl import AclFlowBuilder
class TestSendFlowsWithTCPFlags(NetworkApiTestCase):
""" Class to test flows that have tcp flags on it """
def test_flow_with_ack_flag(self):
""" Try to send a flow with ACK flag """
acl = {
"kind": "acl_with_tcp_flags",
"rules": [{
"action": "permit",
"description": "ACK access",
"destination": "10.0.0.0/8",
"id": "300",
"l4-options": {
"flags": [
"ACK"
]
},
"owner": "networkapi",
"protocol": "tcp",
"source": "0.0.0.0/0"
}]
}
# Beryllium
flows = AclFlowBuilder(acl, environment=0, version='BERYLLIUM')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flag-match']['tcp-flag']
assert tcp_flag == 16
# Carbon
flows = AclFlowBuilder(acl, environment=0, version='CARBON')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 16
# Boron
flows = AclFlowBuilder(acl, environment=0, version='BORON')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 16
# Nitrogen
flows = AclFlowBuilder(acl, environment=0, version='NITROGEN')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 16
def test_flow_with_RST_flag(self):
""" Try to send a flow with RST flag """
acl = {
"kind": "acl_with_tcp_flags",
"rules": [{
"action": "permit",
"description": "RST access",
"destination": "10.0.0.0/8",
"id": "200",
"l4-options": {
"flags": [
"RST"
]
},
"owner": "networkapi",
"protocol": "tcp",
"source": "0.0.0.0/0"
}]
}
# Beryllium
flows = AclFlowBuilder(acl, environment=0, version='BERYLLIUM')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flag-match']['tcp-flag']
assert tcp_flag == 4
# Carbon
flows = AclFlowBuilder(acl, environment=0, version='CARBON')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 4
# Boron
flows = AclFlowBuilder(acl, environment=0, version='BORON')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 4
# Nitrogen
flows = AclFlowBuilder(acl, environment=0, version='NITROGEN')
flow = flows.build().next()
tcp_flag = flow['flow'][0]['match']['tcp-flags-match']['tcp-flags']
assert tcp_flag == 4
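# Reference sketch (not part of the original tests): the integers asserted above are
# the standard TCP flag bit values, so ACK maps to 16 and RST maps to 4.
TCP_FLAG_BITS = {
    'FIN': 1,
    'SYN': 2,
    'RST': 4,
    'PSH': 8,
    'ACK': 16,
    'URG': 32,
}
assert TCP_FLAG_BITS['ACK'] == 16 and TCP_FLAG_BITS['RST'] == 4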
| 33.377551
| 75
| 0.496484
| 3,146
| 0.961785
| 0
| 0
| 0
| 0
| 0
| 0
| 971
| 0.296851
|
12f2139184aea177b546923cc78d7b43a26b2e26
| 4,940
|
py
|
Python
|
Old/OpenCV Scripts/red_filtered_detector.py
|
multirotorsociety/SAFMC-19-D2-Autonomous-Drone
|
fd9f0fae5d7cbf618b327224e06a7f459612b4ca
|
[
"MIT"
] | 6
|
2019-04-01T02:38:40.000Z
|
2021-06-05T18:23:06.000Z
|
Old/OpenCV Scripts/red_filtered_detector.py
|
multirotorsociety/SAFMC-19-D2-Autonomous-Drone
|
fd9f0fae5d7cbf618b327224e06a7f459612b4ca
|
[
"MIT"
] | null | null | null |
Old/OpenCV Scripts/red_filtered_detector.py
|
multirotorsociety/SAFMC-19-D2-Autonomous-Drone
|
fd9f0fae5d7cbf618b327224e06a7f459612b4ca
|
[
"MIT"
] | 1
|
2019-09-01T08:58:28.000Z
|
2019-09-01T08:58:28.000Z
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
import time
from fractions import Fraction
from PIL import Image
#cap = cv2.VideoCapture(0)
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 24
camera.exposure_mode = 'off'
camera.exposure_compensation = -3
camera.drc_strength = 'off'
camera.still_stats = False
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(25, 16), Fraction(25,16))
rawCapture = PiRGBArray(camera, size=(426, 240))
# allow the camera to warmup
time.sleep(0.1)
# lower = [135, 130, 50]
# upper = [180, 200, 255]
# lower = [160, 100, 100]
# upper = [180, 255, 255]
# lower2 = [0, 100, 100]
# upper2 = [10, 255, 255]
#lower1 = [0, 50, 50]
#upper1 = [5, 255, 255]
out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))
# lower = np.array(lower, dtype = "uint8")
# upper = np.array(upper, dtype = "uint8")
# lower2 = np.array(lower2, dtype = "uint8")
# upper2 = np.array(upper2, dtype = "uint8")
#lower1 = np.array(lower1, dtype = "uint8")
#upper1 = np.array(upper1, dtype = "uint8")
for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
#print(camera.awb_gains)
#r, frame = cap.read()
for i in range(5): # Clears the 5 frame buffer
frame = img.array
height, width = frame.shape[:2]
centre = (int(width/2), int(height/2))
#frame = cv2.GaussianBlur(frame, (9, 9), 0)
#frame = cv2.medianBlur(frame,3)
#frame = cv2.GaussianBlur(frame, (9, 9), 0)
#mask = cv2.inRange(frame, lower, upper)
#mask2 = cv2.inRange(frame, lower2, upper2)
#mask2 = cv2.inRange(frame, lower1, upper1)
#mask = mask1 + mask2
#img_rec_red = cv2.bitwise_and(frame, frame, mask = mask)
#img_rec_redo = cv2.bitwise_and(frame, frame, mask = mask2)
#cv2.imshow("pre or1", img_rec_red)
#cv2.imshow("pre or2", img_rec_redo)
#img_rec_red = cv2.bitwise_or(img_rec_red, img_rec_redo)
b_channel = np.array(frame[:,:,0]).astype('float')
g_channel = np.array(frame[:,:,1]).astype('float')
r_channel = np.array(frame[:,:,2]).astype('float')
# #cv2.imshow('b_chan', b_channel)
# # cv2.imshow('g_chan', g_channel)
# # cv2.imshow('r_chan', r_channel)
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))
#img_rec_red2 = np.divide(r_channel, 255)
img_rec_red2 = np.divide(img_rec_red2,255)
#img_rec_red2 = np.square(img_rec_red2)
img_rec_red2[img_rec_red2 < 0.3] = 0
img_rec_red2 = img_rec_red2 * 255
img_rec_red2 = np.floor(img_rec_red2).astype('uint8')
#img_rec_red = cv2.cvtColor(img_rec_red, cv2.COLOR_BGR2GRAY)
#cv2.imshow('recred2', img_rec_red2)
ret, th = cv2.threshold(img_rec_red2,10,255,cv2.THRESH_BINARY)
#ret, th = cv2.threshold(r_channel.astype('uint8'),110,255,cv2.THRESH_BINARY)
#th = cv2.bitwise_not(th, th)
kernel = np.ones((5,5),np.uint8)
#th = cv2.erode(th, kernel)
th = cv2.dilate(th, kernel)
th = cv2.GaussianBlur(th, (5,5), 0)
try:
M = cv2.moments(th)
# calculate x,y coordinate of center
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# put text and highlight the center
cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
#cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.line(frame, centre, (cX, cY), (255,0,0), 2)
dX = cX - centre[0]
dY = centre[1] - cY
cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
print('Velocities: ' + str(dX) + "," + str(dY))
except:
print("No centre detected")
#kernel2 = np.ones((15,15),np.uint8)
#eroded_th = cv2.erode(dilated_th, kernel2)
#blurred_th = cv2.GaussianBlur(eroded_th.copy(), (9, 9), 0)
#eroded_th = cv2.bitwise_not(eroded_th,eroded_th)
#dilated_th = cv2.bitwise_not(dilated_th, dilated_th)
# circles = cv2.HoughCircles(th,cv2.HOUGH_GRADIENT, 1,1000,
# param1=40,param2=23,minRadius=20,maxRadius=0)
# try:
# circles = np.uint16(np.around(circles))
# for i in circles[0,:]:
# # draw the outer circle
# cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
# # draw the center of the circle
# cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)
# except:
# pass
cv2.imshow('original', frame)
#cv2.imshow('rec_red',img_rec_red)
cv2.imshow('detected circles',th)
out.write(frame)
k = cv2.waitKey(1)
rawCapture.truncate(0)
if k == 0xFF & ord("q"):
break
#cv2.destroyAllWindows()
#cap.release()
out.release()
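# Hedged sketch (not part of the original script): the centroid logic in the loop above
# uses image moments, where cX = M10/M00 and cY = M01/M00. The helper below repeats
# that computation on an arbitrary binary mask so it can be checked without a camera;
# it is illustrative only and is never called by the script.
def centroid_from_mask(mask):
    """Return the (x, y) centroid of a binary mask, or None if the mask is empty."""
    M = cv2.moments(mask)
    if M["m00"] == 0:
        return None
    return int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])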
| 33.154362
| 109
| 0.618421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,573
| 0.52085
|
12f21abc71f1092fae63b143257827c5624eebdf
| 2,965
|
py
|
Python
|
setup.py
|
Maven85/plugin.video.magenta-sport
|
e05eeea629295d79de7467d495eb0c20b3adb60b
|
[
"MIT"
] | null | null | null |
setup.py
|
Maven85/plugin.video.magenta-sport
|
e05eeea629295d79de7467d495eb0c20b3adb60b
|
[
"MIT"
] | null | null | null |
setup.py
|
Maven85/plugin.video.magenta-sport
|
e05eeea629295d79de7467d495eb0c20b3adb60b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Module: default
# Author: asciidisco
# Created on: 24.07.2017
# License: MIT https://goo.gl/WA1kby
"""Setup"""
from __future__ import unicode_literals
from os.path import abspath, dirname, join
from re import search
from sys import exit, version, version_info
from setuptools import find_packages, setup
REQUIRED_PYTHON_VERSION = (2, 7)
PACKAGES = find_packages()
INSTALL_DEPENDENCIES = []
SETUP_DEPENDENCIES = []
TEST_DEPENDENCIES = [
'nose',
'Kodistubs',
'httpretty',
'mock',
]
EXTRA_DEPENDENCIES = {
'dev': [
'nose',
'flake8',
'codeclimate-test-reporter',
'pylint',
'mccabe',
'pycodestyle',
'pyflakes',
'Kodistubs',
'httpretty',
'mock',
'requests',
'beautifulsoup4',
'pyDes',
'radon',
'Sphinx',
'sphinx_rtd_theme',
'm2r',
'kodi-release-helper',
'dennis',
'blessings',
'demjson',
'restructuredtext_lint',
'yamllint',
]
}
def get_addon_data():
"""Loads the Kodi plugin data from addon.xml"""
root_dir = dirname(abspath(__file__))
pathname = join(root_dir, 'addon.xml')
with open(pathname, 'rb') as addon_xml:
addon_xml_contents = addon_xml.read()
_id = search(
r'(?<!xml )id="(.+?)"',
addon_xml_contents).group(1)
author = search(
r'(?<!xml )provider-name="(.+?)"',
addon_xml_contents).group(1)
name = search(
r'(?<!xml )name="(.+?)"',
addon_xml_contents).group(1)
version = search(
r'(?<!xml )version="(.+?)"',
addon_xml_contents).group(1)
desc = search(
r'(?<!xml )description lang="en_GB">(.+?)<',
addon_xml_contents).group(1)
email = search(
r'(?<!xml )email>(.+?)<',
addon_xml_contents).group(1)
source = search(
        r'(?<!xml )source>(.+?)<',
addon_xml_contents).group(1)
return {
'id': _id,
'author': author,
'name': name,
'version': version,
'desc': desc,
'email': email,
'source': source,
}
if version_info < REQUIRED_PYTHON_VERSION:
exit('Python >= 2.7 is required. Your version:\n{0}'.format(version))
if __name__ == '__main__':
ADDON_DATA = get_addon_data()
setup(
name=ADDON_DATA.get('name'),
version=ADDON_DATA.get('version'),
author=ADDON_DATA.get('author'),
author_email=ADDON_DATA.get('email'),
description=ADDON_DATA.get('desc'),
packages=PACKAGES,
include_package_data=True,
install_requires=INSTALL_DEPENDENCIES,
setup_requires=SETUP_DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
extras_require=EXTRA_DEPENDENCIES,
test_suite='nose.collector',
)
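# Hedged sketch (not part of the original file): the regexes in get_addon_data() rely
# on the negative lookbehind (?<!xml ) so that version="1.0" inside the <?xml ...?>
# declaration is skipped and only the addon element's own version attribute is
# captured. The addon.xml fragment below is made up to illustrate that behaviour.
def _demo_version_lookbehind():
    sample = '<?xml version="1.0" encoding="UTF-8"?><addon id="plugin.demo" version="1.2.3">'
    return search(r'(?<!xml )version="(.+?)"', sample).group(1)  # -> '1.2.3'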
| 26.711712
| 73
| 0.551771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 847
| 0.285666
|
12f2a17d10d6e7d8016a1adfcae38305fb8b1df9
| 2,386
|
py
|
Python
|
franka_lcas_experiments/script/load_model_rtp.py
|
arsh09/franka_ros_lcas
|
b6211125436849d5c7def8ad96a384cc34f2f121
|
[
"Apache-2.0"
] | 2
|
2021-11-09T00:50:43.000Z
|
2021-11-15T09:50:47.000Z
|
franka_lcas_experiments/script/load_model_rtp.py
|
arsh09/franka_ros_lcas
|
b6211125436849d5c7def8ad96a384cc34f2f121
|
[
"Apache-2.0"
] | null | null | null |
franka_lcas_experiments/script/load_model_rtp.py
|
arsh09/franka_ros_lcas
|
b6211125436849d5c7def8ad96a384cc34f2f121
|
[
"Apache-2.0"
] | 1
|
2021-11-17T13:24:23.000Z
|
2021-11-17T13:24:23.000Z
|
import numpy as np
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.models import Model
import tensorflow as tf
from PIL import Image
from utils_rtp import ProMP
class Predictor:
def __init__(self, encoder_model_path, predictor_model_path):
self.all_phi = self.promp_train()
encoder_model = tf.keras.models.load_model(encoder_model_path)
self.encoder = Model(encoder_model.input, encoder_model.get_layer("bottleneck").output)
self.exp_model = tf.keras.models.load_model(predictor_model_path, compile=False)
def promp_train(self):
phi = ProMP().basis_func_gauss_glb()
zeros = np.zeros([phi.shape[0], 8])
h1 = np.hstack((phi, zeros, zeros, zeros, zeros, zeros, zeros))
h2 = np.hstack((zeros, phi, zeros, zeros, zeros, zeros, zeros))
h3 = np.hstack((zeros, zeros, phi, zeros, zeros, zeros, zeros))
h4 = np.hstack((zeros, zeros, zeros, phi, zeros, zeros, zeros))
h5 = np.hstack((zeros, zeros, zeros, zeros, phi, zeros, zeros))
h6 = np.hstack((zeros, zeros, zeros, zeros, zeros, phi, zeros))
h7 = np.hstack((zeros, zeros, zeros, zeros, zeros, zeros, phi))
vstack = np.vstack((h1, h2, h3, h4, h5, h6, h7))
vstack = tf.cast(vstack, tf.float32)
return vstack
def preprocess_image(self, image):
return np.asarray(image.resize((256, 256)))
def predict(self, image_numpy):
# image_numpy = np.expand_dims(image_numpy, axis=0)
latent_img = self.encoder.predict(image_numpy/255)
q_val_pred = self.exp_model.predict(latent_img)
traj_pred = np.matmul(self.all_phi, np.transpose(q_val_pred)).squeeze()
return traj_pred #np.reshape(traj_pred, (-1, 150))
if __name__ == "__main__":
ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions"
PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1"
image = np.load( "/home/arshad/catkin_ws/image_xy_rtp.npy" )
predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL)
traj = predictor.predict(image)
np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj)
print ("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
| 40.440678
| 138
| 0.690696
| 1,574
| 0.659681
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.213747
|
12f2a97e43141a9ad0fb868815aad72bb1ff0352
| 5,648
|
py
|
Python
|
sdv/tabular/ctgan.py
|
joanvaquer/SDV
|
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
|
[
"MIT"
] | null | null | null |
sdv/tabular/ctgan.py
|
joanvaquer/SDV
|
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
|
[
"MIT"
] | null | null | null |
sdv/tabular/ctgan.py
|
joanvaquer/SDV
|
83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28
|
[
"MIT"
] | null | null | null |
"""Wrapper around CTGAN model."""
from sdv.tabular.base import BaseTabularModel
class CTGAN(BaseTabularModel):
"""Model wrapping ``CTGANSynthesizer`` model.
Args:
field_names (list[str]):
List of names of the fields that need to be modeled
and included in the generated output data. Any additional
fields found in the data will be ignored and will not be
included in the generated output.
If ``None``, all the fields found in the data are used.
field_types (dict[str, dict]):
            Dictionary specifying the data types and subtypes
of the fields that will be modeled. Field types and subtypes
combinations must be compatible with the SDV Metadata Schema.
field_transformers (dict[str, str]):
            Dictionary specifying which transformers to use for each field.
Available transformers are:
* ``integer``: Uses a ``NumericalTransformer`` of dtype ``int``.
* ``float``: Uses a ``NumericalTransformer`` of dtype ``float``.
* ``categorical``: Uses a ``CategoricalTransformer`` without gaussian noise.
* ``categorical_fuzzy``: Uses a ``CategoricalTransformer`` adding gaussian noise.
* ``one_hot_encoding``: Uses a ``OneHotEncodingTransformer``.
* ``label_encoding``: Uses a ``LabelEncodingTransformer``.
* ``boolean``: Uses a ``BooleanTransformer``.
* ``datetime``: Uses a ``DatetimeTransformer``.
anonymize_fields (dict[str, str]):
Dict specifying which fields to anonymize and what faker
category they belong to.
primary_key (str):
Name of the field which is the primary key of the table.
constraints (list[Constraint, dict]):
List of Constraint objects or dicts.
table_metadata (dict or metadata.Table):
Table metadata instance or dict representation.
If given alongside any other metadata-related arguments, an
exception will be raised.
If not given at all, it will be built using the other
arguments or learned from the data.
epochs (int):
Number of training epochs. Defaults to 300.
log_frequency (boolean):
Whether to use log frequency of categorical levels in conditional
sampling. Defaults to ``True``.
embedding_dim (int):
Size of the random sample passed to the Generator. Defaults to 128.
gen_dim (tuple or list of ints):
            Size of the output samples for each one of the Residuals. A Residual Layer
will be created for each one of the values provided. Defaults to (256, 256).
dis_dim (tuple or list of ints):
Size of the output samples for each one of the Discriminator Layers. A Linear
Layer will be created for each one of the values provided. Defaults to (256, 256).
l2scale (float):
            Weight decay for the Adam Optimizer. Defaults to 1e-6.
batch_size (int):
Number of data samples to process in each step.
"""
_CTGAN_CLASS = None
_model = None
_DTYPE_TRANSFORMERS = {
'O': 'label_encoding'
}
def __init__(self, field_names=None, field_types=None, field_transformers=None,
anonymize_fields=None, primary_key=None, constraints=None, table_metadata=None,
epochs=300, log_frequency=True, embedding_dim=128, gen_dim=(256, 256),
dis_dim=(256, 256), l2scale=1e-6, batch_size=500):
super().__init__(
field_names=field_names,
primary_key=primary_key,
field_types=field_types,
anonymize_fields=anonymize_fields,
constraints=constraints,
table_metadata=table_metadata
)
try:
from ctgan import CTGANSynthesizer # Lazy import to make dependency optional
self._CTGAN_CLASS = CTGANSynthesizer
except ImportError as ie:
ie.msg += (
'\n\nIt seems like `ctgan` is not installed.\n'
'Please install it using:\n\n pip install sdv[ctgan]'
)
raise
self._embedding_dim = embedding_dim
self._gen_dim = gen_dim
self._dis_dim = dis_dim
self._l2scale = l2scale
self._batch_size = batch_size
self._epochs = epochs
self._log_frequency = log_frequency
def _fit(self, table_data):
"""Fit the model to the table.
Args:
table_data (pandas.DataFrame):
Data to be learned.
"""
self._model = self._CTGAN_CLASS(
embedding_dim=self._embedding_dim,
gen_dim=self._gen_dim,
dis_dim=self._dis_dim,
l2scale=self._l2scale,
batch_size=self._batch_size,
)
categoricals = [
field
for field, meta in self._metadata.get_fields().items()
if meta['type'] == 'categorical'
]
self._model.fit(
table_data,
epochs=self._epochs,
discrete_columns=categoricals,
log_frequency=self._log_frequency,
)
def _sample(self, num_rows):
"""Sample the indicated number of rows from the model.
Args:
num_rows (int):
Amount of rows to sample.
Returns:
pandas.DataFrame:
Sampled data.
"""
return self._model.sample(num_rows)
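# Hedged usage sketch (not part of the original module): assuming BaseTabularModel
# exposes public fit()/sample() wrappers around the _fit()/_sample() hooks above, a
# typical round trip looks like this. The DataFrame contents are made up and the
# function is illustrative only.
def _example_usage():
    import pandas as pd
    data = pd.DataFrame({
        'age': [23, 35, 41, 29],
        'country': ['US', 'FR', 'US', 'DE'],
    })
    model = CTGAN(epochs=10)
    model.fit(data)           # trains a CTGANSynthesizer on the table
    return model.sample(100)  # 100 synthetic rows as a DataFrame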
| 40.056738
| 97
| 0.599681
| 5,564
| 0.985127
| 0
| 0
| 0
| 0
| 0
| 0
| 3,672
| 0.650142
|
12f479c7b7668c843b94467ffeb73f441443785b
| 1,130
|
py
|
Python
|
cointrader/config.py
|
3con/cointrader
|
abb3d13d1105e11db0070a9052c45cb8a87f168c
|
[
"MIT"
] | 103
|
2017-03-10T07:23:12.000Z
|
2021-08-24T17:39:22.000Z
|
cointrader/config.py
|
altfund/cointrader-1
|
abb3d13d1105e11db0070a9052c45cb8a87f168c
|
[
"MIT"
] | 91
|
2017-03-11T06:23:09.000Z
|
2021-11-15T17:47:06.000Z
|
cointrader/config.py
|
fwolfst/cointrader
|
abb3d13d1105e11db0070a9052c45cb8a87f168c
|
[
"MIT"
] | 36
|
2017-03-23T17:48:08.000Z
|
2020-02-21T23:42:03.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import logging.config
if (sys.version_info > (3, 0)):
# Python 3 code in this block
import configparser
else:
# Python 2 code in this block
import ConfigParser as configparser
DEFAULT_CONFIG = ".cointrader.ini"
def get_path_to_config():
env = os.getenv("HOME")
return os.path.join(env, DEFAULT_CONFIG)
class Config(object):
def __init__(self, configfile=None):
self.verbose = False
self.market = "poloniex"
self.api_key = None
self.api_secret = None
if configfile:
logging.config.fileConfig(configfile.name)
config = configparser.ConfigParser()
config.readfp(configfile)
exchange = config.get("DEFAULT", "exchange")
self.api_key = config.get(exchange, "api_key")
self.api_secret = config.get(exchange, "api_secret")
@property
def api(self):
if not self.api_key or not self.api_secret:
raise RuntimeError("API not configured")
return self.api_key, self.api_secret
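# Hedged sketch (not part of the original module): Config expects an INI-style file
# whose DEFAULT section names the exchange and whose per-exchange section holds the
# API credentials. Note that __init__ also passes the same file to
# logging.config.fileConfig, so a real config additionally needs the loggers/handlers/
# formatters sections; they are omitted from this minimal, made-up example.
def _write_example_config(path):
    example = (
        "[DEFAULT]\n"
        "exchange = poloniex\n"
        "[poloniex]\n"
        "api_key = YOUR-KEY\n"
        "api_secret = YOUR-SECRET\n"
    )
    with open(path, "w") as f:
        f.write(example)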
| 25.681818
| 64
| 0.642478
| 716
| 0.633628
| 0
| 0
| 178
| 0.157522
| 0
| 0
| 195
| 0.172566
|
12f51cb5ac4eefb8f57e6dbd0a326e1ca9a0b225
| 712
|
py
|
Python
|
src/snakeoil/descriptors.py
|
Arusekk/snakeoil
|
aad28a50118223766e5308452b369f2c72b971b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/snakeoil/descriptors.py
|
Arusekk/snakeoil
|
aad28a50118223766e5308452b369f2c72b971b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/snakeoil/descriptors.py
|
Arusekk/snakeoil
|
aad28a50118223766e5308452b369f2c72b971b2
|
[
"BSD-3-Clause"
] | null | null | null |
"""Classes implementing the descriptor protocol."""
__all__ = ("classproperty",)
class classproperty:
"""Like the builtin :py:func:`property` but takes a single classmethod.
Essentially, it allows you to use a property on a class itself- not
just on its instances.
Used like this:
>>> from snakeoil.descriptors import classproperty
>>> class foo:
...
... @classproperty
... def test(cls):
... print("invoked")
... return True
>>> foo.test
invoked
True
>>> foo().test
invoked
True
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
| 20.342857
| 75
| 0.605337
| 627
| 0.880618
| 0
| 0
| 0
| 0
| 0
| 0
| 530
| 0.744382
|