| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

Each sampled row below is shown as its metadata fields, followed by the `content` field verbatim, followed by the derived line statistics.
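
The last three columns are statistics derived from `content`. A minimal sketch of how such columns could be recomputed follows; the exact recipe used to build the dataset is not stated in this dump, so the formulas below are assumptions.

```python
# Hypothetical helper: recompute the derived per-row statistics from `content`.
# The formulas are assumed (mean/max line length and fraction of alphanumeric
# characters); expect values close to, but not necessarily equal to, the stored columns.
def line_stats(content: str) -> dict:
    lines = content.split("\n")
    return {
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": (sum(c.isalnum() for c in content) / len(content)) if content else 0.0,
    }

print(line_stats("# should_error\nint.a = 1\n"))  # the 25-byte pyston row further down
```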

hexsha: 705fc5faf5b63443aa1c22a09ab52fb617d85d14 | size: 1,911 | ext: py | lang: Python
max_stars: tools/mo/openvino/tools/mo/ops/TFFFT.py | si-eun-kim/openvino @ 1db4446e2a6ead55d066e0b4e718fa37f509353a | ["Apache-2.0"] | count: 1,127 | events: 2018-10-15T14:36:58.000Z → 2020-04-20T09:29:44.000Z
max_issues: tools/mo/openvino/tools/mo/ops/TFFFT.py | si-eun-kim/openvino @ 1db4446e2a6ead55d066e0b4e718fa37f509353a | ["Apache-2.0"] | count: 439 | events: 2018-10-20T04:40:35.000Z → 2020-04-19T05:56:25.000Z
max_forks: tools/mo/openvino/tools/mo/ops/TFFFT.py | tuxedcat/openvino @ 5939cb1b363ebb56b73c2ad95d8899961a084677 | ["Apache-2.0"] | count: 414 | events: 2018-10-17T05:53:46.000Z → 2020-04-16T17:29:53.000Z
content:
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class TFFFT(Op):
"""
This operation is intended to read TF operations FFT, FFT2D, FFT3D, IFFT, IFFT2D, IFFT3D, RFFT, RFFT2D, RFFT3D,
IRFFT, IRFFT2D, IRFFT3D. The operation TFFFT has two attributes: an integer attribute num_of_dimensions and
a string attribute fft_kind.
If an operation is used to read FFT, FFT2D, or FFT3D, then the attribute 'fft_kind' is 'DFT'.
If an operation is used to read IFFT, IFFT2D, or IFFT3D, then the attribute 'fft_kind' is 'IDFT'.
If an operation is used to read RFFT, RFFT2D, or RFFT3D, then the attribute 'fft_kind' is 'RDFT'.
If an operation is used to read IRFFT, IRFFT2D, or IRFFT3D, then the attribute 'fft_kind' is 'IRDFT'.
    The attribute 'num_of_dimensions' is equal to the number of transformed axes, i.e. 1 for FFT, IFFT, RFFT, and IRFFT;
    2 for FFT2D, IFFT2D, RFFT2D, and IRFFT2D; 3 for FFT3D, IFFT3D, RFFT3D, and IRFFT3D.
    The transformation TFFFTToDFT converts the operation TFFFT into an MO operation according to the following rules:
1) FFT, FFT2D, FFT3D are converted into DFT;
2) IFFT, IFFT2D, IFFT3D are converted into IDFT;
3) RFFT, RFFT2D, RFFT3D are converted into RDFT;
4) IRFFT, IRFFT2D, IRFFT3D are converted into IRDFT.
"""
op = 'TFFFT'
enabled = False
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'op': self.op,
'out_ports_count': 1,
'in_ports_count': 1,
}
assert 'fft_kind' in attrs, 'Attribute fft_kind is not given for the operation TFFFT.'
assert 'num_of_dimensions' in attrs, 'Attribute num_of_dimensions is not given for the operation TFFFT.'
super().__init__(graph, mandatory_props, attrs)
avg_line_length: 47.775 | max_line_length: 116 | alphanum_fraction: 0.695447
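
The TFFFT docstring above spells out how the TensorFlow FFT-family ops map onto the `fft_kind` and `num_of_dimensions` attributes. For illustration, here is that mapping written out as a plain lookup table (a hypothetical helper, not part of the Model Optimizer code):

```python
# Restates the mapping from the TFFFT docstring: TF op name -> (fft_kind, num_of_dimensions).
TF_FFT_TO_ATTRS = {
    "FFT": ("DFT", 1), "FFT2D": ("DFT", 2), "FFT3D": ("DFT", 3),
    "IFFT": ("IDFT", 1), "IFFT2D": ("IDFT", 2), "IFFT3D": ("IDFT", 3),
    "RFFT": ("RDFT", 1), "RFFT2D": ("RDFT", 2), "RFFT3D": ("RDFT", 3),
    "IRFFT": ("IRDFT", 1), "IRFFT2D": ("IRDFT", 2), "IRFFT3D": ("IRDFT", 3),
}

fft_kind, num_of_dimensions = TF_FFT_TO_ATTRS["RFFT2D"]
assert (fft_kind, num_of_dimensions) == ("RDFT", 2)
```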

hexsha: 45d0f133e36e984c4521fcaf6e31398cfb7a6315 | size: 1,620 | ext: py | lang: Python
max_stars: python/lgbserver/lgbserver/test_model.py | titoeb/kfserving @ b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | count: 6 | events: 2022-02-15T21:54:19.000Z → 2022-02-16T21:18:54.000Z
max_issues: python/lgbserver/lgbserver/test_model.py | titoeb/kfserving @ b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | count: 7 | events: 2021-08-31T23:55:06.000Z → 2022-03-02T11:34:58.000Z
max_forks: python/lgbserver/lgbserver/test_model.py | titoeb/kfserving @ b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | count: 2 | events: 2021-12-16T10:32:07.000Z → 2022-02-28T17:08:52.000Z
content:
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lightgbm as lgb
import os
from sklearn.datasets import load_iris
from lgbserver import LightGBMModel
import pandas as pd
model_dir = os.path.join(os.path.dirname(__file__), "example_model", "model")
BST_FILE = "model.bst"
NTHREAD = 1
def test_model():
iris = load_iris()
y = iris['target']
X = pd.DataFrame(iris['data'], columns=iris['feature_names'])
dtrain = lgb.Dataset(X, label=y)
params = {
'objective': 'multiclass',
'metric': 'softmax',
'num_class': 3
}
lgb_model = lgb.train(params=params, train_set=dtrain)
model_file = os.path.join(model_dir, BST_FILE)
lgb_model.save_model(model_file)
model = LightGBMModel("model", model_dir, NTHREAD)
model.load()
request = {"x": {0: 1.1}, 'sepal_width_(cm)': {0: 3.5}, 'petal_length_(cm)': {0: 1.4},
'petal_width_(cm)': {0: 0.2}, 'sepal_length_(cm)': {0: 5.1}}
response = model.predict({"inputs": [request, request]})
import numpy
assert numpy.argmax(response["predictions"][0]) == 0
avg_line_length: 33.061224 | max_line_length: 90 | alphanum_fraction: 0.685802
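
The request payload in the test above is a dict of `{column: {row_index: value}}`, which matches the default orientation of pandas `DataFrame.to_dict()`. A small round-trip sketch, with the column names copied from the test and nothing else assumed about the server:

```python
import pandas as pd

# One row of iris-like features in the {column: {row_index: value}} layout used
# by the test above (the stray "x" column is kept as-is from the test).
request = {"x": {0: 1.1}, "sepal_width_(cm)": {0: 3.5}, "petal_length_(cm)": {0: 1.4},
           "petal_width_(cm)": {0: 0.2}, "sepal_length_(cm)": {0: 5.1}}

frame = pd.DataFrame(request)       # reconstructs a one-row DataFrame
assert frame.to_dict() == request   # to_dict()'s default orientation round-trips
```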

hexsha: f0a8edc242f064cd7b0300568153deb1f0b6c858 | size: 25 | ext: py | lang: Python
max_stars: test/tests/39.py | aisk/pyston @ ac69cfef0621dbc8901175e84fa2b5cb5781a646 | ["BSD-2-Clause", "Apache-2.0"] | count: 1 | events: 2020-02-06T14:28:45.000Z → 2020-02-06T14:28:45.000Z
max_issues: test/tests/39.py | aisk/pyston @ ac69cfef0621dbc8901175e84fa2b5cb5781a646 | ["BSD-2-Clause", "Apache-2.0"] | count: null | events: null
max_forks: test/tests/39.py | aisk/pyston @ ac69cfef0621dbc8901175e84fa2b5cb5781a646 | ["BSD-2-Clause", "Apache-2.0"] | count: 1 | events: 2020-02-06T14:29:00.000Z → 2020-02-06T14:29:00.000Z
content:
# should_error
int.a = 1
avg_line_length: 8.333333 | max_line_length: 14 | alphanum_fraction: 0.68

hexsha: 32d97faca52be7e65f444621a1dfd80d90be601e | size: 2,604 | ext: py | lang: Python
max_stars: tensorflow_datasets/translate/wmt14.py | sourcery-ai-bot/datasets @ b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | count: 1 | events: 2021-05-10T10:41:27.000Z → 2021-05-10T10:41:27.000Z
max_issues: tensorflow_datasets/translate/wmt14.py | sourcery-ai-bot/datasets @ b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | count: null | events: null
max_forks: tensorflow_datasets/translate/wmt14.py | sourcery-ai-bot/datasets @ b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | ["Apache-2.0"] | count: 1 | events: 2021-07-04T11:07:35.000Z → 2021-07-04T11:07:35.000Z
content:
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT14: Translate dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt14/translation-task.html"
_CITATION = """
@InProceedings{bojar-EtAl:2014:W14-33,
author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\v{s}},
title = {Findings of the 2014 Workshop on Statistical Machine Translation},
booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation},
month = {June},
year = {2014},
address = {Baltimore, Maryland, USA},
publisher = {Association for Computational Linguistics},
pages = {12--58},
url = {http://www.aclweb.org/anthology/W/W14/W14-3302}
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fr", "hi", "ru"]
]
class Wmt14Translate(wmt.WmtTranslate):
"""WMT 14 translation datasets for all {xx, "en"} language pairs."""
# Version history:
# 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
BUILDER_CONFIGS = [
wmt.WmtConfig( # pylint:disable=g-complex-comprehension
description="WMT 2014 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version=tfds.core.Version("1.0.0"),
) for l1, l2 in _LANGUAGE_PAIRS
]
@property
def _subsets(self):
return {
tfds.Split.TRAIN: [
"europarl_v7", "commoncrawl", "multiun",
"newscommentary_v9", "gigafren", "czeng_10", "yandexcorpus",
"wikiheadlines_hi", "wikiheadlines_ru", "hindencorp_01"
],
tfds.Split.VALIDATION: [
"newsdev2014", "newstest2013"
],
tfds.Split.TEST: [
"newstest2014"
]
}
avg_line_length: 36.676056 | max_line_length: 293 | alphanum_fraction: 0.665131
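
Wmt14Translate above registers one BUILDER_CONFIG per `(lang, "en")` pair. Assuming the usual TFDS naming convention (snake-cased class name plus a `src-tgt` config), loading one pair would look roughly like the sketch below; treat the dataset and split names as assumptions rather than documented API.

```python
import tensorflow_datasets as tfds

# Assumed names: "wmt14_translate" from the class name, "de-en" from _LANGUAGE_PAIRS above.
ds = tfds.load("wmt14_translate/de-en", split="validation")
for example in ds.take(1):
    print(example)  # expected: a dict of text features keyed by language code
```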

hexsha: a1bdd6b642a5f5ea5701af05f1fc9c7c86d40d72 | size: 1,751 | ext: py | lang: Python
max_stars: code/dictation.py | nkgfry/knausj_talon @ a2327dbfaf685f99a873f11a2005d8563814a7ce | ["Unlicense"] | count: null | events: null
max_issues: code/dictation.py | nkgfry/knausj_talon @ a2327dbfaf685f99a873f11a2005d8563814a7ce | ["Unlicense"] | count: null | events: null
max_forks: code/dictation.py | nkgfry/knausj_talon @ a2327dbfaf685f99a873f11a2005d8563814a7ce | ["Unlicense"] | count: null | events: null
content:
from talon import Module, ui, actions
mod = Module()
#Courtesy of https://github.com/dwiel/talon_community/blob/master/misc/dictation.py
#Port for Talon's new api + wav2letter
#dictionary of sentence ends. No space should appear after these.
sentence_ends = {
"." : ".",
"?" : "?",
"!" : "!",
#these are mapped with names since passing "\n" didn't work for reasons
"new-paragraph" : "\n\n",
"new-line" : "\n",
}
#dictionary of punctuation. no space before these.
punctuation = {
"," : ",",
":" : ":",
";" : ";",
}
def remove_dragon_junk(word):
return str(word).lstrip("\\").split("\\")[0]
class AutoFormat:
def __init__(self):
self.reset()
ui.register("app_deactivate", lambda app: self.reset())
ui.register("win_focus", lambda win: self.reset())
def reset(self):
self.caps = True
self.space = False
def insert(self, text):
for word in text.split():
            word = remove_dragon_junk(word)
is_sentence_end = False
is_punctuation = False
if word in sentence_ends:
word = sentence_ends[word]
is_sentence_end = True
elif word in punctuation:
word = punctuation[word]
#do nothing
is_punctuation = True
elif self.space:
actions.insert(" ")
if self.caps:
word = word.capitalize()
actions.insert(word)
self.space = "\n" not in word
self.caps = is_sentence_end
auto_format = AutoFormat()
@mod.action_class
class Actions():
def dictate(text: str):
"""Insert auto formatted text"""
auto_format.insert(text)
avg_line_length: 25.014286 | max_line_length: 83 | alphanum_fraction: 0.561965
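
AutoFormat.insert above decides spacing and capitalization word by word using the two dictionaries at the top of the file. Below is a standalone sketch of the same rules that accumulates into a string instead of calling Talon's actions.insert, so the behaviour can be checked without Talon installed.

```python
# Standalone sketch of the spacing/capitalization rules from AutoFormat.insert above.
sentence_ends = {".": ".", "?": "?", "!": "!", "new-paragraph": "\n\n", "new-line": "\n"}
punctuation = {",": ",", ":": ":", ";": ";"}

def auto_format(text: str) -> str:
    out, caps, space = [], True, False
    for word in text.split():
        is_sentence_end = word in sentence_ends
        if word in sentence_ends:
            word = sentence_ends[word]      # no space before or after these
        elif word in punctuation:
            word = punctuation[word]        # no space before these
        elif space:
            out.append(" ")
        if caps:
            word = word.capitalize()
        out.append(word)
        space = "\n" not in word
        caps = is_sentence_end
    return "".join(out)

print(auto_format("hello world . how are you ?"))  # -> "Hello world. How are you?"
```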

hexsha: a9def4d04e24db5fd53ebf3e662f04edc0200838 | size: 7,556 | ext: py | lang: Python
max_stars: encoder_models/ResNet.py | LindaSt/FeatureExtractor @ 74d96244ea869dbe359f6990e045ff971cde5d52 | ["MIT"] | count: null | events: null
max_issues: encoder_models/ResNet.py | LindaSt/FeatureExtractor @ 74d96244ea869dbe359f6990e045ff971cde5d52 | ["MIT"] | count: null | events: null
max_forks: encoder_models/ResNet.py | LindaSt/FeatureExtractor @ 74d96244ea869dbe359f6990e045ff971cde5d52 | ["MIT"] | count: null | events: null
content:
"""
Model definition adapted from: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import logging
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class _BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(_BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class _Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
expected_input_size = 224
def __init__(self, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
# if not ablate:
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
# if self.ablate:
# return x
# else:
# x = self.fc(x)
# return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a _ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(_BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
resnet18.expected_input_size = ResNet.expected_input_size
def resnet34(pretrained=False, **kwargs):
"""Constructs a _ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(_BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
resnet34.expected_input_size = ResNet.expected_input_size
def resnet50(pretrained=False, **kwargs):
"""Constructs a _ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(_Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
resnet50.expected_input_size = ResNet.expected_input_size
def resnet101(pretrained=False, **kwargs):
"""Constructs a _ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(_Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
resnet101.expected_input_size = ResNet.expected_input_size
def resnet152(pretrained=False, **kwargs):
"""Constructs a _ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(_Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)
except Exception as exp:
logging.warning(exp)
return model
resnet152.expected_input_size = ResNet.expected_input_size
avg_line_length: 30.224 | max_line_length: 105 | alphanum_fraction: 0.609449
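
The ResNet variant above stops at the flattened average-pool output (the final fully connected layer is commented out), so the factory functions return feature extractors. A short usage sketch, assuming PyTorch is installed and the module above is importable:

```python
import torch

# Assumes `resnet18` is imported from the ResNet module above.
model = resnet18(pretrained=False)
model.eval()
with torch.no_grad():
    batch = torch.randn(1, 3, model.expected_input_size, model.expected_input_size)
    features = model(batch)
print(features.shape)  # expected: torch.Size([1, 512]) for the _BasicBlock variants
```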

hexsha: f724bcbe159f2f3f6bd41ab81d7cfc844b8fbfab | size: 10,961 | ext: bzl | lang: Python
max_stars: internal/pkg_npm/pkg_npm.bzl | mr-tim/rules_nodejs @ 7648412d96828875343b0d9c74ddf4d7531eed72 | ["Apache-2.0"] | count: 1 | events: 2020-10-25T10:29:06.000Z → 2020-10-25T10:29:06.000Z
max_issues: internal/pkg_npm/pkg_npm.bzl | samschlegel/rules_nodejs @ 21836475c2294476a5a792d5fb0ee3e713f7c6fe | ["Apache-2.0"] | count: 8 | events: 2021-03-11T00:12:31.000Z → 2022-02-27T07:35:43.000Z
max_forks: internal/pkg_npm/pkg_npm.bzl | samschlegel/rules_nodejs @ 21836475c2294476a5a792d5fb0ee3e713f7c6fe | ["Apache-2.0"] | count: null | events: null
content:
"""npm packaging
Note, this is intended for sharing library code with non-Bazel consumers.
If all users of your library code use Bazel, they should just add your library
to the `deps` of one of their targets.
"""
load("//:providers.bzl", "DeclarationInfo", "JSNamedModuleInfo", "LinkablePackageInfo", "NodeContextInfo")
_DOC = """The pkg_npm rule creates a directory containing a publishable npm artifact.
Example:
```python
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_npm")
pkg_npm(
name = "my_package",
srcs = ["package.json"],
deps = [":my_typescript_lib"],
substitutions = {"//internal/": "//"},
)
```
You can use a pair of `// BEGIN-INTERNAL ... // END-INTERNAL` comments to mark regions of files that should be elided during publishing.
For example:
```javascript
function doThing() {
// BEGIN-INTERNAL
// This is a secret internal-only comment
doInternalOnlyThing();
// END-INTERNAL
}
```
With the Bazel stamping feature, pkg_npm will replace any placeholder version in your package with the actual version control tag.
See the [stamping documentation](https://github.com/bazelbuild/rules_nodejs/blob/master/docs/index.md#stamping)
Usage:
`pkg_npm` yields three labels. Build the package directory using the default label:
```sh
$ bazel build :my_package
Target //:my_package up-to-date:
bazel-out/fastbuild/bin/my_package
$ ls -R bazel-out/fastbuild/bin/my_package
```
Dry-run of publishing to npm, calling `npm pack` (it builds the package first if needed):
```sh
$ bazel run :my_package.pack
INFO: Running command line: bazel-out/fastbuild/bin/my_package.pack
my-package-name-1.2.3.tgz
$ tar -tzf my-package-name-1.2.3.tgz
```
Actually publish the package with `npm publish` (also builds first):
```sh
# Check login credentials
$ bazel run @nodejs//:npm_node_repositories who
# Publishes the package
$ bazel run :my_package.publish
```
You can pass arguments to npm by escaping them from Bazel using a double-hyphen, for example:
`bazel run my_package.publish -- --tag=next`
"""
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
PKG_NPM_ATTRS = {
"package_name": attr.string(
doc = """Optional package_name that this npm package may be imported as.""",
),
"srcs": attr.label_list(
doc = """Files inside this directory which are simply copied into the package.""",
allow_files = True,
),
"hide_build_files": attr.bool(
doc = """If set BUILD and BUILD.bazel files are prefixed with `_` in the npm package.
The default is True since npm packages that contain BUILD files don't work with
`yarn_install` and `npm_install` without a post-install step that deletes or renames them.
NB: Bazel has a change in https://github.com/bazelbuild/bazel/pull/10261
(expected in version 2.1) that adds .bazelignore
support for external repositories, which will make this attribute obsolete.""",
default = True,
),
"nested_packages": attr.label_list(
doc = """Other pkg_npm rules whose content is copied into this package.""",
allow_files = True,
),
"node_context_data": attr.label(
default = "@build_bazel_rules_nodejs//internal:node_context_data",
providers = [NodeContextInfo],
doc = "Internal use only",
),
"replace_with_version": attr.string(
doc = """If set this value is replaced with the version stamp data.
See the section on stamping in the README.""",
default = "0.0.0-PLACEHOLDER",
),
"substitutions": attr.string_dict(
doc = """Key-value pairs which are replaced in all the files while building the package.""",
),
"vendor_external": attr.string_list(
doc = """External workspaces whose contents should be vendored into this workspace.
Avoids 'external/foo' path segments in the resulting package.""",
),
"deps": attr.label_list(
doc = """Other targets which produce files that should be included in the package, such as `rollup_bundle`""",
allow_files = True,
),
"_npm_script_generator": attr.label(
default = Label("//internal/pkg_npm:npm_script_generator"),
cfg = "host",
executable = True,
),
"_packager": attr.label(
default = Label("//internal/pkg_npm:packager"),
cfg = "host",
executable = True,
),
"_run_npm_template": attr.label(
default = Label("@nodejs//:run_npm.sh.template"),
allow_single_file = True,
),
}
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
PKG_NPM_OUTPUTS = {
"pack": "%{name}.pack",
"publish": "%{name}.publish",
}
# Takes a depset of files and returns a corresponding list of file paths without any files
# that aren't part of the specified package path. Also includes files from external repositories
# that are explicitly specified in the vendor_external list.
def _filter_out_external_files(ctx, files, package_path):
result = []
for file in files:
# NB: package_path may be an empty string
if file.short_path.startswith(package_path) and not file.short_path.startswith("../"):
result.append(file.path)
else:
for v in ctx.attr.vendor_external:
if file.short_path.startswith("../%s/" % v):
result.append(file.path)
return result
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
def create_package(ctx, deps_files, nested_packages):
"""Creates an action that produces the npm package.
It copies srcs and deps into the artifact and produces the .pack and .publish
scripts.
Args:
ctx: the skylark rule context
deps_files: list of files to include in the package which have been
specified as dependencies
nested_packages: list of TreeArtifact outputs from other actions which are
to be nested inside this package
Returns:
The tree artifact which is the publishable directory.
"""
stamp = ctx.attr.node_context_data[NodeContextInfo].stamp
all_files = deps_files + ctx.files.srcs
if not stamp and len(all_files) == 1 and all_files[0].is_directory and len(ctx.files.nested_packages) == 0:
        # Special case where there is a single dep that is a directory artifact and there are no
# source files or nested_packages; in that case we assume the package is contained within
# that single directory and there is no work to do
package_dir = all_files[0]
_create_npm_scripts(ctx, package_dir)
return package_dir
package_dir = ctx.actions.declare_directory(ctx.label.name)
package_path = ctx.label.package
# List of dependency sources which are local to the package that defines the current
    # target. Also include files from external repositories that are explicitly specified in
    # the vendor_external list. We only want to package deps files which are inside of the
    # current package unless explicitly specified.
filtered_deps_sources = _filter_out_external_files(ctx, deps_files, package_path)
args = ctx.actions.args()
args.use_param_file("%s", use_always = True)
args.add(package_dir.path)
args.add(package_path)
args.add_joined([s.path for s in ctx.files.srcs], join_with = ",", omit_if_empty = False)
args.add(ctx.bin_dir.path)
args.add(ctx.genfiles_dir.path)
args.add_joined(filtered_deps_sources, join_with = ",", omit_if_empty = False)
args.add_joined([p.path for p in nested_packages], join_with = ",", omit_if_empty = False)
args.add(ctx.attr.substitutions)
args.add(ctx.attr.replace_with_version)
args.add(ctx.version_file.path if stamp else "")
args.add_joined(ctx.attr.vendor_external, join_with = ",", omit_if_empty = False)
args.add("1" if ctx.attr.hide_build_files else "0")
inputs = ctx.files.srcs + deps_files + nested_packages
# The version_file is an undocumented attribute of the ctx that lets us read the volatile-status.txt file
# produced by the --workspace_status_command. That command will be executed whenever
# this action runs, so we get the latest version info on each execution.
# See https://github.com/bazelbuild/bazel/issues/1054
if stamp:
inputs.append(ctx.version_file)
ctx.actions.run(
progress_message = "Assembling npm package %s" % package_dir.short_path,
mnemonic = "AssembleNpmPackage",
executable = ctx.executable._packager,
inputs = inputs,
outputs = [package_dir],
arguments = [args],
)
_create_npm_scripts(ctx, package_dir)
return package_dir
def _create_npm_scripts(ctx, package_dir):
args = ctx.actions.args()
args.add_all([
package_dir.path,
ctx.outputs.pack.path,
ctx.outputs.publish.path,
ctx.file._run_npm_template.path,
])
ctx.actions.run(
progress_message = "Generating npm pack & publish scripts",
mnemonic = "GenerateNpmScripts",
executable = ctx.executable._npm_script_generator,
inputs = [ctx.file._run_npm_template, package_dir],
outputs = [ctx.outputs.pack, ctx.outputs.publish],
arguments = [args],
# Must be run local (no sandbox) so that the pwd is the actual execroot
# in the script which is used to generate the path in the pack & publish
# scripts.
execution_requirements = {"local": "1"},
)
def _pkg_npm(ctx):
deps_files_depsets = []
for dep in ctx.attr.deps:
# Collect whatever is in the "data"
deps_files_depsets.append(dep.data_runfiles.files)
# Only collect DefaultInfo files (not transitive)
deps_files_depsets.append(dep.files)
# All direct & transitive JavaScript-producing deps
# TODO: switch to JSModuleInfo when it is available
if JSNamedModuleInfo in dep:
deps_files_depsets.append(dep[JSNamedModuleInfo].sources)
        # Include all transitive declarations
if DeclarationInfo in dep:
deps_files_depsets.append(dep[DeclarationInfo].transitive_declarations)
# Note: to_list() should be called once per rule!
deps_files = depset(transitive = deps_files_depsets).to_list()
package_dir = create_package(ctx, deps_files, ctx.files.nested_packages)
package_dir_depset = depset([package_dir])
result = [
DefaultInfo(
files = package_dir_depset,
runfiles = ctx.runfiles([package_dir]),
),
]
if ctx.attr.package_name:
result.append(LinkablePackageInfo(
package_name = ctx.attr.package_name,
path = package_dir.path,
files = package_dir_depset,
))
return result
pkg_npm = rule(
implementation = _pkg_npm,
attrs = PKG_NPM_ATTRS,
doc = _DOC,
outputs = PKG_NPM_OUTPUTS,
)
avg_line_length: 36.055921 | max_line_length: 136 | alphanum_fraction: 0.684609

hexsha: baea87738fefd5092b28cd6516d4229089db5195 | size: 4,189 | ext: py | lang: Python
max_stars: sponge-integration-tests/examples/core/rules_immediate_no_duration.py | mnpas/sponge @ 7190f23ae888bbef49d0fbb85157444d6ea48bcd | ["Apache-2.0"] | count: 9 | events: 2017-12-16T21:48:57.000Z → 2022-01-06T12:22:24.000Z
max_issues: sponge-integration-tests/examples/core/rules_immediate_no_duration.py | mnpas/sponge @ 7190f23ae888bbef49d0fbb85157444d6ea48bcd | ["Apache-2.0"] | count: 3 | events: 2020-12-18T11:56:46.000Z → 2022-03-31T18:37:10.000Z
max_forks: sponge-integration-tests/examples/core/rules_immediate_no_duration.py | mnpas/sponge @ 7190f23ae888bbef49d0fbb85157444d6ea48bcd | ["Apache-2.0"] | count: 2 | events: 2019-12-29T16:08:32.000Z → 2020-06-15T14:05:34.000Z
content:
"""
Sponge Knowledge Base
Using rules - immediate, no duration
Note that auto-enable is turned off in the configuration.
"""
from org.openksavi.sponge.examples.util import CorrelationEventsLog
def onInit():
global correlationEventsLog
# Variables for assertions only
correlationEventsLog = CorrelationEventsLog()
sponge.setVariable("correlationEventsLog", correlationEventsLog)
def runRule(rule):
rule.logger.debug("Sequence: {}", SpongeUtils.getAbbreviatedEventSequenceString(rule))
global correlationEventsLog
correlationEventsLog.addEvents(rule.meta.name, rule)
# Naming F(irst), L(ast), A(ll), N(one)
# F(irst)F(irst)F(irst)
class RuleFFF(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2", "e3 :first"])
def onRun(self, event):
runRule(self)
# F(irst)F(irst)L(ast)
class RuleFFL(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2", "e3 :last"])
def onRun(self, event):
runRule(self)
# F(irst)F(irst)A(ll)
class RuleFFA(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2", "e3 :all"])
def onRun(self, event):
runRule(self)
# F(irst)F(irst)N(one)
class RuleFFN(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2", "e4 :none"])
def onRun(self, event):
runRule(self)
# F(irst)L(ast)F(irst)
class RuleFLF(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :last", "e3 :first"])
def onRun(self, event):
runRule(self)
# F(irst)L(ast)L(ast)
class RuleFLL(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :last", "e3 :last"])
def onRun(self, event):
runRule(self)
# F(irst)L(ast)A(ll)
class RuleFLA(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :last", "e3 :all"])
def onRun(self, event):
runRule(self)
# F(irst)L(ast)N(one)
class RuleFLN(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :last", "e4 :none"])
def onRun(self, event):
runRule(self)
# F(irst)A(ll)F(irst)
class RuleFAF(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :all", "e3 :first"])
def onRun(self, event):
runRule(self)
# F(irst)A(ll)L(ast)
class RuleFAL(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :all", "e3 :last"])
def onRun(self, event):
runRule(self)
# F(irst)A(ll)A(ll)
class RuleFAA(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :all", "e3 :all"])
def onRun(self, event):
runRule(self)
# F(irst)A(ll)N(one)
class RuleFAN(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :all", "e5 :none"])
def onRun(self, event):
runRule(self)
# F(irst)N(one)F(irst)
class RuleFNF(Rule):
def onConfigure(self):
self.withEvents(["e1", "e5 :none", "e3"])
def onRun(self, event):
runRule(self)
# F(irst)N(one)L(ast)
class RuleFNL(Rule):
def onConfigure(self):
self.withEvents(["e1", "e5 :none", "e3 :last"])
def onRun(self, event):
runRule(self)
# F(irst)N(one)A(ll)
class RuleFNA(Rule):
def onConfigure(self):
self.withEvents(["e1", "e5 :none", "e3 :all"])
def onRun(self, event):
runRule(self)
class RuleFNFReject(Rule):
def onConfigure(self):
self.withEvents(["e1", "e2 :none", "e3"])
def onRun(self, event):
runRule(self)
def onLoad():
sponge.enableAll(
RuleFFF,
RuleFFA,
RuleFNF, RuleFNFReject,
RuleFNA,
RuleFLF,
RuleFLA,
RuleFAF,
RuleFAA
)
def onStartup():
sponge.event("e1").set("label", "1").send()
sponge.event("e2").set("label", "2").send()
sponge.event("e2").set("label", "3").send()
sponge.event("e3").set("label", "4").send()
sponge.event("e2").set("label", "5").send()
sponge.event("e3").set("label", "6").send()
sponge.event("e3").set("label", "7").send()
avg_line_length: 27.201299 | max_line_length: 91 | alphanum_fraction: 0.570542

hexsha: 80cf8c701d656b2489782c960e49fc157f74d9da | size: 1,157 | ext: py | lang: Python
max_stars: cloudmesh_examples/example_1/cloudmesh_task/parallel.py | JulienPalard/cloudmesh @ 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | count: null | events: null
max_issues: cloudmesh_examples/example_1/cloudmesh_task/parallel.py | JulienPalard/cloudmesh @ 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | count: 4 | events: 2021-06-08T20:20:08.000Z → 2022-03-11T23:30:22.000Z
max_forks: cloudmesh_examples/example_1/cloudmesh_task/parallel.py | JulienPalard/cloudmesh @ 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | count: null | events: null
content:
from pprint import pprint
from cloudmesh_base.util import banner
from datetime import timedelta, datetime
def Sequential(execution_array, f, **kwargs):
print "ARGS", kwargs
result = {}
for element in execution_array:
print "submitting -> {0}".format(element)
result[element] = f(element, **kwargs)
return result
def Parallel(execution_array, f, **kwargs):
task = {}
for element in execution_array:
print "submitting -> {0}".format(element)
task[element] = f.apply_async(args=(element,),
kwargs=kwargs,
expires=10)
banner("tasks", c=".")
pprint(task)
result = {}
for element in execution_array:
print "getting -> {0}".format(element), str(task[element])
result[element] = task[element].get(propagate=False, no_ack=False)
banner("info")
print "INFO", task[element].info
banner("result")
print "RESULT", task[element].result
banner("backend")
print "BACKEND", task[element].backend
# print "OOOO", result[element]
return result
avg_line_length: 28.925 | max_line_length: 74 | alphanum_fraction: 0.594641
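
parallel.py above is Python 2 (bare print statements), and the Parallel path additionally expects a Celery-style task object exposing apply_async()/get(). The Sequential path only needs a plain callable; here is a rough Python 3 rendering of it, for illustration only.

```python
# Python 3 sketch of the Sequential helper above; the Celery-backed Parallel
# variant is not reproduced here.
def sequential(execution_array, f, **kwargs):
    result = {}
    for element in execution_array:
        print("submitting -> {0}".format(element))
        result[element] = f(element, **kwargs)
    return result

print(sequential(["host-a", "host-b"], lambda host, **kw: "pinged {0}".format(host)))
```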

hexsha: c573be985b9a4e29403da492acd03f4993576e48 | size: 1,346 | ext: py | lang: Python
max_stars: raiden/utils/formatting.py | christianbrb/raiden @ 64f0715af076747b293671157e2cbbd235cab81b | ["MIT"] | count: null | events: null
max_issues: raiden/utils/formatting.py | christianbrb/raiden @ 64f0715af076747b293671157e2cbbd235cab81b | ["MIT"] | count: null | events: null
max_forks: raiden/utils/formatting.py | christianbrb/raiden @ 64f0715af076747b293671157e2cbbd235cab81b | ["MIT"] | count: null | events: null
content:
import functools
import eth_utils
from eth_utils import (
encode_hex,
is_0x_prefixed,
is_checksum_address,
remove_0x_prefix,
to_canonical_address,
)
from raiden.exceptions import InvalidChecksummedAddress
from raiden.utils.typing import Address, Iterable, List, Optional, TokenAddress, Union
def address_checksum_and_decode(addr: str) -> Address:
""" Accepts a string address and turns it into binary.
    Makes sure that the string address provided is 0x prefixed and
    checksummed according to the EIP55 specification.
"""
if not is_0x_prefixed(addr):
raise InvalidChecksummedAddress("Address must be 0x prefixed")
if not is_checksum_address(addr):
raise InvalidChecksummedAddress("Address must be EIP55 checksummed")
return to_canonical_address(addr)
def pex(data: bytes) -> str:
return remove_0x_prefix(encode_hex(data))[:8]
def lpex(lst: Iterable[bytes]) -> List[str]:
return [pex(l) for l in lst]
def optional_address_to_string(
address: Optional[Union[Address, TokenAddress]] = None,
) -> Optional[str]:
if address is None:
return None
return to_checksum_address(address)
# to_checksum_address is slow, so let's cache the last 1000 results
to_checksum_address = functools.lru_cache(maxsize=1000)(eth_utils.to_checksum_address)
avg_line_length: 26.92 | max_line_length: 86 | alphanum_fraction: 0.742942
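
The last line of the file above memoizes eth_utils.to_checksum_address by wrapping it in functools.lru_cache directly, rather than decorating a new function. The same pattern applied to a stand-in function, so the example runs without eth_utils installed:

```python
import functools

def slow_normalize(address: str) -> str:
    # Stand-in for an expensive call such as eth_utils.to_checksum_address.
    return address.lower()

# Cache the last 1000 results, mirroring the raiden snippet above.
slow_normalize = functools.lru_cache(maxsize=1000)(slow_normalize)

slow_normalize("0xABCDEF")
slow_normalize("0xABCDEF")          # second call is served from the cache
print(slow_normalize.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=1000, currsize=1)
```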

hexsha: f91385972d448f693c395d32cad3a9b4ffcc46bc | size: 1,696 | ext: py | lang: Python
max_stars: sdk/aks/azure-mgmt-devspaces/azure/mgmt/devspaces/models/tracked_resource.py | iscai-msft/azure-sdk-for-python @ 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | count: 8 | events: 2021-01-13T23:44:08.000Z → 2021-03-17T10:13:36.000Z
max_issues: sdk/aks/azure-mgmt-devspaces/azure/mgmt/devspaces/models/tracked_resource.py | iscai-msft/azure-sdk-for-python @ 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | count: 226 | events: 2019-07-24T07:57:21.000Z → 2019-10-15T01:07:24.000Z
max_forks: sdk/aks/azure-mgmt-devspaces/azure/mgmt/devspaces/models/tracked_resource.py | iscai-msft/azure-sdk-for-python @ 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | count: 2 | events: 2020-05-21T22:51:22.000Z → 2020-05-26T20:53:01.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param tags: Tags for the Azure resource.
:type tags: dict[str, str]
:param location: Region where the Azure resource is located.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
avg_line_length: 33.254902 | max_line_length: 76 | alphanum_fraction: 0.567807
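
The _validation/_attribute_map pair above is the usual msrest model convention: _attribute_map maps Python attribute names to wire keys, and readonly entries in _validation are populated only by the server. A framework-free sketch of that idea (msrest does the real work; the helper below is purely illustrative):

```python
# Illustration only: serialize writable attributes under their wire keys and
# skip readonly ones, as expressed by _attribute_map/_validation above.
validation = {"id": {"readonly": True}, "name": {"readonly": True}, "type": {"readonly": True}}
attribute_map = {
    "id": {"key": "id", "type": "str"},
    "name": {"key": "name", "type": "str"},
    "type": {"key": "type", "type": "str"},
    "tags": {"key": "tags", "type": "{str}"},
    "location": {"key": "location", "type": "str"},
}

def serialize(values: dict) -> dict:
    return {
        meta["key"]: values[attr]
        for attr, meta in attribute_map.items()
        if attr in values and not validation.get(attr, {}).get("readonly")
    }

print(serialize({"location": "westeurope", "tags": {"env": "dev"}, "id": "ignored"}))
# -> {'tags': {'env': 'dev'}, 'location': 'westeurope'}
```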

hexsha: 657d64c74e24aaeb11a86dffe5ab27a47d96adf9 | size: 31,894 | ext: py | lang: Python
max_stars: tools/third_party/pytest/testing/test_collection.py | ziransun/wpt @ ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | count: 9 | events: 2019-04-01T10:57:10.000Z → 2021-12-02T11:12:06.000Z
max_issues: tools/third_party/pytest/testing/test_collection.py | ziransun/wpt @ ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | count: 33 | events: 2019-03-21T10:18:37.000Z → 2022-03-23T13:21:40.000Z
max_forks: tools/third_party/pytest/testing/test_collection.py | ziransun/wpt @ ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | count: 11 | events: 2019-04-12T01:20:16.000Z → 2021-11-23T17:25:02.000Z
content:
from __future__ import absolute_import, division, print_function
import pprint
import sys
import pytest
import _pytest._code
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv
class TestCollector(object):
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol(
"""
class TestClass(object):
def test_foo():
pass
"""
)
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
"""
)
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, testdir):
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
testdir.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS(object):
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", "test_notfound.py")
tmpdir.ensure("dist", "test_notfound.py")
tmpdir.ensure("_darcs", "test_notfound.py")
tmpdir.ensure("CVS", "test_notfound.py")
tmpdir.ensure("{arch}", "test_notfound.py")
tmpdir.ensure(".whatever", "test_notfound.py")
tmpdir.ensure(".bzr", "test_notfound.py")
tmpdir.ensure("normal", "test_found.py")
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
testdir.tmpdir.ensure("virtual", bindir, fname)
testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
assert "test_invenv" not in result.stdout.str()
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
result = testdir.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
testdir.tmpdir.ensure(".virtual", bindir, fname)
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" not in result.stdout.str()
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test__in_venv(self, testdir, fname):
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path = testdir.tmpdir.mkdir("venv")
assert _in_venv(base_path) is False
# with bin/activate, totally a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
def test_custom_norecursedirs(self, testdir):
testdir.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay(object):
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback(object):
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report():
outcome = yield
rep = outcome.get_result()
rep.headerlines += ["header1"]
outcome.force_result(rep)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests(object):
def test_ignore_collect_path(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines("*1 passed*")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines("*collected 0 items*")
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
"""
)
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
"""
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
"""
)
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
"""
)
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession(object):
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
# root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
# assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def get_reported_items(self, hookrec):
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
x
for call in calls
for x in call.report.result
if isinstance(x, pytest.Item)
]
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.result[0].name == 'test_func'"),
]
)
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
normid = p.basename + "::TestClass::()::test_method"
for id in [
p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
"""
% p.basename
)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == collector.session.fspath"),
(
"pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'",
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"),
]
)
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
]
)
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
class Test_getinitialnodes(object):
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems(object):
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile(
"""
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = testdir.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords(object):
def test_no_under(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
values = list(modcol.keywords)
assert modcol.name in values
for x in values:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
"""
)
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
Verify collection is aborted once maxfail errors are encountered ignoring
further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
)
assert "test_03" not in res.stdout.str()
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
Verify the test run aborts due to collection errors even if maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_continue_on_collection_errors(testdir):
"""
Verify tests are executed even when collection errors occur when the
--continue-on-collection-errors flag is set
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
)
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
def test_fixture_scope_sibling_conftests(testdir):
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
foo_path = testdir.mkpydir("foo")
foo_path.join("conftest.py").write(
_pytest._code.Source(
"""
import pytest
@pytest.fixture
def fix():
return 1
"""
)
)
foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
food_path = testdir.mkpydir("food")
food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
res = testdir.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"*ERROR at setup of test_food*",
"E*fixture 'fix' not found",
"*1 passed, 1 error*",
]
)
| 33.750265
| 88
| 0.570985
|
d38875338a7e8d485b45914e7943f12541f2b2b6
| 4,470
|
py
|
Python
|
flightlib/setup.py
|
arsimone/flightmare
|
c546d9d54970c7ad803f3ada4c2ea64c51ab7287
|
[
"MIT"
] | null | null | null |
flightlib/setup.py
|
arsimone/flightmare
|
c546d9d54970c7ad803f3ada4c2ea64c51ab7287
|
[
"MIT"
] | null | null | null |
flightlib/setup.py
|
arsimone/flightmare
|
c546d9d54970c7ad803f3ada4c2ea64c51ab7287
|
[
"MIT"
] | null | null | null |
import os
import glob
import shutil
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
FLIGHTLIB_EXTERNAL_FILES = os.environ["FLIGHTMARE_PATH"] + \
"/flightlib/externals/"
# --------------------------------
# remove cached external files
# a hack to solve some cmake error when using "pip install ."
try:
for i, p in enumerate(glob.glob(os.path.join(FLIGHTLIB_EXTERNAL_FILES, "*"))):
shutil.rmtree(p)
print("Removing some cache file: ", p)
        except Exception:
            pass  # best-effort cleanup of cached external files; ignore failures
FLIGHTLIB_BUILD_FILES = os.environ["FLIGHTMARE_PATH"] + \
"/flightlib/build/"
# --------------------------------
# remove cached build files
# a hack to solve some cmake error when using "pip install ."
try:
for i, p in enumerate(glob.glob(os.path.join(FLIGHTLIB_BUILD_FILES, "*"))):
shutil.rmtree(p)
print("Removing some cache file: ", p)
        except Exception:
            pass  # best-effort cleanup of cached build files; ignore failures
# --------------------------------
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(
self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j4']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] +
cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] +
build_args, cwd=self.build_temp)
setup(
name='flightgym',
version='0.0.1',
author='Yunlong Song',
author_email='song@ifi.uzh.ch',
description='Flightmare: A Quadrotor Simulator.',
long_description='',
ext_modules=[CMakeExtension('flightlib')],
#install_requires=['gym==0.11', 'ruamel.yaml',
# 'numpy', 'stable_baselines==2.10.1'],
install_requires=['gym==0.17.3', 'ruamel.yaml',
'numpy', 'stable_baselines==2.10.1'],
cmdclass=dict(build_ext=CMakeBuild),
include_package_data=True,
zip_safe=False,
)
# setup(name='flightgym',
# version='0.0.1',
# author="Yunlong Song",
# author_email='song@ifi.uzh.ch',
# description="Flightmare: A Quadrotor Simulator",
# long_description='',
# packages=[''],
# package_dir={'': './build/'},
# package_data={'': ['flightgym.cpython-36m-x86_64-linux-gnu.so']},
#       zip_safe=True,
# url=None,
# )
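# Illustrative usage note (not part of the original file): the CMake build above
# reads the FLIGHTMARE_PATH environment variable, so a typical install sequence
# is assumed to look roughly like the following (paths are placeholders):
#
#     export FLIGHTMARE_PATH=/path/to/flightmare
#     cd $FLIGHTMARE_PATH/flightlib
#     pip install .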
| 36.341463
| 94
| 0.551007
|
ba490aec9f90b1fad3b55a0bf950a61ec76b6a25
| 1,471
|
py
|
Python
|
Tools/Preprocess.py
|
Ideas-Laboratory/DRImplicitVecXform
|
2ec0c64fb098e29ce74929f5e19bce90b2f5791c
|
[
"MIT"
] | 2
|
2020-09-14T03:29:39.000Z
|
2021-03-02T03:28:36.000Z
|
Tools/Preprocess.py
|
VisLabWang/DRImplicitVecXform
|
2ec0c64fb098e29ce74929f5e19bce90b2f5791c
|
[
"MIT"
] | null | null | null |
Tools/Preprocess.py
|
VisLabWang/DRImplicitVecXform
|
2ec0c64fb098e29ce74929f5e19bce90b2f5791c
|
[
"MIT"
] | 1
|
2021-04-16T20:15:46.000Z
|
2021-04-16T20:15:46.000Z
|
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import euclidean_distances
def normalize(x, low=-1, up=1):
data_shape = x.shape
n = data_shape[0]
dim = data_shape[1]
new_x = np.zeros(data_shape)
min_v = np.zeros((1, dim))
max_v = np.zeros((1, dim))
for i in range(0, dim):
min_v[0, i] = min(x[:, i])
max_v[0, i] = max(x[:, i])
for i in range(0, n):
for j in range(0, dim):
if min_v[0, j] == max_v[0, j]:
new_x[i, j] = 0
continue
new_x[i, j] = (x[i, j]-min_v[0, j])/(max_v[0, j]-min_v[0, j])*(up-low)+low
return new_x
def knn(data, k):
nbr_s = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(data)
distance, index = nbr_s.kneighbors(data)
return index
def has_repeat(X):
(n, m) = X.shape
    D = euclidean_distances(X)  # pairwise distances (computed but unused; duplicates are detected by exact comparison below)
repeat = False
repeat_index = []
for i in range(0, n):
for j in range(0, n):
if i == j:
continue
else:
temp_bool = True
for k in range(0, m):
if X[i, k] != X[j, k]:
temp_bool = False
if temp_bool:
temp_number = max(i, j)
                    if temp_number not in repeat_index:
repeat_index.append(max(i, j))
repeat = True
return repeat
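# Illustrative usage sketch (not part of the original file): combines the
# helpers above on a small random matrix. The shapes, the seed and k=3 are
# arbitrary assumptions chosen only for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    sample = rng.rand(10, 3)                  # 10 points in 3 dimensions
    scaled = normalize(sample, low=-1, up=1)  # rescale each column to [-1, 1]
    neighbors = knn(scaled, k=3)              # indices of the 3 nearest neighbours per point
    print(neighbors.shape)                    # (10, 3)
    print(has_repeat(scaled))                 # False for continuous random data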
| 26.267857
| 86
| 0.49966
|
949e4e6527a76c7ce3258b821c524a3eed7ad123
| 3,090
|
py
|
Python
|
02_holdout_set_y_cross_validation/code/model.py
|
sebastiandres/mat281_m04_data_science
|
adc86f7a30dd87e922be5a7396d3802319b79e29
|
[
"MIT"
] | null | null | null |
02_holdout_set_y_cross_validation/code/model.py
|
sebastiandres/mat281_m04_data_science
|
adc86f7a30dd87e922be5a7396d3802319b79e29
|
[
"MIT"
] | null | null | null |
02_holdout_set_y_cross_validation/code/model.py
|
sebastiandres/mat281_m04_data_science
|
adc86f7a30dd87e922be5a7396d3802319b79e29
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
from scipy import random
secret_true_params = (5., np.pi/4.0, 0., 0.)
def my_model(x, a, b, c, d):
return a*np.cos(b*x+c) + d
def generate_data(N=100, true_params=secret_true_params,
seed = 42):
x = np.linspace(-2.5, 2.5, N)
y1 = my_model(x, *true_params)
y2 = 1.0 * random.normal(size=N)
# Create the data
data = np.array([x,y1+y2]).T
    # Shuffle the data (the shuffled copy is currently unused; the ordered data is saved and returned)
    permuted_data = random.permutation(data)
# Save the data
np.savetxt("dataN%d.txt"%N, data)
return data
def load_data(myfile):
data = np.loadtxt(myfile)
return data
def get_params(data):
# Use optimize to get A and B using the data
xdata = data[:,0]
ydata = data[:,1]
popt, pcov = optimize.curve_fit(my_model, xdata, ydata, maxfev=5000)
return popt
def get_error(model_params, data):
x_data = data[:,0]
y_data = data[:,1]
y_prediction = my_model(x_data, *model_params)
#error_1 = np.abs(y_data-y_prediction).sum() / len(y_data)
    error_2 = (np.sum((y_data - y_prediction) ** 2) / len(y_data)) ** 0.5  # RMSE
return error_2
def plot(training_data, testing_data, training_params, all_data_params, true_params=secret_true_params):
fig = plt.figure(figsize=(16,8))
plt.plot(training_data[:,0], training_data[:,1], 'bs', label="training data", alpha=0.75, ms=10)
plt.plot(testing_data[:,0], testing_data[:,1], 'ro', label="testing data", alpha=0.75, ms=10)
data = np.vstack([training_data, testing_data])
x = np.array(sorted(data[:,0].copy()))
plt.plot(x, my_model(x, *true_params),
'k', label="true params", lw=2.0)
plt.plot(x, my_model(x, *training_params),
'b', label="training params", lw=2.0)
plt.plot(x, my_model(x, *all_data_params),
'g', label="all data params", lw=2.0)
xmin, xmax = x.min(), x.max()
plt.xlim([xmin-.2, xmax+0.2])
plt.legend(numpoints=1, loc="lower center")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
return
def full_report(training_data, testing_data, training_params, all_data_params):
data = np.vstack([training_data, testing_data])
print("The obtained model parameters for training dataset are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(training_params))
print("The obtained model parameters for the whole dataset are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(all_data_params))
print("The true model parameters are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(secret_true_params))
print("")
prediction_error = get_error(training_params, testing_data)
print("Conservative error estimation on testing dataset: %.2f" %prediction_error)
true_error = get_error(secret_true_params, testing_data)
print("Pure random error on testing dataset: %.2f" %true_error)
all_error = get_error(secret_true_params, data)
print("Pure random error on all data: %.2f" %all_error)
if __name__=="__main__":
generate_data(N=20)
generate_data(N=50)
generate_data(N=100)
generate_data(N=500)
generate_data(N=5000)
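# Illustrative hold-out sketch (not part of the original file): fits the model
# on a training split and reports errors on the remaining data. N=100 and the
# 70/30 split ratio are arbitrary assumptions for demonstration only.
if __name__ == "__main__":
    data = np.random.permutation(generate_data(N=100))
    n_train = int(0.7 * len(data))
    training_data, testing_data = data[:n_train], data[n_train:]
    training_params = get_params(training_data)
    all_data_params = get_params(data)
    full_report(training_data, testing_data, training_params, all_data_params)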
| 35.517241
| 104
| 0.671521
|
e6be056e7a668d093d614d6927e834de649e91fe
| 558
|
py
|
Python
|
THU--Deep_Learning/homework-2/homework2-mlp/network.py
|
AlbertMillan/THU--ACM_2019-2021
|
fc5b3cc7efcd8066e04cf6da6785c6051cd665fb
|
[
"MIT"
] | 4
|
2020-12-29T14:49:44.000Z
|
2021-09-17T07:46:28.000Z
|
THU--Deep_Learning/homework-2/homework2-mlp/network.py
|
AlbertMillan/THU--ACM_2019-2021
|
fc5b3cc7efcd8066e04cf6da6785c6051cd665fb
|
[
"MIT"
] | null | null | null |
THU--Deep_Learning/homework-2/homework2-mlp/network.py
|
AlbertMillan/THU--ACM_2019-2021
|
fc5b3cc7efcd8066e04cf6da6785c6051cd665fb
|
[
"MIT"
] | 2
|
2020-12-26T03:05:53.000Z
|
2020-12-29T14:51:51.000Z
|
""" Network Class """
class Network():
def __init__(self):
self.layerList = []
self.numLayer = 0
def add(self, layer):
self.numLayer += 1
self.layerList.append(layer)
def forward(self, x):
# forward layer by layer
for i in range(self.numLayer):
x = self.layerList[i].forward(x)
return x
def backward(self, delta):
# backward layer by layer
for i in reversed(range(self.numLayer)): # reversed
# print("Layer: {}, Delta: {}".format(i, delta.shape))
delta = self.layerList[i].backward(delta)
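# Illustrative usage sketch (not part of the original file): the real layer
# classes live elsewhere in the project; IdentityLayer below is a hypothetical
# stand-in that only shows the forward/backward interface Network expects.
class IdentityLayer:
    def forward(self, x):
        return x
    def backward(self, delta):
        return delta
if __name__ == "__main__":
    net = Network()
    net.add(IdentityLayer())
    net.add(IdentityLayer())
    print(net.forward([1, 2, 3]))    # input passes through each layer in order
    print(net.backward([0.1, 0.2]))  # delta propagates through layers in reverse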
| 24.26087
| 70
| 0.621864
|
e469dfe331b2ff322a6da64122752c5e709ef8dc
| 8,612
|
py
|
Python
|
rlchat/train.py
|
Mrpatekful/dialogue-reinforce
|
e37fd566174f1dd538682a24eebb73e4b4134155
|
[
"MIT"
] | 11
|
2019-12-12T04:56:38.000Z
|
2021-12-10T03:01:35.000Z
|
rlchat/train.py
|
Mrpatekful/dialogue-reinforcement
|
e37fd566174f1dd538682a24eebb73e4b4134155
|
[
"MIT"
] | 5
|
2021-03-18T22:48:04.000Z
|
2022-03-11T23:42:25.000Z
|
rlchat/train.py
|
Mrpatekful/dialogue-reinforcement
|
e37fd566174f1dd538682a24eebb73e4b4134155
|
[
"MIT"
] | 5
|
2019-12-11T22:45:14.000Z
|
2020-11-16T14:17:36.000Z
|
"""
@author: Patrik Purgai
@copyright: Copyright 2019, rlchat
@license: MIT
@email: purgai.patrik@gmail.com
@date: 2019.02.20.
"""
import numpy as np
import torch
import copy
import signal
import json
from os.path import isfile
from parlai.core.params import ParlaiParser
from parlai.core.logs import TensorboardLogger
from parlai.core.utils import Timer
from parlai.core.agents import _create_task_agents
from parlai.scripts.train_model import TrainLoop, setup_args
from parlai.scripts.build_pytorch_data import get_pyt_dict_file
from parlai.scripts.build_dict import build_dict
from worlds import RLDialogWorld, ACTIVE, STATIC
from agents import create_agent, freeze_agent # pylint: disable=import-error
def setup_rl_args():
parser = setup_args()
reinforce = parser.add_argument_group('Reinforce Arguments')
reinforce.add_argument(
'-dl',
'--dialog_rounds',
type=int,
default=2,
help='Number of rollouts rounds for estimating the reward.')
reinforce.add_argument(
        '-db',  # distinct short flag; '-dl' is already taken by --dialog_rounds
'--dialog_branches',
type=int,
default=5,
help='Branches of the active agent responses during rollout.')
reinforce.add_argument(
'-lmp',
'--language_model_path',
type=str,
default=None,
help='Path of the language model for the reward.')
reinforce.add_argument(
'-rd',
'--reward_decay',
type=float,
default=0.9,
help='Value of the reward decay.')
return parser
def create_task(opt, active_agent, static_agent):
"""Creates a world + task_agents (aka a task)
assuming ``opt['task']="task_dir:teacher_class:options"``
"""
task = opt.get('task')
pyt_task = opt.get('pytorch_teacher_task')
pyt_dataset = opt.get('pytorch_teacher_dataset')
if not (task or pyt_task or pyt_dataset):
raise RuntimeError(
'No task specified. Please select a task with ' +
'--task {task_name}.')
if not task:
opt['task'] = 'pytorch_teacher'
world = create_task_world(opt, active_agent, static_agent)
    if opt.get('batchsize', 1) > 1:
        opt['batchsize'] = 1  # ParlAI reads the 'batchsize' key, not 'batch_size'
print('Batching is not implemented yet, setting bs to 1.')
        # raise NotImplementedError('Batching is not implemented yet.')
# world = BatchWorld(opt, world)
return world
def create_task_world(opt, active_agent, static_agent):
teacher = _create_task_agents(opt)
return RLDialogWorld(opt, active_agent, static_agent, teacher)
class ReinforceLoop(TrainLoop):
def __init__(self, opt):
signal.signal(signal.SIGINT, signal.default_int_handler)
if isinstance(opt, ParlaiParser):
opt = opt.parse_args()
# Possibly load from checkpoint
trainstats_suffix = '.trainstats'
if (opt.get('model_file') and isfile(
opt['model_file'] + '.checkpoint')):
opt['init_model'] = opt['model_file'] + '.checkpoint'
trainstats_suffix = '.checkpoint.trainstats'
else:
pass
# TODO for testing only
# raise RuntimeError('WARNING: Reinforcement learning'
# ' must be initialized by a model.checkpoint '
# 'file and {} does not exist.'.format(
# opt['model_file'] + '.checkpoint'))
# Possibly build a dictionary (not all models do this).
if (
opt['dict_build_first'] and
not (opt.get('dict_file') or opt.get('model_file'))
):
raise RuntimeError('WARNING: For train_model, '
'please specify either a '
'model_file or dict_file.')
if opt['dict_build_first'] and 'dict_file' in opt:
if opt.get('pytorch_teacher_task'):
opt['dict_file'] = get_pyt_dict_file(opt)
elif opt['dict_file'] is None and opt.get('model_file'):
opt['dict_file'] = opt['model_file'] + '.dict'
print("[ building dictionary first... ]")
build_dict(opt, skip_if_built=True)
# Create model and assign it to the specified task
self.agent = create_agent(opt)
# Freeze the model for the static dialogue partner
static_agent = copy.deepcopy(self.agent)
self.agent.id = ACTIVE
static_agent.id = STATIC
freeze_agent(static_agent)
self.world = create_task(opt, self.agent, static_agent)
# set up timers
self.train_time = Timer()
self.validate_time = Timer()
self.log_time = Timer()
self.save_time = Timer()
print('[ training... ]')
self.parleys = 0
self.max_num_epochs = (
opt['num_epochs'] if
opt['num_epochs'] > 0
else float('inf'))
self.max_train_time = (
opt['max_train_time'] if
opt['max_train_time'] > 0
else float('inf'))
self.log_every_n_secs = (
opt['log_every_n_secs'] if
opt['log_every_n_secs'] > 0
else float('inf'))
self.val_every_n_secs = (
opt['validation_every_n_secs'] if
opt['validation_every_n_secs'] > 0
else float('inf'))
self.save_every_n_secs = (
opt['save_every_n_secs'] if
opt['save_every_n_secs'] > 0
else float('inf'))
self.val_every_n_epochs = (
opt['validation_every_n_epochs'] if
opt['validation_every_n_epochs'] > 0
else float('inf'))
# smart defaults for --validation-metric-mode
if opt['validation_metric'] in {'loss', 'ppl', 'mean_rank'}:
opt['validation_metric_mode'] = 'min'
elif opt['validation_metric'] in {
'accuracy', 'hits@1', 'hits@5', 'f1', 'bleu'}:
opt['validation_metric_mode'] = 'max'
if opt.get('validation_metric_mode') is None:
opt['validation_metric_mode'] = 'max'
self.last_valid_epoch = 0
self.valid_optim = (1 if opt['validation_metric_mode'] ==
'max' else -1)
self.valid_reports = []
self.best_valid = None
if (opt.get('model_file') and
isfile(opt['model_file'] + '.best_valid')):
with open(opt['model_file'] + ".best_valid", 'r') as f:
x = f.readline()
self.best_valid = float(x)
f.close()
self.impatience = 0
self.saved = False
self.valid_world = None
self.opt = opt
# we may have been preempted, make sure we note that amount
self._preempted_epochs = 0.0
if (
opt.get('model_file') and
isfile(opt['model_file'] + trainstats_suffix)
):
# looks like we were preempted. make sure we load up our total
# training stats, etc
with open(opt['model_file'] + trainstats_suffix) as ts:
obj = json.load(ts)
self._preempted_epochs = obj.get('total_epochs', 0)
self.train_time.total = obj.get('train_time', 0)
self.impatience = obj.get('impatience', 0)
self.valid_reports = obj.get('valid_reports', [])
if opt['tensorboard_log'] is True:
self.writer = TensorboardLogger(opt)
def train(self):
opt = self.opt
world = self.world
with world:
while True:
# do one example / batch of examples
world.parley()
self.parleys += 1
# get the total training examples done, compute epochs
self._total_epochs = (
self.world.get_total_epochs()
)
exs_per_epoch = self.world.num_examples()
self._total_exs = int(np.round(self._total_epochs * exs_per_epoch))
if self.log_time.time() > self.log_every_n_secs:
self.log()
if (
self.save_time.time() > self.save_every_n_secs and
opt.get('model_file')
):
print("[ saving model checkpoint: {}.checkpoint".format(
opt['model_file']
))
self.save_model('.checkpoint')
self.save_time.reset()
if __name__ == '__main__':
ReinforceLoop(setup_rl_args().parse_args()).train()
| 34.039526
| 83
| 0.572573
|
9654992ad10cf74cc53a13b13f3d82073988b3c6
| 3,178
|
py
|
Python
|
polling_stations/apps/data_collection/management/commands/import_chiltern.py
|
alexsdutton/UK-Polling-Stations
|
01ec234fd4a832694870d5ed9de069a228397f53
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_chiltern.py
|
alexsdutton/UK-Polling-Stations
|
01ec234fd4a832694870d5ed9de069a228397f53
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_collection/management/commands/import_chiltern.py
|
alexsdutton/UK-Polling-Stations
|
01ec234fd4a832694870d5ed9de069a228397f53
|
[
"BSD-3-Clause"
] | null | null | null |
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000005"
addresses_name = "parl.2019-12-12/Version 1/Democracy_Club__12December2019 Chesham & Amersham.tsv"
stations_name = "parl.2019-12-12/Version 1/Democracy_Club__12December2019 Chesham & Amersham.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if record.addressline6 == "HP5 SPJ":
record = record._replace(addressline6="HP5 2PJ")
if record.addressline6 in ["HP16 0HR", "HP5 1JY"]:
return None # surprising polling station assignments
if uprn == "10013777238":
record = record._replace(addressline6="HP16 0RL")
rec = super().address_record_to_dict(record)
if uprn in [
# both postcodes look wrong
"10003904383", # Misbourne Suite, Rayners Extra Care Home, Weedon Hill, Hyde Heath, Amersham, Bucks
]:
return None
if uprn in [
"100081184958", # HP52HF -> HP52HG : 115 Bellingdon Road, Chesham, Bucks
"200003031219", # HP65RP -> HP65RW : Redlands, The Green, Hyde Heath, Amersham, Bucks
"200003031221", # HP65RP -> HP65RW : Troy Cottage, The Green, Hyde Heath, Amersham, Bucks
"100081186932", # HP79AZ -> HP79RZ : 268 Chiltern Heights, White Lion Road, Little Chalfont, Amersham
"100081077865", # HP66PG -> HP66PQ : Beech Hanger, 112 Bell Lane, Little Chalfont, Amersham, Bucks
"10013780932", # SL99FH -> SL90FH : 3 Drury Close, Chalfont Dene, Chalfont St Peter, Bucks
"100081082308", # HP51TW -> HP51TP : Willow Cottage, Latimer Road, Chesham, Bucks
"10013781205", # SL99LS -> SL99LX : Overdale Cottage, Nicol Road, Chalfont St Peter, Bucks
"100081083675", # SL98RP -> SL98RS : Woolton House, Maltmans Lane, Chalfont St Peter, Gerrards Cross
"100081280755", # SL98RS -> SL98RP : The Old Malt House, Maltmans Lane, Chalfont St Peter, Gerrards Cross
"100081187111", # HP84BP -> HP84BW : Pine Acre, Burtons Way, Chalfont St Giles
"200003036501", # HP169LQ -> HP160RR : Ballinger Meadow, Herberts Hole, Great Missenden
"10012939600", # HP169BY -> HP169BG : St Martins, Grimms Hill, Great Missenden, Bucks
"100080498318", # HP66RT -> HP66SE : The Grove, 19 Amersham Road, Little Chalfont
"100081080415", # HP84EE -> HP84EF : Bow Wood Barn, Bottom House Lane, Chalfont St. Giles, Bucks
"200003030834", # HP160LR -> HP160RL : Hawthorn Farm, Hyde End, Chesham Road, Great Missenden, Bucks
]:
rec["accept_suggestion"] = True
if uprn in [
"10013777238", # see postcode fix for this UPRN above
]:
rec["accept_suggestion"] = False
return rec
| 52.098361
| 118
| 0.647892
|
b36c7e5c153be9eaf804814bddeffa594e1aab35
| 879
|
py
|
Python
|
tests/test_articles.py
|
wendymunyasi/News-IP2
|
147d0b062b76e72f6942971af105e09f11a51e45
|
[
"MIT"
] | null | null | null |
tests/test_articles.py
|
wendymunyasi/News-IP2
|
147d0b062b76e72f6942971af105e09f11a51e45
|
[
"MIT"
] | null | null | null |
tests/test_articles.py
|
wendymunyasi/News-IP2
|
147d0b062b76e72f6942971af105e09f11a51e45
|
[
"MIT"
] | null | null | null |
import unittest
from app.models import Article
class SourceTest(unittest.TestCase):
'''
    Test class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_article = Article(
"bbc-news", "BBC News", "Police search homes in Streatham attack probe", "Two addresses are searched as it emerges attacker Sudesh Amman was released from prison last month.", "http://www.bbc.co.uk/news/uk-51356447", "https://ichef.bbci.co.uk/news/1024/branded_news/D862/production/_104849355_terror105-18amman.jpg", "2020-02-03T09:43:01Z", "Image copyrightMet Police Police have been searching two addresses as part of the investigation into the attack in Streatham on Sunday")
def test_instance(self):
self.assertTrue(isinstance(self.new_article, Article))
| 46.263158
| 490
| 0.713311
|
d34dabbd5c096fff30fc522d216424ec7ea08f41
| 5,151
|
py
|
Python
|
tests/test_comparison.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
tests/test_comparison.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
tests/test_comparison.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.append('..')
from PyVersionNumber import VersionNumber
class TestCaseComparisons(unittest.TestCase):
def test_less_than(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) < VersionNumber(1, 2, 3), False)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) < VersionNumber(0, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) < VersionNumber(1, 1, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) < VersionNumber(1, 2, 2), False)
self.assertEqual(VersionNumber(3, 2, 1) < VersionNumber(1, 2, 2), False)
# less than
self.assertEqual(VersionNumber(0, 2, 3) < VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 1, 3) < VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) < VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) < VersionNumber(3, 2, 1), True)
def test_greater_than(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) > VersionNumber(1, 2, 3), False)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) > VersionNumber(0, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) > VersionNumber(1, 1, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) > VersionNumber(1, 2, 2), True)
self.assertEqual(VersionNumber(3, 2, 1) > VersionNumber(1, 2, 2), True)
# less than
self.assertEqual(VersionNumber(0, 2, 3) > VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 1, 3) > VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) > VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) > VersionNumber(3, 2, 1), False)
def test_equal_to(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) == VersionNumber(1, 2, 3), True)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) == VersionNumber(0, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) == VersionNumber(1, 1, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) == VersionNumber(1, 2, 2), False)
self.assertEqual(VersionNumber(3, 2, 1) == VersionNumber(1, 2, 2), False)
# less than
self.assertEqual(VersionNumber(0, 2, 3) == VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 1, 3) == VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) == VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) == VersionNumber(3, 2, 1), False)
def test_not_equal_to(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) != VersionNumber(1, 2, 3), False)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) != VersionNumber(0, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) != VersionNumber(1, 1, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) != VersionNumber(1, 2, 2), True)
self.assertEqual(VersionNumber(3, 2, 1) != VersionNumber(1, 2, 2), True)
# less than
self.assertEqual(VersionNumber(0, 2, 3) != VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 1, 3) != VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) != VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) != VersionNumber(3, 2, 1), True)
def test_less_than_equals(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) <= VersionNumber(1, 2, 3), True)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) <= VersionNumber(0, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) <= VersionNumber(1, 1, 3), False)
self.assertEqual(VersionNumber(1, 2, 3) <= VersionNumber(1, 2, 2), False)
self.assertEqual(VersionNumber(3, 2, 1) <= VersionNumber(1, 2, 2), False)
# less than
self.assertEqual(VersionNumber(0, 2, 3) <= VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 1, 3) <= VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) <= VersionNumber(1, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 2) <= VersionNumber(3, 2, 1), True)
def test_greater_than_equals(self):
# equals
self.assertEqual(VersionNumber(1, 2, 3) >= VersionNumber(1, 2, 3), True)
# greater than
self.assertEqual(VersionNumber(1, 2, 3) >= VersionNumber(0, 2, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) >= VersionNumber(1, 1, 3), True)
self.assertEqual(VersionNumber(1, 2, 3) >= VersionNumber(1, 2, 2), True)
self.assertEqual(VersionNumber(3, 2, 1) >= VersionNumber(1, 2, 2), True)
# less than
self.assertEqual(VersionNumber(0, 2, 3) >= VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 1, 3) >= VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) >= VersionNumber(1, 2, 3), False)
self.assertEqual(VersionNumber(1, 2, 2) >= VersionNumber(3, 2, 1), False)
if __name__ == '__main__':
unittest.main()
| 47.694444
| 81
| 0.625704
|
2a6a248b3d4ceebf171ba43b135ea30815b5b96b
| 255
|
py
|
Python
|
app/botbegone/urls.py
|
KatherineJF/recipe-app-api
|
6337db40e021233b82f193e41c66d62a009adaf8
|
[
"MIT"
] | null | null | null |
app/botbegone/urls.py
|
KatherineJF/recipe-app-api
|
6337db40e021233b82f193e41c66d62a009adaf8
|
[
"MIT"
] | 2
|
2020-03-24T16:45:17.000Z
|
2020-03-31T01:19:30.000Z
|
app/botbegone/urls.py
|
KatherineJF/recipe-app-api
|
6337db40e021233b82f193e41c66d62a009adaf8
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from botbegone import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
app_name = 'botbegone'
urlpatterns = [
    path("", include(router.urls)),
]
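# Illustrative note (not part of the original file): with the DefaultRouter
# registration above, the TagViewSet is assumed to be served at /tags/ (list,
# create) and /tags/<pk>/ (retrieve, update, delete) relative to wherever this
# urlpatterns module is included, plus the router's browsable API root.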
| 18.214286
| 48
| 0.784314
|
90b73ada6ac08c6db4c1209720428e9cb5731257
| 3,839
|
py
|
Python
|
tests/test_typical_price.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 635
|
2017-04-04T20:24:47.000Z
|
2022-03-28T16:00:23.000Z
|
tests/test_typical_price.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 24
|
2017-10-22T15:01:54.000Z
|
2021-01-30T19:51:00.000Z
|
tests/test_typical_price.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 183
|
2017-07-01T16:06:39.000Z
|
2022-03-07T23:29:11.000Z
|
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import typical_price
class TestTypicalPrice(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.close_data = SampleData().get_sample_close_data()
self.high_data = SampleData().get_sample_high_data()
self.low_data = SampleData().get_sample_low_data()
self.tp_expected = [792.42999999999995, 802.86666666666667,
804.61333333333334, 809.96999999999991, 807.61666666666667,
809.68999999999994, 812.28666666666652, 815.05666666666673,
812.59666666666669, 809.88666666666677, 815.84666666666669,
817.94666666666672, 815.52333333333343, 807.71999999999991,
809.66666666666663, 795.22666666666657, 791.40333333333331,
776.2733333333332, 778.42000000000007, 764.65999999999997,
764.31333333333339, 776.02333333333343, 789.42000000000007,
785.71000000000004, 780.31666666666661, 779.13000000000011,
784.9133333333333, 784.84666666666669, 775.96999999999991,
786.21999999999991, 780.14333333333343, 775.19999999999993,
753.2266666666668, 771.70333333333338, 780.18666666666661,
805.73000000000002, 811.87333333333333, 801.88666666666666,
781.03000000000009, 782.09666666666669, 788.48000000000002,
805.62333333333333, 809.75666666666666, 819.56333333333339,
817.06333333333339, 822.00666666666666, 828.43666666666661,
835.50333333333344, 824.03666666666652, 821.82333333333338,
827.11333333333334, 821.61999999999989, 806.84666666666669,
804.60666666666668, 803.99666666666656, 811.95666666666659,
809.61333333333334, 813.90333333333331, 800.75999999999988,
803.03666666666675, 801.40333333333331, 802.7166666666667,
800.14666666666665, 803.9666666666667, 802.64666666666665,
810.02666666666664, 810.48333333333323, 802.69333333333327,
815.21000000000004, 816.0, 805.07666666666671, 799.78666666666652,
795.49666666666656, 797.88999999999999, 801.10666666666657,
790.60333333333335, 788.78000000000009, 798.62, 788.29333333333341,
802.70666666666659, 807.78666666666652, 808.00999999999988,
796.85333333333335, 791.17666666666673, 789.77666666666664,
791.77666666666664, 795.82333333333338, 793.09333333333325,
791.31333333333316, 793.57333333333327, 796.26999999999998,
796.90999999999997, 799.42999999999995, 802.87, 805.45666666666659,
801.36333333333334, 805.80666666666673, 806.89999999999998,
808.01666666666677, 808.6633333333333, 807.36000000000001,
805.23666666666668, 806.80333333333328, 797.16666666666663, 798.88,
800.11000000000001, 800.88000000000011, 791.46333333333325,
765.80333333333328, 761.89999999999998, 757.54333333333341,
757.55666666666673, 759.20666666666659, 754.46333333333325,
756.90333333333331, 753.5, 753.0866666666667, 735.65999999999997,
735.94666666666672, 729.61000000000001, 732.53000000000009,
727.24666666666656, 717.77666666666664, 707.38666666666666,
709.04333333333341, 704.95666666666659, 710.37666666666667]
def test_typical_price(self):
tp = typical_price.typical_price(self.close_data, self.high_data, self.low_data)
np.testing.assert_array_equal(tp, self.tp_expected)
def test_typical_price_invalid_data(self):
self.close_data.append(1)
with self.assertRaises(Exception) as cm:
typical_price.typical_price(self.close_data, self.high_data, self.low_data)
expected = ("Error: mismatched data lengths, check to ensure that all input data is the same length and valid")
self.assertEqual(str(cm.exception), expected)
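# Illustrative note (not part of the original file): the expected values above
# are consistent with the usual typical-price definition, presumably computed
# element-wise as (high + low + close) / 3, e.g.:
#
#     tp = [(h + l + c) / 3.0 for h, l, c in zip(high_data, low_data, close_data)]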
| 56.455882
| 119
| 0.742641
|
b2f4db24c95829d011562e84e50a41ccbfd3ed44
| 22,945
|
py
|
Python
|
helpers/caches.py
|
tritanium-industries/TITDev
|
85203129095168bee70630211f625d4a5d0133a1
|
[
"MIT"
] | null | null | null |
helpers/caches.py
|
tritanium-industries/TITDev
|
85203129095168bee70630211f625d4a5d0133a1
|
[
"MIT"
] | null | null | null |
helpers/caches.py
|
tritanium-industries/TITDev
|
85203129095168bee70630211f625d4a5d0133a1
|
[
"MIT"
] | null | null | null |
import calendar
import time
import os
import json
from flask import g, session
from defusedxml import ElementTree
import requests
from pymongo.errors import BulkWriteError
from helpers import conversions, error_handling
from views.auth import user_agent
xml_headers = {
"User-Agent": user_agent
}
if os.environ.get("EXTERNAL"):
secrets = {
"jf_key_id": os.environ["jf_key_id"],
"jf_vcode": os.environ["jf_vcode"],
"main_key_id": os.environ["main_key_id"],
"main_vcode": os.environ["main_vcode"]
}
else:
with open("../Other-Secrets/TITDev.json") as secrets_file:
secrets = json.load(secrets_file)
def stations():
db_stations_cache = g.mongo.db.caches.find_one({"_id": "stations"})
bulk_op = g.mongo.db.stations.initialize_unordered_bulk_op()
bulk_run = False
if not db_stations_cache or db_stations_cache["cached_until"] < time.time():
xml_stations_response = requests.get("https://api.eveonline.com/eve/ConquerableStationList.xml.aspx",
headers=xml_headers)
# XML Parse
try:
xml_stations_tree = ElementTree.fromstring(xml_stations_response.text)
except ElementTree.ParseError:
print(xml_stations_response.text)
return None
# Store in database
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
g.mongo.db.caches.update({"_id": "stations"}, {"cached_until": int(calendar.timegm(time.strptime(
xml_stations_tree[2].text, xml_time_pattern)))}, upsert=True)
for station in xml_stations_tree[1][0]:
bulk_run = True
bulk_op.find({"_id": int(station.attrib["stationID"])}).upsert().update(
{"$set": {"name": station.attrib["stationName"]}})
if bulk_run:
bulk_op.execute()
def character(char_ids):
"""
:param char_ids: [character_id, ...]
:return:
"""
missing_names = []
for char_id in char_ids:
db_character = g.mongo.db.characters.find_one({"_id": char_id})
if not db_character:
missing_names.append(char_id)
db_characters_cache = g.mongo.db.caches.find_one({"_id": "characters"})
bulk_op = g.mongo.db.characters.initialize_unordered_bulk_op()
bulk_run = False
if missing_names or not db_characters_cache or db_characters_cache["cached_until"] < time.time():
if db_characters_cache and db_characters_cache["cached_until"] > time.time():
character_payload = {
"ids": ",".join([str(x) for x in missing_names])
}
else:
character_payload = {
"ids": ",".join([str(x) for x in char_ids])
}
xml_character_response = requests.get("https://api.eveonline.com/eve/CharacterAffiliation.xml.aspx",
data=character_payload, headers=xml_headers)
# XML Parse
try:
xml_character_tree = ElementTree.fromstring(xml_character_response.text)
except ElementTree.ParseError:
print(xml_character_response.text)
return None
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
g.mongo.db.caches.update({"_id": "characters"}, {"cached_until": int(calendar.timegm(time.strptime(
xml_character_tree[2].text, xml_time_pattern)))}, upsert=True)
if xml_character_tree[1].tag == "error":
print(xml_character_tree[1].attrib["code"], xml_character_tree[1].text)
else:
for name in xml_character_tree[1][0]:
bulk_run = True
bulk_op.find({"_id": int(name.attrib["characterID"])}).upsert().update({"$set": {
"name": name.attrib["characterName"],
"corporation_id": int(name.attrib["corporationID"]),
"corporation_name": name.attrib["corporationName"],
"alliance_id": int(name.attrib["allianceID"]),
"alliance_name": name.attrib["allianceName"]
}})
if bulk_run:
bulk_op.execute()
def contracts(keys=None, celery_time=0):
"""
:param keys: [("jf_service" or "personal", key_id, vcode, character_id), (), ...]
:param celery_time: Set to the next run time instance
:return:
"""
if celery_time:
g.mongo.db.caches.update({"_id": "jf_service"},
{"$set": {
"next_check": time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(int(time.time()) + celery_time))}})
invalid_apis = set()
if not keys:
# Default Refreshes
keys = [("jf_service", secrets["jf_key_id"], secrets["jf_vcode"])]
bulk_op = g.mongo.db.contracts.initialize_unordered_bulk_op()
bulk_run = False
for service in keys:
if service[0] == "personal":
# If service is personal, uses key_caches database for cache values instead
db_cache = g.mongo.db.key_caches.find_one({"_id": service[3]})
cache_time = db_cache.get("contracts", 0) if db_cache else 0
else:
db_cache = g.mongo.db.caches.find_one({"_id": service[0]})
cache_time = db_cache.get("cached_until", 0) if db_cache else 0
if not db_cache or cache_time < time.time():
# Clean contract history
month_ago = int(time.time()) - 2629743 # Services are 1 month
two_weeks_ago = int(time.time()) - 1512000 # Personals are 2 1/2 weeks
filter_time = month_ago
if service[0] == "personal":
filter_time = two_weeks_ago
if service[0] == "personal":
xml_contracts_payload = {
"keyID": service[1],
"vCode": service[2],
"characterID": service[3]
}
xml_contracts_response = requests.get("https://api.eveonline.com/char/Contracts.xml.aspx",
data=xml_contracts_payload, headers=xml_headers)
else:
xml_contracts_payload = {
"keyID": service[1],
"vCode": service[2]
}
xml_contracts_response = requests.get("https://api.eveonline.com/Corp/Contracts.xml.aspx",
data=xml_contracts_payload, headers=xml_headers)
# XML Parse
try:
xml_contracts_tree = ElementTree.fromstring(xml_contracts_response.text)
except ElementTree.ParseError:
print(xml_contracts_response.text)
return list(invalid_apis)
# Store in database
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
if service[0] == "personal":
g.mongo.db.key_caches.update({"_id": int(service[3])}, {"$set": {
"contracts": int(
calendar.timegm(time.strptime(xml_contracts_tree[2].text, xml_time_pattern))),
"contracts_str": xml_contracts_tree[2].text,
"key": int(service[1])}
}, upsert=True)
else:
g.mongo.db.caches.update({"_id": service[0]}, {"$set": {"cached_until": int(
calendar.timegm(time.strptime(xml_contracts_tree[2].text, xml_time_pattern))),
"cached_str": xml_contracts_tree[2].text}}, upsert=True)
if xml_contracts_tree[1].tag == "error":
print(xml_contracts_tree[1].attrib["code"], xml_contracts_tree[1].text, service[1])
conversions.invalidate_key([service[1]], session["CharacterOwnerHash"])
invalid_apis.add(service[1])
else:
for contract in xml_contracts_tree[1][0]:
issue_time = int(calendar.timegm(time.strptime(contract.attrib["dateIssued"], xml_time_pattern)))
if issue_time > filter_time:
bulk_run = True
bulk_op.find({
"_id.id": int(contract.attrib["contractID"]), "_id.service": service[0]
}).upsert().update(
{
"$set": {
"issuer_id": int(contract.attrib["issuerID"]),
"assignee_id": int(contract.attrib["assigneeID"]),
"acceptor_id": int(contract.attrib["acceptorID"]),
"start_station_id": int(contract.attrib["startStationID"]),
"end_station_id": int(contract.attrib["endStationID"]),
"type": contract.attrib["type"],
"status": contract.attrib["status"],
"title": contract.attrib["title"],
"for_corp": int(contract.attrib["forCorp"]),
"date_issued": contract.attrib["dateIssued"],
"date_expired": contract.attrib["dateExpired"],
"date_accepted": contract.attrib["dateAccepted"],
"num_days": int(contract.attrib["numDays"]),
"date_completed": contract.attrib["dateCompleted"],
"price": float(contract.attrib["price"]),
"reward": float(contract.attrib["reward"]),
"collateral": float(contract.attrib["collateral"]),
"volume": float(contract.attrib["volume"]),
"issued_int": issue_time
}
})
if bulk_run:
try:
bulk_op.execute()
except BulkWriteError as bulk_op_error:
print("error", bulk_op_error.details)
return list(invalid_apis)
def api_keys(api_key_list, unassociated=False, dashboard_id=None, verify_mask=True):
"""
:param verify_mask: Choose whether to reject or expire non-conforming access masks
:param api_key_list: [(key_id, vcode), (), ...]
:param unassociated: True to add to unassociated API keys
:param dashboard_id: Set the associated dashboard id. Defaults to the session variable.
:return:
"""
if unassociated:
api_owner = "unassociated"
elif dashboard_id:
api_owner = dashboard_id
else:
api_owner = session["CharacterOwnerHash"]
with open("configs/base.json", "r") as base_config_file:
base_config = json.load(base_config_file)
errors_list = []
bulk_op = g.mongo.db.api_keys.initialize_ordered_bulk_op()
bulk_run = False
for key_id, vcode in api_key_list:
db_api_cache = g.mongo.db.api_keys.find_one({"_id": api_owner,
"keys.key_id": {"$eq": int(key_id)}})
cache_timer = 0
if db_api_cache and api_owner != "unassociated":
cache_timer_list = [key["cached_until"] for key in db_api_cache["keys"] if key["key_id"] == int(key_id)]
cache_timer = max(cache_timer_list)
elif api_owner == "unassociated":
cache_timer = 0
if not db_api_cache or cache_timer < time.time():
xml_contracts_payload = {
"keyID": key_id,
"vCode": vcode
}
xml_api_key_response = requests.get("https://api.eveonline.com/account/APIKeyInfo.xml.aspx",
data=xml_contracts_payload, headers=xml_headers)
# XML Parse
try:
xml_api_key_tree = ElementTree.fromstring(xml_api_key_response.text)
except ElementTree.ParseError:
print(xml_api_key_response.text)
return errors_list
# Store in database
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
failed = False
expired = False
if xml_api_key_tree[1].tag == "error":
errors_list.append("CCP gave an error for key with id " +
"{}. Ensure the key is not expired and is valid.".format(key_id))
failed = True
elif xml_api_key_tree[1][0].attrib["accessMask"] != str(base_config["access_mask"]):
errors_list.append("Key with id {} is not (or no longer) a full API key.".format(key_id))
if verify_mask:
failed = True
else:
expired = True
elif xml_api_key_tree[1][0].attrib["type"] != "Account":
errors_list.append("Key with id {} is not an Account API key.".format(key_id))
failed = True
elif xml_api_key_tree[1][0].attrib["expires"].strip():
errors_list.append("Key with id {} expires. Must be a non-expiring API key.".format(key_id))
failed = True
# Check for fail
if failed:
conversions.invalidate_key([key_id], api_owner)
continue
else:
conversions.validate_key([key_id], api_owner, expired)
# If same character is input, remove old keys first
bulk_op.find({"_id": api_owner}).upsert().update(
{
"$pull": {
"keys": {"key_id": int(key_id)}
}
})
if api_owner != "unassociated":
# Remove keys from unassociated if found
bulk_op.find({"_id": "unassociated"}).upsert().update(
{
"$pull": {
"keys": {"key_id": int(key_id)}
}
}
)
for api_character in xml_api_key_tree[1][0][0]:
bulk_run = True
update_request = {"$push": {"keys": {
"key_id": int(key_id),
"vcode": vcode,
"character_id": int(api_character.attrib["characterID"]),
"character_name": api_character.attrib["characterName"],
"cached_until": int(calendar.timegm(time.strptime(xml_api_key_tree[2].text,
xml_time_pattern))),
"cached_str": xml_api_key_tree[2].text,
"corporation_id": int(api_character.attrib["corporationID"]),
"alliance_id": int(api_character.attrib["allianceID"]),
"corporation_name": api_character.attrib["corporationName"].strip(),
"alliance_name": api_character.attrib["allianceName"].strip()
}}}
if api_owner != "unassociated" or (api_owner == "unassociated" and not g.mongo.db.api_keys.find_one(
{"keys.key_id": {"$eq": int(key_id)}, "_id": {"$ne": "unassociated"}})):
bulk_op.find({"_id": api_owner}).upsert().update(update_request)
if bulk_run:
bulk_op.execute()
return errors_list
def wallet_journal(keys=None):
"""
:param keys: [("personal", key_id, vcode), (), ...] or None for jf_wallet
:return:
"""
with open("configs/base.json", "r") as base_config_file:
base_config = json.load(base_config_file)
if not keys:
# Default Refreshes
keys = [("jf_wallet", secrets["jf_key_id"], secrets["jf_vcode"])]
bulk_op = g.mongo.db.wallet_journal.initialize_unordered_bulk_op()
bulk_run = False
for service in keys:
if service[0] == "jf_wallet":
db_wallet_journal_cache = g.mongo.db.caches.find_one({"_id": service[0]})
else:
db_wallet_journal_cache = g.mongo.db.key_caches.find_one({"_id": "wallet_journal"})
if not db_wallet_journal_cache or db_wallet_journal_cache.get("cached_until", 0) < time.time():
if service[0] == "jf_wallet":
xml_wallet_journal_payload = {
"keyID": service[1],
"vCode": service[2],
"accountKey": base_config["jf_account_key"]
}
else:
xml_wallet_journal_payload = {
"keyID": service[1],
"vCode": service[2]
}
xml_wallet_journal_response = requests.get("https://api.eveonline.com/corp/WalletJournal.xml.aspx",
data=xml_wallet_journal_payload, headers=xml_headers)
# XML Parse
try:
xml_wallet_journal_tree = ElementTree.fromstring(xml_wallet_journal_response.text)
except ElementTree.ParseError:
print(xml_wallet_journal_response.text)
return None
# Store in database
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
g.mongo.db.caches.update({"_id": service[0]}, {"cached_until": int(calendar.timegm(
time.strptime(xml_wallet_journal_tree[2].text, xml_time_pattern))),
"cached_str": xml_wallet_journal_tree[2].text}, upsert=True)
for transaction in xml_wallet_journal_tree[1][0]:
bulk_run = True
bulk_op.find({"_id": int(transaction.attrib["refID"]), "service": service[0]}).upsert().update(
{
"$set": {
"ref_type_id": int(transaction.attrib["refTypeID"]),
"owner_name_1": transaction.attrib["ownerName1"],
"owner_id_1": int(transaction.attrib["ownerID1"]),
"owner_name_2": transaction.attrib["ownerName2"],
"owner_id_2": int(transaction.attrib["ownerID2"]),
"amount": float(transaction.attrib["amount"]),
"reason": transaction.attrib["reason"]
}
})
if bulk_run:
bulk_op.execute()
def character_sheet(keys):
"""
:param keys: [(key_id, vcode, character_id), (), ....]
:return:
"""
bulk_op = g.mongo.db.character_sheet.initialize_unordered_bulk_op()
bulk_run = False
for service in keys:
db_character_sheet_cache = g.mongo.db.key_caches.find_one({"_id": service[2]})
if not db_character_sheet_cache or db_character_sheet_cache.get("character_sheet", 0) < time.time():
xml_character_sheet_payload = {
"keyID": service[0],
"vCode": service[1],
"characterID": service[2]
}
xml_character_sheet_response = requests.get("https://api.eveonline.com/char/CharacterSheet.xml.aspx",
data=xml_character_sheet_payload, headers=xml_headers)
# XML Parse
try:
xml_character_sheet_tree = ElementTree.fromstring(xml_character_sheet_response.text)
except ElementTree.ParseError:
print(xml_character_sheet_response.text)
return None
# Store in database
xml_time_pattern = "%Y-%m-%d %H:%M:%S"
g.mongo.db.key_caches.update({"_id": service[2]}, {
"character_sheet": int(calendar.timegm(
time.strptime(xml_character_sheet_tree[2].text, xml_time_pattern))),
"character_sheet_str": xml_character_sheet_tree[2].text,
"key": int(service[0])
}, upsert=True)
for skill in xml_character_sheet_tree[1][33]:
bulk_run = True
bulk_op.find({"_id": service[2]}).upsert().update(
{
"$set": {
"skills." + skill.attrib["typeID"]: {
"skill_points": int(skill.attrib["skillpoints"]),
"level": int(skill.attrib["level"])
}
}
})
if bulk_run:
bulk_op.execute()
def security_characters():
db_security_characters_cache = g.mongo.db.caches.find_one({"_id": "security_characters"})
bulk_op = g.mongo.db.security_characters.initialize_unordered_bulk_op()
bulk_run = False
if not db_security_characters_cache or db_security_characters_cache["cached_until"] < time.time():
xml_security_characters_payload = {
"keyID": secrets["main_key_id"],
"vCode": secrets["main_vcode"],
"extended": 1
}
xml_security_characters_response = requests.get("https://api.eveonline.com/corp/MemberTracking.xml.aspx",
data=xml_security_characters_payload, headers=xml_headers)
# XML Parse
try:
xml_security_characters_tree = ElementTree.fromstring(xml_security_characters_response.text)
except ElementTree.ParseError:
print(xml_security_characters_response.text)
return None
# Store in database
if xml_security_characters_tree[1].tag == "error":
raise error_handling.ConfigError("Main Corp API is not valid.")
g.mongo.db.caches.update({"_id": "security_characters"}, {
"cached_until": conversions.xml_time(xml_security_characters_tree[2].text),
"cached_str": xml_security_characters_tree[2].text
}, upsert=True)
for corp_char in xml_security_characters_tree[1][0]:
bulk_run = True
bulk_op.find({"_id": int(corp_char.attrib["characterID"])}).upsert().update(
{"$set": {
"name": corp_char.attrib["name"],
"join_time": conversions.xml_time(corp_char.attrib["startDateTime"]),
"title": corp_char.attrib["title"],
"log_on_time": conversions.xml_time(corp_char.attrib.get("logonDateTime")),
"log_off_time": conversions.xml_time(corp_char.attrib.get("logoffDateTime")),
"last_location_id": corp_char.attrib.get("locationID"),
"last_location_str": corp_char.attrib.get("location"),
"last_ship_id": corp_char.attrib.get("shipTypeID"),
"last_ship_str": corp_char.attrib.get("shipType")
}})
if bulk_run:
# Clear entire database first
g.mongo.db.security_characters.remove({})
bulk_op.execute()
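# Illustrative usage sketch (not part of the original file): every refresher
# above reads g.mongo and (in some error paths) the Flask session, so calls are
# assumed to happen inside an application/request context, for example:
#
#     with app.test_request_context():   # `app` is the Flask app wired to g.mongo
#         stations()                      # refresh conquerable station names
#         character([12345678])           # 12345678 is an arbitrary character id
#         contracts()                     # default: refresh the jf_service key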
| 44.990196
| 117
| 0.542384
|
cb7349d08b7b1d9dfa3da3f5819727de66e66da7
| 4,003
|
py
|
Python
|
experiments/murtaza/off_policy_ssl/gym/ant/bc.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/off_policy_ssl/gym/ant/bc.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/off_policy_ssl/gym/ant/bc.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
from railrl.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from railrl.torch.sac.policies import GaussianPolicy
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
from railrl.launchers.launcher_util import run_experiment
import railrl.misc.hyperparameter as hyp
if __name__ == "__main__":
variant = dict(
num_epochs=0,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=0,
min_num_steps_before_training=0,
max_path_length=1000,
batch_size=512,
replay_buffer_size=int(1E6),
algorithm="BC",
version="normal",
collection_mode='batch',
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
layer_size=256,
num_layers=4,
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
bc_num_pretrain_steps=1000000,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=0,
policy_weight_decay=1e-4,
compute_bc=True,
bc_weight=1.0,
rl_weight=0.0,
bc_loss_type='mse',
pretraining_env_logging_period=10000,
do_pretrain_rollouts=True,
),
policy_kwargs=dict(
hidden_sizes=[256]*4,
max_log_std=0,
min_log_std=-6,
),
path_loader_kwargs=dict(
demo_paths=[
dict(
path='demos/ant_action_noise_1000.npy',
obs_dict=False,
is_demo=True,
train_split=.9,
data_split=.01,
),
],
),
path_loader_class=DictToMDPPathLoader,
)
search_space = {
'trainer_kwargs.use_automatic_entropy_tuning':[False],
'trainer_kwargs.bc_num_pretrain_steps':[400000],
'trainer_kwargs.bc_weight':[1],
'train_rl':[False],
'pretrain_policy':[True],
'pretrain_rl':[False],
'load_demos':[True],
'path_loader_kwargs.demo_paths':[
[
dict(
path='demos/ant_action_noise_1000.npy',
obs_dict=False,
is_demo=True,
train_split=.9,
data_split=.01,
),
],
[
dict(
path='demos/ant_action_noise_1000.npy',
obs_dict=False,
is_demo=True,
train_split=.9,
data_split=.015,
),
],
[
dict(
path='demos/ant_action_noise_1000.npy',
obs_dict=False,
is_demo=True,
train_split=.9,
data_split=.025,
),
],
],
'env': [
'ant',
],
'policy_class':[
GaussianPolicy,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_prefix = 'bc_ant_frac_trajs_sweep'
# n_seeds = 2
# mode = 'ec2'
# exp_prefix = 'bc_ant_gym_v2'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
num_exps_per_instance=1,
use_gpu=True,
gcp_kwargs=dict(
preemptible=False,
),
skip_wait=True,
)
| 29.873134
| 75
| 0.508119
|
b6bf1d723a7a61dbe4ff44b79d8f65262273fd38
| 6,501
|
py
|
Python
|
kubernetes/client/models/v1beta2_replica_set_spec.py
|
fooka03/python
|
073cf4d89e532f92b57e8955b4efc3d5d5eb80cf
|
[
"Apache-2.0"
] | 2
|
2020-07-02T05:47:41.000Z
|
2020-07-02T05:50:34.000Z
|
kubernetes/client/models/v1beta2_replica_set_spec.py
|
fooka03/python
|
073cf4d89e532f92b57e8955b4efc3d5d5eb80cf
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:44:49.000Z
|
2021-03-25T23:44:49.000Z
|
k8sdeployment/k8sstat/python/kubernetes/client/models/v1beta2_replica_set_spec.py
|
JeffYFHuang/gpuaccounting
|
afa934350ebbd0634beb60b9df4a147426ea0006
|
[
"MIT"
] | 1
|
2021-10-13T17:45:37.000Z
|
2021-10-13T17:45:37.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta2ReplicaSetSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'replicas': 'int',
'selector': 'V1LabelSelector',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'replicas': 'replicas',
'selector': 'selector',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None): # noqa: E501
"""V1beta2ReplicaSetSpec - a model defined in OpenAPI""" # noqa: E501
self._min_ready_seconds = None
self._replicas = None
self._selector = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if replicas is not None:
self.replicas = replicas
self.selector = selector
if template is not None:
self.template = template
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1beta2ReplicaSetSpec. # noqa: E501
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:return: The min_ready_seconds of this V1beta2ReplicaSetSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1beta2ReplicaSetSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1beta2ReplicaSetSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def replicas(self):
"""Gets the replicas of this V1beta2ReplicaSetSpec. # noqa: E501
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller # noqa: E501
:return: The replicas of this V1beta2ReplicaSetSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1beta2ReplicaSetSpec.
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller # noqa: E501
:param replicas: The replicas of this V1beta2ReplicaSetSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def selector(self):
"""Gets the selector of this V1beta2ReplicaSetSpec. # noqa: E501
:return: The selector of this V1beta2ReplicaSetSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1beta2ReplicaSetSpec.
:param selector: The selector of this V1beta2ReplicaSetSpec. # noqa: E501
:type: V1LabelSelector
"""
if selector is None:
raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
self._selector = selector
@property
def template(self):
"""Gets the template of this V1beta2ReplicaSetSpec. # noqa: E501
:return: The template of this V1beta2ReplicaSetSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1beta2ReplicaSetSpec.
:param template: The template of this V1beta2ReplicaSetSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
self._template = template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ReplicaSetSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
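# A brief usage sketch (illustrative only; the label values are invented and the
# snippet assumes the full `kubernetes` client package is installed). `selector`
# must not be None, because the setter above rejects it, so a V1LabelSelector is
# passed in.
if __name__ == '__main__':
    from kubernetes.client import V1LabelSelector
    spec = V1beta2ReplicaSetSpec(
        replicas=3,
        min_ready_seconds=10,
        selector=V1LabelSelector(match_labels={'app': 'demo'}),
    )
    print(spec.to_dict())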
| 33.168367
| 276
| 0.629134
|
e00c3ef37f96f1bc0b89ac4dcd8e60f69c6ad153
| 802
|
py
|
Python
|
Python/Day8.py
|
RylandeFM/AoC2020
|
77d3e0f4a992073fd814bb664f4e4432dd8cf861
|
[
"MIT"
] | null | null | null |
Python/Day8.py
|
RylandeFM/AoC2020
|
77d3e0f4a992073fd814bb664f4e4432dd8cf861
|
[
"MIT"
] | null | null | null |
Python/Day8.py
|
RylandeFM/AoC2020
|
77d3e0f4a992073fd814bb664f4e4432dd8cf861
|
[
"MIT"
] | null | null | null |
inputString = open("Python/Day8Input.txt", "r").read().splitlines()
accumulator, swappedPos = 0, set()
def runProgram(skip):
global accumulator
pos, hasSwapped, executedPos = 0, False, set()
while pos not in executedPos:
instr, value = inputString[pos].split(" ")
executedPos.add(pos)
if skip and instr != "acc" and not hasSwapped and pos not in swappedPos:
hasSwapped = True
swappedPos.add(pos)
instr = "nop" if instr == "jmp" else "jmp"
if instr == "acc": accumulator += int(value)
pos += int(value) if instr == "jmp" else 1
if pos >= len(inputString): return True
return False
runProgram(False)
print(accumulator)
accumulator = 0
while not runProgram(True):
accumulator = 0
print(accumulator)
| 32.08
| 80
| 0.63217
|
1b92bb70df490a74d4a37d3a2d24b3a6686ad52c
| 2,556
|
py
|
Python
|
smanager/machines/views/machines.py
|
Ferald89/deicaphone
|
52d0c8ee1818a655257c222f7a0d8f9cc5c52bd3
|
[
"MIT"
] | null | null | null |
smanager/machines/views/machines.py
|
Ferald89/deicaphone
|
52d0c8ee1818a655257c222f7a0d8f9cc5c52bd3
|
[
"MIT"
] | 1
|
2020-08-03T16:35:56.000Z
|
2020-08-03T16:35:56.000Z
|
smanager/machines/views/machines.py
|
Ferald89/deicaphone
|
52d0c8ee1818a655257c222f7a0d8f9cc5c52bd3
|
[
"MIT"
] | null | null | null |
"""Machine views."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Permissions
from rest_framework.permissions import IsAuthenticated
from smanager.machines.permissions.machines import IsMachineAdmin
# Serializer
from smanager.machines.serializers import MachineModelSerializer, ManualModelSerializer
from smanager.users.serializers import UserModelSerializer
# Model
from smanager.machines.models import Machine, Membership, Manual
from smanager.users.models import User
class MachineViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""Project view set."""
serializer_class = MachineModelSerializer
lookup_field = 'serial_number'
def get_queryset(self):
"""Restric list to public only"""
queryset = Machine.objects.filter(owner=self.request.user)
if self.action == 'list':
            # change to is public project
return queryset
return queryset
def get_permissions(self):
"""Assign permissions based on action."""
permissions = [IsAuthenticated]
if self.action in ['update', 'partial_update']:
permissions.append(IsMachineAdmin)
return [permission() for permission in permissions]
def perform_create(self, serializer):
"""Assign machine admin"""
machine = serializer.save()
user = self.request.user
profile = user.profile
# import ipdb; ipdb.set_trace()
Membership.objects.create(
user=user,
profile=profile,
machine=machine,
is_owner=True,
)
def retrieve(self, request, *args, **kwargs):
"""Add extra data to the response"""
response = super(MachineViewSet, self).retrieve(request, *args, **kwargs)
user = User.objects.filter(
id=response.data['owner'],
)
machineobject = Machine.objects.get(serial_number=response.data['serial_number'])
# import ipdb; ipdb.set_trace()
manuals = Manual.objects.filter(
used_in=machineobject
)
data = {
'machine': response.data,
'user': UserModelSerializer(user, many=True).data,
'manual': ManualModelSerializer(manuals, many=True).data,
}
response.data = data
return response
| 33.631579
| 89
| 0.635759
|
e546ccc35ed6f75337f9d30d0644060dd00a1edf
| 4,089
|
py
|
Python
|
tools/valid.py
|
Sim-ai/HRNet-Image-Classification
|
c2b2c19604f3076cd49369c4226f45d77e6c973a
|
[
"MIT"
] | 1
|
2020-11-25T02:16:04.000Z
|
2020-11-25T02:16:04.000Z
|
tools/valid.py
|
Sim-ai/HRNet-Image-Classification
|
c2b2c19604f3076cd49369c4226f45d77e6c973a
|
[
"MIT"
] | null | null | null |
tools/valid.py
|
Sim-ai/HRNet-Image-Classification
|
c2b2c19604f3076cd49369c4226f45d77e6c973a
|
[
"MIT"
] | 1
|
2020-12-07T11:17:35.000Z
|
2020-12-07T11:17:35.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import shutil
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import _init_paths
import models
from config import config
from config import update_config
from core.function import validate
from utils.modelsummary import get_model_summary
from utils.utils import create_logger
def parse_args():
    parser = argparse.ArgumentParser(description='Validate classification network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--testModel',
help='testModel',
type=str,
default='')
args = parser.parse_args()
update_config(config, args)
return args
def main():
args = parse_args()
logger, final_output_dir, tb_log_dir = create_logger(
config, args.cfg, 'valid')
logger.info(pprint.pformat(args))
logger.info(pprint.pformat(config))
# cudnn related setting
cudnn.benchmark = config.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = config.CUDNN.ENABLED
model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
config)
dump_input = torch.rand(
(1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
)
logger.info(get_model_summary(model, dump_input))
if config.TEST.MODEL_FILE:
logger.info('=> loading model from {}'.format(config.TEST.MODEL_FILE))
model.load_state_dict(torch.load(config.TEST.MODEL_FILE))
else:
model_state_file = os.path.join(final_output_dir,
'final_state.pth.tar')
logger.info('=> loading model from {}'.format(model_state_file))
model.load_state_dict(torch.load(model_state_file))
gpus = list(config.GPUS)
model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
# define loss function (criterion) and optimizer
criterion = torch.nn.CrossEntropyLoss().cuda()
# Data loading code
valdir = os.path.join(config.DATASET.ROOT,
config.DATASET.TEST_SET)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
valid_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
transforms.ToTensor(),
normalize,
])),
batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
shuffle=False,
num_workers=config.WORKERS,
pin_memory=True
)
# evaluate on validation set
validate(config, valid_loader, model, criterion, final_output_dir,
tb_log_dir, None)
if __name__ == '__main__':
main()
| 31.697674
| 80
| 0.603571
|
c711314a99013f68adc46fed37a2cdbd5a45c17d
| 11,733
|
py
|
Python
|
tests/core/test_lightning_optimizer.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 15,666
|
2020-01-14T07:16:15.000Z
|
2022-03-31T23:22:26.000Z
|
tests/core/test_lightning_optimizer.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 9,140
|
2020-01-14T03:10:42.000Z
|
2022-03-31T19:57:09.000Z
|
tests/core/test_lightning_optimizer.py
|
Code-Cornelius/pytorch-lightning
|
ce95891f6ab21a6cb1e5e6bc46cebafe9aab6057
|
[
"Apache-2.0"
] | 2,340
|
2020-01-14T06:45:32.000Z
|
2022-03-31T22:57:07.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Any
from unittest.mock import DEFAULT, patch
import pytest
import torch
from torch.optim import Adam, Optimizer, SGD
from pytorch_lightning import Trainer
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loops.optimization.optimizer_loop import Closure
from tests.helpers.boring_model import BoringModel
@pytest.mark.parametrize("auto", (True, False))
def test_lightning_optimizer(tmpdir, auto):
"""Test that optimizer are correctly wrapped by our LightningOptimizer."""
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
if not auto:
# note: this is not recommended, only done for coverage
optimizer = LightningOptimizer(optimizer)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
trainer.fit(model)
lightning_opt = model.optimizers()
assert str(lightning_opt) == "Lightning" + str(lightning_opt.optimizer)
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(tmpdir):
"""Test that the user can use our LightningOptimizer.
Not recommended.
"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx):
opt_1, opt_2 = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
def closure(opt):
output = self.layer(batch)
loss = self.loss(batch, output)
opt.zero_grad()
self.manual_backward(loss)
if batch_idx % 2 == 0:
closure(opt_1)
opt_1.step()
closure(opt_2)
opt_2.step()
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=8, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
with patch.multiple(torch.optim.SGD, zero_grad=DEFAULT, step=DEFAULT) as sgd, patch.multiple(
torch.optim.Adam, zero_grad=DEFAULT, step=DEFAULT
) as adam:
trainer.fit(model)
assert sgd["step"].call_count == 4
assert adam["step"].call_count == 8
assert sgd["zero_grad"].call_count == 4
assert adam["zero_grad"].call_count == 8
def test_state(tmpdir):
model = torch.nn.Linear(3, 4)
optimizer = torch.optim.Adam(model.parameters())
lightning_optimizer = LightningOptimizer(optimizer)
# test state
assert optimizer.state == lightning_optimizer.state
lightning_optimizer.state = optimizer.state
assert optimizer.state == lightning_optimizer.state
# test param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
lightning_optimizer.param_groups = optimizer.param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
# test defaults
assert optimizer.defaults == lightning_optimizer.defaults
lightning_optimizer.defaults = optimizer.defaults
assert optimizer.defaults == lightning_optimizer.defaults
assert isinstance(lightning_optimizer, LightningOptimizer)
assert isinstance(lightning_optimizer, Adam)
assert isinstance(lightning_optimizer, Optimizer)
lightning_dict = {
k: v for k, v in lightning_optimizer.__dict__.items() if k not in {"_optimizer", "_optimizer_idx", "_trainer"}
}
assert lightning_dict == optimizer.__dict__
assert optimizer.state_dict() == lightning_optimizer.state_dict()
assert optimizer.state == lightning_optimizer.state
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
"""Test overriding zero_grad works in automatic_optimization."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
return super().training_step(batch, batch_idx)
def training_epoch_end(self, outputs):
...
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
if isinstance(optimizer, SGD) and batch_idx % 2 == 0:
optimizer.zero_grad()
if isinstance(optimizer, Adam) and batch_idx % 5 == 0:
optimizer.zero_grad()
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=20, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
trainer.fit(model)
assert adam_zero_grad.call_count == 4
assert sgd_zero_grad.call_count == 10
def test_lightning_optimizer_automatic_optimization_optimizer_step(tmpdir):
"""Test overriding step works in automatic_optimization."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
return super().training_step(batch, batch_idx)
def training_epoch_end(self, outputs):
...
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, **_):
assert isinstance(optimizer_closure, Closure)
# zero_grad is called inside the closure
optimizer_closure()
# not passing the closure to the optimizer because step is mocked
if isinstance(optimizer, SGD) and batch_idx % 2 == 0:
optimizer.step()
if isinstance(optimizer, Adam) and batch_idx % 4 == 0:
optimizer.step()
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
limit_train_batches = 8
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=limit_train_batches,
limit_val_batches=1,
max_epochs=1,
enable_model_summary=False,
)
with patch.multiple(torch.optim.SGD, zero_grad=DEFAULT, step=DEFAULT) as sgd, patch.multiple(
torch.optim.Adam, zero_grad=DEFAULT, step=DEFAULT
) as adam:
trainer.fit(model)
assert sgd["step"].call_count == limit_train_batches // 2
assert adam["step"].call_count == limit_train_batches // 4
assert sgd["zero_grad"].call_count == limit_train_batches
assert adam["zero_grad"].call_count == limit_train_batches
def test_lightning_optimizer_automatic_optimization_lbfgs_zero_grad(tmpdir):
"""Test zero_grad is called the same number of times as LBFGS requires for reevaluation of the loss in
automatic_optimization."""
class TestModel(BoringModel):
def configure_optimizers(self):
return torch.optim.LBFGS(self.parameters())
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=1, max_epochs=1, enable_model_summary=False
)
with patch("torch.optim.LBFGS.zero_grad") as zero_grad:
trainer.fit(model)
lbfgs = model.optimizers()
max_iter = lbfgs.param_groups[0]["max_iter"]
assert zero_grad.call_count == max_iter
class OptimizerWithHooks(Optimizer):
def __init__(self, model):
self._fwd_handles = []
self._bwd_handles = []
self.params = []
for _, mod in model.named_modules():
mod_class = mod.__class__.__name__
if mod_class != "Linear":
continue
handle = mod.register_forward_pre_hook(self._save_input) # save the inputs
self._fwd_handles.append(handle) # collect forward-save-input hooks in list
handle = mod.register_backward_hook(self._save_grad_output) # save the gradients
self._bwd_handles.append(handle) # collect backward-save-grad hook in list
# save the parameters
params = [mod.weight]
if mod.bias is not None:
params.append(mod.bias)
# save a param_group for each module
d = {"params": params, "mod": mod, "layer_type": mod_class}
self.params.append(d)
super().__init__(self.params, {"lr": 0.01})
def _save_input(self, mod, i):
"""Saves input of layer."""
if mod.training:
self.state[mod]["x"] = i[0]
def _save_grad_output(self, mod, _, grad_output):
"""Saves grad on output of layer to grad is scaled with batch_size since gradient is spread over samples in
mini batch."""
batch_size = grad_output[0].shape[0]
if mod.training:
self.state[mod]["grad"] = grad_output[0] * batch_size
def step(self, closure=None):
closure()
for group in self.param_groups:
_ = self.state[group["mod"]]["x"]
_ = self.state[group["mod"]]["grad"]
return True
def test_lightning_optimizer_keeps_hooks(tmpdir):
class TestModel(BoringModel):
count_on_train_batch_start = 0
count_on_train_batch_end = 0
def configure_optimizers(self):
return OptimizerWithHooks(self)
def on_train_batch_start(self, batch: Any, batch_idx: int) -> None:
self.count_on_train_batch_start += 1
optimizer = self.optimizers(use_pl_optimizer=False)
assert len(optimizer._fwd_handles) == 1
def on_train_batch_end(self, outputs: Any, batch: Any, batch_idx: int) -> None:
self.count_on_train_batch_end += 1
del self.trainer._lightning_optimizers
gc.collect() # not necessary, just in case
trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=4, limit_val_batches=1, max_epochs=1)
model = TestModel()
trainer.fit(model)
assert model.count_on_train_batch_start == 4
assert model.count_on_train_batch_end == 4
| 37.605769
| 118
| 0.678514
|
258b9c831601e5deaf7030fe40cb0ba97c6e0510
| 4,336
|
py
|
Python
|
jobsite/settings.py
|
Manasranjanpati/Jobpost
|
e5654129538e70cedf8aafc65c1b0289a01535e5
|
[
"MIT"
] | null | null | null |
jobsite/settings.py
|
Manasranjanpati/Jobpost
|
e5654129538e70cedf8aafc65c1b0289a01535e5
|
[
"MIT"
] | 1
|
2020-11-07T04:15:05.000Z
|
2020-11-07T04:15:05.000Z
|
jobsite/settings.py
|
Manasranjanpati/Jobpost
|
e5654129538e70cedf8aafc65c1b0289a01535e5
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRETKEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'ckeditor',
'accounts',
'jobs',
'companydashboard',
'applicantprofile',
'payments',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jobsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jobsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.mysql",
# "NAME": os.getenv("DB_NAME"),
# "USER": os.getenv("DB_USER"),
# "PASSWORD": os.getenv("DB_PASSWORD"),
# "HOST": os.getenv("DB_HOST"),
# "PORT": os.getenv("DB_PORT"),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Static Files
# Adds the sitewide static folder so that Django can search it too
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static_cdn', 'static_root')
# Media Files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static_cdn', 'media_root')
# Custom User Model
AUTH_USER_MODEL = 'accounts.User'
# Login config
LOGIN_URL = '/login'
LOGOUT_REDIRECT_URL = '/'
# Messages style config
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.DEBUG: 'is-link',
messages.INFO: 'is-info',
messages.SUCCESS: 'is-success',
messages.WARNING: 'is-warning',
messages.ERROR: 'is-danger',
}
# CKEDITOR Settings
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink'],
['RemoveFormat', 'Source']
]
}
}
# Stripe Keys
STRIPE_PUB_KEY = os.getenv('STRIPE_PUB_KEY')
STRIPE_SECRET_KEY = os.getenv('STRIPE_SECRET_KEY')
| 24.359551
| 140
| 0.659133
|
c92f2897ac9f050bd6c0ad79739da16d051e3b26
| 1,759
|
py
|
Python
|
18/solution-b.py
|
gmodena/adventofcode2017
|
6fb72b2220f38e5f65b04f491c8787f1b999ba60
|
[
"Unlicense"
] | 1
|
2017-12-08T21:36:50.000Z
|
2017-12-08T21:36:50.000Z
|
18/solution-b.py
|
gmodena/adventofcode2017
|
6fb72b2220f38e5f65b04f491c8787f1b999ba60
|
[
"Unlicense"
] | null | null | null |
18/solution-b.py
|
gmodena/adventofcode2017
|
6fb72b2220f38e5f65b04f491c8787f1b999ba60
|
[
"Unlicense"
] | null | null | null |
from collections import defaultdict
registers = [defaultdict(int), defaultdict(int)]
def get_value(registers, v):
try:
return int(v)
except ValueError:
return registers[v]
if __name__ == '__main__':
op = 0; reg = 1; val = 2
with open('input.txt') as infile:
instructions = [line.strip().split(' ') for line in infile]
pid = 0
eip = [0, 0]
lock = [False, False]
messages = [[], []]
registers[0]['p'] = 0
registers[1]['p'] = 1
count = 0
while not (lock[0] and lock[1]):
pid = (pid+1) % (len(eip))
        if not 0 <= eip[pid] < len(instructions):  # halt once the instruction pointer leaves the program
break
ins = instructions[eip[pid]]
if ins[op] == 'snd':
messages[pid].append(get_value(registers[pid], ins[reg]))
if pid == 1:
count += 1
elif ins[op] == 'set':
registers[pid][ins[reg]] = get_value(registers[pid], ins[val])
elif ins[op] == 'add':
registers[pid][ins[reg]] += get_value(registers[pid], ins[val])
elif ins[op] == 'mul':
registers[pid][ins[reg]] *= get_value(registers[pid], ins[val])
elif ins[op] == 'mod':
registers[pid][ins[reg]] %= get_value(registers[pid], ins[val])
elif ins[op] == 'rcv':
sender_pid = (pid+1) % (len(eip))
try:
registers[pid][ins[reg]] = messages[sender_pid].pop(0)
lock[pid] = False
except IndexError:
lock[pid] = True
continue
elif ins[op] == 'jgz':
reg_value = get_value(registers[pid], ins[reg])
if reg_value > 0:
eip[pid] += get_value(registers[pid], ins[val])
continue
eip[pid] += 1
print(count)
| 32.574074
| 75
| 0.498579
|
2f5b9fd09b8364b242404f5da987a659d4716f67
| 12,537
|
py
|
Python
|
tests/heuristic_test.py
|
TeamJumpstart/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 10
|
2021-04-18T17:54:02.000Z
|
2021-07-26T19:58:41.000Z
|
tests/heuristic_test.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-21T15:13:41.000Z
|
2021-04-21T15:13:41.000Z
|
tests/heuristic_test.py
|
DiddiZ/InformatiCup2021
|
a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4
|
[
"MIT"
] | 1
|
2021-04-20T09:42:50.000Z
|
2021-04-20T09:42:50.000Z
|
import time
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import heuristics
from environments import spe_ed
def empty_board_1player():
""" board state visualised: board size = 5x5
- - - - -
- - - - -
- - 1 - -
- - - - -
- - - - -
"""
cells = np.zeros((5, 5), dtype=bool)
player = spe_ed.Player(player_id=1, x=2, y=2, direction=spe_ed.directions[0], speed=1, active=True)
opponents = []
rounds = 0
return (cells, player, opponents, rounds, time.time() + 10)
def empty_board_2players():
""" board state visualised: board size = 5x5
2 - - - -
- - - - -
- - - - -
- - - - -
- - - - 1
"""
cells = np.zeros((5, 5), dtype=bool)
player = spe_ed.Player(player_id=1, x=4, y=4, direction=spe_ed.directions[3], speed=1, active=True)
opponents = [spe_ed.Player(player_id=2, x=0, y=0, direction=spe_ed.directions[1], speed=1, active=True)]
rounds = 0
return (cells, player, opponents, rounds, time.time() + 10)
def empty_board_3players():
""" board state visualised: board size = 5x5
2 - - - 3
- - - - -
- - - - -
- - - - -
- - - - 1
"""
cells = np.zeros((5, 5), dtype=bool)
player = spe_ed.Player(player_id=1, x=4, y=4, direction=spe_ed.directions[3], speed=1, active=True)
opponents = [
spe_ed.Player(player_id=2, x=0, y=0, direction=spe_ed.directions[1], speed=1, active=True),
spe_ed.Player(player_id=3, x=0, y=4, direction=spe_ed.directions[1], speed=1, active=True)
]
rounds = 0
return (cells, player, opponents, rounds, time.time() + 10)
def default_round1_board():
""" board state visualised: board size = 5x5
# - - - -
2 - - - -
- - # 1 -
- - - - -
- - - - -
"""
cells = np.zeros((5, 5), dtype=bool)
cells[2, 2:4] = True # path of player_id 1
cells[0:2, 0] = True # path of player_id 2
player = spe_ed.Player(
player_id=1, x=3, y=2, direction=spe_ed.directions[0], speed=1, active=True
) # pos: 3/2, direction: right
opponents = [
spe_ed.Player(player_id=2, x=0, y=1, direction=spe_ed.directions[1], speed=1, active=True)
] # pos: 0/1 direction: down
rounds = 1
return (cells, player, opponents, rounds, time.time() + 10)
def default_almost_full_board():
""" board state visualised: board size = 5x5
- # # # #
- # # # #
2 # - - 1
# # # # #
# # # # #
"""
cells = np.ones((5, 5), dtype=bool)
cells[2, 2:4] = False # free path for player_id 1
cells[0:2, 0] = False # free path for player_id 2
player = spe_ed.Player(
player_id=1, x=4, y=2, direction=spe_ed.directions[2], speed=1, active=True
) # pos: 4/2, direction: left
opponents = [
spe_ed.Player(player_id=2, x=0, y=2, direction=spe_ed.directions[3], speed=1, active=True)
] # pos:0/2, direction: up
rounds = 30
return (cells, player, opponents, rounds, time.time() + 10)
class TestRandomHeuristic(unittest.TestCase):
def test_random_output(self):
"""The heuristic should return a value between 0 and 1, independently of the input."""
score = heuristics.RandomHeuristic().score(None, None, None, None, None)
self.assertGreaterEqual(score, 0)
self.assertLessEqual(score, 1)
score = heuristics.RandomHeuristic().score(*default_round1_board())
self.assertGreaterEqual(score, 0)
self.assertLessEqual(score, 1)
score = heuristics.RandomHeuristic().score(*default_round1_board())
self.assertGreaterEqual(score, 0)
self.assertLessEqual(score, 1)
score = heuristics.RandomHeuristic().score(*default_almost_full_board())
self.assertGreaterEqual(score, 0)
self.assertLessEqual(score, 1)
class TestRegionHeuristic(unittest.TestCase):
def test_empty_board(self):
score = heuristics.RegionHeuristic().score(*empty_board_1player())
self.assertEqual(score, 1)
def test_default_round1_board(self):
score = heuristics.RegionHeuristic().score(*default_round1_board())
self.assertEqual(score, (23 / 2 / 25) * (1 - 23 / 2 / 25))
score = heuristics.RegionHeuristic(include_opponent_regions=False).score(*default_round1_board())
self.assertEqual(score, (23 / 2 / 25))
def test_default_almost_full_board(self):
score = heuristics.RegionHeuristic().score(*default_almost_full_board())
self.assertEqual(score, (3 / 1 / 25) * (1 - 3 / 1 / 25))
score = heuristics.RegionHeuristic(include_opponent_regions=False).score(*default_almost_full_board())
self.assertEqual(score, (3 / 1 / 25))
def test_immutable_input(self):
"""Check if the heuristic modifies the input data itself."""
board_state = default_round1_board()
heuristics.RegionHeuristic().score(*board_state)
assert_array_equal(board_state[0], default_round1_board()[0])
class TestOpponentDistanceHeuristic(unittest.TestCase):
def test_default_round1_board(self):
score = heuristics.OpponentDistanceHeuristic(dist_threshold=16).score(*default_round1_board())
self.assertEqual(score, 4.0 / 10.0)
def test_default_almost_full_board(self):
score = heuristics.OpponentDistanceHeuristic(dist_threshold=16).score(*default_almost_full_board())
self.assertEqual(score, 4.0 / 10.0)
def test_immutable_input(self):
"""Check if the heuristic modifies the input data itself."""
board_state = default_round1_board()
heuristics.OpponentDistanceHeuristic().score(*board_state)
assert_array_equal(board_state[0], default_round1_board()[0])
class TestVoronoiHeuristic(unittest.TestCase):
def test_empty_board(self):
score = heuristics.VoronoiHeuristic(max_steps=16, opening_iterations=0).score(*empty_board_1player())
self.assertEqual(score, 1.0)
def test_default_round1_board(self):
score = heuristics.VoronoiHeuristic(max_steps=16, opening_iterations=0).score(*default_round1_board())
self.assertEqual(score, 12.0 / 25.0)
def test_default_almost_full_board(self):
score = heuristics.VoronoiHeuristic(max_steps=16, opening_iterations=0).score(*default_almost_full_board())
self.assertEqual(score, 3.0 / 25.0)
def test_immutable_input(self):
"""Check if the heuristic modifies the input data itself."""
board_state = default_round1_board()
heuristics.VoronoiHeuristic(max_steps=16, opening_iterations=0).score(*board_state)
assert_array_equal(board_state[0], default_round1_board()[0])
class TestRandomProbingHeuristic(unittest.TestCase):
def test_empty_board(self):
"""Evaluating the policy should not throw any error."""
heuristics.RandomProbingHeuristic(heuristics.RegionHeuristic(), n_steps=5,
n_probes=100).score(*empty_board_1player())
def test_default_round1_board(self):
"""Evaluating the policy should not throw any error."""
heuristics.RandomProbingHeuristic(heuristics.RegionHeuristic(), n_steps=5,
n_probes=100).score(*default_round1_board())
def test_default_almost_full_board(self):
"""Evaluating the policy should not throw any error."""
heuristics.RandomProbingHeuristic(heuristics.RegionHeuristic(), n_steps=5,
n_probes=100).score(*default_almost_full_board())
def test_immutable_input(self):
"""Check if the heuristic modifies the input data itself."""
board_state = default_round1_board()
heuristics.RandomProbingHeuristic(heuristic=heuristics.RandomHeuristic(), n_steps=5,
n_probes=10).score(*board_state)
assert_array_equal(board_state[0], default_round1_board()[0])
class TestPathLengthHeuristic(unittest.TestCase):
def test_small_board(self):
""" board state visualised: board size = 1x3
1 - -
"""
cells = np.zeros((1, 3), dtype=bool)
player = spe_ed.Player(player_id=1, x=0, y=0, direction=spe_ed.directions[0], speed=1, active=True)
opponents = []
rounds = 0
score = heuristics.PathLengthHeuristic(n_steps=1).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 1.0)
score = heuristics.PathLengthHeuristic(n_steps=2).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 1.0)
score = heuristics.PathLengthHeuristic(n_steps=3).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 2 / 3)
def test_small_board2(self):
""" board state visualised: board size = 1x3
1 - -
- - -
"""
cells = np.zeros((2, 3), dtype=bool)
player = spe_ed.Player(player_id=1, x=0, y=0, direction=spe_ed.directions[0], speed=1, active=True)
opponents = []
rounds = 0
score = heuristics.PathLengthHeuristic(n_steps=5).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 1.0)
score = heuristics.PathLengthHeuristic(n_steps=6).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 1.0)
score = heuristics.PathLengthHeuristic(n_steps=7).score(cells, player, opponents, rounds, time.time() + 10)
self.assertEqual(score, 6 / 7)
def test_empty_board(self):
board_state = empty_board_1player()
score = heuristics.PathLengthHeuristic(n_steps=5).score(*board_state)
self.assertEqual(score, 1.0)
score = heuristics.PathLengthHeuristic(n_steps=10).score(*board_state)
self.assertEqual(score, 1.0)
# score = heuristics.PathLengthHeuristic(n_steps=25).score(*board_state)
# self.assertEqual(score, 1.0)
def test_default_round1_board(self):
score = heuristics.PathLengthHeuristic(n_steps=5).score(*default_round1_board())
self.assertGreater(score, 2.0 / 5.0)
self.assertLessEqual(score, 1.0)
def test_default_almost_full_board(self):
score = heuristics.PathLengthHeuristic(n_steps=5).score(*default_almost_full_board())
self.assertEqual(score, 2.0 / 5.0)
def test_immutable_input(self):
"""Check if the heuristic modifies the input data itself."""
board_state = default_round1_board()
heuristics.PathLengthHeuristic(n_steps=5).score(*board_state)
assert_array_equal(board_state[0], default_round1_board()[0])
class TestCompositeHeuristic(unittest.TestCase):
def test_normalized_output_value(self):
"""Should normalize weight values and return a value between 0 and 1."""
score = heuristics.CompositeHeuristic(
heuristics=[
heuristics.RandomHeuristic(),
heuristics.RandomHeuristic(),
heuristics.RandomHeuristic(),
],
weights=[1, 20000, 1000]
).score(*empty_board_1player())
self.assertGreaterEqual(score, 0.0)
self.assertLessEqual(score, 1.0)
def test_default_weights(self):
""" Composite heuristic should be callable within other composite heuristic.
Should return always a normalized result.
"""
score = heuristics.CompositeHeuristic(
heuristics=[
heuristics.OpponentDistanceHeuristic(),
heuristics.PathLengthHeuristic(n_steps=2),
]
).score(*default_round1_board())
self.assertGreaterEqual(score, 0.0)
self.assertLessEqual(score, 1.0)
def test_double_stacked_composites(self):
""" Composite heuristic should be callable within other composite heuristic.
        Should always return a normalized result.
"""
score = heuristics.CompositeHeuristic(
heuristics=[
heuristics.OpponentDistanceHeuristic(),
heuristics.PathLengthHeuristic(n_steps=2),
heuristics.CompositeHeuristic(
heuristics=[
heuristics.RandomHeuristic(),
heuristics.PathLengthHeuristic(n_steps=2),
],
weights=[1, 2000]
),
]
).score(*default_round1_board())
self.assertGreaterEqual(score, 0.0)
self.assertLessEqual(score, 1.0)
| 38.694444
| 115
| 0.642339
|
8bd2b98316b5918e4ed9130bc0e5402d12b642a2
| 2,122
|
py
|
Python
|
spirals/good_fit_test.py
|
kadglass/RotationCurves
|
7f52dac022ca4df666e671eeb7f0aaf65c8515c3
|
[
"BSD-3-Clause"
] | 2
|
2020-03-20T09:43:57.000Z
|
2020-05-27T23:50:38.000Z
|
spirals/good_fit_test.py
|
kadglass/RotationCurves
|
7f52dac022ca4df666e671eeb7f0aaf65c8515c3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-14T07:16:09.000Z
|
2021-09-14T07:16:09.000Z
|
spirals/good_fit_test.py
|
kadglass/RotationCurves
|
7f52dac022ca4df666e671eeb7f0aaf65c8515c3
|
[
"BSD-3-Clause"
] | 3
|
2021-09-13T18:22:03.000Z
|
2022-02-08T20:00:24.000Z
|
'''
Script to set the "good_fit" field based on the original fit to the formula from
Barrera-Ballestros (BB) without introducing any loss penalty on the values of
the individual parameters.
A galaxy is considered to have a bad fit if any of the following are true:
- Rturn = 0
- Rturn > 200
- Vmax > 20,000
These galaxies will have "good_fit" = 0 (False).
'''
from astropy.table import Table
import numpy as np
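# A minimal sketch (for illustration only; it is not used by the pipeline below) of
# the "good fit" rule stated in the module docstring, applied to a single galaxy:
def _is_good_fit(r_turn, v_max):
    '''Return 1 when 0 < Rturn < 200 and Vmax < 20,000, otherwise 0.'''
    return int(0 < r_turn < 200 and v_max < 20000)
# e.g. _is_good_fit(5.0, 150.0) -> 1, while _is_good_fit(0.0, 150.0) -> 0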
################################################################################
# Data
#-------------------------------------------------------------------------------
filename = 'Pipe3D-master_file_vflag_10_smooth2p27_N2O2_noWords.txt'
data = Table.read(filename, format='ascii.commented_header')
################################################################################
################################################################################
# Set "good_fit" value for each galaxy
#-------------------------------------------------------------------------------
data['good_fit'] = np.zeros(len(data), dtype=int)
for i in range(len(data)):
curve_used = data['curve_used'][i]
# Positive rotation curve
if curve_used == 1:
Rturn_used = data['pos_r_turn'][i]
Vmax_used = data['pos_v_max'][i]
# Average rotation curve
elif curve_used == 0:
Rturn_used = data['avg_r_turn'][i]
Vmax_used = data['avg_v_max'][i]
# Negative rotation curve
elif curve_used == -1:
Rturn_used = data['neg_r_turn'][i]
Vmax_used = data['neg_v_max'][i]
# Is this a "good" fit?
if Rturn_used > 0 and Rturn_used < 200 and Vmax_used < 20000:
data['good_fit'][i] = 1
################################################################################
################################################################################
# Save results
#-------------------------------------------------------------------------------
data.write(filename[:-4] + '_goodFit.txt', format='ascii.commented_header',
overwrite=True)
################################################################################
| 32.151515
| 81
| 0.438737
|
acd0c8ba1e1ae95680d9b5d8028932b87a3525f0
| 16,367
|
py
|
Python
|
rmgpy/data/reference.py
|
xiaoruiDong/RMG-Py
|
7c613c6ffe64c37564cfab2d634566f67d32b947
|
[
"MIT"
] | 1
|
2020-10-14T12:01:47.000Z
|
2020-10-14T12:01:47.000Z
|
rmgpy/data/reference.py
|
Tingchenlee/RMG-Py
|
c18ea8090d995e58e2428bfa87cd9036733a5fc0
|
[
"MIT"
] | null | null | null |
rmgpy/data/reference.py
|
Tingchenlee/RMG-Py
|
c18ea8090d995e58e2428bfa87cd9036733a5fc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
Contains classes and functions for working with the bibliographic information.
Currently there are three such classes:
* :class:`Article` - For articles in a journal, magazine, or other periodical
* :class:`Book` - For complete books
* :class:`Thesis` - For a graduate thesis
The above are all derived from the base :class:`Reference` class, which can
also be used if the reference does not fit into any of the above categories.
"""
import re
################################################################################
class Reference(object):
"""
A base class for representing bibliographic information. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`authors` A list of the authors of the reference
`title` The title of the reference
`year` The year the reference was published (as a string)
`doi` A DOI link to the reference
`url` Any other link to the reference
=================== ========================================================
"""
def __init__(self, authors=None, title='', year='', doi='', url=''):
self.authors = authors or []
self.title = title
self.year = year
self.doi = doi
self.url = url
def __repr__(self):
"""
Return a string representation of the reference that can be used to
reconstruct the object.
"""
string = self.to_pretty_repr()
string = re.sub(r'\(\n ', '(', string)
string = re.sub(r',\n ', ', ', string)
string = re.sub(r',\n\)', ')', string)
string = re.sub(r' = ', '=', string)
return string
def __str__(self):
"""
Return a string representation of the reference in reStructuredText
format.
"""
string = self.get_author_string()
if self.title != '':
string += u' *{0}*'.format(self.title)
if self.year != '':
string += u' ({0})'.format(self.year)
if string and string[-1] != '.':
string += '.'
return string
def to_pretty_repr(self):
"""
Return a string representation of the reference that can be used to
reconstruct the object.
"""
string = u'Reference(\n'
if len(self.authors) != 0:
string += u' authors = [{0}],\n'.format(
', '.join(['{0!r}'.format(author.encode("utf-8")) for author in self.authors]))
if self.title != '':
string += u' title = {0!r},\n'.format(self.title.encode("utf-8"))
if self.year != '':
string += u' year = {0!r},\n'.format(self.year.encode("utf-8"))
if self.doi != '':
string += u' doi = {0!r},\n'.format(self.doi.encode("utf-8"))
if self.url != '':
string += u' url = {0!r},\n'.format(self.url.encode("utf-8"))
return string + u')'
def get_author_string(self):
"""
Return a pretty, reStructuredText-formatted string of the authors.
"""
authors = ''
if self.authors is not None and len(self.authors) > 0:
if len(self.authors) == 1:
authors = u'{0}.'.format(self.authors[0])
elif len(self.authors) == 2:
authors = u'{0} and {1}.'.format(self.authors[0], self.authors[1])
elif self.authors[-1] == 'et al':
authors = u'{0} et al.'.format(', '.join(self.authors[:-1]))
else:
authors = u'{0}, and {1}.'.format(', '.join(self.authors[:-1]), self.authors[-1])
# reStructuredText automatically interprets "A." et al as a
# numbered list; this suppresses that behavior
if authors[1:3] == '. ':
authors = authors[0:2] + u'\ ' + authors[2:]
# If the last author is of the form "Lastname, A. B.", this will
# remove the extra period at the end of the sentence
if authors[-2:] == '..':
authors = authors[:-1]
return authors
################################################################################
class Article(Reference):
"""
A class for representing an article in a journal, magazine, or other
periodical. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`authors` A list of the authors of the reference
`title` The title of the reference
`journal` The abbreviated name of the journal
`volume` The volume that the article appears in (as a string)
`number` The number that the article appears in (as a string)
`pages` The range of pages of the article (as a string)
`year` The year the reference was published (as a string)
`doi` A DOI link to the reference
`url` Any other link to the reference
=================== ========================================================
"""
def __init__(self, authors=None, title='', journal='', volume='', number='', pages='', year='', doi='', url=''):
Reference.__init__(self, authors=authors, title=title, year=year, doi=doi, url=url)
self.journal = journal
self.volume = volume
self.number = number
self.pages = pages
def __str__(self):
"""
Return a string representation of the reference in reStructuredText
format.
"""
string = self.get_author_string()
if self.title != '':
string += u' "{0}."'.format(self.title)
if self.journal != '':
string += u' *{0}*'.format(self.journal)
if self.volume != '':
string += u' **{0}**'.format(self.volume)
if self.number != '':
string += u' ({0})'.format(self.number)
if self.pages != '':
string += u', p. {0}'.format(self.pages)
if self.year != '':
string += u' ({0})'.format(self.year)
if string and string[-1] != '.':
string += u'.'
return string
def to_pretty_repr(self):
"""
Return a string representation of the reference that can be used to
reconstruct the object.
"""
string = u'Article(\n'
if len(self.authors) != 0:
string += u' authors = [{0}],\n'.format(
', '.join(['{0!r}'.format(author.encode("utf-8")) for author in self.authors]))
if self.title != '':
string += u' title = {0!r},\n'.format(self.title.encode("utf-8"))
if self.journal != '':
string += u' journal = {0!r},\n'.format(self.journal.encode("utf-8"))
if self.volume != '':
string += u' volume = {0!r},\n'.format(self.volume.encode("utf-8"))
if self.number != '':
string += u' number = {0!r},\n'.format(self.number.encode("utf-8"))
if self.pages != '':
string += u' pages = {0!r},\n'.format(self.pages.encode("utf-8"))
if self.year != '':
string += u' year = {0!r},\n'.format(self.year.encode("utf-8"))
if self.doi != '':
string += u' doi = {0!r},\n'.format(self.doi.encode("utf-8"))
if self.url != '':
string += u' url = {0!r},\n'.format(self.url.encode("utf-8"))
return string + u')'
################################################################################
class Book(Reference):
"""
A class for representing a complete book. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`authors` A list of the authors of the reference
`title` The title of the reference
`publisher` The publisher of the book
`address` The address of the publisher (usually city and state/country)
`volume` The volume of the book
`series` The series the book belongs to
`edition` The edition of the book, as a string ordinal (e.g. ``'First'``)
`year` The year the reference was published (as a string)
`doi` A DOI link to the reference
`url` Any other link to the reference
=================== ========================================================
"""
def __init__(self, authors=None, title='', publisher='', address='', volume='', series='', edition='', year='',
doi='', url=''):
Reference.__init__(self, authors=authors, title=title, year=year, doi=doi, url=url)
self.publisher = publisher
self.address = address
self.volume = volume
self.series = series
self.edition = edition
def __str__(self):
"""
Return a string representation of the reference in reStructuredText
format.
"""
string = self.get_author_string()
if self.title != '':
string += u' *{0}.*'.format(self.title)
if self.edition != '':
string += u' {0} edition.'.format(self.edition)
if self.volume != '':
string += u' Vol. {0}.'.format(self.volume)
if self.address != '':
string += u' {0}:'.format(self.address)
if self.publisher != '':
string += u' **{0}**'.format(self.publisher)
if self.year != '':
string += u' ({0})'.format(self.year)
return string + u'.'
def to_pretty_repr(self):
"""
Return a string representation of the reference that can be used to
reconstruct the object.
"""
string = u'Book(\n'
if len(self.authors) != 0:
string += u' authors = [{0}],\n'.format(
', '.join(['{0!r}'.format(author.encode("utf-8")) for author in self.authors]))
if self.title != '':
string += u' title = {0!r},\n'.format(self.title.encode("utf-8"))
if self.publisher != '':
string += u' publisher = {0!r},\n'.format(self.publisher.encode("utf-8"))
if self.address != '':
string += u' address = {0!r},\n'.format(self.address.encode("utf-8"))
if self.volume != '':
string += u' volume = {0!r},\n'.format(self.volume.encode("utf-8"))
if self.series != '':
string += u' series = {0!r},\n'.format(self.series.encode("utf-8"))
if self.edition != '':
string += u' edition = {0!r},\n'.format(self.edition.encode("utf-8"))
if self.year != '':
string += u' year = {0!r},\n'.format(self.year.encode("utf-8"))
if self.doi != '':
string += u' doi = {0!r},\n'.format(self.doi.encode("utf-8"))
if self.url != '':
string += u' url = {0!r},\n'.format(self.url.encode("utf-8"))
return string + u')'
################################################################################
class Thesis(Reference):
"""
A class for representing a graduate thesis. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`authors` A list of the authors of the reference
`title` The title of the reference
`degree` ``'Ph.D.'`` or ``'Masters'``
`school` The name of the institution at which the thesis was written
`year` The year the reference was published (as a string)
`doi` A DOI link to the reference
`url` Any other link to the reference
=================== ========================================================
"""
def __init__(self, authors=None, title='', degree='', school='', year='', doi='', url=''):
Reference.__init__(self, authors=authors, title=title, year=year, doi=doi, url=url)
self.degree = degree
self.school = school
def __str__(self):
"""
Return a string representation of the reference in reStructuredText
format.
"""
string = self.get_author_string()
if self.title != '':
string += u' "{0}."'.format(self.title)
if self.degree != '':
string += u' {0} thesis.'.format(self.degree)
if self.school != '':
string += u' {0}'.format(self.school)
if self.year != '':
string += u' ({0})'.format(self.year)
if string and string[-1] != '.':
string += u'.'
return string
def to_pretty_repr(self):
"""
Return a string representation of the reference that can be used to
reconstruct the object.
"""
string = u'Thesis(\n'
if len(self.authors) != 0:
string += u' authors = [{0}],\n'.format(
', '.join(['{0!r}'.format(author.encode("utf-8")) for author in self.authors]))
if self.title != '':
string += u' title = {0!r},\n'.format(self.title.encode("utf-8"))
if self.degree != '':
string += u' degree = {0!r},\n'.format(self.degree.encode("utf-8"))
if self.school != '':
string += u' school = {0!r},\n'.format(self.school.encode("utf-8"))
if self.year != '':
string += u' year = {0!r},\n'.format(self.year.encode("utf-8"))
if self.doi != '':
string += u' doi = {0!r},\n'.format(self.doi.encode("utf-8"))
if self.url != '':
string += u' url = {0!r},\n'.format(self.url.encode("utf-8"))
return string + u')'
################################################################################
| 44.115903
| 116
| 0.46997
|
f90cc7f471330812fca6a20f62b12b9004097c54
| 1,053
|
py
|
Python
|
onnx2keras/upsampling_layers.py
|
jkparuchuri/onnx2keras
|
d24302f48bf74ce2ccc51fce8f096dbf4cb8917a
|
[
"MIT"
] | null | null | null |
onnx2keras/upsampling_layers.py
|
jkparuchuri/onnx2keras
|
d24302f48bf74ce2ccc51fce8f096dbf4cb8917a
|
[
"MIT"
] | null | null | null |
onnx2keras/upsampling_layers.py
|
jkparuchuri/onnx2keras
|
d24302f48bf74ce2ccc51fce8f096dbf4cb8917a
|
[
"MIT"
] | null | null | null |
from tensorflow import keras
import numpy as np
import logging
def convert_upsample(node, params, layers, lambda_func, node_name, keras_name):
"""
Convert upsample.
:param node: current operation node
:param params: operation attributes
:param layers: available keras layers
:param lambda_func: function for keras Lambda layer
:param node_name: internal converter name
:param keras_name: resulting layer name
:return: None
"""
logger = logging.getLogger('onnx2keras:upsample')
logger.warning('!!! EXPERIMENTAL SUPPORT (upsample) !!!')
if len(node.input) != 1:
raise AttributeError('Unsupported number of inputs')
if params['mode'].decode('utf-8') != 'nearest':
logger.error('Cannot convert non-nearest upsampling.')
raise AssertionError('Cannot convert non-nearest upsampling')
scale = np.uint8(params['scales'][-2:])
upsampling = keras.layers.UpSampling2D(
size=scale, name=keras_name
)
layers[node_name] = upsampling(layers[node.input[0]])
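# Minimal usage sketch (hypothetical node/params stand-ins, not a real ONNX
# graph; in practice onnx2keras supplies these objects itself):
if __name__ == "__main__":
    from types import SimpleNamespace

    inp = keras.layers.Input(shape=(8, 8, 3), name="input_0")
    layers = {"input_0": inp}
    node = SimpleNamespace(input=["input_0"])
    params = {"mode": b"nearest", "scales": np.array([1.0, 1.0, 2.0, 2.0])}

    convert_upsample(node, params, layers, lambda_func=None,
                     node_name="up_0", keras_name="up_0")
    print(layers["up_0"].shape)  # expected: (None, 16, 16, 3)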
| 30.970588
| 79
| 0.693257
|
79c20469533c14eeb7d8fb1e33a696ae8ff2131f
| 1,391
|
py
|
Python
|
src/sultan/echo/__init__.py
|
curtismuntz/sultan
|
65b4271a161d6c19a9eb0170b5a95832a139ab7f
|
[
"MIT"
] | 692
|
2016-08-30T15:09:48.000Z
|
2022-03-15T00:14:22.000Z
|
src/sultan/echo/__init__.py
|
curtismuntz/sultan
|
65b4271a161d6c19a9eb0170b5a95832a139ab7f
|
[
"MIT"
] | 48
|
2016-08-30T22:39:52.000Z
|
2020-09-08T13:34:56.000Z
|
src/sultan/echo/__init__.py
|
curtismuntz/sultan
|
65b4271a161d6c19a9eb0170b5a95832a139ab7f
|
[
"MIT"
] | 41
|
2016-09-19T20:36:23.000Z
|
2021-09-10T13:29:34.000Z
|
import logging
from sultan.core import Base
from sultan.echo.colorlog import StreamHandler, ColoredFormatter
from sultan.config import settings
handler = StreamHandler()
handler.setFormatter(ColoredFormatter(
settings.LOG_FORMAT,
log_colors=settings.LOG_COLORS
))
def getLogger(name='', level=logging.DEBUG):
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(level)
return logger
class Echo(Base):
def __init__(self, activated=True):
self.logger = getLogger(name='sultan')
self.activated = activated
def log(self, msg):
if self.activated:
self.logger.info(msg)
def cmd(self, msg):
if self.activated:
self.logger.debug(msg)
def stdout(self, msg):
if self.activated:
self.logger.info(msg)
def stderr(self, msg):
if self.activated:
self.logger.critical(msg)
def debug(self, msg):
if self.activated:
self.logger.debug(msg)
def info(self, msg):
if self.activated:
self.logger.info(msg)
def warn(self, msg):
if self.activated:
self.logger.warning(msg)
def error(self, msg):
if self.activated:
self.logger.error(msg)
def critical(self, msg):
if self.activated:
self.logger.critical(msg)
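# Illustrative usage (a minimal sketch; the colored formatting comes from
# sultan.config.settings as wired up above):
#
#     echo = Echo()
#     echo.cmd("ls -lah")        # logged at DEBUG level
#     echo.stdout("total 48K")   # logged at INFO level
#     echo.stderr("boom")        # logged at CRITICAL level
#
#     quiet = Echo(activated=False)
#     quiet.info("never shown")  # suppressed while deactivated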
| 19.591549
| 64
| 0.622574
|
94892e2cf634c76e92d831837270f21eb43d2e5f
| 3,087
|
py
|
Python
|
Anchors/Move ogonek Anchors to Baseline Intersection.py
|
justanotherfoundry/Glyphs-Scripts
|
f28aeab0224ae19ace4a86cf363e7990985199b7
|
[
"Apache-2.0"
] | 283
|
2015-01-07T12:35:35.000Z
|
2022-03-29T06:10:44.000Z
|
Anchors/Move ogonek Anchors to Baseline Intersection.py
|
justanotherfoundry/Glyphs-Scripts
|
f28aeab0224ae19ace4a86cf363e7990985199b7
|
[
"Apache-2.0"
] | 203
|
2015-01-26T18:43:08.000Z
|
2022-03-04T01:47:58.000Z
|
Anchors/Move ogonek Anchors to Baseline Intersection.py
|
justanotherfoundry/Glyphs-Scripts
|
f28aeab0224ae19ace4a86cf363e7990985199b7
|
[
"Apache-2.0"
] | 96
|
2015-01-19T20:58:03.000Z
|
2022-03-29T06:10:56.000Z
|
#MenuTitle: Move ogonek anchors to baseline intersection
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
In selected glyphs, moves all ogonek and _ogonek anchors to the rightmost intersection of the outline with the baseline. Verbose report in the Macro Window.
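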
"""
import math
from Foundation import NSPoint, NSBundle, NSMutableArray  # NSMutableArray is used below when selecting the anchor
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def angle( firstPoint, secondPoint ):
xDiff = firstPoint.x - secondPoint.x
yDiff = firstPoint.y - secondPoint.y
tangens = yDiff / xDiff
angle = math.atan( tangens ) * 180.0 / math.pi
return angle
def sliceIntersections( thisLayer, startPoint, endPoint ):
return thisLayer.calculateIntersectionsStartPoint_endPoint_( startPoint, endPoint )
def intersectionOnBaseline( thisLayer ):
"""Returns the NSPoint of the rightmost intersection with the baseline."""
goodMeasure = 1
originX = thisLayer.bounds.origin.x - goodMeasure
originPoint = NSPoint( originX, 0.0 )
targetX = originX + thisLayer.bounds.size.width + goodMeasure
targetPoint = NSPoint( targetX, 0.0 )
intersections = sliceIntersections( thisLayer, originPoint, targetPoint )
# print("intersectionOnBaseline:", intersections, originPoint, targetPoint)
if len(intersections) > 2:
rightmostIntersection = intersections[-2].pointValue()
return rightmostIntersection
else:
return None
def process( thisLayer ):
ogonekAnchor = thisLayer.anchors["ogonek"]
if not ogonekAnchor:
ogonekAnchor = thisLayer.anchors["_ogonek"]
if ogonekAnchor:
baselineOutlineIntersection = intersectionOnBaseline( thisLayer )
if baselineOutlineIntersection:
ogonekAnchor.position = baselineOutlineIntersection
print("✅ Layer %s: ogonek anchor moved to %.1f, %.1f." % (thisLayer.name, ogonekAnchor.x, ogonekAnchor.y))
# selects anchor on thisLayer:
itemsToBeSelected = NSMutableArray.arrayWithObject_( ogonekAnchor )
thisLayer.setSelection_( itemsToBeSelected )
else:
# put it on the baseline, at least:
ogonekAnchor.y = 0
print("⚠️ Layer %s: ogonek moved to baseline, but there is no outline intersection." % thisLayer.name )
else:
print("❓ Layer %s: No anchor ogonek or _ogonek found." % thisLayer.name )
try:
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
Glyphs.clearLog() # clears macro window log
print("Move ogonek anchors to baseline intersection:\n")
for thisGlyph in [l.parent for l in selectedLayers]:
print("Processing: %s" % thisGlyph.name)
thisGlyph.beginUndo() # begin undo grouping
for thisLayer in thisGlyph.layers:
if thisLayer.isMasterLayer or thisLayer.isSpecialLayer:
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
print("\nDone.")
except Exception as e:
Glyphs.showMacroWindow()
print("\n⚠️ Script Error:\n")
print(e)
print()
import traceback
print(traceback.format_exc())
finally:
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| 34.3
| 139
| 0.760285
|
4d621ed1db01a30d0c6ddae7905c3d73d1c9a0eb
| 4,590
|
py
|
Python
|
dakara_server/users/tests/test_emails.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 4
|
2018-07-24T18:22:16.000Z
|
2020-01-24T16:30:54.000Z
|
dakara_server/users/tests/test_emails.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 88
|
2017-11-04T08:58:02.000Z
|
2022-03-30T11:39:08.000Z
|
dakara_server/users/tests/test_emails.py
|
DakaraProject/dakara-server
|
b28fc1a8561e431d562102932f3d6ff3607e545b
|
[
"MIT"
] | 1
|
2018-05-05T15:37:20.000Z
|
2018-05-05T15:37:20.000Z
|
from unittest.mock import ANY, patch
from internal.tests.base_test import UserModel
from users import emails
from users.tests.base_test import UsersAPITestCase, config_email_disabled
@patch("users.emails.send_mail")
class SendNotificationToManagersTestCase(UsersAPITestCase):
"""Test the send_notification_to_managers function."""
def test_send(self, mocked_send_mail):
"""Test send notification email to managers."""
self.create_user(
"TestManger", email="test@manager.com", users_level=UserModel.MANAGER
)
user = self.create_user("TestUser", email="test@user.com")
emails.send_notification_to_managers(user)
# assert call
mocked_send_mail.assert_called_once_with(
"New user registered", ANY, ANY, ["test@manager.com"], fail_silently=False
)
# assert the content of the mail
content = mocked_send_mail.call_args_list[0][0][1]
self.assertIn("TestUser (test@user.com)", content)
self.assertIn("/settings/users/{}".format(user.id), content)
@config_email_disabled
def test_send_email_disabled(self, mocked_send_mail):
"""Test notification email to managers not sent when emails are disabled."""
self.create_user(
"TestManger", email="test@manager.com", users_level=UserModel.MANAGER
)
user = self.create_user("TestUser", email="test@user.com")
emails.send_notification_to_managers(user)
# assert call
mocked_send_mail.assert_not_called()
def test_send_no_managers(self, mocked_send_mail):
"""Test send notification email when there are no managers."""
user = self.create_user("TestUser", email="test@user.com")
with self.assertLogs("users.emails", "DEBUG") as logger:
emails.send_notification_to_managers(user)
mocked_send_mail.assert_not_called()
self.assertListEqual(
logger.output,
[
"WARNING:users.emails:No managers to send message to when validating "
"new account of TestUser"
],
)
class GetNotificationToManagersTestCase(UsersAPITestCase):
"""Test the get_notification_to_managers function."""
def test_get(self):
"""Test to get notification template for managers."""
user = self.create_user("TestUser", email="test@user.com")
content = emails.get_notification_to_managers(user)
self.assertIn("TestUser (test@user.com)", content)
self.assertIn("http://frontend-host/settings/users/1", content)
@patch("users.emails.send_mail")
class SendNotificationToUserValidatedTestCase(UsersAPITestCase):
"""Test the send_notification_to_user_validated function."""
def test_send(self, mocked_send_mail):
"""Test send notification to user."""
user = self.create_user("TestUser", email="test@user.com")
emails.send_notification_to_user_validated(user)
mocked_send_mail.assert_called_with(
"Account validated",
ANY,
ANY,
[user.email],
fail_silently=False,
)
@config_email_disabled
def test_send_email_disabled(self, mocked_send_mail):
"""Test notification to user not sent when email disabled."""
user = self.create_user("TestUser", email="test@user.com")
emails.send_notification_to_user_validated(user)
mocked_send_mail.assert_not_called()
class GetManagersEmailsTestCase(UsersAPITestCase):
"""Test get_managers_emails function."""
def test_get_managers_emails(self):
# Create users in database
self.create_user("User", email="user@example.com")
manager_validated = self.create_user(
"ManagerValidated", email="mv@example.com", users_level=UserModel.MANAGER
)
manager_unvalidated = self.create_user(
"ManagerUnValidated", email="muv@example.com", users_level=UserModel.MANAGER
)
manager_unvalidated.validated_by_email = False
manager_unvalidated.save()
# Check only validated manager is returned
self.assertCountEqual([manager_validated.email], emails.get_managers_emails())
class GetNotificationToUserValidatedTestCase(UsersAPITestCase):
"""Test the get_notification_to_user_validated function."""
def test_get(self):
"""Test to get notification template for validated users."""
content = emails.get_notification_to_user_validated()
self.assertIn("http://frontend-host/login", content)
| 35.581395
| 88
| 0.683442
|
090c35a6ad701cbfc3e2d5a5a7c7977223c4dc49
| 2,710
|
py
|
Python
|
Project 2/network_p2_2/Transmitter.py
|
MercurialJD/Athernet
|
0579d5471d7172c8714400bcbc7f892ee4e0f25f
|
[
"MIT"
] | null | null | null |
Project 2/network_p2_2/Transmitter.py
|
MercurialJD/Athernet
|
0579d5471d7172c8714400bcbc7f892ee4e0f25f
|
[
"MIT"
] | null | null | null |
Project 2/network_p2_2/Transmitter.py
|
MercurialJD/Athernet
|
0579d5471d7172c8714400bcbc7f892ee4e0f25f
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
import sounddevice as sd
from utils import *
class Transmitter:
def __init__(self, config):
self.data = None
self.fs = config.fs
self.fc = config.fc
self.header_length = config.header_length
self.preamble_wave = config.preamble_wave
self.carrier_wave = config.carrier_wave
self.audio_out = config.audio_out
self.crc_length = config.crc_length
self.wave_length_per_bit = config.wave_length_per_bit
self.frame_num = None
self.frame_length = None
self.ostream = sd.OutputStream(samplerate=self.fs, channels=1, latency="low")
self.ostream.start()
def addHeader(self, id, frame):
        '''Add an ID header at the head of each frame.'''
new_frame = np.zeros((frame.size + self.header_length), dtype=np.uint8)
new_frame[:self.header_length] = dec2arr(id, self.header_length)
new_frame[self.header_length:] = copy.deepcopy(frame)
return new_frame
def addCRC(self, frame):
''' Add crc-8 at the tail, calculated from header+frame_data '''
new_frame = np.zeros((frame.size + self.crc_length), dtype=np.uint8)
new_frame[:frame.size] = copy.deepcopy(frame)
new_frame[frame.size:] = dec2arr(int(generateCRC(frame, mode='crc-'+str(self.crc_length)), 16), self.crc_length)
return new_frame
def modulate(self, frame):
frame_wave = np.zeros((frame.size * self.wave_length_per_bit))
for i in range(frame.size):
frame_wave[self.wave_length_per_bit*i : self.wave_length_per_bit*(i+1)] = \
self.carrier_wave[self.wave_length_per_bit*i : self.wave_length_per_bit*(i+1)] * (frame[i] * 2 - 1)
return frame_wave
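    # Illustrative note (not part of the original class): modulate() maps each
    # bit b in {0, 1} to (b * 2 - 1) in {-1, +1} and multiplies it into the
    # matching slice of the carrier wave, i.e. a simple BPSK-style scheme.
    # For a frame [1, 0, 1] the result is [+carrier, -carrier, +carrier],
    # one segment of wave_length_per_bit samples per bit.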
def makeSound(self, wave):
print("Sending...")
self.ostream.write(wave)
print("Sending Finished.")
def send(self, data):
self.data = data
self.frame_num = data.shape[0]
self.frame_length = data.shape[1]
# self.ostream.start()
whole_wave = np.array([])
for i in range(self.frame_num):
random_wave = self.modulate(np.random.rand(50))
header_frame = self.addHeader(i, self.data[i])
header_frame_crc = self.addCRC(header_frame)
frame_wave = self.modulate(header_frame_crc)
whole_wave = np.concatenate((whole_wave, random_wave, self.preamble_wave, frame_wave), axis=0)
whole_wave = np.concatenate((whole_wave, random_wave.repeat(20)), axis=0)
if self.audio_out:
self.makeSound(whole_wave.astype(np.float32))
return whole_wave
def __del__(self):
self.ostream.close()
| 38.169014
| 120
| 0.643542
|
b9091703e1402c65c1f4a6f07bae6b6864b79975
| 17,266
|
py
|
Python
|
machine_learning/linear_discriminant_analysis.py
|
MKiperszmid/Python
|
6b368e6ab2fa1a839b029fd45e127521bbe76005
|
[
"MIT"
] | 1
|
2020-08-28T18:25:45.000Z
|
2020-08-28T18:25:45.000Z
|
machine_learning/linear_discriminant_analysis.py
|
MKiperszmid/Python
|
6b368e6ab2fa1a839b029fd45e127521bbe76005
|
[
"MIT"
] | 1
|
2020-08-28T18:24:31.000Z
|
2020-08-28T19:35:47.000Z
|
machine_learning/linear_discriminant_analysis.py
|
MKiperszmid/Python
|
6b368e6ab2fa1a839b029fd45e127521bbe76005
|
[
"MIT"
] | null | null | null |
"""
Linear Discriminant Analysis
Assumptions About Data :
    1. The input variables have a Gaussian distribution.
    2. The variance calculated for each input variable by class grouping is the
       same.
3. The mix of classes in your training set is representative of the problem.
Learning The Model :
The LDA model requires the estimation of statistics from the training data :
1. Mean of each input value for each class.
2. Probability of an instance belong to each class.
3. Covariance for the input data for each class
Calculate the class means :
mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
Calculate the class probabilities :
P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
Calculate the variance :
We can calculate the variance for dataset in two steps :
1. Calculate the squared difference for each input variable from the
group mean.
2. Calculate the mean of the squared difference.
------------------------------------------------
Squared_Difference = (x - mean(k)) ** 2
Variance = (1 / (count(x) - count(classes))) *
(for i = 1 to i = n --> sum(Squared_Difference(xi)))
Making Predictions :
discriminant(x) = x * (mean / variance) -
((mean ** 2) / (2 * variance)) + Ln(probability)
---------------------------------------------------------------------------
After calculating the discriminant value for each class, the class with the
largest discriminant value is taken as the prediction.
Author: @EverLookNeverSee
"""
from math import log
from os import name, system
from random import gauss, seed
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
"""
Generate gaussian distribution instances based-on given mean and standard deviation
:param mean: mean value of class
    :param std_dev: standard deviation entered by the user, or its default value
    :param instance_count: number of instances in the class
:return: a list containing generated values based-on given mean, std_dev and
instance_count
>>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
[6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
"""
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
"""
Generate y values for corresponding classes
:param class_count: Number of classes(data groupings) in dataset
:param instance_count: number of instances in class
:return: corresponding values for data groupings in dataset
>>> y_generator(1, [10])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> y_generator(2, [5, 10])
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
"""
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
"""
Calculate given class mean
:param instance_count: Number of instances in class
:param items: items that related to specific class(data grouping)
:return: calculated actual mean of considered class
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> calculate_mean(len(items), items)
5.011267842911003
"""
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
"""
    Calculate the probability that a given instance belongs to a particular class
:param instance_count: number of instances in class
:param total_count: the number of all instances
:return: value of probability for considered class
>>> calculate_probabilities(20, 60)
0.3333333333333333
>>> calculate_probabilities(30, 100)
0.3
"""
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
"""
Calculate the variance
:param items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param total_count: the number of all instances
:return: calculated variance for considered dataset
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> means = [5.011267842911003]
>>> total_count = 20
>>> calculate_variance([items], means, total_count)
0.9618530973487491
"""
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for item in items[i]:
# appending squared differences to 'squared_diff' list
squared_diff.append((item - means[i])**2)
# one divided by (the number of all instances - number of classes) multiplied by
# sum of all squared differences
n_classes = len(means) # Number of classes in dataset
return 1 / (total_count - n_classes) * sum(squared_diff)
# Making predictions
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
""" This function predicts new indexes(groups for our data)
:param x_items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param variance: calculated value of variance by calculate_variance function
:param probabilities: a list containing all probabilities of classes
:return: a list containing predicted Y values
>>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
... 4.235456349028368, 3.9078267848958586, 5.031334516831717,
... 3.977896829989127, 3.56317055489747, 5.199311976483754,
... 5.133374604658605, 5.546468300338232, 4.086029056264687,
... 5.005005283626573, 4.935258239627312, 3.494170998739258,
... 5.537997178661033, 5.320711100998849, 7.3891120432406865,
... 5.202969177309964, 4.855297691835079], [11.288184753155463,
... 11.44944560869977, 10.066335808938263, 9.235456349028368,
... 8.907826784895859, 10.031334516831716, 8.977896829989128,
... 8.56317055489747, 10.199311976483754, 10.133374604658606,
... 10.546468300338232, 9.086029056264687, 10.005005283626572,
... 9.935258239627313, 8.494170998739259, 10.537997178661033,
... 10.320711100998848, 12.389112043240686, 10.202969177309964,
... 9.85529769183508], [16.288184753155463, 16.449445608699772,
... 15.066335808938263, 14.235456349028368, 13.907826784895859,
... 15.031334516831716, 13.977896829989128, 13.56317055489747,
... 15.199311976483754, 15.133374604658606, 15.546468300338232,
... 14.086029056264687, 15.005005283626572, 14.935258239627313,
... 13.494170998739259, 15.537997178661033, 15.320711100998848,
... 17.389112043240686, 15.202969177309964, 14.85529769183508]]
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]
"""
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
# for loop iterates over number of inner items of each element
for j in range(len(x_items[i])):
temp = [] # to store all discriminant values of each item as a list
# for loop iterates over number of classes we have in our dataset
for k in range(len(x_items)):
# appending values of discriminants for each class to 'temp' list
temp.append(
x_items[i][j] * (means[k] / variance)
- (means[k] ** 2 / (2 * variance))
+ log(probabilities[k])
)
# appending discriminant values of each item to 'results' list
results.append(temp)
return [result.index(max(result)) for result in results]
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
"""
Calculate the value of accuracy based-on predictions
:param actual_y:a list containing initial Y values generated by 'y_generator'
function
:param predicted_y: a list containing predicted Y values generated by
'predict_y_values' function
:return: percentage of accuracy
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
... 1, 1 ,1 ,1 ,1 ,1 ,1]
>>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
... 0, 0, 1, 1, 1, 0, 1, 1, 1]
>>> accuracy(actual_y, predicted_y)
50.0
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> accuracy(actual_y, predicted_y)
100.0
"""
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100
# Main Function
def main():
""" This function starts execution phase """
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
print("First of all we should specify the number of classes that")
print("we want to generate as training dataset")
# Trying to get number of classes
n_classes = 0
while True:
try:
user_input = int(
input("Enter the number of classes (Data Groupings): ").strip()
)
if user_input > 0:
n_classes = user_input
break
else:
print(
f"Your entered value is {user_input} , Number of classes "
f"should be positive!"
)
continue
except ValueError:
print("Your entered value is not numerical!")
print("-" * 100)
std_dev = 1.0 # Default value for standard deviation of dataset
# Trying to get the value of standard deviation
while True:
try:
user_sd = float(
input(
"Enter the value of standard deviation"
"(Default value is 1.0 for all classes): "
).strip()
or "1.0"
)
if user_sd >= 0.0:
std_dev = user_sd
break
else:
print(
f"Your entered value is {user_sd}, Standard deviation should "
f"not be negative!"
)
continue
except ValueError:
print("Your entered value is not numerical!")
print("-" * 100)
# Trying to get number of instances in classes and theirs means to generate
# dataset
counts = [] # An empty list to store instance counts of classes in dataset
for i in range(n_classes):
while True:
try:
user_count = int(
input(f"Enter The number of instances for class_{i+1}: ")
)
if user_count > 0:
counts.append(user_count)
break
else:
print(
f"Your entered value is {user_count}, Number of "
"instances should be positive!"
)
continue
except ValueError:
print("Your entered value is not numerical!")
print("-" * 100)
# An empty list to store values of user-entered means of classes
user_means = []
for a in range(n_classes):
while True:
try:
user_mean = float(
input(f"Enter the value of mean for class_{a+1}: ")
)
if isinstance(user_mean, float):
user_means.append(user_mean)
break
print(f"You entered an invalid value: {user_mean}")
except ValueError:
print("Your entered value is not numerical!")
print("-" * 100)
print("Standard deviation: ", std_dev)
# print out the number of instances in classes in separated line
for i, count in enumerate(counts, 1):
print(f"Number of instances in class_{i} is: {count}")
print("-" * 100)
# print out mean values of classes separated line
for i, user_mean in enumerate(user_means, 1):
print(f"Mean of class_{i} is: {user_mean}")
print("-" * 100)
# Generating training dataset drawn from gaussian distribution
x = [
gaussian_distribution(user_means[j], std_dev, counts[j])
for j in range(n_classes)
]
print("Generated Normal Distribution: \n", x)
print("-" * 100)
# Generating Ys to detecting corresponding classes
y = y_generator(n_classes, counts)
print("Generated Corresponding Ys: \n", y)
print("-" * 100)
# Calculating the value of actual mean for each class
actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
# for loop iterates over number of elements in 'actual_means' list and print
# out them in separated line
for i, actual_mean in enumerate(actual_means, 1):
print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
print("-" * 100)
# Calculating the value of probabilities for each class
probabilities = [
calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
]
# for loop iterates over number of elements in 'probabilities' list and print
# out them in separated line
for i, probability in enumerate(probabilities, 1):
print(f"Probability of class_{i} is: {probability}")
print("-" * 100)
# Calculating the values of variance for each class
variance = calculate_variance(x, actual_means, sum(counts))
print("Variance: ", variance)
print("-" * 100)
# Predicting Y values
# storing predicted Y values in 'pre_indexes' variable
pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
print("-" * 100)
# Calculating Accuracy of the model
print(f"Accuracy: {accuracy(y, pre_indexes)}")
print("-" * 100)
print(" DONE ".center(100, "+"))
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
system("cls" if name == "nt" else "clear")
if __name__ == "__main__":
main()
| 42.527094
| 87
| 0.585486
|
b8e33a53db98d0eb2f1eff2bd14a9b6a63f8b76f
| 1,343
|
py
|
Python
|
egs/fisher_callhome_spanish_st/st1/local/callhome_make_spk2gender.py
|
eastonYi/espnet
|
61a89c182daa9149a57e3fc91ef4971e2727cc38
|
[
"Apache-2.0"
] | 3
|
2019-10-11T11:41:27.000Z
|
2020-05-21T09:08:44.000Z
|
egs/fisher_callhome_spanish_st/st1/local/callhome_make_spk2gender.py
|
eastonYi/espnet
|
61a89c182daa9149a57e3fc91ef4971e2727cc38
|
[
"Apache-2.0"
] | null | null | null |
egs/fisher_callhome_spanish_st/st1/local/callhome_make_spk2gender.py
|
eastonYi/espnet
|
61a89c182daa9149a57e3fc91ef4971e2727cc38
|
[
"Apache-2.0"
] | 2
|
2019-03-27T04:34:33.000Z
|
2019-04-12T17:30:18.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# Note(kamo-naoyuki) 31,Jan,2019:
# This file is copied from kaldi/egs/fisher_callhome_spanish/s5/local/callhole_make_spk2gender.sh
# and modified for py2/3 compatibility.
# Copyright 2014 Gaurav Kumar. Apache 2.0
# Gets the unique speakers from the file created by fsp_make_trans.pl
# Note that if a speaker appears multiple times, it is categorized as female
from __future__ import print_function
from __future__ import unicode_literals
import codecs
from io import open
import sys
PY2 = sys.version_info[0] == 2
sys.stdin = codecs.getreader('utf-8')(
sys.stdin if PY2 else sys.stdin.buffer)
sys.stdout = codecs.getwriter('utf-8')(
sys.stdout if PY2 else sys.stdout.buffer)
if __name__ == '__main__':
tmpFileLocation = 'data/local/tmp/callhome_spk2gendertmp'
tmpFile = None
try:
tmpFile = open(tmpFileLocation, encoding='utf-8')
except IOError:
print('The file spk2gendertmp does not exist. Run fsp_make_trans.pl first?',
file=sys.stderr)
raise
speakers = {}
for line in tmpFile:
comp = line.split(' ')
if comp[0] in speakers:
speakers[comp[0]] = "f"
else:
speakers[comp[0]] = comp[1]
for speaker, gender in speakers.items():
print(speaker + " " + gender)
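# Illustrative behaviour (hypothetical spk2gendertmp lines): given the input
#   "spkA m", "spkB f", "spkA m"
# spkA appears twice and is therefore re-categorized as "f", while spkB keeps
# the first gender recorded for it. Note that comp[1] retains the trailing
# newline of each input line (when present), so it is printed along with the
# gender of unrepeated speakers.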
| 27.408163
| 97
| 0.679077
|
cd851347f9e765837ac59c8a91b93d9f0085ef70
| 2,218
|
py
|
Python
|
migrations/versions/70888e184c42_initial_migration.py
|
Daniel6996-arch/verbose-pitches
|
b50877381e126c37b6a83e8d0b67921538be8bb8
|
[
"MIT"
] | null | null | null |
migrations/versions/70888e184c42_initial_migration.py
|
Daniel6996-arch/verbose-pitches
|
b50877381e126c37b6a83e8d0b67921538be8bb8
|
[
"MIT"
] | null | null | null |
migrations/versions/70888e184c42_initial_migration.py
|
Daniel6996-arch/verbose-pitches
|
b50877381e126c37b6a83e8d0b67921538be8bb8
|
[
"MIT"
] | null | null | null |
"""Initial Migration
Revision ID: 70888e184c42
Revises: 2b316ef6f12d
Create Date: 2021-11-11 06:57:51.350092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '70888e184c42'
down_revision = '2b316ef6f12d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('newpitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('author', sa.String(length=255), nullable=True),
sa.Column('pitch', sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_newpitches_author'), 'newpitches', ['author'], unique=False)
op.create_index(op.f('ix_newpitches_pitch'), 'newpitches', ['pitch'], unique=False)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['newpitches.id'], ),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
op.drop_index(op.f('ix_newpitches_pitch'), table_name='newpitches')
op.drop_index(op.f('ix_newpitches_author'), table_name='newpitches')
op.drop_table('newpitches')
# ### end Alembic commands ###
| 36.966667
| 89
| 0.681695
|
08f1e762f729952dedd3816fc865d4deca3e2eb0
| 1,156
|
py
|
Python
|
ramm_tox/kegg2pubchem.py
|
sorgerlab/ramm_tox
|
8c8ff7d3e22382272af8f1d06db9c40826b8fe5a
|
[
"MIT"
] | null | null | null |
ramm_tox/kegg2pubchem.py
|
sorgerlab/ramm_tox
|
8c8ff7d3e22382272af8f1d06db9c40826b8fe5a
|
[
"MIT"
] | null | null | null |
ramm_tox/kegg2pubchem.py
|
sorgerlab/ramm_tox
|
8c8ff7d3e22382272af8f1d06db9c40826b8fe5a
|
[
"MIT"
] | null | null | null |
"""Get PubChem CID from KEGG compound/drug accession."""
import sys
import csv
import bs4
import requests
import re
reader = csv.DictReader(open(sys.argv[1]))
writer = csv.DictWriter(sys.stdout, reader.fieldnames)
re_pubchem = re.compile(r'^PubChem:')
kegg_url = 'http://www.kegg.jp/dbget-bin/www_bget?{}'
pubchem_url = 'http://pubchem.ncbi.nlm.nih.gov/rest/pug/substance/sid/{}/cids/txt'
writer.writeheader()
for r in reader:
if not len(r['PubChem']) and len(r['KEGG']):
kegg_req = requests.get(kegg_url.format(r['KEGG']))
assert kegg_req.ok
soup = bs4.BeautifulSoup(kegg_req.content)
divs = soup.find_all('div', text=re_pubchem)
assert len(divs) <= 1
if len(divs) == 1:
sid = divs[0].nextSibling.find('a').text
pubchem_req = requests.get(pubchem_url.format(sid))
if pubchem_req.status_code != 404:
assert pubchem_req.ok
cids = pubchem_req.content.rstrip().split('\n')
assert len(cids) <= 1
if len(cids) == 1:
r['PubChem'] = cids[0]
writer.writerow(r)
sys.stdout.flush()
| 33.028571
| 82
| 0.613322
|
0ec54666dce4e8ad870af4ecc2d1890bf5b31e84
| 768
|
py
|
Python
|
LeetCode/Sum Root to Leaf Numbers - Iterative.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | 13
|
2021-09-02T07:30:02.000Z
|
2022-03-22T19:32:03.000Z
|
LeetCode/Sum Root to Leaf Numbers - Iterative.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | null | null | null |
LeetCode/Sum Root to Leaf Numbers - Iterative.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | 3
|
2021-08-24T16:06:22.000Z
|
2021-09-17T15:39:53.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumNumbers(self, root: TreeNode) -> int:
rootToLeafSum = 0
stack = [(root, 0)]
while len(stack) != 0:
node, currentNumber = stack.pop()
if node is not None:
currentNumber = currentNumber * 10 + node.val
if node.left is None and node.right is None:
rootToLeafSum += currentNumber
else:
stack.append((node.left, currentNumber))
stack.append((node.right, currentNumber))
return rootToLeafSum
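# Minimal local usage sketch (LeetCode supplies TreeNode at runtime; to run
# this file standalone, the TreeNode stub above must be uncommented first,
# because the type annotation on sumNumbers is evaluated when the class is
# defined):
#
#     root = TreeNode(1, TreeNode(2), TreeNode(3))
#     Solution().sumNumbers(root)   # root-to-leaf numbers 12 and 13 -> 25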
| 38.4
| 61
| 0.545573
|
ab047ab8aa767e2099e0f578a6182925f8cd414b
| 4,027
|
py
|
Python
|
youtuatools/extractor/tvc.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 47
|
2021-01-02T07:44:50.000Z
|
2022-02-28T22:02:13.000Z
|
youtuatools/extractor/tvc.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 4
|
2021-02-07T03:35:13.000Z
|
2021-10-31T19:23:53.000Z
|
youtuatools/extractor/tvc.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 8
|
2021-01-03T05:44:39.000Z
|
2021-11-01T05:46:32.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
)
class TVCIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)"
_TEST = {
"url": "http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702",
"md5": "bbc5ff531d1e90e856f60fc4b3afd708",
"info_dict": {
"id": "74622",
"ext": "mp4",
"title": 'События. "События". Эфир от 22.05.2015 14:30',
"thumbnail": r"re:^https?://.*\.jpg$",
"duration": 1122,
},
}
@classmethod
def _extract_url(cls, webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:http:)?//(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1',
webpage,
)
if mobj:
return mobj.group("url")
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
"http://www.tvc.ru/video/json/id/%s" % video_id, video_id
)
formats = []
for info in video.get("path", {}).get("quality", []):
video_url = info.get("url")
if not video_url:
continue
format_id = self._search_regex(
r"cdnvideo/([^/]+?)(?:-[^/]+?)?/", video_url, "format id", default=None
)
formats.append(
{
"url": video_url,
"format_id": format_id,
"width": int_or_none(info.get("width")),
"height": int_or_none(info.get("height")),
"tbr": int_or_none(info.get("bitrate")),
}
)
self._sort_formats(formats)
return {
"id": video_id,
"title": video["title"],
"thumbnail": video.get("picture"),
"duration": int_or_none(video.get("duration")),
"formats": formats,
}
class TVCArticleIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)"
_TESTS = [
{
"url": "http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/",
"info_dict": {
"id": "74622",
"ext": "mp4",
"title": 'События. "События". Эфир от 22.05.2015 14:30',
"description": "md5:ad7aa7db22903f983e687b8a3e98c6dd",
"thumbnail": r"re:^https?://.*\.jpg$",
"duration": 1122,
},
},
{
"url": "http://www.tvc.ru/news/show/id/69944",
"info_dict": {
"id": "75399",
"ext": "mp4",
"title": "Эксперты: в столице встал вопрос о максимально безопасных остановках",
"description": "md5:f2098f71e21f309e89f69b525fd9846e",
"thumbnail": r"re:^https?://.*\.jpg$",
"duration": 278,
},
},
{
"url": "http://www.tvc.ru/channel/brand/id/47/show/episodes#",
"info_dict": {
"id": "2185",
"ext": "mp4",
"title": "Ещё не поздно. Эфир от 03.08.2013",
"description": "md5:51fae9f3f8cfe67abce014e428e5b027",
"thumbnail": r"re:^https?://.*\.jpg$",
"duration": 3316,
},
},
]
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
return {
"_type": "url_transparent",
"ie_key": "TVC",
"url": self._og_search_video_url(webpage),
"title": clean_html(self._og_search_title(webpage)),
"description": clean_html(self._og_search_description(webpage)),
"thumbnail": self._og_search_thumbnail(webpage),
}
| 33.840336
| 152
| 0.488205
|
fb8c58b893554c57540e72290c175eed6570f79b
| 4,468
|
py
|
Python
|
mayan/apps/sources/wizards.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/sources/wizards.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/sources/wizards.py
|
nadwiabd/insight_edms
|
90a09d7ca77cb111c791e307b55a603e82042dfe
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from formtools.wizard.views import SessionWizardView
from common.mixins import ViewPermissionCheckMixin
from documents.forms import DocumentTypeSelectForm
from metadata.forms import DocumentMetadataFormSet
from tags.forms import TagMultipleSelectionForm
from .literals import STEP_DOCUMENT_TYPE, STEP_METADATA, STEP_TAGS
from .models import InteractiveSource
def has_metadata_types(wizard):
"""
Skip the 2nd step if document type has no associated metadata
"""
cleaned_data = wizard.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE) or {}
document_type = cleaned_data.get('document_type')
if document_type:
return document_type.metadata.exists()
class DocumentCreateWizard(ViewPermissionCheckMixin, SessionWizardView):
condition_dict = {STEP_METADATA: has_metadata_types}
extra_context = {}
form_list = (
DocumentTypeSelectForm, DocumentMetadataFormSet,
TagMultipleSelectionForm
)
form_titles = {
DocumentTypeSelectForm: _('Step 1 of 3: Select document type'),
DocumentMetadataFormSet: _('Step 2 of 3: Enter document metadata'),
TagMultipleSelectionForm: _('Step 3 of 3: Select tags'),
}
template_name = 'appearance/generic_wizard.html'
def dispatch(self, request, *args, **kwargs):
if not InteractiveSource.objects.filter(enabled=True).exists():
messages.error(
request,
_(
'No interactive document sources have been defined or '
'none have been enabled, create one before proceeding.'
)
)
return HttpResponseRedirect(reverse('sources:setup_source_list'))
return super(
DocumentCreateWizard, self
).dispatch(request, *args, **kwargs)
def get_context_data(self, form, **kwargs):
context = super(
DocumentCreateWizard, self
).get_context_data(form=form, **kwargs)
context.update({
'step_title': self.form_titles[form.__class__],
'submit_label': _('Next step'),
'submit_icon': 'fa fa-arrow-right',
'title': _('Document upload wizard'),
})
return context
def get_form_initial(self, step):
if step == STEP_METADATA:
initial = []
for document_type_metadata_type in self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'].metadata.all():
initial.append(
{
'document_type': self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'],
'metadata_type': document_type_metadata_type.metadata_type,
}
)
return initial
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step):
# Tags form needs the user instance to determine which tags to
# display
if step == STEP_DOCUMENT_TYPE:
return {'user': self.request.user}
if step == STEP_TAGS:
return {
'help_text': _('Tags to be attached.'),
'user': self.request.user
}
return {}
def done(self, *args, **kwargs):
query_dict = {}
try:
query_dict['document_type_id'] = self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'].pk
except AttributeError:
pass
try:
for identifier, metadata in enumerate(self.get_cleaned_data_for_step(STEP_METADATA)):
if metadata.get('update'):
query_dict['metadata%s_id' % identifier] = metadata['id']
query_dict['metadata%s_value' % identifier] = metadata['value']
except TypeError:
pass
try:
query_dict['tags'] = ([unicode(tag.pk) for tag in self.get_cleaned_data_for_step(STEP_TAGS)['tags']])
except AttributeError:
pass
url = '?'.join(
[
reverse('sources:upload_interactive'),
urlencode(query_dict, doseq=True)
]
)
return HttpResponseRedirect(url)
| 33.593985
| 130
| 0.629588
|
d397e3a54df3ba02bc4a63d61af692765410f367
| 3,138
|
py
|
Python
|
url_demo/url_demo/settings.py
|
gaohj/2001django
|
0da7227acb37b7cdb3a9595bd96e0e1afd63e760
|
[
"Apache-2.0"
] | null | null | null |
url_demo/url_demo/settings.py
|
gaohj/2001django
|
0da7227acb37b7cdb3a9595bd96e0e1afd63e760
|
[
"Apache-2.0"
] | null | null | null |
url_demo/url_demo/settings.py
|
gaohj/2001django
|
0da7227acb37b7cdb3a9595bd96e0e1afd63e760
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for url_demo project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2q-ck=wldt4#i16fgx1-ev@bp*uz)&6=eqfpev3491#91eg=4$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'url_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'url_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| 25.721311
| 91
| 0.695347
|
95b38e9906138f00fdd649f71f5a1f8f6ad940a9
| 7,534
|
py
|
Python
|
scrapy/http/request/form.py
|
younthOL/scrapy
|
6c3970e6722191b642fd99c6c1bfed0d93010cab
|
[
"BSD-3-Clause"
] | 2
|
2018-06-01T03:40:23.000Z
|
2018-06-01T03:40:26.000Z
|
scrapy/http/request/form.py
|
faheel/scrapy
|
72d0899bce06190de5a453b24dd66c8910e6d0ee
|
[
"BSD-3-Clause"
] | 1
|
2022-03-22T20:26:08.000Z
|
2022-03-22T20:26:08.000Z
|
scrapy/http/request/form.py
|
faheel/scrapy
|
72d0899bce06190de5a453b24dd66c8910e6d0ee
|
[
"BSD-3-Clause"
] | 1
|
2019-02-18T10:44:20.000Z
|
2019-02-18T10:44:20.000Z
|
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
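# Illustrative note (not part of the original module): list-like values are
# flattened into repeated keys, e.g.
#   _urlencode([('a', ['1', '2']), ('b', '3')], 'utf-8')  ->  'a=1&a=2&b=3'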
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
# Get form element from xpath, if not found, go up
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
# If we get here, it means that either formname was None
# or invalid
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata = dict(formdata or ())
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata]
if not dont_click:
clickable = _get_clickable(clickdata, form)
if clickable and clickable[0] not in formdata and not clickable[0] is None:
values.append(clickable)
values.extend((k, v) for k, v in formdata.items() if v is not None)
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml that was fixed in 2.3.1, see
        # https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
'|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
| 36.047847
| 98
| 0.596894
|
5b0f5db509c37053a4f94488d2b2b98ddfcfc36d
| 616
|
py
|
Python
|
os_faults/api/util.py
|
mail2nsrajesh/os-faults
|
3610f8dcfe69130c9a4543f1efd04ed41fe2037c
|
[
"Apache-2.0"
] | null | null | null |
os_faults/api/util.py
|
mail2nsrajesh/os-faults
|
3610f8dcfe69130c9a4543f1efd04ed41fe2037c
|
[
"Apache-2.0"
] | null | null | null |
os_faults/api/util.py
|
mail2nsrajesh/os-faults
|
3610f8dcfe69130c9a4543f1efd04ed41fe2037c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def public(funcobj):
funcobj.__public__ = True
return funcobj
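# Illustrative usage (a minimal sketch): the decorator only tags the function,
# so API consumers can later discover "public" members via the attribute.
#
#     @public
#     def restart(node):
#         ...
#
#     getattr(restart, '__public__', False)  # True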
| 34.222222
| 69
| 0.756494
|
9f1a15fe2d885f1abdeb36e7dc8cb382da0f11c9
| 1,828
|
py
|
Python
|
python-watcher-2.0.0/watcher/decision_engine/messaging/audit_endpoint.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
python-watcher-2.0.0/watcher/decision_engine/messaging/audit_endpoint.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
python-watcher-2.0.0/watcher/decision_engine/messaging/audit_endpoint.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from concurrent import futures
from oslo_config import cfg
from oslo_log import log
from watcher.decision_engine.audit import continuous as c_handler
from watcher.decision_engine.audit import oneshot as o_handler
from watcher import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class AuditEndpoint(object):
def __init__(self, messaging):
self._messaging = messaging
self._executor = futures.ThreadPoolExecutor(
max_workers=CONF.watcher_decision_engine.max_workers)
self._oneshot_handler = o_handler.OneShotAuditHandler()
self._continuous_handler = c_handler.ContinuousAuditHandler().start()
@property
def executor(self):
return self._executor
def do_trigger_audit(self, context, audit_uuid):
audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True)
self._oneshot_handler.execute(audit, context)
def trigger_audit(self, context, audit_uuid):
LOG.debug("Trigger audit %s", audit_uuid)
self.executor.submit(self.do_trigger_audit,
context,
audit_uuid)
return audit_uuid
| 32.642857
| 77
| 0.718818
|
1c25993c0c3cfefa42e58028c11afe254f13513b
| 874
|
py
|
Python
|
src/estimators.py
|
mkashifn/celosia
|
2caa776620b9e7c3f63c41329f10c8c91fb4857f
|
[
"MIT"
] | null | null | null |
src/estimators.py
|
mkashifn/celosia
|
2caa776620b9e7c3f63c41329f10c8c91fb4857f
|
[
"MIT"
] | null | null | null |
src/estimators.py
|
mkashifn/celosia
|
2caa776620b9e7c3f63c41329f10c8c91fb4857f
|
[
"MIT"
] | null | null | null |
#!/bin/python
import numpy as np
from functions import softmax
class Estimator:
def __init__(self):
pass
def __call__(self, A, B):
return self.fx(A, B)
def fx(self, A, B):
return 0
def dfx(self, A, B):
return 0
class MSE(Estimator):
def __init__(self):
pass
def fx(self, A, B):
return np.square(np.subtract(A, B)).mean()
def dfx(self, A, B):
n = A.shape[1] #number of columns
return (2*np.subtract(A, B))/n
class CrossEntropy(Estimator):
def fx(self, A, B):
'''A = predicted output, B is target output.'''
B = B.argmax(axis=1)
m = B.shape[0]
p = softmax(A)
log_likelihood = -np.log(p[range(m),B])
return np.sum(log_likelihood) / m
def dfx(self, A, B):
B = B.argmax(axis=1)
m = B.shape[0]
p = softmax(A)
p[range(m),B] -= 1
return p/m
mse = MSE()
cross_entropy = CrossEntropy()
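# Minimal usage sketch (added; not part of the original module). Both
# estimators take a predicted matrix A and a target matrix B of the same
# shape; the values below are made up purely for illustration.
def _example_estimators():
    A = np.array([[0.9, 0.1], [0.2, 0.8]])  # predictions
    B = np.array([[1.0, 0.0], [0.0, 1.0]])  # one-hot targets
    return mse(A, B), mse.dfx(A, B), cross_entropy(A, B)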
| 21.317073
| 51
| 0.599542
|
7ce96e050e0980bc413111d8acdc5504b97c2db2
| 1,360
|
py
|
Python
|
examples/pybadger_pyportal_touchscreen.py
|
dglaude/Adafruit_CircuitPython_PyBadger
|
6878e34cccc1a533d62d3adc6634abacad1358c1
|
[
"MIT"
] | null | null | null |
examples/pybadger_pyportal_touchscreen.py
|
dglaude/Adafruit_CircuitPython_PyBadger
|
6878e34cccc1a533d62d3adc6634abacad1358c1
|
[
"MIT"
] | null | null | null |
examples/pybadger_pyportal_touchscreen.py
|
dglaude/Adafruit_CircuitPython_PyBadger
|
6878e34cccc1a533d62d3adc6634abacad1358c1
|
[
"MIT"
] | null | null | null |
"""Simpletest example using Adafruit PyPortal. Uses the touchscreen to advance between examples."""
import board
from adafruit_pybadger import pybadger
import adafruit_touchscreen
# pylint: disable=invalid-name
# These pins are used as both analog and digital! XL, XR and YU must be analog
# and digital capable. YD just needs to be digital.
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XL,
board.TOUCH_XR,
board.TOUCH_YD,
board.TOUCH_YU,
calibration=((5200, 59000), (5800, 57000)),
size=(320, 240),
)
pybadger.show_badge(
name_string="Blinka", hello_scale=2, my_name_is_scale=2, name_scale=3
)
cur_example = 0
prev_touch = None
while True:
p = ts.touch_point
if p and not prev_touch:
cur_example += 1
if cur_example >= 3:
cur_example = 0
print(cur_example)
prev_touch = p
if cur_example == 0:
pybadger.show_business_card(
image_name="Blinka_PyPortal.bmp",
name_string="Blinka",
name_scale=2,
email_string_one="blinka@",
email_string_two="adafruit.com",
)
elif cur_example == 1:
pybadger.show_qr_code(data="https://circuitpython.org")
elif cur_example == 2:
pybadger.show_badge(
name_string="Blinka", hello_scale=2, my_name_is_scale=2, name_scale=3
)
| 28.333333
| 99
| 0.663235
|
e88f042b71bb97911518cdfe0148a2b15c55c989
| 4,478
|
py
|
Python
|
aiobungie/crate/application.py
|
nxtlo/aiobungie
|
ca0a842bf5a7217662f0d8d63456ad277f313ef1
|
[
"MIT"
] | 36
|
2021-07-09T19:26:18.000Z
|
2022-03-26T09:12:43.000Z
|
aiobungie/crate/application.py
|
nxtlo/aiobungie
|
ca0a842bf5a7217662f0d8d63456ad277f313ef1
|
[
"MIT"
] | 149
|
2021-07-13T21:46:21.000Z
|
2022-03-29T12:21:38.000Z
|
aiobungie/crate/application.py
|
nxtlo/aiobungie
|
ca0a842bf5a7217662f0d8d63456ad277f313ef1
|
[
"MIT"
] | 4
|
2021-07-21T05:33:11.000Z
|
2022-02-19T11:15:31.000Z
|
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Basic implementation of a Bungie a application."""
from __future__ import annotations
__all__ = ("Application", "ApplicationOwner")
import typing
import attrs
from aiobungie import undefined
from aiobungie import url
from aiobungie.crate import user
from aiobungie.internal import enums
if typing.TYPE_CHECKING:
from datetime import datetime
from aiobungie import traits
from aiobungie import typedefs
from aiobungie.internal import assets
@attrs.define(hash=False, kw_only=True, weakref_slot=False)
class ApplicationOwner(user.UserLike):
"""Represents a Bungie Application owner."""
net: traits.Netrunner = attrs.field(repr=False)
"""A network state used for making external requests."""
name: undefined.UndefinedOr[str] = attrs.field(repr=True, hash=False, eq=False)
"""The application owner name. This can be `UNDEFINED` if not found."""
type: enums.MembershipType = attrs.field(repr=True, hash=False, eq=True)
"""The membership of the application owner."""
id: int = attrs.field(repr=True, hash=True, eq=True)
"""The application owner's id."""
icon: assets.MaybeImage = attrs.field(repr=False)
"""The application owner's icon."""
is_public: bool = attrs.field(repr=True)
"""The application owner's profile privacy."""
code: typedefs.NoneOr[int] = attrs.field(repr=True)
"""The user like's unique display name code.
This can be None if the user hasn't logged in after season of the lost update.
"""
async def fetch_self(self) -> user.BungieUser:
"""Fetch the bungie user for this application owner.
Returns
-------
`aiobungie.crate.BungieUser`
A Bungie net user.
Raises
------
`aiobungie.NotFound`
The user was not found.
"""
user_ = await self.net.request.fetch_user(self.id)
assert isinstance(user_, user.BungieUser)
return user_
@property
def unique_name(self) -> str:
"""The application owner's unique name."""
return self.unique_name
@property
def last_seen_name(self) -> str:
# This is always undefined since an application
# dev doesn't have this field.
return str(undefined.Undefined)
@property
def link(self) -> str:
return f"{url.BASE}/en/Profile/index/{int(self.type)}/{self.id}"
@attrs.define(hash=False, kw_only=True, weakref_slot=False)
class Application:
"""Represents a Bungie developer application."""
id: int = attrs.field(repr=True, hash=True, eq=True)
"""App id"""
name: str = attrs.field(repr=True, hash=False, eq=False)
"""App name"""
redirect_url: typing.Optional[str] = attrs.field(repr=True)
"""App redirect url"""
created_at: datetime = attrs.field(repr=True)
"""App creation date in UTC timezone"""
published_at: datetime = attrs.field(repr=True)
"""App's publish date in UTC timezone"""
link: str = attrs.field(repr=True)
"""App's link"""
status: int = attrs.field(repr=False)
"""App's status"""
scope: undefined.UndefinedOr[str] = attrs.field(repr=False)
"""App's scope"""
owner: ApplicationOwner = attrs.field(repr=True)
"""App's owner"""
def __str__(self) -> str:
return self.name
def __int__(self) -> int:
return self.id
| 31.314685
| 83
| 0.687807
|
6fe6aa5e2add1e4a96bff34247542c3e59728881
| 1,470
|
py
|
Python
|
python/fasterparallel/benchmark2multi.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 1
|
2022-03-30T12:59:44.000Z
|
2022-03-30T12:59:44.000Z
|
python/fasterparallel/benchmark2multi.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | null | null | null |
python/fasterparallel/benchmark2multi.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 3
|
2019-08-13T11:33:36.000Z
|
2022-03-08T22:00:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals, annotations)
from collections import defaultdict
from multiprocessing import Pool
import numpy as np
import psutil
num_cpus = psutil.cpu_count(logical=False)
def accumulate_prefixes(args):
running_prefix_count, running_popular_prefixes, document = args
for word in document:
for i in range(1, len(word)):
prefix = word[:i]
running_prefix_count[prefix] += 1
if running_prefix_count[prefix] > 3:
running_popular_prefixes.add(prefix)
return running_prefix_count, running_popular_prefixes
def main():
# Time the code below.
pool = Pool(num_cpus)
running_prefix_counts = [defaultdict(int) for _ in range(4)]
running_popular_prefixes = [set() for _ in range(4)]
for _ in range(10):
documents = [[np.random.bytes(20) for _ in range(10000)]
for _ in range(num_cpus)]
results = pool.map(
accumulate_prefixes,
zip(running_prefix_counts, running_popular_prefixes, documents))
running_prefix_counts = [result[0] for result in results]
running_popular_prefixes = [result[1] for result in results]
popular_prefixes = set()
for prefixes in running_popular_prefixes:
popular_prefixes |= prefixes
if __name__ == '__main__':
main()
| 30
| 76
| 0.669388
|
cb12bf07f210836543f58db3d10f41ada53b6baa
| 1,217
|
py
|
Python
|
2019/day17/solutions.py
|
ivobatkovic/advent-of-code
|
e43489bcd2307f0f3ac8b0ec4e850f0a201f9944
|
[
"MIT"
] | 3
|
2019-12-14T16:24:50.000Z
|
2020-12-06T16:40:13.000Z
|
2019/day17/solutions.py
|
ivobatkovic/advent-of-code
|
e43489bcd2307f0f3ac8b0ec4e850f0a201f9944
|
[
"MIT"
] | 4
|
2019-12-03T14:18:13.000Z
|
2020-12-03T08:29:32.000Z
|
2019/day17/solutions.py
|
ivobatkovic/advent-of-code
|
e43489bcd2307f0f3ac8b0ec4e850f0a201f9944
|
[
"MIT"
] | 2
|
2019-12-06T07:25:57.000Z
|
2020-12-08T12:42:37.000Z
|
from os.path import dirname
from os.path import realpath
from os.path import join
import time
import sys
sys.path.append(join(dirname(realpath(__file__)), *[".."]))
from day17.asci import Asci
def part1(input_):
asci = Asci(input_)
return asci.compute_intersections()
def part2(input_):
asci = Asci(input_)
asci.compute_intersections()
return asci.collect_dust(False)
def main():
# Open data file and read through all lines
file_location = "data/input.txt"
try:
dir_path = dirname(realpath(__file__))
with open(join(dir_path, file_location), "r") as f:
input_ = f.read()
t0 = time.time()
sol_part1 = part1(input_)
time_end = round((time.time() - t0) * 1e3)
print(
"Solution to part one: %s (time taken %s[ms])"
% (sol_part1, time_end)
)
t0 = time.time()
sol_part2 = part2(input_)
time_end = round((time.time() - t0) * 1e3)
print(
"Solution to part two: %s (time taken %s[ms])"
% (sol_part2, time_end)
)
except IOError:
print("Cannot find file at: " + file_location)
if __name__ == "__main__":
main()
| 22.962264
| 59
| 0.59244
|
c8cf272f8de5099f73beb31eba0bf4209189e46c
| 2,724
|
py
|
Python
|
tests/zenora/test_errors.py
|
orikalinski/zenora
|
5c942c94ef3e95f060f2cdeafc93679ef14599ef
|
[
"MIT"
] | null | null | null |
tests/zenora/test_errors.py
|
orikalinski/zenora
|
5c942c94ef3e95f060f2cdeafc93679ef14599ef
|
[
"MIT"
] | null | null | null |
tests/zenora/test_errors.py
|
orikalinski/zenora
|
5c942c94ef3e95f060f2cdeafc93679ef14599ef
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 K.M Ahnaf Zamil
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from unittest import mock
from zenora.exceptions import APIError, RateLimitException
from zenora.errors import raise_error_or_return
import pytest
import requests
def test_handle_rate_limit():
with mock.patch.object(requests, "request") as r:
# Should throw rate limit error
r.return_value.headers = {
"X-RateLimit-Remaining": 0,
"X-RateLimit-Reset": 1470173023,
"x-ratelimit-reset-after": 1234.23414,
"X-RateLimit-Bucket": "abcd1234",
}
r.return_value.ok = False
r.return_value.json.return_value = {
"errors": {
"avatar": {
"_errors": [
{"message": "You are changing avatars too fast"}
]
}
}
}
        with pytest.raises(RateLimitException) as e:
            raise_error_or_return(r())
        # Assert outside the `with` block so it actually runs once the
        # exception has been captured.
        assert e.value.message == "You are changing avatars too fast"
def test_handle_error():
with mock.patch.object(requests, "request") as r:
# Exception should be raised
r.return_value.ok = False
r.return_value.json.return_value = {
"code": 12345,
"errors": {"avatar": [{"message": "Invalid form body"}]},
}
        with pytest.raises(APIError) as e:
            raise_error_or_return(r())
        assert e.value.message == "Invalid form body"
# No error should be raised
r.return_value.ok = True
r.return_value.json.return_value = {"test": 123}
json_data = raise_error_or_return(r())
assert json_data == {"test": 123}
| 38.914286
| 80
| 0.654552
|
da03e92064e68898a47672c305fcf1badd9b66b5
| 14,318
|
py
|
Python
|
datar/forcats/lvl_value.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
datar/forcats/lvl_value.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
datar/forcats/lvl_value.py
|
pdwaggoner/datar
|
a03f1c0ca0de1270059178e59cea151a51a6e7aa
|
[
"MIT"
] | null | null | null |
"""Provides forcats verbs to manipulate factor level values"""
from typing import Any, Callable, Iterable, List, Mapping
import numpy as np
from ..core.backends import pandas as pd
from ..core.backends.pandas import Categorical, DataFrame
from pipda import register_verb
from pipda.utils import CallingEnvs, functype
from ..base import (
levels,
match,
nlevels,
paste0,
sample,
table,
order,
rank,
)
from ..core.contexts import Context
from ..core.utils import logger, regcall
from ..dplyr import recode_factor, if_else
from .utils import check_factor, ForcatsRegType
from .lvls import lvls_reorder, lvls_revalue
from .lvl_order import fct_relevel
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_anon(
_f,
prefix: str = "",
) -> Categorical:
"""Anonymise factor levels
Args:
f: A factor.
prefix: A character prefix to insert in front of the random labels.
Returns:
The factor with levels anonymised
"""
_f = check_factor(_f)
nlvls = regcall(nlevels, _f)
ndigits = len(str(nlvls))
lvls = regcall(
paste0,
prefix,
[str(i).rjust(ndigits, "0") for i in range(nlvls)],
)
_f = regcall(lvls_revalue, _f, regcall(sample, lvls))
return regcall(
lvls_reorder,
_f,
regcall(match, lvls, regcall(levels, _f)),
)
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_recode(
_f,
*args: Mapping[Any, Any],
**kwargs: Any,
) -> Categorical:
"""Change factor levels by hand
Args:
_f: A factor
*args: and
**kwargs: A sequence of named character vectors where the name
gives the new level, and the value gives the old level.
Levels not otherwise mentioned will be left as is. Levels can
be removed by naming them `NULL`.
As `NULL/None` cannot be a name of keyword arguments, replacement
has to be specified as a dict
(i.e. `fct_recode(x, {NULL: "apple"})`)
If you want to replace multiple values with the same old value,
use a `set`/`list`/`numpy.ndarray`
(i.e. `fct_recode(x, fruit=["apple", "banana"])`).
This is a safe way, since `set`/`list`/`numpy.ndarray` is
not hashable to be a level of a factor.
Do NOT use a `tuple`, as it's hashable!
Note that the order of the name-value is in the reverse way as
`dplyr.recode()` and `dplyr.recode_factor()`
Returns:
The factor recoded with given recodings
"""
_f = check_factor(_f)
recodings = {} # new => old
for arg in args:
if not isinstance(arg, dict):
raise ValueError("`*args` have to be all mappings.")
recodings.update(arg)
recodings.update(kwargs)
lvls = regcall(levels, _f)
for_recode = dict(zip(lvls, lvls)) # old => new
unknown = set()
for key, val in recodings.items():
if isinstance(val, (np.ndarray, set, list)):
for value in val:
if value not in lvls:
unknown.add(value)
else:
for_recode[value] = key
else:
if val not in lvls:
unknown.add(val)
else:
for_recode[val] = key
if unknown:
logger.warning("[fct_recode] Unknown levels in `_f`: %s", unknown)
return regcall(recode_factor, _f, for_recode).values
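# Minimal usage sketch for fct_recode (added; not part of the original
# module, and it assumes the verb can be called directly on data, as datar
# allows outside a pipeline); the level names are made up.
def _example_fct_recode():
    f = Categorical(["apple", "banana", "cherry"])
    # "apple" and "banana" are both relabelled to "fruit"; "cherry" stays.
    return fct_recode(f, fruit=["apple", "banana"])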
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_collapse(
_f,
other_level: Any = None,
**kwargs: List,
) -> Categorical:
"""Collapse factor levels into manually defined groups
Args:
_f: A factor
**kwargs: The levels to collapse.
Like `name=[old_level, old_level1, ...]`. The old levels will
be replaced with `name`
other_level: Replace all levels not named in `kwargs`.
If not, don't collapse them.
Returns:
The factor with levels collapsed.
"""
_f = check_factor(_f)
levs = set(lev for sublevs in kwargs.values() for lev in sublevs)
if other_level is not None:
lvls = regcall(levels, _f)
kwargs[other_level] = set(lvls) - levs
out = fct_recode(_f, kwargs)
if other_level in kwargs:
return regcall(
fct_relevel,
out,
other_level,
after=-1,
)
return out
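# Minimal usage sketch for fct_collapse (added; not part of the original
# module, same direct-call assumption as above); the level names are made up.
def _example_fct_collapse():
    f = Categorical(["cat", "dog", "parrot", "sparrow"])
    # "cat"/"dog" collapse into "mammal"; every other level becomes "Other".
    return fct_collapse(f, mammal=["cat", "dog"], other_level="Other")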
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_lump_min(
_f,
min: int,
w=None,
other_level: Any = "Other",
) -> Categorical:
"""lumps levels that appear fewer than `min` times.
Args:
_f: A factor
min: Preserve levels that appear at least `min` number of times.
w: An optional numeric vector giving weights for frequency of
each value (not level) in f.
other_level: Value of level used for "other" values. Always
placed at end of levels.
Returns:
The factor with levels lumped.
"""
calcs = check_calc_levels(_f, w)
_f = calcs["_f"]
if min < 0:
raise ValueError("`min` must be a positive number.")
new_levels = regcall(
if_else,
calcs["count"] >= min,
regcall(levels, _f),
other_level,
)
if other_level in new_levels:
_f = regcall(lvls_revalue, _f, new_levels)
return fct_relevel(_f, other_level, after=-1)
return _f
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_lump_prop(
_f,
prop,
w=None,
other_level: Any = "Other",
) -> Categorical:
"""Lumps levels that appear in fewer `prop * n` times.
Args:
_f: A factor
prop: Positive `prop` lumps values which do not appear at least
`prop` of the time. Negative `prop` lumps values that
do not appear at most `-prop` of the time.
w: An optional numeric vector giving weights for frequency of
each value (not level) in f.
other_level: Value of level used for "other" values. Always
placed at end of levels.
Returns:
The factor with levels lumped.
"""
calcs = check_calc_levels(_f, w)
_f = calcs["_f"]
prop_n = calcs["count"] / calcs["total"]
if prop < 0:
new_levels = regcall(
if_else,
prop_n <= -prop,
regcall(levels, _f),
other_level,
)
else:
new_levels = regcall(
if_else,
prop_n > prop,
regcall(levels, _f),
other_level,
)
if prop > 0 and sum(prop_n <= prop) <= 1:
return _f
if other_level in new_levels:
_f = regcall(lvls_revalue, _f, new_levels)
return fct_relevel(_f, other_level, after=-1)
return _f
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_lump_n(
_f,
n: int,
w=None,
other_level: Any = "Other",
ties_method: str = "min",
) -> Categorical:
"""Lumps all levels except for the `n` most frequent.
Args:
f: A factor
n: Positive `n` preserves the most common `n` values.
Negative `n` preserves the least common `-n` values.
            If there are ties, you will get at least `abs(n)` values.
w: An optional numeric vector giving weights for frequency of
each value (not level) in f.
other_level: Value of level used for "other" values. Always
placed at end of levels.
        ties_method: A character string specifying how ties are treated.
One of: `average`, `first`, `dense`, `max`, and `min`.
Returns:
The factor with levels lumped.
"""
calcs = check_calc_levels(_f, w)
_f = calcs["_f"]
if n < 0:
rnk = regcall(rank, calcs["count"], ties_method=ties_method)
n = -n
else:
rnk = regcall(rank, -calcs["count"], ties_method=ties_method)
new_levels = regcall(
if_else,
rnk <= n,
regcall(levels, _f),
other_level,
)
if sum(rnk > n) <= 1:
return _f
if other_level in new_levels:
_f = regcall(lvls_revalue, _f, new_levels)
return fct_relevel(_f, other_level, after=-1)
return _f # pragma: no cover
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_lump_lowfreq(_f, other_level: Any = "Other"):
"""lumps together the least frequent levels, ensuring
that "other" is still the smallest level.
Args:
f: A factor
other_level: Value of level used for "other" values. Always
placed at end of levels.
Returns:
The factor with levels lumped.
"""
calcs = check_calc_levels(_f)
_f = calcs["_f"]
new_levels = regcall(
if_else,
~in_smallest(calcs["count"]),
regcall(levels, _f),
other_level,
)
if other_level in new_levels:
_f = regcall(lvls_revalue, _f, new_levels)
return fct_relevel(_f, other_level, after=-1)
return _f
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_lump(
_f,
n: int = None,
prop=None,
w=None,
other_level: Any = "Other",
ties_method: str = "min",
) -> Categorical:
"""Lump together factor levels into "other"
Args:
f: A factor
n: Positive `n` preserves the most common `n` values.
Negative `n` preserves the least common `-n` values.
            If there are ties, you will get at least `abs(n)` values.
prop: Positive `prop` lumps values which do not appear at least
`prop` of the time. Negative `prop` lumps values that
do not appear at most `-prop` of the time.
w: An optional numeric vector giving weights for frequency of
each value (not level) in f.
other_level: Value of level used for "other" values. Always
placed at end of levels.
        ties_method: A character string specifying how ties are treated.
One of: `average`, `first`, `dense`, `max`, and `min`.
Returns:
The factor with levels lumped.
"""
check_calc_levels(_f, w)
if n is None and prop is None:
return fct_lump_lowfreq(_f, other_level=other_level)
if prop is None:
return fct_lump_n(
_f,
n=n,
w=w,
other_level=other_level,
ties_method=ties_method,
)
if n is None:
return fct_lump_prop(_f, prop=prop, w=w, other_level=other_level)
raise ValueError("Must supply only one of `n` and `prop`")
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_other(
_f,
keep: Iterable = None,
drop: Iterable = None,
other_level: Any = "Other",
) -> Categorical:
"""Replace levels with "other"
Args:
_f: A factor
keep: and
drop: Pick one of `keep` and `drop`:
- `keep` will preserve listed levels, replacing all others with
`other_level`.
            - `drop` will replace listed levels with `other_level`, keeping all
              others as is.
other_level: Value of level used for "other" values. Always
placed at end of levels.
Returns:
The factor with levels replaced.
"""
_f = check_factor(_f)
if (keep is None and drop is None) or (
keep is not None and drop is not None
):
raise ValueError("Must supply exactly one of `keep` and `drop`")
lvls = regcall(levels, _f)
if keep is not None:
lvls[~np.isin(lvls, keep)] = other_level
else:
lvls[np.isin(lvls, drop)] = other_level
_f = regcall(lvls_revalue, _f, lvls)
return regcall(
fct_relevel,
_f,
other_level,
after=-1,
)
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_relabel(
_f,
_fun: Callable,
*args: Any,
**kwargs: Any,
) -> Categorical:
"""Automatically relabel factor levels, collapse as necessary
Args:
_f: A factor
_fun: A function to be applied to each level. Must accept the old
levels and return a character vector of the same length
as its input.
*args: and
**kwargs: Addtional arguments to `_fun`
Returns:
The factor with levels relabeled
"""
_f = check_factor(_f)
old_levels = regcall(levels, _f)
if functype(_fun) != "plain":
kwargs["__calling_env"] = CallingEnvs.REGULAR
new_levels = _fun(old_levels, *args, **kwargs)
return regcall(lvls_revalue, _f, new_levels)
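# Minimal usage sketch for fct_relabel (added; not part of the original
# module, same direct-call assumption as above): any plain callable that maps
# the old levels to the same number of new levels works.
def _example_fct_relabel():
    f = Categorical(["low", "high", "low"])
    return fct_relabel(f, lambda lvls: [lvl.upper() for lvl in lvls])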
# -------------
# helpers
# -------------
def check_weights(w, n: int = None):
"""Check the weights"""
if w is None:
return w
if n is None: # pragma: no cover
n = len(w)
if len(w) != n:
raise ValueError(
f"`w` must be the same length as `f` ({n}), "
f"not length {len(w)}."
)
for weight in w:
if weight < 0 or pd.isnull(weight):
raise ValueError(
f"All `w` must be non-negative and non-missing, got {weight}."
)
return w
def check_calc_levels(_f, w=None):
"""Check levels to be calculated"""
_f = check_factor(_f)
w = check_weights(w, len(_f))
if w is None:
cnt = table(_f).iloc[0, :].values
total = len(_f)
else:
cnt = (
DataFrame({"w": w, "f": _f})
.groupby("f", observed=False)
.agg("sum")
.iloc[:, 0]
.values
)
total = sum(w)
return {"_f": _f, "count": cnt, "total": total}
def lump_cutoff(x) -> int:
"""Lump together smallest groups, ensuring that the collective
"other" is still the smallest group. Assumes x is vector
of counts in descending order"""
left = sum(x)
for i, elem in enumerate(x):
# After group, there are this many left
left -= elem
if elem > left:
return i + 1
return len(x) # pragma: no cover
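# Worked example (added for clarity; not part of the original module): for
# descending counts [5, 3, 2, 1] the remainder after each group is 6, 3, 1, 0;
# the first group larger than what remains is the 2 at index 2, so
# lump_cutoff returns 3 -- keep the three largest groups and lump the rest,
# which keeps the combined "other" (1) smaller than every kept group.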
def in_smallest(x) -> Iterable[bool]:
"""Check if elements in x are the smallest of x"""
ord_x = regcall(order, x, decreasing=True)
idx = lump_cutoff(x[ord_x])
to_lump = np.arange(len(x)) >= idx
return to_lump[regcall(order, ord_x)]
| 27.376673
| 79
| 0.590446
|
42e5b48173a3a0d113b334fe3dbb5bbabaf47286
| 104
|
py
|
Python
|
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/app/config/base.py
|
LemegetonX/valefor
|
127b0a1ca646f80740073bddcfa243d775df5cc7
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/app/config/base.py
|
LemegetonX/valefor
|
127b0a1ca646f80740073bddcfa243d775df5cc7
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/app/config/base.py
|
LemegetonX/valefor
|
127b0a1ca646f80740073bddcfa243d775df5cc7
|
[
"MIT"
] | null | null | null |
import os
class BaseConfig:
APP_DIR: str = os.path.abspath("./{{cookiecutter.project_name}}/app")
| 17.333333
| 73
| 0.701923
|
8ae59e162a7b393806fdfb40fac25d312a16d9b7
| 3,909
|
py
|
Python
|
ucsmsdk/mometa/os/OsEthBondModeBalancedTLB.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/os/OsEthBondModeBalancedTLB.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/os/OsEthBondModeBalancedTLB.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for OsEthBondModeBalancedTLB ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class OsEthBondModeBalancedTLBConsts:
LB_TYPE_RECEIVE_XMIT = "receive-xmit"
LB_TYPE_XMIT_ONLY = "xmit-only"
TYPE_ACTIVE_ACTIVE = "active-active"
TYPE_ACTIVE_PASSIVE = "active-passive"
XMIT_HASH_TYPE_ENCAP2_3 = "encap2+3"
XMIT_HASH_TYPE_ENCAP3_4 = "encap3+4"
XMIT_HASH_TYPE_LAYER2 = "layer2"
XMIT_HASH_TYPE_LAYER2_3 = "layer2+3"
XMIT_HASH_TYPE_LAYER3_4 = "layer3+4"
class OsEthBondModeBalancedTLB(ManagedObject):
"""This is OsEthBondModeBalancedTLB class."""
consts = OsEthBondModeBalancedTLBConsts()
naming_props = set([])
mo_meta = MoMeta("OsEthBondModeBalancedTLB", "osEthBondModeBalancedTLB", "eth-bond-mode", VersionMeta.Version302c, "InputOutput", 0x3f, [], ["read-only"], [u'osEthBondIntf'], [u'osPrimarySlave'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version302c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"igmp_resend_count": MoPropertyMeta("igmp_resend_count", "igmpResendCount", "ushort", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-255"]),
"lb_type": MoPropertyMeta("lb_type", "lbType", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["receive-xmit", "xmit-only"], []),
"lp_interval": MoPropertyMeta("lp_interval", "lpInterval", "uint", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version302c, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version302c, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["active-active", "active-passive"], []),
"xmit_hash_type": MoPropertyMeta("xmit_hash_type", "xmitHashType", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["encap2+3", "encap3+4", "layer2", "layer2+3", "layer3+4"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"igmpResendCount": "igmp_resend_count",
"lbType": "lb_type",
"lpInterval": "lp_interval",
"name": "name",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"type": "type",
"xmitHashType": "xmit_hash_type",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.igmp_resend_count = None
self.lb_type = None
self.lp_interval = None
self.name = None
self.sacl = None
self.status = None
self.type = None
self.xmit_hash_type = None
ManagedObject.__init__(self, "OsEthBondModeBalancedTLB", parent_mo_or_dn, **kwargs)
| 56.652174
| 248
| 0.664364
|
2660f4e42f65c5b0bdb50207efda5a13435d980f
| 1,872
|
py
|
Python
|
macbot_physical/nodes/tf_broadcaster.py
|
eechhx/macbot
|
7de8e12b11c6a643ff27061a6d5ddd7dc1aa0f3c
|
[
"Apache-2.0"
] | 1
|
2021-01-21T20:05:02.000Z
|
2021-01-21T20:05:02.000Z
|
macbot_physical/nodes/tf_broadcaster.py
|
eechhx/macbot
|
7de8e12b11c6a643ff27061a6d5ddd7dc1aa0f3c
|
[
"Apache-2.0"
] | null | null | null |
macbot_physical/nodes/tf_broadcaster.py
|
eechhx/macbot
|
7de8e12b11c6a643ff27061a6d5ddd7dc1aa0f3c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import rospy
import tf_conversions
import tf2_ros
import geometry_msgs.msg
from math import pi
from std_msgs.msg import (Int32)
class macbot_tf2_broadcaster():
def __init__(self, direction_wheel):
self.br = tf2_ros.TransformBroadcaster()
self.t = geometry_msgs.msg.TransformStamped()
self.direction = str(direction_wheel)
self.direction_check()
self.t.header.frame_id = "base_link"
self.t.child_frame_id = self.directionString + "_wheel"
def direction_check(self):
if self.direction == "lwheel":
self.directionString = "left"
self.t.transform.translation.x = 0.0753
self.t.transform.translation.y = 0.137
self.t.transform.translation.z = -0.004
elif self.direction == "rwheel":
self.directionString = "right"
self.t.transform.translation.x = 0.0753
self.t.transform.translation.y = -0.137
self.t.transform.translation.z = -0.004
def publish_tf(self, msg):
        # 1122 encoder ticks per wheel revolution; revs keeps only the
        # fractional part of a turn (later converted to degrees via 360*revs)
revs = (-msg.data/1122.0) - int(-msg.data/1122)
self.t.header.stamp = rospy.Time.now()
self.q = tf_conversions.transformations.quaternion_from_euler(0, (360*revs)*(pi/180), 0)
self.t.transform.rotation.x = self.q[0]
self.t.transform.rotation.y = self.q[1]
self.t.transform.rotation.z = self.q[2]
self.t.transform.rotation.w = self.q[3]
self.br.sendTransform(self.t)
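# Worked example for publish_tf above (added for clarity; not part of the
# original node): with 1122 ticks per revolution, a count of 561 ticks is
# half a turn, so the broadcast quaternion encodes a 180 degree (pi radian)
# rotation of the wheel frame about its y axis.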
if __name__ == "__main__":
rospy.init_node('macbot_tf2_broadcaster')
left_wheel_tf = macbot_tf2_broadcaster("lwheel")
right_wheel_tf = macbot_tf2_broadcaster("rwheel")
rospy.Subscriber("lwheel_ticks", Int32, left_wheel_tf.publish_tf)
rospy.Subscriber("rwheel_ticks", Int32, right_wheel_tf.publish_tf)
rospy.spin()
| 37.44
| 97
| 0.660256
|
3753111bdd1bdbc268a22f2449febb01de9d0463
| 866
|
py
|
Python
|
app.py
|
gsingh1629/SentAnalysis
|
dd7401105334825279ccba269649cc2f1361e339
|
[
"MIT"
] | null | null | null |
app.py
|
gsingh1629/SentAnalysis
|
dd7401105334825279ccba269649cc2f1361e339
|
[
"MIT"
] | null | null | null |
app.py
|
gsingh1629/SentAnalysis
|
dd7401105334825279ccba269649cc2f1361e339
|
[
"MIT"
] | null | null | null |
from flask import Flask,render_template,url_for,request
import numpy as np
import pickle
import pandas as pd
import flasgger
from flasgger import Swagger
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
app=Flask(__name__)
Swagger(app)
mnb = pickle.load(open('Naive_Bayes_model_imdb.pkl','rb'))
countVect = pickle.load(open('countVect_imdb.pkl','rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
if request.method == 'POST':
Reviews = request.form['Reviews']
data = [Reviews]
vect = countVect.transform(data).toarray()
my_prediction = mnb.predict(vect)
return render_template('Output_Content.html',prediction = my_prediction)
if __name__ == '__main__':
app.run(debug=True)
| 25.470588
| 76
| 0.726328
|
045db200f816b7ebb77cc3be01202f4eb6031a6a
| 13,838
|
py
|
Python
|
train.py
|
ohmygod481999/a-PyTorch-Tutorial-to-Image-Captioning
|
10f324f63c3eee74ec25a7c8a1127985ab164d34
|
[
"MIT"
] | null | null | null |
train.py
|
ohmygod481999/a-PyTorch-Tutorial-to-Image-Captioning
|
10f324f63c3eee74ec25a7c8a1127985ab164d34
|
[
"MIT"
] | null | null | null |
train.py
|
ohmygod481999/a-PyTorch-Tutorial-to-Image-Captioning
|
10f324f63c3eee74ec25a7c8a1127985ab164d34
|
[
"MIT"
] | null | null | null |
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from models import Encoder, DecoderWithAttention
from datasets import *
from utils import *
from nltk.translate.bleu_score import corpus_bleu
# Data parameters
data_folder = '/content/drive/MyDrive/lame_caption_recommendation/out_flickr8k' # folder with data files saved by create_input_files.py
data_name = 'flickr8k_5_cap_per_img_5_min_word_freq' # base name shared by data files
# Model parameters
emb_dim = 512 # dimension of word embeddings
attention_dim = 512 # dimension of attention linear layers
decoder_dim = 512 # dimension of decoder RNN
dropout = 0.5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # sets device for model and PyTorch tensors
cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
# Training parameters
start_epoch = 0
epochs = 120 # number of epochs to train for (if early stopping is not triggered)
epochs_since_improvement = 0 # keeps track of number of epochs since there's been an improvement in validation BLEU
batch_size = 32
workers = 1 # for data-loading; right now, only 1 works with h5py
encoder_lr = 1e-4 # learning rate for encoder if fine-tuning
decoder_lr = 4e-4 # learning rate for decoder
grad_clip = 5. # clip gradients at an absolute value of
alpha_c = 1. # regularization parameter for 'doubly stochastic attention', as in the paper
best_bleu4 = 0. # BLEU-4 score right now
print_freq = 100 # print training/validation stats every __ batches
fine_tune_encoder = False # fine-tune encoder?
checkpoint = "/content/drive/MyDrive/lame_caption_recommendation/model/checkpoint_flickr8k_5_cap_per_img_5_min_word_freq.pth.tar" # path to checkpoint, None if none
def main():
"""
Training and validation.
"""
global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Initialize / load checkpoint
if checkpoint is None:
decoder = DecoderWithAttention(attention_dim=attention_dim,
embed_dim=emb_dim,
decoder_dim=decoder_dim,
vocab_size=len(word_map),
dropout=dropout)
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=decoder_lr)
encoder = Encoder()
encoder.fine_tune(fine_tune_encoder)
encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
lr=encoder_lr) if fine_tune_encoder else None
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
epochs_since_improvement = checkpoint['epochs_since_improvement']
best_bleu4 = checkpoint['bleu-4']
decoder = checkpoint['decoder']
decoder_optimizer = checkpoint['decoder_optimizer']
encoder = checkpoint['encoder']
encoder_optimizer = checkpoint['encoder_optimizer']
if fine_tune_encoder is True and encoder_optimizer is None:
encoder.fine_tune(fine_tune_encoder)
encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
lr=encoder_lr)
# Move to GPU, if available
decoder = decoder.to(device)
encoder = encoder.to(device)
# Loss function
criterion = nn.CrossEntropyLoss().to(device)
# Custom dataloaders
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
CaptionDataset(data_folder, data_name, 'VAL', transform=transforms.Compose([normalize])),
batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
# Epochs
for epoch in range(start_epoch, epochs):
# Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
if epochs_since_improvement == 20:
break
if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
adjust_learning_rate(decoder_optimizer, 0.8)
if fine_tune_encoder:
adjust_learning_rate(encoder_optimizer, 0.8)
# One epoch's training
train(train_loader=train_loader,
encoder=encoder,
decoder=decoder,
criterion=criterion,
encoder_optimizer=encoder_optimizer,
decoder_optimizer=decoder_optimizer,
epoch=epoch)
# One epoch's validation
recent_bleu4 = validate(val_loader=val_loader,
encoder=encoder,
decoder=decoder,
criterion=criterion)
# Check if there was an improvement
is_best = recent_bleu4 > best_bleu4
best_bleu4 = max(recent_bleu4, best_bleu4)
if not is_best:
epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
else:
epochs_since_improvement = 0
# Save checkpoint
save_checkpoint(data_name, epoch, epochs_since_improvement, encoder, decoder, encoder_optimizer,
decoder_optimizer, recent_bleu4, is_best)
def train(train_loader, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, epoch):
"""
Performs one epoch's training.
:param train_loader: DataLoader for training data
:param encoder: encoder model
:param decoder: decoder model
:param criterion: loss layer
:param encoder_optimizer: optimizer to update encoder's weights (if fine-tuning)
:param decoder_optimizer: optimizer to update decoder's weights
:param epoch: epoch number
"""
decoder.train() # train mode (dropout and batchnorm is used)
encoder.train()
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss (per word decoded)
top5accs = AverageMeter() # top5 accuracy
start = time.time()
# Batches
for i, (imgs, caps, caplens) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to GPU, if available
imgs = imgs.to(device)
caps = caps.to(device)
caplens = caplens.to(device)
# Forward prop.
imgs = encoder(imgs)
scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(imgs, caps, caplens)
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)
# print("test", test)
# scores, _ = test
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)
scores = scores.data
targets = targets.data
# Calculate loss
loss = criterion(scores, targets)
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Back prop.
decoder_optimizer.zero_grad()
if encoder_optimizer is not None:
encoder_optimizer.zero_grad()
loss.backward()
# Clip gradients
if grad_clip is not None:
clip_gradient(decoder_optimizer, grad_clip)
if encoder_optimizer is not None:
clip_gradient(encoder_optimizer, grad_clip)
# Update weights
decoder_optimizer.step()
if encoder_optimizer is not None:
encoder_optimizer.step()
# Keep track of metrics
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses,
top5=top5accs))
def validate(val_loader, encoder, decoder, criterion):
"""
Performs one epoch's validation.
:param val_loader: DataLoader for validation data.
:param encoder: encoder model
:param decoder: decoder model
:param criterion: loss layer
:return: BLEU-4 score
"""
decoder.eval() # eval mode (no dropout or batchnorm)
if encoder is not None:
encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
references = list() # references (true captions) for calculating BLEU-4 score
hypotheses = list() # hypotheses (predictions)
# explicitly disable gradient calculation to avoid CUDA memory error
# solves the issue #57
with torch.no_grad():
# Batches
for i, (imgs, caps, caplens, allcaps) in enumerate(val_loader):
# Move to device, if available
imgs = imgs.to(device)
caps = caps.to(device)
caplens = caplens.to(device)
# Forward prop.
if encoder is not None:
imgs = encoder(imgs)
scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(imgs, caps, caplens)
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)
scores = scores.data
targets = targets.data
# Calculate loss
loss = criterion(scores, targets)
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Keep track of metrics
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
if i % print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(val_loader), batch_time=batch_time,
loss=losses, top5=top5accs))
# Store references (true captions), and hypothesis (prediction) for each image
# If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
# references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
# References
allcaps = allcaps[sort_ind] # because images were sorted in the decoder
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(
map(lambda c: [w for w in c if w not in {word_map['<start>'], word_map['<pad>']}],
img_caps)) # remove <start> and pads
references.append(img_captions)
# Hypotheses
_, preds = torch.max(scores_copy, dim=2)
preds = preds.tolist()
temp_preds = list()
for j, p in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]]) # remove pads
preds = temp_preds
hypotheses.extend(preds)
assert len(references) == len(hypotheses)
# Calculate BLEU-4 scores
bleu4 = corpus_bleu(references, hypotheses)
print(
'\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}\n'.format(
loss=losses,
top5=top5accs,
bleu=bleu4))
return bleu4
if __name__ == '__main__':
main()
| 40.820059
| 165
| 0.61454
|
f27fa3f09458d2eb1cc80182721989f249077d95
| 1,150
|
py
|
Python
|
students/K33422/laboratory_works/Kirillov_Nikolay/laboratory_work_4/hotel_app/serializers.py
|
NikolayKirillov/ITMO_ICT_WebDevelopment_2020-2021
|
77ea82c38eb25c8fd61815b92e4cb006708a6de7
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Kirillov_Nikolay/laboratory_work_4/hotel_app/serializers.py
|
NikolayKirillov/ITMO_ICT_WebDevelopment_2020-2021
|
77ea82c38eb25c8fd61815b92e4cb006708a6de7
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Kirillov_Nikolay/laboratory_work_4/hotel_app/serializers.py
|
NikolayKirillov/ITMO_ICT_WebDevelopment_2020-2021
|
77ea82c38eb25c8fd61815b92e4cb006708a6de7
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from django.contrib.auth.hashers import make_password
from .models import *
class RoomSerializer(serializers.ModelSerializer):
class Meta:
model = Room
fields = "__all__"
class StaffSerializer(serializers.ModelSerializer):
class Meta:
model = Staff
fields = "__all__"
class GuestSerializer(serializers.ModelSerializer):
class Meta:
model = Guest
fields = "__all__"
class CleaningSerializer(serializers.ModelSerializer):
class Meta:
model = Cleaning
fields = "__all__"
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = "__all__"
def create(self, validated_data):
user = User.objects.create(
email=validated_data['email'],
username=validated_data['username'],
password=make_password(validated_data['password']),
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
return user
| 25
| 63
| 0.658261
|
8eccc0a5443bf2eb32dbbfffbfc154a4111380ce
| 3,174
|
py
|
Python
|
parse.py
|
karlek/avant-graph
|
e4faa52b76d4b989cb6da646270973e72965718e
|
[
"Unlicense"
] | null | null | null |
parse.py
|
karlek/avant-graph
|
e4faa52b76d4b989cb6da646270973e72965718e
|
[
"Unlicense"
] | 1
|
2016-03-12T11:28:00.000Z
|
2018-05-31T09:01:50.000Z
|
parse.py
|
karlek/avant-graph
|
e4faa52b76d4b989cb6da646270973e72965718e
|
[
"Unlicense"
] | null | null | null |
import pprint
import re
from graph_tool.all import *
from table import (Table, Type)
vertices = {}
edges = {}
group_numbers = {
5956:0,
5903:1,
6023:2,
5977:3,
5994:4,
5850:5,
6172:6,
6091:7,
5923:8,
6161:9,
}
# 5956:cerise 'Skolan för datavetenskap och kommunikation (CSC)', Type.department),
# 5903:yellow? 'Skolan för bioteknologi (BIO)', Type.department),
# 6023:brown 'Skolan för industriell teknik och management (ITM)', Type.department),
# 5977:white 'Skolan för elektro- och systemteknik (EES)', Type.department),
# 5994:blue? 'Skolan för informations- och kommunikationsteknik (ICT)', Type.department),
# 5850:purple 'Skolan för arkitektur och samhällsbyggnad (ABE)', Type.department),
# 6172:black/red 'Skolan för teknikvetenskaplig kommunikation och lärande (ECE)', Type.department),
# 6091:physics 'Skolan för teknikvetenskap (SCI)', Type.department),
# 5923:yellow 'Skolan för kemivetenskap (CHE)', Type.department),
# 6161:blue/white 'Skolan för teknik och hälsa (STH)', Type.department),
color_table = {
5956: [.8862745098039215, .0, .4980392156862745, 1.],
# 5903:"gul?",
6023:[.40, .20, 0., 1.],
5977:[1., 1., 1., 1.],
5994:[.86, .60, 1., 1.],
5850:[.5, 0, .5, 1.],
# 6172:"svart/r;d",
6091:[1., .39, .16, 1.],
5923:[1., .93, 0., 1.],
6161:[.67, .84, .90, 1.],
}
PAT = re.compile(r"\[.*?\]")
def name(n, uids):
n = Name("", "", "", 0, "", [], [], [])
for uid in uids:
if len(uid) == 19:
# ResearcherID
# 0000-0001-7788-6127
n.rid = uid
elif uid.isdigit() and int(uid) in Table:
e = Table[int(uid)].type
if e == Type.uni:
n.uni.append(Code(int(uid)))
elif e == Type.department:
n.department.append(Code(int(uid)))
elif e == Type.field:
n.field.append(Code(int(uid)))
elif uid.isdigit():
if int(uid) not in Table:
# ORCID
n.orcid = int(uid)
continue
else:
# Username
n.user = uid
return n
def names(names):
nobjs = []
for n in names:
# Find everything within square brackets: [...].
uids = [uid[1:-1] for uid in PAT.findall(n)]
if not uids:
continue
nobjs.append(name(n, uids))
return nobjs
class Code(object):
def __init__(self, code):
self.code = code
def __eq__(self, c2):
return self.code == c2.code
def __str__(self):
if self.code in Table:
return Table[self.code].v
else:
return self.code + " "
class Name(object):
def __init__(self, first, family, user, orcid, rid, uni, department, field):
self.first = first
self.family = family
self.user = user
self.orcid = orcid
self.rid = rid
self.uni = uni
self.department = department
self.field = field
self.color = ""
def __str__(self):
return pprint.pformat([self.first, self.family, self.user, self.rid, self.orcid, self.uni, self.department, self.field])
| 28.339286
| 128
| 0.561122
|
050db92a969b29ca0549317ea2a0cfd6d0e429fe
| 2,707
|
py
|
Python
|
gnn_agglomeration/pyg_datasets/toy_datasets/random_graph_dataset.py
|
bentaculum/gnn_agglomeration
|
e5b8a693ba78433b8a3721ac3102630c2a79a1e4
|
[
"MIT"
] | 2
|
2021-05-19T01:56:52.000Z
|
2021-07-08T20:50:38.000Z
|
gnn_agglomeration/pyg_datasets/toy_datasets/random_graph_dataset.py
|
benjamin9555/gnn_agglomeration
|
e5b8a693ba78433b8a3721ac3102630c2a79a1e4
|
[
"MIT"
] | 14
|
2019-07-17T19:23:09.000Z
|
2021-02-02T22:01:49.000Z
|
gnn_agglomeration/pyg_datasets/toy_datasets/random_graph_dataset.py
|
benjamin9555/gnn_agglomeration
|
e5b8a693ba78433b8a3721ac3102630c2a79a1e4
|
[
"MIT"
] | 2
|
2019-07-17T20:14:03.000Z
|
2019-07-27T16:20:52.000Z
|
import torch
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
import torch_geometric.transforms as T
import numpy as np
import json
import os
from abc import ABC, abstractmethod
class RandomGraphDataset(InMemoryDataset, ABC):
def __init__(self, root, config):
self.config = config
transform = getattr(T, config.data_transform)(norm=True, cat=True)
super(RandomGraphDataset, self).__init__(
root=root, transform=transform, pre_transform=None)
self.data, self.slices = torch.load(self.processed_paths[0])
self.check_dataset_vs_config()
@property
def raw_file_names(self):
return []
@property
def processed_file_names(self):
return ['processed_data.pt']
def download(self):
raise NotImplementedError('Dataset not available for download')
@abstractmethod
def create_datapoint(self):
pass
def process(self):
# Read data into huge `Data` list.
data_list = []
# TODO use sacred logger
print('Creating {} new random graphs ... '.format(self.config.samples))
for i in range(self.config.samples):
print('Create graph {} ...'.format(i))
graph = self.create_datapoint()
data_list.append(graph)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
with open(os.path.join(self.root, 'config.json'), 'w') as f:
json.dump(vars(self.config), f)
def check_dataset_vs_config(self):
with open(os.path.join(self.root, 'config.json'), 'r') as json_file:
data_config = json.load(json_file)
run_conf_dict = vars(self.config)
for key in self.check_config_vars:
if key in data_config:
assert run_conf_dict[key] == data_config[key],\
'Run config does not match dataset config\nrun_conf_dict[{}]={}, data_config[{}]={}'.format(
key, run_conf_dict[key], key, data_config[key])
def update_config(self, config):
pass
def print_summary(self):
pass
def targets_mean_std(self):
# TODO this should be preprocessed and saved to file for large datasets
targets = []
for i in range(self.__len__()):
targets.extend(self.get(i).y)
targets = np.array(targets)
return np.mean(targets), np.std(targets)
| 33.012195
| 112
| 0.637237
|
a728f737a973cedc7c038ba39f679b2d7aa81b50
| 246
|
py
|
Python
|
backoffice/routes/membership.py
|
MedPy-C/backend
|
262834adb1f4f5714c4bd490595fdfa1f49c9675
|
[
"MIT"
] | null | null | null |
backoffice/routes/membership.py
|
MedPy-C/backend
|
262834adb1f4f5714c4bd490595fdfa1f49c9675
|
[
"MIT"
] | 1
|
2021-05-20T16:08:35.000Z
|
2021-05-20T16:08:35.000Z
|
backoffice/routes/membership.py
|
MedPy-C/backend
|
262834adb1f4f5714c4bd490595fdfa1f49c9675
|
[
"MIT"
] | null | null | null |
from django.urls import path
from backoffice.view import membership
members_routes = [
path('user/<str:user_login_code>/group/<str:slug_name>/members/', membership.MembershipView.as_view(
{'get': 'list'}), name='group_membership')]
| 30.75
| 104
| 0.731707
|
24f17f634d6468da1265c79c4f3b10fd7bc9adaa
| 6,889
|
py
|
Python
|
sources/hud.py
|
mthenault/MICshooter
|
2f816d9b96fb525e7f607fad03ca900df9bb32c7
|
[
"MIT"
] | 2
|
2015-01-12T14:44:44.000Z
|
2018-05-07T10:49:23.000Z
|
sources/hud.py
|
antismap/MICshooter
|
2f816d9b96fb525e7f607fad03ca900df9bb32c7
|
[
"MIT"
] | null | null | null |
sources/hud.py
|
antismap/MICshooter
|
2f816d9b96fb525e7f607fad03ca900df9bb32c7
|
[
"MIT"
] | 2
|
2016-02-27T09:56:40.000Z
|
2019-07-19T09:47:22.000Z
|
import os
import common_pygame
import random
pygame = common_pygame.pygame
screen = common_pygame.screen
# def __init__(self):
#self.color=(0, 0, 255)
#self.y1 = screen.get_height()/2
#self.y2 = self.y1 +20
# self.max_width=800-40
##self.font = pygame.font.Font(None,64)
# self.textHeight=self.y1-80
def negtozero(x):
if x < 0:
return 0
return x
def updateProgbar(percent, x1, y1, max_width, color, direction, single_sprites):
s = pygame.Surface((max_width, 15)) # the size of your rect
s.set_alpha(64) # alpha level
s.fill(color) # this fills the entire surface
screen.blit(s, (x1, y1)) # (0,0) are the top-left coordinates
##txtpercent = self.font.render(str(percent)+"%", True, self.color)
##screen.blit(txtpercent, (20,y1+30))
(r, g, b) = color
if direction == 1:
        for i in range((percent * max_width) // 100):
screen.blit(single_sprites['barArmor.png'], (x1 + i, y1))
# pygame.draw.rect(screen, newcol, (x1,y1+i,(percent*max_width)/100,2), 0)
else:
        for i in range((percent * max_width) // 100):
screen.blit(single_sprites['barLife.png'],
(x1 + max_width - i, y1))
#pygame.draw.rect(screen, color, (x1+(max_width-(percent*max_width)/100),y1,(percent*max_width)/100,15), 0)
#pygame.draw.rect(screen, color2, (x1,y1,max_width,15), 1 )
class Hud():
def __init__(self, single_sprites, menu, sounds):
self.sounds = sounds
self.menu = menu
self.single_sprites = single_sprites
# Create a font
self.tinyfont = pygame.font.Font(None, 16)
self.font = pygame.font.Font(None, 32)
self.font2 = pygame.font.Font(None, 150)
self.score_label = self.tinyfont.render("score", True, (255, 255, 0))
self.inf = self.font.render("Inf.", True, (0, 130, 255))
self.communication = pygame.font.Font("A.TTF", 13)
self.communicationCall = pygame.font.Font("BITSUMIS.TTF", 50)
# self.offset=0
self.johnsonOffset = 0
self.textline1 = "Johnson here."
self.textline2 = "Your goal today is to destroy"
self.textline3 = "every enemy ship. "
self.textline4 = "I'm counting on you !"
def blit(self, ship, level):
# Render the text
#life_txt = self.font.render(str(ship.life), True, (255,0, 0))
score_txt = self.font.render(str(ship.score), True, (255, 255, 255))
#armor_txt = self.font.render(str(ship.armor), True, (255,255, 0))
level_txt = self.tinyfont.render(
"level " + str(level), True, (255, 255, 0))
# show the HUD
screen.blit(self.single_sprites['lifemask.png'], (0, common_pygame.screenheight
- self.single_sprites['lifemask.png'].get_height()))
# show the life and the score
screen.blit(self.score_label, (680, common_pygame.screenheight - 50))
screen.blit(score_txt, (725, common_pygame.screenheight - 55))
#screen.blit(level_txt, (455,common_pygame.screenheight-215-30 -self.offset))
# print(common_pygame.screenheight)
# progress bar for the armor
updateProgbar(ship.armor, 25, common_pygame.screenheight -
23, 150, (7, 200, 0), 1, self.single_sprites)
screen.blit(self.single_sprites[
'armorbonus.png'], (0, common_pygame.screenheight - 32))
# progress bar for the life
updateProgbar(ship.life, common_pygame.screenwidth - 25 - 150,
common_pygame.screenheight - 23, 150, (0, 181, 200), 0, self.single_sprites)
screen.blit(self.single_sprites[
'lifebonus.png'], (common_pygame.screenwidth - 25, common_pygame.screenheight - 32))
#screen.blit(armor_txt, (35,common_pygame.screenheight-227-self.offset ))
# blit the current weapon and the ammo
if ship.weapon == 1:
screen.blit(self.single_sprites['sprite_laser.png'], (5,
common_pygame.screenheight - 55))
screen.blit(self.inf, (25, common_pygame.screenheight - 55))
else:
ammo_txt = self.font.render(str(ship.ammo), True, (0, 130, 255))
screen.blit(self.single_sprites['ball1.png'], (5,
common_pygame.screenheight - 55))
screen.blit(ammo_txt, (25, common_pygame.screenheight - 55))
        # beginning of the game: blit johnson
if level == 0:
if self.johnsonOffset % 1 == 0 and self.johnsonOffset > 30 and self.johnsonOffset < 180:
self.menu.play_sound(self.sounds["click.wav"])
if self.johnsonOffset == 230:
return 1
else:
self.johnsonOffset = self.johnsonOffset + 1
if self.johnsonOffset == 1:
self.menu.play_sound(self.sounds["noise.wav"])
# first 30 frames
if self.johnsonOffset < 30:
if self.johnsonOffset % 8 > 4:
screen.blit(self.communicationCall.render(
"Incoming call", True, (255, 255, 255)), (20, 15))
elif self.johnsonOffset <= 200:
screen.blit(self.single_sprites['johnson.png'], (10, 10))
if self.johnsonOffset >= 30:
screen.blit(self.communication.render(self.textline1[
                            :(self.johnsonOffset - 30) * len(self.textline1) // 30], True, (255, 255, 128)), (114, 26))
# next 60 ones
if self.johnsonOffset >= 70:
screen.blit(self.communication.render(self.textline2[
                            :(self.johnsonOffset - 70) * len(self.textline2) // 30], True, (255, 255, 128)), (114, 44))
if self.johnsonOffset >= 100:
screen.blit(self.communication.render(self.textline3[
                            :(self.johnsonOffset - 100) * len(self.textline3) // 30], True, (255, 255, 128)), (114, 44 + 18))
if self.johnsonOffset >= 150:
screen.blit(self.communication.render(self.textline4[
                            :(self.johnsonOffset - 150) * len(self.textline4) // 30], True, (255, 255, 128)), (114, 44 + 18 + 18))
else:
if self.johnsonOffset == 201:
self.menu.play_sound(self.sounds["noise.wav"])
if self.johnsonOffset % 8 > 4:
screen.blit(self.communicationCall.render(
"Transmission end", True, (255, 255, 255)), (20, 15))
return level
| 46.547297
| 137
| 0.55712
|
72ce65eeeb6e69a332139f5a033f1631f5fe070a
| 891
|
py
|
Python
|
app/core/admin.py
|
arunjohn96/recipe_app_api
|
efc0a10cdd45c792f479089209d38f976c46b353
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
arunjohn96/recipe_app_api
|
efc0a10cdd45c792f479089209d38f976c46b353
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
arunjohn96/recipe_app_api
|
efc0a10cdd45c792f479089209d38f976c46b353
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'),
{
'fields': ('is_active', 'is_staff', 'is_superuser')}
),
        (_('Important dates'), {'fields': ('last_login',)})
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredients)
admin.site.register(models.Recipe)
| 27
| 68
| 0.583614
|
97d65921a40d455ea70c6c09aab6abf51e7ffca5
| 5,817
|
py
|
Python
|
app/cascade/query_layers/base.py
|
inmadria/cascade-server
|
0ae612d97a5bad60b57a611ac59d491495e4cef1
|
[
"BSD-3-Clause"
] | 205
|
2017-08-30T19:53:53.000Z
|
2022-03-29T17:55:32.000Z
|
app/cascade/query_layers/base.py
|
inmadria/cascade-server
|
0ae612d97a5bad60b57a611ac59d491495e4cef1
|
[
"BSD-3-Clause"
] | 14
|
2017-08-31T15:00:11.000Z
|
2021-06-01T22:00:06.000Z
|
app/cascade/query_layers/base.py
|
inmadria/cascade-server
|
0ae612d97a5bad60b57a611ac59d491495e4cef1
|
[
"BSD-3-Clause"
] | 51
|
2017-08-30T19:58:06.000Z
|
2022-03-30T15:54:03.000Z
|
# NOTICE
#
# This software was produced for the U. S. Government
# under Basic Contract No. W15P7T-13-C-A802, and is
# subject to the Rights in Noncommercial Computer Software
# and Noncommercial Computer Software Documentation
# Clause 252.227-7014 (FEB 2012)
#
# (C) 2017 The MITRE Corporation.
import logging
from mongoengine import Document, StringField, ReferenceField, EmbeddedDocument
from app.cascade.data_model.query import QueryTerm, Operation
from app.cascade.data_model.event import DataModelQuery
from app.cascade.analytics import CascadeAnalytic, AnalyticReference
from app.cascade.data_model.parser import lift_query
logger = logging.getLogger(__name__)
class DatabaseInfo(Document):
database_type = "BaseDatabase"
name = StringField(required=True, unique=True)
meta = {'abstract': False, 'allow_inheritance': True}
def add_user(self, **kwargs):
raise NotImplementedError()
@classmethod
def get_schemas(cls):
schemas = []
for subcls in cls.__subclasses__():
fields = {k: {'type': type(v).__name__, 'default': (None if hasattr(v.default, '__call__') else v.default)}
for k, v in subcls._fields.items()}
fields.pop('_cls')
fields.pop('id')
schemas.append({'_cls': subcls._class_name, 'fields': fields, 'name': subcls.database_type})
return schemas
class UserDatabaseInfo(EmbeddedDocument):
meta = {'abstract': True, 'allow_inheritance': True}
database = ReferenceField(DatabaseInfo)
def login(self):
""" :rtype: DataModelQueryLayer """
raise NotImplementedError
@classmethod
def get_schemas(cls):
return [{'_cls': subcls._class_name,
'fields': {k: {'type': type(v).__name__, 'default': v.default} for k, v in subcls._fields.items()},
'name': subcls.database_type} for subcls in cls.__subclasses__()]
class QueryError(Exception):
pass
class DataModelQueryLayer(object):
_cache_dir = 'cache'
_missing_cache = False
platform = 'Data Model AST'
@classmethod
def get_data_model(cls, expression):
""" :return (DataModelEventMeta | DataModelEvent, str): """
if isinstance(expression, DataModelQuery):
return expression.object, expression.action
elif isinstance(expression, (CascadeAnalytic, AnalyticReference)):
return cls.get_data_model(expression.query)
elif isinstance(expression, Operation):
event_type = None
event_action = None
for term in expression.terms:
try:
term_object, term_action = cls.get_data_model(term)
except QueryError:
# if there is no term item, then just skip it
continue
if term_object is None and term_action is None:
continue
if (event_type and term_object != event_type) or (event_action and term_action != event_action):
raise QueryError("{} mismatch".format(type(DataModelQuery).__name))
event_type = term_object
event_action = term_action
if event_type is None and event_action is None:
raise QueryError("Unable to identify data model event")
return event_type, event_action
else:
raise QueryError(expression)
@classmethod
def optimize(cls, expression, dereference=False):
try:
optimized = cls._optimize(expression, dereference=dereference)
except QueryError:
return expression
try:
event_type, event_action = cls.get_data_model(expression)
optimized = DataModelQuery(event_type, event_action, query=optimized)
except QueryError:
pass
finally:
return optimized
@classmethod
def _optimize(cls, expression, dereference=False):
if isinstance(expression, (CascadeAnalytic, AnalyticReference)) and dereference:
return cls._optimize(expression.query, dereference=dereference)
if isinstance(expression, DataModelQuery):
return cls._optimize(expression.query, dereference=dereference)
elif isinstance(expression, Operation):
optimized_terms = []
for term in expression.terms:
if isinstance(term, Operation) and term.operator == expression.operator:
optimized_terms.extend(cls._optimize(term, dereference=dereference).terms)
else:
optimized_terms.append(cls._optimize(term, dereference=dereference))
return Operation(terms=optimized_terms, operator=expression.operator)
else:
return expression
@classmethod
def parse_expression(cls, expression, *args, **kwargs):
return expression
def query(self, expression, **kwargs):
""" The query function takes an abstract query over the data model, and fetches the corresponding
content from the database. This function returns a list of events, which are represented as dictionaries of
fields, etc.
:type expression: QueryTerm
:rtype: list[dict]
"""
raise NotImplementedError("'query' not supported for {}".format(type(self)))
@property
def external_analytics(self):
""" Returns a list of the analytics provided by this database.
"""
raise NotImplementedError("'analytics' property not supported for {}".format(type(self)))
class CascadeQueryLayer(DataModelQueryLayer):
platform = 'Data Model Query Language'
@classmethod
def parse_expression(cls, expression, *args, **kwargs):
return lift_query(expression)
| 36.130435
| 119
| 0.653086
|
bde034cc56effa2226dd050d0510bc87cde947d8
| 1,682
|
py
|
Python
|
molecule/layouts/tests/test_minio_default.py
|
Cloud-Temple/ansible-minio
|
401e45be33428e6502b9dcf28cbc717ea410f5a5
|
[
"MIT"
] | 4
|
2019-06-11T22:10:43.000Z
|
2022-01-26T04:55:01.000Z
|
molecule/layouts/tests/test_minio_default.py
|
Cloud-Temple/ansible-minio
|
401e45be33428e6502b9dcf28cbc717ea410f5a5
|
[
"MIT"
] | 5
|
2020-10-29T22:01:21.000Z
|
2021-03-30T15:59:27.000Z
|
molecule/layouts/tests/test_minio_default.py
|
Cloud-Temple/ansible-minio
|
401e45be33428e6502b9dcf28cbc717ea410f5a5
|
[
"MIT"
] | 5
|
2019-09-10T12:46:17.000Z
|
2022-01-14T15:47:11.000Z
|
import os
import yaml
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
dir_path = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def AnsibleDefaults():
with open(os.path.join(dir_path, './../../../defaults/main.yml'), 'r') as stream:
        return yaml.safe_load(stream)
@pytest.fixture()
def AnsiblePlaybook():
with open(os.path.join(dir_path, './../playbook.yml'), 'r') as stream:
        return yaml.safe_load(stream)
@pytest.mark.parametrize('minio_bin_var', [
'minio_server_bin',
'minio_client_bin',
])
def test_minio_installed(host, AnsibleDefaults, minio_bin_var):
f = host.file(AnsibleDefaults[minio_bin_var])
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
assert oct(f.mode) == '0o755'
def test_minio_server_data_directory(host, AnsibleDefaults, AnsiblePlaybook):
    playbook = AnsiblePlaybook[0]
    for role in playbook['roles']:
layoutName = role['vars']['minio_layout']
datadir = "/var/lib/minio-{}".format(layoutName)
d = host.file(datadir)
assert d.is_directory
assert d.exists
assert d.user == AnsibleDefaults['minio_user']
assert d.group == AnsibleDefaults['minio_group']
assert oct(d.mode) == '0o750'
def test_minio_server_webservers(host, AnsibleDefaults):
for layoutName in AnsibleDefaults['minio_layouts'].keys():
server_addr = AnsibleDefaults['minio_layouts'][layoutName]['server_addr']
addr = "tcp://127.0.0.1{}".format(server_addr)
        assert host.socket(addr).is_listening
| 30.035714
| 85
| 0.689655
|
7e41a96654f0d185568e541b57a2893cb091aed6
| 976
|
py
|
Python
|
mall_spider/model/cmm_sys_stream_handle_task.py
|
524243642/taobao_spider
|
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
|
[
"Unlicense"
] | 12
|
2019-06-06T12:23:08.000Z
|
2021-06-15T17:50:07.000Z
|
mall_spider/model/cmm_sys_stream_handle_task.py
|
524243642/mall_spider
|
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
|
[
"Unlicense"
] | 3
|
2021-03-31T19:02:47.000Z
|
2022-02-11T03:43:15.000Z
|
mall_spider/model/cmm_sys_stream_handle_task.py
|
524243642/taobao_spider
|
9cdaed1c7a67fc1f35ee2af2e18313cedf3b1e5e
|
[
"Unlicense"
] | 5
|
2019-09-17T03:55:56.000Z
|
2020-12-18T03:34:03.000Z
|
# coding: utf-8
from sqlalchemy import BigInteger, Column, Integer, String
from sqlalchemy.schema import FetchedValue
from common.column import JSONEncodedLongColumn
from mall_spider import model
from mall_spider.model.base import VersionMixedIn
class CmmSysStreamHandleTask(model.Base, VersionMixedIn):
__tablename__ = 'cmm_sys_stream_handle_task'
id = Column(BigInteger, primary_key=True)
type = Column(Integer, nullable=False, server_default=FetchedValue())
raw_data = Column(JSONEncodedLongColumn, nullable=False, server_default=FetchedValue())
create_by = Column(BigInteger, nullable=False, server_default=FetchedValue())
update_by = Column(BigInteger, nullable=False, server_default=FetchedValue())
# content = Column(String(3000), nullable=False, server_default=FetchedValue())
origin_id = Column(BigInteger, nullable=False, server_default=FetchedValue())
date = Column(String(10), nullable=False, server_default=FetchedValue())
| 46.47619
| 91
| 0.792008
|
18baba04496f4ceff333648ef16afe75683913b1
| 3,418
|
py
|
Python
|
paint using hand/paint.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 2
|
2021-08-30T08:04:04.000Z
|
2021-09-27T06:01:05.000Z
|
paint using hand/paint.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 1
|
2022-02-08T00:01:16.000Z
|
2022-02-08T00:01:16.000Z
|
paint using hand/paint.py
|
Jeevananthamcse/Palanisamy
|
9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c
|
[
"Unlicense"
] | 1
|
2021-09-13T07:03:11.000Z
|
2021-09-13T07:03:11.000Z
|
import cv2
import numpy as np
import time
import os
import HandTrackingModule as htm
#######################
brushThickness = 25
eraserThickness = 100
########################
folderPath = "Header"
myList = os.listdir(folderPath)
print(myList)
overlayList = []
for imPath in myList:
image = cv2.imread(f'{folderPath}/{imPath}')
overlayList.append(image)
print(len(overlayList))
header = overlayList[0]
drawColor = (255, 0, 255)
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = htm.handDetector(detectionCon=0.65,maxHands=1)
xp, yp = 0, 0
imgCanvas = np.zeros((720, 1280, 3), np.uint8)
while True:
# 1. Import image
success, img = cap.read()
img = cv2.flip(img, 1)
# 2. Find Hand Landmarks
img = detector.findHands(img)
lmList = detector.findPosition(img, draw=False)
if len(lmList) != 0:
print(lmList)
# tip of index and middle fingers
x1, y1 = lmList[8][1:]
x2, y2 = lmList[12][1:]
# 3. Check which fingers are up
fingers = detector.fingersUp()
# print(fingers)
# 4. If Selection Mode - Two finger are up
if fingers[1] and fingers[2]:
# xp, yp = 0, 0
print("Selection Mode")
# # Checking for the click
if y1 < 125:
if 250 < x1 < 450:
header = overlayList[0]
drawColor = (255, 0, 255)
elif 550 < x1 < 750:
header = overlayList[1]
drawColor = (255, 0, 0)
elif 800 < x1 < 950:
header = overlayList[2]
drawColor = (0, 255, 0)
elif 1050 < x1 < 1200:
header = overlayList[3]
drawColor = (0, 0, 0)
cv2.rectangle(img, (x1, y1 - 25), (x2, y2 + 25), drawColor, cv2.FILLED)
# 5. If Drawing Mode - Index finger is up
        if fingers[1] and not fingers[2]:
cv2.circle(img, (x1, y1), 15, drawColor, cv2.FILLED)
print("Drawing Mode")
if xp == 0 and yp == 0:
xp, yp = x1, y1
cv2.line(img, (xp, yp), (x1, y1), drawColor, brushThickness)
# if drawColor == (0, 0, 0):
# cv2.line(img, (xp, yp), (x1, y1), drawColor, eraserThickness)
# cv2.line(imgCanvas, (xp, yp), (x1, y1), drawColor, eraserThickness)
#
# else:
# cv2.line(img, (xp, yp), (x1, y1), drawColor, brushThickness)
# cv2.line(imgCanvas, (xp, yp), (x1, y1), drawColor, brushThickness)
xp, yp = x1, y1
# # Clear Canvas when all fingers are up
if all (x >= 1 for x in fingers):
imgCanvas = np.zeros((720, 1280, 3), np.uint8)
imgGray = cv2.cvtColor(imgCanvas, cv2.COLOR_BGR2GRAY)
_, imgInv = cv2.threshold(imgGray, 50, 255, cv2.THRESH_BINARY_INV)
imgInv = cv2.cvtColor(imgInv,cv2.COLOR_GRAY2BGR)
img = cv2.bitwise_and(img,imgInv)
img = cv2.bitwise_or(img,imgCanvas)
# Setting the header image
img[0:125, 0:1280] = header
# img = cv2.addWeighted(img,0.5,imgCanvas,0.5,0)
cv2.imshow("Image", img)
cv2.imshow("Canvas", imgCanvas)
cv2.imshow("Inv", imgInv)
cv2.waitKey(1)
| 30.792793
| 86
| 0.522235
|
1c8bcbba2e8dbe66dcca76c6c83e1735b8afa044
| 1,856
|
py
|
Python
|
sympy/strategies/tests/test_rl.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T12:38:24.000Z
|
2022-01-17T12:38:24.000Z
|
sympy/strategies/tests/test_rl.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/strategies/tests/test_rl.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.core.singleton import S
from sympy.strategies.rl import (rm_id, glom, flatten, unpack, sort, distribute,
subs, rebuild)
from sympy.core.basic import Basic
def test_rm_id():
rmzeros = rm_id(lambda x: x == 0)
assert rmzeros(Basic(0, 1)) == Basic(1)
assert rmzeros(Basic(0, 0)) == Basic(0)
assert rmzeros(Basic(2, 1)) == Basic(2, 1)
def test_glom():
from sympy.core.add import Add
from sympy.abc import x
key = lambda x: x.as_coeff_Mul()[1]
count = lambda x: x.as_coeff_Mul()[0]
newargs = lambda cnt, arg: cnt * arg
rl = glom(key, count, newargs)
result = rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
expected = Add(3*x, 5)
assert set(result.args) == set(expected.args)
def test_flatten():
assert flatten(Basic(1, 2, Basic(3, 4))) == Basic(1, 2, 3, 4)
def test_unpack():
assert unpack(Basic(2)) == 2
assert unpack(Basic(2, 3)) == Basic(2, 3)
def test_sort():
assert sort(str)(Basic(3,1,2)) == Basic(1,2,3)
def test_distribute():
class T1(Basic): pass
class T2(Basic): pass
distribute_t12 = distribute(T1, T2)
assert distribute_t12(T1(1, 2, T2(3, 4), 5)) == \
T2(T1(1, 2, 3, 5),
T1(1, 2, 4, 5))
assert distribute_t12(T1(1, 2, 3)) == T1(1, 2, 3)
def test_distribute_add_mul():
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.symbol import symbols
x, y = symbols('x, y')
expr = Mul(2, Add(x, y), evaluate=False)
expected = Add(Mul(2, x), Mul(2, y))
distribute_mul = distribute(Mul, Add)
assert distribute_mul(expr) == expected
def test_subs():
rl = subs(1, 2)
assert rl(1) == 2
assert rl(3) == 3
def test_rebuild():
from sympy.core.add import Add
expr = Basic.__new__(Add, S(1), S(2))
assert rebuild(expr) == 3
| 29.460317
| 80
| 0.606681
|
4f25c2b59a9426c79388b3325030f9c286f0a6d9
| 2,042
|
py
|
Python
|
supervised/algorithms/registry.py
|
eladmw/mljar-supervised
|
df0257c449a726b8303e7c8a2babe97a02db5003
|
[
"MIT"
] | null | null | null |
supervised/algorithms/registry.py
|
eladmw/mljar-supervised
|
df0257c449a726b8303e7c8a2babe97a02db5003
|
[
"MIT"
] | null | null | null |
supervised/algorithms/registry.py
|
eladmw/mljar-supervised
|
df0257c449a726b8303e7c8a2babe97a02db5003
|
[
"MIT"
] | null | null | null |
# tasks that can be handled by the package
BINARY_CLASSIFICATION = "binary_classification"
MULTICLASS_CLASSIFICATION = "multiclass_classification"
REGRESSION = "regression"
class AlgorithmsRegistry:
registry = {
BINARY_CLASSIFICATION: {},
MULTICLASS_CLASSIFICATION: {},
REGRESSION: {},
}
@staticmethod
def add(
task_name,
model_class,
model_params,
required_preprocessing,
additional,
default_params,
):
model_information = {
"class": model_class,
"params": model_params,
"required_preprocessing": required_preprocessing,
"additional": additional,
"default_params": default_params,
}
AlgorithmsRegistry.registry[task_name][
model_class.algorithm_short_name
] = model_information
@staticmethod
def get_supported_ml_tasks():
return AlgorithmsRegistry.registry.keys()
@staticmethod
def get_algorithm_class(ml_task, algorithm_name):
return AlgorithmsRegistry.registry[ml_task][algorithm_name]["class"]
@staticmethod
def get_long_name(ml_task, algorithm_name):
return AlgorithmsRegistry.registry[ml_task][algorithm_name][
"class"
].algorithm_name
@staticmethod
def get_max_rows_limit(ml_task, algorithm_name):
return AlgorithmsRegistry.registry[ml_task][algorithm_name]["additional"][
"max_rows_limit"
]
@staticmethod
def get_max_cols_limit(ml_task, algorithm_name):
return AlgorithmsRegistry.registry[ml_task][algorithm_name]["additional"][
"max_cols_limit"
]
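# Registration sketch (illustrative only; real registrations happen in the modules
# imported below, and every value here is made up):
#
#   class MyAlgorithm:
#       algorithm_short_name = "MyAlgo"
#       algorithm_name = "My Example Algorithm"
#
#   AlgorithmsRegistry.add(
#       task_name=BINARY_CLASSIFICATION,
#       model_class=MyAlgorithm,
#       model_params={"max_depth": [2, 3, 4]},
#       required_preprocessing=["missing_values_imputation"],
#       additional={"max_rows_limit": None, "max_cols_limit": None},
#       default_params={"max_depth": 3},
#   )
#
#   AlgorithmsRegistry.get_algorithm_class(BINARY_CLASSIFICATION, "MyAlgo")  # -> MyAlgorithm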
# Import algorithm to be registered
import supervised.algorithms.random_forest
import supervised.algorithms.xgboost
import supervised.algorithms.baseline
import supervised.algorithms.lightgbm
import supervised.algorithms.extra_trees
import supervised.algorithms.catboost
import supervised.algorithms.linear
import supervised.algorithms.nn
| 28.760563
| 82
| 0.696866
|
3e1b4af16a0f26e3b3c4b23558c638e351b587dc
| 633
|
py
|
Python
|
ef/config/data_class.py
|
JacobMSD/ef_python
|
13d785c10dd293c60ab90065c518e5afb14e5a02
|
[
"MIT"
] | null | null | null |
ef/config/data_class.py
|
JacobMSD/ef_python
|
13d785c10dd293c60ab90065c518e5afb14e5a02
|
[
"MIT"
] | null | null | null |
ef/config/data_class.py
|
JacobMSD/ef_python
|
13d785c10dd293c60ab90065c518e5afb14e5a02
|
[
"MIT"
] | null | null | null |
# https://codereview.stackexchange.com/questions/131761/lombokython-automatic-eq-hash-repr
# https://github.com/alexprengere/reprmixin
class DataClass:
repr_arg_separator = ', '
def __eq__(self, other):
if isinstance(self, other.__class__):
return repr(self) == repr(other)
return NotImplemented
def __hash__(self):
return hash(tuple(sorted(self.__dict__.items())))
def __repr__(self):
return '{name}({values})'.format(
name=type(self).__name__,
values=self.repr_arg_separator.join(map(lambda pair: "{}={!r}".format(*pair), vars(self).items())))
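# Minimal usage sketch (hypothetical subclass, not part of this module): any class that
# derives from DataClass gets repr-based equality, hashing and a readable repr for free.
#
#   class Point(DataClass):
#       def __init__(self, x, y):
#           self.x = x
#           self.y = y
#
#   Point(1, 2) == Point(1, 2)   # True, compared via repr
#   repr(Point(1, 2))            # 'Point(x=1, y=2)'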
| 35.166667
| 111
| 0.652449
|
02cc062fce31b97580b0e852ee0853ae040cd5b7
| 885
|
py
|
Python
|
yewdoc/__init__.py
|
paul-wolf/yewdoc-client
|
590c78cd13001b105526f801ea52ee348ea14906
|
[
"BSD-3-Clause"
] | 8
|
2017-01-31T12:50:06.000Z
|
2019-08-04T10:44:44.000Z
|
yewdoc/__init__.py
|
paul-wolf/yewdoc-client
|
590c78cd13001b105526f801ea52ee348ea14906
|
[
"BSD-3-Clause"
] | 5
|
2017-01-31T17:26:11.000Z
|
2017-02-06T09:22:23.000Z
|
yewdoc/__init__.py
|
paul-wolf/yewdoc-client
|
590c78cd13001b105526f801ea52ee348ea14906
|
[
"BSD-3-Clause"
] | 3
|
2017-01-31T17:13:25.000Z
|
2017-02-14T03:05:33.000Z
|
# -*- coding: utf-8 -*-
"""
Yewdocs
~~~~~~~
Yewdocs is a personal document manager that makes creating and
editing text documents from the command line easier than using an
editor and filesystem commands.
:copyright: (c) 2017 by Paul Wolf.
:license: BSD, see LICENSE for more details.
"""
__version__ = "0.2.0"
__author__ = "Paul Wolf"
__license__ = "BSD"
from .shared import cli
from .cmd import (
apply,
cp,
generate_index,
path,
purge,
attach,
status,
ls,
ping,
info,
sync,
edit,
register,
user_pref,
read,
take,
configure,
authenticate,
create,
tag,
tags,
convert,
browse,
context,
encrypt,
decrypt,
api,
kind,
find,
rename,
head,
tail,
push,
archive,
delete,
show,
describe,
verify,
diff,
rls,
)
| 14.508197
| 66
| 0.574011
|
b888ccbce0ed336869eea1adde3f1d52767662a1
| 23,686
|
py
|
Python
|
invenio_records/api.py
|
max-moser/invenio-records
|
bb4448a2c2abf448c3ba647590729cd6bc8478df
|
[
"MIT"
] | null | null | null |
invenio_records/api.py
|
max-moser/invenio-records
|
bb4448a2c2abf448c3ba647590729cd6bc8478df
|
[
"MIT"
] | null | null | null |
invenio_records/api.py
|
max-moser/invenio-records
|
bb4448a2c2abf448c3ba647590729cd6bc8478df
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
# Copyright (C) 2021 RERO.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record API."""
import inspect
import warnings
from copy import deepcopy
from flask import current_app
from invenio_db import db
from jsonpatch import apply_patch
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy_continuum.utils import parent_class
from werkzeug.local import LocalProxy
from .dictutils import clear_none, dict_lookup
from .dumpers import Dumper
from .errors import MissingModelError
from .models import RecordMetadata
from .signals import after_record_delete, after_record_insert, \
after_record_revert, after_record_update, before_record_delete, \
before_record_insert, before_record_revert, before_record_update
_records_state = LocalProxy(lambda: current_app.extensions['invenio-records'])
class RecordBase(dict):
"""Base class for Record and RecordRevision to share common features."""
model_cls = RecordMetadata
"""SQLAlchemy model class defining which table stores the records."""
format_checker = None
"""Class-level attribute to specify a default JSONSchema format checker."""
validator = None
"""Class-level attribute to specify a JSONSchema validator class."""
dumper = Dumper()
"""Class-level attribute to specify the default data dumper/loader.
For backward compatibility the dumper used here just produces a deep copy
of the record.
"""
enable_jsonref = True
"""Class-level attribute to control if JSONRef replacement is supported."""
_extensions = []
"""Record extensions registry.
Allows extensions (like system fields) to be registered on the record.
"""
def __init__(self, data, model=None, **kwargs):
"""Initialize instance with dictionary data and SQLAlchemy model.
:param data: Dict with record metadata.
:param model: :class:`~invenio_records.models.RecordMetadata` instance.
"""
self.model = model
for e in self._extensions:
e.pre_init(self, data, model=model, **kwargs)
super(RecordBase, self).__init__(data or {})
for e in self._extensions:
e.post_init(self, data, model=model, **kwargs)
@property
def id(self):
"""Get model identifier."""
return self.model.id if self.model else None
@property
def revision_id(self):
"""Get revision identifier."""
return self.model.version_id-1 if self.model else None
@property
def created(self):
"""Get creation timestamp."""
return self.model.created if self.model else None
@property
def updated(self):
"""Get last updated timestamp."""
return self.model.updated if self.model else None
@property
def is_deleted(self):
"""Get creation timestamp."""
return self.model.is_deleted if self.model else None
def validate(self, format_checker=None, validator=None, **kwargs):
r"""Validate record according to schema defined in ``$schema`` key.
:Keyword Arguments:
* **format_checker** --
A ``format_checker`` is an instance of class
:class:`jsonschema.FormatChecker` containing business logic to
validate arbitrary formats. For example:
>>> from jsonschema import FormatChecker
>>> from jsonschema.validators import validate
>>> checker = FormatChecker()
>>> checker.checks('foo')(lambda el: el.startswith('foo'))
<function <lambda> at ...>
>>> validate('foo', {'format': 'foo'}, format_checker=checker)
returns ``None``, which means that the validation was successful,
while
>>> validate('bar', {'format': 'foo'},
... format_checker=checker) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'bar' is not a 'foo'
...
raises a :class:`jsonschema.exceptions.ValidationError`.
* **validator** --
A :class:`jsonschema.IValidator` class used for record validation.
It will be used as `cls` argument when calling
:func:`jsonschema.validate`. For example
>>> from jsonschema.validators import extend, Draft4Validator
>>> NoRequiredValidator = extend(
... Draft4Validator,
... validators={'required': lambda v, r, i, s: None}
... )
>>> schema = {
... 'type': 'object',
... 'properties': {
... 'name': { 'type': 'string' },
... 'email': { 'type': 'string' },
... 'address': {'type': 'string' },
... 'telephone': { 'type': 'string' }
... },
... 'required': ['name', 'email']
... }
>>> from jsonschema.validators import validate
>>> validate({}, schema, NoRequiredValidator)
returns ``None``, which means that the validation was successful,
while
>>> validate({}, schema) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValidationError: 'name' is a required property
...
raises a :class:`jsonschema.exceptions.ValidationError`.
"""
# 1) For backward compatibility we do not change the method signature
# (i.e. return a ``None`` value on successful validation).
# The actual implementation of the validation method is implemented
# below in _validate() which is also the one used internally to avoid
# double encoding of the dict to JSON.
# 2) We ignore **kwargs (but keep it for backward compatibility) as
# the jsonschema.IValidator only takes the two keyword arguments
        # format_checker and cls (i.e. validator).
self._validate(format_checker=format_checker, validator=validator)
def _validate(self, format_checker=None, validator=None, use_model=False):
"""Implementation of the JSONSchema validation."""
# Use the encoder to transform Python dictionary into JSON document
# prior to validation unless we explicitly ask to use the already
# encoded JSON in the model.
if use_model:
json = self.model.json
else:
json = self.model_cls.encode(dict(self))
if '$schema' in self and self['$schema'] is not None:
# Validate (an error will raise an exception)
_records_state.validate(
json,
self['$schema'],
# Use defaults of class if not specified by user.
format_checker=format_checker or self.format_checker,
cls=validator or self.validator
)
# Return encoded data, so we don't have to double encode.
return json
def replace_refs(self):
"""Replace the ``$ref`` keys within the JSON."""
if self.enable_jsonref:
return _records_state.replace_refs(self)
else:
return self
def clear_none(self, key=None):
"""Helper method to clear None, empty dict and list values.
Modifications are done in place.
"""
clear_none(dict_lookup(self, key) if key else self)
def dumps(self, dumper=None):
"""Make a dump of the record (defaults to a deep copy of the dict).
This method produces a version of a record that can be persisted on
storage such as the database, Elasticsearch or other mediums depending
on the dumper class used.
:param dumper: Dumper to use when dumping the record.
:returns: A ``dict``.
"""
dumper = dumper or self.dumper
data = {}
# Run pre dump extensions
for e in self._extensions:
pre_dump_params = inspect.signature(e.pre_dump).parameters
if 'data' in pre_dump_params:
e.pre_dump(self, data, dumper=dumper)
else:
# TODO: Remove in v1.6.0 or later
warnings.warn(
"The pre_dump hook must take a positional argument data.",
DeprecationWarning
)
e.pre_dump(self, dumper=dumper)
dump_params = inspect.signature(dumper.dump).parameters
if 'data' in dump_params:
# Execute the dump - for backwards compatibility we use the default
# dumper which returns a deepcopy.
data = dumper.dump(self, data)
else:
# TODO: Remove in v1.6.0 or later
warnings.warn(
"The dumper.dump() must take a positional argument data.",
DeprecationWarning
)
data = dumper.dump(self)
for e in self._extensions:
e.post_dump(self, data, dumper=dumper)
return data
@classmethod
def loads(cls, data, loader=None):
"""Load a record dump.
:param loader: Loader class to use when loading the record.
:returns: A new :class:`Record` instance.
"""
        # The method is named in the plural to align with dumps() (which keeps
        # the trailing "s" even though it should probably have been called
        # "dump" instead).
loader = loader or cls.dumper
data = deepcopy(data) # avoid mutating the original object
# Run pre load extensions
for e in cls._extensions:
e.pre_load(data, loader=loader)
record = loader.load(data, cls)
# Run post load extensions
for e in cls._extensions:
post_load_params = inspect.signature(e.post_load).parameters
if 'data' in post_load_params:
e.post_load(record, data, loader=loader)
else:
# TODO: Remove in v1.6.0 or later
warnings.warn(
"The post_load hook must take a positional argument data.",
DeprecationWarning
)
e.post_load(record, loader=loader)
return record
class Record(RecordBase):
"""Define API for metadata creation and manipulation."""
send_signals = True
"""Class-level attribute to control if signals should be sent."""
@classmethod
def create(cls, data, id_=None, **kwargs):
r"""Create a new record instance and store it in the database.
#. Send a signal :data:`invenio_records.signals.before_record_insert`
with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
with the new created record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
automatically generated.
:returns: A new :class:`Record` instance.
"""
with db.session.begin_nested():
# For backward compatibility we pop them here.
format_checker = kwargs.pop('format_checker', None)
validator = kwargs.pop('validator', None)
# Create the record and the model
record = cls(
data,
model=cls.model_cls(id=id_, data=data),
**kwargs
)
if cls.send_signals:
before_record_insert.send(
current_app._get_current_object(),
record=record
)
# Run pre create extensions
for e in cls._extensions:
e.pre_create(record)
# Validate also encodes the data
record._validate(
format_checker=format_checker,
validator=validator,
use_model=True # use model (already encoded) and didn't change
)
db.session.add(record.model)
if cls.send_signals:
after_record_insert.send(
current_app._get_current_object(),
record=record
)
# Run post create extensions
for e in cls._extensions:
e.post_create(record)
return record
@classmethod
def get_record(cls, id_, with_deleted=False):
"""Retrieve the record by id.
Raise a database exception if the record does not exist.
:param id_: record ID.
:param with_deleted: If `True` then it includes deleted records.
:returns: The :class:`Record` instance.
"""
with db.session.no_autoflush:
query = cls.model_cls.query.filter_by(id=id_)
if not with_deleted:
query = query.filter(cls.model_cls.is_deleted != True) # noqa
obj = query.one()
return cls(obj.data, model=obj)
@classmethod
def get_records(cls, ids, with_deleted=False):
"""Retrieve multiple records by id.
:param ids: List of record IDs.
:param with_deleted: If `True` then it includes deleted records.
:returns: A list of :class:`Record` instances.
"""
with db.session.no_autoflush:
query = cls.model_cls.query.filter(cls.model_cls.id.in_(ids))
if not with_deleted:
query = query.filter(cls.model_cls.is_deleted != True) # noqa
return [cls(obj.data, model=obj) for obj in query.all()]
def patch(self, patch):
"""Patch record metadata.
:params patch: Dictionary of record metadata.
:returns: A new :class:`Record` instance.
"""
warnings.warn(
"The patch() method is deprecated and will be removed.",
DeprecationWarning
)
data = apply_patch(dict(self), patch)
return self.__class__(data, model=self.model)
def commit(self, format_checker=None, validator=None, **kwargs):
r"""Store changes of the current record instance in the database.
#. Send a signal :data:`invenio_records.signals.before_record_update`
with the current record to be committed as parameter.
#. Validate the current record data.
#. Commit the current record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_update`
with the committed record as parameter.
:Keyword Arguments:
* **format_checker** --
An instance of the class :class:`jsonschema.FormatChecker`, which
contains validation rules for formats. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
* **validator** --
A :class:`jsonschema.IValidator` class that will be used to
validate the record. See
:func:`~invenio_records.api.RecordBase.validate` for more details.
:returns: The :class:`Record` instance.
"""
if self.model is None or self.model.is_deleted:
raise MissingModelError()
with db.session.begin_nested():
if self.send_signals:
before_record_update.send(
current_app._get_current_object(),
record=self
)
# Run pre commit extensions
for e in self._extensions:
e.pre_commit(self, **kwargs)
# Validate also encodes the data
json = self._validate(
format_checker=format_checker, validator=validator)
# Thus, we pass the encoded JSON directly to the model to avoid
# double encoding.
self.model.json = json
flag_modified(self.model, 'json')
db.session.merge(self.model)
if self.send_signals:
after_record_update.send(
current_app._get_current_object(),
record=self
)
# Run post commit extensions
for e in self._extensions:
e.post_commit(self)
return self
def delete(self, force=False):
"""Delete a record.
If `force` is ``False``, the record is soft-deleted: record data will
be deleted but the record identifier and the history of the record will
be kept. This ensures that the same record identifier cannot be used
twice, and that you can still retrieve its history. If `force` is
``True``, then the record is completely deleted from the database.
#. Send a signal :data:`invenio_records.signals.before_record_delete`
with the current record as parameter.
#. Delete or soft-delete the current record.
#. Send a signal :data:`invenio_records.signals.after_record_delete`
with the current deleted record as parameter.
:param force: if ``True``, completely deletes the current record from
the database, otherwise soft-deletes it.
:returns: The deleted :class:`Record` instance.
"""
if self.model is None:
raise MissingModelError()
with db.session.begin_nested():
if self.send_signals:
before_record_delete.send(
current_app._get_current_object(),
record=self
)
# Run pre delete extensions
for e in self._extensions:
e.pre_delete(self, force=force)
if force:
db.session.delete(self.model)
else:
self.model.is_deleted = True
db.session.merge(self.model)
if self.send_signals:
after_record_delete.send(
current_app._get_current_object(),
record=self
)
# Run post delete extensions
for e in self._extensions:
e.post_delete(self, force=force)
return self
def undelete(self):
"""Undelete a soft-deleted record."""
if self.model is None:
raise MissingModelError()
self.model.is_deleted = False
return self
def revert(self, revision_id):
"""Revert the record to a specific revision.
#. Send a signal :data:`invenio_records.signals.before_record_revert`
with the current record as parameter.
#. Revert the record to the revision id passed as parameter.
#. Send a signal :data:`invenio_records.signals.after_record_revert`
with the reverted record as parameter.
:param revision_id: Specify the record revision id
:returns: The :class:`Record` instance corresponding to the revision id
"""
if self.model is None:
raise MissingModelError()
revision = self.revisions[revision_id]
with db.session.begin_nested():
if self.send_signals:
                # TODO: arguments to this signal do not make sense.
# Ought to be both record and revision.
before_record_revert.send(
current_app._get_current_object(),
record=self
)
for e in self._extensions:
e.pre_revert(self, revision)
# Here we explicitly set the json column in order to not
# encode/decode the json data via the ``data`` property.
self.model.json = revision.model.json
flag_modified(self.model, 'json')
db.session.merge(self.model)
if self.send_signals:
                # TODO: arguments to this signal do not make sense.
# Ought to be the class being returned just below and should
# include the revision.
after_record_revert.send(
current_app._get_current_object(),
record=self
)
record = self.__class__(self.model.data, model=self.model)
for e in self._extensions:
e.post_revert(record, revision)
return record
@property
def revisions(self):
"""Get revisions iterator."""
if self.model is None:
raise MissingModelError()
return RevisionsIterator(self.model)
class RecordRevision(RecordBase):
"""API for record revisions."""
def __init__(self, model):
"""Initialize instance with the SQLAlchemy model."""
super(RecordRevision, self).__init__(
# The version model class does not have the properties of the
# parent model class, and thus ``model.data`` won't work (which is
# a Python property on RecordMetadataBase).
parent_class(model.__class__).decode(model.json),
model=model
)
class RevisionsIterator(object):
"""Iterator for record revisions."""
def __init__(self, model):
"""Initialize instance with the SQLAlchemy model."""
self._it = None
self.model = model
def __len__(self):
"""Get number of revisions."""
return self.model.versions.count()
def __iter__(self):
"""Get iterator."""
self._it = iter(self.model.versions)
return self
def __next__(self):
"""Get next revision item."""
return RecordRevision(next(self._it))
def __getitem__(self, revision_id):
"""Get a specific revision.
        The revision id is always smaller by 1 than the version_id. This was
        initially done so that record revisions were zero-indexed like arrays
        (e.g. you could do ``record.revisions[0]``). Because SQLAlchemy
        increases the version counter in Python rather than in the SQL
        insert/update query, it is possible to end up with an "array with
        holes", so zero-indexing no longer makes much sense. It is kept for
        historical reasons and has not been changed because changing it is
        difficult - e.g. it would imply updating all indexed records in
        existing instances.
"""
if revision_id < 0:
return RecordRevision(self.model.versions[revision_id])
try:
return RecordRevision(
self.model.versions.filter_by(
version_id=revision_id + 1
).one()
)
except NoResultFound:
raise IndexError
def __contains__(self, revision_id):
"""Test if revision exists."""
try:
self[revision_id]
return True
except IndexError:
return False
def __reversed__(self):
"""Allows to use reversed operator."""
for version_index in range(self.model.versions.count()):
yield RecordRevision(self.model.versions[-(version_index+1)])
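# Usage sketch (illustrative only; assumes a configured Flask application context and an
# active database session, and uses made-up metadata):
#
#   record = Record.create({'title': 'A title'})   # insert and validate a new record
#   db.session.commit()
#   record['title'] = 'A better title'
#   record.commit()                                # stores a new revision
#   db.session.commit()
#   first = record.revisions[0]                    # revision ids start at 0
#   record = record.revert(0)                      # roll back to the first revision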
| 35.142433
| 79
| 0.598919
|
ad8c281250d63a7ebe168051862299a2e048ba2d
| 267
|
py
|
Python
|
wotpy/codecs/enums.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | 24
|
2019-02-15T09:00:27.000Z
|
2021-12-23T05:45:03.000Z
|
wotpy/codecs/enums.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | 20
|
2020-03-17T09:41:51.000Z
|
2021-07-14T12:29:02.000Z
|
wotpy/codecs/enums.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | 5
|
2019-10-10T13:38:20.000Z
|
2021-12-22T14:22:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Enumeration classes related to codecs.
"""
from wotpy.utils.enums import EnumListMixin
class MediaTypes(EnumListMixin):
"""Enumeration of media types."""
JSON = "application/json"
TEXT = "text/plain"
| 16.6875
| 43
| 0.670412
|
e1fe4819fde6c1d3f977360ff181a50b8c7edf95
| 1,196
|
py
|
Python
|
src/wrappers/injector.py
|
ScottDay/DFN-Maintenance-GUI-Backend
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 2
|
2017-03-31T00:57:35.000Z
|
2017-08-04T10:38:28.000Z
|
src/wrappers/injector.py
|
CPedersen3245/Desert-Fireball-Maintainence-GUI
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 10
|
2017-03-29T04:13:14.000Z
|
2017-08-14T06:14:52.000Z
|
src/wrappers/injector.py
|
ScottDay/DFN-Maintenance-GUI-Backend
|
bfb05c75747fa9c334224b99609baef7321860a4
|
[
"MIT"
] | 4
|
2017-12-23T03:16:00.000Z
|
2018-06-20T07:15:50.000Z
|
from flask import current_app
from functools import wraps
from inspect import getargspec
def injector(function):
'''
    Injects objects from current_app into the decorated method. The decorated method must declare the injected
    objects as parameters after its normal parameters.
    Can also inject the application config: declare a ``config`` parameter and every config entry becomes a
    lower-cased attribute on the injected object. E.g.
    @injector
    def method(config):
'''
@wraps(function)
def decorator(*args, **kwargs):
argsspec = getargspec(function)
if 'handler' in argsspec.args:
kwargs['handler'] = current_app.handler
if 'log' in argsspec.args:
kwargs['log'] = current_app.handler.log
# TODO: Add error handling for config retrieval.
if 'config' in argsspec.args:
class Config():
pass
config = Config()
for entry in current_app.config:
setattr(config, entry.lower(), current_app.config[entry])
kwargs['config'] = config
return function(*args, **kwargs)
return decorator
# TODO: Decorator called 'conditionally', requires a condition to be true in order to execute e.g. @conditionally(config.verbose, True).
# https://stackoverflow.com/questions/3773555/python3-decorating-conditionally#answer-3865534
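# Usage sketch (hypothetical view function; current_app.handler and its .log/.status
# attributes are assumed to exist as described above, and the argument values are made up):
#
#   @injector
#   def camera_status(camera_id, handler, log, config):
#       log.info('checking camera %s (verbose=%s)', camera_id, config.verbose)
#       return handler.status(camera_id)
#
#   camera_status('cam0')   # handler, log and config are injected from current_app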
| 27.181818
| 137
| 0.737458
|
4b9ba2f2b6287c2d2acf28938f7a3ee9ce90bf20
| 5,693
|
py
|
Python
|
inputFiles/ourIA/packagev2.py
|
dimtion/jml
|
dba4db760280cc5ed8c384e36e41d6c7a310fb4f
|
[
"MIT"
] | 1
|
2015-10-07T19:18:55.000Z
|
2015-10-07T19:18:55.000Z
|
inputFiles/ourIA/packagev2.py
|
dimtion/jml
|
dba4db760280cc5ed8c384e36e41d6c7a310fb4f
|
[
"MIT"
] | 1
|
2015-10-07T19:28:25.000Z
|
2015-10-08T19:01:47.000Z
|
inputFiles/ourIA/packagev2.py
|
dimtion/jml
|
dba4db760280cc5ed8c384e36e41d6c7a310fb4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lib.utils as u
import lib.algorithms as algo
import lib.interface as api
import math
#import statistics as stats
IAName = "packagev2"
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = api.initGame(IAName)
best_weight = float("inf")
best_path = []
packages = {}
route_table = {}
def exhaustive(left, node, path, weight, coins_graph):
"""Fill best_path with the ROUTE to get all the coins in a minimal time"""
global best_weight, best_path
if len(left) == 0:
if weight < best_weight:
best_weight = weight
best_path[:] = path
else:
for i in left:
new_left = []
new_left[:] = left
new_left.remove(i)
if weight + coins_graph[1][node][i] > best_weight:
break
exhaustive(new_left, i, path + coins_graph[0][node][i], weight + coins_graph[1][node][i], coins_graph)
def fill_packages(coins, mazeMap):
"""fill packages, also create the route table from any coin to any coin """
global packages, route_table, dists
used = []
dists, route_table = u.dists_from_each(coins + [playerLocation], mazeMap)
dists_list = sorted([d for di in dists for d in di])
quart_dist = dists_list[len(dists_list)//4]
api.debug(quart_dist)
visited =[coins[0]]
left_coins = coins[:]
while len(left_coins) != 0:
meta_current_node = left_coins.pop(0)
packages[meta_current_node] = [meta_current_node]
visited.append(meta_current_node)
while len(visited) !=0:
current_node = visited.pop(0)
for c in left_coins:
if dists[c][current_node] < quart_dist:
packages[meta_current_node].append(c)
left_coins.remove(c)
visited.append(c)
packages = [packages[p] for p in packages]
packages = list(reversed(sorted(packages, key=lambda x: (len(x)+3)/min([dists[playerLocation][c] for c in x]))))
for k in range(len(packages)):
n = len(packages[k])
if n > 5:
p1 = packages[k][:n//2]
p2 = packages[k][n//2:]
if len(p1)> 5:
p1prime = p1[:len(p1)//2]
p1sec = p1[len(p1)//2:]
p2prime = p2[:len(p2)//2]
p2sec = p2[len(p2)//2:]
packages[k] = p1prime
packages.insert(k+1,p1sec)
packages.insert(k+2,p2prime)
packages.insert(k+3,p2sec)
else:
packages[k] = p1
packages.insert(k+1,p2)
def find_players(packages, opponentLocation, playerLocation):
"""Check if opponent is in packages"""
i1,i2 = (-1,-1)
j1,j2 = (-1,-1)
acc = 0
for i in range(len(packages)):
for k in range(len(packages[i])):
if opponentLocation == packages[i][k]:
i1, i2 = i, k
acc = acc +1
elif playerLocation == packages[i][k] and playerLocation != opponentLocation:
j1, j2 = i, k
acc = acc +1
if acc > 1:
break
return i1,i2,j1,j2
def initialisationTurn(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins) :
"""Function called once at the begining of the game"""
global route_table, packages, best_weight, best_path, current_package, dists
fill_packages(coins, mazeMap)
current_package = packages.pop(0)
exhaustive(current_package, playerLocation, [], 0, (route_table, dists))
def determineNextMove(playerLocation, opponentLocation, coins):
"""Function called at each turn, must return the next move of the player"""
global packages, dists, route_table, best_path, best_weight, current_package
if playerLocation in current_package:
current_package.remove(playerLocation)
i1,i2,j1,j2 = find_players(packages, opponentLocation, playerLocation)
if i1 >= 0:
packages[i1].remove(packages[i1][i2])
if len(packages[i1]) == 0:
packages.remove(packages[i1])
if j1 >= 0:
api.debug(packages[j1])
packages[j1].remove(packages[j1][j2])
if len(packages[j1]) == 0:
packages.remove(packages[j1])
if opponentLocation in current_package:
dists, route_table = u.update_dists_from_each(dists, route_table, playerLocation, mazeMap, coins)
if len(current_package) > 1:
current_package.remove(opponentLocation)
else:
current_package = packages.pop(0)
best_weight = float("inf")
best_path = []
exhaustive(current_package, playerLocation, [], 0, (route_table, dists))
if len(best_path) == 0:
packages = list(reversed(sorted(packages, key=lambda x: (len(x)+3)/min([dists[playerLocation][c] for c in x]))))
best_weight = float("inf")
best_path = []
current_package = packages.pop(0)
if len(current_package) == 1 and packages != []:
current_package = current_package + packages.pop(0)
exhaustive(current_package, playerLocation, [], 0, (route_table, dists))
return u.direction(playerLocation, best_path.pop(0))
# Init our AI
initialisationTurn(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins)
# Starts the game
api.startGameMainLoop(determineNextMove)
| 37.20915
| 135
| 0.596346
|
8ea4a0ded94a23fa5ffaec0c701833186c345fca
| 1,991
|
py
|
Python
|
CSCI3230/Assignment 2/CSCI3230_kmeans Clustering.py
|
Arie123777/cuhkCourseWork
|
355f1e560ef513f3ba9bec2e9eb35caa1f6f7107
|
[
"FSFAP"
] | null | null | null |
CSCI3230/Assignment 2/CSCI3230_kmeans Clustering.py
|
Arie123777/cuhkCourseWork
|
355f1e560ef513f3ba9bec2e9eb35caa1f6f7107
|
[
"FSFAP"
] | null | null | null |
CSCI3230/Assignment 2/CSCI3230_kmeans Clustering.py
|
Arie123777/cuhkCourseWork
|
355f1e560ef513f3ba9bec2e9eb35caa1f6f7107
|
[
"FSFAP"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import math
color=['g', 'r', 'b']
tit = ["mm", "Assign Point Clusters", "Update Cluster Centroid"]
file = open("CSCI3230_ClusteringData.csv")
title = np.loadtxt(file, delimiter=",", dtype = str, max_rows=1)
pointData = np.loadtxt(file, delimiter=",")
addCol = np.ones((pointData.shape[0], 1))*1000
pointData = np.append(pointData, addCol, axis=1)
file.close()
file = open("CSCI3230_initialCluster.csv")
initCluster = np.loadtxt(file, delimiter=",", skiprows=1)
file.close()
iterationTime = 0
change = True
while change == True:
change = False
iterationTime += 1
for row in initCluster:
x1 = row[0]
y1 = row[1]
cNum = row[2]
for record in pointData:
dist = ((record[0] - x1)**2 + (record[1] - y1)**2)**0.5
if dist < record[3]:
record[3] = dist
record[2] = cNum
change = True
print("Iteration %d:" % iterationTime)
print(pointData)
for record in pointData:
plt.scatter(record[0], record[1], c=color[int(record[2])])
for row in initCluster:
plt.scatter(row[0], row[1], marker='x', c=color[int(row[2])])
#plt.title("Iteration %d" % iterationTime)
plt.title(tit[iterationTime])
plt.show()
clusterCount = np.zeros(initCluster.shape[0])
for row in initCluster:
row[0] = 0
row[1] = 0
for record in pointData:
initCluster[int(record[2])-1][0] += record[0]
initCluster[int(record[2])-1][1] += record[1]
clusterCount[int(record[2])-1] += 1
for row, count in zip(initCluster, clusterCount):
row[0] /= count
row[1] /= count
print("%f %f" % (row[0], row[1]))
for record in pointData:
record[3] = ((record[0] - initCluster[int(record[2])-1][0])**2 + (record[1] - initCluster[int(record[2])-1][1])**2)**0.5
print("Iteration ends.")
| 35.553571
| 130
| 0.573581
|
15a67314918168e0ee60889c1d7aabbea33d5e73
| 39,368
|
py
|
Python
|
python/tests/sklearn/preprocessing/data_test.py
|
rodrigo196/mleap
|
abe8ebc1cf8bfdd7ee732912ef124bb2463b5466
|
[
"Apache-2.0"
] | 1
|
2020-02-18T22:12:38.000Z
|
2020-02-18T22:12:38.000Z
|
python/tests/sklearn/preprocessing/data_test.py
|
rodrigo196/mleap
|
abe8ebc1cf8bfdd7ee732912ef124bb2463b5466
|
[
"Apache-2.0"
] | 1
|
2020-01-30T21:12:13.000Z
|
2020-01-30T21:12:13.000Z
|
python/tests/sklearn/preprocessing/data_test.py
|
juhoautio/mleap
|
54bc171265e02936d09238ecd0a25fa34efc9c65
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
import numpy as np
import os
import shutil
import json
import uuid
from mleap.sklearn.preprocessing.data import FeatureExtractor, MathUnary, MathBinary, StringMap
from mleap.sklearn.preprocessing.data import StandardScaler, MinMaxScaler, LabelEncoder, Imputer, Binarizer, PolynomialFeatures
from mleap.sklearn.preprocessing.data import OneHotEncoder
from pandas.util.testing import assert_frame_equal
class TransformerTests(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
self.tmp_dir = "/tmp/mleap.python.tests/{}".format(uuid.uuid1())
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
os.makedirs(self.tmp_dir)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_standard_scaler_serializer(self):
standard_scaler = StandardScaler(with_mean=True,
with_std=True
)
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
standard_scaler.mlinit(prior_tf=feature_extractor,
output_features='a_scaled')
standard_scaler.fit(self.df[['a']])
standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
expected_mean = self.df.a.mean()
expected_std = np.sqrt(np.var(self.df.a))
expected_model = {
"op": "standard_scaler",
"attributes": {
"mean": {
"double": [expected_mean],
"shape": {
"dimensions": [{
"size": 1,
"name": ""
}]
},
"type": "tensor"
},
"std": {
"double": [expected_std],
"shape": {
"dimensions": [{
"size": 1,
"name": ""
}]
},
"type": "tensor"
}
}
}
self.assertAlmostEqual(expected_mean, standard_scaler.mean_.tolist()[0], places = 7)
self.assertAlmostEqual(expected_std, np.sqrt(standard_scaler.var_.tolist()[0]), places = 7)
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, standard_scaler.name)) as json_data:
model = json.load(json_data)
self.assertEqual(standard_scaler.op, expected_model['op'])
self.assertEqual(expected_model['attributes']['mean']['shape']['dimensions'][0]['size'], model['attributes']['mean']['shape']['dimensions'][0]['size'])
self.assertEqual(expected_model['attributes']['std']['shape']['dimensions'][0]['size'], model['attributes']['std']['shape']['dimensions'][0]['size'])
self.assertAlmostEqual(expected_model['attributes']['mean']['double'][0], model['attributes']['mean']['double'][0], places = 7)
self.assertAlmostEqual(expected_model['attributes']['std']['double'][0], model['attributes']['std']['double'][0], places = 7)
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, standard_scaler.name)) as json_data:
node = json.load(json_data)
self.assertEqual(standard_scaler.name, node['name'])
self.assertEqual(standard_scaler.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(standard_scaler.output_features, node['shape']['outputs'][0]['name'])
def test_standard_scaler_deserializer(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
# Serialize a standard scaler to a bundle
standard_scaler = StandardScaler(with_mean=True,
with_std=True
)
standard_scaler.mlinit(prior_tf=feature_extractor,
output_features='a_scaled')
standard_scaler.fit(self.df[['a']])
standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
# Now deserialize it back
node_name = "{}.node".format(standard_scaler.name)
standard_scaler_tf = StandardScaler()
standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = standard_scaler.transform(self.df[['a']])
res_b = standard_scaler_tf.transform(self.df[['a']])
self.assertEqual(res_a[0], res_b[0])
self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
self.assertEqual(standard_scaler.mean_, standard_scaler_tf.mean_)
self.assertEqual(standard_scaler.scale_, standard_scaler_tf.scale_)
def test_standard_scaler_multi_deserializer(self):
extract_features = ['a', 'b']
feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
output_vector='extracted_multi_outputs',
output_vector_items=["{}_out".format(x) for x in extract_features])
# Serialize a standard scaler to a bundle
standard_scaler = StandardScaler(with_mean=True,
with_std=True
)
standard_scaler.mlinit(prior_tf=feature_extractor,
output_features=['a_scaled', 'b_scaled'])
standard_scaler.fit(self.df[['a', 'b']])
standard_scaler.serialize_to_bundle(self.tmp_dir, standard_scaler.name)
# Now deserialize it back
node_name = "{}.node".format(standard_scaler.name)
standard_scaler_tf = StandardScaler()
standard_scaler_tf = standard_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = standard_scaler.transform(self.df[['a', 'b']])
res_b = standard_scaler_tf.transform(self.df[['a', 'b']])
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[0][1], res_b[0][1])
self.assertEqual(standard_scaler.name, standard_scaler_tf.name)
self.assertEqual(standard_scaler.op, standard_scaler_tf.op)
self.assertEqual(standard_scaler.mean_[0], standard_scaler_tf.mean_[0])
self.assertEqual(standard_scaler.mean_[1], standard_scaler_tf.mean_[1])
self.assertEqual(standard_scaler.scale_[0], standard_scaler_tf.scale_[0])
self.assertEqual(standard_scaler.scale_[1], standard_scaler_tf.scale_[1])
def test_min_max_scaler_serializer(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
scaler = MinMaxScaler()
scaler.mlinit(prior_tf = feature_extractor,
output_features='a_scaled')
scaler.fit(self.df[['a']])
scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
expected_min = self.df.a.min()
expected_max = self.df.a.max()
expected_model = {
"op": "min_max_scaler",
"attributes": {
"min": {
"double": [expected_min],
"shape": {
"dimensions": [{
"size": 1,
"name": ""
}]
},
"type": "tensor"
},
"max": {
"double": [expected_max],
"shape": {
"dimensions": [{
"size": 1,
"name": ""
}]
},
"type": "tensor"
}
}
}
self.assertEqual(expected_min, scaler.data_min_.tolist()[0])
self.assertEqual(expected_max, scaler.data_max_.tolist()[0])
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, scaler.name)) as json_data:
model = json.load(json_data)
self.assertEqual(scaler.op, expected_model['op'])
self.assertEqual(expected_model['attributes']['min']['shape']['dimensions'][0]['size'], model['attributes']['min']['shape']['dimensions'][0]['size'])
self.assertEqual(expected_model['attributes']['max']['shape']['dimensions'][0]['size'], model['attributes']['max']['shape']['dimensions'][0]['size'])
self.assertEqual(expected_model['attributes']['min']['double'][0], model['attributes']['min']['double'][0])
self.assertEqual(expected_model['attributes']['max']['double'][0], model['attributes']['max']['double'][0])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, scaler.name)) as json_data:
node = json.load(json_data)
self.assertEqual(scaler.name, node['name'])
self.assertEqual(scaler.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(scaler.output_features, node['shape']['outputs'][0]['name'])
def test_min_max_scaler_deserializer(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
scaler = MinMaxScaler()
scaler.mlinit(prior_tf=feature_extractor,
output_features='a_scaled')
scaler.fit(self.df[['a']])
scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
# Deserialize the MinMaxScaler
node_name = "{}.node".format(scaler.name)
min_max_scaler_tf = MinMaxScaler()
min_max_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = scaler.transform(self.df[['a']])
res_b = min_max_scaler_tf.transform(self.df[['a']])
self.assertEqual(res_a[0], res_b[0])
self.assertEqual(scaler.name, min_max_scaler_tf.name)
self.assertEqual(scaler.op, min_max_scaler_tf.op)
def test_min_max_scaler_multi_deserializer(self):
extract_features = ['a', 'b']
feature_extractor = FeatureExtractor(input_scalars=['a', 'b'],
output_vector='extracted_multi_outputs',
output_vector_items=["{}_out".format(x) for x in extract_features])
scaler = MinMaxScaler()
scaler.mlinit(prior_tf=feature_extractor,
output_features=['a_scaled', 'b_scaled'])
        scaler.fit(self.df[['a', 'b']])
scaler.serialize_to_bundle(self.tmp_dir, scaler.name)
# Deserialize the MinMaxScaler
node_name = "{}.node".format(scaler.name)
min_max_scaler_tf = MinMaxScaler()
min_max_scaler_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = scaler.transform(self.df[['a', 'b']])
res_b = min_max_scaler_tf.transform(self.df[['a', 'b']])
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[0][1], res_b[0][1])
self.assertEqual(scaler.name, min_max_scaler_tf.name)
self.assertEqual(scaler.op, min_max_scaler_tf.op)
def label_encoder_test(self):
labels = ['a', 'b', 'c']
le = LabelEncoder(input_features=['label_feature'],
output_features='label_feature_le_encoded')
le.fit(labels)
self.assertEqual(labels, le.classes_.tolist())
le.serialize_to_bundle(self.tmp_dir, le.name)
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, le.name)) as json_data:
model = json.load(json_data)
self.assertEqual(le.op, model['op'])
self.assertTrue('nullable_input' in model['attributes'])
self.assertTrue('labels' in model['attributes'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, le.name)) as json_data:
node = json.load(json_data)
self.assertEqual(le.name, node['name'])
self.assertEqual(le.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(le.output_features, node['shape']['outputs'][0]['name'])
def label_encoder_deserializer_test(self):
labels = ['a', 'b', 'c']
le = LabelEncoder(input_features=['label_feature'],
output_features='label_feature_le_encoded')
le.fit(labels)
self.assertEqual(labels, le.classes_.tolist())
le.serialize_to_bundle(self.tmp_dir, le.name)
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, le.name)) as json_data:
model = json.load(json_data)
# Deserialize the LabelEncoder
node_name = "{}.node".format(le.name)
label_encoder_tf = LabelEncoder()
label_encoder_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = le.transform(labels)
res_b = label_encoder_tf.transform(labels)
print("le.output_features: {}".format(le.output_features))
print("label_encoder_tf.output_features: {}".format(label_encoder_tf.output_features))
self.assertEqual(res_a[0], res_b[0])
self.assertEqual(res_a[1], res_b[1])
self.assertEqual(res_a[2], res_b[2])
self.assertEqual(le.input_features, label_encoder_tf.input_features)
self.assertEqual(le.output_features, label_encoder_tf.output_features[0])
def one_hot_encoder_serializer_test(self):
labels = ['a', 'b', 'c']
le = LabelEncoder(input_features=['label_feature'],
output_features='label_feature_le_encoded')
oh_data = le.fit_transform(labels).reshape(3, 1)
one_hot_encoder_tf = OneHotEncoder(sparse=False)
one_hot_encoder_tf.mlinit(prior_tf=le,
output_features='{}_one_hot_encoded'.format(le.output_features))
one_hot_encoder_tf.fit(oh_data)
one_hot_encoder_tf.serialize_to_bundle(self.tmp_dir, one_hot_encoder_tf.name)
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, one_hot_encoder_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(one_hot_encoder_tf.op, model['op'])
self.assertEqual(3, model['attributes']['size']['long'])
self.assertEqual(False, model['attributes']['drop_last']['boolean'])
def one_hot_encoder_deserializer_test(self):
labels = ['a', 'b', 'c']
le = LabelEncoder(input_features=['label_feature'],
output_features='label_feature_le_encoded')
oh_data = le.fit_transform(labels).reshape(3, 1)
one_hot_encoder_tf = OneHotEncoder(sparse=False)
one_hot_encoder_tf.mlinit(prior_tf = le,
output_features='{}_one_hot_encoded'.format(le.output_features))
one_hot_encoder_tf.fit(oh_data)
one_hot_encoder_tf.serialize_to_bundle(self.tmp_dir, one_hot_encoder_tf.name)
# Deserialize the OneHotEncoder
node_name = "{}.node".format(one_hot_encoder_tf.name)
one_hot_encoder_tf_ds = OneHotEncoder()
one_hot_encoder_tf_ds.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = one_hot_encoder_tf.transform(oh_data)
res_b = one_hot_encoder_tf_ds.transform(oh_data)
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[1][0], res_b[1][0])
self.assertEqual(res_a[2][0], res_b[2][0])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, one_hot_encoder_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(one_hot_encoder_tf_ds.name, node['name'])
self.assertEqual(one_hot_encoder_tf_ds.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(one_hot_encoder_tf_ds.output_features[0], node['shape']['outputs'][0]['name'])
def feature_extractor_test(self):
extract_features = ['a', 'd']
feature_extractor = FeatureExtractor(input_scalars=extract_features,
output_vector='extract_features_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
res = feature_extractor.fit_transform(self.df)
self.assertEqual(len(res.columns), 2)
feature_extractor.serialize_to_bundle(self.tmp_dir, feature_extractor.name)
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, feature_extractor.name)) as json_data:
node = json.load(json_data)
self.assertEqual(feature_extractor.name, node['name'])
self.assertEqual(feature_extractor.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(feature_extractor.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(feature_extractor.output_vector, node['shape']['outputs'][0]['name'])
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, feature_extractor.name)) as json_data:
model = json.load(json_data)
expected_model = {
"op": "vector_assembler",
"attributes": {
"input_shapes": {
"data_shape": [
{
"base": "scalar",
"isNullable": False
},
{
"base": "scalar",
"isNullable": False
}],
"type": "list"
}
}
}
self.assertEqual(expected_model['op'], model['op'])
self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][0]['base'],
model['attributes']['input_shapes']['data_shape'][0]['base'])
self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][0]['isNullable'],
model['attributes']['input_shapes']['data_shape'][0]['isNullable'])
self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][1]['base'],
model['attributes']['input_shapes']['data_shape'][1]['base'])
self.assertEqual(expected_model['attributes']['input_shapes']['data_shape'][1]['isNullable'],
model['attributes']['input_shapes']['data_shape'][1]['isNullable'])
def imputer_test(self):
def _set_nulls(df):
row = df['index']
if row in [2,5]:
return np.NaN
return df.a
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
imputer = Imputer(strategy='mean')
imputer.mlinit(prior_tf=feature_extractor,
output_features='a_imputed')
df2 = self.df
df2.reset_index(inplace=True)
df2['a'] = df2.apply(_set_nulls, axis=1)
imputer.fit(df2[['a']])
self.assertAlmostEqual(imputer.statistics_[0], df2.a.mean(), places = 7)
imputer.serialize_to_bundle(self.tmp_dir, imputer.name)
expected_model = {
"op": "imputer",
"attributes": {
"surrogate_value": {
"double": df2.a.mean()
},
"strategy": {
"string": "mean"
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, imputer.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['strategy']['string'], model['attributes']['strategy']['string'])
self.assertAlmostEqual(expected_model['attributes']['surrogate_value']['double'], model['attributes']['surrogate_value']['double'], places = 7)
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, imputer.name)) as json_data:
node = json.load(json_data)
self.assertEqual(imputer.name, node['name'])
self.assertEqual(imputer.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(imputer.output_features, node['shape']['outputs'][0]['name'])
def binarizer_test(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
binarizer = Binarizer(threshold=0)
binarizer.mlinit(prior_tf=feature_extractor,
output_features='a_binary')
Xres = binarizer.fit_transform(self.df[['a']])
# Test that the binarizer functions as expected
self.assertEqual(float(len(self.df[self.df.a >= 0]))/10.0, Xres.mean())
binarizer.serialize_to_bundle(self.tmp_dir, binarizer.name)
expected_model = {
"op": "sklearn_binarizer",
"attributes": {
"threshold": {
"double": 0.0
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, binarizer.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['threshold']['double'],
model['attributes']['threshold']['double'])
self.assertEqual(expected_model['op'], model['op'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, binarizer.name)) as json_data:
node = json.load(json_data)
self.assertEqual(binarizer.name, node['name'])
self.assertEqual(binarizer.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(binarizer.output_features, node['shape']['outputs'][0]['name'])
def binarizer_deserializer_test(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
binarizer = Binarizer(threshold=0.0)
binarizer.mlinit(prior_tf=feature_extractor,
output_features='a_binary')
Xres = binarizer.fit_transform(self.df[['a']])
# Test that the binarizer functions as expected
self.assertEqual(float(len(self.df[self.df.a >= 0]))/10.0, Xres.mean())
binarizer.serialize_to_bundle(self.tmp_dir, binarizer.name)
# Deserialize the Binarizer
node_name = "{}.node".format(binarizer.name)
binarizer_tf_ds = Binarizer()
binarizer_tf_ds.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = binarizer.transform(self.df[['a']])
res_b = binarizer_tf_ds.transform(self.df[['a']])
self.assertEqual(res_a[0][0], res_b[0][0])
self.assertEqual(res_a[1][0], res_b[1][0])
self.assertEqual(res_a[2][0], res_b[2][0])
self.assertEqual(res_a[3][0], res_b[3][0])
def polynomial_expansion_test(self):
extract_features = ['a']
feature_extractor = FeatureExtractor(input_scalars=['a'],
output_vector='extracted_a_output',
output_vector_items=["{}_out".format(x) for x in extract_features])
polynomial_exp = PolynomialFeatures(degree=2, include_bias=False)
polynomial_exp.mlinit(prior_tf=feature_extractor,
output_features='poly')
Xres = polynomial_exp.fit_transform(self.df[['a']])
self.assertEqual(Xres[0][1], Xres[0][0] * Xres[0][0])
polynomial_exp.serialize_to_bundle(self.tmp_dir, polynomial_exp.name)
expected_model = {
"op": "sklearn_polynomial_expansion",
"attributes": {
"combinations": {
"string": "[x0,x0^2]"
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, polynomial_exp.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['op'], model['op'])
self.assertEqual(expected_model['attributes']['combinations']['string'], model['attributes']['combinations']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, polynomial_exp.name)) as json_data:
node = json.load(json_data)
self.assertEqual(polynomial_exp.name, node['name'])
self.assertEqual(polynomial_exp.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(polynomial_exp.output_features, node['shape']['outputs'][0]['name'])
def math_unary_exp_test(self):
math_unary_tf = MathUnary(input_features=['a'], output_features='exp_a', transform_type='exp')
Xres = math_unary_tf.fit_transform(self.df.a)
self.assertEqual(np.exp(self.df.a[0]), Xres[0])
math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
expected_model = {
"op": "math_unary",
"attributes": {
"operation": {
"string": 'exp'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_unary_tf.name, node['name'])
self.assertEqual(math_unary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_unary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_unary_deserialize_exp_test(self):
math_unary_tf = MathUnary(input_features=['a'], output_features='exp_a', transform_type='exp')
Xres = math_unary_tf.fit_transform(self.df.a)
self.assertEqual(np.exp(self.df.a[0]), Xres[0])
math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
node_name = "{}.node".format(math_unary_tf.name)
math_unary_ds_tf = MathUnary()
math_unary_ds_tf = math_unary_ds_tf.deserialize_from_bundle(self.tmp_dir, node_name)
with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
model = json.load(json_data)
res_a = math_unary_tf.transform(self.df['a'])
res_b = math_unary_ds_tf.transform(self.df['a'])
self.assertEqual(res_a[0], res_b[0])
def math_unary_sin_test(self):
math_unary_tf = MathUnary(input_features=['a'], output_features='sin_a', transform_type='sin')
Xres = math_unary_tf.fit_transform(self.df.a)
self.assertEqual(np.sin(self.df.a[0]), Xres[0])
math_unary_tf.serialize_to_bundle(self.tmp_dir, math_unary_tf.name)
expected_model = {
"op": "math_unary",
"attributes": {
"operation": {
"string": 'sin'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_unary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_unary_tf.name, node['name'])
self.assertEqual(math_unary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_unary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_plus_b', transform_type='add')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a + self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
expected_model = {
"op": "math_binary",
"attributes": {
"operation": {
"string": 'add'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_binary_tf.name, node['name'])
self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_deserialize_add_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_plus_b', transform_type='add')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a + self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
node_name = "{}.node".format(math_binary_tf.name)
math_binary_ds_tf = MathBinary()
math_binary_ds_tf = math_binary_ds_tf.deserialize_from_bundle(self.tmp_dir, node_name)
res_a = math_binary_tf.transform(self.df[['a', 'b']])
res_b = math_binary_ds_tf.transform(self.df[['a', 'b']])
assert_frame_equal(res_a, res_b)
def math_binary_subtract_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_less_b', transform_type='sub')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a - self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
expected_model = {
"op": "math_binary",
"attributes": {
"operation": {
"string": 'sub'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_binary_tf.name, node['name'])
self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_multiply_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_mul_b', transform_type='mul')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a * self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
expected_model = {
"op": "math_binary",
"attributes": {
"operation": {
"string": 'mul'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_binary_tf.name, node['name'])
self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def math_binary_divide_test(self):
math_binary_tf = MathBinary(input_features=['a', 'b'], output_features='a_mul_b', transform_type='div')
Xres = math_binary_tf.fit_transform(self.df[['a', 'b']])
assert_frame_equal(pd.DataFrame(self.df.a / self.df.b, columns=['a']), Xres)
math_binary_tf.serialize_to_bundle(self.tmp_dir, math_binary_tf.name)
expected_model = {
"op": "math_binary",
"attributes": {
"operation": {
"string": 'div'
}
}
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['operation']['string'], model['attributes']['operation']['string'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, math_binary_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(math_binary_tf.name, node['name'])
self.assertEqual(math_binary_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(math_binary_tf.input_features[1], node['shape']['inputs'][1]['name'])
self.assertEqual(math_binary_tf.output_features, node['shape']['outputs'][0]['name'])
def string_map_test(self):
df = pd.DataFrame(['test_one', 'test_two', 'test_one', 'test_one', 'test_two'], columns=['a'])
string_map_tf = StringMap(input_features=['a'], output_features='a_mapped', labels={"test_one":1.0, "test_two": 0.0})
Xres = string_map_tf.fit_transform(df)
self.assertEqual(1.0, Xres[0])
self.assertEqual(0.0, Xres[1])
self.assertEqual(1.0, Xres[2])
self.assertEqual(1.0, Xres[3])
self.assertEqual(0.0, Xres[4])
string_map_tf.serialize_to_bundle(self.tmp_dir, string_map_tf.name)
expected_model = {
"op": "string_map",
"attributes": {
"labels": {
"type": "list",
"string": ["test_one", "test_two"]
},
"values": {
"type": "list",
"double": [1.0, 0.0]
}
}
}
#
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, string_map_tf.name)) as json_data:
model = json.load(json_data)
self.assertEqual(expected_model['attributes']['labels']['string'], model['attributes']['labels']['string'])
self.assertEqual(expected_model['attributes']['values']['double'], model['attributes']['values']['double'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, string_map_tf.name)) as json_data:
node = json.load(json_data)
self.assertEqual(string_map_tf.name, node['name'])
self.assertEqual(string_map_tf.input_features[0], node['shape']['inputs'][0]['name'])
self.assertEqual(string_map_tf.output_features, node['shape']['outputs'][0]['name'])
def string_map_deserializer_test(self):
df = pd.DataFrame(['test_one', 'test_two', 'test_one', 'test_one', 'test_two'], columns=['a'])
string_map = StringMap(input_features=['a'], output_features='a_mapped', labels={"test_one":1.0, "test_two": 0.0})
string_map.serialize_to_bundle(self.tmp_dir, string_map.name)
# Now deserialize it back
node_name = "{}.node".format(string_map.name)
string_map_tf = StringMap()
string_map_tf = string_map_tf.deserialize_from_bundle(self.tmp_dir, node_name)
# Transform some sample data
res_a = string_map.fit_transform(df)
res_b = string_map_tf.fit_transform(df)
self.assertEqual(res_a[0], res_b[0])
self.assertEqual(res_a[1], res_b[1])
self.assertEqual(res_a[2], res_b[2])
self.assertEqual(res_a[3], res_b[3])
self.assertEqual(res_a[4], res_b[4])
self.assertEqual(string_map.name, string_map_tf.name)
self.assertEqual(string_map.op, string_map_tf.op)
self.assertEqual(string_map.labels, string_map_tf.labels)
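# Allow the suite to be run directly as a script in addition to an external
# test runner; this simply delegates to unittest's default runner.
if __name__ == '__main__':
    unittest.main()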
| 40.585567
| 159
| 0.599675
|
6b657400b5b437b48b21809bd6801e2ef293e7c5
| 1,466
|
py
|
Python
|
experiments/examples/example_s2_create_net_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 18
|
2020-11-22T16:03:08.000Z
|
2022-03-15T12:11:46.000Z
|
experiments/examples/example_s2_create_net_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 2
|
2022-01-04T08:10:17.000Z
|
2022-01-05T08:13:14.000Z
|
experiments/examples/example_s2_create_net_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 6
|
2021-03-08T07:08:52.000Z
|
2022-02-24T12:00:43.000Z
|
from uninas.main import Main
"""
create a bench from an s1 net
run example_run_bench_s1.py first
most arguments are taken from the s1's run_config, e.g. network design, metrics, trainer, ...
beware that s1 may be using fake data
"""
args = {
"cls_task": "CreateSearchNetBenchTask",
# "{cls_task}.s1_path": "{path_tmp}/run_bench_s1/",
"{cls_task}.s1_path": "{path_tmp}/run_config/",
"{cls_task}.save_dir": "{path_tmp}/s2_bench/",
"{cls_task}.save_del_old": True,
"{cls_task}.is_test_run": True,
"{cls_task}.measure_min": 20,
"cls_benchmarks": "MiniNASTabularBenchmark",
"{cls_benchmarks#0}.path": "{path_data}/bench/sin/SIN_fairnas_v0.1.pt",
"cls_hpo_self_algorithm": "RandomHPO",
"{cls_hpo_self_algorithm}.num_eval": 100,
"{cls_data}.batch_size_train": 16,
"cls_hpo_estimators": "NetValueEstimator, NetMacsEstimator",
"{cls_hpo_estimators#0}.key": "acc1/valid",
"{cls_hpo_estimators#0}.is_objective": True,
"{cls_hpo_estimators#0}.load": False,
"{cls_hpo_estimators#0}.batches_forward": 0,
"{cls_hpo_estimators#0}.batches_train": 5,
"{cls_hpo_estimators#0}.batches_eval": -1,
"{cls_hpo_estimators#0}.value": "val/accuracy/1",
"{cls_hpo_estimators#1}.key": "flops",
"{cls_hpo_estimators#1}.is_objective": True,
}
if __name__ == "__main__":
# ignore the command line, use "args" instead
task = Main.new_task([], args_changes=args)
task.load()
task.run()
| 30.541667
| 93
| 0.683492
|
d19e67d9bca1408b80fb252808ea3c2fde0245e6
| 2,142
|
py
|
Python
|
testproject/testproject/app/tests/test_databasewrapper.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testproject/app/tests/test_databasewrapper.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testproject/app/tests/test_databasewrapper.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
from django.core.management.sql import sql_flush
from mock import Mock
from django_cassandra_engine.test import TestCase
from django_cassandra_engine.connection import CassandraConnection
from django_cassandra_engine.utils import (
get_cql_models,
get_installed_apps,
get_cassandra_connection
)
from testproject.app.models import ExampleModel, ExampleModel2
class DatabaseWrapperTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.connection = get_cassandra_connection()
cls.all_models = []
apps = get_installed_apps()
for app in apps:
cls.all_models.extend(get_cql_models(app))
cls.all_django_tables = \
[model.column_family_name(include_keyspace=False)
for model in cls.all_models]
def test_auto_connect(self):
self.assertIsNotNone(self.connection.connection)
self.assertTrue(self.connection.connected)
self.assertIsInstance(self.connection.connection, CassandraConnection)
def test_sql_flush_works(self):
mock_style = Mock()
ExampleModel.objects.create(id='1', created_at=datetime.now(),
deleted=False)
ExampleModel2.objects.create(id='3')
self.assertEqual(ExampleModel.objects.count(), 1)
self.assertEqual(ExampleModel2.objects.count(), 1)
statements = sql_flush(mock_style, self.connection)
self.assertEqual(statements, [])
self.assertEqual(ExampleModel.objects.count(), 0)
self.assertEqual(ExampleModel2.objects.count(), 0)
def test_connection_introspection_table_names(self):
tables = self.connection.introspection.table_names()
self.assertEqual(set(tables), set(self.all_django_tables))
def test_connection_introspection_django_table_names(self):
self.assertEqual(
set(self.connection.introspection.django_table_names()),
set(self.all_django_tables))
def test_connection_introspection_sequence_list(self):
self.assertEqual(self.connection.introspection.sequence_list(), [])
| 32.454545
| 78
| 0.712885
|
6ef44b81bf58bdc70549197e37f61af852f05994
| 637
|
py
|
Python
|
dencam/networking.py
|
icr-ctl/dencam
|
96618d275f88a5ead7e16475dba41e980fddca21
|
[
"MIT"
] | 2
|
2021-11-15T16:40:51.000Z
|
2021-12-02T05:58:25.000Z
|
dencam/networking.py
|
icr-ctl/dencam
|
96618d275f88a5ead7e16475dba41e980fddca21
|
[
"MIT"
] | 37
|
2020-01-31T02:43:54.000Z
|
2022-02-11T20:40:37.000Z
|
dencam/networking.py
|
icr-ctl/dencam
|
96618d275f88a5ead7e16475dba41e980fddca21
|
[
"MIT"
] | 3
|
2021-11-24T21:35:00.000Z
|
2022-02-10T13:44:32.000Z
|
import socket
import subprocess
import netifaces as ni
def get_network_info():
interfaces = ni.interfaces()
text = (socket.gethostname() + '\n')
for interface in interfaces:
try:
ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
except KeyError:
# This try block is a quick way to just skip an
# interface if it isn't connected.
continue
text += ('{}: {}\n'.format(interface, ip))
if interface == 'wlan0':
ssid = subprocess.check_output(['iwgetid'])
text += (ssid.decode('utf-8').split(' ')[-1])
return text
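# Minimal manual check of the helper above: running this module directly just
# prints the hostname/interface/SSID summary that get_network_info() assembles.
if __name__ == '__main__':
    print(get_network_info())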
| 26.541667
| 65
| 0.569859
|
eebd988ba99658643b83e7a28b083c7de4d4679b
| 9,775
|
py
|
Python
|
bin/ADFRsuite/lib/python2.7/site-packages/saga/messages/endpoint.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/lib/python2.7/site-packages/saga/messages/endpoint.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/lib/python2.7/site-packages/saga/messages/endpoint.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils.signatures as rus
import saga.adaptors.base as sab
import saga.attributes as sa
import saga.session as ss
import saga.task as st
import saga.url as surl
from saga.messages.constants import *
from saga.constants import SYNC, ASYNC, TASK
# ------------------------------------------------------------------------------
#
class Endpoint (sa.Attributes) :
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
rus.optional ((surl.Url, basestring)),
rus.optional (int, rus.nothing), # topology
rus.optional (int, rus.nothing), # reliability
rus.optional (int, rus.nothing), # atomicity
rus.optional (int, rus.nothing), # ordering
rus.optional (int, rus.nothing), # correctness
                rus.optional (ss.Session),
                rus.optional (sab.Base),
                rus.optional (dict),
                rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns (rus.nothing)
    def __init__ (self, url=None, reliability=None, topology=None,
                  atomicity=None, ordering=None, correctness=None,
                  session=None, _adaptor=None, _adaptor_state={}, _ttype=None) :
# param checks
if not topology : topology = ANY
if not reliability : reliability = ANY
if not atomicity : atomicity = ANY
if not ordering : ordering = ANY
if not correctness : correctness = ANY
url = surl.Url (url)
self._super = super (Endpoint, self)
        self._super.__init__ (url, session,
                              _adaptor, _adaptor_state, _ttype=_ttype)
# set attribute interface properties
self._attributes_allow_private (True)
self._attributes_camelcasing (True)
self._attributes_extensible (False)
# register properties with the attribute interface
self._attributes_register (URL , None, sa.STRING, sa.SCALAR, sa.READONLY)
self._attributes_register (RECEIVERS , None, sa.STRING, sa.VECTOR, sa.READONLY)
self._attributes_register (STATE , ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_register (TOPOLOGY , ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_register (RELIABILITY, ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_register (ATOMICITY , ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_register (ORDERING , ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_register (CORRECTNESS, ANY , sa.ENUM , sa.SCALAR, sa.READONLY)
self._attributes_set_enums (STATE , [OPEN, CLOSED ])
self._attributes_set_enums (TOPOLOGY , [POINT_TO_POINT ,
MULTICAST ,
PUBLISH_SUBSCRIBE,
PEER_TO_PEER ])
self._attributes_set_enums (RELIABILITY, [UNRELIABLE ,
CONSISTENT ,
SEMI_RELIABLE ,
RELIABLE ])
self._attributes_set_enums (ATOMICITY , [AT_MOST_ONCE ,
AT_LEAST_ONCE ,
EXACTLY_ONCE ])
self._attributes_set_enums (ORDERING , [UNVERIFIED ,
VERIFIED ])
self._attributes_set_enums (CORRECTNESS, [UNORDERED ,
ORDERED ,
GLOBALLY_ORDERED ])
# metrics
self._attributes_register (CONNECT , ANY, sa.STRING, sa.SCALAR, sa.READONLY)
self._attributes_register (CLOSE , ANY, sa.STRING, sa.SCALAR, sa.READONLY)
self._attributes_register (MESSAGE , ANY, sa.STRING, sa.SCALAR, sa.READONLY)
# --------------------------------------------------------------------------
#
@classmethod
@rus.takes ('Endpoint',
rus.optional ((surl.Url, basestring)),
rus.optional (int, rus.nothing), # topology
rus.optional (int, rus.nothing), # reliability
rus.optional (int, rus.nothing), # atomicity
rus.optional (int, rus.nothing), # ordering
rus.optional (int, rus.nothing), # correctness
rus.optional (ss.Session),
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns (st.Task)
    def create (cls, url=None, reliability=None, topology=None,
                atomicity=None, ordering=None, correctness=None,
                session=None, _ttype=None):
if not topology : topology = ANY
if not reliability : reliability = ANY
if not atomicity : atomicity = ANY
if not ordering : ordering = ANY
if not correctness : correctness = ANY
if not session :
session = ss.Session (default=True)
        return cls(url, topology, reliability, atomicity, ordering, correctness,
                   session, _ttype=_ttype)._init_task
# --------------------------------------------------------------------------
#
# class methods
#
@rus.takes ('Endpoint',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((surl.Url, st.Task))
def get_url (self, ttype=None) :
'''
ttype: saga.task.type enum
ret: saga.Url / saga.Task
Return the complete url pointing to the endpoint.
The call will return the complete url pointing to
this endpoint as a saga.Url object.
'''
return self._adaptor.get_url (ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.list_of(surl.Url), st.Task))
def get_receivers (self, ttype=None) :
return self._adaptor.get_receivers (ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.list_of (rus.anything), st.Task))
def _attribute_lister (self, ttype=None) :
return self._adaptor.attribute_lister ()
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
basestring,
int,
callable,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.anything, st.Task))
def _attribute_caller (self, key, id, cb, ttype=None) :
return self._adaptor.attribute_caller (key, id, cb)
# ----------------------------------------------------------------
#
# advert methods
#
@rus.takes ('Endpoint',
(surl.Url, basestring),
float,
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.nothing, st.Task))
def set_ttl (self, tgt=None, ttl=-1.0, ttype=None) :
"""
tgt : saga.Url / None
ttl : int
ttype: saga.task.type enum
ret: None / saga.Task
"""
if tgt : return self._adaptor.set_ttl (tgt, ttl, ttype=ttype)
else : return self._adaptor.set_ttl_self ( ttl, ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
rus.optional ((surl.Url, basestring)),
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((float, st.Task))
def get_ttl (self, tgt=None, ttype=None) :
"""
tgt : saga.Url / None
ttype: saga.task.type enum
ret: int / saga.Task
"""
if tgt : return self._adaptor.get_ttl (tgt, ttype=ttype)
else : return self._adaptor.get_ttl_self ( ttype=ttype)
# --------------------------------------------------------------------------
#
@rus.takes ('Endpoint',
rus.optional (basestring),
rus.optional (basestring),
rus.optional ((basestring, object)),
rus.optional (int, rus.nothing),
rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
@rus.returns ((rus.list_of (surl.Url), st.Task))
def find (self, name_pattern, attr_pattern=None, obj_type=None,
flags=RECURSIVE, ttype=None) :
"""
name_pattern: string
attr_pattern: string
obj_type: string
flags: flags enum
ret: list [saga.Url]
"""
if not flags : flags = 0
if attr_pattern or obj_type :
return self._adaptor.find_adverts (name_pattern, attr_pattern, obj_type, flags, ttype=ttype)
else :
            return self._adaptor.find (name_pattern, flags, ttype=ttype)
| 40.226337
| 104
| 0.479488
|
738a4d967fcf397869c7da35cffd6c6a1060e3d6
| 1,135
|
py
|
Python
|
runtests.py
|
gonzaloamadio/django-safedelete
|
3c12f3df926d39339d125e0fdfeeaf55d2ffa7d3
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
gonzaloamadio/django-safedelete
|
3c12f3df926d39339d125e0fdfeeaf55d2ffa7d3
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
gonzaloamadio/django-safedelete
|
3c12f3df926d39339d125e0fdfeeaf55d2ffa7d3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
if __name__ == '__main__':
print("\n#######################################")
print(" RUN TESTS WITHOUT BOOLEAN FIELD ")
print("#######################################\n")
os.environ['DJANGO_SETTINGS_MODULE'] = 'safedelete.tests.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
tests = ['safedelete.tests'] if len(sys.argv) == 1 else sys.argv[1:]
failures = test_runner.run_tests(tests)
if bool(failures):
sys.exit(bool(failures))
print("\n#######################################")
print(" RUN TESTS WITH BOOLEAN FIELD ON ")
print("#######################################\n")
os.environ['DJANGO_SETTINGS_MODULE'] = 'safedelete.tests.settings_use_boolean_field'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
tests = ['safedelete.tests'] if len(sys.argv) == 1 else sys.argv[1:]
failures = test_runner.run_tests(tests)
sys.exit(bool(failures))
| 32.428571
| 88
| 0.586784
|
a90ba1493de83d087f51a1cbbd8d4f8978868ccd
| 4,735
|
py
|
Python
|
src/cmds/create_plots.py
|
cryptonoob42/chia-blockchain
|
b6506a67d5b5e2e54c844074872e936dbde5a834
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/create_plots.py
|
cryptonoob42/chia-blockchain
|
b6506a67d5b5e2e54c844074872e936dbde5a834
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/create_plots.py
|
cryptonoob42/chia-blockchain
|
b6506a67d5b5e2e54c844074872e936dbde5a834
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from copy import deepcopy
from pathlib import Path
from blspy import PrivateKey, PublicKey
from chiapos import DiskPlotter
from src.types.proof_of_space import ProofOfSpace
from src.types.sized_bytes import bytes32
from src.util.config import config_path_for_filename, load_config, save_config
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.path import make_path_relative, mkdir, path_from_root
def main():
"""
Script for creating plots and adding them to the plot config file.
"""
root_path = DEFAULT_ROOT_PATH
plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
key_config_filename = config_path_for_filename(root_path, "keys.yaml")
parser = argparse.ArgumentParser(description="Chia plotting script.")
parser.add_argument("-k", "--size", help="Plot size", type=int, default=26)
parser.add_argument(
"-n", "--num_plots", help="Number of plots", type=int, default=1
)
parser.add_argument("-i", "--index", help="First plot index", type=int, default=0)
parser.add_argument(
"-p", "--pool_pub_key", help="Hex public key of pool", type=str, default=""
)
parser.add_argument(
"-t",
"--tmp_dir",
help="Temporary directory for plotting files (relative to final directory)",
type=Path,
default=Path("./plots.tmp"),
)
new_plots_root = path_from_root(
root_path,
load_config(root_path, "config.yaml")
.get("harvester", {})
.get("new_plot_root", "plots"),
)
parser.add_argument(
"-d",
"--final_dir",
help="Final directory for plots (relative or absolute)",
type=Path,
default=new_plots_root,
)
    # We need the keys file to access pool keys (if they exist) and the sk_seed.
args = parser.parse_args()
if not key_config_filename.exists():
raise RuntimeError("Keys not generated. Run `chia generate keys`")
# The seed is what will be used to generate a private key for each plot
key_config = load_config(root_path, key_config_filename)
sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])
pool_pk: PublicKey
if len(args.pool_pub_key) > 0:
# Use the provided pool public key, useful for using an external pool
pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
else:
# Use the pool public key from the config, useful for solo farming
pool_sk = PrivateKey.from_bytes(bytes.fromhex(key_config["pool_sks"][0]))
pool_pk = pool_sk.get_public_key()
print(
f"Creating {args.num_plots} plots, from index {args.index} to "
f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
)
tmp_dir = args.final_dir / args.tmp_dir
mkdir(tmp_dir)
mkdir(args.final_dir)
for i in range(args.index, args.index + args.num_plots):
# Generate a sk based on the seed, plot size (k), and index
sk: PrivateKey = PrivateKey.from_seed(
sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big")
)
# The plot seed is based on the pool and plot pks
plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
pool_pk, sk.get_public_key()
)
filename: str = f"plot-{i}-{args.size}-{plot_seed}.dat"
full_path: Path = args.final_dir / filename
if not full_path.exists():
# Creates the plot. This will take a long time for larger plots.
plotter: DiskPlotter = DiskPlotter()
plotter.create_plot_disk(
str(tmp_dir),
str(args.final_dir),
filename,
args.size,
bytes([]),
plot_seed,
)
else:
print(f"Plot {filename} already exists")
# Updates the config if necessary.
plot_config = load_config(root_path, plot_config_filename)
plot_config_plots_new = deepcopy(plot_config.get("plots", []))
relative_path = make_path_relative(full_path, root_path)
if (
relative_path not in plot_config_plots_new
and full_path not in plot_config_plots_new
):
plot_config_plots_new[str(full_path)] = {
"sk": bytes(sk).hex(),
"pool_pk": bytes(pool_pk).hex(),
}
plot_config["plots"].update(plot_config_plots_new)
# Dumps the new config to disk.
save_config(root_path, plot_config_filename, plot_config)
try:
tmp_dir.rmdir()
except Exception:
print(f"warning: couldn't delete {tmp_dir}")
if __name__ == "__main__":
main()
| 36.423077
| 104
| 0.639282
|
f75a77cf9d9c0d160d5e67a71c082139499d6512
| 8,242
|
py
|
Python
|
examples/twisted_service.py
|
bwind/pika
|
c00648bf40635c8e8032814f30d2488df44961ec
|
[
"BSD-3-Clause"
] | 1
|
2019-08-28T10:10:56.000Z
|
2019-08-28T10:10:56.000Z
|
examples/twisted_service.py
|
deslum/pika
|
d8af8a573b3535e02540c2e5a14c7b34e276adc0
|
[
"BSD-3-Clause"
] | null | null | null |
examples/twisted_service.py
|
deslum/pika
|
d8af8a573b3535e02540c2e5a14c7b34e276adc0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
# -*- coding:utf-8 -*-
# based on:
# - txamqp-helpers by Dan Siemon <dan@coverfire.com> (March 2010)
# http://git.coverfire.com/?p=txamqp-twistd.git;a=tree
# - Post by Brian Chandler
# https://groups.google.com/forum/#!topic/pika-python/o_deVmGondk
# - Pika Documentation
# https://pika.readthedocs.io/en/latest/examples/twisted_example.html
Fire up this test application via `twistd -ny twisted_service.py`
The application will answer requests sent to exchange "foobar" with any of the
routing_key values "request1", "request2", or "request3"
by publishing messages to the same exchange with routing_key "response"
When a routing_key of "task" is used on the exchange "foobar",
the application can asynchronously run a maximum of 2 tasks at once
as defined by PREFETCH_COUNT
"""
import logging
import sys
import pika
from pika import spec
from pika.adapters import twisted_connection
from twisted.internet import protocol
from twisted.application import internet
from twisted.application import service
from twisted.internet.defer import inlineCallbacks
from twisted.internet import ssl, defer, task
from twisted.python import log
from twisted.internet import reactor
PREFETCH_COUNT = 2
class PikaService(service.MultiService):
name = 'amqp'
def __init__(self, parameter):
service.MultiService.__init__(self)
self.parameters = parameter
def startService(self):
self.connect()
service.MultiService.startService(self)
def getFactory(self):
if len(self.services) > 0:
return self.services[0].factory
def connect(self):
f = PikaFactory(self.parameters)
if self.parameters.ssl_options:
s = ssl.ClientContextFactory()
serv = internet.SSLClient(host=self.parameters.host, port=self.parameters.port, factory=f, contextFactory=s)
else:
serv = internet.TCPClient(host=self.parameters.host, port=self.parameters.port, factory=f)
serv.factory = f
f.service = serv
name = '%s%s:%d' % ('ssl:' if self.parameters.ssl_options else '', self.parameters.host, self.parameters.port)
serv.__repr__ = lambda : '<AMQP Connection to %s>' % name
serv.setName(name)
serv.parent = self
self.addService(serv)
class PikaProtocol(twisted_connection.TwistedProtocolConnection):
connected = False
name = 'AMQP:Protocol'
@inlineCallbacks
def onConnected(self, connection):
self.channel = yield connection.channel()
yield self.channel.basic_qos(prefetch_count=PREFETCH_COUNT)
self.connected = True
yield self.channel.confirm_delivery()
for (exchange, routing_key, callback,) in self.factory.read_list:
yield self.setup_read(exchange, routing_key, callback)
self.send()
@inlineCallbacks
def read(self, exchange, routing_key, callback):
"""Add an exchange to the list of exchanges to read from."""
if self.connected:
yield self.setup_read(exchange, routing_key, callback)
@inlineCallbacks
def setup_read(self, exchange, routing_key, callback):
"""This function does the work to read from an exchange."""
if exchange:
yield self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True, auto_delete=False)
yield self.channel.queue_declare(queue=routing_key, durable=True)
if exchange:
yield self.channel.queue_bind(queue=routing_key, exchange=exchange)
(queue, consumer_tag,) = yield self.channel.basic_consume(queue=routing_key, auto_ack=False)
d = queue.get()
d.addCallback(self._read_item, queue, callback)
d.addErrback(self._read_item_err)
def _read_item(self, item, queue, callback):
"""Callback function which is called when an item is read."""
d = queue.get()
d.addCallback(self._read_item, queue, callback)
d.addErrback(self._read_item_err)
(channel, deliver, props, msg,) = item
log.msg('%s (%s): %s' % (deliver.exchange, deliver.routing_key, repr(msg)), system='Pika:<=')
d = defer.maybeDeferred(callback, item)
d.addCallbacks(
lambda _: channel.basic_ack(deliver.delivery_tag),
lambda _: channel.basic_nack(deliver.delivery_tag)
)
def _read_item_err(self, error):
print(error)
def send(self):
"""If connected, send all waiting messages."""
if self.connected:
while len(self.factory.queued_messages) > 0:
(exchange, r_key, message,) = self.factory.queued_messages.pop(0)
self.send_message(exchange, r_key, message)
@inlineCallbacks
def send_message(self, exchange, routing_key, msg):
"""Send a single message."""
log.msg('%s (%s): %s' % (exchange, routing_key, repr(msg)), system='Pika:=>')
yield self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True, auto_delete=False)
prop = spec.BasicProperties(delivery_mode=2)
try:
yield self.channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg, properties=prop)
except Exception as error:
log.msg('Error while sending message: %s' % error, system=self.name)
class PikaFactory(protocol.ReconnectingClientFactory):
name = 'AMQP:Factory'
def __init__(self, parameters):
self.parameters = parameters
self.client = None
self.queued_messages = []
self.read_list = []
def startedConnecting(self, connector):
log.msg('Started to connect.', system=self.name)
def buildProtocol(self, addr):
self.resetDelay()
log.msg('Connected', system=self.name)
self.client = PikaProtocol(self.parameters)
self.client.factory = self
self.client.ready.addCallback(self.client.onConnected)
return self.client
def clientConnectionLost(self, connector, reason):
log.msg('Lost connection. Reason: %s' % reason.value, system=self.name)
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
log.msg('Connection failed. Reason: %s' % reason.value, system=self.name)
protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def send_message(self, exchange = None, routing_key = None, message = None):
self.queued_messages.append((exchange, routing_key, message))
if self.client is not None:
self.client.send()
def read_messages(self, exchange, routing_key, callback):
"""Configure an exchange to be read from."""
self.read_list.append((exchange, routing_key, callback))
if self.client is not None:
self.client.read(exchange, routing_key, callback)
application = service.Application("pikaapplication")
ps = PikaService(pika.ConnectionParameters(host="localhost", virtual_host="/", credentials=pika.PlainCredentials("guest", "guest")))
ps.setServiceParent(application)
class TestService(service.Service):
def task(self, msg):
"""
Method for a time consuming task.
This function must return a deferred. If it is successfull,
a `basic.ack` will be sent to AMQP. If the task was not completed a
`basic.nack` will be sent. In this example it will always return
successfully after a 2 second pause.
"""
return task.deferLater(reactor, 2, lambda: log.msg("task completed"))
def respond(self, msg):
self.amqp.send_message('foobar', 'response', msg[3])
def startService(self):
self.amqp = self.parent.getServiceNamed("amqp").getFactory()
self.amqp.read_messages("foobar", "request1", self.respond)
self.amqp.read_messages("foobar", "request2", self.respond)
self.amqp.read_messages("foobar", "request3", self.respond)
self.amqp.read_messages("foobar", "task", self.task)
ts = TestService()
ts.setServiceParent(application)
observer = log.PythonLoggingObserver()
observer.start()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
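# Hypothetical helper, not wired into the service: it publishes one test
# message to the "foobar" exchange with routing_key "request1" over a plain
# blocking pika connection, matching the behaviour described in the module
# docstring. Call it from a separate Python shell while the twistd service is
# running; the localhost broker and default credentials are assumptions.
def _publish_example_request():
    conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = conn.channel()
    channel.exchange_declare(exchange='foobar', exchange_type='topic', durable=True)
    channel.basic_publish(exchange='foobar', routing_key='request1', body=b'ping')
    conn.close()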
| 37.634703
| 132
| 0.683329
|
cea04949bfa5a0fcad51edcae7223b68ae847183
| 2,619
|
py
|
Python
|
dist/pyecharts/charts/basic_charts/radar.py
|
dummerchen/epidemic
|
7b4522f7b2f36739a69ebbf71c91b36db29b8d6c
|
[
"MIT"
] | 1
|
2020-05-12T13:30:03.000Z
|
2020-05-12T13:30:03.000Z
|
dist/pyecharts/charts/basic_charts/radar.py
|
dummerchen/epidemic
|
7b4522f7b2f36739a69ebbf71c91b36db29b8d6c
|
[
"MIT"
] | null | null | null |
dist/pyecharts/charts/basic_charts/radar.py
|
dummerchen/epidemic
|
7b4522f7b2f36739a69ebbf71c91b36db29b8d6c
|
[
"MIT"
] | null | null | null |
from ... import options as opts
from ... import types
from ...charts.chart import Chart
from ...globals import ChartType
class Radar(Chart):
"""
<<< Radar >>>
Radar maps are mainly used to represent multivariable data.
"""
def add_schema(
self,
schema: types.Sequence[types.Union[opts.RadarIndicatorItem, dict]],
shape: types.Optional[str] = None,
center: types.Optional[types.Sequence] = None,
radius: types.Optional[types.Union[types.Sequence, str]] = None,
textstyle_opts: types.TextStyle = opts.TextStyleOpts(),
splitline_opt: types.SplitLine = opts.SplitLineOpts(is_show=True),
splitarea_opt: types.SplitArea = opts.SplitAreaOpts(),
axisline_opt: types.AxisLine = opts.AxisLineOpts(),
radiusaxis_opts: types.RadiusAxis = None,
angleaxis_opts: types.AngleAxis = None,
polar_opts: types.Polar = None,
):
self.options.update(
radiusAxis=radiusaxis_opts, angleAxis=angleaxis_opts, polar=polar_opts
)
indicators = []
for s in schema:
if isinstance(s, opts.RadarIndicatorItem):
s = s.opts
indicators.append(s)
self.options.update(
radar={
"indicator": indicators,
"shape": shape,
"center": center,
"radius": radius,
"name": {"textStyle": textstyle_opts},
"splitLine": splitline_opt,
"splitArea": splitarea_opt,
"axisLine": axisline_opt,
}
)
return self
def add(
self,
series_name: str,
data: types.Sequence,
*,
is_selected: bool = True,
symbol: types.Optional[str] = None,
color: types.Optional[str] = None,
label_opts: opts.LabelOpts = opts.LabelOpts(),
linestyle_opts: opts.LineStyleOpts = opts.LineStyleOpts(),
areastyle_opts: opts.AreaStyleOpts = opts.AreaStyleOpts(),
tooltip_opts: types.Tooltip = None,
):
self._append_legend(series_name, is_selected)
self.options.get("series").append(
{
"type": ChartType.RADAR,
"name": series_name,
"data": data,
"symbol": symbol,
"label": label_opts,
"itemStyle": {"normal": {"color": color}},
"lineStyle": linestyle_opts,
"areaStyle": areastyle_opts,
"tooltip": tooltip_opts,
}
)
return self
| 32.333333
| 82
| 0.556701
|
ca0d2209d959d1bdd85ae2d1743737a585a73dd2
| 5,910
|
py
|
Python
|
script.module.fantastic/lib/resources/lib/sources/en/123netflix.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
script.module.fantastic/lib/resources/lib/sources/en/123netflix.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.fantastic/lib/resources/lib/sources/en/123netflix.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, urllib, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123netflix.com']
self.base_link = 'http://123netflix.com'
self.search_link = '/search-movies/%s.html'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('.+?elease:\s*(\d{4})</', i),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][0]) for i in r if
(cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
url = r[0][0]
return url
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0]) for i in r if
cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
url = r[0][0]
except:
pass
data = client.request(url)
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
return url[0][1]
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = cache.get(client.request, 1, url)
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
try:
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
r = client.parseDOM(r, 'div', {'class': 'server_line'})
r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
if r:
for i in r:
try:
host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
url = i[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
                        if 'other' in host: continue
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
return sources
except Exception:
return
def resolve(self, url):
if self.base_link in url:
url = client.request(url)
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url
| 40.758621
| 135
| 0.488663
|
027693db73def88fdc7af12ff3dd5cbdf252c3ba
| 264
|
py
|
Python
|
ineedtiles.py
|
Marinkovich77/pands---problems
|
be732f4e9e3195c3d04947428d179144768a0236
|
[
"MIT"
] | null | null | null |
ineedtiles.py
|
Marinkovich77/pands---problems
|
be732f4e9e3195c3d04947428d179144768a0236
|
[
"MIT"
] | null | null | null |
ineedtiles.py
|
Marinkovich77/pands---problems
|
be732f4e9e3195c3d04947428d179144768a0236
|
[
"MIT"
] | null | null | null |
#This program calculates how many tiles you
#need when tiling a floor (in m2)
length = float(input("Enter room length:"))
width = float(input("Enter room width:"))
area = length * width
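# Multiply the area by 1.05, presumably to allow about 5% extra for cuts and waste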
needed = area * 1.05
print("You need", needed, "square metres of tiles")
| 24
| 52
| 0.704545
|
ec2044da94ad30371adc41b552c4ff1c514d9aad
| 4,634
|
py
|
Python
|
LSDGDALBatchProcessing.py
|
simon-m-mudd/LSDMappingTools
|
d9137710ea18e54f3dc5b6782c5696cafdd2999f
|
[
"MIT"
] | 34
|
2017-01-31T17:03:26.000Z
|
2021-09-15T17:23:21.000Z
|
LSDGDALBatchProcessing.py
|
simon-m-mudd/LSDMappingTools
|
d9137710ea18e54f3dc5b6782c5696cafdd2999f
|
[
"MIT"
] | 14
|
2017-01-11T19:45:08.000Z
|
2020-11-03T16:36:38.000Z
|
LSDGDALBatchProcessing.py
|
LSDtopotools/LSDMappingTools
|
d9137710ea18e54f3dc5b6782c5696cafdd2999f
|
[
"MIT"
] | 21
|
2015-11-26T10:24:19.000Z
|
2021-09-15T17:23:22.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 09 16:52:27 2015
@author: smudd
"""
import numpy as np
from glob import glob
import LSDOSystemTools as LSDost
import os
import shutil
import subprocess
# This function looks for all the files of a certain format in a directory and
# then translates them into a new format into a subdirectory named
# after the target format.
def GDALBatchConvert(DataDirectory,raster_format,target_format):
NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)
# Check the target format
if target_format == "ENVI":
target_extension = ".bil"
elif target_format == "EHdr":
target_extension = ".bil"
elif target_format == "GTiff":
target_extension = ".tiff"
else:
        print "You have not selected a valid raster format!"
print "Options are ENVI, EHdr and GTiff"
target_extension = "NULL"
# now make a directory
if target_extension != "NULL":
target_directory = DataDirectory+target_format
if not os.access(target_directory,os.F_OK):
print "Making path: "
os.mkdir(target_directory)
print "I made a directory: " + target_directory
else:
print "Path: " +target_directory+" already exists."
# Now check the source format
if raster_format == "ENVI":
raster_extension = ".bil"
elif raster_format == "EHdr":
raster_extension = ".bil"
elif raster_format == "GTiff":
raster_extension = ".tif"
else:
        print "You have not selected a valid raster format!"
print "Options are ENVI, EHdr and GTiff"
raster_extension = "NULL"
# find all the dataset of the source format
print "The data directory is: " + DataDirectory
print "The raster extension is: " + raster_extension
if raster_extension != "NULL":
for FileName in glob(DataDirectory+"*"+raster_extension):
print "found file: " + FileName
subprocess.call(['gdalinfo',FileName])
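# Hedged usage sketch (directory and formats are illustrative):
#   GDALBatchConvert("/data/DEMs/", "ENVI", "GTiff")
# Note that, as written, the loop above only inspects each matching raster with
# gdalinfo; a gdal_translate step would still be needed to write converted copies.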
def GDALBatchMerge(DataDirectory,merge_subfolder_name,merge_filename,raster_format,target_format):
NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)
# get the name of the data directory into which the file should be merged
merge_DataDirectory = DataDirectory+merge_subfolder_name
mDataDriectory = LSDost.AppendSepToDirectoryPath(merge_DataDirectory)
# make the directory
if not os.access(mDataDriectory,os.F_OK):
print "Making path: "
os.mkdir(mDataDriectory)
print "I made a directory: " + mDataDriectory
else:
print "Path: " +mDataDriectory+" already exists."
# Check the source format
if raster_format == "ENVI":
raster_extension = ".bil"
elif raster_format == "EHdr":
raster_extension = ".bil"
elif raster_format == "GTiff":
raster_extension = ".tif"
else:
        print "You have not selected a valid raster format!"
print "Options are ENVI, EHdr and GTiff"
raster_extension = "NULL"
# Check the target format. Default is geotiff
if target_format == "ENVI":
target_extension = ".bil"
elif target_format == "EHdr":
target_extension = ".bil"
elif target_format == "GTiff":
target_extension = ".tif"
else:
        print "You have not selected a valid raster format!"
        print "Defaulting to GTiff"
        target_format = "GTiff"
target_extension = ".tif"
# set the name of the target file
target_FileName = mDataDriectory+merge_filename+target_extension
# find all the dataset of the source format
print "The data directory is: " + DataDirectory
print "The raster extension is: " + raster_extension
if raster_extension != "NULL":
# Set up the list for holding command prompt commands
command_prompt = []
command_prompt.append("gdal_merge.py")
command_prompt.append("-of")
command_prompt.append(target_format)
command_prompt.append("-o")
command_prompt.append(target_FileName)
for FileName in glob(DataDirectory+"*"+raster_extension):
print "found file: " + FileName
command_prompt.append(FileName)
print "The subprocess call is: "
print command_prompt
subprocess.call(command_prompt)
| 34.842105
| 98
| 0.646741
|
d4de43f82d90871041d37da0cb495053b274f467
| 22,345
|
py
|
Python
|
swagger_django_generator/generator.py
|
ClizzyJ/swagger-django-generator
|
a456b0aac949fbb34e6b45cbae19e0e2a6c02a41
|
[
"BSD-3-Clause"
] | null | null | null |
swagger_django_generator/generator.py
|
ClizzyJ/swagger-django-generator
|
a456b0aac949fbb34e6b45cbae19e0e2a6c02a41
|
[
"BSD-3-Clause"
] | null | null | null |
swagger_django_generator/generator.py
|
ClizzyJ/swagger-django-generator
|
a456b0aac949fbb34e6b45cbae19e0e2a6c02a41
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import json
import os
import re
import sys
import parser
import click
import jinja2
from swagger_parser import SwaggerParser
DEFAULT_OUTPUT_DIR = "./generated"
DEFAULT_MODULE = "generated"
# Defaults used when the path is "/".
ROOT_CLASS_NAME = u"Root"
ROOT_OPERATION = u"root"
# Known extensions in lowercase
YAML_EXTENSIONS = ["yaml", "yml"]
JSON_EXTENSIONS = ["json"]
# Choices provided when specifying the specification format
SPEC_JSON = "json"
SPEC_YAML = "yaml"
SPEC_CHOICES = [SPEC_JSON, SPEC_YAML]
BACKEND_CHOICES = ["django"]
major, minor = sys.version_info[0:2]
if major > 3 or major == 3 and minor >= 5:
BACKEND_CHOICES.append("aiohttp")
# from swagger_tester import swagger_test
# parser.base_path contains the base URL, e.g. "/portal/v1"
# parser.paths is a dictionary of the form:
# {
# path: {
# http_verb: {
# "consumes": ["application/json"],
# "produces": ["application/json"],
# "responses": {
# status_code: {
# "description": "",
# "schema": JSONSchema
# },
# ...
# },
# "parameters": {
# name: {
# "description": "The description",
# "in": "path",
# "name": name,
# "required": True,
# "type": "string"
# },
# "body": {
# "description": "The message payload",
# "in": "body",
# "name": "body",
# "required": True,
# "schema": {
# "$ref": "#/definitions/message",
# "x-scope": [""]
# }
# },
# ...
# },
# },
# ...
#
# parser.operation is a dictionary of the form:
# {operation: (path, http_verb, tag)}
#
# * Paths map to class based views.
# * (path, http_verb) combinations map to operations.
# Swagger fields used in parameter definition, but which are unknown to jsonschema.
_SWAGGER_FIELDS = frozenset(["name", "in", "required", "collectionFormat", "description"])
def clean_schema(schema):
# type: (Dict) -> Dict
"""Transform a Swagger parameter definition to a valid JSONSchema
Remove known Swagger fields as well as any custom definitions
(starting with "x-").
"""
return {k: v for k, v in schema.items()
if k not in _SWAGGER_FIELDS and not k.lower().startswith("x-")}
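# Illustrative only (the parameter definition is hypothetical): given
#   {"name": "page", "in": "query", "required": False, "type": "integer", "x-nullable": True}
# clean_schema() keeps just {"type": "integer"}, i.e. a plain JSONSchema fragment.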
SEPARATORS = {
"pipes": "|",
"tsv": "\t",
"ssv": " ",
"csv": ","
}
def parse_array(schema):
# type: (Dict) -> string
return '{name} = {name}.split("{separator}")'.format(
name=schema["name"],
separator=SEPARATORS[schema.get("collectionFormat", "csv")]
)
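# Illustrative only (the parameter definition is hypothetical): for
#   {"name": "tags", "collectionFormat": "pipes"}
# parse_array() renders the line 'tags = tags.split("|")' for the generated view code.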
def capitalize_splitter(value):
parts = re.findall('[A-Z][^A-Z]*', value)
parts_lower = [x.lower() for x in parts]
return '-'.join(parts_lower)
def render_to_string(backend, filename, context):
# type: (str, str, Dict) -> str
"""
Render a template using the specified context
:param backend: The backend for which the template is rendered
:param filename: The template name
:param context: The data to use when rendering the template
:return: The rendered template as a string
"""
template_directory = "./swagger_django_generator/templates/{}".format(backend)
loaders = [jinja2.FileSystemLoader(template_directory)]
try:
import swagger_django_generator
loaders.append(jinja2.PackageLoader("swagger_django_generator", "templates/{}".format(backend)))
except ImportError:
pass
environment = jinja2.Environment(
loader=jinja2.ChoiceLoader(loaders),
trim_blocks=True,
lstrip_blocks=True,
)
environment.filters["clean_schema"] = clean_schema
environment.filters["parse_array"] = parse_array
environment.filters["capitalize_splitter"] = capitalize_splitter
return environment.get_template(filename).render(context)
def path_to_class_name(path):
# type: (unicode) -> unicode
"""
We map paths (typically only the relative part) to a canonical
class name. In the event that the path is "/", ROOT_CLASS_NAME will be
returned.
:param path: A path of the form "/some/path/{foo_id}/bar/{barId}/"
:return: A class name of the form "SomePathFooIdBarBarId"
"""
character_map = {
ord("{"): None,
ord("}"): None,
ord("_"): u"/"
}
sanitised = path.translate(character_map)
class_name = u"".join(
# Uppercase the first letter of each non-empty word, while
# preserving the case of the letters thereafter.
p[0].upper() + p[1:] for p in sanitised.split("/") if p
)
return class_name or ROOT_CLASS_NAME
def path_to_operation(path, verb):
# type: (unicode, unicode) -> unicode
"""
We map paths (typically only the relative part) to a canonical
operation name. The operation name is used as the name of the function
that must provide the server-side logic.
    Typically the operation name is provided in the Swagger spec via the
`operationId` field. This function is used as a fallback mechanism when
it is not defined explicitly.
:param path: A path of the form "/some/path/{id}/foo/{bar}/"
:param verb: The HTTP verb, e.g. "get"
:return: An operation name of the form "get_some_path_id_foo_bar"
"""
character_map = {
ord("{"): None,
ord("}"): None,
ord("_"): u"/"
}
if path == u"/":
operation = ROOT_OPERATION
else:
sanitised = path.translate(character_map)
operation = u"_".join(p for p in sanitised.split("/"))
return "{}_{}".format(verb, operation)
def fixup_parameters(url, backend):
"""
Parameters in the Swagger spec paths are wrapped in curly braces.
    We change these to a named regex match for use by Django.
E.g. "/foo/{bar_id}/" => "/foo/(?P<bar_id>.+)/"
:param url: The URL from the Swagger spec
:param backend: The backend for which to generate the parameters
:return: The URL with parameters changed into regexes.
"""
result = url
if backend == "django":
result = url.replace("{", "(?P<").replace("}", ">.+)")
return result
class Generator(object):
PATH_VERB_OPERATION_MAP = {}
def __init__(self, backend, module_name=DEFAULT_MODULE):
self.backend = backend
self.parser = None
self.module_name = module_name
self._classes = None
def load_specification(self, specification_path, spec_format=None):
# If the swagger spec format is not specified explicitly, we try to
# derive it from the specification path
if not spec_format:
filename = os.path.basename(specification_path)
extension = filename.rsplit(".", 1)[-1]
if extension in YAML_EXTENSIONS:
spec_format = SPEC_YAML
elif extension in JSON_EXTENSIONS:
spec_format = SPEC_JSON
else:
raise RuntimeError("Could not infer specification format. Use "
"--spec-format to specify it explicitly.")
click.secho("Using spec format '{}'".format(spec_format), fg="green")
if spec_format == SPEC_YAML:
with open(specification_path, "r") as f:
self.parser = SwaggerParser(swagger_yaml=f)
else:
self.parser = SwaggerParser(swagger_path=specification_path)
# Build (path, http_verb) => operation mapping
self.PATH_VERB_OPERATION_MAP = {
(path, http_verb): operation
for operation, (path, http_verb, tag) in
self.parser.operation.items()
}
self._make_class_definitions()
self._make_security_definitions()
def resolve_schema_references(self, definition):
# type: (Generator, Dict) -> None
"""
JSONSchema definitions may contain references.
This function replaces all references with their full definitions.
In-place mods are made.
:param definition: A JSONSchema definition
        :return: The expanded definition.
"""
if "$ref" in definition:
schema_reference = definition.pop("$ref")
section, name = schema_reference.split("/")[-2:]
referenced_definition = self.parser.specification[section][name]
definition.update(referenced_definition)
for value in definition.values():
if isinstance(value, dict):
self.resolve_schema_references(value)
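    # Illustrative only (the specification snippet is hypothetical): with
    #   self.parser.specification = {"definitions": {"message": {"type": "object"}}}
    # calling resolve_schema_references({"$ref": "#/definitions/message"}) rewrites
    # the passed dict in place to {"type": "object"}.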
def _make_security_definitions(self):
"""Process available security definition types:
* basic
* apiKey + JWT/Bearer option as a definition
- for now there's no support for OAuth2
- for now only 'in: header' is implemented
"""
self.security_defs = {}
sec_defs = self.parser.specification.get("securityDefinitions", {})
for sec_desc, sec_type in sec_defs.items():
if sec_type['type'] in ['basic', 'apiKey']:
if sec_type.get('in') == 'header':
sec_def = {'desc': sec_desc}
sec_def.update(sec_type)
self.security_defs[sec_type['type']] = sec_def
def _make_class_definitions(self):
self._classes = {}
for path, verbs in self.parser.paths.items():
relative_url = path.replace(self.parser.base_path, "")
class_name = path_to_class_name(relative_url)
self._classes[class_name] = {}
for verb, io in verbs.items(): # io => input/output options
# Look up the name of the operation and construct one if not found
operation = self.PATH_VERB_OPERATION_MAP.get(
(path, verb), path_to_operation(path, verb)
)
payload = {
"operation": operation,
"required_args": [],
"optional_args": [],
"form_data": [],
"response_schema": "schemas.__UNSPECIFIED__",
"secure": False,
}
# Add arguments
for name, detail in io["parameters"].items():
location = detail["in"]
click.secho(location)
if location == "path":
section = "required_args" if detail["required"] else \
"optional_args"
payload[section].append(detail)
elif location == "header":
# continue;
break
elif location == "query":
section = "required_args" if detail["required"] else \
"optional_args"
payload[section].append(detail)
elif location == "body":
# There cannot be more than one body parameter
payload["body"] = copy.deepcopy(detail)
schema = payload["body"]["schema"]
schema_reference = schema.get("$ref", None)
if schema_reference:
# TODO: Fix this crude lookup code
# It expects a reference to have the form
# "#/definitions/name"
lookup = schema_reference.split("/")[-1]
payload["body"]["schema"] = "schemas.{}".format(
lookup)
else:
# Inline schema definitions do not reference the
# schema module. For now the definitions are
# (inefficiently) inlined in the generated
# view. TODO: Optimise by loading these schemas
# on initialisation and referencing it thereafter.
                            # Also, it would be nice to be able to reference
# the definitions in schemas.py...will significantly
# reduce size of the generated code in views.py.
self.resolve_schema_references(schema)
                            payload["body"]["schema"] = \
                                'json.loads("""{}""",strict=False)'.format(
                                    json.dumps(schema, indent=4, sort_keys=True)
                                )
elif location == "formData":
payload["form_data"].append(detail)
else:
continue
# msg = "Code generation for parameter type '{}' not " \
# "implemented yet. Operation '{}' parameter '{" \
# "}'".format(location, operation, name)
# click.secho(msg, fg="red")
click.secho("test",fg="red")
# Added response
for name, detail in io["responses"].items():
if name == "default":
continue
elif 200 <= int(name) < 300 and "schema" in detail:
# There should only be one response code defined in
# the 200 to 299 range.
schema = copy.deepcopy(detail["schema"])
schema_reference = schema.get("$ref", None)
if schema_reference:
# TODO: Fix this crude lookup code
# It expects a reference to have the form
# "#/definitions/name"
lookup = schema_reference.split("/")[-1]
payload["response_schema"] = "schemas.{}".format(lookup)
else:
# Inline schema definitions do not reference the
# schema module. For now the definitions are
# (inefficiently) inlined in the generated
# view. TODO: Optimise by loading these schemas
# on initialisation and referencing it thereafter.
                            # Also, it would be nice to be able to reference
# the definitions in schemas.py...will significantly
# reduce size of the generated code in views.py.
self.resolve_schema_references(schema)
payload["response_schema"] = \
'json.loads("""{}""",strict=False)'.format(
json.dumps(schema, indent=4, sort_keys=True)
)
# TODO: At this stage we do not look at the type of security, we
# simply flag that it should be secured.
# Also, the parser does not contain the security info,
# so we have to refer back to the original spec.
if "security" in self.parser.specification:
# Global security indicator
payload["secure"] = True
else:
# Path and verb specific indicator
specref = self.parser.specification["paths"].get(
relative_url, {}
).get(verb, {})
payload["secure"] = "security" in specref
self._classes[class_name][verb] = payload
def generate_urls(self):
# type: (Generator) -> str
"""
Generate a `urls.py` file from the given specification.
:return: str
"""
relative_urls = [path.replace(self.parser.base_path + "/", "")
for path in self.parser.paths]
entries = {
fixup_parameters(relative_url, self.backend): path_to_class_name(relative_url)
for relative_url in relative_urls
}
return render_to_string(
self.backend, "urls.py", {
"entries": entries,
"module": self.module_name
})
def generate_schemas(self):
# type: (Generator) -> str
"""
Generate a `schemas.py` file from the given specification.
:return: str
"""
schemas = {}
for name, definition in self.parser.specification.get("definitions",
{}).items():
schema = copy.deepcopy(definition)
self.resolve_schema_references(schema)
schemas[name] = json.dumps(schema, indent=4, sort_keys=True)
return render_to_string(
self.backend, "schemas.py", {
"schemas": schemas,
"module": self.module_name
})
def generate_views(self):
# type: (Generator) -> str
"""
Generate a `views.py` file from the given specification.
:return: str
"""
return render_to_string(
self.backend, "views.py", {
"classes": self._classes,
'host': self.parser.specification.get('host'),
'basePath': self.parser.specification['basePath'],
"module": self.module_name,
"specification": json.dumps(self.parser.specification, indent=4,
sort_keys=True).replace("\\", "\\\\"),
})
def generate_stubs(self):
# type: (Generator) -> str
"""
Generate a `stubs.py` file from the given specification.
:return: str
"""
return render_to_string(
self.backend, "stubs.py", {
"classes": self._classes,
"module": self.module_name
})
def generate_utils(self):
# type: (Generator) -> str
"""
Generate a `utils.py` file from the given specification.
:return: str
"""
return render_to_string(
self.backend,
"utils.py",
{
"security_defs": self.security_defs
},
)
@click.command()
@click.argument("specification_path", type=click.Path(dir_okay=False, exists=True))
@click.option("--spec-format", type=click.Choice(SPEC_CHOICES))
@click.option("--backend", type=click.Choice(BACKEND_CHOICES),
default="django")
@click.option("--verbose/--no-verbose", default=False)
@click.option("--output-dir", type=click.Path(file_okay=False, exists=True,
writable=True),
default=DEFAULT_OUTPUT_DIR)
@click.option("--module-name", type=str, default=DEFAULT_MODULE,
help="The name of the module where the generated code will be "
"used, e.g. myproject.some_application")
@click.option("--urls-file", type=str, default="urls.py",
help="Use an alternative filename for the urls.")
@click.option("--views-file", type=str, default="views.py",
help="Use an alternative filename for the views.")
@click.option("--schemas-file", type=str, default="schemas.py",
help="Use an alternative filename for the schemas.")
@click.option("--utils-file", type=str, default="utils.py",
help="Use an alternative filename for the utilities.")
@click.option("--stubs-file", type=str, default="stubs.py",
help="Use an alternative filename for the utilities.")
def main(specification_path, spec_format, backend, verbose, output_dir, module_name,
urls_file, views_file, schemas_file, utils_file, stubs_file):
generator = Generator(backend, module_name=module_name)
try:
click.secho("Loading specification file...", fg="green")
generator.load_specification(specification_path, spec_format)
click.secho("Generating URLs file...", fg="green")
with open(os.path.join(output_dir, urls_file), "w") as f:
data = generator.generate_urls()
f.write(data)
if verbose:
print(data)
click.secho("Generating views file...", fg="green")
with open(os.path.join(output_dir, views_file), "w") as f:
data = generator.generate_views()
f.write(data)
if verbose:
print(data)
click.secho("Generating schemas file...", fg="green")
with open(os.path.join(output_dir, schemas_file), "w") as f:
data = generator.generate_schemas()
f.write(data)
if verbose:
print(data)
click.secho("Generating utils file...", fg="green")
with open(os.path.join(output_dir, utils_file), "w") as f:
data = generator.generate_utils()
f.write(data)
if verbose:
print(data)
click.secho("Generating stubs file...", fg="green")
with open(os.path.join(output_dir, stubs_file), "w") as f:
data = generator.generate_stubs()
f.write(data)
if verbose:
print(data)
click.secho("To perform validation for uri, date-time and color formats, install the "
"packages indicated in the link below in YOUR project:")
click.secho("http://python-jsonschema.readthedocs.io/en/stable/validate/#jsonschema.FormatChecker")
click.secho("This tool adds validation for the UUID format in {}.".format(utils_file))
click.secho("Done.", fg="green")
except Exception as e:
click.secho(str(e), fg="red")
click.secho("""
If you get schema validation errors from a yaml Swagger spec that passes validation on other
validators, it may be because of single apostrophes (') used in some descriptions. The
parser used does not like it at all.
""")
if __name__ == "__main__":
main()
| 39.548673
| 107
| 0.54567
|
00ab51945850eed752d1d940ec4d28ae8a5faea7
| 23,423
|
py
|
Python
|
Lib/site-packages/greenlet/tests/test_greenlet.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
Lib/site-packages/greenlet/tests/test_greenlet.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
Lib/site-packages/greenlet/tests/test_greenlet.py
|
KarmaScripter/PiggyPy
|
25ba1d0c8933a0cb655f09db6c228f74f4d52894
|
[
"MIT"
] | null | null | null |
import gc
import sys
import time
import threading
import unittest
from abc import ABCMeta, abstractmethod
from greenlet import greenlet
# We manually manage locks in many tests
# pylint:disable=consider-using-with
class SomeError(Exception):
pass
def fmain(seen):
try:
greenlet.getcurrent().parent.switch()
except:
seen.append(sys.exc_info()[0])
raise
raise SomeError
def send_exception(g, exc):
# note: send_exception(g, exc) can be now done with g.throw(exc).
    # the purpose of this test is to explicitly check the propagation rules.
def crasher(exc):
raise exc
g1 = greenlet(crasher, parent=g)
g1.switch(exc)
class TestGreenlet(unittest.TestCase):
def test_simple(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.append(3)
g = greenlet(f)
lst.append(0)
g.switch()
lst.append(2)
g.switch()
lst.append(4)
self.assertEqual(lst, list(range(5)))
def test_parent_equals_None(self):
g = greenlet(parent=None)
self.assertIsNotNone(g)
self.assertIs(g.parent, greenlet.getcurrent())
def test_run_equals_None(self):
g = greenlet(run=None)
self.assertIsNotNone(g)
self.assertIsNone(g.run)
def test_two_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
lst.extend([1, 1])
g = greenlet(f)
h = greenlet(f)
g.switch()
self.assertEqual(len(lst), 1)
h.switch()
self.assertEqual(len(lst), 2)
h.switch()
self.assertEqual(len(lst), 4)
self.assertEqual(h.dead, True)
g.switch()
self.assertEqual(len(lst), 6)
self.assertEqual(g.dead, True)
def test_two_recursive_children(self):
lst = []
def f():
lst.append(1)
greenlet.getcurrent().parent.switch()
def g():
lst.append(1)
g = greenlet(f)
g.switch()
lst.append(1)
g = greenlet(g)
g.switch()
self.assertEqual(len(lst), 3)
self.assertEqual(sys.getrefcount(g), 2)
def test_threads(self):
success = []
def f():
self.test_simple()
success.append(True)
ths = [threading.Thread(target=f) for i in range(10)]
for th in ths:
th.start()
for th in ths:
th.join()
self.assertEqual(len(success), len(ths))
def test_exception(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
g2.parent = g1
self.assertEqual(seen, [])
self.assertRaises(SomeError, g2.switch)
self.assertEqual(seen, [SomeError])
g2.switch()
self.assertEqual(seen, [SomeError])
def test_send_exception(self):
seen = []
g1 = greenlet(fmain)
g1.switch(seen)
self.assertRaises(KeyError, send_exception, g1, KeyError)
self.assertEqual(seen, [KeyError])
def test_dealloc(self):
seen = []
g1 = greenlet(fmain)
g2 = greenlet(fmain)
g1.switch(seen)
g2.switch(seen)
self.assertEqual(seen, [])
del g1
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit])
del g2
gc.collect()
self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit])
def test_dealloc_other_thread(self):
seen = []
someref = []
lock = threading.Lock()
lock.acquire()
lock2 = threading.Lock()
lock2.acquire()
def f():
g1 = greenlet(fmain)
g1.switch(seen)
someref.append(g1)
del g1
gc.collect()
lock.release()
lock2.acquire()
greenlet() # trigger release
lock.release()
lock2.acquire()
t = threading.Thread(target=f)
t.start()
lock.acquire()
self.assertEqual(seen, [])
self.assertEqual(len(someref), 1)
del someref[:]
gc.collect()
# g1 is not released immediately because it's from another thread
self.assertEqual(seen, [])
lock2.release()
lock.acquire()
self.assertEqual(seen, [greenlet.GreenletExit])
lock2.release()
t.join()
def test_frame(self):
def f1():
            f = sys._getframe(0) # pylint:disable=protected-access
self.assertEqual(f.f_back, None)
greenlet.getcurrent().parent.switch(f)
return "meaning of life"
g = greenlet(f1)
frame = g.switch()
self.assertTrue(frame is g.gr_frame)
self.assertTrue(g)
from_g = g.switch()
self.assertFalse(g)
self.assertEqual(from_g, 'meaning of life')
self.assertEqual(g.gr_frame, None)
def test_thread_bug(self):
def runner(x):
g = greenlet(lambda: time.sleep(x))
g.switch()
t1 = threading.Thread(target=runner, args=(0.2,))
t2 = threading.Thread(target=runner, args=(0.3,))
t1.start()
t2.start()
t1.join()
t2.join()
def test_switch_kwargs(self):
def run(a, b):
self.assertEqual(a, 4)
self.assertEqual(b, 2)
return 42
x = greenlet(run).switch(a=4, b=2)
self.assertEqual(x, 42)
def test_switch_kwargs_to_parent(self):
def run(x):
greenlet.getcurrent().parent.switch(x=x)
greenlet.getcurrent().parent.switch(2, x=3)
return x, x ** 2
g = greenlet(run)
self.assertEqual({'x': 3}, g.switch(3))
self.assertEqual(((2,), {'x': 3}), g.switch())
self.assertEqual((3, 9), g.switch())
def test_switch_to_another_thread(self):
data = {}
error = None
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
try:
data['g'].switch()
except greenlet.error:
error = sys.exc_info()[1]
self.assertIsNotNone(error, "greenlet.error was not raised!")
done_event.set()
thread.join()
def test_exc_state(self):
def f():
try:
raise ValueError('fun')
except: # pylint:disable=bare-except
exc_info = sys.exc_info()
greenlet(h).switch()
self.assertEqual(exc_info, sys.exc_info())
def h():
self.assertEqual(sys.exc_info(), (None, None, None))
greenlet(f).switch()
def test_instance_dict(self):
def f():
greenlet.getcurrent().test = 42
def deldict(g):
del g.__dict__
def setdict(g, value):
g.__dict__ = value
g = greenlet(f)
self.assertEqual(g.__dict__, {})
g.switch()
self.assertEqual(g.test, 42)
self.assertEqual(g.__dict__, {'test': 42})
g.__dict__ = g.__dict__
self.assertEqual(g.__dict__, {'test': 42})
self.assertRaises(TypeError, deldict, g)
self.assertRaises(TypeError, setdict, g, 42)
def test_threaded_reparent(self):
data = {}
created_event = threading.Event()
done_event = threading.Event()
def run():
data['g'] = greenlet(lambda: None)
created_event.set()
done_event.wait()
def blank():
greenlet.getcurrent().parent.switch()
def setparent(g, value):
g.parent = value
thread = threading.Thread(target=run)
thread.start()
created_event.wait()
g = greenlet(blank)
g.switch()
self.assertRaises(ValueError, setparent, g, data['g'])
done_event.set()
thread.join()
def test_deepcopy(self):
import copy
self.assertRaises(TypeError, copy.copy, greenlet())
self.assertRaises(TypeError, copy.deepcopy, greenlet())
def test_parent_restored_on_kill(self):
hub = greenlet(lambda: None)
main = greenlet.getcurrent()
result = []
def worker():
try:
# Wait to be killed
main.switch()
except greenlet.GreenletExit:
# Resurrect and switch to parent
result.append(greenlet.getcurrent().parent)
result.append(greenlet.getcurrent())
hub.switch()
g = greenlet(worker, parent=hub)
g.switch()
del g
self.assertTrue(result)
self.assertEqual(result[0], main)
self.assertEqual(result[1].parent, hub)
def test_parent_return_failure(self):
# No run causes AttributeError on switch
g1 = greenlet()
# Greenlet that implicitly switches to parent
g2 = greenlet(lambda: None, parent=g1)
# AttributeError should propagate to us, no fatal errors
self.assertRaises(AttributeError, g2.switch)
def test_throw_exception_not_lost(self):
class mygreenlet(greenlet):
def __getattribute__(self, name):
try:
raise Exception()
except: # pylint:disable=bare-except
pass
return greenlet.__getattribute__(self, name)
g = mygreenlet(lambda: None)
self.assertRaises(SomeError, g.throw, SomeError())
def test_throw_doesnt_crash(self):
result = []
def worker():
greenlet.getcurrent().parent.switch()
def creator():
g = greenlet(worker)
g.switch()
result.append(g)
t = threading.Thread(target=creator)
t.start()
t.join()
self.assertRaises(greenlet.error, result[0].throw, SomeError())
def test_recursive_startup(self):
class convoluted(greenlet):
def __init__(self):
greenlet.__init__(self)
self.count = 0
def __getattribute__(self, name):
if name == 'run' and self.count == 0:
self.count = 1
self.switch(43)
return greenlet.__getattribute__(self, name)
def run(self, value):
while True:
self.parent.switch(value)
g = convoluted()
self.assertEqual(g.switch(42), 43)
def test_unexpected_reparenting(self):
another = []
def worker():
g = greenlet(lambda: None)
another.append(g)
g.switch()
t = threading.Thread(target=worker)
t.start()
t.join()
class convoluted(greenlet):
def __getattribute__(self, name):
if name == 'run':
self.parent = another[0] # pylint:disable=attribute-defined-outside-init
return greenlet.__getattribute__(self, name)
g = convoluted(lambda: None)
self.assertRaises(greenlet.error, g.switch)
def test_threaded_updatecurrent(self):
# released when main thread should execute
lock1 = threading.Lock()
lock1.acquire()
# released when another thread should execute
lock2 = threading.Lock()
lock2.acquire()
class finalized(object):
def __del__(self):
# happens while in green_updatecurrent() in main greenlet
# should be very careful not to accidentally call it again
# at the same time we must make sure another thread executes
lock2.release()
lock1.acquire()
# now ts_current belongs to another thread
def deallocator():
greenlet.getcurrent().parent.switch()
def fthread():
lock2.acquire()
greenlet.getcurrent()
del g[0]
lock1.release()
lock2.acquire()
greenlet.getcurrent()
lock1.release()
main = greenlet.getcurrent()
g = [greenlet(deallocator)]
g[0].bomb = finalized()
g[0].switch()
t = threading.Thread(target=fthread)
t.start()
# let another thread grab ts_current and deallocate g[0]
lock2.release()
lock1.acquire()
# this is the corner stone
# getcurrent() will notice that ts_current belongs to another thread
# and start the update process, which would notice that g[0] should
# be deallocated, and that will execute an object's finalizer. Now,
# that object will let another thread run so it can grab ts_current
# again, which would likely crash the interpreter if there's no
# check for this case at the end of green_updatecurrent(). This test
# passes if getcurrent() returns correct result, but it's likely
# to randomly crash if it's not anyway.
self.assertEqual(greenlet.getcurrent(), main)
# wait for another thread to complete, just in case
t.join()
def test_dealloc_switch_args_not_lost(self):
seen = []
def worker():
# wait for the value
value = greenlet.getcurrent().parent.switch()
# delete all references to ourself
del worker[0]
initiator.parent = greenlet.getcurrent().parent
# switch to main with the value, but because
# ts_current is the last reference to us we
# return immediately
try:
greenlet.getcurrent().parent.switch(value)
finally:
seen.append(greenlet.getcurrent())
def initiator():
return 42 # implicitly falls thru to parent
worker = [greenlet(worker)]
worker[0].switch() # prime worker
initiator = greenlet(initiator, worker[0])
value = initiator.switch()
self.assertTrue(seen)
self.assertEqual(value, 42)
def test_tuple_subclass(self):
if sys.version_info[0] > 2:
# There's no apply in Python 3.x
def _apply(func, a, k):
func(*a, **k)
else:
_apply = apply # pylint:disable=undefined-variable
class mytuple(tuple):
def __len__(self):
greenlet.getcurrent().switch()
return tuple.__len__(self)
args = mytuple()
kwargs = dict(a=42)
def switchapply():
_apply(greenlet.getcurrent().parent.switch, args, kwargs)
g = greenlet(switchapply)
self.assertEqual(g.switch(), kwargs)
def test_abstract_subclasses(self):
AbstractSubclass = ABCMeta(
'AbstractSubclass',
(greenlet,),
{'run': abstractmethod(lambda self: None)})
class BadSubclass(AbstractSubclass):
pass
class GoodSubclass(AbstractSubclass):
def run(self):
pass
GoodSubclass() # should not raise
self.assertRaises(TypeError, BadSubclass)
def test_implicit_parent_with_threads(self):
if not gc.isenabled():
return # cannot test with disabled gc
N = gc.get_threshold()[0]
if N < 50:
return # cannot test with such a small N
def attempt():
lock1 = threading.Lock()
lock1.acquire()
lock2 = threading.Lock()
lock2.acquire()
recycled = [False]
def another_thread():
lock1.acquire() # wait for gc
greenlet.getcurrent() # update ts_current
lock2.release() # release gc
t = threading.Thread(target=another_thread)
t.start()
class gc_callback(object):
def __del__(self):
lock1.release()
lock2.acquire()
recycled[0] = True
class garbage(object):
def __init__(self):
self.cycle = self
self.callback = gc_callback()
l = []
x = range(N*2)
current = greenlet.getcurrent()
g = garbage()
for _ in x:
g = None # lose reference to garbage
if recycled[0]:
# gc callback called prematurely
t.join()
return False
last = greenlet()
if recycled[0]:
break # yes! gc called in green_new
l.append(last) # increase allocation counter
else:
# gc callback not called when expected
gc.collect()
if recycled[0]:
t.join()
return False
self.assertEqual(last.parent, current)
for g in l:
self.assertEqual(g.parent, current)
return True
for _ in range(5):
if attempt():
break
def test_issue_245_reference_counting_subclass_no_threads(self):
# https://github.com/python-greenlet/greenlet/issues/245
# Before the fix, this crashed pretty reliably on
# Python 3.10, at least on macOS; but much less reliably on other
# interpreters (memory layout must have changed).
# The threaded test crashed more reliably on more interpreters.
from greenlet import getcurrent
from greenlet import GreenletExit
class Greenlet(greenlet):
pass
initial_refs = sys.getrefcount(Greenlet)
# This has to be an instance variable because
# Python 2 raises a SyntaxError if we delete a local
# variable referenced in an inner scope.
self.glets = [] # pylint:disable=attribute-defined-outside-init
def greenlet_main():
try:
getcurrent().parent.switch()
except GreenletExit:
self.glets.append(getcurrent())
# Before the
for _ in range(10):
Greenlet(greenlet_main).switch()
del self.glets
self.assertEqual(sys.getrefcount(Greenlet), initial_refs)
def test_issue_245_reference_counting_subclass_threads(self):
# https://github.com/python-greenlet/greenlet/issues/245
from threading import Thread
from threading import Event
from greenlet import getcurrent
class MyGreenlet(greenlet):
pass
glets = []
ref_cleared = Event()
def greenlet_main():
getcurrent().parent.switch()
def thread_main(greenlet_running_event):
mine = MyGreenlet(greenlet_main)
glets.append(mine)
# The greenlets being deleted must be active
mine.switch()
# Don't keep any reference to it in this thread
del mine
# Let main know we published our greenlet.
greenlet_running_event.set()
# Wait for main to let us know the references are
# gone and the greenlet objects no longer reachable
ref_cleared.wait()
# The creating thread must call getcurrent() (or a few other
# greenlet APIs) because that's when the thread-local list of dead
# greenlets gets cleared.
getcurrent()
# We start with 3 references to the subclass:
# - This module
# - Its __mro__
# - The __subclassess__ attribute of greenlet
# - (If we call gc.get_referents(), we find four entries, including
# some other tuple ``(greenlet)`` that I'm not sure about but must be part
# of the machinery.)
#
# On Python 3.10 it's often enough to just run 3 threads; on Python 2.7,
# more threads are needed, and the results are still
# non-deterministic. Presumably the memory layouts are different
initial_refs = sys.getrefcount(MyGreenlet)
thread_ready_events = []
for _ in range(
initial_refs + 45
):
event = Event()
thread = Thread(target=thread_main, args=(event,))
thread_ready_events.append(event)
thread.start()
for done_event in thread_ready_events:
done_event.wait()
del glets[:]
ref_cleared.set()
# Let any other thread run; it will crash the interpreter
# if not fixed (or silently corrupt memory and we possibly crash
# later).
time.sleep(1)
self.assertEqual(sys.getrefcount(MyGreenlet), initial_refs)
class TestRepr(unittest.TestCase):
def assertEndsWith(self, got, suffix):
self.assertTrue(got.endswith(suffix), (got, suffix))
def test_main_while_running(self):
r = repr(greenlet.getcurrent())
self.assertEndsWith(r, " current active started main>")
def test_main_in_background(self):
main = greenlet.getcurrent()
def run():
return repr(main)
g = greenlet(run)
r = g.switch()
self.assertEndsWith(r, ' suspended active started main>')
def test_initial(self):
r = repr(greenlet())
self.assertEndsWith(r, ' pending>')
def test_main_from_other_thread(self):
main = greenlet.getcurrent()
class T(threading.Thread):
original_main = thread_main = None
main_glet = None
def run(self):
self.original_main = repr(main)
self.main_glet = greenlet.getcurrent()
self.thread_main = repr(self.main_glet)
t = T()
t.start()
t.join(10)
self.assertEndsWith(t.original_main, ' suspended active started main>')
self.assertEndsWith(t.thread_main, ' current active started main>')
r = repr(t.main_glet)
# main greenlets, even from dead threads, never really appear dead
# TODO: Can we find a better way to differentiate that?
assert not t.main_glet.dead
self.assertEndsWith(r, ' suspended active started main>')
def test_dead(self):
g = greenlet(lambda: None)
g.switch()
self.assertEndsWith(repr(g), ' dead>')
self.assertNotIn('suspended', repr(g))
self.assertNotIn('started', repr(g))
self.assertNotIn('active', repr(g))
def test_formatting_produces_native_str(self):
# https://github.com/python-greenlet/greenlet/issues/218
# %s formatting on Python 2 was producing unicode, not str.
g_dead = greenlet(lambda: None)
g_not_started = greenlet(lambda: None)
g_cur = greenlet.getcurrent()
for g in g_dead, g_not_started, g_cur:
self.assertIsInstance(
'%s' % (g,),
str
)
self.assertIsInstance(
'%r' % (g,),
str,
)
if __name__ == '__main__':
unittest.main()
| 32.130316
| 92
| 0.55945
|
05ef8cb3d6e2cbdd744b9692d7dce6c5717a4789
| 2,809
|
py
|
Python
|
python_developer_tools/files/xml_utils.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 32
|
2021-06-21T04:49:48.000Z
|
2022-03-29T05:46:59.000Z
|
python_developer_tools/files/xml_utils.py
|
HonestyBrave/python_developer_tools
|
fc0dcf5c4ef088e2e535206dc82f09bbfd01f280
|
[
"Apache-2.0"
] | 1
|
2021-11-12T03:45:55.000Z
|
2021-11-12T03:45:55.000Z
|
python_developer_tools/files/xml_utils.py
|
HonestyBrave/python_developer_tools
|
fc0dcf5c4ef088e2e535206dc82f09bbfd01f280
|
[
"Apache-2.0"
] | 10
|
2021-06-03T08:05:05.000Z
|
2021-12-13T03:10:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime:4/29/2021 4:22 PM
# @File:xml_utils
from xml.dom.minidom import parse
from lxml.etree import Element, SubElement, tostring
def read_predict_xml(label_path):
    # Read the contents of the xml file
gt_lists = []
    # Get all object elements in the xml
small_dom_tree = parse(label_path)
small_root_node = small_dom_tree.documentElement
objects = small_root_node.getElementsByTagName('object')
for obj in objects:
name = obj.getElementsByTagName('name')[0].childNodes[0].nodeValue
if name in ["cell", "barcode"]:
continue
xmin = obj.getElementsByTagName('xmin')[0].childNodes[0].nodeValue
ymin = obj.getElementsByTagName('ymin')[0].childNodes[0].nodeValue
xmax = obj.getElementsByTagName('xmax')[0].childNodes[0].nodeValue
ymax = obj.getElementsByTagName('ymax')[0].childNodes[0].nodeValue
gt = {"coordinate": "{},{},{},{}".format(xmin, ymin, xmax, ymax),
"col_idx": "",
"row_idx": "",
"prob": "",
"tag": name}
gt_lists.append(gt)
#
# pt_x1, pt_y1, pt_x2, pt_y2 = pt["coordinate"].split(",")
# pt_col, pt_row = pt["col_idx"], pt["row_idx"]
# pt_label = pt["tag"]
return gt_lists
def json_2_xml_save(save_xml_path, jsons, origin_imgname):
    # Save the jsons as an xml file
node_root = Element('annotation')
node_folder = SubElement(node_root, 'folder')
node_folder.text = 'images'
node_filename = SubElement(node_root, 'filename')
node_filename.text = origin_imgname
node_size = SubElement(node_root, 'size')
node_width = SubElement(node_size, 'width')
node_width.text = str(0)
node_height = SubElement(node_size, 'height')
node_height.text = str(0)
node_depth = SubElement(node_size, 'depth')
node_depth.text = '3'
for box in jsons:
node_object = SubElement(node_root, 'object')
node_name = SubElement(node_object, 'name')
node_name.text = box['tag']
node_difficult = SubElement(node_object, 'difficult')
node_difficult.text = "0"
node_bndbox = SubElement(node_object, 'bndbox')
coord_list = list(map(lambda x: int(float(x)), box['coordinate'].split(',')))
node_xmin = SubElement(node_bndbox, 'xmin')
node_xmin.text = str(coord_list[0])
node_ymin = SubElement(node_bndbox, 'ymin')
node_ymin.text = str(coord_list[1])
node_xmax = SubElement(node_bndbox, 'xmax')
node_xmax.text = str(coord_list[2])
node_ymax = SubElement(node_bndbox, 'ymax')
node_ymax.text = str(coord_list[3])
    xml = tostring(node_root, pretty_print=True)  # pretty-print the output, adding line breaks where needed
with open(save_xml_path, 'wb') as f:
f.write(xml)
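# Hedged usage sketch (file names are illustrative):
#   boxes = read_predict_xml("sample.xml")   # list of {"coordinate", "tag", ...} dicts
#   json_2_xml_save("sample_out.xml", boxes, "sample.jpg")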
| 35.1125
| 85
| 0.635457
|
b82a55cad3c44fd3b3b5289b4d34f0e4093fde44
| 3,528
|
py
|
Python
|
src/eval_classification.py
|
HuyVu0508/sentence2vector
|
d3704469d8f256789ce342b81ae1c2e05cf0e97a
|
[
"MIT"
] | 3
|
2019-02-10T16:35:22.000Z
|
2020-12-02T15:27:03.000Z
|
src/eval_classification.py
|
HuyVu0508/sentence2vector
|
d3704469d8f256789ce342b81ae1c2e05cf0e97a
|
[
"MIT"
] | null | null | null |
src/eval_classification.py
|
HuyVu0508/sentence2vector
|
d3704469d8f256789ce342b81ae1c2e05cf0e97a
|
[
"MIT"
] | null | null | null |
# Experiment scripts for binary classification benchmarks (e.g. MR, CR, MPQA, SUBJ)
import numpy as np
import sys
import nbsvm
import dataset_handler
from scipy.sparse import hstack
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold
def eval_nested_kfold(encoder, name, loc='./data/', k=10, seed=1234, use_nb=False):
"""
Evaluate features with nested K-fold cross validation
Outer loop: Held-out evaluation
Inner loop: Hyperparameter tuning
Datasets can be found at http://nlp.stanford.edu/~sidaw/home/projects:nbsvm
Options for name are 'MR', 'CR', 'SUBJ' and 'MPQA'
"""
# Load the dataset and extract features
z, features = dataset_handler.load_data(encoder, name, loc=loc, seed=seed)
scan = [2**t for t in range(0,9,1)]
npts = len(z['text'])
kf = KFold(npts, n_folds=k, random_state=seed)
scores = []
for train, test in kf:
# Split data
X_train = features[train]
y_train = z['labels'][train]
X_test = features[test]
y_test = z['labels'][test]
Xraw = [z['text'][i] for i in train]
Xraw_test = [z['text'][i] for i in test]
scanscores = []
for s in scan:
# Inner KFold
innerkf = KFold(len(X_train), n_folds=k, random_state=seed+1)
innerscores = []
for innertrain, innertest in innerkf:
# Split data
X_innertrain = X_train[innertrain]
y_innertrain = y_train[innertrain]
X_innertest = X_train[innertest]
y_innertest = y_train[innertest]
Xraw_innertrain = [Xraw[i] for i in innertrain]
Xraw_innertest = [Xraw[i] for i in innertest]
# NB (if applicable)
if use_nb:
NBtrain, NBtest = compute_nb(Xraw_innertrain, y_innertrain, Xraw_innertest)
X_innertrain = hstack((X_innertrain, NBtrain))
X_innertest = hstack((X_innertest, NBtest))
# Train classifier
clf = LogisticRegression(C=s)
clf.fit(X_innertrain, y_innertrain)
acc = clf.score(X_innertest, y_innertest)
innerscores.append(acc)
print (s, acc)
# Append mean score
scanscores.append(np.mean(innerscores))
# Get the index of the best score
s_ind = np.argmax(scanscores)
s = scan[s_ind]
print (scanscores)
print (s)
# NB (if applicable)
if use_nb:
NBtrain, NBtest = compute_nb(Xraw, y_train, Xraw_test)
X_train = hstack((X_train, NBtrain))
X_test = hstack((X_test, NBtest))
# Train classifier
clf = LogisticRegression(C=s)
clf.fit(X_train, y_train)
# Evaluate
acc = clf.score(X_test, y_test)
scores.append(acc)
print (scores)
return scores
def compute_nb(X, y, Z):
"""
Compute NB features
"""
labels = [int(t) for t in y]
ptrain = [X[i] for i in range(len(labels)) if labels[i] == 0]
ntrain = [X[i] for i in range(len(labels)) if labels[i] == 1]
poscounts = nbsvm.build_dict(ptrain, [1,2])
negcounts = nbsvm.build_dict(ntrain, [1,2])
dic, r = nbsvm.compute_ratio(poscounts, negcounts)
trainX = nbsvm.process_text(X, dic, r, [1,2])
devX = nbsvm.process_text(Z, dic, r, [1,2])
return trainX, devX
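# Hedged usage sketch: `encoder` is assumed to be any object accepted by
# dataset_handler.load_data (e.g. a pre-trained sentence encoder); dataset name
# and location are illustrative.
#   scores = eval_nested_kfold(encoder, 'MR', loc='./data/', k=10, use_nb=False)
#   print (np.mean(scores))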
| 30.947368
| 95
| 0.585034
|
caebcffd0e966af1dd55eec9fb4b900673c8e66d
| 6,552
|
py
|
Python
|
python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py
|
DesmonDay/Paddle
|
a082b8d7f11e4d366d814b0dfc22b7b42edaba8f
|
[
"Apache-2.0"
] | 1
|
2021-09-06T15:52:29.000Z
|
2021-09-06T15:52:29.000Z
|
python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py
|
XYZ916829/Paddle
|
1833a2311a9528b09ccba6ed8ebfa104db5147ff
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/mkldnn/test_slice_mkldnn_op.py
|
XYZ916829/Paddle
|
1833a2311a9528b09ccba6ed8ebfa104db5147ff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle
@OpTestTool.skip_if(core.is_compiled_with_cuda(),
"CUDA required dygraph so oneDNN UT must be skipped")
class TestSliceOneDNNOp(OpTest):
def setUp(self):
self.op_type = "slice"
self.config()
self.set_inputs()
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
'starts': self.starts,
'ends': self.ends,
'infer_flags': self.infer_flags,
'use_mkldnn': True
}
self.set_attrs()
def set_inputs(self):
self.inputs = {'Input': self.input}
def set_attrs(self):
pass
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [1, 0, 2]
self.ends = [3, 3, 4]
self.axes = [0, 1, 2]
self.infer_flags = [1, 1, 1]
self.out = self.input[1:3, 0:3, 2:4, :]
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['Input'], 'Out')
class TestSliceOneDNNOp1(TestSliceOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 2]
self.infer_flags = [1, 1, 1]
self.out = self.input[-3:3, 0:100, 2:-1, :]
class TestSliceOneDNNOp2(TestSliceOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 3]
self.infer_flags = [1, 1, 1]
self.out = self.input[-3:3, 0:100, :, 2:-1]
class TestSliceDecrease1AxisOneDNNOp(TestSliceOneDNNOp):
def set_attrs(self):
self.attrs['decrease_axis'] = self.decrease_axis
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [1, 0, 2]
self.ends = [2, 3, 4]
self.axes = [0, 1, 2]
self.decrease_axis = [0]
self.infer_flags = [1, 1, 1]
self.out = self.input[1, 0:3, 2:4, :]
class TestSliceDecrease2AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [1, 0, 2]
self.ends = [2, 1, 4]
self.axes = [0, 1, 2]
self.decrease_axis = [0, 1]
self.infer_flags = [1, 1, 1]
self.out = self.input[1, 0, 2:4, :]
class TestSliceDecrease3AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-1, 0, 2]
self.ends = [1000000, 1, 4]
self.axes = [0, 1, 2]
self.decrease_axis = [0, 1]
self.infer_flags = [1, 1, 1]
self.out = self.input[-1, 0, 2:4, :]
class TestSliceDecrease4AxesOneDNNOp(TestSliceDecrease1AxisOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 7]).astype("float32")
self.starts = [0, 1, 2, 3]
self.ends = [1, 2, 3, 4]
self.axes = [0, 1, 2, 3]
self.decrease_axis = [0, 1, 2, 3]
self.infer_flags = [1, 1, 1]
self.out = self.input[0, 1, 2, 3:4]
class TestSlice5DOneDNNOp(TestSliceDecrease1AxisOneDNNOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6, 7]).astype("float32")
self.starts = [-1]
self.ends = [1000000]
self.axes = [4]
self.decrease_axis = [4]
self.infer_flags = [1, 1, 1]
self.out = self.input[:, :, :, :, -1]
class TestSlice3DOneDNNOp(TestSliceDecrease1AxisOneDNNOp):
def config(self):
self.input = np.random.random([5, 4, 5]).astype("float32")
self.starts = [-1]
self.ends = [1000000]
self.axes = [2]
self.decrease_axis = [2]
self.infer_flags = [1, 1, 1]
self.out = self.input[:, :, -1]
# BF16 TESTS
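# The factory below derives a BF16 variant from each FP32 test case: inputs are
# repacked as bfloat16 stored in uint16, outputs are checked on CPUPlace only, and
# gradients are supplied analytically (user_defined_grads) because numeric gradient
# checking is unreliable at bfloat16 precision.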
def create_bf16_test_class(parent):
@OpTestTool.skip_if_not_cpu_bf16()
class TestSliceBF16OneDNNOp(parent):
def set_inputs(self):
self.dtype = np.uint16
self.inputs = {'Input': convert_float_to_uint16(self.input)}
def calculate_grads(self):
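            # Slice backward: start from a zero tensor shaped like the input and
            # scatter the upstream gradient into the sliced region. This sketch
            # assumes a 4-D input, which holds for every test case wrapped here.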
self.dout = self.out
self.dx = np.zeros(shape=self.input.shape)
begin = [None] * self.input.ndim
end = [None] * self.input.ndim
for i in range(len(self.axes)):
begin[self.axes[i]] = self.starts[i]
end[self.axes[i]] = self.ends[i]
self.dx[begin[0]:end[0], begin[1]:end[1], begin[2]:end[2], begin[3]:
end[3]] = self.dout
def test_check_output(self):
self.check_output_with_place(core.CPUPlace())
def test_check_grad(self):
self.calculate_grads()
self.check_grad_with_place(
core.CPUPlace(), ["Input"],
"Out",
user_defined_grads=[self.dx],
user_defined_grad_outputs=[convert_float_to_uint16(self.dout)])
cls_name = "{0}_{1}".format(parent.__name__, "BF16")
TestSliceBF16OneDNNOp.__name__ = cls_name
globals()[cls_name] = TestSliceBF16OneDNNOp
create_bf16_test_class(TestSliceOneDNNOp)
create_bf16_test_class(TestSliceOneDNNOp1)
create_bf16_test_class(TestSliceDecrease1AxisOneDNNOp)
create_bf16_test_class(TestSliceDecrease2AxesOneDNNOp)
create_bf16_test_class(TestSliceDecrease3AxesOneDNNOp)
create_bf16_test_class(TestSliceDecrease4AxesOneDNNOp)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| 32.76
| 92
| 0.607601
|
a343c23484eb1b9cf7679d226a6f56c2c141186e
| 248
|
py
|
Python
|
airbyte-integrations/connectors/source-harvest/main.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 22
|
2020-08-27T00:47:20.000Z
|
2020-09-17T15:39:39.000Z
|
airbyte-integrations/connectors/source-harvest/main.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 116
|
2020-08-27T01:11:27.000Z
|
2020-09-19T02:47:52.000Z
|
airbyte-integrations/connectors/source-harvest/main.py
|
onaio/airbyte
|
38302e82a25f1b66742c3febfbff0668556920f2
|
[
"MIT"
] | 1
|
2020-09-15T06:10:01.000Z
|
2020-09-15T06:10:01.000Z
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import sys
from airbyte_cdk.entrypoint import launch
from source_harvest import SourceHarvest
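# Entry point: launch() hands the CLI args to the Airbyte CDK, which dispatches the
# standard protocol commands (spec / check / discover / read) to the connector.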
if __name__ == "__main__":
source = SourceHarvest()
launch(source, sys.argv[1:])
| 17.714286
| 56
| 0.729839
|
daea384c2220ea044a0e7b537ba656e4cb043997
| 2,998
|
py
|
Python
|
allennlp/data/dataset_readers/__init__.py
|
entslscheia/allennlp
|
eeba62e34c8e211ed5963f830528c957f178607b
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/dataset_readers/__init__.py
|
entslscheia/allennlp
|
eeba62e34c8e211ed5963f830528c957f178607b
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/dataset_readers/__init__.py
|
entslscheia/allennlp
|
eeba62e34c8e211ed5963f830528c957f178607b
|
[
"Apache-2.0"
] | 1
|
2021-09-21T12:03:27.000Z
|
2021-09-21T12:03:27.000Z
|
"""
A :class:`~allennlp.data.dataset_readers.dataset_reader.DatasetReader`
reads a file and converts it to a collection of
:class:`~allennlp.data.instance.Instance` s.
The various subclasses know how to read specific filetypes
and produce datasets in the formats required by specific models.
"""
from allennlp.data.dataset_readers.ccgbank import CcgBankDatasetReader
from allennlp.data.dataset_readers.conll2003 import Conll2003DatasetReader
from allennlp.data.dataset_readers.conll2000 import Conll2000DatasetReader
from allennlp.data.dataset_readers.ontonotes_ner import OntonotesNamedEntityRecognition
from allennlp.data.dataset_readers.coreference_resolution import ConllCorefReader, WinobiasReader
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.event2mind import Event2MindDatasetReader
from allennlp.data.dataset_readers.interleaving_dataset_reader import InterleavingDatasetReader
from allennlp.data.dataset_readers.language_modeling import LanguageModelingReader
from allennlp.data.dataset_readers.masked_language_modeling import MaskedLanguageModelingReader
from allennlp.data.dataset_readers.next_token_lm import NextTokenLmReader
from allennlp.data.dataset_readers.multiprocess_dataset_reader import MultiprocessDatasetReader
from allennlp.data.dataset_readers.penn_tree_bank import PennTreeBankConstituencySpanDatasetReader
from allennlp.data.dataset_readers.reading_comprehension import (
DropReader,
SquadReader,
TriviaQaReader,
QuACReader,
QangarooReader,
)
from allennlp.data.dataset_readers.semantic_role_labeling import SrlReader
from allennlp.data.dataset_readers.semantic_dependency_parsing import (
SemanticDependenciesDatasetReader,
)
from allennlp.data.dataset_readers.seq2seq import Seq2SeqDatasetReader
from allennlp.data.dataset_readers.sequence_tagging import SequenceTaggingDatasetReader
from allennlp.data.dataset_readers.snli import SnliReader
from allennlp.data.dataset_readers.universal_dependencies import UniversalDependenciesDatasetReader
from allennlp.data.dataset_readers.universal_dependencies_multilang import (
UniversalDependenciesMultiLangDatasetReader,
)
from allennlp.data.dataset_readers.stanford_sentiment_tree_bank import (
StanfordSentimentTreeBankDatasetReader,
)
from allennlp.data.dataset_readers.quora_paraphrase import QuoraParaphraseDatasetReader
from allennlp.data.dataset_readers.semantic_parsing import (
WikiTablesDatasetReader,
AtisDatasetReader,
NlvrDatasetReader,
TemplateText2SqlDatasetReader,
)
from allennlp.data.dataset_readers.semantic_parsing.quarel import QuarelDatasetReader
from allennlp.data.dataset_readers.simple_language_modeling import (
SimpleLanguageModelingDatasetReader,
)
from allennlp.data.dataset_readers.babi import BabiReader
from allennlp.data.dataset_readers.copynet_seq2seq import CopyNetDatasetReader
from allennlp.data.dataset_readers.text_classification_json import TextClassificationJsonReader
| 51.689655
| 99
| 0.878586
|