Dataset columns (39; "nullable" marks columns that may be null, shown as ⌀ in the original dump):

| column | dtype and range |
|---|---|
| hexsha | string, length 40 |
| size | int64, 5 to 2.06M |
| ext | string, 10 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 3 to 248 |
| max_stars_repo_name | string, length 5 to 125 |
| max_stars_repo_head_hexsha | string, length 40 to 78 |
| max_stars_repo_licenses | list, length 1 to 10 |
| max_stars_count | int64, 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable |
| max_issues_repo_path | string, length 3 to 248 |
| max_issues_repo_name | string, length 5 to 125 |
| max_issues_repo_head_hexsha | string, length 40 to 78 |
| max_issues_repo_licenses | list, length 1 to 10 |
| max_issues_count | int64, 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable |
| max_forks_repo_path | string, length 3 to 248 |
| max_forks_repo_name | string, length 5 to 125 |
| max_forks_repo_head_hexsha | string, length 40 to 78 |
| max_forks_repo_licenses | list, length 1 to 10 |
| max_forks_count | int64, 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable |
| content | string, length 5 to 2.06M |
| avg_line_length | float64, 1 to 1.02M |
| max_line_length | int64, 3 to 1.03M |
| alphanum_fraction | float64, 0 to 1 |
| count_classes | int64, 0 to 1.6M |
| score_classes | float64, 0 to 1 |
| count_generators | int64, 0 to 651k |
| score_generators | float64, 0 to 1 |
| count_decorators | int64, 0 to 990k |
| score_decorators | float64, 0 to 1 |
| count_async_functions | int64, 0 to 235k |
| score_async_functions | float64, 0 to 1 |
| count_documentation | int64, 0 to 1.04M |
| score_documentation | float64, 0 to 1 |

Each record below lists these fields in this order, separated by `|`; the multi-line `content` field is printed in full between the fork metadata and the trailing per-file statistics columns.
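
The records that follow are easier to query programmatically than to skim. Here is a minimal sketch of how a table with the schema above could be loaded and filtered, assuming the rows are published as a Hugging Face `datasets` table; the dataset identifier `user/python-code-scored`, the split name, and the 0.4 threshold are placeholders for illustration, not values taken from this dump.

```python
# Minimal sketch: stream a dataset with the schema above and keep
# permissively licensed files with a high documentation score.
# "user/python-code-scored" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-code-scored", split="train", streaming=True)

permissive = {"MIT", "Apache-2.0"}
filtered = ds.filter(
    lambda row: row["score_documentation"] > 0.4
    and any(lic in permissive for lic in row["max_stars_repo_licenses"])
)

# Print repository, path, and file size for a few matching rows.
for row in filtered.take(3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```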

3fba885aa5f0287571e2d253dd5d007fd48c35d4 | 588 | py | Python | ethgasstation.py | ethgasstation/ethgasstation-adaptive-oracle | e5983adfa0cb8cd0c3952aa6a9869e9f402156b9 | ["MIT"] | 47 | 2017-12-08T02:02:19.000Z | 2018-01-06T10:30:56.000Z | ethgasstation.py | ethgasstation/ethgasstation-adaptive-oracle | e5983adfa0cb8cd0c3952aa6a9869e9f402156b9 | ["MIT"] | 11 | 2017-12-08T08:03:56.000Z | 2018-01-06T23:39:32.000Z | ethgasstation.py | ethgasstation/ethgasstation-adaptive-oracle | e5983adfa0cb8cd0c3952aa6a9869e9f402156b9 | ["MIT"] | 7 | 2017-12-08T07:27:18.000Z | 2018-01-06T04:06:46.000Z |

#!/usr/bin/env python3
"""
ETH Gas Station
Primary backend.
"""
import argparse
from egs.main import master_control
from egs.output import Output
def main():
"""Parse command line options."""
parser = argparse.ArgumentParser(description="An adaptive gas price oracle for Ethereum.")
parser.add_argument('--generate-report', action='store_true', help="Generate reports for ethgasstation-frontend.")
args = parser.parse_args()
# kick off the egs main event loop
master_control(args)
if __name__ == '__main__':
o = Output()
o.banner()
main()
| 23.52 | 118 | 0.693878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.455782 |

3fbafb8b47bed64f592d7b2645ff31b682526b8a | 3,675 | py | Python | app/tests/controls/tests_template_helpers.py | madussault/FlaskyPress | 677bfbb3473a239c08d9e120e959d3e31c456964 | ["MIT"] | null | null | null | app/tests/controls/tests_template_helpers.py | madussault/FlaskyPress | 677bfbb3473a239c08d9e120e959d3e31c456964 | ["MIT"] | null | null | null | app/tests/controls/tests_template_helpers.py | madussault/FlaskyPress | 677bfbb3473a239c08d9e120e959d3e31c456964 | ["MIT"] | null | null | null |

"""Contains tests for the functions found in ``controls/templates_helpers.py``
To run this particular test file use the following command line:
nose2 -v app.tests.controls.tests_template_helpers
"""
from app import db, create_app
import unittest
from unittest import TestCase
from config import Config
from app.tests.utils import (dummy_post, control_categories,
control_search_bar,
add_three_dummy_widget_positions)
from app.models import SearchBarControls, CategoriesControls
from app.controls.template_helpers import (ordered_widgets,
categories_presence,
sidebar_widget_count,
search_bar_placement)
class TestConfig(Config):
""" Custom configuration for our tests.
Attributes
----------
TESTING : bool
Enable testing mode. Exceptions are propagated rather than handled by
the app’s error handlers.
Must be set to True to prevent the mail logger from sending email
warnings.
WHOOSHEE_MEMORY_STORAGE : bool
When set to True use the memory as storage. We need that during our
tests so the data that we write in the in-memory SQLite database do
not become indexed.
SQLALCHEMY_DATABASE_URI : str
Make SQLAlchemy to use an in-memory SQLite database during the tests,
so this way we are not writing dummy test data to our production
database.
"""
TESTING = True
WHOOSHEE_MEMORY_STORAGE = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
class TemplateGlobal(TestCase):
"""Contains tests for the blueprint's custom template global functions.
These functions can be found in the ``controls/templates_helpers.py``. They
are decorated with ``app_template_global``.
"""
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.drop_all()
db.create_all()
dummy_post()
control_categories("no_posts")
control_search_bar('navbar')
def tearDown(self):
self.app_context.pop()
def test_search_bar_placement(self):
"""Testing of the ``search_bar_placement`` function.
"""
search_bar_placement()
query = SearchBarControls.query.first()
self.assertEqual(query.placement, 'navbar',
"Function was not capable to get the value "
"representing the search bar placement.")
def test_categories_presence(self):
categories_presence()
query = CategoriesControls.query.first()
self.assertEqual(query.presence, 'no_posts',
"Function was not capable to get the value "
"representing where the categories can be found on "
"the page.")
def test_sidebar_widget_count(self):
add_three_dummy_widget_positions()
self.assertEqual(sidebar_widget_count(), 3,
"Total number of entry in the the table of the"
" ``WidgetOrder`` is not what is expected.")
def test_ordered_widgets(self):
add_three_dummy_widget_positions()
ow = ordered_widgets()
expected = ['Search Bar Widget', 'Category Widget',
'Dummy Content Widget']
self.assertEqual(expected, ow, "List of widgets name was not returned"
"with the expected order.")
if __name__ == '__main__':
unittest.main(verbosity=2)
| 37.121212 | 79 | 0.628844 | 2,820 | 0.76693 | 0 | 0 | 0 | 0 | 0 | 0 | 1,655 | 0.450095 |

3fbbc3f8ef8d51546060cccaf53257878c60d288 | 2,299 | py | Python | MAModule/networks/predict.py | MrReochen/MultiAgentModule | 20216dee265501f09fba7b73fafdbef63b297725 | ["MIT"] | null | null | null | MAModule/networks/predict.py | MrReochen/MultiAgentModule | 20216dee265501f09fba7b73fafdbef63b297725 | ["MIT"] | null | null | null | MAModule/networks/predict.py | MrReochen/MultiAgentModule | 20216dee265501f09fba7b73fafdbef63b297725 | ["MIT"] | null | null | null |

import torch
import torch.nn as nn
from ..networks.basic.util import check
from ..networks.basic.predict import PredictNet, PredictLayer, OutLayer
from ..utils.util import get_shape_from_obs_space
class OneHot:
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor):
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.long(), 1)
return y_onehot.float()
class Predict(nn.Module):
def __init__(self, args, obs_space, action_space, cent_obs_space, device=torch.device("cpu")):
super(Predict, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
cent_obs_shape = get_shape_from_obs_space(cent_obs_space)
self.net = PredictNet(args, obs_shape, action_space, cent_obs_shape, use_projector=True)
self.onehot = OneHot(action_space.n)
self.to(device)
def forward(self, obs, actions):
obs = check(obs).to(**self.tpdv)
actions = self.onehot.transform(check(actions)).to(**self.tpdv)
x = torch.cat((obs, actions), dim=-1)
out = self.net(x)
return out
class Projector(nn.Module):
def __init__(self, args, cent_obs_space, device=torch.device("cpu")):
super(Projector, self).__init__()
self.args = args
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self.tpdv = dict(dtype=torch.float32, device=device)
cent_obs_shape = get_shape_from_obs_space(cent_obs_space)
if args.predict_dim:
self.mlp = PredictLayer(cent_obs_shape[0], args.predict_dim,
1, self._use_orthogonal, False)
# self.out = OutLayer(cent_obs_shape[0] // 4, self.hidden_size, self._use_orthogonal, False)
self.to(device)
def forward(self, cent_obs):
x = check(cent_obs).to(**self.tpdv)
if self.args.predict_dim:
out = self.mlp(x)
# out = self.out(out)
else:
out = x
return out
| 34.833333 | 104 | 0.646368 | 2,097 | 0.912136 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.053502 |

3fbbd88b176cdd6b237953ffa51d4f9d25ca08a8 | 1,431 | py | Python | repositories/gae/blob_dataset.py | singhj/locality-sensitive-hashing | 99baa87d3b532ba5aa03fd80ed967275eb98d422 | ["MIT"] | 19 | 2015-02-22T15:47:33.000Z | 2020-10-19T12:29:24.000Z | repositories/gae/blob_dataset.py | singhj/locality-sensitive-hashing | 99baa87d3b532ba5aa03fd80ed967275eb98d422 | ["MIT"] | 7 | 2015-02-10T10:52:06.000Z | 2019-04-01T15:27:00.000Z | repositories/gae/blob_dataset.py | singhj/locality-sensitive-hashing | 99baa87d3b532ba5aa03fd80ed967275eb98d422 | ["MIT"] | 7 | 2015-02-23T19:22:11.000Z | 2022-02-04T10:27:15.000Z |

from google.appengine.ext import ndb
from repositories.gae.dataset import Dataset
from repositories.gae.dataset import calculate_max_hashes, get_random_bits
class BlobDataset(Dataset):
filename = ndb.StringProperty()
blob_key = ndb.BlobKeyProperty()
@classmethod
def create(cls, blob_key, **kwargs):
blob_key = blob_key
filename = kwargs.get('filename')
rows = kwargs.get('rows', 5)
bands = kwargs.get('bands', 40)
buckets_per_band = kwargs.get('buckets_per_band', 100)
shingle_type = kwargs.get('shingle_type', 'c4')
minhash_modulo = kwargs.get('minhash_modulo', 5000)
max_hashes = calculate_max_hashes(rows, bands)
dataset = cls.get(blob_key)
if not dataset:
dataset = BlobDataset(
filename = filename,
blob_key = blob_key,
random_seeds = get_random_bits(max_hashes),
rows = rows,
bands = bands,
buckets_per_band = buckets_per_band,
shingle_type = shingle_type,
minhash_modulo = minhash_modulo)
else:
dataset.filename = filename
return dataset.put()
@classmethod
def get(cls, key):
return Dataset.query(cls.blob_key == key).get()
| 34.902439 | 74 | 0.566038 | 1,273 | 0.889588 | 0 | 0 | 1,160 | 0.810622 | 0 | 0 | 75 | 0.052411 |

3fbcfad926cc78ab95112a6eecc4dc811aaf6f09 | 1,394 | py | Python | wetrunner/tests/test_evmat.py | DavidMStraub/python-wetrunner | be9549535aab3a00e496002a515c647d112ea090 | ["MIT"] | null | null | null | wetrunner/tests/test_evmat.py | DavidMStraub/python-wetrunner | be9549535aab3a00e496002a515c647d112ea090 | ["MIT"] | 4 | 2018-01-11T10:29:16.000Z | 2018-04-12T15:36:20.000Z | wetrunner/tests/test_evmat.py | DavidMStraub/python-wetrunner | be9549535aab3a00e496002a515c647d112ea090 | ["MIT"] | 2 | 2018-01-11T10:20:55.000Z | 2018-03-07T22:13:34.000Z |

"""Compare evolution matrices to v0.1 numerics"""
import wetrunner
import unittest
from pkg_resources import resource_filename
import numpy as np
import numpy.testing as npt
def getUs_new(classname):
arg = (0.56, 5, 0.12, 1/127, 0, 0, 0, 1.2, 4.2, 0, 0, 1.8)
return wetrunner.rge.getUs(classname, *arg)
def getUe_new(classname):
arg = (0.56, 5, 0.12, 1/127, 0, 0, 0, 1.2, 4.2, 0, 0, 1.8)
return wetrunner.rge.getUe(classname, *arg)
class TestEvMat(unittest.TestCase):
def test_evmat(self):
Usold = dict(np.load(resource_filename('wetrunner', 'tests/data/evmat_s_old.npz')))
Ueold = dict(np.load(resource_filename('wetrunner', 'tests/data/evmat_e_old.npz')))
Usnew = {k: getUs_new(k) for k in ['I', 'II', 'III', 'IV', 'Vb']}
Uenew = {k: getUe_new(k) for k in ['I', 'II', 'III', 'IV', 'Vb']}
Usnew['V'] = getUs_new('Vsb')
Uenew['V'] = getUe_new('Vsb')
for k in ['I', 'II', 'III', 'IV', 'V', 'Vb']:
npt.assert_array_almost_equal(Usold[k], Usnew[k],
err_msg="Failed for {} QCD".format(k))
for k in ['I', 'II', 'III', 'IV', 'Vb']: # NB, skipping V!
npt.assert_array_almost_equal(100*Ueold[k], 100*Uenew[k],
decimal=2,
err_msg="Failed for {} QED".format(k))
| 39.828571 | 91 | 0.553085 | 938 | 0.672884 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.201578 |

3fbfff6f4fb61ab0d9c6aaa91429d122a201c789 | 4,310 | py | Python | python/clockwork/ena/submit_files.py | jeff-k/clockwork | d6e9ac80bb46ec806acd7db85ed5c3430c3f2438 | ["MIT"] | 18 | 2018-01-18T13:02:10.000Z | 2022-03-25T05:56:02.000Z | python/clockwork/ena/submit_files.py | jeff-k/clockwork | d6e9ac80bb46ec806acd7db85ed5c3430c3f2438 | ["MIT"] | 54 | 2018-01-25T15:47:25.000Z | 2022-03-30T17:02:23.000Z | python/clockwork/ena/submit_files.py | jeff-k/clockwork | d6e9ac80bb46ec806acd7db85ed5c3430c3f2438 | ["MIT"] | 15 | 2018-01-18T11:21:33.000Z | 2022-03-30T16:55:48.000Z |

import configparser
import random
import string
from clockwork import utils
class Error(Exception):
pass
def _make_dummy_success_receipt(outfile, object_type):
accession = "".join(
[random.choice(string.ascii_uppercase + string.digits) for _ in range(10)]
)
with open(outfile, "w") as f:
print(
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="receipt.xsl"?>
<RECEIPT receiptDate="2017-08-31T11:07:50.251+01:00" submissionFile="submission.xml" success="true">
<"""
+ object_type.upper()
+ r''' accession="'''
+ accession
+ r"""" alias="alias" status="PRIVATE">
<EXT_ID accession="SAMEA123456789" type="biosample"/>
</"""
+ object_type.upper()
+ r""">
<SUBMISSION accession="ERA1234567" alias="alias 42"/>
<MESSAGES>
<INFO>Submission has been committed.</INFO>
<INFO>This submission is a TEST submission and will be discarded within 24 hours</INFO>
</MESSAGES>
<ACTIONS>ADD</ACTIONS>
</RECEIPT>
""",
file=f,
)
def _make_dummy_fail_receipt(outfile):
with open(outfile, "w") as f:
print(
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="receipt.xsl"?>
<RECEIPT receiptDate="2017-09-01T14:13:19.573+01:00" submissionFile="submission.xml" success="false">
<SUBMISSION alias="Submission alias run 1"/>
<MESSAGES>
<ERROR>In submission, alias:"Submission alias run 1", accession:"". The object being added already exists in the submission account.</ERROR>
<INFO>Submission has been rolled back.</INFO>
<INFO>This submission is a TEST submission and will be discarded within 24 hours</INFO>
</MESSAGES>
<ACTIONS>ADD</ACTIONS>
</RECEIPT>
""",
file=f,
)
def parse_config_file(ini_file):
config = configparser.ConfigParser()
try:
config.read(ini_file)
except:
raise Error("Error! Parsing config file " + ini_file)
if "ena_login" not in config:
raise Error("Error! [ena_login] section not found in config file " + ini_file)
if not ("user" in config["ena_login"] and "password" in config["ena_login"]):
raise Error(
"Error! user and password not found in [ena_login] section of config file "
+ ini_file
)
return config["ena_login"]["user"], config["ena_login"]["password"]
def submit_xml_files(
ini_file,
outfile,
files=None,
use_test_server=False,
unit_test=None,
unit_test_obj_type=None,
):
username, password = parse_config_file(ini_file)
if files is None:
files_command = None
else:
files_command_list = [
'-F "' + key + "=@" + value + '"' for key, value in files.items()
]
files_command = " ".join(files_command_list)
if use_test_server:
url = "https://www-test.ebi.ac.uk/ena/submit/drop-box/submit/?auth=ENA%20"
else:
url = "https://www.ebi.ac.uk/ena/submit/drop-box/submit/?auth=ENA%20"
command_list = [
"curl -k",
files_command,
'"' + url + username + "%20" + password + '"',
">",
outfile,
]
command = " ".join([x for x in command_list if x is not None])
if unit_test is None:
utils.syscall(command)
elif unit_test == "success":
_make_dummy_success_receipt(outfile, unit_test_obj_type)
elif unit_test == "fail":
_make_dummy_fail_receipt(outfile)
else:
raise Error("unit_test must be None, success, or fail. Got: " + unit_test)
def upload_file_to_ena_ftp(ini_file, filename, uploaded_name):
# paranoid about passwords and running ps? Looks like curl is ok:
# https://unix.stackexchange.com/questions/385339/how-does-curl-protect-a-password-from-appearing-in-ps-output
# "wipe the next argument out so that the username:password isn't
# displayed in the system process list"
username, password = parse_config_file(ini_file)
cmd = " ".join(
[
"curl -T",
filename,
"ftp://webin.ebi.ac.uk/" + uploaded_name,
"--user",
username + ":" + password,
]
)
utils.syscall(cmd)
| 31.459854 | 148 | 0.615777 | 32 | 0.007425 | 0 | 0 | 0 | 0 | 0 | 0 | 2,053 | 0.476334 |

3fc20da9e836148d0b5e35676fbdec51d080a74c | 3,350 | py | Python | tools/add_new_quantization_parameters.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | ["Apache-2.0"] | 310 | 2020-10-29T09:22:42.000Z | 2022-03-31T04:53:34.000Z | tools/add_new_quantization_parameters.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | ["Apache-2.0"] | 615 | 2020-10-28T10:22:25.000Z | 2022-03-29T18:09:23.000Z | tools/add_new_quantization_parameters.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | ["Apache-2.0"] | 86 | 2020-10-28T11:34:34.000Z | 2022-03-31T08:00:35.000Z |

"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from argparse import ArgumentParser
from typing import NamedTuple, Any
import torch
from os import listdir, makedirs
from os.path import isfile, join, exists
from shutil import copyfile
from nncf.torch.quantization.layers import SymmetricQuantizer, AsymmetricQuantizer
class ParameterToAdd(NamedTuple):
name: str
value: Any
def main(argv):
parser = ArgumentParser()
parser.add_argument('-i', '--input-folder', help='Path to directory with given checkpoints to modify',
required=True)
parser.add_argument('-o', '--output-folder', help='Path to directory to save modified checkpoints', required=True)
parser.add_argument('-b', '--bitwidth', help='Bitwidth to initialize quantizer',
required=False, default=8, type=int)
parser.add_argument('-v', '--verbose', help='Print all new names of parameters', required=False,
action='store_true')
args = parser.parse_args(args=argv)
src_dir = args.input_folder
dst_dir = args.output_folder
if not exists(dst_dir):
makedirs(dst_dir)
param_list = [ParameterToAdd('_num_bits', torch.IntTensor([args.bitwidth])),
ParameterToAdd('enabled', torch.IntTensor([1]))]
pth_files = [(join(src_dir, f), join(dst_dir, f)) for f in listdir(src_dir) if
isfile(join(src_dir, f)) and ('.pth' in f or '.sd' in f)]
files_to_copy = []
for pair in pth_files:
src_file, dst_file = pair
if 'binarization' in src_file:
files_to_copy.append(pair)
continue
sd = pth = torch.load(src_file)
if 'state_dict' in pth:
sd = pth['state_dict']
hooks = [SymmetricQuantizer.SCALE_PARAM_NAME, AsymmetricQuantizer.INPUT_LOW_PARAM_NAME]
new_keys = {}
for new_parameter in param_list:
old_keys = list(sd.keys())
for k in sd.keys():
for h in hooks:
new_key = k.replace(h, new_parameter.name)
if ('.' + h in k) and ('.' + new_parameter.name not in k) and (new_key not in old_keys):
new_keys[new_key] = new_parameter.value
if new_keys:
print(f'\nAdding #{len(new_keys)} of new keys')
if args.verbose:
print('New keys:', new_keys, sep='\n')
for new_key, value in new_keys.items():
sd[new_key] = value
pth['state_dict'] = sd
torch.save(pth, dst_file)
else:
files_to_copy.append(pair)
for src_file, dst_file in files_to_copy:
print("\nCopying {}".format(dst_file))
copyfile(src_file, dst_file)
if __name__ == '__main__':
main(sys.argv[1:])
| 37.222222 | 118 | 0.638507 | 62 | 0.018507 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.297313 |

3fc217a661811f79c1778a9b4610e13c10ae7b95 | 1,106 | py | Python | src/backend/marsha/core/migrations/0019_auto_20200609_0820.py | marin-leonard/marsha | b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d | ["MIT"] | 64 | 2018-04-26T23:46:14.000Z | 2022-03-26T21:32:23.000Z | src/backend/marsha/core/migrations/0019_auto_20200609_0820.py | marin-leonard/marsha | b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d | ["MIT"] | 533 | 2018-04-17T10:17:24.000Z | 2022-03-31T13:07:49.000Z | src/backend/marsha/core/migrations/0019_auto_20200609_0820.py | marin-leonard/marsha | b5d6bf98fda27acd3a08577b82dd98bcd39bfd8d | ["MIT"] | 16 | 2018-09-21T12:52:34.000Z | 2021-11-29T16:44:51.000Z |

# Generated by Django 3.0.6 on 2020-05-19 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0018_auto_20200603_0620"),
]
operations = [
migrations.AddField(
model_name="video",
name="live_info",
field=models.JSONField(
blank=True,
help_text="Information needed to manage live streaming",
null=True,
verbose_name="Live info",
),
),
migrations.AddField(
model_name="video",
name="live_state",
field=models.CharField(
blank=True,
choices=[
("idle", "idle"),
("starting", "starting"),
("running", "running"),
("stopped", "stopped"),
],
help_text="state of the live mode.",
max_length=20,
null=True,
verbose_name="live state",
),
),
]
| 26.97561 | 72 | 0.454792 | 1,013 | 0.915913 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.249548 |

3fc23e96afc2ac687938892c8fa39f0756c775dd | 11,766 | py | Python | models/video_base.py | vedaldi/dynamic-video-depth | 274f5f59604a10121a2445f7b30df4a9ff075946 | ["Apache-2.0"] | 1 | 2022-03-24T23:59:26.000Z | 2022-03-24T23:59:26.000Z | models/video_base.py | vedaldi/dynamic-video-depth | 274f5f59604a10121a2445f7b30df4a9ff075946 | ["Apache-2.0"] | null | null | null | models/video_base.py | vedaldi/dynamic-video-depth | 274f5f59604a10121a2445f7b30df4a9ff075946 | ["Apache-2.0"] | null | null | null |

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import join, dirname
import numpy as np
import torch
from ..models.netinterface import NetInterface
from os import makedirs
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.colors import ListedColormap
from ..third_party.util_colormap import turbo_colormap_data
# matplotlib.cm.register_cmap('turbo', cmap=ListedColormap(turbo_colormap_data))
import matplotlib
import shutil
class VideoBaseModel(NetInterface):
def disp_loss(self, d1, d2):
if self.opt.use_disp:
t1 = torch.clamp(d1, min=1e-3)
t2 = torch.clamp(d2, min=1e-3)
return 300 * torch.abs((1 / t1) - (1 / t2))
else:
return torch.abs(d1 - d2)
def _train_on_batch(self, epoch, batch_ind, batch):
for n in self._nets:
n.zero_grad()
# self.net_depth.eval() # freeze bn to check
self.load_batch(batch)
batch_size = batch['img_1'].shape[0]
pred = self._predict_on_batch()
loss, loss_data = self._calc_loss(pred)
loss.backward()
for optimizer in self._optimizers:
optimizer.step()
if np.mod(epoch, self.opt.vis_every_train) == 0:
indx = batch_ind if self.opt.vis_at_start else self.opt.epoch_batches - batch_ind
if indx <= self.opt.vis_batches_train:
for k, v in pred.items():
pred[k] = v.data.cpu().numpy()
outdir = join(self.full_logdir, 'visualize', 'epoch%04d_train' % epoch)
makedirs(outdir, exist_ok=True)
output = self.pack_output(pred, batch)
if self.global_rank == 0:
if self.visualizer is not None:
self.visualizer.visualize(output, indx + (1000 * epoch), outdir)
np.savez(join(outdir, 'rank%04d_batch%04d' % (self.global_rank, batch_ind)), **output)
batch_log = {'size': batch_size, 'loss': loss.item(), **loss_data}
return batch_log
@staticmethod
def depth2disp(depth):
valid = depth > 1e-2
valid = valid.float()
return (1 / (depth + (1 - valid) * 1e-8)) * valid
def disp_vali(self, d1, d2):
vali = d2 > 1e-2
return torch.nn.functional.mse_loss(self.depth2disp(d1) * vali, self.depth2disp(d2) * vali)
def _vali_on_batch(self, epoch, batch_idx, batch):
for n in self._nets:
n.eval()
self.load_batch(batch)
with torch.no_grad():
pred = self._predict_on_batch(is_train=False)
gt_depth = batch['depth_mvs'].to(pred['depth'].device)
# try:
loss = self.disp_vali(pred['depth'], gt_depth).item()
# except:
# print('error when eval losses, might be in test mode')
# pass
if np.mod(epoch, self.opt.vis_every_vali) == 0:
if batch_idx < self.opt.vis_batches_vali:
for k, v in pred.items():
pred[k] = v.cpu().numpy()
outdir = join(self.full_logdir, 'visualize', 'epoch%04d_vali' % epoch)
makedirs(outdir, exist_ok=True)
output = self.pack_output(pred, batch)
if self.global_rank == 0:
if self.visualizer is not None:
self.visualizer.visualize(output, batch_idx + (1000 * epoch), outdir)
np.savez(join(outdir, 'rank%04d_batch%04d' % (self.global_rank, batch_idx)), **output)
batch_size = batch['img'].shape[0]
batch_log = {'size': batch_size, 'loss': loss}
return batch_log
def pack_output(self, pred_all, batch):
batch_size = len(batch['pair_path'])
if 'img' not in batch.keys():
img_1 = batch['img_1'].cpu().numpy()
img_2 = batch['img_2'].cpu().numpy()
else:
img_1 = batch['img']
img_2 = batch['img']
output = {'batch_size': batch_size, 'img_1': img_1, 'img_2': img_2, **pred_all}
if 'img' not in batch.keys():
output['flow_1_2'] = self._input.flow_1_2.cpu().numpy()
output['flow_2_1'] = self._input.flow_2_1.cpu().numpy()
output['depth_nn_1'] = batch['depth_pred_1'].cpu().numpy()
else:
output['depth_nn'] = batch['depth_pred'].cpu().numpy()
output['depth_gt'] = batch['depth_mvs'].cpu().numpy()
output['cam_c2w'] = batch['cam_c2w'].cpu().numpy()
output['K'] = batch['K'].cpu().numpy()
output['pair_path'] = batch['pair_path']
return output
def test_on_batch(self, batch_idx, batch):
if not hasattr(self, 'test_cache'):
self.test_cache = []
for n in self._nets:
n.eval()
self.load_batch(batch)
with torch.no_grad():
pred = self._predict_on_batch(is_train=False)
if not hasattr(self, 'test_loss'):
self.test_loss = 0
for k, v in pred.items():
pred[k] = v.cpu().numpy()
epoch_string = 'best' if self.opt.epoch < 0 else '%04d' % self.opt.epoch
outdir = join(self.opt.output_dir, 'epoch%s_test' % epoch_string)
if not hasattr(self, 'outdir'):
self.outdir = outdir
makedirs(outdir, exist_ok=True)
output = self.pack_output(pred, batch)
if batch_idx == 223:
output['depth'][0, 0, 0, :] = output['depth'][0, 0, 2, :]
output['depth'][0, 0, 1, :] = output['depth'][0, 0, 2, :]
self.test_cache.append(output.copy())
if self.global_rank == 0:
if self.visualizer is not None:
self.visualizer.visualize(output, batch_idx, outdir)
np.savez(join(outdir, 'batch%04d' % (batch_idx)), **output)
def on_test_end(self):
# make test video:
from subprocess import call
from dvd.util.util_html import Webpage
from tqdm import tqdm
depth_pred = []
depth_nn = []
depth_gt = []
imgs = []
c2ws = []
Ks = []
for pack in self.test_cache:
depth_pred.append(pack['depth'])
depth_nn.append(pack['depth_nn'])
imgs.append(pack['img_1'])
c2ws.append(pack['cam_c2w'])
Ks.append(pack['K'])
depth_gt.append(pack['depth_gt'])
depth_pred = np.concatenate(depth_pred, axis=0)
depth_nn = np.concatenate(depth_nn, axis=0)
imgs = np.concatenate(imgs, axis=0)
c2ws = np.concatenate(c2ws, axis=0)
Ks = np.concatenate(Ks, axis=0)
depth_gt = np.concatenate(depth_gt, axis=0)
pred_max = depth_pred.max()
pred_min = depth_pred.min()
print(pred_max, pred_min)
depth_cmap = 'turbo'
mask_valid = np.where(depth_gt > 1e-8, 1, 0)
for i in tqdm(range(depth_pred.shape[0])):
plt.figure(figsize=[60, 20], dpi=40, facecolor='black')
plt.subplot(1, 3, 1)
plt.title('Refined', fontsize=100, color='w')
plt.imshow(1 / depth_pred[i, 0, ...], cmap=depth_cmap, vmax=1 / pred_min, vmin=1 / pred_max)
cbar = plt.colorbar(fraction=0.048 * 0.5, pad=0.01)
plt.axis('off')
cbar.ax.yaxis.set_tick_params(color='w', labelsize=40)
plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
plt.subplot(1, 3, 2)
plt.title('Initial', fontsize=100, color='w')
plt.imshow(1 / depth_nn[i, 0, ...], cmap=depth_cmap, vmax=1 / pred_min, vmin=1 / pred_max)
plt.axis('off')
cbar = plt.colorbar(fraction=0.048 * 0.5, pad=0.01)
cbar.ax.yaxis.set_tick_params(color='w', labelsize=40)
plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
plt.subplot(1, 3, 3)
plt.title('GT', fontsize=100, color='w')
plt.imshow(mask_valid[i, 0, ...] / (depth_gt[i, 0, ...] + 1e-8), cmap=depth_cmap, vmax=1 / pred_min, vmin=1 / pred_max)
plt.axis('off')
cbar = plt.colorbar(fraction=0.048 * 0.5, pad=0.01)
cbar.ax.yaxis.set_tick_params(color='w', labelsize=40)
plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
plt.savefig(join(self.outdir, 'compare_%04d.png' % i), bbox_inches='tight', facecolor='black', dpi='figure')
plt.close()
plt.imshow(imgs[i, ...].transpose(1, 2, 0))
plt.axis('off')
plt.savefig(join(self.outdir, 'rgb_%04d.png' % i), bbox_inches='tight', facecolor='black', dpi='figure')
plt.close()
epoch_string = self.outdir.split('/')[-1]
gen_vid_command = 'ffmpeg -y -r 30 -i {img_template} -vcodec libx264 -crf 25 -pix_fmt yuv420p -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" {video_path} > /dev/null'
gen_vid_command_slow = 'ffmpeg -y -r 2 -i {img_template} -vcodec libx264 -crf 25 -pix_fmt yuv420p -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" {video_path} > /dev/null'
for r_number in range(120, 140):
plt.figure(figsize=[60, 20], dpi=20, facecolor='black')
plt.subplot(1, 2, 1)
plt.title('Refined', fontsize=100, color='w')
plt.imshow(1 / depth_pred[:, 0, r_number, :], cmap=depth_cmap)
cbar = plt.colorbar(fraction=0.048 * 0.5, pad=0.01)
plt.axis('off')
cbar.ax.yaxis.set_tick_params(color='w', labelsize=40)
plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
plt.subplot(1, 2, 2)
plt.title('Initial', fontsize=100, color='w')
plt.imshow(1 / depth_nn[:, 0, r_number, :], cmap=depth_cmap)
plt.axis('off')
cbar = plt.colorbar(fraction=0.048 * 0.5, pad=0.01)
cbar.ax.yaxis.set_tick_params(color='w', labelsize=40)
plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
plt.savefig(join(self.outdir, 'temporal_slice_%04d.png' % (r_number - 120)), bbox_inches='tight', facecolor='black', dpi='figure')
plt.close()
img_template = join(self.outdir, 'compare_%04d.png')
img_template_t = join(self.outdir, 'temporal_slice_%04d.png')
video_path = join(dirname(self.outdir), epoch_string + '.mp4')
video_path_t = join(dirname(self.outdir), epoch_string + '_temporal.mp4')
gen_vid_command_c = gen_vid_command.format(img_template=img_template, video_path=video_path)
call(gen_vid_command_c, shell=True)
gen_vid_command_t = gen_vid_command_slow.format(img_template=img_template_t, video_path=video_path_t)
call(gen_vid_command_t, shell=True)
web = Webpage()
web.add_video(epoch_string + '_rgb.mp4', title='original video')
web.add_video(epoch_string + '.mp4', title=f'Disparity loss {self.test_loss}')
web.save(join(dirname(self.outdir), epoch_string + '.html'))
@staticmethod
def copy_and_make_dir(src, target):
fname = dirname(target)
makedirs(fname, exist_ok=True)
shutil.copy(src, target)
@staticmethod
def scale_tesnor(t):
t = (t - t.min()) / (t.max() - t.min() + 1e-9)
return t
# %%
| 40.996516 | 167 | 0.586011 | 10,773 | 0.915604 | 0 | 0 | 424 | 0.036036 | 0 | 0 | 2,032 | 0.172701 |

3fc28ee2166df5c930e27ec1974997e4a8df5d90 | 1,232 | py | Python | update-tuya-address.py | mwinters-stuff/octoprint-update-tuya-address | 6b4f95613d573eb45af2de936615d75dfd01a77b | ["Apache-2.0"] | null | null | null | update-tuya-address.py | mwinters-stuff/octoprint-update-tuya-address | 6b4f95613d573eb45af2de936615d75dfd01a77b | ["Apache-2.0"] | null | null | null | update-tuya-address.py | mwinters-stuff/octoprint-update-tuya-address | 6b4f95613d573eb45af2de936615d75dfd01a77b | ["Apache-2.0"] | null | null | null |

#!/usr/bin/python3
import subprocess
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
#MAC address of the smart plug
MAC_ADDRESS = '84:f3:eb:32:e3:b4'
# netmask of your network
NET_MASK = '192.168.1.1/24'
#octopi config
OCTOPI_CONFIG = '/home/pi/.octoprint/config.yaml'
if __name__ == "__main__":
subprocess.check_call(['/usr/bin/nmap','-sP','-T4',NET_MASK])
p = subprocess.Popen(['/usr/sbin/arp','-n'], stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
str = line.decode('utf-8')
if(MAC_ADDRESS in str):
ipaddress = str.split(' ')[0]
print('Found Plug with address %s' % ipaddress)
config = open(OCTOPI_CONFIG)
configy = yaml.load(config, Loader=Loader)
config.close()
# print(configy)
print(configy['plugins']['tuyasmartplug']['arrSmartplugs'][0]['ip'])
if configy['plugins']['tuyasmartplug']['arrSmartplugs'][0]['ip'] != ipaddress:
configy['plugins']['tuyasmartplug']['arrSmartplugs'][0]['ip'] = ipaddress
config = open(OCTOPI_CONFIG, mode='w')
config.write(yaml.safe_dump(configy, allow_unicode=False))
config.close()
break
| 31.589744 | 84 | 0.660714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.323052 |

3fc2b905de175376e48fc847adcd762ca15c7b7a | 2,571 | py | Python | sarpy_gui_apps/apps/canvas_demo/panels/canvas_demo_button_panel.py | spowlas/sarpy | c4a404203607d404b93a556459aa5311d7148e95 | ["MIT"] | null | null | null | sarpy_gui_apps/apps/canvas_demo/panels/canvas_demo_button_panel.py | spowlas/sarpy | c4a404203607d404b93a556459aa5311d7148e95 | ["MIT"] | 1 | 2019-11-14T20:55:57.000Z | 2019-12-27T18:11:29.000Z | sarpy_gui_apps/apps/canvas_demo/panels/canvas_demo_button_panel.py | spowlas/sarpy | c4a404203607d404b93a556459aa5311d7148e95 | ["MIT"] | 2 | 2020-02-12T20:11:00.000Z | 2020-03-26T16:55:03.000Z |

from tkinter_gui_builder.panel_templates.widget_panel.widget_panel import AbstractWidgetPanel
from tkinter_gui_builder.widgets import basic_widgets
class CanvasDemoButtonPanel(AbstractWidgetPanel):
fname_select = basic_widgets.Button
zoom_in = basic_widgets.Button
zoom_out = basic_widgets.Button
rect_select = basic_widgets.Button
update_rect_image = basic_widgets.Button
pan = basic_widgets.Button
draw_line_w_drag = basic_widgets.Button
draw_line_w_click = basic_widgets.Button
draw_arrow_w_drag = basic_widgets.Button
draw_arrow_w_click = basic_widgets.Button
draw_rect_w_drag = basic_widgets.Button
draw_rect_w_click = basic_widgets.Button
draw_polygon_w_click = basic_widgets.Button
draw_point_w_click = basic_widgets.Button
modify_existing_shape = basic_widgets.Button
color_selector = basic_widgets.Button
save_kml = basic_widgets.Button
select_existing_shape = basic_widgets.Combobox # type: basic_widgets.Combobox
remap_dropdown = basic_widgets.Combobox # type: basic_widgets.Combobox
def __init__(self, parent):
AbstractWidgetPanel.__init__(self, parent)
controls = ["fname_select",
"zoom_in",
"zoom_out",
"pan",
"draw_line_w_drag",
"draw_line_w_click",
"draw_arrow_w_drag",
"draw_arrow_w_click",
"draw_rect_w_drag",
"draw_rect_w_click",
"draw_polygon_w_click",
"draw_point_w_click",
"select_existing_shape",
"modify_existing_shape",
"save_kml",
"color_selector",
"rect_select",
"update_rect_image",
"remap_dropdown"]
self.init_w_box_layout(controls, 4, column_widths=20)
self.remap_dropdown.update_combobox_values(["density",
"brighter",
"darker",
"high contrast",
"linear",
"log",
"pedf",
"nrl"])
self.set_label_text("taser buttons")
if __name__ == '__main__':
print(dir(AbstractWidgetPanel))
| 40.171875 | 93 | 0.552703 | 2,355 | 0.915986 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.180475 |

3fc57348ab10be442b6950d805f656ab0d9bf881 | 1,360 | py | Python | GCN/test_until.py | Alienge/Graph-Network | 559cccb6af4e6ca50c44fd51cac8df5713f255bf | ["MIT"] | 3 | 2020-06-10T03:29:11.000Z | 2020-10-21T09:03:13.000Z | GCN/test_until.py | Alienge/Graph-Network | 559cccb6af4e6ca50c44fd51cac8df5713f255bf | ["MIT"] | null | null | null | GCN/test_until.py | Alienge/Graph-Network | 559cccb6af4e6ca50c44fd51cac8df5713f255bf | ["MIT"] | 1 | 2020-06-25T06:15:28.000Z | 2020-06-25T06:15:28.000Z |

import torch
import numpy as np
import scipy.sparse as sp
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
a = np.array([1,2,3])
b = np.array([4,5,6])
c = a + b.reshape((3,1))
print(c)
#a = torch.randint(1,100,(2,2,5))
#print(a)
#print(a.repeat(1,1,2))
#b = a.repeat(1,1,2).view(2,4,5)
#print(b)
#c = a.repeat(1,2,1)
#print(torch.cat([b,c],dim=2).view(2,))
#print(range(10))
#a = torch.randint(1,100,(3,3)).numpy()
#print(a)
#a = a + np.multiply(a.T,a.T > a) - np.multiply(a,a.T > a)
#print(a)
'''
a = torch.randint(0,2,(3,3))
b = sp.coo_matrix(a)
c = np.array(b.sum(1))
print(a)
print(b)
print(c)
'''
'''
a = torch.randn((3,3))
rowsum = np.array(a.sum(1))
r_inv = np.power(rowsum,-1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
print(r_inv)
print(r_mat_inv)
print(a)
features = r_mat_inv.dot(a)
print(features)
print(sparse_to_tuple(features))
'''
| 17.894737 | 58 | 0.609559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.535294 |

3fc7b2fa4d7d1bb8ba7c895d8420a155ae6e1ca0 | 5,056 | py | Python | mechmean/fabric_tensors.py | JulianKarlBauer/meanfieldmechanics | a53b38655d0e9558cc3e676c359fa13dfe3d9112 | ["MIT"] | null | null | null | mechmean/fabric_tensors.py | JulianKarlBauer/meanfieldmechanics | a53b38655d0e9558cc3e676c359fa13dfe3d9112 | ["MIT"] | null | null | null | mechmean/fabric_tensors.py | JulianKarlBauer/meanfieldmechanics | a53b38655d0e9558cc3e676c359fa13dfe3d9112 | ["MIT"] | 1 | 2022-02-25T19:37:20.000Z | 2022-02-25T19:37:20.000Z |

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import mechkit
import mechmean
class KanataniFactory(object):
def __init__(self, N):
self.con = mechkit.notation.Converter()
self._I2 = mechkit.tensors.Basic().I2
self.N = N = self.con.to_tensor(N)
self.degree = len(N.shape)
degrees = [x for x in range(1, self.degree + 1) if x % 2 == 0]
for degree in reversed(degrees):
N = self.first_kind(degree)
setattr(self, "N{}".format(degree), N)
setattr(self, "F{}".format(degree), self.second_kind(N))
setattr(self, "D{}".format(degree), self.third_kind(N))
def __getitem__(self, key):
"""Make attributes accessible dict-like."""
return getattr(self, key)
def first_kind(self, degree):
nbr_times_decrease = int((self.degree - degree) / 2)
N = self.N
for i in range(nbr_times_decrease):
N = self.decrease_first_kind_by_one_degree(N)
return N
def decrease_first_kind_by_one_degree(self, N):
return np.einsum("...ij, ...ij->...", N, self._I2)
def second_kind(self, N):
degree = len(N.shape)
func = self._get_func_second_kind(degree=degree)
return func(N)
def _get_func_second_kind(self, degree):
funcs = {
2: self.second_kind_N2,
4: self.second_kind_N4,
}
return funcs[degree]
def second_kind_N2(self, N):
return 15.0 / 2.0 * (N - 1.0 / 5.0 * self._I2)
def second_kind_N4(self, N):
return (
315.0
/ 8.0
* (
N
- 2.0
/ 3.0
* mechmean.operators.sym(
np.multiply.outer(self._I2, self.first_kind(degree=2))
)
+ 1.0
/ 21.0
* mechmean.operators.sym(np.multiply.outer(self._I2, self._I2))
)
)
def third_kind(self, N):
degree = len(N.shape)
func = self._get_func_third_kind(degree=degree)
return func(N)
def _get_func_third_kind(self, degree):
funcs = {2: self.third_kind_N2, 4: self.third_kind_N4}
return funcs[degree]
def third_kind_N2(self, N):
return 15.0 / 2.0 * (N - 1.0 / 3.0 * self._I2)
def third_kind_N4(self, N):
return (
315.0
/ 8.0
* (
N
- 6.0
/ 7.0
* mechmean.operators.sym(
np.multiply.outer(self._I2, self.first_kind(degree=2))
)
+ 3.0
/ 35.0
* mechmean.operators.sym(np.multiply.outer(self._I2, self._I2))
)
)
def evenly_distributed_vectors_on_sphere(nbr_vectors=1000):
"""
Define nbr_vectors evenly distributed vectors on a sphere
Using the golden spiral method kindly provided by
stackoverflow-user "CR Drost"
https://stackoverflow.com/a/44164075/8935243
"""
from numpy import pi, cos, sin, arccos, arange
indices = arange(0, nbr_vectors, dtype=float) + 0.5
phi = arccos(1 - 2 * indices / nbr_vectors)
theta = pi * (1 + 5 ** 0.5) * indices
x, y, z = cos(theta) * sin(phi), sin(theta) * sin(phi), cos(phi)
orientations = np.column_stack((x, y, z))
return orientations
def first_kind_discrete(orientations, order=4):
"""
Calc orientation tensors of ... kind
"""
# Normalize orientations
orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
# Symmetrize orientations
# orientations_reversed = [-v for v in orientations]
# orientations = orientations + orientations_reversed
einsumStrings = {
1: "ij -> j",
2: "ij, ik -> jk",
3: "ij, ik, il -> jkl",
4: "ij, ik, il, im -> jklm",
5: "ij, ik, il, im, in -> jklmn",
6: "ij, ik, il, im, in, ip -> jklmnp",
}
if order > 6:
einsumStrings[order] = einsum_str_fabric_tensor_first_kind_discrete(order=order)
einsumArgs = [orientations for i in range(order)]
N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], *einsumArgs)
return N
def einsum_str_fabric_tensor_first_kind_discrete(order):
"""
Generalize to higher orders:
N = sum_i 'order'-times_dyad_product(vector)
=
1: 'ij -> j',
2: 'ij, ik -> jk',
3: 'ij, ik, il -> jkl',
4: 'ij, ik, il, im -> jklm',
5: 'ij, ik, il, im, in -> jklmn',
6: 'ij, ik, il, im, in, ip -> jklmnp',
...
"""
# Get list of all available characters
import string
letters = list(string.ascii_letters)
letters.remove("i")
# Create einsum string and arguments
einsumInput = ",".join(["i" + letters[index] for index in range(order)])
einsumOut = "".join(letters[0:order])
einsumString = einsumInput + "->" + einsumOut
return einsumString
| 28.727273 | 88 | 0.548655 | 2,702 | 0.534415 | 0 | 0 | 0 | 0 | 0 | 0 | 1,116 | 0.220728 |

3fc90063242679a163aee4c95d494aed3302ce11 | 961 | py | Python | setup.py | adamltyson/fancylog | 8e9047759d5c394e98b5af7c2d68f70f2fbeb40e | ["MIT"] | 3 | 2020-09-30T09:02:08.000Z | 2021-06-12T20:43:43.000Z | setup.py | adamltyson/fancylog | 8e9047759d5c394e98b5af7c2d68f70f2fbeb40e | ["MIT"] | 8 | 2020-01-07T11:27:05.000Z | 2021-07-13T11:07:11.000Z | setup.py | adamltyson/fancylog | 8e9047759d5c394e98b5af7c2d68f70f2fbeb40e | ["MIT"] | 2 | 2020-02-05T19:03:04.000Z | 2021-07-07T09:42:24.000Z |

from setuptools import setup, find_packages
setup(
name="fancylog",
version="0.2.6",
description="Fancier logging in Python",
install_requires=["packaging", "rich"],
extras_require={
"dev": [
"black",
"pytest-cov",
"pytest",
"coverage",
"bump2version",
"pre-commit",
"flake8",
]
},
python_requires=">=3.6",
packages=find_packages(),
include_package_data=True,
url="https://github.com/adamltyson/fancylog",
author="Adam Tyson",
author_email="code@adamltyson.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
],
)
| 26.694444 | 49 | 0.552549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 468 | 0.486993 |

3fc96ed41bb550e4bca1d28b2bc1be7197a1cd5d | 5,620 | py | Python | tests/test_identity.py | multiscale/ymmsl-python | f8a63232823ad655530a83570443d9c045ef9929 | ["Apache-2.0"] | 1 | 2018-12-13T18:09:09.000Z | 2018-12-13T18:09:09.000Z | tests/test_identity.py | multiscale/ymmsl-python | f8a63232823ad655530a83570443d9c045ef9929 | ["Apache-2.0"] | 10 | 2018-11-13T16:12:38.000Z | 2021-07-21T13:16:43.000Z | tests/test_identity.py | multiscale/ymmsl-python | f8a63232823ad655530a83570443d9c045ef9929 | ["Apache-2.0"] | null | null | null |

from ymmsl import Identifier, Reference
import pytest
import yatiml
def test_create_identifier() -> None:
part = Identifier('testing')
assert str(part) == 'testing'
part = Identifier('CapiTaLs')
assert str(part) == 'CapiTaLs'
part = Identifier('under_score')
assert str(part) == 'under_score'
part = Identifier('_underscore')
assert str(part) == '_underscore'
part = Identifier('digits123')
assert str(part) == 'digits123'
with pytest.raises(ValueError):
Identifier('1initialdigit')
with pytest.raises(ValueError):
Identifier('test.period')
with pytest.raises(ValueError):
Identifier('test-hyphen')
with pytest.raises(ValueError):
Identifier('test space')
with pytest.raises(ValueError):
Identifier('test/slash')
def test_compare_identifier() -> None:
assert Identifier('test') == Identifier('test')
assert Identifier('test1') != Identifier('test2')
assert Identifier('test') == 'test'
assert 'test' == Identifier('test') # pylint: disable=C0122
assert Identifier('test') != 'test2'
assert 'test2' != Identifier('test') # pylint: disable=C0122
def test_identifier_dict_key() -> None:
test_dict = {Identifier('test'): 1}
assert test_dict[Identifier('test')] == 1
def test_create_reference() -> None:
test_ref = Reference('_testing')
assert str(test_ref) == '_testing'
assert len(test_ref) == 1
assert isinstance(test_ref[0], Identifier)
assert str(test_ref[0]) == '_testing'
with pytest.raises(ValueError):
Reference('1test')
test_ref = Reference('test.testing')
assert len(test_ref) == 2
assert isinstance(test_ref[0], Identifier)
assert str(test_ref[0]) == 'test'
assert isinstance(test_ref[1], Identifier)
assert str(test_ref[1]) == 'testing'
assert str(test_ref) == 'test.testing'
test_ref = Reference('test[12]')
assert len(test_ref) == 2
assert isinstance(test_ref[0], Identifier)
assert str(test_ref[0]) == 'test'
assert isinstance(test_ref[1], int)
assert test_ref[1] == 12
assert str(test_ref) == 'test[12]'
test_ref = Reference('test[12].testing.ok.index[3][5]')
assert len(test_ref) == 7
assert isinstance(test_ref[0], Identifier)
assert str(test_ref[0]) == 'test'
assert isinstance(test_ref[1], int)
assert test_ref[1] == 12
assert isinstance(test_ref[2], Identifier)
assert str(test_ref[2]) == 'testing'
assert isinstance(test_ref[3], Identifier)
assert str(test_ref[3]) == 'ok'
assert isinstance(test_ref[4], Identifier)
assert str(test_ref[4]) == 'index'
assert isinstance(test_ref[5], int)
assert test_ref[5] == 3
assert isinstance(test_ref[6], int)
assert test_ref[6] == 5
assert str(test_ref) == 'test[12].testing.ok.index[3][5]'
with pytest.raises(ValueError):
Reference([4])
with pytest.raises(ValueError):
Reference([3, Identifier('test')])
with pytest.raises(ValueError):
Reference('ua",.u8[')
with pytest.raises(ValueError):
Reference('test[4')
with pytest.raises(ValueError):
Reference('test4]')
with pytest.raises(ValueError):
Reference('test[_t]')
with pytest.raises(ValueError):
Reference('testing_{3}')
with pytest.raises(ValueError):
Reference('test.(x)')
with pytest.raises(ValueError):
Reference('[3]test')
with pytest.raises(ValueError):
Reference('[4].test')
def test_reference_slicing() -> None:
test_ref = Reference('test[12].testing.ok.index[3][5]')
assert test_ref[0] == 'test'
assert test_ref[1] == 12
assert test_ref[3] == 'ok'
assert test_ref[:3] == 'test[12].testing'
assert test_ref[2:] == 'testing.ok.index[3][5]'
with pytest.raises(RuntimeError):
test_ref[0] = 'test2'
with pytest.raises(ValueError):
test_ref[1:] # pylint: disable=pointless-statement
def test_reference_dict_key() -> None:
test_dict = {Reference('test[4]'): 1}
assert test_dict[Reference('test[4]')] == 1
def test_reference_equivalence() -> None:
assert Reference('test.test[3]') == Reference('test.test[3]')
assert Reference('test.test[3]') != Reference('test1.test[3]')
assert Reference('test.test[3]') == 'test.test[3]'
assert Reference('test.test[3]') != 'test1.test[3]'
assert 'test.test[3]' == Reference('test.test[3]') # pylint: disable=C0122
assert 'test1.test[3]' != Reference(
'test.test[3]') # pylint: disable=C0122
def test_reference_concatenation() -> None:
assert Reference('test') + Reference('test2') == 'test.test2'
assert Reference('test') + Identifier('test2') == 'test.test2'
assert Reference('test') + 5 == 'test[5]'
assert Reference('test') + [Identifier('test2'), 5] == 'test.test2[5]'
assert Reference('test[5]') + Reference('test2[3]') == 'test[5].test2[3]'
assert Reference('test[5]') + Identifier('test2') == 'test[5].test2'
assert Reference('test[5]') + 3 == 'test[5][3]'
assert (Reference('test[5]') + [3, Identifier('test2')] ==
'test[5][3].test2')
def test_reference_io() -> None:
load_reference = yatiml.load_function(Reference, Identifier)
text = 'test[12]'
doc = load_reference(text)
assert str(doc[0]) == 'test'
assert doc[1] == 12
dump_reference = yatiml.dumps_function(Identifier, Reference)
doc = Reference('test[12].testing.ok.index[3][5]')
text = dump_reference(doc)
assert text == 'test[12].testing.ok.index[3][5]\n...\n'
| 30.053476 | 79 | 0.637011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,262 | 0.224555 |

3fc9e66e2e4c81e88a2f6282ca33a85ca40e0ad7 | 1,262 | py | Python | molecule/loadbalancer/tests/test_default.py | mvdheenux/OpenConext-deploy | 9c75866cba7675cafa8946e591ffac0fe528f7b3 | ["Apache-2.0"] | 11 | 2015-07-05T10:38:10.000Z | 2019-06-27T07:49:32.000Z | molecule/loadbalancer/tests/test_default.py | mvdheenux/OpenConext-deploy | 9c75866cba7675cafa8946e591ffac0fe528f7b3 | ["Apache-2.0"] | 201 | 2015-02-03T14:52:30.000Z | 2022-03-09T08:45:00.000Z | molecule/loadbalancer/tests/test_default.py | domgon/OpenConext-deploy | 80b28f59bdef2ac683744c07bb938c889cb43681 | ["Apache-2.0"] | 48 | 2015-03-10T13:28:23.000Z | 2021-11-28T23:15:32.000Z |

import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize("installed_packages", [
("haproxy20"),
("socat"),
("keepalived"),
("bind"),
])
def test_packages_installed(host, installed_packages):
rpackage = host.package(installed_packages)
assert rpackage.is_installed
@pytest.mark.parametrize("services", [
("haproxy"),
# ("keepalive"),
("named"),
])
def test_services_running_and_enabled(host, services):
service = host.service(services)
assert service.is_enabled
assert service.is_running
@pytest.mark.parametrize("files", [
("/etc/pki/haproxy/star_haproxy.pem"),
])
def test_star_haproxy_pem(host, files):
star_haproxy_pem = host.file(files)
assert star_haproxy_pem.user == "root"
assert star_haproxy_pem.group == "root"
assert star_haproxy_pem.mode == 0o600
assert star_haproxy_pem.contains('-----BEGIN CERTIFICATE-----')
assert star_haproxy_pem.contains('-----BEGIN RSA PRIVATE KEY-----')
def test_sysctl_non_local_bind(host):
non_local_bind = host.sysctl("net.ipv4.ip_nonlocal_bind")
assert non_local_bind == 1
| 26.291667 | 71 | 0.717116 | 0 | 0 | 0 | 0 | 932 | 0.73851 | 0 | 0 | 271 | 0.214739 |

3fca93e41547a1ae70fbe0d1268f42c02b9128db | 4,237 | py | Python | tiledb/cloud/_results/results.py | TileDB-Inc/TileDB-Cloud-Py | e73f6e0ae3fc595218abd3be606c68f62ad5ac9b | ["MIT"] | 4 | 2019-12-04T23:19:35.000Z | 2021-06-21T21:42:53.000Z | tiledb/cloud/_results/results.py | TileDB-Inc/TileDB-Cloud-Py | e73f6e0ae3fc595218abd3be606c68f62ad5ac9b | ["MIT"] | 106 | 2019-11-07T22:40:43.000Z | 2022-03-29T22:31:18.000Z | tiledb/cloud/_results/results.py | TileDB-Inc/TileDB-Cloud-Py | e73f6e0ae3fc595218abd3be606c68f62ad5ac9b | ["MIT"] | 1 | 2020-10-04T18:54:37.000Z | 2020-10-04T18:54:37.000Z |

"""Things that help you keep track of task results and how to decode them."""
import abc
import dataclasses
import threading
import uuid
from concurrent import futures
from typing import Callable, Generic, Optional, TypeVar, Union
import urllib3
from tiledb.cloud import rest_api
from tiledb.cloud import tiledb_cloud_error as tce
from tiledb.cloud._results import decoders
from tiledb.cloud._results import stored_params
TASK_ID_HEADER = "X-TILEDB-CLOUD-TASK-ID"
_T = TypeVar("_T")
class Result(Generic[_T], metaclass=abc.ABCMeta):
@abc.abstractmethod
def get(self) -> _T:
"""Gets the value stored in this Result."""
raise NotImplementedError()
def to_stored_param(self) -> stored_params.StoredParam:
raise TypeError("This result cannot be converted to a StoredParam.")
# Not frozen to avoid generating unsafe methods like `__hash__`,
# but you should still *treat* these subclasses like they're frozen.
@dataclasses.dataclass()
class LocalResult(Result[_T], Generic[_T]):
"""A result from running a function in a Node locally."""
it: _T
def get(self) -> _T:
return self.it
@classmethod
def wrap(cls, func: Callable[..., _T]) -> Callable[..., Result[_T]]:
return lambda *args, **kwargs: cls(func(*args, **kwargs))
@dataclasses.dataclass()
class RemoteResult(Result[_T], Generic[_T]):
"""A response from running a UDF remotely."""
def get(self) -> _T:
"""Decodes the response from the server."""
try:
return self.decoder.decode(self.body)
except ValueError as ve:
inner_msg = f": {ve.args[0]}" if ve.args else ""
raise tce.TileDBCloudError(
f"Error decoding response from TileDB Cloud{inner_msg}"
) from ve
def to_stored_param(self) -> stored_params.StoredParam:
if not (self.results_stored and self.task_id):
raise ValueError("A result must be stored to create a StoredParam.")
return stored_params.StoredParam(
decoder=self.decoder,
task_id=self.task_id,
)
# The HTTP content of the body that was returned.
body: bytes
# The server-generated UUID of the task.
task_id: Optional[uuid.UUID]
# The decoder that was used to decode the results.
decoder: decoders.AbstractDecoder[_T]
# True if the results were stored, false otherwise.
results_stored: bool
class AsyncResult(Generic[_T]):
"""Asynchronous wrapper for compatibility with the old array.TaskResult."""
def __init__(self, future: "futures.Future[Result[_T]]"):
"""Creates a new AsyncResponse wrapping the given Future."""
self._future = future
self._id_lock = threading.Lock()
self._task_id: Optional[uuid.UUID] = None
self._future.add_done_callback(self._set_task_id)
def get(self, timeout: Optional[float] = None) -> _T:
"""Gets the result from this response, with Future's timeout rules."""
return self._future.result(timeout).get()
@property
def task_id(self) -> Optional[uuid.UUID]:
"""Gets the task ID, or None if not complete or failed with no ID."""
with self._id_lock:
return self._task_id
def _set_task_id(self, _):
"""Sets the task ID once the Future has completed."""
try:
res = self._future.result()
except rest_api.ApiException as exc:
with self._id_lock:
self._task_id = extract_task_id(exc)
except: # noqa: E722 We don't care about other exceptions, period.
pass
else:
with self._id_lock:
self._task_id = res.task_id
def extract_task_id(
thing: Union[rest_api.ApiException, urllib3.HTTPResponse],
) -> Optional[uuid.UUID]:
"""Pulls the task ID out of a response or an exception."""
id_hdr = thing.headers and thing.headers.get(TASK_ID_HEADER)
return _maybe_uuid(id_hdr)
def _maybe_uuid(id_str: Optional[str]) -> Optional[uuid.UUID]:
"""Parses a hex string into a UUID if present and valid."""
if not id_str:
return None
try:
return uuid.UUID(hex=id_str)
except ValueError:
return None
| 32.343511 | 80 | 0.660609 | 3,024 | 0.713713 | 0 | 0 | 1,806 | 0.426245 | 0 | 0 | 1,320 | 0.311541 |

3fca96dd670cc10df7dd85a3eaf8c8ace2dc5c34 | 3,676 | py | Python | test2/mining/mining.py | gr0mph/OceanOfCode | 336caa00e22ae06e12d32971f84c82e3c0c9a3a4 | ["MIT"] | null | null | null | test2/mining/mining.py | gr0mph/OceanOfCode | 336caa00e22ae06e12d32971f84c82e3c0c9a3a4 | ["MIT"] | null | null | null | test2/mining/mining.py | gr0mph/OceanOfCode | 336caa00e22ae06e12d32971f84c82e3c0c9a3a4 | ["MIT"] | null | null | null |

import sys
sys.path.append('../../')
# Global variables
from test2.test_main import TREASURE_MAP
from test2.test_main import MINE_MAP
# From OceanOfCode
# Class
from OceanOfCode import MineAndTrigger
from OceanOfCode import Submarine
from OceanOfCode import Board
# Global
from OceanOfCode import EMPTY_SYMBOLS
from OceanOfCode import DIRS
# Method
from OceanOfCode import square
import unittest
class _mining(unittest.TestCase):
def _set_up(self):
print("0. START test_set_up")
me = MineAndTrigger(None)
me.set_up(TREASURE_MAP)
print(len(me.legal))
def _mine(self):
print("1. START test_mine")
me = MineAndTrigger(None)
me.set_up(TREASURE_MAP)
print("Length: {}".format(len(me.legal)))
board = Board(None)
board.x,board.y = 10,3
orientation = me.mine(board)
for p in me.minefield:
print(p)
me = MineAndTrigger(me)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
orientation = me.mine(board)
for p in me.minefield:
print(p)
me = MineAndTrigger(me)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
orientation = me.mine(board)
for p in me.minefield:
print(p)
me = MineAndTrigger(me)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
orientation = me.mine(board)
for p in me.minefield:
print(p)
me = MineAndTrigger(me)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
orientation = me.mine(board)
for p in me.minefield:
print(p)
me = MineAndTrigger(me)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
def _nearby(self):
print("3. START test_nearby")
me = MineAndTrigger(None)
me.set_up(TREASURE_MAP)
print("Length: {}".format(len(me.legal)))
board1 = Board(None)
board1.x,board1.y = 10,3
orientation = me.mine(board1)
for p in me.minefield:
print(p)
print("Orientation: {} Legal Length: {} Minefield Length {}".format(
orientation, len(me.legal), len(me.minefield)))
board2 = Board(None)
board2.x,board2.y = 10,3
mine = me.nearby(board1,board2)
print("Mine: {}".format(mine))
if mine is not None:
me.trigger(mine)
print("Legal Length: {} Minefield Length {}".format(
len(me.legal), len(me.minefield)))
board1.x,board1.y = 5,5
mine = me.nearby(board1,board2)
print("Mine: {}".format(mine))
if mine is not None:
me.trigger(mine)
print("Legal Length: {} Minefield Length {}".format(
len(me.legal), len(me.minefield)))
def test_filter(self):
#print("4. START test_filter")
        # Keep '.' only where both TREASURE_MAP and MINE_MAP contain '.'; blank otherwise.
        lambda_n = lambda t1, m1: '.' if t1 == '.' and m1 == '.' else ' '
        for i1 in range(15):
            MINE_MAP[i1] = [lambda_n(t1, m1) for t1, m1 in zip(TREASURE_MAP[i1], MINE_MAP[i1])]
print()
print("TREASURE_MAP",end='\t')
print("MINE_MAP")
for t_r, m_r in zip(TREASURE_MAP, MINE_MAP):
print(''.join(t_r),end='\t')
print(''.join(m_r))
if __name__ == '__main__':
unittest.main()
| 28.276923
| 95
| 0.586235
| 3,225
| 0.877312
| 0
| 0
| 0
| 0
| 0
| 0
| 663
| 0.180359
|
3fcb54fe77197f1040f44681301381a698a9f1e0
| 831
|
py
|
Python
|
dfdatetime/__init__.py
|
joachimmetz/dfdatetime
|
de6da30114b952abeac8909906862e78aa7286dc
|
[
"Apache-2.0"
] | null | null | null |
dfdatetime/__init__.py
|
joachimmetz/dfdatetime
|
de6da30114b952abeac8909906862e78aa7286dc
|
[
"Apache-2.0"
] | null | null | null |
dfdatetime/__init__.py
|
joachimmetz/dfdatetime
|
de6da30114b952abeac8909906862e78aa7286dc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Digital Forensics Date and Time (dfDateTime).
dfDateTime, or Digital Forensics date and time, provides date and time
objects to preserve accuracy and precision.
"""
# Imports for date time values factory.
from dfdatetime import apfs_time
from dfdatetime import cocoa_time
from dfdatetime import delphi_date_time
from dfdatetime import fat_date_time
from dfdatetime import filetime
from dfdatetime import hfs_time
from dfdatetime import golang_time
from dfdatetime import java_time
from dfdatetime import ole_automation_date
from dfdatetime import posix_time
from dfdatetime import rfc2579_date_time
from dfdatetime import semantic_time
from dfdatetime import systemtime
from dfdatetime import time_elements
from dfdatetime import uuid_time
from dfdatetime import webkit_time
__version__ = '20211229'
| 29.678571
| 70
| 0.839952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.288809
|
3fcbaff4724b7b17a4f047879b9dde04a4754a7c
| 1,267
|
py
|
Python
|
common/data/selectionhelper.py
|
alainjungo/reliability-challenges-uncertainty
|
21e86f6e2a5d2520b5767dce48bbcf2b11773788
|
[
"MIT"
] | 56
|
2019-07-10T06:02:11.000Z
|
2021-12-21T08:11:22.000Z
|
common/data/selectionhelper.py
|
alainjungo/reliability-challenges-uncertainty
|
21e86f6e2a5d2520b5767dce48bbcf2b11773788
|
[
"MIT"
] | 4
|
2019-09-26T08:51:58.000Z
|
2021-06-08T20:27:53.000Z
|
common/data/selectionhelper.py
|
alainjungo/reliability-challenges-uncertainty
|
21e86f6e2a5d2520b5767dce48bbcf2b11773788
|
[
"MIT"
] | 8
|
2019-10-21T12:43:08.000Z
|
2021-12-02T08:14:38.000Z
|
import logging
import json
import zlib
import os
import pymia.data.extraction as extr
def save_indices(file_path: str, indices: list):
config = {'indices': indices}
with open(file_path, 'w') as f:
json.dump(config, f)
def load_indices(file_path: str):
with open(file_path, 'r') as f:
config = json.load(f)
return config['indices']
def calculate_or_load_indices(dataset: extr.ParameterizableDataset, selection: extr.SelectionStrategy):
to_hash = os.path.basename(dataset.dataset_path) + ''.join(sorted(dataset.subject_subset)) + \
repr(dataset.indexing_strategy) + repr(selection)
crc32 = hex(zlib.crc32(bytes(to_hash, encoding='utf-8')) & 0xffffffff)
indices_dir = os.path.join(os.path.dirname(dataset.dataset_path), 'indices')
file_path = os.path.join(indices_dir, '{}.json'.format(crc32))
if os.path.exists(file_path):
return load_indices(file_path)
logging.info('\t- need to calculate indices: {}'.format(repr(selection)))
indices = extr.select_indices(dataset, selection)
if not os.path.isdir(indices_dir):
os.makedirs(indices_dir)
save_indices(file_path, indices)
logging.info('\t- written to file {}'.format(file_path))
return indices
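# --- Editor's usage sketch (not part of the original module): round-trip of the
# JSON index cache; the temporary file name is an assumption for illustration.
if __name__ == '__main__':
    import tempfile
    _demo_path = os.path.join(tempfile.gettempdir(), 'demo_indices.json')
    save_indices(_demo_path, [3, 1, 4, 1, 5])
    assert load_indices(_demo_path) == [3, 1, 4, 1, 5]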
| 28.155556
| 103
| 0.694554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.086819
|
3fce44aec3e71efcc655d5db43f3f8dd2acd2d0c
| 1,191
|
py
|
Python
|
src/main/TextLemmatizer.py
|
alschmut/code2semantics
|
af1daf0b8320b534344c5352ae972fb600e21e43
|
[
"MIT"
] | 2
|
2020-02-26T22:50:38.000Z
|
2020-10-29T10:46:10.000Z
|
src/main/TextLemmatizer.py
|
alschmut/linguistic-parser
|
af1daf0b8320b534344c5352ae972fb600e21e43
|
[
"MIT"
] | null | null | null |
src/main/TextLemmatizer.py
|
alschmut/linguistic-parser
|
af1daf0b8320b534344c5352ae972fb600e21e43
|
[
"MIT"
] | null | null | null |
import sys
from util.Timer import Timer
from util.FileOpener import FileOpener
from util.Logger import Logger
from util.PathExtractor import PathExtractor
from util.PathValidator import PathValidator
from service import SpacyModel
def lemmatize_text(file_path: str, timer: Timer):
logger = Logger()
output_file = FileOpener().get_new_file("wiki.en.lemmatized.txt", "a")
with open(file_path, "r") as file:
for line in file:
lemmatized_list = [word.lemma_ for word in SpacyModel.instance.get_en_spacy_line(line)]
            lemmatized_line = " ".join(lemmatized_list)
            output_file.write(lemmatized_line)
logger.every_n_wiki_status(10, timer.get_duration())
logger.every_n_wiki_status(1)
def main():
script_name: str = PathExtractor().get_file_name(sys.argv[0])
if len(sys.argv) != 2:
Logger().usage(f"python {script_name} <wiki.en.filtered.txt>")
return
file_path = sys.argv[1]
if PathValidator().is_valid_files([file_path]):
Logger().info(f'Input file: "{file_path}"')
Logger().info("Starting to lemmatize text")
timer = Timer()
lemmatize_text(file_path, timer)
Logger().finish_script(timer.get_duration(), script_name)
if __name__ == '__main__':
main()
| 28.357143
| 90
| 0.74979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.121746
|
3fce57d4964a0ce253965c723aa6fb69e1b90bf3
| 4,069
|
py
|
Python
|
mat2py/core/graph2d.py
|
mat2py/mat2py
|
2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8
|
[
"MIT"
] | null | null | null |
mat2py/core/graph2d.py
|
mat2py/mat2py
|
2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8
|
[
"MIT"
] | 37
|
2021-12-23T03:22:20.000Z
|
2022-02-16T15:40:47.000Z
|
mat2py/core/graph2d.py
|
mat2py/mat2py
|
2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8
|
[
"MIT"
] | 2
|
2022-01-23T07:59:10.000Z
|
2022-02-03T09:15:54.000Z
|
# type: ignore
__all__ = [
"plotyy",
"figtoolset",
"getscribecontextmenu",
"getobj",
"scribeeventhandler",
"setscribecontextmenu",
"putdowntext",
"gtext",
"scribetextdlg",
"doresize",
"prepdrag",
"scriberestoresavefcns",
"zoom",
"scribeclearmode",
"domymenu",
"ylabel",
"getscribeobjectdata",
"enddrag",
"texlabel",
"setscribeobjectdata",
"polar",
"getcolumn",
"plotedit",
"semilogx",
"jpropeditutils",
"basicfitdatastat",
"plot",
"box",
"axis",
"title",
"loglog",
"dokeypress",
"semilogy",
"doclick",
"middrag",
"text",
"pan",
"subplot",
"xlabel",
"rbbox",
"grid",
"moveaxis",
"getorcreateobj",
]
from mat2py.common.backends import numpy as np
from ._internal import M
def plotyy(*args):
raise NotImplementedError("plotyy")
def figtoolset(*args):
raise NotImplementedError("figtoolset")
def getscribecontextmenu(*args):
raise NotImplementedError("getscribecontextmenu")
def getobj(*args):
raise NotImplementedError("getobj")
def scribeeventhandler(*args):
raise NotImplementedError("scribeeventhandler")
def setscribecontextmenu(*args):
raise NotImplementedError("setscribecontextmenu")
def putdowntext(*args):
raise NotImplementedError("putdowntext")
def gtext(*args):
raise NotImplementedError("gtext")
def scribetextdlg(*args):
raise NotImplementedError("scribetextdlg")
def doresize(*args):
raise NotImplementedError("doresize")
def prepdrag(*args):
raise NotImplementedError("prepdrag")
def scriberestoresavefcns(*args):
raise NotImplementedError("scriberestoresavefcns")
def zoom(*args):
raise NotImplementedError("zoom")
def scribeclearmode(*args):
raise NotImplementedError("scribeclearmode")
def domymenu(*args):
raise NotImplementedError("domymenu")
def ylabel(*args):
raise NotImplementedError("ylabel")
def getscribeobjectdata(*args):
raise NotImplementedError("getscribeobjectdata")
def enddrag(*args):
raise NotImplementedError("enddrag")
def texlabel(*args):
raise NotImplementedError("texlabel")
def setscribeobjectdata(*args):
raise NotImplementedError("setscribeobjectdata")
def polar(*args):
raise NotImplementedError("polar")
def getcolumn(*args):
raise NotImplementedError("getcolumn")
def plotedit(*args):
raise NotImplementedError("plotedit")
def semilogx(*args):
raise NotImplementedError("semilogx")
def jpropeditutils(*args):
raise NotImplementedError("jpropeditutils")
def basicfitdatastat(*args):
raise NotImplementedError("basicfitdatastat")
def plot(*args):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
while args:
x, y, *args = args
if args and isinstance(args[0], str):
style, *args = args
style = (style,)
else:
style = tuple()
x = M[x].reshape(-1)
y = M[y].reshape(-1)
ax.plot(x, y, *style)
return fig
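# Editor's note (illustrative, not in the original): plot() consumes its
# arguments MATLAB-style, as repeated (x, y) pairs each optionally followed by
# a line-style string, e.g. plot(x1, y1, "r-", x2, y2); every x and y is
# flattened to 1-D via M[...] before being handed to matplotlib.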
def box(*args):
raise NotImplementedError("box")
def axis(*args):
raise NotImplementedError("axis")
def title(*args):
raise NotImplementedError("title")
def loglog(*args):
raise NotImplementedError("loglog")
def dokeypress(*args):
raise NotImplementedError("dokeypress")
def semilogy(*args):
raise NotImplementedError("semilogy")
def doclick(*args):
raise NotImplementedError("doclick")
def middrag(*args):
raise NotImplementedError("middrag")
def text(*args):
raise NotImplementedError("text")
def pan(*args):
raise NotImplementedError("pan")
def subplot(*args):
raise NotImplementedError("subplot")
def xlabel(*args):
raise NotImplementedError("xlabel")
def rbbox(*args):
raise NotImplementedError("rbbox")
def grid(*args):
raise NotImplementedError("grid")
def moveaxis(*args):
raise NotImplementedError("moveaxis")
def getorcreateobj(*args):
raise NotImplementedError("getorcreateobj")
| 17.168776
| 54
| 0.673876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 974
| 0.239371
|
3fce96f02b43f368aab74e2c6e770ef0bfeee1f2
| 107
|
py
|
Python
|
run.py
|
Arthurdb1999/dash-plotly-example
|
a54dce5e233aab75746a433665a4a958a77e4b58
|
[
"MIT"
] | null | null | null |
run.py
|
Arthurdb1999/dash-plotly-example
|
a54dce5e233aab75746a433665a4a958a77e4b58
|
[
"MIT"
] | null | null | null |
run.py
|
Arthurdb1999/dash-plotly-example
|
a54dce5e233aab75746a433665a4a958a77e4b58
|
[
"MIT"
] | null | null | null |
from app import app
if __name__ == '__main__':
app.run_server(port=5000, debug=True, host='localhost')
| 26.75
| 59
| 0.719626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.196262
|
3fcf781a8eea0228e1eb54ee4f6768c262da5120
| 748
|
py
|
Python
|
rasa/train_init.py
|
BarnikRay/chatbot
|
4191f3064f6dae90b108ecf4e08130cda39ed370
|
[
"MIT"
] | 1
|
2020-05-03T07:30:18.000Z
|
2020-05-03T07:30:18.000Z
|
rasa/train_init.py
|
privykurura1/chatbot
|
4191f3064f6dae90b108ecf4e08130cda39ed370
|
[
"MIT"
] | 8
|
2019-12-04T23:20:56.000Z
|
2022-02-10T07:47:03.000Z
|
rasa/train_init.py
|
privykurura1/chatbot
|
4191f3064f6dae90b108ecf4e08130cda39ed370
|
[
"MIT"
] | 1
|
2021-11-14T07:45:24.000Z
|
2021-11-14T07:45:24.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
def run_faq(domain_file="config/faq_domain.yml",
training_data_file='data/stories.md'):
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=2), KerasPolicy(max_history=3, epochs=100, batch_size=50)])
data = agent.load_data(training_data_file)
model_path = './models/dialogue'
agent.train(data)
agent.persist(model_path)
if __name__ == '__main__':
logging.basicConfig(level="INFO")
run_faq()
| 28.769231
| 117
| 0.758021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.100267
|
3fcf9b0ce8ecad5b900f4b1d09dd5ed1b9a47ad4
| 6,123
|
py
|
Python
|
examples/passage_ranking.py
|
skirdey/FARM
|
2fc801d1d0c973cb36bc867907d4250d1084493b
|
[
"Apache-2.0"
] | 1,551
|
2019-07-17T18:21:08.000Z
|
2022-03-24T18:09:07.000Z
|
examples/passage_ranking.py
|
skirdey/FARM
|
2fc801d1d0c973cb36bc867907d4250d1084493b
|
[
"Apache-2.0"
] | 555
|
2019-07-23T09:00:54.000Z
|
2022-03-31T15:31:06.000Z
|
examples/passage_ranking.py
|
skirdey/FARM
|
2fc801d1d0c973cb36bc867907d4250d1084493b
|
[
"Apache-2.0"
] | 259
|
2019-07-22T08:12:01.000Z
|
2022-03-26T09:41:00.000Z
|
# fmt: off
import logging
from pathlib import Path
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.processor import RegressionProcessor, TextPairClassificationProcessor
from farm.experiment import initialize_optimizer
from farm.infer import Inferencer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import RegressionHead, TextClassificationHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results
from farm.evaluation.msmarco_passage_farm import msmarco_evaluation
def text_pair_classification():
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO)
ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
ml_logger.init_experiment(experiment_name="Public_FARM", run_name="Run_text_pair_classification")
##########################
########## Settings
##########################
set_all_seeds(seed=42)
device, n_gpu = initialize_device_settings(use_cuda=True)
n_epochs = 2
batch_size = 64
evaluate_every = 500
lang_model = "bert-base-cased"
label_list = ["0", "1"]
train_filename = "train.tsv"
dev_filename = "dev_200k.tsv"
# The source data can be found here https://github.com/microsoft/MSMARCO-Passage-Ranking
generate_data = False
data_dir = Path("../data/msmarco_passage")
predictions_raw_filename = "predictions_raw.txt"
predictions_filename = "predictions.txt"
train_source_filename = "triples.train.1m.tsv"
qrels_filename = "qrels.dev.tsv"
queries_filename = "queries.dev.tsv"
passages_filename = "collection.tsv"
top1000_filename = "top1000.dev"
# 0. Preprocess and save MSMarco data in a format that can be ingested by FARM models. Only needs to be done once!
# The final format is a tsv file with 3 columns (text, text_b and label)
if generate_data:
reformat_msmarco_train(data_dir / train_source_filename,
data_dir / train_filename)
reformat_msmarco_dev(data_dir / queries_filename,
data_dir / passages_filename,
data_dir / qrels_filename,
data_dir / top1000_filename,
data_dir / dev_filename)
# 1.Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=False)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
# Evaluation during training will be performed on a slice of the train set
# We will be using the msmarco dev set as our final evaluation set
processor = TextPairClassificationProcessor(tokenizer=tokenizer,
label_list=label_list,
metric="f1_macro",
train_filename=train_filename,
test_filename=None,
dev_split=0.001,
max_seq_len=128,
data_dir=data_dir,
delimiter="\t")
# 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# 4. Create an AdaptiveModel
# a) which consists of a pretrained language model as a basis
language_model = LanguageModel.load(lang_model)
# b) and a prediction head on top that is suited for our task
prediction_head = TextClassificationHead(num_labels=len(label_list),
class_weights=data_silo.calculate_class_weights(
task_name="text_classification"),
)
model = AdaptiveModel(
language_model=language_model,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm_output_types=["per_sequence_continuous"],
device=device)
# 5. Create an optimizer
model, optimizer, lr_schedule = initialize_optimizer(
model=model,
learning_rate=1e-5,
device=device,
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs)
    # 6. Feed everything to the Trainer, which takes care of growing our model into a powerful plant and evaluates it from time to time
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=device)
# 7. Let it grow
trainer.train()
# 8. Hooray! You have a model. Store it:
save_dir = Path("saved_models/passage_ranking_model")
model.save(save_dir)
processor.save(save_dir)
# 9. Load it & harvest your fruits (Inference)
# Add your own text adapted to the dataset you provide
model = Inferencer.load(save_dir, gpu=True, max_seq_len=128, batch_size=128)
result = model.inference_from_file(data_dir / dev_filename)
write_msmarco_results(result, save_dir / predictions_raw_filename)
msmarco_evaluation(preds_file=save_dir / predictions_raw_filename,
dev_file=data_dir / dev_filename,
qrels_file=data_dir / qrels_filename,
output_file=save_dir / predictions_filename)
model.close_multiprocessing_pool()
if __name__ == "__main__":
text_pair_classification()
# fmt: on
| 41.653061
| 162
| 0.644619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,708
| 0.278948
|
3fd06fe52f4de9eab41206707f2484a26a7e0deb
| 2,723
|
py
|
Python
|
extended_data_provider.py
|
NicolaOrritos/pricenet
|
1667e564d48bf0021eb16dcd529017cd00643b03
|
[
"MIT"
] | null | null | null |
extended_data_provider.py
|
NicolaOrritos/pricenet
|
1667e564d48bf0021eb16dcd529017cd00643b03
|
[
"MIT"
] | null | null | null |
extended_data_provider.py
|
NicolaOrritos/pricenet
|
1667e564d48bf0021eb16dcd529017cd00643b03
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import json
import pytz
def _get_data(file):
return pd.read_csv(file)
def _get_prices(data):
df = data
rome_tz = pytz.timezone('Europe/Rome')
df['time'] = pd.to_datetime(df['Timestamp'], unit='s')
    # Assign the converted column back; tz_localize/tz_convert return new Series rather than modifying in place.
    df['time'] = df['time'].dt.tz_localize(pytz.UTC).dt.tz_convert(rome_tz)
del(df['Timestamp'])
del(df['Weighted_Price'])
df = df.rename(columns={'Volume_(BTC)': 'volume_btc', 'Volume_(Currency)': 'volume_fiat'})
df = df.rename(columns={'Open': 'open', 'Close': 'close'})
df = df.rename(columns={'Low': 'low', 'High': 'high'})
return df
def _group(data, step=4):
data['group_info'] = ['data' if (index+1)%step != 0 else 'target' for index, _ in data.iterrows()]
data['type'] = data['group_info'].astype('category')
del(data['group_info'])
return data
def _bundle_groups(data, index, group_size):
return np.concatenate([data.iloc[index + a] for a in range(0, group_size)])
def scale(data_frame):
data_frame -= data_frame.min()
data_frame /= data_frame.max()
return data_frame
def remove_fields(data, fields):
for field in fields:
del(data[field])
return data
def split_to_X_y(data, groups_size):
semi_grouped = _group(data, step=groups_size)
grouped_data = semi_grouped.loc[semi_grouped['type'] == 'data']
grouped_targets = semi_grouped.loc[semi_grouped['type'] == 'target']
del(grouped_data['type'])
del(grouped_targets['type'])
    # Copy them into their own DataFrames so we stop operating on slices of `semi_grouped`:
grouped_data = grouped_data.copy()
grouped_targets = grouped_targets.copy()
usable_items = groups_size - 1
X = [_bundle_groups(grouped_data, index, usable_items) for index in range(0, len(grouped_data), usable_items)]
y = grouped_targets['close'].values.tolist()
return X, y
def cut_trailing(data, groups_size=4):
# Cut trailing data (remember that we are grouping by 'groups_size'):
while len(data) % groups_size > 0:
data = data.drop(len(data) - 1)
return data
def load():
""" Returns `X` and `y` arrays, the former being the training data and the former the targets. """
# Get data:
data = _get_data('coinbaseUSD_1-min_data_2014-12-01_to_2018-03-27.csv')
prices = _get_prices(data)
prices['day_of_week'] = prices['time'].dt.dayofweek
prices['day_of_month'] = prices['time'].dt.day
prices['day_of_month_scaled'] = prices['time'].dt.day / prices['time'].dt.days_in_month
prices['month'] = prices['time'].dt.month
prices['time_of_day'] = prices['time'].dt.time.apply(lambda time: str(time).split(':')[0]).astype(int)
return prices
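# --- Editor's usage sketch (not part of the original module): how split_to_X_y
# groups rows; assumes a tiny 8-row frame with only a 'close' column.
if __name__ == '__main__':
    demo = pd.DataFrame({'close': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]})
    X, y = split_to_X_y(demo, groups_size=4)
    # Every 4th row becomes a target; the 3 rows before it are concatenated as features.
    print(X)  # [array([1., 2., 3.]), array([5., 6., 7.])]
    print(y)  # [4.0, 8.0]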
| 28.072165
| 114
| 0.667646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 725
| 0.26625
|
3fd08ba20ef96dd8738b44a611be2e078eaec1a9
| 789
|
py
|
Python
|
django_alive/tests/test_command.py
|
lincolnloop/django-alive
|
608fd0a1d94f90f51a48a943348e9038f402a504
|
[
"MIT"
] | 25
|
2018-08-22T12:42:14.000Z
|
2022-01-18T19:03:05.000Z
|
django_alive/tests/test_command.py
|
lincolnloop/django-alive
|
608fd0a1d94f90f51a48a943348e9038f402a504
|
[
"MIT"
] | 7
|
2019-11-05T23:52:49.000Z
|
2020-07-01T18:03:32.000Z
|
django_alive/tests/test_command.py
|
lincolnloop/django-alive
|
608fd0a1d94f90f51a48a943348e9038f402a504
|
[
"MIT"
] | null | null | null |
import sys
from django.core.management import CommandError, call_command
from django.test import TestCase
from .side_effects import bad_database_check
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# Python 2.7 support
if sys.version_info > (3, 0):
from io import StringIO
else:
from io import BytesIO as StringIO
class CommandTestCase(TestCase):
def test_command(self):
out = StringIO()
call_command("healthcheck", stdout=out)
self.assertIn("OK", out.getvalue())
def test_command_failed(self):
with patch(
"django_alive.checks.check_database", side_effect=bad_database_check
):
with self.assertRaises(CommandError):
call_command("healthcheck")
| 24.65625
| 80
| 0.697085
| 419
| 0.531052
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.108999
|
3fd0cbcd561a8f1e4b20915840399b1fbffc54a0
| 2,579
|
py
|
Python
|
main.py
|
fanicia/file-backup
|
f85d59a77d3168def9805e70b8a2a1dacf99c69d
|
[
"MIT"
] | null | null | null |
main.py
|
fanicia/file-backup
|
f85d59a77d3168def9805e70b8a2a1dacf99c69d
|
[
"MIT"
] | null | null | null |
main.py
|
fanicia/file-backup
|
f85d59a77d3168def9805e70b8a2a1dacf99c69d
|
[
"MIT"
] | null | null | null |
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# importing the modules
import os.path
import shutil
import datetime
import time
import re
# getting the current working directory
src_dir = os.getcwd()
# printing current directory
print("########## File-backup started ###########")
class FileHandler(FileSystemEventHandler):
def on_modified(self, event):
if not event.is_directory:
self.copy_file(event.src_path)
def on_created(self, event):
if not event.is_directory:
self.copy_file(event.src_path)
def copy_file(self, src):
src_file_name = src.split(os.path.sep).pop()
destination_sub_path = self.extract_changed_sub_path(folder_to_track, src)
sub_path_list = destination_sub_path.split(os.path.sep)
changed_file_name = sub_path_list.pop()
path_to_file = f"{os.path.sep.join(sub_path_list)}{os.path.sep}"
timestamp = datetime.datetime.now().strftime("%d-%m-%y-%H-%M") # not the prettiest datetime-format, but it's filename-friendly
target = f"{destination}{path_to_file}{timestamp}-{changed_file_name}"
print(os.linesep)
print(src)
print(" |")
print(" |")
print(" V")
print(target)
print(os.linesep)
print("----------------------------------------")
os.makedirs(f"{destination}{path_to_file}", exist_ok = True)
shutil.copy(src, target)
def extract_changed_sub_path(self, base_path, changed_path):
# This turns the annoying "\" into "/", in case we are on windows
base_path = base_path.replace(os.path.sep, "/")
changed_path = changed_path.replace(os.path.sep, "/")
# use positive lookbehind assertion to find the part of the path after the base_path of the source
regex = re.compile(f"(?<={base_path})(.*)")
match = re.search(regex, changed_path)
sub_path = match.group().replace("/", os.path.sep)
return sub_path
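# --- Editor's sketch (not in the original): quick check of how the changed
# sub-path is derived; the example paths below are assumptions for illustration.
_demo_sub = FileHandler().extract_changed_sub_path(
    "/home/user/source", "/home/user/source/a/b.txt")
assert _demo_sub == f"{os.path.sep}a{os.path.sep}b.txt"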
folder_to_track = f"{os.getcwd()}{os.path.sep}testsubject{os.path.sep}source"
destination = f"{os.getcwd()}{os.path.sep}testsubject{os.path.sep}destination"
print(f"{folder_to_track} --> {destination}")
event_handler = FileHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
observer.stop()
observer.join()
print("########## File-backup ended ###########")
| 32.2375
| 134
| 0.643273
| 1,748
| 0.677782
| 0
| 0
| 0
| 0
| 0
| 0
| 823
| 0.319116
|
3fd11ac451a6a98e1c44245e5db55488106cc6f1
| 24,449
|
py
|
Python
|
tests/ignite/distributed/comp_models/test_native.py
|
Eunjnnn/ignite
|
743089705b2b252aa5e2a0f310da3a8724d6711e
|
[
"BSD-3-Clause"
] | 4,119
|
2017-11-23T18:10:37.000Z
|
2022-03-31T05:31:27.000Z
|
tests/ignite/distributed/comp_models/test_native.py
|
Eunjnnn/ignite
|
743089705b2b252aa5e2a0f310da3a8724d6711e
|
[
"BSD-3-Clause"
] | 1,838
|
2017-11-24T11:19:25.000Z
|
2022-03-31T09:08:18.000Z
|
tests/ignite/distributed/comp_models/test_native.py
|
Eunjnnn/ignite
|
743089705b2b252aa5e2a0f310da3a8724d6711e
|
[
"BSD-3-Clause"
] | 691
|
2017-11-24T10:57:33.000Z
|
2022-03-29T02:19:44.000Z
|
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
assert _expand_hostlist(hostlist) == expected.split(",")
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
@pytest.mark.distributed
def test__native_dist_model():
available_backends = _NativeDistModel.available_backends
if dist.is_nccl_available():
assert "nccl" in available_backends
else:
assert "nccl" not in available_backends
if dist.is_gloo_available():
assert "gloo" in available_backends
else:
assert "gloo" not in available_backends
if dist.is_mpi_available():
assert "mpi" in available_backends
else:
assert "mpi" not in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
_NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
import os
from datetime import timedelta
os.environ["RANK"] = "1"
with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
import os
from datetime import timedelta
os.environ["SLURM_JOB_ID"] = "1"
with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
_NativeDistModel.create_from_backend(
backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
)
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_NTASKS"] = "1"
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
os.environ["RANK"] = "1"
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
from datetime import timedelta
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
os.environ["RANK"] = f"{rank}"
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
if init_method is None:
assert model._init_method == "env://"
else:
assert model._init_method == init_method
model.finalize()
del os.environ["RANK"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
del os.environ["WORLD_SIZE"]
del os.environ["LOCAL_RANK"]
os.environ["SLURM_JOB_ID"] = "15000"
os.environ["SLURM_PROCID"] = str(rank)
os.environ["SLURM_LOCALID"] = str(local_rank)
os.environ["SLURM_NTASKS"] = str(world_size)
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
if "LOCAL_RANK" in os.environ:
del os.environ["LOCAL_RANK"]
from ignite.distributed.comp_models.base import ComputationModel
if ComputationModel._ext_local_rank is not None:
ComputationModel._ext_local_rank = None
with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
_NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
import os
remove_lrank = False
if "LOCAL_RANK" not in os.environ:
os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
remove_lrank = True
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
if remove_lrank:
del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
from ignite.distributed.comp_models.base import ComputationModel
lrank = None
if "LOCAL_RANK" in os.environ:
lrank = os.environ["LOCAL_RANK"]
del os.environ["LOCAL_RANK"]
ComputationModel._ext_local_rank = true_conf["local_rank"]
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
ComputationModel._ext_local_rank = None
if lrank is not None:
os.environ["LOCAL_RANK"] = lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
dist.barrier()
_test__native_dist_model_create_from_context_no_local_rank()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
true_conf = {
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("gloo", device)
_test__native_dist_model_create_from_context_no_dist("gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("nccl", device)
_test__native_dist_model_create_from_context_no_dist("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "gloo", device)
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"
_test__native_dist_model_create_from_backend_dist(
init_method, local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(
local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}")
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
dist.barrier()
# We deliberately incorrectly set cuda device to 0
torch.cuda.set_device(0)
model = _NativeDistModel.create_from_context()
assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
if local_rank == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
from ignite.distributed.utils import _model
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.device().type == torch.device(device).type
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
_NativeDistModel.spawn(
_test_dist_spawn_fn,
args=(backend, num_workers_per_machine, device),
kwargs_dict={},
backend=backend,
nproc_per_node=num_workers_per_machine,
init_method=init_method,
**spawn_kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
nproc = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_spawn("gloo", num_workers_per_machine=nproc, device=device, init_method=init_method)
if device.type == "cpu":
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, start_method="fork", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
num_workers_per_machine = torch.cuda.device_count()
_test__native_dist_model_spawn(
"nccl", num_workers_per_machine=num_workers_per_machine, device="cuda", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
with pytest.raises(ValueError, match=r"Arguments rank and world_size should be None"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size, init_method=init_method)
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", rank=local_rank, init_method=init_method)
@pytest.mark.parametrize(
"environ, expected",
[
# fmt: off
# usual SLURM env
(
{
"SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
},
[1, 1, 2, "c1", 17345]
),
# usual SLURM env mnode
(
{
"SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
},
[5, 1, 8, "c1", 17345]
),
# usual SLURM env 1 node, 1 task + torch.distributed.launch
(
{
"SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
},
[2, 2, 8, "127.0.0.1", 2233]
),
# usual SLURM env + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
},
[3, 3, 4, "c1", 12233]
),
# usual SLURM env mnode + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
},
[3, 1, 4, "c1", 12233]
),
# fmt: on
],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
ddp_keys = ["RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"]
ddp_vars = _setup_ddp_vars_from_slurm_env(environ)
for key, value in zip(ddp_keys, expected):
assert key in ddp_vars
assert ddp_vars[key] == value
def test__setup_ddp_vars_from_slurm_env_bad_configs():
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "[]",
"SLURM_JOB_ID": "12345",
}
_setup_ddp_vars_from_slurm_env(environ)
| 36.491045
| 120
| 0.674261
| 0
| 0
| 0
| 0
| 13,751
| 0.562436
| 0
| 0
| 7,199
| 0.29445
|
3fd17ae0190fb0fd9c0e5b30d818a6da541ac894
| 1,114
|
py
|
Python
|
src/popular_consistent_items/popular_consistent_items.py
|
masinag/popular_twitter_topics_mining
|
b86e05d7700cfca4dbf9db67cde50664d99e60f7
|
[
"MIT"
] | null | null | null |
src/popular_consistent_items/popular_consistent_items.py
|
masinag/popular_twitter_topics_mining
|
b86e05d7700cfca4dbf9db67cde50664d99e60f7
|
[
"MIT"
] | null | null | null |
src/popular_consistent_items/popular_consistent_items.py
|
masinag/popular_twitter_topics_mining
|
b86e05d7700cfca4dbf9db67cde50664d99e60f7
|
[
"MIT"
] | null | null | null |
import pandas as pd
from .apriori_opt import apriori as apriori_opt
from .apriori_basic import apriori as apriori_basic
# from memory_profiler import profile
from .utils import log
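# Editor's note, inferred from the code below (not in the original):
#   s - per-batch support threshold handed to the apriori routine
#   r - minimum fraction of time periods in which an itemset must be frequent
#   a - length of each time period in seconds (used as the pandas Grouper freq)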
def get_frequent_items_in_time(tweets, s, r, a, start=None, end=None, basic=False):
if tweets.empty:
return []
if not start:
start = pd.Timestamp(tweets.time.min().date())
if not end:
end = tweets.time.max()
frequent_itemset_f = apriori_basic if basic else apriori_opt
log("File read")
    topics_counter = {}  # number of time periods in which a topic was frequent
time_periods = 0
    grouper = pd.Grouper(key="time", origin=start, freq=f"{a}s")
for group, batch in tweets.groupby(grouper):
if group >= end:
break
log(f"Period of {group}")
frequent_items = frequent_itemset_f(batch.tokens, s)
for i in frequent_items:
topics_counter[i] = topics_counter.get(i, 0) + 1
time_periods += 1
min_support = r * time_periods
    return [i for i, o in topics_counter.items() if o >= min_support]
| 30.108108
| 83
| 0.653501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.123878
|
3fd22b642b9f6e7837ab39991845866acb71bd9d
| 929
|
py
|
Python
|
txt/test/teste.py
|
juliano777/apostila_python
|
521c05c1579a52d22d6b670af92e3763366b6301
|
[
"BSD-3-Clause"
] | 3
|
2020-04-18T20:07:39.000Z
|
2021-06-17T18:41:34.000Z
|
txt/test/teste.py
|
juliano777/apostila_python
|
521c05c1579a52d22d6b670af92e3763366b6301
|
[
"BSD-3-Clause"
] | null | null | null |
txt/test/teste.py
|
juliano777/apostila_python
|
521c05c1579a52d22d6b670af92e3763366b6301
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T20:07:46.000Z
|
2020-04-18T20:07:46.000Z
|
# -*- coding: utf-8 -*-
import time
''' Fibonacci function '''
def fibo(n):
if (n < 2): return n
else:
return fibo(n - 1) + fibo(n - 2)
''' Memoize function '''
def memoize(f):
# dictionary
mem = {}
''' Helper function '''
def memoizer(*param):
key = repr(param)
if not key in mem:
mem[key] = f(*param)
return mem[key]
return memoizer
# Start time
t1 = time.time()
# Loop
for i in range(35):
print('fib(%s) = %s' % (i, fibo(i)))
# End time
t2 = time.time()
# Total time
print('Execution time: %.3fs' % (t2 - t1))
# Take a pause
input('Press <ENTER> to continue\n')
# Memoization of fibo (closure)
fibo = memoize(fibo)
# Start time
t1 = time.time()
# loop after memoization
for i in range(40):
print('fib(%s) = %s' % (i, fibo(i)))
# End time
t2 = time.time()
# Total time
print('Execution time: %.3fs' % (t2 - t1))
| 16.298246
| 47
| 0.568353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 373
| 0.399786
|
3fd2e3175b855481fd32ee5d4ebc2f50e3468d9a
| 4,101
|
py
|
Python
|
Tests/Methods/Mesh/test_get_field.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Mesh/test_get_field.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Mesh/test_get_field.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from SciDataTool import DataTime, Data1D, DataLinspace, VectorField
from pyleecan.Classes.SolutionData import SolutionData
from pyleecan.Classes.SolutionMat import SolutionMat
from pyleecan.Classes.SolutionVector import SolutionVector
@pytest.mark.MeshSol
@pytest.mark.METHODS
class Test_get_field(TestCase):
""" Tests for get_field method from Solution classes"""
def test_SolutionMat(self):
DELTA = 1e-10
solution = SolutionMat()
solution.field = np.array([[1, 2, 3], [2, 3, 4]])
solution.axis_name = ["time", "indice"]
solution.axis_size = [2, 3]
field = solution.get_field()
correction = np.array([[1, 2, 3], [2, 3, 4]])
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
field = solution.get_field("time[0]", "indice[1,2]")
correction = np.array([[2, 3]])
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
def test_SolutionVector(self):
DELTA = 1e-10
Indices_Cell = Data1D(name="indice", values=[0, 1, 2, 4], is_components=True)
Time = DataLinspace(
name="time",
unit="s",
initial=0,
final=1,
number=10,
)
H = np.ones((10, 4, 2))
# Store the results for H
componentsH = {}
Hx_data = DataTime(
name="Magnetic Field Hx",
unit="A/m",
symbol="Hx",
axes=[Time, Indices_Cell],
values=H[:, :, 0],
)
componentsH["comp_x"] = Hx_data
Hy_data = DataTime(
name="Magnetic Field Hy",
unit="A/m",
symbol="Hy",
axes=[Time, Indices_Cell],
values=H[:, :, 1],
)
componentsH["comp_y"] = Hy_data
vecH = VectorField(name="Magnetic Field", symbol="H", components=componentsH)
solution = SolutionVector(field=vecH, type_cell="triangle", label="H")
field = solution.get_field()
correction = np.ones((10, 4, 2))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
field = solution.get_field("time[0]", "indice[1,2]")
correction = np.ones((2, 2))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
def test_SolutionData(self):
DELTA = 1e-10
Indices_Cell = Data1D(name="indice", values=[0, 1, 2, 4], is_components=True)
Time = DataLinspace(
name="time",
unit="s",
initial=0,
final=1,
number=10,
)
# Store the results for H
H = DataTime(
name="Magnetic Field Hx",
unit="A/m",
symbol="Hx",
axes=[Time, Indices_Cell],
values=np.ones((10, 4)),
)
solution = SolutionData(field=H, type_cell="triangle", label="H")
field = solution.get_field()
correction = np.ones((10, 4))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
field = solution.get_field("time[0]", "indice[1,2]")
correction = correction[0, 1:3]
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)
| 32.039063
| 87
| 0.566935
| 3,731
| 0.909778
| 0
| 0
| 3,773
| 0.92002
| 0
| 0
| 621
| 0.151426
|
3fd35632335b7013aa84b5d96f778f88b22e2bbe
| 17,026
|
py
|
Python
|
python/services/compute/beta/reservation.py
|
trodge/declarative-resource-client-library
|
2cb7718a5074776b3113cc18a7483b54022238f3
|
[
"Apache-2.0"
] | 16
|
2021-01-08T19:35:22.000Z
|
2022-03-23T16:23:49.000Z
|
python/services/compute/beta/reservation.py
|
trodge/declarative-resource-client-library
|
2cb7718a5074776b3113cc18a7483b54022238f3
|
[
"Apache-2.0"
] | 1
|
2021-08-18T19:12:20.000Z
|
2021-08-18T19:12:20.000Z
|
python/services/compute/beta/reservation.py
|
LaudateCorpus1/declarative-resource-client-library
|
a559c4333587fe9531cef150532e6fcafff153e4
|
[
"Apache-2.0"
] | 11
|
2021-03-18T11:27:28.000Z
|
2022-03-12T06:49:14.000Z
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import reservation_pb2
from google3.cloud.graphite.mmv2.services.google.compute import reservation_pb2_grpc
from typing import List
class Reservation(object):
def __init__(
self,
id: int = None,
self_link: str = None,
zone: str = None,
description: str = None,
name: str = None,
specific_reservation: dict = None,
commitment: str = None,
specific_reservation_required: bool = None,
status: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.id = id
self.zone = zone
self.description = description
self.name = name
self.specific_reservation = specific_reservation
self.commitment = commitment
self.specific_reservation_required = specific_reservation_required
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = reservation_pb2_grpc.ComputeBetaReservationServiceStub(channel.Channel())
request = reservation_pb2.ApplyComputeBetaReservationRequest()
if Primitive.to_proto(self.id):
request.resource.id = Primitive.to_proto(self.id)
if Primitive.to_proto(self.zone):
request.resource.zone = Primitive.to_proto(self.zone)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if ReservationSpecificReservation.to_proto(self.specific_reservation):
request.resource.specific_reservation.CopyFrom(
ReservationSpecificReservation.to_proto(self.specific_reservation)
)
else:
request.resource.ClearField("specific_reservation")
if Primitive.to_proto(self.commitment):
request.resource.commitment = Primitive.to_proto(self.commitment)
if Primitive.to_proto(self.specific_reservation_required):
request.resource.specific_reservation_required = Primitive.to_proto(
self.specific_reservation_required
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyComputeBetaReservation(request)
self.id = Primitive.from_proto(response.id)
self.self_link = Primitive.from_proto(response.self_link)
self.zone = Primitive.from_proto(response.zone)
self.description = Primitive.from_proto(response.description)
self.name = Primitive.from_proto(response.name)
self.specific_reservation = ReservationSpecificReservation.from_proto(
response.specific_reservation
)
self.commitment = Primitive.from_proto(response.commitment)
self.specific_reservation_required = Primitive.from_proto(
response.specific_reservation_required
)
self.status = ReservationStatusEnum.from_proto(response.status)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = reservation_pb2_grpc.ComputeBetaReservationServiceStub(channel.Channel())
request = reservation_pb2.DeleteComputeBetaReservationRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.id):
request.resource.id = Primitive.to_proto(self.id)
if Primitive.to_proto(self.zone):
request.resource.zone = Primitive.to_proto(self.zone)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if ReservationSpecificReservation.to_proto(self.specific_reservation):
request.resource.specific_reservation.CopyFrom(
ReservationSpecificReservation.to_proto(self.specific_reservation)
)
else:
request.resource.ClearField("specific_reservation")
if Primitive.to_proto(self.commitment):
request.resource.commitment = Primitive.to_proto(self.commitment)
if Primitive.to_proto(self.specific_reservation_required):
request.resource.specific_reservation_required = Primitive.to_proto(
self.specific_reservation_required
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteComputeBetaReservation(request)
@classmethod
def list(self, project, zone, service_account_file=""):
stub = reservation_pb2_grpc.ComputeBetaReservationServiceStub(channel.Channel())
request = reservation_pb2.ListComputeBetaReservationRequest()
request.service_account_file = service_account_file
request.Project = project
request.Zone = zone
return stub.ListComputeBetaReservation(request).items
def to_proto(self):
resource = reservation_pb2.ComputeBetaReservation()
if Primitive.to_proto(self.id):
resource.id = Primitive.to_proto(self.id)
if Primitive.to_proto(self.zone):
resource.zone = Primitive.to_proto(self.zone)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if ReservationSpecificReservation.to_proto(self.specific_reservation):
resource.specific_reservation.CopyFrom(
ReservationSpecificReservation.to_proto(self.specific_reservation)
)
else:
resource.ClearField("specific_reservation")
if Primitive.to_proto(self.commitment):
resource.commitment = Primitive.to_proto(self.commitment)
if Primitive.to_proto(self.specific_reservation_required):
resource.specific_reservation_required = Primitive.to_proto(
self.specific_reservation_required
)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class ReservationSpecificReservation(object):
def __init__(
self,
instance_properties: dict = None,
count: int = None,
in_use_count: int = None,
):
self.instance_properties = instance_properties
self.count = count
self.in_use_count = in_use_count
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = reservation_pb2.ComputeBetaReservationSpecificReservation()
if ReservationSpecificReservationInstanceProperties.to_proto(
resource.instance_properties
):
res.instance_properties.CopyFrom(
ReservationSpecificReservationInstanceProperties.to_proto(
resource.instance_properties
)
)
else:
res.ClearField("instance_properties")
if Primitive.to_proto(resource.count):
res.count = Primitive.to_proto(resource.count)
if Primitive.to_proto(resource.in_use_count):
res.in_use_count = Primitive.to_proto(resource.in_use_count)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ReservationSpecificReservation(
instance_properties=ReservationSpecificReservationInstanceProperties.from_proto(
resource.instance_properties
),
count=Primitive.from_proto(resource.count),
in_use_count=Primitive.from_proto(resource.in_use_count),
)
class ReservationSpecificReservationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ReservationSpecificReservation.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ReservationSpecificReservation.from_proto(i) for i in resources]
class ReservationSpecificReservationInstanceProperties(object):
def __init__(
self,
machine_type: str = None,
guest_accelerators: list = None,
min_cpu_platform: str = None,
local_ssds: list = None,
):
self.machine_type = machine_type
self.guest_accelerators = guest_accelerators
self.min_cpu_platform = min_cpu_platform
self.local_ssds = local_ssds
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
reservation_pb2.ComputeBetaReservationSpecificReservationInstanceProperties()
)
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
if ReservationSpecificReservationInstancePropertiesGuestAcceleratorsArray.to_proto(
resource.guest_accelerators
):
res.guest_accelerators.extend(
ReservationSpecificReservationInstancePropertiesGuestAcceleratorsArray.to_proto(
resource.guest_accelerators
)
)
if Primitive.to_proto(resource.min_cpu_platform):
res.min_cpu_platform = Primitive.to_proto(resource.min_cpu_platform)
if ReservationSpecificReservationInstancePropertiesLocalSsdsArray.to_proto(
resource.local_ssds
):
res.local_ssds.extend(
ReservationSpecificReservationInstancePropertiesLocalSsdsArray.to_proto(
resource.local_ssds
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ReservationSpecificReservationInstanceProperties(
machine_type=Primitive.from_proto(resource.machine_type),
guest_accelerators=ReservationSpecificReservationInstancePropertiesGuestAcceleratorsArray.from_proto(
resource.guest_accelerators
),
min_cpu_platform=Primitive.from_proto(resource.min_cpu_platform),
local_ssds=ReservationSpecificReservationInstancePropertiesLocalSsdsArray.from_proto(
resource.local_ssds
),
)
class ReservationSpecificReservationInstancePropertiesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ReservationSpecificReservationInstanceProperties.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ReservationSpecificReservationInstanceProperties.from_proto(i)
for i in resources
]
class ReservationSpecificReservationInstancePropertiesGuestAccelerators(object):
def __init__(self, accelerator_type: str = None, accelerator_count: int = None):
self.accelerator_type = accelerator_type
self.accelerator_count = accelerator_count
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
reservation_pb2.ComputeBetaReservationSpecificReservationInstancePropertiesGuestAccelerators()
)
if Primitive.to_proto(resource.accelerator_type):
res.accelerator_type = Primitive.to_proto(resource.accelerator_type)
if Primitive.to_proto(resource.accelerator_count):
res.accelerator_count = Primitive.to_proto(resource.accelerator_count)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ReservationSpecificReservationInstancePropertiesGuestAccelerators(
accelerator_type=Primitive.from_proto(resource.accelerator_type),
accelerator_count=Primitive.from_proto(resource.accelerator_count),
)
class ReservationSpecificReservationInstancePropertiesGuestAcceleratorsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ReservationSpecificReservationInstancePropertiesGuestAccelerators.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ReservationSpecificReservationInstancePropertiesGuestAccelerators.from_proto(
i
)
for i in resources
]
class ReservationSpecificReservationInstancePropertiesLocalSsds(object):
def __init__(self, disk_size_gb: int = None, interface: str = None):
self.disk_size_gb = disk_size_gb
self.interface = interface
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
reservation_pb2.ComputeBetaReservationSpecificReservationInstancePropertiesLocalSsds()
)
if Primitive.to_proto(resource.disk_size_gb):
res.disk_size_gb = Primitive.to_proto(resource.disk_size_gb)
if ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum.to_proto(
resource.interface
):
res.interface = ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum.to_proto(
resource.interface
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ReservationSpecificReservationInstancePropertiesLocalSsds(
disk_size_gb=Primitive.from_proto(resource.disk_size_gb),
interface=ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum.from_proto(
resource.interface
),
)
class ReservationSpecificReservationInstancePropertiesLocalSsdsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ReservationSpecificReservationInstancePropertiesLocalSsds.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ReservationSpecificReservationInstancePropertiesLocalSsds.from_proto(i)
for i in resources
]
class ReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return reservation_pb2.ComputeBetaReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum.Value(
"ComputeBetaReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return reservation_pb2.ComputeBetaReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum.Name(
resource
)[
len(
"ComputeBetaReservationSpecificReservationInstancePropertiesLocalSsdsInterfaceEnum"
) :
]
class ReservationStatusEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return reservation_pb2.ComputeBetaReservationStatusEnum.Value(
"ComputeBetaReservationStatusEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return reservation_pb2.ComputeBetaReservationStatusEnum.Name(resource)[
len("ComputeBetaReservationStatusEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
| 36.615054
| 119
| 0.681017
| 16,171
| 0.949783
| 0
| 0
| 8,482
| 0.498179
| 0
| 0
| 917
| 0.053859
|
3fd4a59a910de7324648c153841dea6bd5328a4e
| 4,682
|
py
|
Python
|
Examples/cimpleCraft/cimple4.py
|
shaesaert/TuLiPXML
|
56cf4d58a9d7e17b6f6aebe6de8d5a1231035671
|
[
"BSD-3-Clause"
] | 1
|
2021-05-28T23:44:28.000Z
|
2021-05-28T23:44:28.000Z
|
Examples/cimpleCraft/cimple4.py
|
shaesaert/TuLiPXML
|
56cf4d58a9d7e17b6f6aebe6de8d5a1231035671
|
[
"BSD-3-Clause"
] | 2
|
2017-10-03T18:54:08.000Z
|
2018-08-21T09:50:09.000Z
|
Examples/cimpleCraft/cimple4.py
|
shaesaert/TuLiPXML
|
56cf4d58a9d7e17b6f6aebe6de8d5a1231035671
|
[
"BSD-3-Clause"
] | 1
|
2018-10-06T12:58:52.000Z
|
2018-10-06T12:58:52.000Z
|
# Import modules
from __future__ import print_function
import sys
import numpy as np
from polytope import box2poly
from tulip import hybrid
from tulip.abstract import prop2part, discretize
import Interface.DSL as DSL
from Interface import Statechart as dumpsmach
from Interface.Reduce import *
from Interface.Transform import *
from tulip import spec, synth  # spec.GRSpec and synth.synthesize are used below; made explicit in case the wildcard imports do not re-export them
print("----------------------------------\n Script options \n----------------------------------")
verbose = 1 # Decrease printed output = 0, increase= 1
print("""----------------------------------\n System Definition \n----------------------------------
-- System Constants
-- System Label State Space & partition
""")
# System constants
input_bound = 1.0
disturbance_bound = 0.1
# The system dynamics
A = np.array([[1., 0, 2., 0], [0, 1., 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])
B = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])
E = np.array([[1., 0, 0, 0], [0, 1., 0, 0], [0, 0, 1., 0], [0, 0, 0, 1.]])
# $x^+=Ax+Bu+E W$
# Size of the sets
X = box2poly([[0, 100.], [0, 100.], [-5, 5.], [-5, 5.]])
U = box2poly(input_bound*np.array([[0, 1], [0, 1], [0, 1], [0, 1]]))
W = box2poly(disturbance_bound*np.array([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]))
print("----------------------------------\n Define system\n----------------------------------")
# Intermezzo polytope tutorial
# https://github.com/tulip-control/polytope/blob/master/doc/tutorial.md
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, X)
print(str(sys_dyn))
print("----------------------------------\n Define labelling \n----------------------------------")
cprops ={}
cprops["inA"] = box2poly([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inB"] = box2poly([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inObj1"] = box2poly([[15, 35], [30, 70], [-5, 5], [-5, 5]])
cprops["inObj2"] = box2poly([[65, 85], [30, 70], [-5, 5], [-5, 5]])
cpartition = prop2part(X, cprops)
if verbose == 1:
print("partition before refinement")
print(cpartition)
print("---------------------------------\n System partition State Space \n----------------------------------")
disc_dynamics = discretize(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True, conservative=True)
states=[state for (state, label) in disc_dynamics.ts.states.find(with_attr_dict={'ap': {'inA'}})]
disc_dynamics.ts.states.initial|=states
print("----------------------------------\n Define specification \n----------------------------------")
# Specifications
# Environment variables and assumptions
env_vars = list()
env_init = list()
env_safe = list()
env_prog = list()
# System variables and requirements
sys_vars = ['inA', 'inB']
sys_init = ['inA']
sys_safe = ['!inObj1', '!inObj2']
sys_prog = ['inA', 'inB']
(ctrl_modes, grspec) = transform2control(disc_dynamics.ts, statevar='ctrl')
print("----------------------------------\n Combine sys and spec \n----------------------------------")
phi = grspec | spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
phi.qinit = '\A \E'
phi.moore = False
phi.plus_one = False
ctrl = synth.synthesize(phi,ignore_sys_init=True)
#
# print("----------------------------------\n Reduce states \n----------------------------------")
#
# Events_init = {('fullGas', True)}
#
#
# ctrl_red=reduce_mealy(ctrl,relabel=False,outputs={'ctrl'}, prune_set=Events_init, combine_trans=False)
#
print("----------------------------------\n Output results \n----------------------------------")
if verbose == 1:
print(" (Verbose) ")
try:
disc_dynamics.ts.save("cimple_aircraft_orig.png")
ctrl_modes.save("cimple_aircraft_modes.png")
# ctrl_red.save('cimple_aircraft_ctrl_red.png')
ctrl.save("cimple_aircraft_ctrl_orig.png")
print(" (Verbose): saved all Finite State Transition Systems ")
except Exception:
pass
print('nodes in ctrl:')
print(len(ctrl.nodes()))
print(len(ctrl.transitions()))
print('\n')
#
# print('nodes in ctrl_red:')
# print(len(ctrl_red.nodes()))
# print(len(ctrl_red.transitions()))
# print('\n')
#
#
print("----------------------------------\n Convert controller to Xmi \n----------------------------------")
sys.stdout.flush()
# --------------- Writing the statechart -----------
try:
filename = str(__file__)
filename = filename[0:-3] + "_gen"
except NameError:
filename = "test_gen"
# write strategy plus control modes at the same time to a statechart
with open(filename+".xml", "w") as f:
# f.write(dumpsmach.tulip_to_xmi(ctrl_red,ctrl_modes))
f.write(dumpsmach.tulip_to_xmi(ctrl, ctrl_modes))
| 32.971831
| 110
| 0.548056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,184
| 0.466467
|
3fd50d9f4c976d633be6e56345cbe4edfe16b20b
| 561
|
py
|
Python
|
CableClub/cable_club_colosseum.py
|
V-FEXrt/Pokemon-Spoof-Plus
|
d397d680742496b7f64b401511da7eb57f63c973
|
[
"MIT"
] | 2
|
2017-05-04T20:24:19.000Z
|
2017-05-04T20:58:07.000Z
|
CableClub/cable_club_colosseum.py
|
V-FEXrt/Pokemon-Spoof-Plus
|
d397d680742496b7f64b401511da7eb57f63c973
|
[
"MIT"
] | null | null | null |
CableClub/cable_club_colosseum.py
|
V-FEXrt/Pokemon-Spoof-Plus
|
d397d680742496b7f64b401511da7eb57f63c973
|
[
"MIT"
] | null | null | null |
from AI.team_manager import TeamManager
from CableClub.cable_club_constants import Com
out_byte = 0
last_recieved = 0
count = 0
def colosseum_process_byte(byte):
global out_byte, last_recieved, count
if byte >= Com.ATTACK_MOVE_1 and byte <= Com.SWITCH_POKEMON_6:
if count == 12:
last_recieved = 0
if last_recieved == byte:
count += 1
return out_byte
count = 0
last_recieved = byte
out_byte = TeamManager.colosseum.processTurn(byte)
return out_byte
return byte
| 24.391304
| 66
| 0.654189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3fd6f8b99302959fd856c0174a84ad3698e8de10
| 931
|
py
|
Python
|
workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py
|
NBISweden/manticore-smk
|
fd0b4ccd4239dc91dac423d0ea13478d36702561
|
[
"MIT"
] | null | null | null |
workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py
|
NBISweden/manticore-smk
|
fd0b4ccd4239dc91dac423d0ea13478d36702561
|
[
"MIT"
] | null | null | null |
workflow/wrappers/bio/popoolation2/indel_filtering_identify_indel_regions/wrapper.py
|
NBISweden/manticore-smk
|
fd0b4ccd4239dc91dac423d0ea13478d36702561
|
[
"MIT"
] | 2
|
2021-08-23T16:09:51.000Z
|
2021-11-12T21:35:56.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Per Unneberg"
__copyright__ = "Copyright 2020, Per Unneberg"
__email__ = "per.unneberg@scilifelab.se"
__license__ = "MIT"
import os
import re
import tempfile
from snakemake.shell import shell
from snakemake.utils import logger
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
conda_prefix = os.getenv("CONDA_PREFIX")
script = os.path.join(
conda_prefix, "opt/popoolation2-code/indel_filtering/identify-indel-regions.pl"
)
options = snakemake.params.get("options", "")
mpileup = snakemake.input.mpileup
tmp = os.path.basename(tempfile.mkstemp()[1])
fifo = f"{mpileup}{tmp}.fifo"
if os.path.exists(fifo):
os.unlink(fifo)
shell("mkfifo {fifo}")
shell("zcat {mpileup} > {fifo} &")
shell(
"perl "
"{script} "
"{options} "
"--input {fifo} "
"--output {snakemake.output.gtf} "
"{log}"
)
if os.path.exists(fifo):
os.unlink(fifo)
| 22.707317
| 83
| 0.692803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 364
| 0.390977
|
3fd71b6b624dc062e8df4e8fc57377ace10d329d
| 6,741
|
py
|
Python
|
open_publishing/provision/progression_rule.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
open_publishing/provision/progression_rule.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
open_publishing/provision/progression_rule.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty
from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField
from open_publishing.user import User
from open_publishing.core.enums import ValueStatus
from open_publishing.core.enums import ProvisionRuleRole, ProvisionChannelType, ProvisionChannelBase
from open_publishing.core.enums import ProvisionRuleAlgorithm
from .rule import ProvisionRule
from .filter_list import ProvisionFilterList
class Progression(SequenceItem):
def __init__(self,
threshold,
rate):
super(Progression, self).__init__(ValueStatus.soft)
self.threshold = threshold
self.rate = rate
self._status = ValueStatus.soft
threshold = SequenceItemProperty('threshold')
rate = SequenceItemProperty('rate')
@classmethod
def from_gjp(cls, gjp, database_object):
threshold = gjp['threshold']
rate = gjp['value']
return cls(threshold,
rate)
def to_gjp(self):
return {'threshold': self.threshold,
'value': self.rate}
class ChannelProgressionList(SequenceField):
_item_type = Progression
def __init__(self,
rule):
super(ChannelProgressionList, self).__init__(rule,
'channels.*',
'progressions')
def add(self,
threshold,
value):
self._list.append(Progression(threshold,
value))
self._status = ValueStatus.hard
return self[-1]
def from_gjp(self,
gjp):
self._list = []
for item in gjp['progressions'] if gjp['progressions'] else []:
self._list.append(self._item_type.from_gjp(item, self.database_object))
self._status = ValueStatus.soft
def to_gjp(self):
return [item.to_gjp() for item in self._list]
class ProgressionChannel(SequenceItem):
def __init__(self,
rule,
channel_type,
base,
group):
super(ProgressionChannel, self).__init__(ValueStatus.soft)
self._rule = rule
self.channel_type = channel_type
self.base = base
self.progressions = ChannelProgressionList(self._rule)
self.group = group
self._status = ValueStatus.soft
channel_type = SequenceItemProperty('channel_type')
base = SequenceItemProperty('base')
group = SequenceItemProperty('group')
@property
def rule(self):
return self._rule
@property
def status(self):
if self.progressions.status is ValueStatus.hard:
return ValueStatus.hard
else:
return super(ProgressionChannel, self).status
@classmethod
def from_gjp(cls, gjp, database_object):
channel_type = ProvisionChannelType.from_id(gjp['channel'])
base = ProvisionChannelBase.from_id(gjp['basis'])
group = gjp['group']
res = cls(database_object,
channel_type,
base,
group if group !='' else None)
res.progressions.from_gjp(gjp)
return res
def to_gjp(self):
res = {'channel': self.channel_type.identifier,
'basis': self.base.identifier,
'group': self.group if self.group else '',
'progressions': self.progressions.to_gjp()}
return res
class ProgressionChannelsList(SequenceField):
_item_type = ProgressionChannel
def __init__(self,
rule):
super(ProgressionChannelsList, self).__init__(rule,
'channels.*',
'channels')
def add(self,
rate,
channel_type = ProvisionChannelType.book_and_ebook,
base = ProvisionChannelBase.net_price,
progressions = None,
group = None):
progression_channel = ProgressionChannel(self.database_object,
channel_type,
base,
group)
progression_channel.progressions.add(1, rate)
if progressions is not None:
for threshold, rate in progressions:
progression_channel.progressions.add(threshold, rate)
self._list.append(progression_channel)
self._status = ValueStatus.hard
return self[-1]
class ProgressionRule(ProvisionRule):
def __init__(self,
context,
rule_id):
super(ProgressionRule, self).__init__(context,
rule_id)
self._fields['recipient'] = DatabaseObjectField(parent=self,
aspect='*',
field_locator='recipient_user_id',
dtype=User)
self._fields['role'] = SimpleField(database_object=self,
aspect='*',
field_locator='role',
dtype=ProvisionRuleRole)
self._fields['channels'] = ProgressionChannelsList(rule=self)
recipient = FieldDescriptor('recipient')
role = FieldDescriptor('role')
channels = FieldDescriptor('channels')
class ProgressionList(ProvisionFilterList):
_filter = ProvisionRuleAlgorithm.progression
def __init__(self,
provision_list):
super(ProgressionList, self).__init__(provision_list)
def add(self,
recipient,
role):
with ProgressionRule._create(self._provision_list._database_object._context,
channels=[]) as rule:
rule._algorithm = ProvisionRuleAlgorithm.progression
rule._source_type = 'DOCUMENT'
rule._reference_id = self._provision_list._document.document_id
rule._scope = 'SALE'
rule.recipient = recipient
rule.role = role
new_rule = ProgressionRule(self._provision_list._database_object._context,
rule.rule_id)
self._provision_list._objects[rule.rule_id] = new_rule
self._provision_list._ids.append(rule.rule_id)
return self[-1]
| 35.856383
| 100
| 0.558374
| 6,209
| 0.92108
| 0
| 0
| 838
| 0.124314
| 0
| 0
| 321
| 0.047619
|
3fd8a3a7cd4b29135af9a933907e8e7ce8de084c
| 2,746
|
py
|
Python
|
forms/utils.py
|
braceio/forms
|
deb12f37447d6167ad284ae68085a02454c8f649
|
[
"MIT"
] | 36
|
2015-01-02T05:15:02.000Z
|
2018-03-06T11:36:41.000Z
|
forms/utils.py
|
braceio/forms
|
deb12f37447d6167ad284ae68085a02454c8f649
|
[
"MIT"
] | 1
|
2015-02-16T20:03:41.000Z
|
2016-01-01T23:42:25.000Z
|
forms/utils.py
|
braceio/forms
|
deb12f37447d6167ad284ae68085a02454c8f649
|
[
"MIT"
] | 20
|
2015-01-04T21:38:12.000Z
|
2021-01-17T12:59:10.000Z
|
from datetime import timedelta
from functools import update_wrapper
from flask import make_response, current_app, request, url_for, jsonify
import uuid
# decorators
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
if request.method == 'OPTIONS':
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def request_wants_json():
if request.headers.get('X_REQUESTED_WITH','').lower() == 'xmlhttprequest':
return True
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
def jsonerror(code, *args, **kwargs):
resp = jsonify(*args, **kwargs)
resp.status_code = code
return resp
def uuidslug():
return uuid2slug(uuid.uuid4())
def uuid2slug(uuidobj):
return uuidobj.bytes.encode('base64').rstrip('=\n').replace('/', '_')
def slug2uuid(slug):
return str(uuid.UUID(bytes=(slug + '==').replace('_', '/').decode('base64')))
def get_url(endpoint, secure=False, **values):
''' protocol preserving url_for '''
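    # Illustrative call (hypothetical endpoint name): get_url('index') returns the plain
    # path from url_for, while get_url('index', secure=True) prefixes that path with
    # "https://" plus the host taken from the current request URL.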
path = url_for(endpoint, **values)
if secure:
url_parts = request.url.split('/', 3)
path = "https://" + url_parts[2] + path
return path
| 31.563218
| 81
| 0.621267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 350
| 0.127458
|
3fd8c6ef2dca4f5f0372db69829883a2a443d40b
| 4,536
|
py
|
Python
|
tests/ref_test.py
|
lykme516/pykka
|
d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5
|
[
"Apache-2.0"
] | 1
|
2021-01-03T09:25:23.000Z
|
2021-01-03T09:25:23.000Z
|
tests/ref_test.py
|
hujunxianligong/pykka
|
d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5
|
[
"Apache-2.0"
] | null | null | null |
tests/ref_test.py
|
hujunxianligong/pykka
|
d66b0c49658fc0e7c4e1ae46a0f9c50c7e964ca5
|
[
"Apache-2.0"
] | null | null | null |
import time
import unittest
from pykka import ActorDeadError, ThreadingActor, ThreadingFuture, Timeout
class AnActor(object):
def __init__(self, received_message):
super(AnActor, self).__init__()
self.received_message = received_message
def on_receive(self, message):
if message.get('command') == 'ping':
self.sleep(0.01)
return 'pong'
else:
self.received_message.set(message)
class RefTest(object):
def setUp(self):
self.received_message = self.future_class()
self.ref = self.AnActor.start(self.received_message)
def tearDown(self):
self.ref.stop()
def test_repr_is_wrapped_in_lt_and_gt(self):
result = repr(self.ref)
self.assertTrue(result.startswith('<'))
self.assertTrue(result.endswith('>'))
def test_repr_reveals_that_this_is_a_ref(self):
self.assertTrue('ActorRef' in repr(self.ref))
def test_repr_contains_actor_class_name(self):
self.assertTrue('AnActor' in repr(self.ref))
def test_repr_contains_actor_urn(self):
self.assertTrue(self.ref.actor_urn in repr(self.ref))
def test_str_contains_actor_class_name(self):
self.assertTrue('AnActor' in str(self.ref))
def test_str_contains_actor_urn(self):
self.assertTrue(self.ref.actor_urn in str(self.ref))
def test_is_alive_returns_true_for_running_actor(self):
self.assertTrue(self.ref.is_alive())
def test_is_alive_returns_false_for_dead_actor(self):
self.ref.stop()
self.assertFalse(self.ref.is_alive())
def test_stop_returns_true_if_actor_is_stopped(self):
self.assertTrue(self.ref.stop())
def test_stop_does_not_stop_already_dead_actor(self):
self.ref.stop()
try:
self.assertFalse(self.ref.stop())
except ActorDeadError:
self.fail('Should never raise ActorDeadError')
def test_tell_delivers_message_to_actors_custom_on_receive(self):
self.ref.tell({'command': 'a custom message'})
self.assertEqual(
{'command': 'a custom message'}, self.received_message.get())
def test_tell_fails_if_actor_is_stopped(self):
self.ref.stop()
try:
self.ref.tell({'command': 'a custom message'})
self.fail('Should raise ActorDeadError')
except ActorDeadError as exception:
self.assertEqual('%s not found' % self.ref, str(exception))
def test_ask_blocks_until_response_arrives(self):
result = self.ref.ask({'command': 'ping'})
self.assertEqual('pong', result)
def test_ask_can_timeout_if_blocked_too_long(self):
try:
self.ref.ask({'command': 'ping'}, timeout=0)
self.fail('Should raise Timeout exception')
except Timeout:
pass
def test_ask_can_return_future_instead_of_blocking(self):
future = self.ref.ask({'command': 'ping'}, block=False)
self.assertEqual('pong', future.get())
def test_ask_fails_if_actor_is_stopped(self):
self.ref.stop()
try:
self.ref.ask({'command': 'ping'})
self.fail('Should raise ActorDeadError')
except ActorDeadError as exception:
self.assertEqual('%s not found' % self.ref, str(exception))
def test_ask_nonblocking_fails_future_if_actor_is_stopped(self):
self.ref.stop()
future = self.ref.ask({'command': 'ping'}, block=False)
try:
future.get()
self.fail('Should raise ActorDeadError')
except ActorDeadError as exception:
self.assertEqual('%s not found' % self.ref, str(exception))
def ConcreteRefTest(actor_class, future_class, sleep_function):
class C(RefTest, unittest.TestCase):
class AnActor(AnActor, actor_class):
def sleep(self, seconds):
sleep_function(seconds)
C.__name__ = '%sRefTest' % (actor_class.__name__,)
C.future_class = future_class
return C
ThreadingActorRefTest = ConcreteRefTest(
ThreadingActor, ThreadingFuture, time.sleep)
try:
import gevent
from pykka.gevent import GeventActor, GeventFuture
GeventActorRefTest = ConcreteRefTest(
GeventActor, GeventFuture, gevent.sleep)
except ImportError:
pass
try:
import eventlet
from pykka.eventlet import EventletActor, EventletFuture
EventletActorRefTest = ConcreteRefTest(
EventletActor, EventletFuture, eventlet.sleep)
except ImportError:
pass
| 31.068493
| 74
| 0.668651
| 3,744
| 0.825397
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.094797
|
3fda3cc0af3e5e42cd6c1e11390f1713cf4c09d1
| 3,365
|
py
|
Python
|
tests/unit/test_baseObject.py
|
asaranprasad/nvda
|
e9609694acbfb06398eb6552067a0dcd532d67af
|
[
"bzip2-1.0.6"
] | 1
|
2018-11-16T10:15:59.000Z
|
2018-11-16T10:15:59.000Z
|
tests/unit/test_baseObject.py
|
asaranprasad/nvda
|
e9609694acbfb06398eb6552067a0dcd532d67af
|
[
"bzip2-1.0.6"
] | 3
|
2017-09-29T17:14:18.000Z
|
2019-05-20T16:13:39.000Z
|
tests/unit/test_baseObject.py
|
asaranprasad/nvda
|
e9609694acbfb06398eb6552067a0dcd532d67af
|
[
"bzip2-1.0.6"
] | 1
|
2017-09-29T08:53:52.000Z
|
2017-09-29T08:53:52.000Z
|
#tests/unit/test_baseObject.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2018 NV Access Limited, Babbage B.V.
"""Unit tests for the baseObject module, its classes and their derivatives."""
import unittest
from baseObject import ScriptableObject
from objectProvider import PlaceholderNVDAObject
from scriptHandler import script
class NVDAObjectWithDecoratedScript(PlaceholderNVDAObject):
"""An object with a decorated script."""
@script(gestures=["kb:a"])
def script_alpha(self, gesture):
return
class NVDAObjectWithGesturesDictionary(PlaceholderNVDAObject):
"""An object with a script that is bound to a gesture in a L{__gestures} dictionary."""
def script_bravo(self, gesture):
return
__gestures = {
"kb:b": "bravo"
}
class NVDAObjectWithDecoratedScriptAndGesturesDictionary(PlaceholderNVDAObject):
"""An object with a decorated script
and a script that is bound to a gesture in a L{__gestures} dictionary.
"""
@script(gestures=["kb:c"])
def script_charlie(self, gesture):
return
def script_delta(self, gesture):
return
__gestures = {
"kb:d": "delta",
}
class SubclassedNVDAObjectWithDecoratedScriptAndGesturesDictionary(
NVDAObjectWithDecoratedScript,
NVDAObjectWithGesturesDictionary,
NVDAObjectWithDecoratedScriptAndGesturesDictionary
):
"""An object with decorated scripts and L{__gestures} dictionaries, based on subclassing."""
@script(gestures=["kb:e"])
def script_echo(self, gesture):
return
def script_foxtrot(self, gesture):
return
__gestures = {
"kb:f": "foxtrot",
}
class DynamicNVDAObjectWithDecoratedScriptAndGesturesDictionary(PlaceholderNVDAObject):
"""An object with decorated scripts and L{__gestures} dictionaries,
using the chooseOverlayClasses logic to construct a dynamic object."""
def findOverlayClasses(self, clsList):
clsList.extend([
NVDAObjectWithDecoratedScript,
NVDAObjectWithGesturesDictionary,
NVDAObjectWithDecoratedScriptAndGesturesDictionary
])
@script(gestures=["kb:g"])
def script_golf(self, gesture):
return
def script_hotel(self, gesture):
return
__gestures = {
"kb:h": "hotel",
}
class TestScriptableObject(unittest.TestCase):
"""A test that verifies whether scripts are properly bound to associated gestures."""
def test_decoratedScript(self):
obj = NVDAObjectWithDecoratedScript()
self.assertIn("kb:a", obj._gestureMap)
def test_gesturesDictionary(self):
obj = NVDAObjectWithGesturesDictionary()
self.assertIn("kb:b", obj._gestureMap)
def test_decoratedScriptAndGesturesDictionary(self):
obj = NVDAObjectWithDecoratedScriptAndGesturesDictionary()
self.assertIn("kb:c", obj._gestureMap)
self.assertIn("kb:d", obj._gestureMap)
def test_decoratedScriptsAndGestureDictionariesIfSubclassed(self):
obj = SubclassedNVDAObjectWithDecoratedScriptAndGesturesDictionary()
for key in ("a", "b", "c", "d", "e", "f"):
self.assertIn("kb:%s" % key, obj._gestureMap)
def test_decoratedScriptsAndGestureDictionariesIfDynamic(self):
obj = DynamicNVDAObjectWithDecoratedScriptAndGesturesDictionary()
for key in ("a", "b", "c", "d", "g", "h"):
self.assertIn("kb:%s" % key, obj._gestureMap)
| 30.044643
| 94
| 0.744428
| 2,887
| 0.857949
| 0
| 0
| 284
| 0.084398
| 0
| 0
| 1,012
| 0.300743
|
3fda75ffd417e01dfff80ddf791281704e021a18
| 3,960
|
py
|
Python
|
querybook/server/lib/query_executor/connection_string/hive.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 1,144
|
2021-03-30T05:06:16.000Z
|
2022-03-31T10:40:31.000Z
|
querybook/server/lib/query_executor/connection_string/hive.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 593
|
2021-07-01T10:34:25.000Z
|
2022-03-31T23:24:40.000Z
|
querybook/server/lib/query_executor/connection_string/hive.py
|
shivammmmm/querybook
|
71263eb7db79e56235ea752f2cf3339ca9b3a092
|
[
"Apache-2.0"
] | 113
|
2021-03-30T00:07:20.000Z
|
2022-03-31T07:18:43.000Z
|
import re
from typing import Dict, Tuple, List, NamedTuple, Optional
from lib.utils.decorators import with_exception_retry
from .helpers.common import (
split_hostport,
get_parsed_variables,
merge_hostport,
random_choice,
)
from .helpers.zookeeper import get_hostname_and_port_from_zk
# TODO: make these configurable?
MAX_URI_FETCH_ATTEMPTS = 10
MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC = 5
class RawHiveConnectionConf(NamedTuple):
# Raw Connection Configuration that's from a string -> dict transformation
hosts: List[Tuple[str, Optional[int]]]
default_db: str
session_variables: Dict[str, str]
conf_list: Dict[str, str]
var_list: Dict[str, str]
class HiveConnectionConf(NamedTuple):
host: str
port: Optional[int]
default_db: str
configuration: Dict[str, str]
def _extract_connection_url(connection_string: str) -> RawHiveConnectionConf:
# Parser for Hive JDBC string
# Loosely based on https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC
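    # Illustrative (hypothetical) connection string, not taken from any real config:
    #   "jdbc:hive2://zk1:2181,zk2:2181/default;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"
    # would parse to hosts=[("zk1", 2181), ("zk2", 2181)], default_db="default",
    # session_variables={"serviceDiscoveryMode": "zooKeeper", "zooKeeperNamespace": "hiveserver2"},
    # and empty conf_list / var_list.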
match = re.search(
r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$", # noqa: E501
connection_string,
)
hosts = match.group(1)
default_db = match.group(2) or "default"
session_variables = match.group(3) or ""
conf_list = match.group(4) or ""
var_list = match.group(5) or ""
parsed_hosts = []
for hostport in hosts.split(","):
parsed_hosts.append(split_hostport(hostport))
parsed_session_variables = get_parsed_variables(session_variables[1:])
parsed_conf_list = get_parsed_variables(conf_list[1:])
parsed_var_list = get_parsed_variables(var_list[1:])
return RawHiveConnectionConf(
hosts=parsed_hosts,
default_db=default_db,
session_variables=parsed_session_variables,
conf_list=parsed_conf_list,
var_list=parsed_var_list,
)
@with_exception_retry(
max_retry=MAX_URI_FETCH_ATTEMPTS,
get_retry_delay=lambda retry: min(MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC, retry),
)
def get_hive_host_port_from_zk(
connection_conf: RawHiveConnectionConf,
) -> Tuple[str, int]:
zk_quorum = ",".join(
map(lambda hostport: merge_hostport(hostport), connection_conf.hosts)
)
zk_namespace = connection_conf.session_variables.get("zooKeeperNamespace")
raw_server_uris = get_hostname_and_port_from_zk(zk_quorum, zk_namespace) or []
server_uri_dicts = filter(
lambda d: d is not None,
[_server_uri_to_dict(raw_server_uri) for raw_server_uri in raw_server_uris],
)
server_uris = list(map(lambda d: d["serverUri"], server_uri_dicts))
random_server_uri = random_choice(server_uris)
if not random_server_uri:
raise Exception("Failed to get hostname and port from Zookeeper")
return split_hostport(random_server_uri)
def _server_uri_to_dict(server_uri: str) -> Optional[Dict[str, str]]:
match = re.search(r"serverUri=(.*);version=(.*);sequence=(.*)", server_uri)
if match:
return {
"serverUri": match.group(1),
"version": match.group(2),
"sequence": match.group(3),
}
def get_hive_connection_conf(connection_string: str) -> HiveConnectionConf:
hostname = None
port = None
connection_conf = _extract_connection_url(connection_string)
# We use zookeeper to find host name
if connection_conf.session_variables.get("serviceDiscoveryMode") == "zooKeeper":
hostname, port = get_hive_host_port_from_zk(connection_conf)
else: # We just return a normal host
hostname, port = random_choice(connection_conf.hosts, default=(None, None))
return HiveConnectionConf(
host=hostname,
port=port,
default_db=connection_conf.default_db,
configuration=connection_conf.conf_list,
)
| 33.846154
| 202
| 0.689899
| 408
| 0.10303
| 0
| 0
| 942
| 0.237879
| 0
| 0
| 710
| 0.179293
|
3fdb9c34cb8887a4abfe9945968ed8dd70631d27
| 137
|
py
|
Python
|
flopz/__init__.py
|
Flopz-Project/flopz
|
eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3
|
[
"Apache-2.0"
] | 7
|
2021-11-19T15:53:58.000Z
|
2022-03-28T03:38:52.000Z
|
flopz/__init__.py
|
Flopz-Project/flopz
|
eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3
|
[
"Apache-2.0"
] | null | null | null |
flopz/__init__.py
|
Flopz-Project/flopz
|
eb470811e4a8be5e5d625209b0f8eb7ccd1d5da3
|
[
"Apache-2.0"
] | 1
|
2022-03-25T12:44:01.000Z
|
2022-03-25T12:44:01.000Z
|
"""
flopz.
Low Level Assembler and Firmware Instrumentation Toolkit
"""
__version__ = "0.2.0"
__author__ = "Noelscher Consulting GmbH"
| 15.222222
| 56
| 0.744526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.773723
|
3fdde609468413e798c5347a27251969395c0fce
| 2,294
|
py
|
Python
|
OracleCASB_API_Client/occs.py
|
ftnt-cse/Oracle_CASB_API_Client
|
00c92c7383d62d029736481f079773253e05589c
|
[
"Apache-2.0"
] | null | null | null |
OracleCASB_API_Client/occs.py
|
ftnt-cse/Oracle_CASB_API_Client
|
00c92c7383d62d029736481f079773253e05589c
|
[
"Apache-2.0"
] | null | null | null |
OracleCASB_API_Client/occs.py
|
ftnt-cse/Oracle_CASB_API_Client
|
00c92c7383d62d029736481f079773253e05589c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys, logging
import requests, json, argparse, textwrap
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from oracle_casb_api import *
import arrow  # arrow.now() is used below; made explicit in case the wildcard import does not re-export it
parser = argparse.ArgumentParser(
prog='Oracle CASB API Client',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
        ./OCCS_api_client.py: an API client implementation to fetch Oracle CASB Events and reports. It then parses them and sends them as syslog to a Syslog server/SIEM Solution
'''))
parser.add_argument('-s', '--syslog-server',type=str, required=True, help="Syslog Server where to send the fetched OCCS data as syslog")
parser.add_argument('-b', '--base-url',type=str, required=True, help="Oracle CASB base url, typically https://XXXXXXXX.palerra.net")
parser.add_argument('-k', '--access-key',type=str, required=True, help="Oracle CASB Access Key")
parser.add_argument('-a', '--access-secret',type=str, required=True, help='Oracle CASB Access Secret')
parser.add_argument('-t', '--time-period',type=int, required=True, help='time period of the events expressed as number of hours')
args = parser.parse_args()
logger = logging.getLogger('OCCS_Logger')
logger.setLevel(logging.ERROR)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def occs_init():
try:
occs = occs_api.OracleCasbCS(
base_url=args.base_url,
access_key = args.access_key,
access_secret = args.access_secret,
verify_ssl=False,
logger = logger
)
return occs
except Exception as e:
print("Failed to connect: {}".format(e))
sys.exit(1)
occs_object=occs_init()
start_date = arrow.now().shift(hours=(-1 * args.time_period)).format('YYYY-MM-DDTHH:mm:ss.SSS')
end_date = arrow.now().format('YYYY-MM-DDTHH:mm:ss.SSS')
res = occs_object.get_risk_events(start_date)
send_syslog(args.syslog_server,(prepare_risk_events_for_syslog(res)))
res = occs_object.get_user_risk_score_report('userrisk',start_date,end_date,'100')
send_syslog(args.syslog_server,(prepare_users_risk_scores_for_syslog(res)))
| 37.606557
| 170
| 0.732781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 710
| 0.309503
|
3fde07047223da1d88704610e639913da4a2c4f4
| 1,787
|
py
|
Python
|
src/classification_metrics.py
|
crmauceri/ReferringExpressions
|
d2ca43bf6df88f83fbe6dfba99b1105dd14592f4
|
[
"Apache-2.0"
] | 6
|
2020-06-05T06:52:59.000Z
|
2021-05-27T11:38:16.000Z
|
src/classification_metrics.py
|
crmauceri/ReferringExpressions
|
d2ca43bf6df88f83fbe6dfba99b1105dd14592f4
|
[
"Apache-2.0"
] | 1
|
2021-03-28T13:27:21.000Z
|
2021-04-29T17:58:28.000Z
|
src/classification_metrics.py
|
crmauceri/ReferringExpressions
|
d2ca43bf6df88f83fbe6dfba99b1105dd14592f4
|
[
"Apache-2.0"
] | 2
|
2019-12-09T09:14:47.000Z
|
2019-12-22T13:57:08.000Z
|
import argparse
import json
from data_management.DatasetFactory import datasetFactory
from config import cfg
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculates metrics from output of a Classification network.' +
' Run `run_network.py <config> test` first.')
parser.add_argument('config_file', help='config file path')
parser.add_argument('results_file', help='results file path')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
refer = datasetFactory(cfg)
hamming_loss = 0.0
TP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FN = np.zeros((cfg.IMG_NET.N_LABELS+1,))
total = 0.0
# load generation outputs
with open(args.results_file, 'r') as f:
genData = json.load(f)
for row in genData:
total += 1.0
hamming_loss += row['Hamming_Loss']
TP[row['TP_classes']] += 1
FP[row['FP_classes']] += 1
FN[row['FN_classes']] += 1
print("Mean Hamming Loss: %3.3f" % (hamming_loss/total))
print("Mean precision: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FP))))
print("Mean recall: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FN))))
print("Class\tPrecision\tRecall")
for idx in range(cfg.IMG_NET.N_LABELS):
label = refer[0].coco.cats[refer[0].coco_cat_map[idx]]
print("%s\t%3.3f\t%3.3f" % (label['name'].ljust(20), TP[idx]/(TP[idx]+FP[idx]), TP[idx]/(TP[idx]+FN[idx])))
| 34.365385
| 115
| 0.614997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.239508
|
3fe076a26915fb3a8a0df4e110f97d0bbe198980
| 6,448
|
py
|
Python
|
base_model.py
|
Unmesh-Kumar/DMRM
|
f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02
|
[
"MIT"
] | 23
|
2019-12-19T02:46:33.000Z
|
2022-03-22T07:52:28.000Z
|
base_model.py
|
Unmesh-Kumar/DMRM
|
f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02
|
[
"MIT"
] | 5
|
2020-07-28T14:25:45.000Z
|
2022-03-08T14:30:21.000Z
|
base_model.py
|
Unmesh-Kumar/DMRM
|
f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02
|
[
"MIT"
] | 5
|
2019-12-20T15:46:08.000Z
|
2021-11-23T01:15:32.000Z
|
import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding, QuestionEmbedding2
from classifier import SimpleClassifier
from fc import FCNet
from Decoders.decoder1 import _netG as netG
import torch.nn.functional as F
from torch.autograd import Variable
from misc.utils import LayerNorm
class BaseModel2(nn.Module):
def __init__(self, w_emb, q_emb, h_emb, v_att, h_att, q_net, v_net, h_net, qih_att, qhi_att, qih_net, qhi_net,
decoder, args, qhih_att, qihi_att):
super(BaseModel2, self).__init__()
self.ninp = args.ninp
self.w_emb = w_emb
self.q_emb = q_emb
self.h_emb = h_emb
self.decoder = decoder
self.img_embed = nn.Linear(args.img_feat_size, 2 * args.nhid)
self.w1 = nn.Linear(args.nhid*2, args.nhid*2)
self.w2 = nn.Linear(args.nhid*2, args.nhid*2)
self.track_1 = v_att
self.locate_1 = h_att
self.locate_2 = qih_att
self.track_2 = qhi_att
self.locate_3 = qhih_att
self.track_3 = qihi_att
self.q_net = q_net
self.v_net = v_net
self.h_net = h_net
self.qih_net = qih_net
self.qhi_net = qhi_net
self.fc1 = nn.Linear(args.nhid * 4, self.ninp)
self.dropout = args.dropout
self.vocab_size = args.vocab_size
# self.fch = FCNet([args.nhid * 2, args.nhid * 2])
# self.layernorm = LayerNorm(args.nhid*2)
def forward(self, image, question, history, answer, tans, rnd, Training=True, sampling=False):
# prepare I, Q, H
image = self.img_embed(image)
w_emb = self.w_emb(question)
q_emb, ques_hidden = self.q_emb(w_emb) # [batch, q_dim]
hw_emb = self.w_emb(history)
h_emb, _ = self.h_emb(hw_emb) # [batch * rnd, h_dim]
h_emb = h_emb.view(-1, rnd, h_emb.size(1))
# cap & image
# qc_att = self.v_att(image, h_emb[:, 0, :])
# qc_emb = (qc_att * image).sum(1)
# qc_emb = self.fch(qc_emb * q_emb)
# question & image --> qi
qv_att = self.track_1(image, q_emb)
qv_emb = (qv_att * image).sum(1) # [batch, v_dim]
# question & history --> qh
qh_att = self.locate_1(h_emb, q_emb)
qh_emb = (qh_att * h_emb).sum(1) # [batch, h_dim]
# qh_emb = self.fch(qh_emb+q_emb)
# qh_emb = self.layernorm(qh_emb+h_emb[:,0,:])
# qh & image --> qhi
qhi_att = self.track_2(image, qh_emb)
qhi_emb = (qhi_att * image).sum(1) # [batch, v_dim]
# qi & history --> qih
qih_att = self.locate_2(h_emb, qv_emb)
qih_emb = (qih_att * h_emb).sum(1) # [batch, h_dim]
q_re = self.q_net(q_emb)
qih_emb = self.h_net(qih_emb)
qih_emb = q_re * qih_emb
qhi_emb = self.v_net(qhi_emb)
qhi_emb = q_re * qhi_emb
# qih & i --> qihi
qihi_att = self.track_3(image, qih_emb)
qihi_emb = (qihi_att * image).sum(1)
# qhi & his --> qhih
qhih_att = self.locate_3(h_emb, qhi_emb)
qhih_emb = (qhih_att * h_emb).sum(1)
q_repr = self.q_net(q_emb)
qhi_repr = self.qhi_net(qihi_emb)
qqhi_joint_repr = q_repr * qhi_repr
qih_repr = self.qih_net(qhih_emb)
qqih_joint_repr = q_repr * qih_repr
joint_repr = torch.cat([self.w1(qqhi_joint_repr), self.w2(qqih_joint_repr)], 1) # [batch, h_dim * 2
joint_repr = F.tanh(self.fc1(F.dropout(joint_repr, self.dropout, training=self.training)))
_, ques_hidden = self.decoder(joint_repr.view(-1, 1, self.ninp), ques_hidden)
if sampling:
batch_size, _, _ = image.size()
sample_ans_input = Variable(torch.LongTensor(batch_size, 1).fill_(2).cuda())
sample_opt = {'beam_size': 1}
seq, seqLogprobs = self.decoder.sample(self.w_emb, sample_ans_input, ques_hidden, sample_opt)
sample_ans = self.w_emb(Variable(seq))
ans_emb = self.w_emb(tans)
sample_ans = torch.cat([w_emb, joint_repr.view(batch_size, -1, self.ninp),sample_ans], 1)
ans_emb = torch.cat([w_emb, joint_repr.view(batch_size, -1, self.ninp), ans_emb], 1)
return sample_ans, ans_emb
if not Training:
batch_size, _, hid_size = image.size()
hid_size = int(hid_size / 2)
hidden_replicated = []
for hid in ques_hidden:
hidden_replicated.append(hid.view(2, batch_size, 1,hid_size).expand(2,
batch_size, 100, hid_size).clone().view(2, -1, hid_size))
hidden_replicated = tuple(hidden_replicated)
ques_hidden = hidden_replicated
emb = self.w_emb(answer)
pred, _ = self.decoder(emb, ques_hidden)
return pred
def build_baseline0_newatt2(args, num_hid):
w_emb = WordEmbedding(args.vocab_size, args.ninp, 0.0)
q_emb = QuestionEmbedding2(args.ninp, num_hid, args.nlayers, True, 0.0)
h_emb = QuestionEmbedding2(args.ninp, num_hid, args.nlayers, True, 0.0)
v_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
h_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
qih_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
qhi_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
q_net = FCNet([q_emb.num_hid*2, num_hid*2])
v_net = FCNet([args.nhid*2, num_hid*2])
h_net = FCNet([args.nhid*2, num_hid*2])
qih_net = FCNet([args.nhid*2, num_hid*2])
qhi_net = FCNet([args.nhid*2, num_hid*2])
qhih_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
qihi_att = NewAttention(args.nhid*2, q_emb.num_hid*2, num_hid*2)
decoder = netG(args)
return BaseModel2(w_emb, q_emb, h_emb, v_att, h_att, q_net, v_net, h_net, qih_att, qhi_att, qih_net, qhi_net,
decoder, args, qhih_att, qihi_att)
class attflat(nn.Module):
def __init__(self, args):
super(attflat, self).__init__()
self.mlp = FCNet([args.nhid * 2, args.nhid, 1])
self.fc = nn.Linear(args.nhid*2, args.nhid*2)
def forward(self, x):
batch_size, q_len, nhid = x.size()
att = self.mlp(x.view(-1, nhid))
att = F.softmax(att, dim=1)
x_atted = (att.view(batch_size, q_len, -1) * x.view(batch_size, q_len, -1)).sum(1)
x_atted = self.fc(x_atted)
return x_atted
| 40.049689
| 114
| 0.618021
| 4,991
| 0.774038
| 0
| 0
| 0
| 0
| 0
| 0
| 577
| 0.089485
|
3fe105950fe7c097a0cf82f9fd41aa14438e8996
| 66
|
py
|
Python
|
qymel/core/__init__.py
|
hal1932/QyMEL
|
4fdf2409aaa34516f021a37aac0f011fe6ea6073
|
[
"MIT"
] | 6
|
2019-12-23T05:20:29.000Z
|
2021-01-30T21:17:32.000Z
|
qymel/core/__init__.py
|
hal1932/QyMEL
|
4fdf2409aaa34516f021a37aac0f011fe6ea6073
|
[
"MIT"
] | null | null | null |
qymel/core/__init__.py
|
hal1932/QyMEL
|
4fdf2409aaa34516f021a37aac0f011fe6ea6073
|
[
"MIT"
] | 1
|
2020-03-05T08:17:44.000Z
|
2020-03-05T08:17:44.000Z
|
# coding: utf-8
from .force_reload import *
from .scopes import *
| 16.5
| 27
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.227273
|
3fe18d763d2aae257f541fc27bf3a672136ac390
| 5,244
|
py
|
Python
|
lambda/nodemanager.py
|
twosdai/cloud-enablement-aws
|
145bf88acc1781cdd696e2d77a5c2d3b796e16c3
|
[
"Apache-2.0"
] | 11
|
2018-05-25T18:48:30.000Z
|
2018-11-30T22:06:58.000Z
|
lambda/nodemanager.py
|
twosdai/cloud-enablement-aws
|
145bf88acc1781cdd696e2d77a5c2d3b796e16c3
|
[
"Apache-2.0"
] | 10
|
2019-01-29T19:39:46.000Z
|
2020-07-01T07:37:08.000Z
|
lambda/nodemanager.py
|
twosdai/cloud-enablement-aws
|
145bf88acc1781cdd696e2d77a5c2d3b796e16c3
|
[
"Apache-2.0"
] | 18
|
2019-01-29T05:31:23.000Z
|
2021-09-16T20:04:24.000Z
|
# Copyright 2002-2018 MarkLogic Corporation. All Rights Reserved.
import boto3
import botocore
import logging
import hashlib
import json
import time
from botocore.exceptions import ClientError
log = logging.getLogger()
log.setLevel(logging.INFO)
# global variables
ec2_client = boto3.client('ec2')
asg_client = boto3.client('autoscaling')
ec2_resource = boto3.resource('ec2')
def eni_wait_for_attachment(eni_id):
max_rety = 10
retries = 0
sleep_interval = 10
eni_info = None
    while retries < max_rety:
try:
eni_info = ec2_resource.NetworkInterface(id=eni_id)
except ClientError as e:
reason = "Failed to get network interface by id %s" % eni_id
log.exception(reason)
time.sleep(sleep_interval)
retries += 1
continue
if not eni_info.attachment:
time.sleep(sleep_interval)
retries += 1
continue
status = eni_info.attachment["Status"]
if status == "attached":
break
elif status == "attaching":
time.sleep(sleep_interval)
retries += 1
continue
else:
log.warning(
"Network interface %s in unexpected status: %s" % (eni_id, status)
)
retries += 1
continue
else:
log.warning(
"Waiting for network interface %s attachment timed out" % eni_id
)
def handler(event, context):
msg_text = event["Records"][0]["Sns"]["Message"]
msg = json.loads(msg_text)
if "LifecycleTransition" in msg and \
msg["LifecycleTransition"] == "autoscaling:EC2_INSTANCE_LAUNCHING":
log.info("Handle EC2_INSTANCE_LAUNCHING event %s" % (json.dumps(event, indent=2)))
on_launch(msg)
# continue with the life cycle event
try:
asg_client.complete_lifecycle_action(
LifecycleHookName=msg['LifecycleHookName'],
AutoScalingGroupName=msg['AutoScalingGroupName'],
LifecycleActionToken=msg['LifecycleActionToken'],
LifecycleActionResult='CONTINUE'
)
except botocore.exceptions.ClientError as e:
reason = "Error completing life cycle hook for instance"
log.exception(reason)
time.sleep(5) # sleep for 5 seconds to allow exception info being sent to CloudWatch
def on_launch(msg):
instance_id = msg["EC2InstanceId"]
log.info("Launch event of instance %s" % instance_id)
try:
instance = ec2_client.describe_instances(InstanceIds=[instance_id])
except botocore.exceptions.ClientError as e:
reason = "Failed to describe instance %s" % instance_id
log.exception(reason)
time.sleep(5)
return False
# manage ENI
subnet_id = instance['Reservations'][0]['Instances'][0]['SubnetId']
tags = instance['Reservations'][0]['Instances'][0]['Tags']
stack_name = None
stack_id = None
for tag in tags:
if tag["Key"] == "marklogic:stack:name":
stack_name = tag["Value"]
if tag["Key"] == "marklogic:stack:id":
stack_id = tag["Value"]
if stack_name and stack_id:
log.info("Subnet: %s, Stack Name: %s, Stack Id: %s" % (str(subnet_id), stack_name, stack_id))
id_hash = hashlib.md5(stack_id.encode()).hexdigest()
eni_tag_prefix = stack_name + "-" + id_hash + "_"
for i in range(0,200):
tag = eni_tag_prefix + str(i)
log.info("Querying unattached ENI with tag %s" % tag)
# query
response = ec2_client.describe_network_interfaces(
Filters=[
{
"Name": "tag:cluster-eni-id",
"Values": [tag]
},
{
"Name": "status",
"Values": ["available"]
},
{
"Name": "subnet-id",
"Values": [subnet_id]
}
]
)
if len(response["NetworkInterfaces"]) == 0:
log.info("No qualified ENI found")
continue
# attach the available ENI
for eni_info in response["NetworkInterfaces"]:
eni_id = eni_info["NetworkInterfaceId"]
try:
attachment = ec2_client.attach_network_interface(
NetworkInterfaceId=eni_id,
InstanceId=instance_id,
DeviceIndex=1
)
log.info("Attaching ENI %s to instance %s" % (eni_id, instance_id))
except botocore.exceptions.ClientError as e:
reason = "Error attaching network interface %s" % eni_id
log.exception(reason)
time.sleep(5)
continue
eni_wait_for_attachment(eni_id)
break
else:
continue
break
else:
log.warning("Tags for stack name or stack id not found")
| 35.432432
| 101
| 0.54939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,273
| 0.242754
|
3fe27cb210e5f440aba20265f1b60a9554e9c206
| 5,724
|
py
|
Python
|
pyABC/0.10.14/petab/amici.py
|
ICB-DCM/lookahead-study
|
b9849ce2b0cebbe55d6c9f7a248a5f4dff191007
|
[
"MIT"
] | 3
|
2021-01-20T14:14:04.000Z
|
2022-02-23T21:21:18.000Z
|
pyABC/0.10.14/petab/amici.py
|
ICB-DCM/lookahead-study
|
b9849ce2b0cebbe55d6c9f7a248a5f4dff191007
|
[
"MIT"
] | 3
|
2021-01-20T23:11:20.000Z
|
2021-02-15T14:36:39.000Z
|
pyABC/Modified/petab/amici.py
|
ICB-DCM/lookahead-study
|
b9849ce2b0cebbe55d6c9f7a248a5f4dff191007
|
[
"MIT"
] | null | null | null |
import logging
from collections.abc import Sequence, Mapping
from typing import Callable, Union
import copy
import pyabc
from .base import PetabImporter, rescale
logger = logging.getLogger(__name__)
try:
import petab
import petab.C as C
except ImportError:
petab = C = None
logger.error("Install petab (see https://github.com/icb-dcm/petab) to use "
"the petab functionality.")
try:
import amici
import amici.petab_import
from amici.petab_objective import simulate_petab, LLH, RDATAS
except ImportError:
amici = simulate_petab = LLH = RDATAS = None
logger.error("Install amici (see https://github.com/icb-dcm/amici) to use "
"the amici functionality.")
class AmiciPetabImporter(PetabImporter):
"""
Import a PEtab model using AMICI to simulate it as a deterministic ODE.
Parameters
----------
petab_problem:
A PEtab problem containing all information on the parameter estimation
problem.
amici_model:
A corresponding compiled AMICI model that allows simulating data for
parameters. If not provided, one is created using
`amici.petab_import.import_petab_problem`.
amici_solver:
An AMICI solver to simulate the model. If not provided, one is created
using `amici_model.getSolver()`.
"""
def __init__(
self,
petab_problem: petab.Problem,
amici_model: amici.Model = None,
amici_solver: amici.Solver = None):
super().__init__(petab_problem=petab_problem)
if amici_model is None:
amici_model = amici.petab_import.import_petab_problem(
petab_problem)
self.amici_model = amici_model
if amici_solver is None:
amici_solver = self.amici_model.getSolver()
self.amici_solver = amici_solver
def create_model(
self,
return_simulations: bool = False,
return_rdatas: bool = False,
) -> Callable[[Union[Sequence, Mapping]], Mapping]:
"""Create model.
Note that since AMICI uses deterministic ODE simulations,
it is usually not necessary to store simulations, as these can
be reproduced from the parameters.
Parameters
----------
return_simulations:
Whether to return the simulations also (large, can be stored
in database).
return_rdatas:
Whether to return the full `List[amici.ExpData]` objects (large,
cannot be stored in database).
Returns
-------
model:
The model function, taking parameters and returning simulations.
The model returns already the likelihood value.
"""
# parameter ids to consider
x_free_ids = self.petab_problem.get_x_ids(free=True, fixed=False)
# fixed parameters
x_fixed_ids = self.petab_problem.get_x_ids(
free=False, fixed=True)
x_fixed_vals = self.petab_problem.get_x_nominal(
scaled=True, free=False, fixed=True)
# extract variables for improved pickling
petab_problem = self.petab_problem
amici_model = self.amici_model
amici_solver = self.amici_solver
prior_scales = self.prior_scales
scaled_scales = self.scaled_scales
if set(prior_scales.keys()) != set(x_free_ids):
# this should not happen
raise AssertionError("Parameter id mismatch")
# no gradients for pyabc
amici_solver.setSensitivityOrder(0)
def model(par: Union[Sequence, Mapping]) -> Mapping:
"""The model function.
Note: The parameters are assumed to be passed on prior scale.
"""
# copy since we add fixed parameters
par = copy.deepcopy(par)
# convenience to allow calling model not only with dicts
if not isinstance(par, Mapping):
par = {key: val for key, val in zip(x_free_ids, par)}
# add fixed parameters
for key, val in zip(x_fixed_ids, x_fixed_vals):
par[key] = val
# scale parameters whose priors are not on scale
for key in prior_scales.keys():
par[key] = rescale(
val=par[key],
origin_scale=prior_scales[key],
target_scale=scaled_scales[key],
)
# simulate model
sim = simulate_petab(
petab_problem=petab_problem,
amici_model=amici_model,
solver=amici_solver,
problem_parameters=par,
scaled_parameters=True)
# return values of interest
ret = {'llh': sim[LLH]}
if return_simulations:
for i_rdata, rdata in enumerate(sim[RDATAS]):
ret[f'y_{i_rdata}'] = rdata['y']
if return_rdatas:
ret[RDATAS] = sim[RDATAS]
return ret
return model
def create_kernel(
self,
) -> pyabc.StochasticKernel:
"""
Create acceptance kernel.
Returns
-------
kernel:
A pyabc distribution encoding the kernel function.
"""
def kernel_fun(x, x_0, t, par) -> float:
"""The kernel function."""
# the kernel value is computed by amici already
return x['llh']
# create a kernel from function, returning log-scaled values
kernel = pyabc.distance.SimpleFunctionKernel(
kernel_fun, ret_scale=pyabc.distance.SCALE_LOG)
return kernel
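# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal sketch of wiring this importer into an analysis, assuming
# `petab_problem` is an already-loaded petab.Problem; the 0.0 values are
# placeholders on the prior scale, not meaningful parameters.
# importer = AmiciPetabImporter(petab_problem)
# model = importer.create_model(return_simulations=False)
# kernel = importer.create_kernel()
# free_ids = petab_problem.get_x_ids(free=True, fixed=False)
# llh = model({x_id: 0.0 for x_id in free_ids})['llh']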
| 32.338983
| 79
| 0.601328
| 4,973
| 0.868798
| 0
| 0
| 0
| 0
| 0
| 0
| 2,249
| 0.392907
|
3fe32adbae6d30f0649147cee237cf1904d94533
| 99
|
py
|
Python
|
ui_automation_core/helpers/browser/alert_action_type.py
|
Harshavardhanchowdary/python-ui-testing-automation
|
a624c6b945276c05722be2919d95aa9e5539d0d0
|
[
"MIT"
] | null | null | null |
ui_automation_core/helpers/browser/alert_action_type.py
|
Harshavardhanchowdary/python-ui-testing-automation
|
a624c6b945276c05722be2919d95aa9e5539d0d0
|
[
"MIT"
] | null | null | null |
ui_automation_core/helpers/browser/alert_action_type.py
|
Harshavardhanchowdary/python-ui-testing-automation
|
a624c6b945276c05722be2919d95aa9e5539d0d0
|
[
"MIT"
] | null | null | null |
from enum import Enum, auto
class AlertActionType(Enum):
ACCEPT = auto()
DISMISS = auto()
| 16.5
| 28
| 0.676768
| 69
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3fe331ae497b79a61bbb73e932ba9991e96f0b3f
| 18,769
|
py
|
Python
|
xsertion/test_layers.py
|
karazijal/xsertion
|
102c1a4f07b049647064a968257d56b00a064d6c
|
[
"MIT"
] | null | null | null |
xsertion/test_layers.py
|
karazijal/xsertion
|
102c1a4f07b049647064a968257d56b00a064d6c
|
[
"MIT"
] | null | null | null |
xsertion/test_layers.py
|
karazijal/xsertion
|
102c1a4f07b049647064a968257d56b00a064d6c
|
[
"MIT"
] | 1
|
2021-11-09T09:06:48.000Z
|
2021-11-09T09:06:48.000Z
|
import unittest
from xsertion.layers import *
from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten
from keras.models import Model
import json
def desc(model : Model):
base_model_disc = json.loads(model.to_json())
return base_model_disc['config']
def topo_check(layerlist):
ind = {layer: i for i,layer in enumerate(layerlist)}
for i, layer in enumerate(layerlist):
if any(ind[l] > i for l in layer.get_inbound()): # all incoming must be before i
return False
if any(ind[l] < i for l in layer._get_outbound()): # all outgoing must be after i
return False
return True
class ParsingTestCase(unittest.TestCase):
def test_layer_con_and_config(self):
it = Input(shape=(3, 32, 32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu', name='TestLayer')
config = json.loads(json.dumps(c1.get_config())) # transform all tuples to lists
model=Model(input=it, output=c1(it))
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertDictEqual(config, layers[1].config)
self.assertEqual(layers[0], list(layers[1].get_inbound())[0])
self.assertEqual(layers[1], list(layers[0]._get_outbound())[0])
self.assertTrue(topo_check(layers))
def test_linear_model(self):
it = Input(shape=(3,32,32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu')(it)
a1 = Flatten()(c1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertEqual(4, len(layers))
self.assertEqual(1, len(model_inputs))
self.assertEqual(1, len(model_outputs))
self.assertEqual("TestInput", layers[0].get_name())
self.assertEqual("TestOutput", layers[-1].get_name())
self.assertTrue(topo_check(layers))
def test_branching_model(self):
it = Input(shape=(3,32,32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu')(it)
c2 = Convolution2D(32, 3, 3, activation='relu')(it)
c3 = Convolution2D(32, 3, 3, activation='relu')(it)
m1 = merge([c1, c2, c3], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertEqual(7, len(layers))
self.assertEqual(1, len(model_inputs))
self.assertEqual(1, len(model_outputs))
self.assertEqual("TestInput", layers[0].get_name())
self.assertEqual("TestOutput", layers[-1].get_name())
self.assertTrue(topo_check(layers))
def test_branching_multistage_model(self):
it = Input(shape=(3,32,32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu')(it)
b1 = Activation('relu')(c1)
c2 = Convolution2D(32, 3, 3, activation='relu')(it)
b2 = Activation('relu')(c2)
c3 = Convolution2D(32, 3, 3, activation='relu')(it)
b3 = Activation('relu')(c3)
m1 = merge([b1, b2, b3], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertEqual(10, len(layers))
self.assertEqual(1, len(model_inputs))
self.assertEqual(1, len(model_outputs))
self.assertEqual("TestInput", layers[0].get_name())
self.assertEqual("TestOutput", layers[-1].get_name())
self.assertTrue(topo_check(layers))
def test_skip_connnection(self):
it = Input(shape=(3,32,32), name='TestInput')
c1 = Convolution2D(3, 3, 3, border_mode='same', dim_ordering='th')(it) #dim_ordering to force match on inputshape
b1 = Activation('relu')(c1)
m1 = merge([b1, it], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertEqual(6, len(layers))
self.assertEqual(1, len(model_inputs))
self.assertEqual(1, len(model_outputs))
self.assertEqual("TestInput", layers[0].get_name())
self.assertEqual("TestOutput", layers[-1].get_name())
self.assertTrue(topo_check(layers))
def test_complex_skip(self):
l1 = Input((3,32,32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l2)
l5 = merge([l2,l3], name='5')
l6 = merge([l1,l4], name='6')
l7 = Activation('relu', name='7')(l5)
l8 = merge([l6,l7], name='8')
model = Model(input=l1, output=l8)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
self.assertEqual('1', layers[0].get_name())
self.assertTrue(topo_check(layers))
self.assertListEqual(['1','2','3','4','5','7','6','8'], [l.get_name() for l in layers])
class ReplicationTestCase(unittest.TestCase):
def test_replication_layer_properties(self):
#use keras layers to quickly fill the list
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l2)
l5 = merge([l2, l3], name='5')
l6 = merge([l1, l4], name='6')
l7 = Activation('relu', name='7')(l5)
l8 = merge([l6, l7], name='8')
model = Model(input=l1, output=l8)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
repl_list = replicate_layerlist(layers)
for l1, l2 in zip(layers, repl_list):
self.assertEqual(l1.class_name, l2.class_name)
self.assertEqual(l1.get_name(), l2.get_name())
self.assertDictEqual(l1.config, l2.config)
def test_replication_layer_connections(self):
# use keras layers to quickly fill the list
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l2)
l5 = merge([l2, l3], name='5')
l6 = merge([l1, l4], name='6')
l7 = Activation('relu', name='7')(l5)
l8 = merge([l6, l7], name='8')
model = Model(input=l1, output=l8)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
def assertSameLayer(l1, l2):
self.assertEqual(l1.class_name, l2.class_name)
self.assertEqual(l1.get_name(), l2.get_name())
self.assertDictEqual(l1.config, l2.config)
repl_list = replicate_layerlist(layers)
for l1, l2 in zip(layers, repl_list):
# build matching inbound lists
for il in l1.get_inbound():
for il2 in l2.get_inbound():
if layers.index(il) == repl_list.index(il2):
assertSameLayer(il, il2)
def test_replication_layer_con_consitency(self):
# use keras layers to quickly fill the list
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l2)
l5 = merge([l2, l3], name='5')
l6 = merge([l1, l4], name='6')
l7 = Activation('relu', name='7')(l5)
l8 = merge([l6, l7], name='8')
model = Model(input=l1, output=l8)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
llayers = layers[3:] # only take 4, 5, 6, 7, 8
repl_layers = replicate_layerlist(llayers)
self.assertEqual(0, len(repl_layers[0].get_inbound())) # no connections for 4 have been inserted
self.assertEqual(0, len(repl_layers[1].get_inbound())) # no connections for 5 have been inserted
self.assertEqual(1, len(repl_layers[3].get_inbound())) # only the connection to 4 has been included for 6
def assertSameLayer(l1, l2):
self.assertEqual(l1.class_name, l2.class_name)
self.assertEqual(l1.get_name(), l2.get_name())
self.assertDictEqual(l1.config, l2.config)
assertSameLayer(list(repl_layers[3].get_inbound())[0], layers[3])
def test_xspot_replication(self):
# use keras layers to quickly fill the list
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l2)
l5 = merge([l2, l3], name='5')
l6 = merge([l1, l4], name='6')
l7 = Activation('relu', name='7')(l5)
l8 = merge([l6, l7], name='8')
model = Model(input=l1, output=l8)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
xspot = XLayerBP.insertXspot(layers[4], 16)
layers.insert(5, xspot)
repl_layers = replicate_layerlist(layers)
self.assertEqual('XSpot', repl_layers[5].class_name)
self.assertEqual(4, repl_layers.index(list(repl_layers[5].get_inbound())[0]))
class XLayerTestCase(unittest.TestCase):
def test_xspot_insertion_simple(self):
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l1)
l5 = merge([l2, l3, l4], name='5')
model = Model(l1, l5)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
xspot = XLayerBP.insertXspot(layers[2], 16) # insert after 3
# check that l3 is now only connected to xspot
self.assertEqual(list(layers[2]._get_outbound())[0], xspot)
def test_xspot_insertion_branching(self):
l1 = Input((3, 32, 32), name='1')
l2 = Activation('relu', name='2')(l1)
l3 = Activation('relu', name='3')(l1)
l4 = Activation('relu', name='4')(l1)
l5 = merge([l2, l3, l4], name='5')
model = Model(l1, l5)
layers, model_inputs, model_outputs = parse_model_description(desc(model))
xspot = XLayerBP.insertXspot(layers[2], 16) # insert after 3
# check that l5 is now connected to l2, l4, and xspot
b = layers[-1].get_inbound()
self.assertTrue(xspot in b)
self.assertTrue(layers[1] in b)
self.assertTrue(layers[3] in b)
class RenderingTestCase(unittest.TestCase):
def test_parse_render(self):
it = Input(shape=(3, 32, 32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b1 = Activation('relu')(c1)
c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b2 = Activation('relu')(c2)
c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b3 = Activation('relu')(c3)
m1 = merge([b1, b2, b3], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
mdescs = desc(model)
layers, model_inputs, model_outputs = parse_model_description(mdescs)
rend_descs = render(model_inputs, layers, model_outputs)
for inp in mdescs['input_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['input_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for inp in mdescs['output_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['output_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for layer in mdescs['layers']:
for llayer in rend_descs['layers']:
if layer['name'] == llayer['name']:
self.assertDictEqual(layer['config'], llayer['config'])
if len(layer['inbound_nodes']) > 0:
for inp in layer['inbound_nodes'][0]:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in llayer['inbound_nodes'][0]:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12 == p1 and p22 == p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
break
else:
self.assertTrue(False)
def test_render_xsport_skip(self):
it = Input(shape=(3, 32, 32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b1 = Activation('relu')(c1)
c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b2 = Activation('relu')(c2)
c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b3 = Activation('relu')(c3)
m1 = merge([b1, b2, b3], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
mdescs = desc(model)
layers, model_inputs, model_outputs = parse_model_description(mdescs)
xspots = []
for i,layer in enumerate(layers):
if layer.class_name == "Convolution2D":
xspot = XLayerBP.insertXspot(layer, 32)
xspots.append((i, xspot))
for c, (i, xs) in enumerate(xspots):
layers.insert(i+1+c, xs)
rend_descs = render(model_inputs, layers, model_outputs)
for inp in mdescs['input_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['input_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for inp in mdescs['output_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['output_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for layer in mdescs['layers']:
for llayer in rend_descs['layers']:
if layer['name'] == llayer['name']:
self.assertDictEqual(layer['config'], llayer['config'])
if len(layer['inbound_nodes']) > 0:
for inp in layer['inbound_nodes'][0]:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in llayer['inbound_nodes'][0]:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12 == p1 and p22 == p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
break
else:
self.assertTrue(False)
def test_render_xsport_skip_merge(self):
it = Input(shape=(3, 32, 32), name='TestInput')
c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b1 = Activation('relu')(c1)
c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b2 = Activation('relu')(c2)
c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
b3 = Activation('relu')(c3)
m1 = merge([b1, b2, b3], mode='sum')
a1 = Flatten()(m1)
d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
model = Model(input=it, output=d1)
mdescs = desc(model)
layers, model_inputs, model_outputs = parse_model_description(mdescs)
xspots = []
for i, layer in enumerate(layers):
if layer.class_name == "Activation":
xspot = XLayerBP.insertXspot(layer, 32)
xspots.append((i, xspot))
for c, (i, xs) in enumerate(xspots):
layers.insert(i + 1 + c, xs)
rend_descs = render(model_inputs, layers, model_outputs)
for inp in mdescs['input_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['input_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for inp in mdescs['output_layers']:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in rend_descs['output_layers']:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12==p1 and p22==p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
for layer in mdescs['layers']:
for llayer in rend_descs['layers']:
if layer['name'] == llayer['name']:
self.assertDictEqual(layer['config'], llayer['config'])
if len(layer['inbound_nodes']) > 0:
for inp in layer['inbound_nodes'][0]:
nm, p1, p2 = inp[0], inp[1], inp[2]
for inp2 in llayer['inbound_nodes'][0]:
nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
if nm2 == nm and p12 == p1 and p22 == p2:
self.assertTrue(True)
break
else:
self.assertTrue(False)
break
else:
self.assertTrue(False)
if __name__=="__main__":
unittest.main()
| 42.656818
| 121
| 0.553146
| 18,045
| 0.961426
| 0
| 0
| 0
| 0
| 0
| 0
| 2,033
| 0.108317
|
3fe371c906222e31026634c1cd2e9e52427c680b
| 151
|
py
|
Python
|
language/python/modules/websocket/websocket_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 1
|
2019-11-25T07:23:42.000Z
|
2019-11-25T07:23:42.000Z
|
language/python/modules/websocket/websocket_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 13
|
2020-01-07T16:09:47.000Z
|
2022-03-02T12:51:44.000Z
|
language/python/modules/websocket/websocket_module.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
WebSocket can be used for real-time chat.
"""
import websocket
if __name__ == '__main__':
pass
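# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal echo round-trip with the websocket-client package; the URL is a
# placeholder and needs a reachable WebSocket echo server.
# ws = websocket.create_connection("ws://example.com/echo")  # placeholder URL
# ws.send("hello")
# print(ws.recv())
# ws.close()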
| 10.066667
| 26
| 0.629139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.684848
|
3fe5513bca482d43a59c15049895c9303427b971
| 85
|
py
|
Python
|
engines/__init__.py
|
mukeran/simple_sandbox
|
a2a97d13d814548f313871f0bd5c48f65b1a6180
|
[
"MIT"
] | null | null | null |
engines/__init__.py
|
mukeran/simple_sandbox
|
a2a97d13d814548f313871f0bd5c48f65b1a6180
|
[
"MIT"
] | null | null | null |
engines/__init__.py
|
mukeran/simple_sandbox
|
a2a97d13d814548f313871f0bd5c48f65b1a6180
|
[
"MIT"
] | null | null | null |
from .watcher import FileWatcher
from .fpm_sniffer import FPMSniffer, FPMSnifferMode
| 28.333333
| 51
| 0.858824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3fe6078d322f58b763a2e00d815b964e8911f9bf
| 885
|
py
|
Python
|
PyTrinamic/modules/TMC_EvalShield.py
|
trinamic-AA/PyTrinamic
|
b054f4baae8eb6d3f5d2574cf69c232f66abb4ee
|
[
"MIT"
] | 37
|
2019-01-13T11:08:45.000Z
|
2022-03-25T07:18:15.000Z
|
PyTrinamic/modules/TMC_EvalShield.py
|
AprDec/PyTrinamic
|
a9db10071f8fbeebafecb55c619e5893757dd0ce
|
[
"MIT"
] | 56
|
2019-02-25T02:48:27.000Z
|
2022-03-31T08:45:34.000Z
|
PyTrinamic/modules/TMC_EvalShield.py
|
AprDec/PyTrinamic
|
a9db10071f8fbeebafecb55c619e5893757dd0ce
|
[
"MIT"
] | 26
|
2019-01-14T05:20:16.000Z
|
2022-03-08T13:27:35.000Z
|
'''
Created on 18.03.2020
@author: LK
'''
class TMC_EvalShield(object):
"""
Arguments:
connection:
Type: connection interface
The connection interface used for this module.
shield:
Type: class
The EvalShield class used for every axis on this module.
For every axis connected, an instance of this class will be created,
which can be used later.
"""
def __init__(self, connection, shield, moduleID=1):
self.GPs = _GPs
self.shields = []
while(not(connection.globalParameter(self.GPs.attachedAxes, 0, moduleID))):
pass
attachedAxes = connection.globalParameter(self.GPs.attachedAxes, 0, moduleID)
for i in range(attachedAxes):
self.shields.append(shield(connection, i, moduleID))
class _GPs():
attachedAxes = 6
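# --- Hypothetical usage sketch (not part of the original file) ---
# `my_connection` and `MyShield` are placeholders: the connection must provide
# globalParameter(), and MyShield is instantiated once per attached axis.
# board = TMC_EvalShield(my_connection, MyShield)
# for axis, shield in enumerate(board.shields):
#     print("axis", axis, "->", shield)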
| 26.818182
| 85
| 0.615819
| 838
| 0.946893
| 0
| 0
| 0
| 0
| 0
| 0
| 413
| 0.466667
|
3fe68b75dfeb56985a424ac16b45a678c22019cc
| 285
|
py
|
Python
|
kattis/rollcall.py
|
terror/Solutions
|
1ad33daec95b565a38ac4730261593bcf249ac86
|
[
"CC0-1.0"
] | 2
|
2021-04-05T14:26:37.000Z
|
2021-06-10T04:22:01.000Z
|
kattis/rollcall.py
|
terror/Solutions
|
1ad33daec95b565a38ac4730261593bcf249ac86
|
[
"CC0-1.0"
] | null | null | null |
kattis/rollcall.py
|
terror/Solutions
|
1ad33daec95b565a38ac4730261593bcf249ac86
|
[
"CC0-1.0"
] | null | null | null |
import sys
d, n = [], {}
for i in sys.stdin:
if i.rstrip() == "":
break
a, b = map(str, i.split())
d.append([a, b])
if a in n:
n[a] += 1
else:
n[a] = 1
d = sorted(d, key=lambda x: (x[1], x[0]))
for k, v in d:
if n[k] > 1:
print(k, v)
else:
print(k)
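# Illustrative example (not part of the original solution): for the input lines
#   anna jones
#   bob smith
#   anna smith
# "anna" occurs twice, so those entries keep their last names; "bob" is unique.
# Sorted by (last name, first name), the output is:
#   anna jones
#   anna smith
#   bob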
| 14.25
| 41
| 0.45614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0.007018
|
3fe82d5a85daeba3d97651074742e05e1165543c
| 1,697
|
py
|
Python
|
test/vizier/test_nodes.py
|
robotarium/vizier
|
6ce2be4fc0edcdaf5ba246094c2e79bff32e219d
|
[
"MIT"
] | 11
|
2016-08-18T20:37:06.000Z
|
2019-11-24T17:34:27.000Z
|
test/vizier/test_nodes.py
|
robotarium/vizier
|
6ce2be4fc0edcdaf5ba246094c2e79bff32e219d
|
[
"MIT"
] | 6
|
2018-10-07T17:01:40.000Z
|
2019-11-24T17:41:16.000Z
|
test/vizier/test_nodes.py
|
robotarium/vizier
|
6ce2be4fc0edcdaf5ba246094c2e79bff32e219d
|
[
"MIT"
] | 3
|
2016-08-22T13:58:24.000Z
|
2018-06-07T21:06:35.000Z
|
import json
import vizier.node as node
import unittest
class TestVizierNodes(unittest.TestCase):
def setUp(self):
path_a = '../config/node_desc_a.json'
path_b = '../config/node_desc_b.json'
try:
f = open(path_a, 'r')
node_descriptor_a = json.load(f)
f.close()
except Exception as e:
print(repr(e))
print('Could not open given node file {}'.format(path_a))
return -1
try:
f = open(path_b, 'r')
node_descriptor_b = json.load(f)
f.close()
except Exception as e:
print(repr(e))
print('Could not open given node file {}'.format(path_b))
return -1
self.node_a = node.Node('localhost', 1883, node_descriptor_a)
self.node_a.start()
self.node_b = node.Node('localhost', 1883, node_descriptor_b)
self.node_b.start()
def test_publishable_links(self):
self.assertEqual(self.node_a.publishable_links, {'a/a_sub'})
self.assertEqual(self.node_b.publishable_links, set())
def test_subscribable_links(self):
self.assertEqual(self.node_a.subscribable_links, set())
self.assertEqual(self.node_b.subscribable_links, {'a/a_sub'})
def test_gettable_links(self):
self.assertEqual(self.node_a.gettable_links, set())
self.assertEqual(self.node_b.gettable_links, {'a/a_sub2'})
def test_puttable_links(self):
self.assertEqual(self.node_a.puttable_links, {'a/a_sub2'})
self.assertEqual(self.node_b.puttable_links, {'b/b_sub'})
def tearDown(self):
self.node_a.stop()
self.node_b.stop()
| 30.854545
| 69
| 0.614614
| 1,639
| 0.965822
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.118444
|
3fe84decaa2c4b931f2c3a8a70e6c95473baf73c
| 457
|
py
|
Python
|
tests/not_test_basics.py
|
kipfer/simple_modbus_server
|
f16caea62311e1946498392ab4cb5f3d2e1306cb
|
[
"MIT"
] | 1
|
2021-03-11T13:04:00.000Z
|
2021-03-11T13:04:00.000Z
|
tests/not_test_basics.py
|
kipfer/simple_modbus_server
|
f16caea62311e1946498392ab4cb5f3d2e1306cb
|
[
"MIT"
] | null | null | null |
tests/not_test_basics.py
|
kipfer/simple_modbus_server
|
f16caea62311e1946498392ab4cb5f3d2e1306cb
|
[
"MIT"
] | null | null | null |
import modbus_server
s = modbus_server.Server(
host="localhost", port=5020, daemon=True, loglevel="WARNING", autostart=False
)
s.start()
s.set_coil(1, True)
s.set_coils(2, [True, False, True])
s.set_discrete_input(1, True)
s.set_discrete_inputs(2, [True, False, True])
s.set_input_register(1, 1234, "h")
s.set_input_registers(2, [1, 2, 3, 4, 5], "h")
s.set_holding_register(1, 1234, "h")
s.set_holding_registers(2, [1, 2, 3, 4, 5], "h")
s.stop()
| 20.772727
| 81
| 0.68709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.070022
|
3fe8ccedd5919a259d55f873b8eeacc8ac42d24a
| 5,417
|
py
|
Python
|
cuda/rrnn_semiring.py
|
Noahs-ARK/rational-recurrences
|
3b7ef54520bcaa2b24551cf42a125c9251124229
|
[
"MIT"
] | 27
|
2018-09-28T02:17:07.000Z
|
2020-10-15T14:57:16.000Z
|
cuda/rrnn_semiring.py
|
Noahs-ARK/rational-recurrences
|
3b7ef54520bcaa2b24551cf42a125c9251124229
|
[
"MIT"
] | 1
|
2021-03-25T22:08:35.000Z
|
2021-03-25T22:08:35.000Z
|
cuda/rrnn_semiring.py
|
Noahs-ARK/rational-recurrences
|
3b7ef54520bcaa2b24551cf42a125c9251124229
|
[
"MIT"
] | 5
|
2018-11-06T05:49:51.000Z
|
2019-10-26T03:36:43.000Z
|
RRNN_SEMIRING = """
extern "C" {
__global__ void rrnn_semiring_fwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ c1,
float * __restrict__ c2,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
const float *up = u + (col*k);
float *c1p = c1 + col;
float *c2p = c2 + col;
float cur_c1 = *(c1_init + col);
float cur_c2 = *(c2_init + col);
const float eps_val = *(eps + (col%dim));
for (int row = 0; row < len; ++row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
float prev_c1 = cur_c1;
float op1 = times_forward(semiring_type, cur_c1, forget1);
cur_c1 = plus_forward(semiring_type, op1, u1);
float op2 = times_forward(semiring_type, cur_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
cur_c2 = plus_forward(semiring_type, op2, op3);
*c1p = cur_c1;
*c2p = cur_c2;
up += ncols_u;
c1p += ncols;
c2p += ncols;
}
}
__global__ void rrnn_semiring_bwd(
const float * __restrict__ u,
const float * __restrict__ eps,
const float * __restrict__ c1_init,
const float * __restrict__ c2_init,
const float * __restrict__ c1,
const float * __restrict__ c2,
const float * __restrict__ grad_c1,
const float * __restrict__ grad_c2,
const float * __restrict__ grad_last_c1,
const float * __restrict__ grad_last_c2,
const int len,
const int batch,
const int dim,
const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_eps,
float * __restrict__ grad_c1_init,
float * __restrict__ grad_c2_init,
int semiring_type) {
assert (k == K);
int ncols = batch*dim;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
float cur_c1 = *(grad_last_c1 + col);
float cur_c2 = *(grad_last_c2 + col);
const float eps_val = *(eps + (col%dim));
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *c1p = c1 + col + (len-1)*ncols;
const float *c2p = c2 + col + (len-1)*ncols;
const float *gc1p = grad_c1 + col + (len-1)*ncols;
const float *gc2p = grad_c2 + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float geps = 0.f;
for (int row = len-1; row >= 0; --row) {
float u1 = *(up);
float u2 = *(up+1);
float forget1 = *(up+2);
float forget2 = *(up+3);
const float c1_val = *c1p;
const float c2_val = *c2p;
const float prev_c1 = (row>0) ? (*(c1p-ncols)) : (*(c1_init+col));
const float prev_c2 = (row>0) ? (*(c2p-ncols)) : (*(c2_init+col));
const float gc1 = *(gc1p) + cur_c1;
const float gc2 = *(gc2p) + cur_c2;
cur_c1 = cur_c2 = 0.f;
float op1 = times_forward(semiring_type, prev_c1, forget1);
float gop1 = 0.f, gu1 = 0.f;
plus_backward(semiring_type, op1, u1, gc1, gop1, gu1);
float gprev_c1 = 0.f, gprev_c2 = 0.f, gforget1=0.f;
times_backward(semiring_type, prev_c1, forget1, gop1, gprev_c1, gforget1);
*(gup) = gu1;
*(gup+2) = gforget1;
cur_c1 += gprev_c1;
float op2 = times_forward(semiring_type, prev_c2, forget2);
float op3_ = plus_forward(semiring_type, eps_val, prev_c1);
float op3 = times_forward(semiring_type, op3_, u2);
float gop2 = 0.f, gop3 = 0.f;
plus_backward(semiring_type, op2, op3, gc2, gop2, gop3);
float gop3_ = 0.f, gu2 = 0.f, gforget2 = 0.f, cur_geps=0.f;
times_backward(semiring_type, prev_c2, forget2, gop2, gprev_c2, gforget2);
times_backward(semiring_type, op3_, u2, gop3, gop3_, gu2);
plus_backward(semiring_type, eps_val, prev_c1, gop3_, cur_geps, gprev_c1);
*(gup+1) = gu2;
*(gup+3) = gforget2;
geps += cur_geps;
cur_c1 += gprev_c1;
cur_c2 += gprev_c2;
up -= ncols_u;
c1p -= ncols;
c2p -= ncols;
gup -= ncols_u;
gc1p -= ncols;
gc2p -= ncols;
}
*(grad_c1_init + col) = cur_c1;
*(grad_c2_init + col) = cur_c2;
*(grad_eps + col%dim) = geps;
}
}
"""
| 36.601351
| 86
| 0.502677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,400
| 0.996862
|
3fe8e4411ff091a355fe9346309f0659c9b08983
| 1,841
|
py
|
Python
|
tests.py
|
c-okelly/movie_script_analytics
|
6fee40c0378921199ab14ca0b4db447b9f4e7bcf
|
[
"MIT"
] | 1
|
2017-11-09T13:24:47.000Z
|
2017-11-09T13:24:47.000Z
|
tests.py
|
c-okelly/movie_script_analytics
|
6fee40c0378921199ab14ca0b4db447b9f4e7bcf
|
[
"MIT"
] | null | null | null |
tests.py
|
c-okelly/movie_script_analytics
|
6fee40c0378921199ab14ca0b4db447b9f4e7bcf
|
[
"MIT"
] | null | null | null |
import re
import text_objects
import numpy as np
import pickle
# f = open("Data/scripts_text/17-Again.txt", 'r')
# text = f.read()
# text = text[900:1500]
# print(text)
# count = len(re.findall("\W+",text))
# print(count)
#
# lines = text.split('\n')
# lines_on_empty = re.split("\n\s+\n", text)
# print(len(lines))
# print(len(lines_on_empty))
#
# # Find empty lines
# count = 0
# for item in lines:
# if re.search("\A\s+\Z", item):
# print(count)
# count += 1
#
# # Search for character names in list
# for item in lines:
# if re.search("\A\s*Name_character\s*(\(.*\))?\s*\Z", item):
# print(item)
# # Generate list of characters from the script
# characters = dict()
#
#
# for line in lines:
# #Strip whitespace and check if whole line is in capital letters
# line = line.strip()
# if (line.isupper()):
#
# # Exclude lines with EXT / INT in them
# s1 = re.search('EXT\.', line)
# s2 = re.search('INT\.', line)
#
# # Select correct lines and strip out any elements within parentheses. Normally continued
# if (not(s1 or s2)):
# line = re.sub("\s*\(.*\)","",line)
# # If character not in dict, add them. If already in, increase count by 1
# if line in characters:
# characters[line] = characters[line] + 1
# else:
# characters[line] = 1
#
# print(characters)
# Get description lines
if __name__ == '__main__':
#
# string = " -EARLY APRIL, 1841"
# print(re.match("^\s+-(\w+\s{0,3},?/?){0,4}(\s\d{0,5})-\s+",string))
# for i in np.arange(0,1,0.1):
# print(i,"to",i+0.1)
# array= [1,3,5,6,1]
#
# count = 0
var = pickle.load(open("Data/Pickled_objects/400.dat","rb"))
object_1 = var[0]
print(object_1.info_dict)
| 23.303797
| 98
| 0.558935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,576
| 0.856056
|
3fea883542666ba0f05267690f8d99f2d06892ea
| 1,945
|
py
|
Python
|
malcolm/modules/demo/parts/countermovepart.py
|
dinojugosloven/pymalcolm
|
0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a
|
[
"Apache-2.0"
] | null | null | null |
malcolm/modules/demo/parts/countermovepart.py
|
dinojugosloven/pymalcolm
|
0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a
|
[
"Apache-2.0"
] | null | null | null |
malcolm/modules/demo/parts/countermovepart.py
|
dinojugosloven/pymalcolm
|
0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a
|
[
"Apache-2.0"
] | null | null | null |
import time
from annotypes import Anno, add_call_types
from malcolm.core import PartRegistrar
from malcolm.modules import builtin
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
with Anno("The demand value to move our counter motor to"):
ADemand = float
with Anno("The amount of time to get to the demand position"):
ADuration = float
# How long between ticks of the "motor" position while moving
UPDATE_TICK = 0.1
# We will set these attributes on the child block, so don't save them
@builtin.util.no_save("counter")
class CounterMovePart(builtin.parts.ChildPart):
"""Provides control of a `counter_block` within a `ManagerController`"""
def __init__(self, name, mri):
# type: (APartName, AMri) -> None
super(CounterMovePart, self).__init__(
name, mri, stateful=False, initial_visibility=True)
def setup(self, registrar):
# type: (PartRegistrar) -> None
super(CounterMovePart, self).setup(registrar)
# Method
registrar.add_method_model(
self.move, self.name + "Move", needs_context=True)
@add_call_types
def move(self, context, demand, duration=0):
# type: (builtin.hooks.AContext, ADemand, ADuration) -> None
"""Move the counter to the demand value, taking duration seconds like
a motor would do"""
start = time.time()
child = context.block_view(self.mri)
distance = demand - child.counter.value
remaining = duration
# "Move" the motor, ticking at UPDATE_TICK rate
while remaining > 0:
child.counter.put_value(demand - distance * remaining / duration)
context.sleep(min(remaining, UPDATE_TICK))
remaining = start + duration - time.time()
# Final move to make sure we end up at the right place
child.counter.put_value(demand)
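# --- Hypothetical usage sketch (not part of the original file) ---
# In a running Malcolm process the registered method (named "<part name>Move")
# would be called through the hosting block; names below are placeholders.
# block = context.block_view("EXAMPLE:MANAGER")
# block.counterMove(demand=10.0, duration=2.0)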
| 36.018519
| 77
| 0.679177
| 1,327
| 0.682262
| 0
| 0
| 1,360
| 0.699229
| 0
| 0
| 713
| 0.366581
|
3fea9db35ea3c9741fed546bd70ab750ac964bbd
| 12,740
|
py
|
Python
|
scripts/run_temporal_averaging.py
|
alexkaiser/heart_valves
|
53f30ec3680503542890a84949b7fb51d1734272
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/run_temporal_averaging.py
|
alexkaiser/heart_valves
|
53f30ec3680503542890a84949b7fb51d1734272
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/run_temporal_averaging.py
|
alexkaiser/heart_valves
|
53f30ec3680503542890a84949b7fb51d1734272
|
[
"BSD-3-Clause"
] | null | null | null |
import pyvista
import os, sys, glob
import subprocess
import math
from natsort import natsorted
import multiprocessing
def write_pvd(base_name, dt, nsteps, extension, nprocs_sim=1):
prefix = '''<?xml version="1.0"?>
<VTKFile type="Collection" version="0.1"
byte_order="LittleEndian"
compressor="vtkZLibDataCompressor">
<Collection>
'''
suffix = ''' </Collection>
</VTKFile>
'''
initialized = False
for n in range(nsteps):
for proc in range(nprocs_sim):
if not initialized:
filename_out = base_name + '.pvd'
print("filename_out = ", filename_out)
f_write = open(filename_out, 'w')
f_write.write(prefix)
initialized = True
tmp_str = ' <DataSet timestep="'
tmp_str += '{:.14f}'.format(dt * n)
tmp_str += '" group="" part="'
tmp_str += str(proc) + '"'
tmp_str += ' file="'
if nprocs_sim > 1:
tmp_str += base_name + str(n).zfill(4) + '/' # sorted into directories
tmp_str += base_name + str(n).zfill(4) + '.'
if nprocs_sim > 1:
tmp_str += str(proc) + '.'
tmp_str += extension
tmp_str += '"/>\n'
f_write.write(tmp_str)
f_write.write(suffix)
f_write.close()
def read_distributed_vtr(dir_name):
files = natsorted(glob.glob(dir_name + "/*.vtr"))
#print("files = ", files)
blocks = pyvista.MultiBlock([pyvista.read(f) for f in files])
return blocks.combine()
def average_eulerian_mesh_one_step(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension):
# always start with initial eulerian mesh
dir_name = "eulerian_vars" + str(0).zfill(4)
mesh = read_distributed_vtr(dir_name)
n_to_average = 0
for var_name in eulerian_var_names:
mesh[var_name] *= 0.0
# average over times
for idx, t in enumerate(times):
# check if time in range
cycle_num = math.floor(t / cycle_duration)
# skip cycle one
if cycle_num in cycles_to_output:
dir_name = "eulerian_vars" + str(idx).zfill(4)
# time since start of this cycle
t_reduced = t % cycle_duration
idx_mri_read_temp = math.floor(t_reduced / dt_mri_read)
if idx_mri_read == idx_mri_read_temp:
print("processing step ", idx)
mesh_tmp = read_distributed_vtr(dir_name)
for var_name in eulerian_var_names:
mesh[var_name] += mesh_tmp[var_name]
n_to_average += 1.0
# print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
for var_name in eulerian_var_names:
# if none to average, mesh output as all zeros
if n_to_average != 0:
mesh[var_name] /= float(n_to_average)
fname = base_name_out + str(idx_mri_read).zfill(4) + '.' + extension
mesh.save(base_dir + "/" + fname)
if __name__ == '__main__':
if len(sys.argv) >= 2:
nprocs_sim = int(sys.argv[1]) # number of procs in the sim, which determines how many files go into the decomposed data
else:
print("using default nprocs_sim = 1")
nprocs_sim = 1
# first make sure there is a times file
if not os.path.isfile('times.txt'):
subprocess.call('visit -cli -nowin -s ~/copies_scripts/write_times_file_visit.py', shell=True)
times = []
times_file = open('times.txt', 'r')
for line in times_file:
times.append(float(line))
eulerian = True
lagrangian = True
first_cycle = True
second_cycle = False
if first_cycle:
cycles_to_output = [0] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_1"
elif second_cycle:
cycles_to_output = [1] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_2"
else:
cycles_to_output = [1,2,3] # zero indexed
# set up some directories
base_dir = "vis_data_averaged_cycle_2_3_4"
cycle_duration = 8.3250000000000002e-01
mri_read_times_per_cycle = 10
dt_mri_read = cycle_duration / mri_read_times_per_cycle
output_times_per_cycle = 20
dt_output = cycle_duration / output_times_per_cycle
if not os.path.exists(base_dir):
os.mkdir(base_dir)
if eulerian:
eulerian_var_names = ['P','Omega', 'U']
# output file extension
extension = 'vtu'
suffix = "_averaged"
base_name_out = "eulerian_vars_mri_freq"
# average all the Eulerian files here
# for idx_mri_read in range(mri_read_times_per_cycle):
# average_eulerian_mesh_one_step(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension)
jobs = []
for idx_mri_read in range(mri_read_times_per_cycle):
p = multiprocessing.Process(target=average_eulerian_mesh_one_step, args=(idx_mri_read, eulerian_var_names, times, cycle_duration, cycles_to_output, dt_mri_read, base_dir, base_name_out, extension))
jobs.append(p)
p.start()
for p in jobs:
p.join()
# for idx_output in range(output_times_per_cycle):
# eulerian_dir_name = base_dir + '/' + 'eulerian_vars' + suffix + str(idx_output).zfill(4)
# if not os.path.exists(eulerian_dir_name):
# os.mkdir(eulerian_dir_name)
# only average cycle 2
# cycles_to_include = [2]
# loops over parallel data structure as outer loop
# for proc_num in range(nprocs_sim):
# read and zero meshes to use to accumulate from first mesh
dir_name = "eulerian_vars" + str(0).zfill(4)
# read all time zero meshes
# meshes_mri_read = []
# n_to_average = []
# for idx_mri_read in range(mri_read_times_per_cycle):
# meshes_mri_read.append(read_distributed_vtr(dir_name))
# n_to_average.append(0)
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] *= 0.0
meshes_mri_read = []
for idx_mri_read in range(mri_read_times_per_cycle):
fname = base_name_out + str(idx_mri_read).zfill(4) + '.' + extension
meshes_mri_read.append( pyvista.read(base_dir + "/" + fname) )
meshes_output = []
for idx_output in range(output_times_per_cycle):
meshes_output.append(read_distributed_vtr(dir_name))
for var_name in eulerian_var_names:
meshes_output[idx_output][var_name] *= 0.0
# # average over times
# for idx, t in enumerate(times):
# # check if time in range
# cycle_num = math.floor(t / cycle_duration)
# # skip cycle one
# if cycle_num in cycles_to_output:
# print("processing step ", idx)
# dir_name = "eulerian_vars" + str(idx).zfill(4)
# # time since start of this cycle
# t_reduced = t % cycle_duration
# idx_mri_read = math.floor(t_reduced / dt_mri_read)
# mesh_tmp = read_distributed_vtr(dir_name)
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] += mesh_tmp[var_name]
# n_to_average[idx_mri_read] += 1.0
# # print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
# print("n_to_average = ", n_to_average)
# # convert sums to averages
# for idx_mri_read in range(mri_read_times_per_cycle):
# for var_name in eulerian_var_names:
# meshes_mri_read[idx_mri_read][var_name] /= float(n_to_average[idx_mri_read])
# linearly interpolate before output
for idx_mri_read in range(mri_read_times_per_cycle):
for var_name in eulerian_var_names:
meshes_output[2*idx_mri_read][var_name] = meshes_mri_read[idx_mri_read][var_name]
for idx_mri_read in range(mri_read_times_per_cycle):
idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
for var_name in eulerian_var_names:
meshes_output[2*idx_mri_read + 1][var_name] = 0.5 * (meshes_mri_read[idx_mri_read][var_name] + meshes_mri_read[idx_mri_read_next][var_name])
for idx_output in range(output_times_per_cycle):
eulerian_dir_name = base_dir
fname = "eulerian_vars" + suffix + str(idx_output).zfill(4) + '.' + extension
meshes_output[idx_output].save(eulerian_dir_name + "/" + fname)
# summary file
nprocs_output = 1
write_pvd("eulerian_vars" + suffix, dt_output, output_times_per_cycle, extension, nprocs_output)
os.rename("eulerian_vars" + suffix + '.pvd', base_dir + "/eulerian_vars" + suffix + '.pvd')
if lagrangian:
suffix = "_averaged"
for lag_file in os.listdir('..'):
if lag_file.endswith('.vertex'):
print("found lag file ", lag_file, ", processing ")
base_name_lag = lag_file.rsplit('.', 1)[0]
print("base_name_lag = ", base_name_lag)
# read and zero meshes to use to accumulate from first mesh
fname = base_name_lag + str(0).zfill(4) + '.vtu'
if not os.path.isfile(fname):
print("vtu file not found, cannot process this file, continuing")
continue
meshes_mri_read = []
n_to_average = []
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_mri_read.append(pyvista.read(fname))
n_to_average.append(0)
meshes_mri_read[idx_mri_read].points *= 0.0
meshes_output = []
for idx_output in range(output_times_per_cycle):
meshes_output.append(pyvista.read(fname))
meshes_output[idx_output].points *= 0.0
# average over times
for idx, t in enumerate(times):
# check if time in range
cycle_num = math.floor(t / cycle_duration)
# skip cycle one
if cycle_num in cycles_to_output:
fname = base_name_lag + str(idx).zfill(4) + '.vtu'
# time since start of this cycle
t_reduced = t % cycle_duration
idx_mri_read = math.floor(t_reduced / dt_mri_read)
mesh_tmp = pyvista.read(fname)
meshes_mri_read[idx_mri_read].points += mesh_tmp.points
n_to_average[idx_mri_read] += 1.0
# print("t = ", t, "t_reduced = ", t_reduced, "idx_mri_read = ", idx_mri_read)
print("n_to_average = ", n_to_average)
# convert sums to averages
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_mri_read[idx_mri_read].points /= float(n_to_average[idx_mri_read])
# linearly interpolate before output
for idx_mri_read in range(mri_read_times_per_cycle):
meshes_output[2*idx_mri_read].points = meshes_mri_read[idx_mri_read].points
for idx_mri_read in range(mri_read_times_per_cycle):
idx_mri_read_next = (idx_mri_read + 1) % mri_read_times_per_cycle
meshes_output[2*idx_mri_read + 1].points = 0.5 * (meshes_mri_read[idx_mri_read].points + meshes_mri_read[idx_mri_read_next].points)
for idx_output in range(output_times_per_cycle):
fname = base_name_lag + suffix + str(idx_output).zfill(4) + '.vtu'
meshes_output[idx_output].save(base_dir + "/" + fname)
# os.rename(fname, base_dir + "/" + base_name_lag + suffix + '.pvd')
# summary file
extension = 'vtu'
write_pvd(base_name_lag + suffix, dt_output, output_times_per_cycle, extension, 1)
os.rename(base_name_lag + suffix + '.pvd', base_dir + "/" + base_name_lag + suffix + '.pvd')
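# --- Hypothetical usage sketch (not part of the original file) ---
# write_pvd can also be used on its own, e.g. to index 20 single-file .vtu
# steps written every 0.05 s (the base name is a placeholder):
# write_pvd("eulerian_vars_averaged", 0.05, 20, "vtu", nprocs_sim=1)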
| 33.882979
| 209
| 0.586499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,968
| 0.31146
|
3fec010889ccbdbd07b4bb7fe68a11cde75d9565
| 3,641
|
py
|
Python
|
server.py
|
MVHSiot/yelperhelper
|
a94dc9e80e301241da58b678770338e3fa9b642e
|
[
"MIT"
] | null | null | null |
server.py
|
MVHSiot/yelperhelper
|
a94dc9e80e301241da58b678770338e3fa9b642e
|
[
"MIT"
] | null | null | null |
server.py
|
MVHSiot/yelperhelper
|
a94dc9e80e301241da58b678770338e3fa9b642e
|
[
"MIT"
] | null | null | null |
import sys
try:
sys.path.append('/opt/python3/lib/python3.4/site-packages')
except:
pass
import yelp_api
import pickle
import calc
pub_key = 'pub-c-2c436bc0-666e-4975-baaf-63f16a61558d'
sub_key = 'sub-c-0442432a-3312-11e7-bae3-02ee2ddab7fe'
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
pnconfig = PNConfiguration()
pnconfig.subscribe_key = sub_key
pnconfig.publish_key = pub_key
pubnub = PubNub(pnconfig)
def publishCallback(result, status):
pass
class subscribeCallback(SubscribeCallback):
def status(self, pubnub, status):
pass
def presence(self, pubnub, presence):
pass # handle incoming presence data
def message(self, pubnub, message):
if message.message['cmdtype'] == "request":
print(message.message)
try:
print('searching current loc')
best_restaurants = calc.process(message.message['latitude'],message.message['longitude'])
except:
print('searching city')
print(message.message['city'].replace("%20", "+"))
best_restaurants = calc.process(city=message.message['city'].replace("%20", "+"))
review = yelp_api.get_business_review(best_restaurants[0]['id'])
review1 = yelp_api.get_business_review(best_restaurants[1]['id'])
review2 = yelp_api.get_business_review(best_restaurants[2]['id'])
id1 = best_restaurants[0]['id']
id2 = best_restaurants[1]['id']
id3 = best_restaurants[2]['id']
loc = best_restaurants[0]['location']
name = best_restaurants[0]['name']
rating = best_restaurants[0]['rating']
price = best_restaurants[0]['price']
image = best_restaurants[0]['image_url']
url = best_restaurants[0]['url']
loc1 = best_restaurants[1]['location']
name1 = best_restaurants[1]['name']
rating1 = best_restaurants[1]['rating']
price1 = best_restaurants[1]['price']
image1 = best_restaurants[1]['image_url']
url1 = best_restaurants[1]['url']
loc2 = best_restaurants[2]['location']
name2 = best_restaurants[2]['name']
rating2 = best_restaurants[2]['rating']
price2 = best_restaurants[2]['price']
image2 = best_restaurants[2]['image_url']
url2 = best_restaurants[2]['url']
print(name,name1,name2)
pubnub.publish().channel('main_channel').message([{"name":name,"rating":rating,"price":price,"loc":loc,"image":image,"url":url, "review":review, "id":id1},
{"name":name1,"rating":rating1,"price":price1,"loc":loc1,"image":image1,"url":url1, "review":review1, "id":id2},
{"name":name2,"rating":rating2,"price":price2,"loc":loc2,"image":image2,"url":url2, "review":review2, "id":id3}]).async(publishCallback)
elif message.message['cmdtype'] == "append":
print("new restaurant received")
with open("restaurant_data.dat", "rb") as f:
rl = pickle.load(f)
rl.append(yelp_api.convertFormat(yelp_api.get_business_by_id(message.message['id'])))
with open("restaurant_data.dat", "wb") as f:
pickle.dump(rl,f)
pubnub.add_listener(subscribeCallback())
pubnub.subscribe().channels('secondary_channel').execute()
while True:
pass
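# --- Hypothetical usage sketch (not part of the original file) ---
# A client would publish a request of this shape to 'secondary_channel' and
# listen on 'main_channel' for the three-restaurant reply (values are placeholders):
# pubnub.publish().channel('secondary_channel').message({
#     "cmdtype": "request",
#     "latitude": 37.4,
#     "longitude": -122.1,
#     "city": "Mountain%20View",
# }).async(publishCallback)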
| 44.402439
| 198
| 0.611096
| 2,925
| 0.803351
| 0
| 0
| 0
| 0
| 0
| 0
| 711
| 0.195276
|
3fed58a2f0d55e3c995e8a4ab026bd1e2fa3c343
| 59
|
py
|
Python
|
gmaploader/__init__.py
|
cormac-rynne/gmaploader
|
eec679af9a5d36b691bde05ffd6043bfef7e1acf
|
[
"MIT"
] | 2
|
2022-02-02T16:41:17.000Z
|
2022-03-16T08:43:18.000Z
|
gmaploader/__init__.py
|
cormac-rynne/gmaploader
|
eec679af9a5d36b691bde05ffd6043bfef7e1acf
|
[
"MIT"
] | null | null | null |
gmaploader/__init__.py
|
cormac-rynne/gmaploader
|
eec679af9a5d36b691bde05ffd6043bfef7e1acf
|
[
"MIT"
] | null | null | null |
__version__ = '0.1.1'
from .gmaploader import GMapLoader
| 11.8
| 34
| 0.745763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.118644
|
3feef5a3e0cc27bf16fbab36a842bb9bb4ecc2cd
| 643
|
py
|
Python
|
machina/templatetags/forum_tracking_tags.py
|
jujinesy/initdjango-machina
|
93c24877f546521867b3ef77fa278237af932d42
|
[
"BSD-3-Clause"
] | 1
|
2021-10-08T03:31:24.000Z
|
2021-10-08T03:31:24.000Z
|
machina/templatetags/forum_tracking_tags.py
|
jujinesy/initdjango-machina
|
93c24877f546521867b3ef77fa278237af932d42
|
[
"BSD-3-Clause"
] | 7
|
2020-02-12T01:11:13.000Z
|
2022-03-11T23:26:32.000Z
|
machina/templatetags/forum_tracking_tags.py
|
jujinesy/initdjango-machina
|
93c24877f546521867b3ef77fa278237af932d42
|
[
"BSD-3-Clause"
] | 1
|
2019-04-20T05:26:27.000Z
|
2019-04-20T05:26:27.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from machina.core.loading import get_class
TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler')
register = template.Library()
@register.simple_tag(takes_context=True)
def get_unread_topics(context, topics, user):
"""
This will return a list of unread topics for the given user from a given set of topics.
Usage::
{% get_unread_topics topics request.user as unread_topics %}
"""
request = context.get('request', None)
return TrackingHandler(request=request).get_unread_topics(topics, user)
| 24.730769
| 91
| 0.738725
| 0
| 0
| 0
| 0
| 396
| 0.615863
| 0
| 0
| 259
| 0.402799
|
3fef44aadd222f045efc994567ce2c00bef12f97
| 1,194
|
py
|
Python
|
xmodaler/modeling/layers/attention_pooler.py
|
cclauss/xmodaler
|
1368fba6c550e97008628edbf01b59a0a6c8fde5
|
[
"Apache-2.0"
] | 830
|
2021-06-26T07:16:33.000Z
|
2022-03-25T10:31:32.000Z
|
xmodaler/modeling/layers/attention_pooler.py
|
kevinjunwei/xmodaler
|
3e128a816876988c5fb07d842fde4a140e699dde
|
[
"Apache-2.0"
] | 28
|
2021-08-19T12:39:02.000Z
|
2022-03-14T13:04:19.000Z
|
xmodaler/modeling/layers/attention_pooler.py
|
kevinjunwei/xmodaler
|
3e128a816876988c5fb07d842fde4a140e699dde
|
[
"Apache-2.0"
] | 85
|
2021-08-15T06:58:29.000Z
|
2022-02-19T07:30:56.000Z
|
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: Yehao Li
@contact: yehaoli.sysu@gmail.com
"""
import torch
import torch.nn as nn
__all__ = ["AttentionPooler"]
class AttentionPooler(nn.Module):
def __init__(
self,
*,
hidden_size: int,
output_size: int,
dropout: float,
use_bn: bool
):
super(AttentionPooler, self).__init__()
self.att = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(hidden_size, 1)
)
self.embed = nn.Linear(hidden_size, output_size)
self.softmax = nn.Softmax(dim=-1)
self.bn = nn.BatchNorm1d(output_size) if use_bn else None
def forward(self, hidden_states, masks = None, **kwargs):
score = self.att(hidden_states).squeeze(-1)
if masks is not None:
score = score + masks.view(score.size(0), -1)
score = self.softmax(score)
output = score.unsqueeze(1).matmul(hidden_states).squeeze(1)
output = self.embed(output)
if self.bn is not None:
output = self.bn(output)
return output
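# --- Hypothetical usage sketch (not part of the original file) ---
# Pools a batch of 2 sequences of length 10 with hidden size 16 down to one
# 8-dimensional vector per sequence; all sizes here are arbitrary.
if __name__ == "__main__":
    pooler = AttentionPooler(hidden_size=16, output_size=8, dropout=0.1, use_bn=False)
    hidden_states = torch.randn(2, 10, 16)
    pooled = pooler(hidden_states)  # shape: (2, 8)
    print(pooled.shape)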
| 29.85
| 68
| 0.593802
| 1,031
| 0.863484
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.092965
|
3fefc1a6bf75d8c0151f7c8fa8710346285e3ae9
| 281
|
py
|
Python
|
aas_core_meta/__init__.py
|
aas-core-works/aas-core3-meta
|
88b618c82f78392a47ee58cf2657ae6df8e5a418
|
[
"MIT"
] | null | null | null |
aas_core_meta/__init__.py
|
aas-core-works/aas-core3-meta
|
88b618c82f78392a47ee58cf2657ae6df8e5a418
|
[
"MIT"
] | null | null | null |
aas_core_meta/__init__.py
|
aas-core-works/aas-core3-meta
|
88b618c82f78392a47ee58cf2657ae6df8e5a418
|
[
"MIT"
] | null | null | null |
"""Provide meta-models for Asset Administration Shell information model."""
__version__ = "2021.11.20a2"
__author__ = (
"Nico Braunisch, Marko Ristin, Robert Lehmann, Marcin Sadurski, Manuel Sauer"
)
__license__ = "License :: OSI Approved :: MIT License"
__status__ = "Alpha"
| 31.222222
| 81
| 0.736655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.758007
|
3ff189fdd25a003504ca018c6776d007950e9fc2
| 2,937
|
py
|
Python
|
arxivmail/web.py
|
dfm/ArXivMailer
|
f217466b83ae3009330683d1c53ba5a44b4bab29
|
[
"MIT"
] | 1
|
2020-09-15T11:59:44.000Z
|
2020-09-15T11:59:44.000Z
|
arxivmail/web.py
|
dfm/ArXivMailer
|
f217466b83ae3009330683d1c53ba5a44b4bab29
|
[
"MIT"
] | null | null | null |
arxivmail/web.py
|
dfm/ArXivMailer
|
f217466b83ae3009330683d1c53ba5a44b4bab29
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import flask
from .mail import send_email
from .models import db, Subscriber, Category
__all__ = ["web"]
web = flask.Blueprint("web", __name__)
@web.route("/", methods=["GET", "POST"])
def index():
if flask.request.method == "POST":
email = flask.request.form.get("email", None)
if email is None:
flask.flash("Missing email address.")
category = flask.request.form.get("category", None)
if category is None:
flask.flash("Missing arXiv category.")
if email is not None and category is not None:
category = category.strip()
email = email.strip()
cat = Category.query.filter_by(arxiv_name=category).first()
user = Subscriber.query.filter_by(email=email).first()
if user is None:
user = Subscriber(email)
db.session.add(user)
db.session.commit()
html_body = flask.render_template("welcome_email.html",
user=user)
send_email(email, "Welcome", html_body)
if cat in user.subscriptions:
flask.flash("Already subscribed to {0}".format(category))
else:
user.subscriptions.append(cat)
db.session.add(user)
db.session.commit()
flask.flash("{0} subscribed to {1}".format(email, category))
categories = Category.query.order_by("arxiv_name").all()
return flask.render_template("index.html", categories=categories)
@web.route("/confirm/<token>")
def confirm(token):
user = Subscriber.check_token(token)
if user is None:
return flask.abort(404)
user.confirmed = True
db.session.add(user)
db.session.commit()
flask.flash("Confirmed {0}.".format(user.email))
return flask.redirect(flask.url_for(".manage", token=token))
@web.route("/manage/<token>", methods=["GET", "POST"])
def manage(token):
user = Subscriber.check_token(token)
if user is None:
return flask.abort(404)
categories = Category.query.order_by("arxiv_name").all()
cdict = dict((c.arxiv_name, c) for c in categories)
if flask.request.method == "POST":
cats = [cdict.get(c, None)
for c in flask.request.form.getlist("category")]
        user.subscriptions = [c for c in cats if c is not None]
        db.session.add(user)
        db.session.commit()
return flask.render_template("manage.html", categories=categories,
user=user)
@web.route("/unsubscribe/<token_or_email>")
def unsubscribe(token_or_email):
user = Subscriber.query.filter_by(email=token_or_email).first()
if user is None:
user = Subscriber.check_token(token_or_email)
if user is not None:
db.session.delete(user)
db.session.commit()
flask.flash("Unsubscribed.")
return flask.redirect(flask.url_for(".index"))
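# Illustrative wiring sketch (added for illustration; not part of the original
# module). A hypothetical application factory would register this blueprint as:
#   app = flask.Flask(__name__)
#   app.register_blueprint(web)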
| 34.964286
| 76
| 0.606742
| 0
| 0
| 0
| 0
| 2,755
| 0.938032
| 0
| 0
| 388
| 0.132108
|
3ff244c8c0c0b1265e61249a530b3e42331c5fc4
| 13,794
|
py
|
Python
|
qiskit/pulse/timeslots.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | 3
|
2019-11-20T08:15:28.000Z
|
2020-11-01T15:32:57.000Z
|
qiskit/pulse/timeslots.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | null | null | null |
qiskit/pulse/timeslots.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Timeslots for channels.
"""
from collections import defaultdict
import itertools
from typing import Tuple, Union, Optional
from .channels import Channel
from .exceptions import PulseError
# pylint: disable=missing-return-doc
class Interval:
"""Time interval."""
def __init__(self, start: int, stop: int):
"""Create an interval, (start, stop).
Args:
start: Starting value of interval
stop: Stopping value of interval
Raises:
PulseError: when invalid time or duration is specified
"""
if start < 0:
raise PulseError("Cannot create Interval with negative starting value")
if stop < 0:
raise PulseError("Cannot create Interval with negative stopping value")
if start > stop:
raise PulseError("Cannot create Interval with value start after stop")
self._start = start
self._stop = stop
@property
def start(self):
"""Start of interval."""
return self._start
@property
def stop(self):
"""Stop of interval."""
return self._stop
@property
def duration(self):
"""Duration of this interval."""
return self.stop - self.start
def has_overlap(self, interval: 'Interval') -> bool:
"""Check if self has overlap with `interval`.
Args:
interval: interval to be examined
Returns:
bool: True if self has overlap with `interval` otherwise False
"""
return self.start < interval.stop and interval.start < self.stop
def shift(self, time: int) -> 'Interval':
"""Return a new interval shifted by `time` from self
Args:
time: time to be shifted
Returns:
Interval: interval shifted by `time`
"""
return Interval(self.start + time, self.stop + time)
def __eq__(self, other):
"""Two intervals are the same if they have the same starting and stopping values.
Args:
other (Interval): other Interval
Returns:
bool: are self and other equal.
"""
return self.start == other.start and self.stop == other.stop
    def stops_before(self, other):
        """Whether this interval stops at a value less than or equal to the
        other interval's starting time.
        Args:
            other (Interval): other Interval
        Returns:
            bool: True if self stops at or before other starts.
"""
return self.stop <= other.start
    def starts_after(self, other):
        """Whether this interval starts at a value greater than or equal to the
        other interval's stopping time.
        Args:
            other (Interval): other Interval
        Returns:
            bool: True if self starts at or after other stops.
"""
return self.start >= other.stop
def __repr__(self):
"""Return a readable representation of Interval Object"""
return "{}({}, {})".format(self.__class__.__name__, self.start, self.stop)
class Timeslot:
"""Named tuple of (Interval, Channel)."""
def __init__(self, interval: Interval, channel: Channel):
self._interval = interval
self._channel = channel
@property
def interval(self):
"""Interval of this time slot."""
return self._interval
@property
def channel(self):
"""Channel of this time slot."""
return self._channel
@property
def start(self):
"""Start of timeslot."""
return self.interval.start
@property
def stop(self):
"""Stop of timeslot."""
return self.interval.stop
@property
def duration(self):
"""Duration of this timeslot."""
return self.interval.duration
def shift(self, time: int) -> 'Timeslot':
"""Return a new Timeslot shifted by `time`.
Args:
time: time to be shifted
"""
return Timeslot(self.interval.shift(time), self.channel)
def has_overlap(self, other: 'Timeslot') -> bool:
"""Check if self has overlap with `interval`.
Args:
other: Other Timeslot to check for overlap with
Returns:
bool: True if intervals overlap and are on the same channel
"""
return self.interval.has_overlap(other) and self.channel == other.channel
def __eq__(self, other) -> bool:
"""Two time-slots are the same if they have the same interval and channel.
Args:
other (Timeslot): other Timeslot
"""
return self.interval == other.interval and self.channel == other.channel
def __repr__(self):
"""Return a readable representation of Timeslot Object"""
return "{}({}, {})".format(self.__class__.__name__,
self.channel,
(self.interval.start, self.interval.stop))
class TimeslotCollection:
"""Collection of `Timeslot`s."""
def __init__(self, *timeslots: Union[Timeslot, 'TimeslotCollection']):
"""Create a new time-slot collection.
Args:
*timeslots: list of time slots
Raises:
PulseError: when overlapped time slots are specified
"""
self._table = defaultdict(list)
for timeslot in timeslots:
if isinstance(timeslot, TimeslotCollection):
self._merge_timeslot_collection(timeslot)
else:
self._merge_timeslot(timeslot)
@property
def timeslots(self) -> Tuple[Timeslot]:
"""Sorted tuple of `Timeslot`s in collection."""
return tuple(itertools.chain.from_iterable(self._table.values()))
@property
    def channels(self) -> Tuple[Channel]:
"""Channels within the timeslot collection."""
return tuple(k for k, v in self._table.items() if v)
@property
def start_time(self) -> int:
"""Return earliest start time in this collection."""
return self.ch_start_time(*self.channels)
@property
def stop_time(self) -> int:
"""Return maximum time of timeslots over all channels."""
return self.ch_stop_time(*self.channels)
@property
def duration(self) -> int:
"""Return maximum duration of timeslots over all channels."""
return self.stop_time
def _merge_timeslot_collection(self, other: 'TimeslotCollection'):
"""Mutably merge timeslot collections into this TimeslotCollection.
Args:
other: TimeSlotCollection to merge
"""
for channel, other_ch_timeslots in other._table.items():
if channel not in self._table:
self._table[channel] += other_ch_timeslots # extend to copy items
else:
# if channel is in self there might be an overlap
for idx, other_ch_timeslot in enumerate(other_ch_timeslots):
insert_idx = self._merge_timeslot(other_ch_timeslot)
if insert_idx == len(self._table[channel]) - 1:
# Timeslot was inserted at end of list. The rest can be appended.
self._table[channel] += other_ch_timeslots[idx + 1:]
break
def _merge_timeslot(self, timeslot: Timeslot) -> int:
"""Mutably merge timeslots into this TimeslotCollection.
Note timeslots are sorted internally on their respective channel
Args:
timeslot: Timeslot to merge
Returns:
int: Return the index in which timeslot was inserted
Raises:
PulseError: If timeslots overlap
"""
interval = timeslot.interval
ch_timeslots = self._table[timeslot.channel]
insert_idx = len(ch_timeslots)
# merge timeslots by insertion sort.
# Worst case O(n_channels), O(1) for append
# could be improved by implementing an interval tree
for ch_timeslot in reversed(ch_timeslots):
ch_interval = ch_timeslot.interval
if interval.start >= ch_interval.stop:
break
elif interval.has_overlap(ch_interval):
raise PulseError("Timeslot: {0} overlaps with existing"
"Timeslot: {1}".format(timeslot, ch_timeslot))
insert_idx -= 1
ch_timeslots.insert(insert_idx, timeslot)
return insert_idx
def ch_timeslots(self, channel: Channel) -> Tuple[Timeslot]:
"""Sorted tuple of `Timeslot`s for channel in this TimeslotCollection."""
if channel in self._table:
return tuple(self._table[channel])
return tuple()
def ch_start_time(self, *channels: Channel) -> int:
"""Return earliest start time in this collection.
Args:
*channels: Channels over which to obtain start_time.
"""
timeslots = list(itertools.chain(*(self._table[chan] for chan in channels
if chan in self._table)))
if timeslots:
return min(timeslot.start for timeslot in timeslots)
return 0
def ch_stop_time(self, *channels: Channel) -> int:
"""Return maximum time of timeslots over all channels.
Args:
*channels: Channels over which to obtain stop time.
"""
timeslots = list(itertools.chain(*(self._table[chan] for chan in channels
if chan in self._table)))
if timeslots:
return max(timeslot.stop for timeslot in timeslots)
return 0
def ch_duration(self, *channels: Channel) -> int:
"""Return maximum duration of timeslots over all channels.
Args:
*channels: Channels over which to obtain the duration.
"""
return self.ch_stop_time(*channels)
def is_mergeable_with(self, other: 'TimeslotCollection') -> bool:
"""Return if self is mergeable with `timeslots`.
Args:
other: TimeslotCollection to be checked for mergeability
"""
common_channels = set(self.channels) & set(other.channels)
for channel in common_channels:
ch_timeslots = self.ch_timeslots(channel)
other_ch_timeslots = other.ch_timeslots(channel)
if ch_timeslots[-1].stop < other_ch_timeslots[0].start:
continue # We are appending along this channel
i = 0 # iterate through this
j = 0 # iterate through other
while i < len(ch_timeslots) and j < len(other_ch_timeslots):
if ch_timeslots[i].interval.has_overlap(other_ch_timeslots[j].interval):
return False
if ch_timeslots[i].stop <= other_ch_timeslots[j].start:
i += 1
else:
j += 1
return True
def merge(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':
"""Return a new TimeslotCollection with `timeslots` merged into it.
Args:
timeslots: TimeslotCollection to be merged
"""
return TimeslotCollection(self, timeslots)
def shift(self, time: int) -> 'TimeslotCollection':
"""Return a new TimeslotCollection shifted by `time`.
Args:
time: time to be shifted by
"""
slots = [slot.shift(time) for slot in self.timeslots]
return TimeslotCollection(*slots)
def complement(self, stop_time: Optional[int] = None) -> 'TimeslotCollection':
"""Return a complement TimeSlotCollection containing all unoccupied Timeslots
within this TimeSlotCollection.
Args:
            stop_time: Final time up to which complement Timeslots will be returned.
If not set, defaults to last time in this TimeSlotCollection
"""
timeslots = []
stop_time = stop_time or self.stop_time
for channel in self.channels:
curr_time = 0
for timeslot in self.ch_timeslots(channel):
next_time = timeslot.interval.start
if next_time-curr_time > 0:
timeslots.append(Timeslot(Interval(curr_time, next_time), channel))
curr_time = timeslot.interval.stop
# pad out channel to stop_time
if stop_time-curr_time > 0:
timeslots.append(Timeslot(Interval(curr_time, stop_time), channel))
return TimeslotCollection(*timeslots)
def __eq__(self, other) -> bool:
"""Two time-slot collections are the same if they have the same time-slots.
Args:
other (TimeslotCollection): other TimeslotCollection
"""
if set(self.channels) != set(other.channels):
return False
for channel in self.channels:
            if self.ch_timeslots(channel) != other.ch_timeslots(channel):
return False
return True
def __repr__(self):
"""Return a readable representation of TimeslotCollection Object"""
rep = dict()
for key, val in self._table.items():
rep[key] = [(timeslot.start, timeslot.stop) for timeslot in val]
return self.__class__.__name__ + str(rep)
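# Illustrative usage sketch (added for illustration; not part of Terra). It
# assumes DriveChannel is importable from qiskit.pulse.channels, as in
# contemporary releases; the channel index and times are arbitrary examples.
if __name__ == "__main__":
    from qiskit.pulse.channels import DriveChannel
    d0 = DriveChannel(0)
    slots = TimeslotCollection(Timeslot(Interval(0, 10), d0),
                               Timeslot(Interval(10, 25), d0))
    print(slots.ch_duration(d0))         # 25: stop time of the last slot on d0
    print(slots.complement().timeslots)  # (): the channel is fully occupied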
| 32.456471
| 89
| 0.603741
| 13,044
| 0.945629
| 0
| 0
| 1,602
| 0.116137
| 0
| 0
| 5,982
| 0.433667
|
3ff2f2040265231a2d5824e04f8c8d39faec1ec0
| 22,499
|
py
|
Python
|
core/assembler.py
|
iahuang/scratch-gcc
|
bc4989f3dc54f0cdc3098f66078d17750c111bec
|
[
"MIT"
] | null | null | null |
core/assembler.py
|
iahuang/scratch-gcc
|
bc4989f3dc54f0cdc3098f66078d17750c111bec
|
[
"MIT"
] | null | null | null |
core/assembler.py
|
iahuang/scratch-gcc
|
bc4989f3dc54f0cdc3098f66078d17750c111bec
|
[
"MIT"
] | null | null | null |
""" A basic two-pass MIPS assembler. Outputs a binary file in a custom format that can then be loaded into Scratch """
import struct
import re
import json
import os
"""
Diagram of the Scratch MIPS VM memory space
+--------------------- <- 0x0000000
| i/o space (see below)
+--------------------- <- 0x0000100
| data segment
+---------------------
| program
|
+--------------------- <- everything from here up ^^^ is included in the scratch binary file
|
| stack ^^^^
+--------------------- <- stack_pointer
| uninitialized/heap
|
|
+--------------------- <- mem_end
Static memory segment for interfacing with the Scratch VM (256 bytes wide)
Definitions for interfacing with this part of memory can be found in "lib/sys.h"
io {
0x00000 char stdout_buffer - write to this address to print to the "console"
0x00004 uint32 mem_end - pointer to the last address in memory
0x00008 uint32 stack_start - pointer to the bottom of the stack
0x0000C uint8 halt - set this byte to halt execution of the program
for whatever reason
}
...
Scratch executable binary format (the file outputted by Assembly.outputBinaryFile() )
header (100 bytes) {
char[4] identifier - set to "SBIN"
uint32 program_counter - the location in memory to begin execution
uint32 stack_pointer - initial location of the stack pointer
uint32 alloc_size - total amount of system memory to allocate
}
vvvv to be loaded in starting at address 0x00000000
program_data (n bytes) {
byte[256] - i/o segment data (zero initialized)
byte[n] - program data
}
"""
class AssemblyMessage:
def __init__(self, message, line=None):
self.message = message
self.line = line
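# Illustrative loader-side sketch (added for illustration; not part of the
# original assembler). It unpacks the header fields described in the module
# docstring above, using the same "IIII" layout that makeHeader() packs further
# down; the function name and path argument are hypothetical.
def _read_sbin_header_example(path):
    """Return (program_counter, stack_pointer, alloc_size, program_data)."""
    with open(path, "rb") as fl:
        ident, program_counter, stack_pointer, alloc_size = struct.unpack(
            "IIII", fl.read(struct.calcsize("IIII")))
        assert ident == 0x4E494253  # "SBIN", as packed by makeHeader()
        return program_counter, stack_pointer, alloc_size, fl.read()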
class InstructionArgument:
"""
Class that represents an instruction argument
These are all examples of arguments:
%hi(a) # hi value of address of label a
%lo(a)($2) # r2 offset by lo value of address of label a
4($sp) # stack ptr offset by 4
$sp # register 29
-8 # constant integer -8
For an argument like 4($sp), the corresponding InstructionArgument returned by InstructionArgument.evaluate()
would be:
value: 29 # register number of the stack pointer
offset: 4
Not all arguments will have a corresponding offset
"""
def __init__(self, value):
self.value = value
self.offset = None
def __radd__(self, offset):
self.offset = offset
return self
def __repr__(self):
return f"<Argument value={self.value} offset={self.offset}>"
@staticmethod
def getRegisterNumber(registerName):
""" Find the register number (0-31) from a mnemonic register name like "$fp" """
assert registerName[0] == "$", Exception(
"Register name must start with $")
registerName = registerName[1:]
names = {
"zero": 0,
"gp": 28,
"sp": 29,
"fp": 30,
"ra": 31
}
if registerName in names:
return names[registerName]
else:
return int(registerName)
@staticmethod
def evaluate(expr, labels=None):
""" Evaluate the integer value of this argument.
Requires a [labels] argument in case this instruction argument references a label.
If this is the first pass, and we don't know the labels yet, set this to None.
        A placeholder label at address 0 will be used instead.
        Return a tuple of the evaluated InstructionArgument plus an AssemblyMessage
        if this operation produced an error, otherwise None for the second element.
"""
# to evaluate these expressions, we're going to use eval(), since i dont feel like writing a parser
# to mitigate security risks, we're going to restrict use of builtins and the global scope
# per https://realpython.com/python-eval-function, panic if the name __class__ is used
if "__class__" in expr:
raise Exception("Name in expression not allowed")
if not labels: # if we don't know any of the labels yet we're going to have to find them manually
labels = {}
# matches a string of characters that starts with a letter or underscore and is not preceded by
# a $ or %
for match in re.findall(r'(?<![$%])\b[a-zA-Z_]\w{0,}', string=expr):
labels[match] = 0
# replace the % operator prefix with two underscores (%hi -> __hi)
expr = expr.replace("%", "__")
# replace instances of stuff like, 4($sp) with 4+($sp)
def repl(matchObject: re.Match):
boundary = matchObject.group()
return boundary[0]+"+"+boundary[1]
# match boundaries between a symbol and an opening parentheses
expr = re.sub(r'[\d\)]\(', repl=repl, string=expr)
# replace $sp, $31, etc. with a getRegisterNumber expression
def repl(matchObject: re.Match):
registerName = matchObject.group()
return '__reg("{}")'.format(registerName)
expr = re.sub(r'\$\w+', repl=repl, string=expr)
# build global scope with relevant operator definitions and variables
globalScope = {
"__builtins__": {}, # used to prevent security risks
"__reg": lambda r: InstructionArgument(InstructionArgument.getRegisterNumber(r)),
"__lo": lambda n: (n << 16) >> 16, # find low 16 bits of word
"__hi": lambda n: (n >> 16) # find high 16 bits of word
}
# insert label definitions into global scope
for labelName, labelAddress in labels.items():
globalScope[labelName] = labelAddress
evald = 0 # default to 0 in case there is an error parsing
err = None
try:
evald = eval(expr, globalScope, {})
except NameError as e:
nameNotDefined = str(e).split("'")[1] # parse from the exception
err = AssemblyMessage(f'Label "{nameNotDefined}" is not defined')
except SyntaxError as e:
err = AssemblyMessage(f'Syntax Error')
if type(evald) == int:
return InstructionArgument(evald), err
return evald, err
""" Utility class for loading and packing arguments into a 32-bit instruction """
class MIPSInstructionFormat:
def __init__(self, name=None):
self.name = name
self._args = []
def argument(self, name, bits):
self._args.append([name, bits])
return self
@property
def argNames(self):
return list([a[0] for a in self._args])
def _toBits(self, n, numBits):
"""
convert [n] into a bit array [numBits] bits long
example: _toBits(50, 6) -> [1, 1, 0, 0, 1, 0]
"""
bitArray = []
for i in range(0, numBits):
positionValue = 2**(i)
bit = (n//positionValue) % 2
bitArray.insert(0, bit)
return bitArray
def _bitsToInt(self, bitArray):
"""
convert [bitArray] into a single number
example: _bitArrayToInt([1, 1, 0, 0, 1, 0]) -> 50 (0b110010)
"""
n = 0
for i in range(len(bitArray)-1, -1, -1):
positionValue = 2**(len(bitArray)-i-1)
bit = bitArray[i]
n += bit*positionValue
return n
def byteCodeFromArgs(self, argValues):
""" With the provided argument values, create a bytes-like representation of an instruction with this format """
bits = []
for argName, argSize in self._args:
bits += self._toBits(argValues[argName], argSize)
# Use big endian so the order is correct idk man
return struct.pack(">I", self._bitsToInt(bits))
def buildInstructionCode(self, argFormatString, argStrings, presetArgs={}, labels=None):
"""
Build a complete sequence of bytes representing the finished instruction
given unparsed and preset instruction arguments
Arguments:
argFormatString - a string dictating how to parse the values provided in argStrings
argStrings - a list of strings directly corresponding to the arguments of the instruction
presetArgs - other arguments to be set manually (not parsed)
labels - assembler label table to assist in parsing (not required for first-pass)
argFormat examples:
"rs,rt,imm" // args[0] is rs, args[1] is rt and so on
"rs,imm+rt" // args[0] is rs, args[1] is rt offset by imm
"""
# parse argFormat
argCorresponding = [arg.split("+") for arg in argFormatString.split(",")]
# parse arg strings
argValues = {k:v for k,v in presetArgs.items()} # copy presetArgs into argValues to avoid modifying the argument
errors = [] # any errors that arose from parsing
for argFormat, argExpr in zip(argCorresponding, argStrings):
if len(argFormat) == 1: # no offset
argName = argFormat[0]
val, err = InstructionArgument.evaluate(argExpr, labels)
argValues[argName] = val.value
if err:
errors.append(err)
else: # there is an offset
offsetArgName, argName = argFormat
argParsed = InstructionArgument.evaluate(argExpr)
val, err = argParsed
argValues[argName] = val.value
offset = 0 # default to 0 if no offset parsable
if val.offset == None: # if there was no offset parsable, add an error
errors.append(AssemblyMessage(f'Argument of format "{argFormat}" expected offset, but none was found'))
else:
offset = val.offset
argValues[offsetArgName] = offset
if err:
errors.append(err)
# fill missing arguments with value 0
for requiredArg in self.argNames:
if not requiredArg in argValues:
argValues[requiredArg] = 0
# check for extraneous arguments
for suppliedArg in argValues:
if not suppliedArg in self.argNames:
errors.append(AssemblyMessage(f'Extraneous argument with name "{suppliedArg}"'))
        code = self.byteCodeFromArgs(argValues)  # pack using this format's own field layout
return code, errors
class InstructionFormats:
IType = (
MIPSInstructionFormat()
.argument("op", bits=6)
.argument("rs", bits=5)
.argument("rt", bits=5)
.argument("imm", bits=16)
)
RType = (
MIPSInstructionFormat()
.argument("op", bits=6)
.argument("rs", bits=5)
.argument("rt", bits=5)
.argument("rd", bits=5)
.argument("func", bits=6)
)
JType = (
MIPSInstructionFormat()
.argument("op", bits=6)
.argument("addr", bits=26)
)
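# Illustrative check (added for illustration; not part of the original
# assembler): packing a J-type word with the formats above. The opcode and
# address values are arbitrary example numbers.
if __name__ == "__main__":
    _example_word = InstructionFormats.JType.byteCodeFromArgs({"op": 2, "addr": 4})
    assert _example_word == b"\x08\x00\x00\x04"  # op in the top 6 bits, addr below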
class AssemblerDataTable:
"""
A wrapper for a file containing information about the various instructions
and assembly symbols for the assembler to use
"""
def __init__(self, tableFile):
with open(tableFile) as fl:
_data = json.load(fl)
self.meta = _data["meta"]
self.ignoredDirectives = _data["directives"]["ignore"]
self.itypeInstructions = _data["instructions"]["i_type"]
class Assembly:
def __init__(self):
# Stores labels [labelName : codePosition]
self.labels = {}
# Stores forward references to labels [codePosition : labelName]
self.labelReferences = {}
# Stores forward references to labels in the code [lineNumber : labelName]
# Sole purpose of outputting error messages for invalid label names
self.codeLabelReferences = {}
# Debug:
self.machCodeLines = []
self.positionAtLastLine = 0
# Outputted machine code
self.machCode = bytearray()
self.currentPos = 0
# Current line number in processing
self.currentLine = 1
# Any warnings or errors created during assembly
self.errors: list[AssemblyMessage] = []
self.warnings: list[AssemblyMessage] = []
# Contents of current source file (split by line)
self.sourceLines = []
# Has this source been processed yet? (Assembly source can only be processed once per Assembly instance)
self.polluted = False
# data table
dataTablePath = os.path.dirname(os.path.realpath(__file__))
self.asmDataTable = AssemblerDataTable(dataTablePath+"/asm_data_table.json")
# other
self.ident = None # the info set by the .ident directive
# Settings
self.WARN_UNKNOWN_DIRECTIVE = True
self.MAX_STACK_SIZE = 1024
self.MAX_HEAP_SIZE = 0
# VM Constants
self._IO_SPACE_SIZE = 256
def addBytesToCode(self, bytes):
for b in bytes:
self.machCode.append(b)
self.currentPos += 1
def toWord(self, n: int): # Converts an int32 to an array of four bytes
return struct.pack("I", n)
def createWarning(self, message): # Creates a new assembler warning at the current line
self.warnings.append(
AssemblyMessage(message, self.currentLine)
)
def createError(self, message): # Creates a new assembler error at the current line
self.errors.append(
AssemblyMessage(message, self.currentLine)
)
def onDirective(self, directive, args, isFirstPass):
if directive == "word":
self.addBytesToCode(self.toWord(int(args[0])))
elif directive == "ident":
self.ident = args[0]
elif directive in self.asmDataTable.ignoredDirectives:
pass
else:
if self.WARN_UNKNOWN_DIRECTIVE and isFirstPass:
msg = 'Unknown assembler directive "{}"'.format(directive)
self.createWarning(msg)
def onLabel(self, labelName):
self.labels[labelName] = self.currentPos
def trackErrorsToCurrentLine(self, errors):
""" Add list of errors to current assembly errors. Sets the line number of these errors to the current line number """
# track errors
for err in errors:
# errors that were created by the parsing function
# did not track the line number
err.line = self.currentLine
self.errors.append(err)
def onInstruction(self, instruction, args, isFirstPass):
# Process pseudo-instructions
if instruction == "nop":
return self.onInstruction("sll", ["$zero", "$zero", "0"], isFirstPass)
labels = None if isFirstPass else self.labels
# Process I-Type instructions
if instruction in self.asmDataTable.itypeInstructions:
instructionData = self.asmDataTable.itypeInstructions[instruction]
argFormat = instructionData["arg_format"]
opcode = instructionData["opcode"]
code, errors = InstructionFormats.IType.buildInstructionCode(
argFormatString=argFormat,
argStrings=args,
presetArgs={"op": opcode},
labels=labels
)
self.trackErrorsToCurrentLine(errors)
self.addBytesToCode(code)
return
self.createError('Unknown instruction "{}"'.format(instruction))
def loadSourceFile(self, fl):
if self.sourceLines:
raise Exception("Assembly source already loaded")
with open(fl) as fl:
flContents = fl.read()
# Convert windows line endings to unix ones
flContents = flContents.replace("\r\n", "\n")
self.sourceLines = flContents.split("\n")
def runPass(self, isFirstPass=True):
# adds i/o space to program
for i in range(self._IO_SPACE_SIZE):
self.addBytesToCode(bytes([0]))
for line in self.sourceLines:
self.processLine(line, isFirstPass=isFirstPass)
# for debug purposes only
bytesAddedThisLine = self.machCode[self.positionAtLastLine:]
self.machCodeLines.append(bytesAddedThisLine)
self.positionAtLastLine = self.currentPos
def assemble(self, verbose=True):
if self.polluted:
raise Exception(
"Assembly source can only be processed once per Assembly instance")
self.runPass()
# reset variables and whatnot for the second pass
self.currentPos = 0
self.currentLine = 1
self.machCode = bytearray()
self.runPass(isFirstPass=False)
self.polluted = True
if verbose:
for error in self.errors:
print("Error:", error.message)
print(' on line {}: "{}"'.format(
error.line, self.sourceLines[error.line-1].strip()))
print()
for warn in self.warnings:
print("Warning:", warn.message)
print(' on line {}: "{}"'.format(
warn.line, self.sourceLines[warn.line-1].strip()))
print()
print("Assembly finished with {} errors and {} warnings".format(
len(self.errors),
len(self.warnings)
))
def _removeComments(self, line):
""" return [line] with any comments removed """
if not "#" in line:
return line
# Find the first instance of a # character that isn't enclosed inside a string
        for i, c in enumerate(line):
            if c == "#":
                if not self._isCharEnclosed(i, line):
                    return line[:i]
        return line  # every "#" sits inside a string literal, so nothing to strip
def _split(self, string, delimiter):
""" split [string] on any delimiters that aren't enclosed in strings. delimiter can only be one character """
segment = ""
segments = []
for i, c in enumerate(string):
if c == delimiter and not self._isCharEnclosed(i, string):
segments.append(segment)
segment = ""
else:
segment += c
if segment:
segments.append(segment)
return segments
def _isCharEnclosed(self, charIndex, string):
""" Return true if the character at charIndex is enclosed in double quotes (a string) """
        numQuotes = 0  # if the number of quotes past this character is odd, then this character lies inside a string
for i in range(charIndex, len(string)):
if string[i] == '"':
numQuotes += 1
return numQuotes % 2 == 1
def processLine(self, line, isFirstPass):
# Determine type of line
line = line.strip() # remove trailing and leading whitespace
line = self._removeComments(line) # remove comments from line
# Convert tabs into single spaces (makes parsing easier)
line = line.replace("\t", " ")
# Remove comments
if line == "": # is line empty?
return
elif line.endswith(":"): # is the line a label?
if not isFirstPass:
return # don't parse labels twice
self.onLabel(line.rstrip(":"))
elif line.startswith("."): # is the line a directive?
line = line.lstrip(".") # remove the dot from the directive
# results in a thing like ["align", "2"]
parts = self._split(line, " ")
directive = parts[0]
argString = "" # there might not be any arguments
if len(parts) > 1:
argString = parts[1]
args = self._split(argString, ",")
            # remove surrounding whitespace from arguments (usually only applicable
            # if the commas separating the arguments have trailing spaces)
            args = list([arg.strip() for arg in args])
self.onDirective(directive, args, isFirstPass)
else: # it's probably an instruction
# results in a thing like ["lui", "$2,%hi(a)"]
parts = self._split(line, " ")
instruction = parts[0]
argString = "" # there might not be any arguments
if len(parts) > 1:
argString = parts[1]
args = self._split(argString, ",")
            # remove surrounding whitespace from arguments (usually only applicable
            # if the commas separating the arguments have trailing spaces)
            args = list([arg.strip() for arg in args])
self.onInstruction(instruction, args, isFirstPass)
self.currentLine += 1
def findEntryPoint(self):
""" return memory address of main routine """
return self.labels["main"]
def getMachineCode(self):
return bytes(self.machCode)
def makeHeader(self, programSize, programCounter, stackSize, heapSize):
# see header format above
headerFormat = "IIII"
totalMemorySize = programSize+stackSize+heapSize
stackPointer = programSize+stackSize
structData = [0x4E494253, programCounter,
stackPointer, totalMemorySize]
return struct.pack(headerFormat, *structData)
def exportAsBinary(self, filename):
# see format above
with open(filename, 'wb') as fl:
programData = self.getMachineCode()
header = self.makeHeader(
programSize=len(programData),
programCounter=self.findEntryPoint(),
stackSize=self.MAX_STACK_SIZE,
heapSize=self.MAX_HEAP_SIZE
)
fl.write(header + programData)
def exportDebugFile(self, filename):
with open(filename, "w") as fl:
for sourceLine, lineCode in zip(self.sourceLines, self.machCodeLines):
fl.write(sourceLine+"\n")
if lineCode:
codeHex = lineCode.hex(" ")
codeBin = ""
for i in range(0, len(lineCode)):
byte = lineCode[i]
codeBin += "{:08b}".format(byte)+" "
fl.write(f" [{codeHex}] {codeBin}\n\n")
| 34.089394
| 126
| 0.586382
| 20,702
| 0.92013
| 0
| 0
| 3,724
| 0.165518
| 0
| 0
| 8,873
| 0.394373
|
3ff3b22779c14ce17a4d6563f15286360782e0ac
| 3,237
|
py
|
Python
|
qvdfile/tests/test_qvdfile.py
|
cosmocracy/qvdfile
|
c1f92ec153c07f607fd57c6f6679e3c7269d643e
|
[
"Apache-2.0"
] | 17
|
2019-07-18T12:50:33.000Z
|
2021-05-25T06:26:45.000Z
|
qvdfile/tests/test_qvdfile.py
|
cosmocracy/qvdfile
|
c1f92ec153c07f607fd57c6f6679e3c7269d643e
|
[
"Apache-2.0"
] | 2
|
2021-05-15T03:53:08.000Z
|
2021-07-22T14:31:15.000Z
|
qvdfile/tests/test_qvdfile.py
|
cosmocracy/qvdfile
|
c1f92ec153c07f607fd57c6f6679e3c7269d643e
|
[
"Apache-2.0"
] | 5
|
2019-07-18T12:55:31.000Z
|
2021-12-21T15:09:37.000Z
|
import pytest
import errno
import os
import glob
import shutil
import xml.etree.ElementTree as ET
from qvdfile.qvdfile import QvdFile, BadFormat
@pytest.fixture(scope="function")
def qvd():
""" standard setup for most of the tests """
yield QvdFile("data/tab1.qvd")
@pytest.fixture(scope="function")
def bigqvd():
""" standard setup for tests with bigger qvd"""
yield QvdFile("data/tab2.qvd")
# READING QVD ==================================================================
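# Basic usage exercised by the tests below (illustrative comment, added for
# orientation; the file names come from the fixtures above):
#   qvd = QvdFile("data/tab1.qvd")  # table metadata ends up in .attribs and .fields
#   qvd.getRow(0)                   # -> dict mapping field names to string values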
# init
def test_init_smoke(qvd):
# metadata is in attribs
assert "TableName" in qvd.attribs.keys()
assert qvd.attribs["TableName"] == "tab1"
# fields info is in fields
assert len(qvd.fields) == 3
assert "ID" in [ f["FieldName"] for f in qvd.fields ]
def test_init_no_file():
with pytest.raises(FileNotFoundError):
qvd = QvdFile("data/no_such_file.qvd")
def test_init_not_qvd_or_bad_file():
with pytest.raises(BadFormat):
qvd = QvdFile(__file__)
# getFieldVal
def test_get_field_val_smoke(qvd):
assert qvd.getFieldVal("ID",0) == "123.12"
assert qvd.getFieldVal("NAME",2) == "Vaysa"
assert qvd.getFieldVal("ONEVAL",0) == "0"
def test_get_field_val_bad_name(qvd):
with pytest.raises(KeyError):
qvd.getFieldVal("NOFILED",0)
def test_get_field_val_bad_index(qvd):
with pytest.raises(IndexError):
qvd.getFieldVal("ID",10)
# fieldsInRow
def test_fields_in_row_smoke(qvd):
rowf = qvd.fieldsInRow()
assert next(rowf)["FieldName"] == "NAME"
assert next(rowf)["FieldName"] == "ID"
with pytest.raises(StopIteration):
next(rowf)
def test_fields_in_row_bigger(bigqvd):
rowf = bigqvd.fieldsInRow()
assert next(rowf)["FieldName"] == "NAME"
assert next(rowf)["FieldName"] == "PHONE"
assert next(rowf)["FieldName"] == "VAL"
assert next(rowf)["FieldName"] == "ID"
with pytest.raises(StopIteration):
next(rowf)
# createMask
def test_create_mask_smoke(qvd):
assert qvd.createMask() == "uint:5,uint:3"
def test_create_mask_bigger(bigqvd):
assert bigqvd.createMask() == "uint:6,uint:5,uint:5,uint:8"
# getRow
def test_get_row_smoke(qvd):
row = qvd.getRow(0)
assert row["ID"] == "123.12"
assert row["NAME"] == "Pete"
assert row["ONEVAL"] == "0"
def test_get_row_bad_index(qvd):
with pytest.raises(IndexError):
qvd.getRow(10)
def test_get_row_null(qvd):
row = qvd.getRow(4)
assert row["ID"] == qvd.NoneValueStr
def test_get_row_bigger(bigqvd):
row = bigqvd.getRow(0)
assert row["ID"] == "1"
assert row["VAL"] == "100001"
assert row["NAME"] == "Pete1"
assert row["PHONE"] == "1234567890"
assert row["SINGLE"] == "single value"
def test_get_row_bigger_nulls(bigqvd):
row = bigqvd.getRow(17)
assert row["ID"] == "-18"
assert row["VAL"] == bigqvd.NoneValueStr
assert row["NAME"] == "Pete17"
assert row["PHONE"] == bigqvd.NoneValueStr
assert row["SINGLE"] == "single value"
# WRITING QVD ===================================================================
# code and tests will follow....
| 22.957447
| 81
| 0.611369
| 0
| 0
| 196
| 0.06055
| 264
| 0.081557
| 0
| 0
| 863
| 0.266605
|
3ff5387e0936b375509e91f2742e4bc5ae6feee1
| 4,221
|
py
|
Python
|
app/__init__.py
|
i2nes/app-engine-blog
|
94cdc25674c946ad643f7f140cbedf095773de3f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
i2nes/app-engine-blog
|
94cdc25674c946ad643f7f140cbedf095773de3f
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
i2nes/app-engine-blog
|
94cdc25674c946ad643f7f140cbedf095773de3f
|
[
"MIT"
] | null | null | null |
from flask import Flask
from app.models import Article, Feature
import logging
def create_app(config, blog_config):
"""This initiates the Flask app and starts your app engine instance.
Startup Steps:
1. Instantiate the Flask app with the config settings.
    2. Register blueprints.
3. Create the Contact and About Pages in the datastore if they don't exist yet.
    4. Load the blog_config settings from the datastore. Or add them if they don't exist yet.
"""
logging.info('STARTUP: Getting ready to launch the Flask App')
app = Flask(__name__)
app.config.update(config)
# Register blueprints
logging.info('STARTUP: Register Blueprints')
from .main import app as main_blueprint
app.register_blueprint(main_blueprint, url_prefix='/')
from .editor import app as editor_blueprint
app.register_blueprint(editor_blueprint, url_prefix='/editor')
# Add Contact and About pages to the datastore when first launching the blog
logging.info('STARTUP: Set up Contact and About pages')
# Contact page creation
query = Article.query(Article.slug == 'contact-page')
result = query.fetch(1)
if result:
logging.info('STARTUP: Contact page exists')
else:
logging.info('STARTUP: Creating a contact page')
contact_page = Article()
contact_page.title1 = 'Contact Me'
contact_page.title2 = 'Have questions? I have answers (maybe).'
contact_page.slug = 'contact-page'
contact_page.author = ''
contact_page.content = 'Want to get in touch with me? Fill out the form below to send me a message and I ' \
'will try to get back to you within 24 hours! '
contact_page.published = False
contact_page.put()
# About page creation
query = Article.query(Article.slug == 'about-page')
result = query.fetch(1)
if result:
logging.info('STARTUP: About page exists')
else:
logging.info('STARTUP: Creating an about page')
about_page = Article()
about_page.title1 = 'About Me'
about_page.title2 = 'This is what I do.'
about_page.slug = 'about-page'
about_page.author = ''
about_page.content = ''
about_page.published = False
about_page.put()
# Register blog configurations
# The Blog is initially configured with blog_conf settings
# The settings are added to the datastore and will take precedence from now on
# You can change the settings in the datastore.
# The settings are only updated on Startup, so you need to restart the instances to apply changes.
logging.info('STARTUP: Register Blog Configurations')
query = Feature.query()
for feature in blog_config:
# TODO: Add the accesslist to the datastore. The access list is still read only from the config file.
if feature == 'EDITOR_ACCESS_LIST':
pass
# TODO: The posts limit is an int and needs to be converted. Find a better way of doing this.
elif feature == 'POSTS_LIST_LIMIT':
result = query.filter(Feature.title == feature).fetch()
if result:
logging.info('STARTUP: Loading {}'.format(result[0].title))
blog_config['POSTS_LIST_LIMIT'] = int(result[0].value)
else:
logging.info('STARTUP: Adding to datastore: {}'.format(feature))
f = Feature()
f.title = feature
f.value = str(blog_config[feature])
f.put()
# Load the configs or add them to the datastore if they don't exist yet
else:
result = query.filter(Feature.title == feature).fetch()
if result:
logging.info('STARTUP: Loading {}'.format(result[0].title))
blog_config[result[0].title] = result[0].value
else:
logging.info('STARTUP: Adding to datastore: {}'.format(feature))
f = Feature()
f.title = feature
f.value = blog_config[feature]
f.put()
# Startup complete
logging.info('STARTUP: READY TO ROCK!!!')
return app
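# Illustrative usage sketch (added for illustration; not part of the original
# module). The config keys below are placeholders rather than the project's
# actual settings:
#   app = create_app(config={"SECRET_KEY": "change-me"},
#                    blog_config={"POSTS_LIST_LIMIT": 10})
#   app.run()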
| 33.768
| 116
| 0.630419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,854
| 0.439232
|
3ff664299cdf95218a7f9411379521d7b5cdbaa4
| 430
|
py
|
Python
|
libs/msfpayload.py
|
darkoperator/SideStep
|
2c75af77ee2241595de4c65d7e4f8342dcc0bb50
|
[
"BSL-1.0"
] | 3
|
2015-09-16T16:09:14.000Z
|
2017-01-14T21:53:08.000Z
|
libs/msfpayload.py
|
darkoperator/SideStep
|
2c75af77ee2241595de4c65d7e4f8342dcc0bb50
|
[
"BSL-1.0"
] | null | null | null |
libs/msfpayload.py
|
darkoperator/SideStep
|
2c75af77ee2241595de4c65d7e4f8342dcc0bb50
|
[
"BSL-1.0"
] | 2
|
2016-04-22T04:44:50.000Z
|
2021-12-18T15:12:22.000Z
|
"""
Generates the Meterpreter payload from msfvenom
"""
import subprocess
def payloadGenerator(msfpath, msfvenom, msfpayload, ip, port):
payload = subprocess.Popen('ruby ' + msfpath + msfvenom + ' -p ' + msfpayload + ' LHOST=' + ip + ' LPORT=' + str(port) + ' EXITFUNC=thread -e x86/alpha_mixed -f raw BufferRegister=EAX', stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).communicate()[0]
return payload
| 53.75
| 275
| 0.727907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.346512
|
3ff6b1161dba69f783ae2e124e780852ea91eaaa
| 9,689
|
py
|
Python
|
RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py
|
arupiot/create_revit_families
|
9beab3c7e242426b2dca99ca5477fdb433e39db2
|
[
"MIT"
] | 1
|
2021-02-04T18:20:58.000Z
|
2021-02-04T18:20:58.000Z
|
RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py
|
arupiot/DBOTools
|
9beab3c7e242426b2dca99ca5477fdb433e39db2
|
[
"MIT"
] | null | null | null |
RevitPythonShell_Scripts/GoogleTools.extension/GoogleTools.tab/Ontologies.Panel/BOS_SetValues.pushbutton/script.py
|
arupiot/DBOTools
|
9beab3c7e242426b2dca99ca5477fdb433e39db2
|
[
"MIT"
] | null | null | null |
# Select an element
# Open yaml file with entity types
# If parameters are already present, set values according to yaml input
import sys
import clr
import System
import rpw
import yaml
import pprint
from System.Collections.Generic import *
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
from rpw.ui.forms import *
from Autodesk.Revit.UI.Selection import ObjectType
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
app = doc.Application
pp = pprint.PrettyPrinter(indent=1)
shared_param_file = app.OpenSharedParameterFile()
selection = [doc.GetElement(element_Id) for element_Id in uidoc.Selection.GetElementIds()]
def createnewgroup(shared_parameter_file, new_group_name):
try:
newgroup = shared_parameter_file.Groups.Create(new_group_name)
print("Group successfully created with name: {}".format(new_group_name))
except:
all_groups = []
for group in shared_parameter_file.Groups:
all_groups.append(group.Name)
if new_group_name in all_groups:
print("A group already exists with the following name: {}".format(new_group_name))
for group in shared_parameter_file.Groups:
if group.Name == new_group_name:
newgroup = group
else:
print("Something went wrong. The group with name {} was not created. Please check Shared Parameter File is not read-only.".format(new_group_name))
sys.exit("Script has ended")
return newgroup
def builtinGroupFromName(builtin_group_name):
b_i_groups = System.Enum.GetValues(BuiltInParameterGroup)
builtin_group = None
for g in b_i_groups:
if g.ToString() == builtin_group_name:
builtin_group = g
if builtin_group != None:
return builtin_group
else:
print("Built-in Group not valid: {}".format(builtin_group_name))
return None
def parameterName2ExternalDefinition(sharedParamFile, definitionName):
"""
Given the name of a parameter, return the definition from the shared parameter file
"""
externalDefinition = None
for group in sharedParamFile.Groups:
for definition in group.Definitions:
if definition.Name == definitionName:
externalDefinition = definition
return externalDefinition
def NameAndGroup2ExternalDefinition(sharedParamFile, definitionName, groupName):
external_definition = None
group_found = False
group_matches = None
for group in sharedParamFile.Groups:
if group.Name == groupName:
group_found = True
group_matches = group
if group_found == True:
for definition in group_matches.Definitions:
if definition.Name == definitionName:
external_definition = definition
else:
print("Group not found with name: {}".format(groupName))
return external_definition
def create_definition (group_name, shared_parameter_file, param_name, param_type, usermodifiable, description):
new_definition = None
group_matches = False
group = None
definition_matches = False
for existing_group in shared_parameter_file.Groups:
if existing_group.Name == group_name:
group_matches = True
group = existing_group
print("Group_matches: {}".format(group_matches))
if group_matches == True:
for existing_definition in group.Definitions:
if existing_definition.Name == param_name:
definition_matches = True
        print("Definition matches: {}".format(definition_matches))
if definition_matches == False:
ext_def_creation_options = ExternalDefinitionCreationOptions(param_name, param_type)
ext_def_creation_options.UserModifiable = usermodifiable
ext_def_creation_options.Description = description
new_definition = group.Definitions.Create(ext_def_creation_options)
print("Created external definition \"{}\" in group \"{}\"".format(new_definition.Name, group.Name))
else:
            print("External definition already exists with name \"{}\" in group \"{}\"".format(param_name, group.Name))
else:
print("Group doesn't match")
family_instances = []
not_family_instances = []
print("Selected {} items".format(len(selection)))
for item in selection:
if type(item).__name__ == "FamilyInstance":
family_instances.append(item)
else:
not_family_instances.append(item)
print("The following elements are family instances and will receive the parameter values from the ontology:")
if family_instances == []:
print("None")
else:
print([item.Id.ToString() for item in family_instances])
print("The following elements are not family instances and will be dropped from the selection:")
if not_family_instances == []:
print("None")
else:
print([item.Id.ToString() for item in not_family_instances])
yaml_path = select_file("Yaml File (*.yaml)|*.yaml", "Select the yaml file with the parameters", multiple = False, restore_directory = True)
if yaml_path:
with open(yaml_path, "r") as stream:
ontology_yaml = yaml.safe_load(stream)
file_name_split = yaml_path.split("\\")
file_name_with_ext = file_name_split[-1]
file_name_with_ext_split = file_name_with_ext.split(".")
group_name = file_name_with_ext_split[0]
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
parameter_names = []
for canonical_type in canonical_types.items():
implements_params = canonical_type[1]["implements"]
for implement_param in implements_params:
parameter_names.append(implement_param)
parameter_names = list(dict.fromkeys(parameter_names))
param_names_with_prefix = []
for pn in parameter_names:
param_name_with_prefix = "Implements_" + pn
param_names_with_prefix.append(param_name_with_prefix)
param_names_with_prefix.append("Entity_Type")
#print(param_names_with_prefix)
# Check if item has the parameters:
print("Checking if family instances have the required parameters...")
for family_instance in list(family_instances):  # iterate over a copy since instances may be removed below
all_params = family_instance.Parameters
all_params_names = [param.Definition.Name for param in all_params]
#pp.pprint(all_params_names)
missing_params = []
for param_name in param_names_with_prefix:
if param_name in all_params_names:
pass
else:
missing_params.append(param_name)
if missing_params == []:
print("Family instance {} has all required parameters.".format(family_instance.Id.ToString()))
else:
print("Family instance {} is missing the following parameters".format(family_instance.Id))
pp.pprint(missing_params)
family_instances.remove(family_instance)
        print("Family instance {} removed from the list of objects to modify".format(family_instance.Id.ToString()))
# ADD SELECTION OF TYPE THROUGH MENU
print("Please select an entity type from the yaml ontology...")
form_title = "Select an entity type:"
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
options = canonical_types.keys()
entity_type_name = rpw.ui.forms.SelectFromList(form_title,options,description=None,sort=True,exit_on_close=True)
entity_type_dict = (dict(filter(lambda elem: elem [0] == entity_type_name, canonical_types.items())))
print("Printing selected entity type:")
pp.pprint(entity_type_dict)
implements = entity_type_dict[entity_type_name]["implements"]
params_to_edit_names = []
for i in implements:
params_to_edit_names.append("Implements_"+i)
print(params_to_edit_names)
print("The following instances will be modified according to Entity Type: {}".format(entity_type_name))
pp.pprint(family_instances)
warnings = []
t = Transaction(doc, "Populate BOS parameters")
t.Start()
for family_instance in family_instances:
print("Editing family instance {}...".format(family_instance.Id.ToString()))
# MODIFY ENTITY TYPE
try:
p_entity_type = family_instance.LookupParameter("Entity_Type")
p_entity_type.Set(entity_type_name)
print("Entity_Type parameter successfully edited for family instance {}.".format(family_instance.Id.ToString()))
except:
message = "Couldn't edit parameter Entity_Type for family instance {}.".format(family_instance.Id.ToString())
warnings.append(message)
# MODIFY YESNO PARAMETERS
all_implements_params = []
for p in family_instance.Parameters:
if "Implements_" in p.Definition.Name:
all_implements_params.append(p)
for p in all_implements_params:
try:
if p.Definition.Name in params_to_edit_names:
p.Set(True)
else:
p.Set(False)
print("{} parameter successfully edited for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString()))
except:
message = "Couldn't edit parameter {} for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString())
warnings.append(message)
t.Commit()
print("Script has ended")
if warnings == []:
print("Warnings: None")
else:
print("Warnings:")
for w in warnings:
print(w)
| 38.601594
| 158
| 0.680462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,064
| 0.213025
|
3ff6bad744395c2228278988f9b9886b23c17ebf
| 8,110
|
py
|
Python
|
Code/src/models/optim/SimCLR_trainer.py
|
antoine-spahr/X-ray-Anomaly-Detection
|
850b6195d6290a50eee865b4d5a66f5db5260e8f
|
[
"MIT"
] | 2
|
2020-10-12T08:25:13.000Z
|
2021-08-16T08:43:43.000Z
|
Code/src/models/optim/SimCLR_trainer.py
|
antoine-spahr/X-ray-Anomaly-Detection
|
850b6195d6290a50eee865b4d5a66f5db5260e8f
|
[
"MIT"
] | null | null | null |
Code/src/models/optim/SimCLR_trainer.py
|
antoine-spahr/X-ray-Anomaly-Detection
|
850b6195d6290a50eee865b4d5a66f5db5260e8f
|
[
"MIT"
] | 1
|
2020-06-17T07:40:17.000Z
|
2020-06-17T07:40:17.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.manifold import TSNE
from src.models.optim.CustomLosses import NT_Xent_loss, SupervisedContrastiveLoss
from src.utils.utils import print_progessbar
class SimCLR_trainer:
    """
    Trainer for SimCLR-style contrastive pretraining: each image is seen under
    two augmentations, both views are embedded and projected by the network,
    the projections are L2-normalized, and an NT-Xent (or supervised
    contrastive) loss pulls matching views together.
    """
def __init__(self, tau, n_epoch=100, batch_size=32,lr=1e-3, weight_decay=1e-6,
lr_milestones=(), n_job_dataloader=0, supervised_loss=False,
device='cuda', print_batch_progress=False):
"""
"""
self.tau = tau
self.n_epoch = n_epoch
self.batch_size = batch_size
self.lr = lr
self.weight_decay = weight_decay
self.lr_milestones = lr_milestones
self.n_job_dataloader = n_job_dataloader
self.supervised_loss = supervised_loss
self.device = device
self.print_batch_progress = print_batch_progress
self.train_time = None
self.train_loss = None
self.eval_repr = None
    def train(self, dataset, net, valid_dataset=None):
        """
        Train `net` on `dataset`, which yields two differently transformed
        batches of images per iteration, using the NT-Xent loss (or the
        supervised contrastive loss when `supervised_loss` is set).
        """
logger = logging.getLogger()
# make dataloader (with drop_last = True to ensure that the loss can be computed)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader,
drop_last=True)
# put net on device
net = net.to(self.device)
# define loss function, supervised or self-supervised
if self.supervised_loss:
loss_fn = SupervisedContrastiveLoss(self.tau, self.batch_size, y_list=[1], device=self.device)
else:
loss_fn = NT_Xent_loss(self.tau, self.batch_size, device=self.device)
# define the optimizer
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# define the learning rate scheduler : 90% reduction at each steps
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestones, gamma=0.1)
# Training
logger.info('Start Training SimCLR.')
start_time = time.time()
epoch_loss_list = []
n_batch = len(train_loader)
for epoch in range(self.n_epoch):
net.train()
epoch_loss = 0.0
epoch_start_time = time.time()
for b, data in enumerate(train_loader):
# get data on device
input_1, input_2, semi_label, _ = data
input_1 = input_1.to(self.device).float().requires_grad_(True)
input_2 = input_2.to(self.device).float().requires_grad_(True)
semi_label = semi_label.to(self.device)
                # Update by backpropagation: forward + backward + step
optimizer.zero_grad()
_, z_1 = net(input_1)
_, z_2 = net(input_2)
# normalize embeddings
z_1 = F.normalize(z_1, dim=1)
z_2 = F.normalize(z_2, dim=1)
# compute the loss
if self.supervised_loss:
y = torch.where(semi_label == -1, torch.ones_like(semi_label), torch.zeros_like(semi_label)) # generate labels (1 if known abnormal, else it's considered normal)
loss = loss_fn(z_1, z_2, y)
else:
loss = loss_fn(z_1, z_2)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
if self.print_batch_progress:
print_progessbar(b, n_batch, Name='\t\tTrain Batch', Size=40, erase=True)
# compute valid_loss if required
valid_loss = ''
if valid_dataset:
loss = self.evaluate(valid_dataset, net, save_tSNE=False, return_loss=True,
print_to_logger=False)
valid_loss = f' Valid Loss {loss:.6f} |'
# display epoch statistics
logger.info(f'----| Epoch {epoch + 1:03}/{self.n_epoch:03} '
f'| Time {time.time() - epoch_start_time:.3f} [s]'
f'| Loss {epoch_loss / n_batch:.6f} |' + valid_loss)
# store loss
epoch_loss_list.append([epoch+1, epoch_loss/n_batch])
# update learning rate if milestone is reached
scheduler.step()
if epoch + 1 in self.lr_milestones:
logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')
# Save results
self.train_time = time.time() - start_time
self.train_loss = epoch_loss_list
logger.info(f'---- Finished Training SimCLR in {self.train_time:.3f} [s].')
return net
    def evaluate(self, dataset, net, save_tSNE=False, return_loss=True, print_to_logger=True):
        """
        Compute the contrastive loss over `dataset` without gradient updates;
        optionally keep a 2D t-SNE projection of the representations and
        return the average loss.
        """
if print_to_logger:
logger = logging.getLogger()
# make dataloader (with drop_last = True to ensure that the loss can be computed)
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader,
drop_last=True)
# put net on device
net = net.to(self.device)
# define loss function, supervised or self-supervised
if self.supervised_loss:
loss_fn = SupervisedContrastiveLoss(self.tau, self.batch_size, y_list=[1], device=self.device)
else:
loss_fn = NT_Xent_loss(self.tau, self.batch_size, device=self.device)
if print_to_logger:
logger.info("Start Evaluating SimCLR.")
net.eval()
with torch.no_grad():
sum_loss = 0.0
idx_h_z = []
n_batch = len(loader)
for b, data in enumerate(loader):
# get input
input_1, input_2, semi_label, idx = data
input_1 = input_1.to(self.device).float()
input_2 = input_2.to(self.device).float()
semi_label = semi_label.to(self.device)
idx = idx.to(self.device)
# forward
h_1, z_1 = net(input_1)
h_2, z_2 = net(input_2)
# normalize
z_1 = F.normalize(z_1, dim=1)
z_2 = F.normalize(z_2, dim=1)
# compute loss
if self.supervised_loss:
y = torch.where(semi_label == -1, torch.ones_like(semi_label), torch.zeros_like(semi_label)) # generate labels (1 if known abnormal, else it's considered normal)
loss = loss_fn(z_1, z_2, y)
else:
loss = loss_fn(z_1, z_2)
sum_loss += loss.item()
# save embeddings
if save_tSNE:
idx_h_z += list(zip(idx.cpu().data.numpy().tolist(),
h_1.cpu().data.numpy().tolist(),
z_1.cpu().data.numpy().tolist()))
if self.print_batch_progress:
print_progessbar(b, n_batch, Name='\t\tEvaluation Batch', Size=40, erase=True)
if save_tSNE:
if print_to_logger:
logger.info("Computing the t-SNE representation.")
# Apply t-SNE transform on embeddings
index, h, z = zip(*idx_h_z)
h, z = np.array(h), np.array(z)
h = TSNE(n_components=2).fit_transform(h)
z = TSNE(n_components=2).fit_transform(z)
self.eval_repr = list(zip(index, h.tolist(), z.tolist()))
if print_to_logger:
                logger.info("Successfully computed the t-SNE representation.")
if return_loss:
            return sum_loss / n_batch
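# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the dataset yields (view_1, view_2, semi_label, index)
# tuples and that `net` returns a (representation, projection) pair, as the
# loops above expect:
#   trainer = SimCLR_trainer(tau=0.5, n_epoch=10, batch_size=32, device='cuda')
#   net = trainer.train(contrastive_dataset, net)
#   trainer.evaluate(contrastive_dataset, net, save_tSNE=True, return_loss=False)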
| 38.990385
| 181
| 0.561159
| 7,804
| 0.962269
| 0
| 0
| 0
| 0
| 0
| 0
| 1,518
| 0.187176
|
3ff6e816cd8b898e3be215d0d77841e6ad25c848
| 543
|
py
|
Python
|
patients/migrations/0008_alter_patient_age.py
|
Curewell-Homeo-Clinic/admin-system
|
c8ce56a2bdbccfe1e6bec09068932f1943498b9f
|
[
"MIT"
] | 1
|
2021-11-29T15:24:41.000Z
|
2021-11-29T15:24:41.000Z
|
patients/migrations/0008_alter_patient_age.py
|
Curewell-Homeo-Clinic/admin-system
|
c8ce56a2bdbccfe1e6bec09068932f1943498b9f
|
[
"MIT"
] | 46
|
2021-11-29T16:05:55.000Z
|
2022-03-01T13:04:45.000Z
|
patients/migrations/0008_alter_patient_age.py
|
Curewell-Homeo-Clinic/admin-system
|
c8ce56a2bdbccfe1e6bec09068932f1943498b9f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-20 16:13
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patients', '0007_alter_patient_age'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='age',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
),
]
| 27.15
| 179
| 0.67035
| 420
| 0.773481
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.174954
|
3ff70f0f8e53ee1c511ea409b894a75564f6138d
| 4,348
|
py
|
Python
|
kitsune/questions/tests/test_utils.py
|
AndrewDVXI/kitsune
|
84bd4fa60346681c3fc5a03b0b1540fd1335cee2
|
[
"BSD-3-Clause"
] | 1
|
2021-07-18T00:41:16.000Z
|
2021-07-18T00:41:16.000Z
|
kitsune/questions/tests/test_utils.py
|
AndrewDVXI/kitsune
|
84bd4fa60346681c3fc5a03b0b1540fd1335cee2
|
[
"BSD-3-Clause"
] | 9
|
2021-04-08T22:05:53.000Z
|
2022-03-12T00:54:11.000Z
|
kitsune/questions/tests/test_utils.py
|
AndrewDVXI/kitsune
|
84bd4fa60346681c3fc5a03b0b1540fd1335cee2
|
[
"BSD-3-Clause"
] | 1
|
2020-07-28T15:53:02.000Z
|
2020-07-28T15:53:02.000Z
|
from kitsune.questions.models import Answer, Question
from kitsune.questions.tests import AnswerFactory, QuestionFactory
from kitsune.questions.utils import (
get_mobile_product_from_ua,
mark_content_as_spam,
num_answers,
num_questions,
num_solutions,
)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import UserFactory
from nose.tools import eq_
from parameterized import parameterized
class ContributionCountTestCase(TestCase):
def test_num_questions(self):
"""Answers are counted correctly on a user."""
u = UserFactory()
eq_(num_questions(u), 0)
q1 = QuestionFactory(creator=u)
eq_(num_questions(u), 1)
q2 = QuestionFactory(creator=u)
eq_(num_questions(u), 2)
q1.delete()
eq_(num_questions(u), 1)
q2.delete()
eq_(num_questions(u), 0)
def test_num_answers(self):
u = UserFactory()
q = QuestionFactory()
eq_(num_answers(u), 0)
a1 = AnswerFactory(creator=u, question=q)
eq_(num_answers(u), 1)
a2 = AnswerFactory(creator=u, question=q)
eq_(num_answers(u), 2)
a1.delete()
eq_(num_answers(u), 1)
a2.delete()
eq_(num_answers(u), 0)
def test_num_solutions(self):
u = UserFactory()
q1 = QuestionFactory()
q2 = QuestionFactory()
a1 = AnswerFactory(creator=u, question=q1)
a2 = AnswerFactory(creator=u, question=q2)
eq_(num_solutions(u), 0)
q1.solution = a1
q1.save()
eq_(num_solutions(u), 1)
q2.solution = a2
q2.save()
eq_(num_solutions(u), 2)
q1.solution = None
q1.save()
eq_(num_solutions(u), 1)
a2.delete()
eq_(num_solutions(u), 0)
class FlagUserContentAsSpamTestCase(TestCase):
def test_flag_content_as_spam(self):
# Create some questions and answers by the user.
u = UserFactory()
QuestionFactory(creator=u)
QuestionFactory(creator=u)
AnswerFactory(creator=u)
AnswerFactory(creator=u)
AnswerFactory(creator=u)
# Verify they are not marked as spam yet.
eq_(2, Question.objects.filter(is_spam=False, creator=u).count())
eq_(0, Question.objects.filter(is_spam=True, creator=u).count())
eq_(3, Answer.objects.filter(is_spam=False, creator=u).count())
eq_(0, Answer.objects.filter(is_spam=True, creator=u).count())
# Flag content as spam and verify it is updated.
mark_content_as_spam(u, UserFactory())
eq_(0, Question.objects.filter(is_spam=False, creator=u).count())
eq_(2, Question.objects.filter(is_spam=True, creator=u).count())
eq_(0, Answer.objects.filter(is_spam=False, creator=u).count())
eq_(3, Answer.objects.filter(is_spam=True, creator=u).count())
class GetMobileProductFromUATests(TestCase):
@parameterized.expand(
[
("Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0", "mobile"),
("Mozilla/5.0 (Android; Tablet; rv:40.0) Gecko/40.0 Firefox/40.0", "mobile"),
("Mozilla/5.0 (Android 4.4; Mobile; rv:41.0) Gecko/41.0 Firefox/41.0", "mobile"),
("Mozilla/5.0 (Android 4.4; Tablet; rv:41.0) Gecko/41.0 Firefox/41.0", "mobile"),
(
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/7.0.4 Mobile/16B91 Safari/605.1.15", # noqa: E501
"ios",
),
(
"Mozilla/5.0 (Android 10; Mobile; rv:76.0) Gecko/76.0 Firefox/76.0",
"firefox-preview",
),
(
"Mozilla/5.0 (Linux; Android 8.1.0; Redmi 6A Build/O11019; rv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Rocket/1.9.2(13715) Chrome/76.0.3809.132 Mobile Safari/537.36", # noqa: E501
"firefox-lite",
),
( # Chrome on Android:
"Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Mobile Safari/537.36", # noqa: E501
None,
),
]
)
def test_user_agents(self, ua, expected):
eq_(expected, get_mobile_product_from_ua(ua))
| 34.507937
| 207
| 0.606486
| 3,914
| 0.900184
| 0
| 0
| 1,411
| 0.324517
| 0
| 0
| 1,102
| 0.25345
|
3ff942a422edefd4743417af8a01150a5a71f98a
| 10,122
|
py
|
Python
|
scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | 3
|
2018-03-29T23:02:43.000Z
|
2020-08-10T12:01:50.000Z
|
scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | null | null | null |
scripts/create_fluseverity_figs_v2/export_zOR_classif_swap.py
|
eclee25/flu-SDI-exploratory-age
|
2f5a4d97b84d2116e179e85fe334edf4556aa946
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/14/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
# 10/14/14 swap OR age groups
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif_swap.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
## local modules ##
import functions_v2 as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s\n" % (key, value[0], value[1]))
def print_dict_to_file2(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,region,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
def print_dict_to_file3(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write('season,state,mn_retro,mn_early\n')
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
##############################################
# SDI NATIONAL
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: region-level peak-based retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: state-level peak-based retrospective classification
# import same files as regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_state[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
print d_classifzOR
print d_classifzOR_reg
fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s_swap.csv' %(nw)
print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s_swap.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg_swap.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst_swap.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4)
| 59.893491
| 206
| 0.742936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,751
| 0.666963
|
3ff99e7156481e3c6520089236ad30d435cc64ca
| 3,346
|
py
|
Python
|
sonar/endpoints.py
|
sharm294/sonar
|
99de16dd16d0aa77734584e67263c78a37abef86
|
[
"MIT"
] | 5
|
2018-11-21T02:33:38.000Z
|
2020-10-30T12:22:05.000Z
|
sonar/endpoints.py
|
sharm294/sonar
|
99de16dd16d0aa77734584e67263c78a37abef86
|
[
"MIT"
] | 2
|
2018-12-28T18:31:45.000Z
|
2020-06-12T19:24:57.000Z
|
sonar/endpoints.py
|
sharm294/sonar
|
99de16dd16d0aa77734584e67263c78a37abef86
|
[
"MIT"
] | 1
|
2019-03-10T13:48:50.000Z
|
2019-03-10T13:48:50.000Z
|
"""
Signal endpoints that can be used in testbenches
"""
import textwrap
from typing import Dict
import sonar.base_types as base
class Endpoint(base.SonarObject):
"""
Endpoint class
"""
arguments: Dict[str, int] = {}
@classmethod
def instantiate(cls, _indent):
"""
Instantiate the endpoint logic
Args:
_indent (str): Indentation to add to each line
Returns:
str: Updated ip_inst
"""
return ""
@classmethod
def asdict(cls):
tmp = {
"instantiate": False,
}
return tmp
class InterfaceEndpoint(Endpoint):
"""
    InterfaceEndpoint class
"""
actions: Dict[str, Dict] = {}
@staticmethod
def import_packages_local(_interface):
"""
Specifies any packages that must be imported once per endpoint
Args:
interface (Interface): The interface belonging to the endpoint
Returns:
str: Packages to be imported
"""
return ""
@staticmethod
def initial_blocks(_indent):
"""
Any text that should be inside an initial block
Args:
indent (str): Indentation to add to each line
Returns:
list[str]: List of strings that go into separate initial blocks
"""
return []
@staticmethod
def prologue(_indent):
"""
Any text that should be part of the testbench as a prologue outside any
blocks such as variable declarations.
Args:
prologue (str): The preceding text to append to
indent (str): Indentation to add to each line
Returns:
str: Updated prologue
"""
return ""
@staticmethod
def source_tcl(_interface, _path):
"""
Any TCL files that should be sourced as part of initializing the
interface
Args:
interface (AXI4LiteSlave): AXI4LiteSlave object
path (str): Path where to place the TCL source files
"""
return None
@classmethod
def asdict(cls):
tmp = super().asdict()
tmp["import_packages_local"] = False
tmp["initial_blocks"] = False
tmp["source_tcl"] = False
tmp["prologue"] = False
return tmp
class PeriodicSignal(Endpoint):
"""
    PeriodicSignal endpoint class
"""
@classmethod
def instantiate(cls, indent):
"""
        Generate the SV initial block that toggles the signal every half period.
Args:
indent (str): Indentation to add to each line
Returns:
str: Updated ip_inst
"""
name = cls.arguments["name"]
initial_value = cls.arguments["value"]
period = cls.arguments["period"]
block = textwrap.indent(
textwrap.dedent(
f"""\
initial begin
{name}_endpoint[$$endpointIndex] = {initial_value};
forever begin
#({period}/2) {name}_endpoint[$$endpointIndex] <= ~{name}_endpoint[$$endpointIndex];
end
end
"""
),
indent,
)
return block
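    # Illustration of the generated logic (assuming hypothetical arguments
    # name="clk", value=0, period=10), substituting into the template above:
    #   initial begin
    #       clk_endpoint[$$endpointIndex] = 0;
    #       forever begin
    #           #(10/2) clk_endpoint[$$endpointIndex] <= ~clk_endpoint[$$endpointIndex];
    #       end
    #   end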
@classmethod
def asdict(cls):
tmp = {
"instantiate": False,
}
return tmp
| 22.456376
| 104
| 0.545129
| 3,206
| 0.958159
| 0
| 0
| 2,868
| 0.857143
| 0
| 0
| 1,979
| 0.591452
|
3ff9c9e147dda16eeaf022e601e081b35faea86c
| 15,400
|
py
|
Python
|
minemeld/ft/condition/BoolExprParser.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | 1
|
2021-01-02T07:25:04.000Z
|
2021-01-02T07:25:04.000Z
|
minemeld/ft/condition/BoolExprParser.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | null | null | null |
minemeld/ft/condition/BoolExprParser.py
|
zul126/minemeld-core
|
2eb9b9bfd7654aee57aabd5fb280d4e89a438daf
|
[
"Apache-2.0"
] | 1
|
2019-03-14T06:52:52.000Z
|
2019-03-14T06:52:52.000Z
|
# Generated from BoolExpr.g4 by ANTLR 4.5.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
# flake8: noqa
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
buf.write(u"\22\63\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write(u"\7\4\b\t\b\3\2\3\2\3\2\3\2\3\3\3\3\5\3\27\n\3\3\4\3\4")
buf.write(u"\3\4\5\4\34\n\4\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\5\6&")
buf.write(u"\n\6\7\6(\n\6\f\6\16\6+\13\6\3\6\3\6\3\7\3\7\3\b\3\b")
buf.write(u"\3\b\2\2\t\2\4\6\b\n\f\16\2\4\3\2\6\13\4\2\f\16\20\21")
buf.write(u"/\2\20\3\2\2\2\4\26\3\2\2\2\6\30\3\2\2\2\b\35\3\2\2\2")
buf.write(u"\n \3\2\2\2\f.\3\2\2\2\16\60\3\2\2\2\20\21\5\4\3\2\21")
buf.write(u"\22\5\f\7\2\22\23\5\16\b\2\23\3\3\2\2\2\24\27\7\17\2")
buf.write(u"\2\25\27\5\6\4\2\26\24\3\2\2\2\26\25\3\2\2\2\27\5\3\2")
buf.write(u"\2\2\30\33\7\17\2\2\31\34\5\b\5\2\32\34\5\n\6\2\33\31")
buf.write(u"\3\2\2\2\33\32\3\2\2\2\34\7\3\2\2\2\35\36\7\3\2\2\36")
buf.write(u"\37\7\4\2\2\37\t\3\2\2\2 !\7\3\2\2!)\5\4\3\2\"%\7\5\2")
buf.write(u"\2#&\5\4\3\2$&\5\16\b\2%#\3\2\2\2%$\3\2\2\2&(\3\2\2\2")
buf.write(u"\'\"\3\2\2\2(+\3\2\2\2)\'\3\2\2\2)*\3\2\2\2*,\3\2\2\2")
buf.write(u"+)\3\2\2\2,-\7\4\2\2-\13\3\2\2\2./\t\2\2\2/\r\3\2\2\2")
buf.write(u"\60\61\t\3\2\2\61\17\3\2\2\2\6\26\33%)")
return buf.getvalue()
class BoolExprParser ( Parser ):
grammarFileName = "BoolExpr.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ u"<INVALID>", u"'('", u"')'", u"','", u"'<'", u"'<='",
u"'=='", u"'>='", u"'>'", u"'!='", u"'true'", u"'false'",
u"'null'" ]
symbolicNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"JAVASCRIPTIDENTIFIER", u"STRING",
u"NUMBER", u"WS" ]
RULE_booleanExpression = 0
RULE_expression = 1
RULE_functionExpression = 2
RULE_noArgs = 3
RULE_oneOrMoreArgs = 4
RULE_comparator = 5
RULE_value = 6
ruleNames = [ u"booleanExpression", u"expression", u"functionExpression",
u"noArgs", u"oneOrMoreArgs", u"comparator", u"value" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
JAVASCRIPTIDENTIFIER=13
STRING=14
NUMBER=15
WS=16
def __init__(self, input):
super(BoolExprParser, self).__init__(input)
self.checkVersion("4.5.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class BooleanExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.BooleanExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def expression(self):
return self.getTypedRuleContext(BoolExprParser.ExpressionContext,0)
def comparator(self):
return self.getTypedRuleContext(BoolExprParser.ComparatorContext,0)
def value(self):
return self.getTypedRuleContext(BoolExprParser.ValueContext,0)
def getRuleIndex(self):
return BoolExprParser.RULE_booleanExpression
def enterRule(self, listener):
if hasattr(listener, "enterBooleanExpression"):
listener.enterBooleanExpression(self)
def exitRule(self, listener):
if hasattr(listener, "exitBooleanExpression"):
listener.exitBooleanExpression(self)
def booleanExpression(self):
localctx = BoolExprParser.BooleanExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_booleanExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 14
self.expression()
self.state = 15
self.comparator()
self.state = 16
self.value()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.ExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def JAVASCRIPTIDENTIFIER(self):
return self.getToken(BoolExprParser.JAVASCRIPTIDENTIFIER, 0)
def functionExpression(self):
return self.getTypedRuleContext(BoolExprParser.FunctionExpressionContext,0)
def getRuleIndex(self):
return BoolExprParser.RULE_expression
def enterRule(self, listener):
if hasattr(listener, "enterExpression"):
listener.enterExpression(self)
def exitRule(self, listener):
if hasattr(listener, "exitExpression"):
listener.exitExpression(self)
def expression(self):
localctx = BoolExprParser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_expression)
try:
self.state = 20
la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 18
self.match(BoolExprParser.JAVASCRIPTIDENTIFIER)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 19
self.functionExpression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.FunctionExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def JAVASCRIPTIDENTIFIER(self):
return self.getToken(BoolExprParser.JAVASCRIPTIDENTIFIER, 0)
def noArgs(self):
return self.getTypedRuleContext(BoolExprParser.NoArgsContext,0)
def oneOrMoreArgs(self):
return self.getTypedRuleContext(BoolExprParser.OneOrMoreArgsContext,0)
def getRuleIndex(self):
return BoolExprParser.RULE_functionExpression
def enterRule(self, listener):
if hasattr(listener, "enterFunctionExpression"):
listener.enterFunctionExpression(self)
def exitRule(self, listener):
if hasattr(listener, "exitFunctionExpression"):
listener.exitFunctionExpression(self)
def functionExpression(self):
localctx = BoolExprParser.FunctionExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_functionExpression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 22
self.match(BoolExprParser.JAVASCRIPTIDENTIFIER)
self.state = 25
la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
if la_ == 1:
self.state = 23
self.noArgs()
pass
elif la_ == 2:
self.state = 24
self.oneOrMoreArgs()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NoArgsContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.NoArgsContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return BoolExprParser.RULE_noArgs
def enterRule(self, listener):
if hasattr(listener, "enterNoArgs"):
listener.enterNoArgs(self)
def exitRule(self, listener):
if hasattr(listener, "exitNoArgs"):
listener.exitNoArgs(self)
def noArgs(self):
localctx = BoolExprParser.NoArgsContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_noArgs)
try:
self.enterOuterAlt(localctx, 1)
self.state = 27
self.match(BoolExprParser.T__0)
self.state = 28
self.match(BoolExprParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OneOrMoreArgsContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.OneOrMoreArgsContext, self).__init__(parent, invokingState)
self.parser = parser
def expression(self, i=None):
if i is None:
return self.getTypedRuleContexts(BoolExprParser.ExpressionContext)
else:
return self.getTypedRuleContext(BoolExprParser.ExpressionContext,i)
def value(self, i=None):
if i is None:
return self.getTypedRuleContexts(BoolExprParser.ValueContext)
else:
return self.getTypedRuleContext(BoolExprParser.ValueContext,i)
def getRuleIndex(self):
return BoolExprParser.RULE_oneOrMoreArgs
def enterRule(self, listener):
if hasattr(listener, "enterOneOrMoreArgs"):
listener.enterOneOrMoreArgs(self)
def exitRule(self, listener):
if hasattr(listener, "exitOneOrMoreArgs"):
listener.exitOneOrMoreArgs(self)
def oneOrMoreArgs(self):
localctx = BoolExprParser.OneOrMoreArgsContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_oneOrMoreArgs)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 30
self.match(BoolExprParser.T__0)
self.state = 31
self.expression()
self.state = 39
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==BoolExprParser.T__2:
self.state = 32
self.match(BoolExprParser.T__2)
self.state = 35
token = self._input.LA(1)
if token in [BoolExprParser.JAVASCRIPTIDENTIFIER]:
self.state = 33
self.expression()
elif token in [BoolExprParser.T__9, BoolExprParser.T__10, BoolExprParser.T__11, BoolExprParser.STRING, BoolExprParser.NUMBER]:
self.state = 34
self.value()
else:
raise NoViableAltException(self)
self.state = 41
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 42
self.match(BoolExprParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ComparatorContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.ComparatorContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return BoolExprParser.RULE_comparator
def enterRule(self, listener):
if hasattr(listener, "enterComparator"):
listener.enterComparator(self)
def exitRule(self, listener):
if hasattr(listener, "exitComparator"):
listener.exitComparator(self)
def comparator(self):
localctx = BoolExprParser.ComparatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_comparator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 44
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BoolExprParser.T__3) | (1 << BoolExprParser.T__4) | (1 << BoolExprParser.T__5) | (1 << BoolExprParser.T__6) | (1 << BoolExprParser.T__7) | (1 << BoolExprParser.T__8))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ValueContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(BoolExprParser.ValueContext, self).__init__(parent, invokingState)
self.parser = parser
def STRING(self):
return self.getToken(BoolExprParser.STRING, 0)
def NUMBER(self):
return self.getToken(BoolExprParser.NUMBER, 0)
def getRuleIndex(self):
return BoolExprParser.RULE_value
def enterRule(self, listener):
if hasattr(listener, "enterValue"):
listener.enterValue(self)
def exitRule(self, listener):
if hasattr(listener, "exitValue"):
listener.exitValue(self)
def value(self):
localctx = BoolExprParser.ValueContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_value)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 46
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << BoolExprParser.T__9) | (1 << BoolExprParser.T__10) | (1 << BoolExprParser.T__11) | (1 << BoolExprParser.STRING) | (1 << BoolExprParser.NUMBER))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| 33.04721
| 241
| 0.586104
| 13,879
| 0.901234
| 0
| 0
| 0
| 0
| 0
| 0
| 1,710
| 0.111039
|
3ffb7c0442cbda7e7c873ec775ef33cdb0c000d2
| 398
|
py
|
Python
|
nodes/networkedSingleStepper/temporaryURLNode.py
|
imoyer/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 1
|
2017-07-03T08:34:39.000Z
|
2017-07-03T08:34:39.000Z
|
nodes/networkedSingleStepper/temporaryURLNode.py
|
imoyer/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 3
|
2015-12-04T23:14:50.000Z
|
2016-11-08T16:24:32.000Z
|
nodes/networkedSingleStepper/temporaryURLNode.py
|
imnp/pygestalt
|
d332df64264cce4a2bec8a73d698c386f1eaca7b
|
[
"MIT"
] | 1
|
2017-09-13T00:17:39.000Z
|
2017-09-13T00:17:39.000Z
|
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL /vn/testNode.py was not found on this server.</p>
<p>Additionally, a 404 Not Found
error was encountered while trying to use an ErrorDocument to handle the request.</p>
<hr>
<address>Apache Server at www.pygestalt.org Port 80</address>
</body></html>
| 33.166667
| 85
| 0.718593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.067839
|
3ffbaac7ded264cd662d18071c85e8138b2662eb
| 4,761
|
py
|
Python
|
cppwg/writers/header_collection_writer.py
|
josephsnyder/cppwg
|
265117455ed57eb250643a28ea6029c2bccf3ab3
|
[
"MIT"
] | 21
|
2017-10-03T14:29:36.000Z
|
2021-12-07T08:54:43.000Z
|
cppwg/writers/header_collection_writer.py
|
josephsnyder/cppwg
|
265117455ed57eb250643a28ea6029c2bccf3ab3
|
[
"MIT"
] | 2
|
2017-12-29T19:17:44.000Z
|
2020-03-27T14:59:27.000Z
|
cppwg/writers/header_collection_writer.py
|
josephsnyder/cppwg
|
265117455ed57eb250643a28ea6029c2bccf3ab3
|
[
"MIT"
] | 6
|
2019-03-21T11:55:52.000Z
|
2021-07-13T20:49:50.000Z
|
#!/usr/bin/env python
"""
Generate the file classes_to_be_wrapped.hpp, which contains includes,
instantiation and naming typedefs for all classes that are to be
automatically wrapped.
"""
import os
import ntpath
class CppHeaderCollectionWriter():
"""
This class manages generation of the header collection file for
parsing by CastXML
"""
def __init__(self, package_info, wrapper_root):
self.wrapper_root = wrapper_root
self.package_info = package_info
self.header_file_name = "wrapper_header_collection.hpp"
self.hpp_string = ""
self.class_dict = {}
self.free_func_dict = {}
for eachModule in self.package_info.module_info:
for eachClassInfo in eachModule.class_info:
self.class_dict[eachClassInfo.name] = eachClassInfo
for eachFuncInfo in eachModule.free_function_info:
self.free_func_dict[eachFuncInfo.name] = eachFuncInfo
def add_custom_header_code(self):
"""
Any custom header code goes here
"""
pass
def write_file(self):
"""
The actual write
"""
if not os.path.exists(self.wrapper_root + "/"):
os.makedirs(self.wrapper_root + "/")
file_path = self.wrapper_root + "/" + self.header_file_name
hpp_file = open(file_path, 'w')
hpp_file.write(self.hpp_string)
hpp_file.close()
def should_include_all(self):
"""
Return whether all source files in the module source locs should be included
"""
for eachModule in self.package_info.module_info:
if eachModule.use_all_classes or eachModule.use_all_free_functions:
return True
return False
def write(self):
"""
Main method for generating the header file output string
"""
hpp_header_dict = {'package_name': self.package_info.name}
hpp_header_template = """\
#ifndef {package_name}_HEADERS_HPP_
#define {package_name}_HEADERS_HPP_
// Includes
"""
self.hpp_string = hpp_header_template.format(**hpp_header_dict)
# Now our own includes
if self.should_include_all():
for eachFile in self.package_info.source_hpp_files:
include_name = ntpath.basename(eachFile)
self.hpp_string += '#include "' + include_name + '"\n'
else:
for eachModule in self.package_info.module_info:
for eachClassInfo in eachModule.class_info:
if eachClassInfo.source_file is not None:
self.hpp_string += '#include "' + eachClassInfo.source_file + '"\n'
elif eachClassInfo.source_file_full_path is not None:
include_name = ntpath.basename(eachClassInfo.source_file_full_path)
self.hpp_string += '#include "' + include_name + '"\n'
for eachFuncInfo in eachModule.free_function_info:
if eachFuncInfo.source_file_full_path is not None:
include_name = ntpath.basename(eachFuncInfo.source_file_full_path)
self.hpp_string += '#include "' + include_name + '"\n'
# Add the template instantiations
self.hpp_string += "\n// Instantiate Template Classes \n"
for eachModule in self.package_info.module_info:
for eachClassInfo in eachModule.class_info:
full_names = eachClassInfo.get_full_names()
if len(full_names) == 1:
continue
prefix = "template class "
for eachTemplateName in full_names:
self.hpp_string += prefix + eachTemplateName.replace(" ","") + ";\n"
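        # For illustration (hypothetical class "Foo" wrapped as Foo<2> and Foo<3>),
        # the loop above would append:
        #   template class Foo<2>;
        #   template class Foo<3>;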
        # Add typedefs for nice naming
self.hpp_string += "\n// Typedef for nicer naming\n"
self.hpp_string += "namespace cppwg{ \n"
for eachModule in self.package_info.module_info:
for eachClassInfo in eachModule.class_info:
full_names = eachClassInfo.get_full_names()
if len(full_names) == 1:
continue
short_names = eachClassInfo.get_short_names()
for idx, eachTemplateName in enumerate(full_names):
short_name = short_names[idx]
typdef_prefix = "typedef " + eachTemplateName.replace(" ","") + " "
self.hpp_string += typdef_prefix + short_name + ";\n"
self.hpp_string += "}\n"
self.add_custom_header_code()
self.hpp_string += "\n#endif // {}_HEADERS_HPP_\n".format(self.package_info.name)
self.write_file()
| 36.068182
| 91
| 0.603235
| 4,544
| 0.954421
| 0
| 0
| 0
| 0
| 0
| 0
| 1,046
| 0.219702
|
3ffbd01add7dfacc772a2751a5811b5cb60b641e
| 6,590
|
py
|
Python
|
22-crab-combat/solution22_2.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | 1
|
2020-12-03T13:20:49.000Z
|
2020-12-03T13:20:49.000Z
|
22-crab-combat/solution22_2.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | null | null | null |
22-crab-combat/solution22_2.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | null | null | null |
# Solution to part 2 of day 22 of AOC 2020, Crab Combat.
# https://adventofcode.com/2020/day/22
import sys
VERBOSE = ('-v' in sys.argv)
class Deck:
def __init__(self, player: int, cards: list):
self.player = player
self.cards = cards
def take_top_card(self) -> int:
"""Remove the top card from the deck. Return the value of that card."""
card = self.cards.pop(0)
if VERBOSE:
print(self.player, 'plays:', card)
return card
def top_cards(self, top: int) -> list:
"""Return a list of the top cards in the deck. The number of cards is the parm of this method."""
return self.cards[:top].copy()
def card_on_bottom(self, card: int):
"""Put the parm card on the bottom of the deck."""
self.cards.append(card)
def display(self):
"""Print out info about the deck to stdout."""
print('Player', str(self.player) + "'s deck: ", end='')
first = True
for card in self.cards:
if not first:
print(', ', end='')
first = False
print(card, end='')
print()
class Combat:
def __init__(self, game: int, p1_cards: list, p2_cards: list):
self.p1_deck = Deck(player=1, cards=p1_cards) # Player 1's deck of cards.
self.p2_deck = Deck(player=2, cards=p2_cards) # Player 2's card deck.
self.previous_rounds = [] # List of decks that each player had in previous rounds.
self.game = game
if VERBOSE:
print('=== Game', self.game, '===')
print()
self.round = 1
self.winner = 0 # 0 indicates no winner yet.
while self.winner == 0:
self.winner = self.play_a_round()
def play_a_round(self) -> int:
"""Play a round of the game.
If one of the players wins the game in this round, return their player number.
Otherwise, return 0, to indicate no winner after this round."""
if VERBOSE:
print('-- Round', self.round, '(Game ' + str(self.game) + ')--')
self.p1_deck.display()
self.p2_deck.display()
# "Before either player deals a card, if there was a previous round in this game that had exactly the same
# cards in the same order in the same players' decks, the game instantly ends in a win for player 1."
if (self.p1_deck.cards, self.p2_deck.cards) in self.previous_rounds:
if VERBOSE:
print('Stalemate, hence Player 1 wins')
return 1
self.previous_rounds.append((self.p1_deck.cards.copy(), self.p2_deck.cards.copy()))
# "... both players draw their top card..."
p1_card = self.p1_deck.take_top_card()
p2_card = self.p2_deck.take_top_card()
# "If both players have at least as many cards remaining in their deck as the value of the card they just drew,
# the winner of the round is determined by playing a new game of Recursive Combat."
if p1_card <= len(self.p1_deck.cards) and p2_card <= len(self.p2_deck.cards):
if VERBOSE:
print('Playing a sub-game to determine the winner...')
p1_new_game_cards = self.p1_deck.top_cards(top=p1_card)
p2_new_game_cards = self.p2_deck.top_cards(top=p2_card)
new_game = Combat(game=self.game + 1, p1_cards=p1_new_game_cards, p2_cards=p2_new_game_cards)
round_winner = new_game.winner
if VERBOSE:
print('...anyway, back to game', self.game)
elif p1_card > p2_card: # "The player with the higher-valued card wins the round."
round_winner = 1
else:
round_winner = 2
if round_winner == 1:
if VERBOSE:
print('Player 1 wins round', self.round, 'of game', self.game)
# "The winner keeps both cards, placing them on the bottom of their own deck so that the winner's card
# is above the other card."
self.p1_deck.card_on_bottom(p1_card)
self.p1_deck.card_on_bottom(p2_card)
else:
if VERBOSE:
print('Player 2 wins round', self.round, 'of game', self.game)
self.p2_deck.card_on_bottom(p2_card)
self.p2_deck.card_on_bottom(p1_card)
if VERBOSE:
print()
self.round += 1
# "If this causes a player to have all of the cards, they win, and the game ends."
if len(self.p1_deck.cards) == 0: # p1 has no cards left, so p2 wins.
if VERBOSE:
print('The winner of game', self.game, 'is player 2')
return 2
elif len(self.p2_deck.cards) == 0: # p2 has no cards left, so p1 wins.
if VERBOSE:
print('The winner of game', self.game, 'is player 1')
return 1
return 0 # 0 indicates no winner of the game during this round.
def calculate_winning_score(self) -> int:
"""Return score of winning deck."""
# "The bottom card in their deck is worth the value of the card multiplied by 1, the second-from-the-bottom
# card is worth the value of the card multiplied by 2, and so on."
if self.winner == 1:
cards = self.p1_deck.cards
else:
cards = self.p2_deck.cards
score = 0
multiplier = 1
for card in cards[::-1]: # Go through the winner's cards backwards.
score += card * multiplier
multiplier += 1
return score
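    # Worked example of the scoring rule above (hypothetical winning deck, not taken
    # from any particular input): for cards [3, 2, 10, 6, 8, 5, 9, 4, 7, 1] the score is
    # 1*1 + 7*2 + 4*3 + 9*4 + 5*5 + 8*6 + 6*7 + 10*8 + 2*9 + 3*10 = 306.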
def text_to_cards(text: str) -> list:
"""For parm text file, return a list of integers which are the cards in that text file."""
cards = []
# Each card starts on a new line. Ignore the first line, as it is the player number.
for card in text.split('\n')[1:]:
cards.append(int(card))
return cards
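# For example, text_to_cards("Player 1:\n9\n2\n6\n3\n1") returns [9, 2, 6, 3, 1]
# (the header line is skipped and each remaining line is parsed as an int).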
def main():
filename = sys.argv[1]
f = open(filename)
whole_text = f.read()
f.close()
p1_text, p2_text = whole_text.split('\n\n') # There is a blank line between the 2 players.
p1_cards_list = text_to_cards(p1_text)
p2_cards_list = text_to_cards(p2_text)
game = Combat(game=1, p1_cards=p1_cards_list, p2_cards=p2_cards_list)
print('== Post-game results ==')
game.p1_deck.display()
game.p2_deck.display()
print('Part 2:', game.calculate_winning_score())
if __name__ == "__main__":
main()
| 35.621622
| 119
| 0.582398
| 5,569
| 0.845068
| 0
| 0
| 0
| 0
| 0
| 0
| 2,355
| 0.35736
|
3ffc66c1a55abdcb165f5612bc7ea3c265086406
| 246
|
py
|
Python
|
consts.py
|
mauroreisvieira/sublime-tailwindcss-intellisense
|
140edc90c59c045fc8a9d7f6bcff0b727660ee64
|
[
"MIT"
] | null | null | null |
consts.py
|
mauroreisvieira/sublime-tailwindcss-intellisense
|
140edc90c59c045fc8a9d7f6bcff0b727660ee64
|
[
"MIT"
] | null | null | null |
consts.py
|
mauroreisvieira/sublime-tailwindcss-intellisense
|
140edc90c59c045fc8a9d7f6bcff0b727660ee64
|
[
"MIT"
] | null | null | null |
import os
# @see https://marketplace.visualstudio.com/items?itemName=bradlc.vscode-tailwindcss
EXTENSION_UID = "bradlc.vscode-tailwindcss"
EXTENSION_VERSION = "0.5.2"
SERVER_BINARY_PATH = os.path.join("extension", "dist", "server", "index.js")
| 30.75
| 84
| 0.764228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.621951
|
3ffe70804c74668d12ccd199fbcd96d4fb1cfb92
| 2,426
|
py
|
Python
|
backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py
|
Pinafore/Karl-flashcards-web-app
|
2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c
|
[
"Apache-2.0"
] | 7
|
2020-09-13T06:06:32.000Z
|
2021-11-15T11:37:16.000Z
|
backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py
|
Pinafore/Karl-flashcards-web-app
|
2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c
|
[
"Apache-2.0"
] | 16
|
2020-08-28T20:38:27.000Z
|
2021-03-18T04:03:00.000Z
|
backend/app/alembic/versions/491383f70589_add_separate_reported_and_deleted_tables.py
|
Pinafore/Karl-flashcards-web-app
|
2f4d9925c545f83eb3289dfef85d9b0bf9bfeb8c
|
[
"Apache-2.0"
] | null | null | null |
"""add separate reported and deleted tables
Revision ID: 491383f70589
Revises: 9afc4e3a9bf3
Create Date: 2020-06-26 05:23:30.267933
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '491383f70589'
down_revision = '9afc4e3a9bf3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('deleted',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fact_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('date_deleted', sa.TIMESTAMP(timezone=True), nullable=False),
sa.ForeignKeyConstraint(['fact_id'], ['fact.fact_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_deleted_id'), 'deleted', ['id'], unique=False)
op.create_table('reported',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fact_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('date_reported', sa.TIMESTAMP(timezone=True), nullable=False),
sa.Column('suggestion', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.ForeignKeyConstraint(['fact_id'], ['fact.fact_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_reported_id'), 'reported', ['id'], unique=False)
op.drop_column('suspended', 'comment')
op.drop_column('suspended', 'suspend_type')
op.drop_column('suspended', 'suggestion')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('suspended', sa.Column('suggestion', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True))
op.add_column('suspended', sa.Column('suspend_type', postgresql.ENUM('delete', 'suspend', 'report', name='suspendtype'), autoincrement=False, nullable=False))
op.add_column('suspended', sa.Column('comment', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_index(op.f('ix_reported_id'), table_name='reported')
op.drop_table('reported')
op.drop_index(op.f('ix_deleted_id'), table_name='deleted')
op.drop_table('deleted')
# ### end Alembic commands ###
| 41.827586
| 162
| 0.694559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 882
| 0.363561
|
3fffb39e0047b218b9939ad4a6b88417807e3ce7
| 17,935
|
py
|
Python
|
test/test_viscous.py
|
nchristensen/mirgecom
|
f27285d1fc7e077e0b1ac6872712d88517588e33
|
[
"MIT"
] | null | null | null |
test/test_viscous.py
|
nchristensen/mirgecom
|
f27285d1fc7e077e0b1ac6872712d88517588e33
|
[
"MIT"
] | null | null | null |
test/test_viscous.py
|
nchristensen/mirgecom
|
f27285d1fc7e077e0b1ac6872712d88517588e33
|
[
"MIT"
] | null | null | null |
"""Test the viscous fluid helper functions."""
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.random
import numpy.linalg as la # noqa
import pyopencl.clmath # noqa
import logging
import pytest # noqa
from pytools.obj_array import make_obj_array
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL
import grudge.op as op
from grudge.eager import (
EagerDGDiscretization,
interior_trace_pair
)
from meshmode.array_context import ( # noqa
pytest_generate_tests_for_pyopencl_array_context
as pytest_generate_tests)
from mirgecom.fluid import make_conserved
from mirgecom.transport import (
SimpleTransport,
PowerLawTransport
)
from mirgecom.eos import IdealSingleGas
logger = logging.getLogger(__name__)
@pytest.mark.parametrize("transport_model", [0, 1])
def test_viscous_stress_tensor(actx_factory, transport_model):
"""Test tau data structure and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
mass = 2*ones
energy = zeros + 2.5
mom = mass * velocity
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
if transport_model:
tv_model = SimpleTransport(bulk_viscosity=1.0, viscosity=0.5)
else:
tv_model = PowerLawTransport()
eos = IdealSingleGas(transport_model=tv_model)
mu = tv_model.viscosity(eos, cv)
lam = tv_model.volume_viscosity(eos, cv)
# Exact answer for tau
exp_grad_v = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
exp_grad_v_t = np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
exp_div_v = 15
exp_tau = (mu*(exp_grad_v + exp_grad_v_t)
+ lam*exp_div_v*np.eye(3))
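    # Note on the exact values above: for the linear velocity field assembled earlier,
    # grad(v)[i][j] = d(v_i)/d(x_j), so div(v) = 1 + 5 + 9 = 15, matching exp_div_v.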
from mirgecom.viscous import viscous_stress_tensor
tau = viscous_stress_tensor(discr, eos, cv, grad_cv)
# The errors come from grad_v
assert discr.norm(tau - exp_tau, np.inf) < 1e-12
# Box grid generator widget lifted from @majosm and slightly bent
def _get_box_mesh(dim, a, b, n, t=None):
dim_names = ["x", "y", "z"]
bttf = {}
for i in range(dim):
bttf["-"+str(i+1)] = ["-"+dim_names[i]]
bttf["+"+str(i+1)] = ["+"+dim_names[i]]
from meshmode.mesh.generation import generate_regular_rect_mesh as gen
return gen(a=a, b=b, npoints_per_axis=n, boundary_tag_to_face=bttf, mesh_type=t)
@pytest.mark.parametrize("order", [2, 3, 4])
@pytest.mark.parametrize("kappa", [0.0, 1.0, 2.3])
def test_poiseuille_fluxes(actx_factory, order, kappa):
"""Test the viscous fluxes using a Poiseuille input state."""
actx = actx_factory()
dim = 2
from pytools.convergence import EOCRecorder
e_eoc_rec = EOCRecorder()
p_eoc_rec = EOCRecorder()
base_pressure = 100000.0
pressure_ratio = 1.001
mu = 42 # arbitrary
left_boundary_location = 0
right_boundary_location = 0.1
ybottom = 0.
ytop = .02
nspecies = 0
spec_diffusivity = 0 * np.ones(nspecies)
transport_model = SimpleTransport(viscosity=mu, thermal_conductivity=kappa,
species_diffusivity=spec_diffusivity)
xlen = right_boundary_location - left_boundary_location
p_low = base_pressure
p_hi = pressure_ratio*base_pressure
dpdx = (p_low - p_hi) / xlen
rho = 1.0
eos = IdealSingleGas(transport_model=transport_model)
from mirgecom.initializers import PlanarPoiseuille
initializer = PlanarPoiseuille(density=rho, mu=mu)
def _elbnd_flux(discr, compute_interior_flux, compute_boundary_flux,
int_tpair, boundaries):
return (compute_interior_flux(int_tpair)
+ sum(compute_boundary_flux(btag) for btag in boundaries))
from mirgecom.flux import gradient_flux_central
def cv_flux_interior(int_tpair):
normal = thaw(actx, discr.normal(int_tpair.dd))
flux_weak = gradient_flux_central(int_tpair, normal)
return discr.project(int_tpair.dd, "all_faces", flux_weak)
def cv_flux_boundary(btag):
boundary_discr = discr.discr_from_dd(btag)
bnd_nodes = thaw(actx, boundary_discr.nodes())
cv_bnd = initializer(x_vec=bnd_nodes, eos=eos)
bnd_nhat = thaw(actx, discr.normal(btag))
from grudge.trace_pair import TracePair
bnd_tpair = TracePair(btag, interior=cv_bnd, exterior=cv_bnd)
flux_weak = gradient_flux_central(bnd_tpair, bnd_nhat)
return discr.project(bnd_tpair.dd, "all_faces", flux_weak)
for nfac in [1, 2, 4]:
npts_axis = nfac*(11, 21)
box_ll = (left_boundary_location, ybottom)
box_ur = (right_boundary_location, ytop)
mesh = _get_box_mesh(2, a=box_ll, b=box_ur, n=npts_axis)
logger.info(
f"Number of {dim}d elements: {mesh.nelements}"
)
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# compute max element size
from grudge.dt_utils import h_max_from_volume
h_max = h_max_from_volume(discr)
# form exact cv
cv = initializer(x_vec=nodes, eos=eos)
cv_int_tpair = interior_trace_pair(discr, cv)
boundaries = [BTAG_ALL]
cv_flux_bnd = _elbnd_flux(discr, cv_flux_interior, cv_flux_boundary,
cv_int_tpair, boundaries)
from mirgecom.operators import grad_operator
grad_cv = make_conserved(dim, q=grad_operator(discr, cv.join(),
cv_flux_bnd.join()))
xp_grad_cv = initializer.exact_grad(x_vec=nodes, eos=eos, cv_exact=cv)
xp_grad_v = 1/cv.mass * xp_grad_cv.momentum
xp_tau = mu * (xp_grad_v + xp_grad_v.transpose())
# sanity check the gradient:
relerr_scale_e = 1.0 / discr.norm(xp_grad_cv.energy, np.inf)
relerr_scale_p = 1.0 / discr.norm(xp_grad_cv.momentum, np.inf)
graderr_e = discr.norm((grad_cv.energy - xp_grad_cv.energy), np.inf)
graderr_p = discr.norm((grad_cv.momentum - xp_grad_cv.momentum), np.inf)
graderr_e *= relerr_scale_e
graderr_p *= relerr_scale_p
assert graderr_e < 5e-7
assert graderr_p < 5e-11
zeros = discr.zeros(actx)
ones = zeros + 1
pressure = eos.pressure(cv)
# grad of p should be dp/dx
xp_grad_p = make_obj_array([dpdx*ones, zeros])
grad_p = op.local_grad(discr, pressure)
dpscal = 1.0/np.abs(dpdx)
temperature = eos.temperature(cv)
tscal = rho*eos.gas_const()*dpscal
xp_grad_t = xp_grad_p/(cv.mass*eos.gas_const())
grad_t = op.local_grad(discr, temperature)
# sanity check
assert discr.norm(grad_p - xp_grad_p, np.inf)*dpscal < 5e-9
assert discr.norm(grad_t - xp_grad_t, np.inf)*tscal < 5e-9
# verify heat flux
from mirgecom.viscous import conductive_heat_flux
heat_flux = conductive_heat_flux(discr, eos, cv, grad_t)
xp_heat_flux = -kappa*xp_grad_t
assert discr.norm(heat_flux - xp_heat_flux, np.inf) < 2e-8
# verify diffusive mass flux is zilch (no scalar components)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
assert len(j) == 0
xp_e_flux = np.dot(xp_tau, cv.velocity) - xp_heat_flux
xp_mom_flux = xp_tau
from mirgecom.viscous import viscous_flux
vflux = viscous_flux(discr, eos, cv, grad_cv, grad_t)
efluxerr = (
discr.norm(vflux.energy - xp_e_flux, np.inf)
/ discr.norm(xp_e_flux, np.inf)
)
momfluxerr = (
discr.norm(vflux.momentum - xp_mom_flux, np.inf)
/ discr.norm(xp_mom_flux, np.inf)
)
assert discr.norm(vflux.mass, np.inf) == 0
e_eoc_rec.add_data_point(h_max, efluxerr)
p_eoc_rec.add_data_point(h_max, momfluxerr)
assert (
e_eoc_rec.order_estimate() >= order - 0.5
or e_eoc_rec.max_error() < 3e-9
)
assert (
p_eoc_rec.order_estimate() >= order - 0.5
or p_eoc_rec.max_error() < 2e-12
)
def test_species_diffusive_flux(actx_factory):
"""Test species diffusive flux and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
# assemble y so that each one has simple, but unique grad components
nspecies = 2*dim
y = make_obj_array([ones for _ in range(nspecies)])
for idim in range(dim):
ispec = 2*idim
y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
for iidim in range(dim)])
y[ispec+1] = -y[ispec]
massval = 2
mass = massval*ones
energy = zeros + 2.5
mom = mass * velocity
species_mass = mass*y
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
mu_b = 1.0
mu = 0.5
kappa = 5.0
# assemble d_alpha so that every species has a unique j
d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
thermal_conductivity=kappa,
species_diffusivity=d_alpha)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
tol = 1e-10
for idim in range(dim):
ispec = 2*idim
exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
for iidim in range(dim)])
exact_j = -massval * d_alpha[ispec] * exact_dy
assert discr.norm(j[ispec] - exact_j, np.inf) < tol
exact_j = massval * d_alpha[ispec+1] * exact_dy
assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
def test_diffusive_heat_flux(actx_factory):
"""Test diffusive heat flux and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
# assemble y so that each one has simple, but unique grad components
nspecies = 2*dim
y = make_obj_array([ones for _ in range(nspecies)])
for idim in range(dim):
ispec = 2*idim
y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
for iidim in range(dim)])
y[ispec+1] = -y[ispec]
massval = 2
mass = massval*ones
energy = zeros + 2.5
mom = mass * velocity
species_mass = mass*y
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
mu_b = 1.0
mu = 0.5
kappa = 5.0
# assemble d_alpha so that every species has a unique j
d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
thermal_conductivity=kappa,
species_diffusivity=d_alpha)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
tol = 1e-10
for idim in range(dim):
ispec = 2*idim
exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
for iidim in range(dim)])
exact_j = -massval * d_alpha[ispec] * exact_dy
assert discr.norm(j[ispec] - exact_j, np.inf) < tol
exact_j = massval * d_alpha[ispec+1] * exact_dy
assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
@pytest.mark.parametrize("array_valued", [False, True])
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_local_max_species_diffusivity(actx_factory, dim, array_valued):
"""Test the local maximum species diffusivity."""
actx = actx_factory()
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
vel = .32
velocity = make_obj_array([zeros+vel for _ in range(dim)])
massval = 1
mass = massval*ones
energy = zeros + 1.0 / (1.4*.4)
mom = mass * velocity
species_mass = np.array([1., 2., 3.], dtype=object)
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
d_alpha_input = np.array([.1, .2, .3])
if array_valued:
f = 1 + 0.1*actx.np.sin(nodes[0])
d_alpha_input *= f
tv_model = SimpleTransport(species_diffusivity=d_alpha_input)
eos = IdealSingleGas(transport_model=tv_model)
d_alpha = tv_model.species_diffusivity(eos, cv)
from mirgecom.viscous import get_local_max_species_diffusivity
expected = .3*ones
if array_valued:
expected *= f
calculated = get_local_max_species_diffusivity(actx, discr, d_alpha)
assert discr.norm(calculated-expected, np.inf) == 0
@pytest.mark.parametrize("dim", [1, 2, 3])
@pytest.mark.parametrize("mu", [-1, 0, 1, 2])
@pytest.mark.parametrize("vel", [0, 1])
def test_viscous_timestep(actx_factory, dim, mu, vel):
"""Test timestep size."""
actx = actx_factory()
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
zeros = discr.zeros(actx)
ones = zeros + 1.0
velocity = make_obj_array([zeros+vel for _ in range(dim)])
massval = 1
mass = massval*ones
# I *think* this energy should yield c=1.0
energy = zeros + 1.0 / (1.4*.4)
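    # rough check of that guess (ignoring the kinetic-energy contribution, i.e. the
    # vel=0 case): p = (gamma - 1)*E = 0.4/(1.4*0.4) = 1/1.4, so
    # c = sqrt(gamma*p/rho) = sqrt(1.4*(1/1.4)/1) = 1.0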
mom = mass * velocity
species_mass = None
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
from grudge.dt_utils import characteristic_lengthscales
chlen = characteristic_lengthscales(actx, discr)
from grudge.op import nodal_min
chlen_min = nodal_min(discr, "vol", chlen)
mu = mu*chlen_min
if mu < 0:
mu = 0
tv_model = None
else:
tv_model = SimpleTransport(viscosity=mu)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import get_viscous_timestep
dt_field = get_viscous_timestep(discr, eos, cv)
speed_total = actx.np.sqrt(np.dot(velocity, velocity)) + eos.sound_speed(cv)
dt_expected = chlen / (speed_total + (mu / chlen))
error = (dt_expected - dt_field) / dt_expected
assert discr.norm(error, np.inf) == 0
| 34.292543
| 84
| 0.652188
| 0
| 0
| 0
| 0
| 10,783
| 0.601227
| 0
| 0
| 2,401
| 0.133872
|
b200470663bb7eee02e9c82ffb877d8af91ad93e
| 216
|
py
|
Python
|
aiobotocore_refreshable_credentials/__init__.py
|
aweber/aiobotocore-refreshable-credentials
|
3310d3fa29ac657f7cd5f64829da5f9b12c7a86d
|
[
"BSD-3-Clause"
] | null | null | null |
aiobotocore_refreshable_credentials/__init__.py
|
aweber/aiobotocore-refreshable-credentials
|
3310d3fa29ac657f7cd5f64829da5f9b12c7a86d
|
[
"BSD-3-Clause"
] | 2
|
2021-05-21T14:18:52.000Z
|
2022-03-15T12:34:45.000Z
|
aiobotocore_refreshable_credentials/__init__.py
|
aweber/aiobotocore-refreshable-credentials
|
3310d3fa29ac657f7cd5f64829da5f9b12c7a86d
|
[
"BSD-3-Clause"
] | 1
|
2021-06-18T18:37:15.000Z
|
2021-06-18T18:37:15.000Z
|
"""
aiobotocore-refreshable-credentials
===================================
"""
from aiobotocore_refreshable_credentials.session import get_session
version = '1.0.3'
__all__ = [
'get_session',
'version'
]
| 15.428571
| 67
| 0.606481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.50463
|
b2021676535704ccb7bbd4b21a330bdfa74bae2e
| 702
|
py
|
Python
|
g13gui/bitwidgets/label_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 3
|
2021-10-16T01:28:24.000Z
|
2021-12-07T21:49:54.000Z
|
g13gui/bitwidgets/label_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 12
|
2021-05-09T16:57:18.000Z
|
2021-06-16T19:20:57.000Z
|
g13gui/bitwidgets/label_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | null | null | null |
import unittest
import time
from g13gui.bitwidgets.display import Display
from g13gui.bitwidgets.x11displaydevice import X11DisplayDevice
from g13gui.bitwidgets.label import Label
class LabelTests(unittest.TestCase):
def setUp(self):
self.dd = X11DisplayDevice(self.__class__.__name__)
self.dd.start()
time.sleep(0.25)
self.d = Display(self.dd)
def tearDown(self):
time.sleep(1)
self.dd.shutdown()
self.dd.join()
def testDraw(self):
label = Label(0, 0, "Hello world!")
ctx = self.d.getContext()
label.show()
label.draw(ctx)
self.d.commit()
if __name__ == '__main__':
unittest.main()
| 22.645161
| 63
| 0.64245
| 469
| 0.668091
| 0
| 0
| 0
| 0
| 0
| 0
| 24
| 0.034188
|
b206b349123d73fd230c868195f898309f10c8ec
| 7,772
|
py
|
Python
|
padre/git_utils.py
|
krislindgren/padre
|
56e3342a953fdc472adc11ce301acabf6c595760
|
[
"MIT"
] | null | null | null |
padre/git_utils.py
|
krislindgren/padre
|
56e3342a953fdc472adc11ce301acabf6c595760
|
[
"MIT"
] | null | null | null |
padre/git_utils.py
|
krislindgren/padre
|
56e3342a953fdc472adc11ce301acabf6c595760
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# MIT License
#
# Modified from https://github.com/wzpan/git-repo-sync/
import os
import subprocess
import sys
def print_blocked(output):
print("=" * len(output))
print(output)
print("=" * len(output))
def check_output(cmd, **kwargs):
tmp_cmd = subprocess.list2cmdline(cmd)
print("Running command '%s'" % tmp_cmd)
return subprocess.check_output(cmd, **kwargs)
def call(cmd, **kwargs):
tmp_cmd = subprocess.list2cmdline(cmd)
print("Running command '%s'" % tmp_cmd)
return subprocess.call(cmd, **kwargs)
def get_remote_branches(working_dir, remote, skip_branches):
remote_branches = check_output(["git", "branch", "-r"],
cwd=working_dir)
remote_branches = remote_branches.split("\n")
tmp_branches = []
for branch in remote_branches:
branch = branch.strip()
if not branch:
continue
if branch.find("->") == -1:
tmp_branches.append(branch)
else:
tmp_branch, tmp_alias = branch.split("->", 1)
tmp_branch = tmp_branch.strip()
if tmp_branch:
tmp_branches.append(tmp_branch)
long_branches = []
short_branches = []
for branch in tmp_branches:
tmp_remote, short_branch = branch.split('/', 1)
if tmp_remote != remote:
continue
if short_branch in skip_branches:
continue
long_branches.append(branch)
short_branches.append(short_branch)
return long_branches, short_branches
def get_remote_tags(remote, working_dir):
cmd = ['git', 'ls-remote', '--tags', remote]
tags = check_output(cmd, cwd=working_dir)
tags = tags.split("\n")
tmp_tags = []
for tag in tags:
tag = tag.strip()
if not tag:
continue
tag_pieces = tag.split(None)
if len(tag_pieces) != 2:
continue
tag_sha, tag = tag_pieces
if tag.endswith("^{}"):
continue
if not tag.startswith("refs/tags/"):
continue
tag = tag[len("refs/tags/"):]
if tag and tag not in tmp_tags:
tmp_tags.append(tag)
return tmp_tags
def get_local_branches(working_dir):
local_branches = check_output(["git", "branch"], cwd=working_dir)
local_branches = local_branches.split("\n")
tmp_branches = []
for branch in local_branches:
branch = branch.replace("*", "")
branch = branch.strip()
if not branch:
continue
tmp_branches.append(branch)
return tmp_branches
def get_local_tags(working_dir):
local_tags = check_output(["git", "tag"], cwd=working_dir)
local_tags = local_tags.split("\n")
tmp_tags = []
for tag in local_tags:
tag = tag.strip()
if not tag:
continue
tmp_tags.append(tag)
return tmp_tags
def sync_push(working_folder, target, push_tags, push_branches,
push_tags_to_branches):
source_folder = os.path.join(working_folder, "source")
res = call(['git', 'remote', 'add', 'target', target],
cwd=source_folder)
if res != 0:
sys.stderr.write("Unable to add remote to %s\n" % target)
return 1
print_blocked("Interrogating")
remote_branches, remote_short_branches = get_remote_branches(
source_folder, 'origin', ['HEAD'])
all_success = True
branches_checked = 0
for branch, short_branch in zip(remote_branches, remote_short_branches):
branches_checked += 1
print("Checking out branch '%s'" % branch)
git_cmd = ['git', 'checkout']
if short_branch != "master":
git_cmd.append('-t')
git_cmd.append(branch)
res = call(git_cmd, cwd=source_folder)
if res != 0:
sys.stderr.write("Unable to checkout remote"
" branch '%s'\n" % (branch))
all_success = False
else:
res = call(['git', 'checkout', short_branch], cwd=source_folder)
if res != 0:
sys.stderr.write("Unable to checkout"
" branch '%s'\n" % (branch))
all_success = False
if not all_success:
sys.stderr.write("Failed interrogating %s"
" branches\n" % (branches_checked))
return 1
res = call(['git', 'fetch', '-t'], cwd=source_folder)
if res != 0:
sys.stderr.write("Failed fetching tags\n")
return 1
remote_tags = get_remote_tags("target", source_folder)
local_branches = get_local_branches(source_folder)
local_tags = get_local_tags(source_folder)
print_blocked("Validating")
for tag in push_tags:
if tag not in local_tags:
sys.stderr.write("Unable to find tag '%s'\n" % (tag))
return 1
for tag_branch in push_tags_to_branches:
tmp_tag, tmp_branch = tag_branch
if tmp_tag not in local_tags:
sys.stderr.write("Unable to find tag '%s'\n" % (tmp_tag))
return 1
for branch in push_branches:
if branch not in local_branches:
sys.stderr.write("Unable to find branch '%s'\n" % (branch))
return 1
print_blocked("Pushing")
push_fails = 0
branches_to_push = []
for branch in local_branches:
if branch not in push_branches:
continue
branches_to_push.append(branch)
if branches_to_push:
for branch in branches_to_push:
print("Pushing branch '%s'" % (branch))
res = call(['git', 'push', '-u', 'target', branch],
cwd=source_folder)
if res != 0:
sys.stderr.write("Pushing branch '%s' failed\n" % branch)
push_fails += 1
else:
print("No branches to push.")
tags_to_push = []
for tag in local_tags:
if tag in remote_tags or tag not in push_tags:
continue
tags_to_push.append(tag)
if tags_to_push:
for tag in tags_to_push:
print("Pushing tag '%s'" % (tag))
res = call(['git', 'push', '-u', 'target', tag],
cwd=source_folder)
if res != 0:
sys.stderr.write("Pushing tag '%s' failed\n" % tag)
push_fails += 1
else:
print("No tags to push.")
tags_to_push_as_branches = []
for tag_branch in push_tags_to_branches:
tmp_tag, tmp_branch = tag_branch
tags_to_push_as_branches.append((tmp_tag, tmp_branch))
if tags_to_push_as_branches:
for tag, branch in tags_to_push_as_branches:
print("Pushing tag '%s' as branch '%s'" % (tag, branch))
res = call(['git', 'checkout', tag], cwd=source_folder)
if res != 0:
sys.stderr.write("Checkout of tag '%s' failed\n" % tag)
push_fails += 1
else:
res = call(['git', 'checkout', "-b", branch],
cwd=source_folder)
if res != 0:
sys.stderr.write("Checkout of branch '%s'"
" failed\n" % branch)
push_fails += 1
else:
res = call(['git', 'push', "target",
"%s:%s" % (branch, branch)],
cwd=source_folder)
if res != 0:
sys.stderr.write("Pushing tag '%s' as branch"
" '%s' failed\n" % (tag, branch))
push_fails += 1
else:
print("No tags to push as branches.")
if push_fails:
return 1
return 0
| 34.237885
| 76
| 0.556356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,131
| 0.145522
|
b2080b7d4050889b2b37d9d988f89eaa6cb3c1e8
| 11,358
|
py
|
Python
|
domain_clf_analysis.py
|
xiaoleihuang/Domain_Adaptation_ACL2018
|
c077ceb7f67f1836043df88ac16ffed53cd3a9cb
|
[
"Apache-2.0"
] | 3
|
2018-06-12T01:43:18.000Z
|
2019-10-01T16:21:43.000Z
|
domain_clf_analysis.py
|
xiaoleihuang/Domain_Adaptation_ACL2018
|
c077ceb7f67f1836043df88ac16ffed53cd3a9cb
|
[
"Apache-2.0"
] | null | null | null |
domain_clf_analysis.py
|
xiaoleihuang/Domain_Adaptation_ACL2018
|
c077ceb7f67f1836043df88ac16ffed53cd3a9cb
|
[
"Apache-2.0"
] | null | null | null |
"""
Test on one domain, and train on the other domains,
Output f1 scores and visualize them by heat map
"""
from utils import data_helper, model_helper
from sklearn.metrics import f1_score
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score
import numpy as np
np.random.seed(0)
from pandas import DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
import argparse
def domain2year(domain, name):
domain = int(domain)
if 'vaccine' == name:
if domain == 2013:
return '2013'
elif domain == 2014:
return '2014'
elif domain == 2015:
return '2015'
elif domain == 2016:
return '2016'
elif 'amazon' in name:
if domain == 1:
return '1997-99'
elif domain == 2:
return '2000-02'
elif domain == 3:
return '2003-05'
elif domain == 4:
return '2006-08'
elif domain == 5:
return '2009-11'
elif domain == 6:
return '2012-14'
elif 'yelp' in name:
if domain == 1:
return '2006-08'
elif domain == 2:
return '2009-11'
elif domain == 3:
return '2012-14'
elif domain == 4:
return '2015-17'
elif 'economy' in name:
if domain == 1:
return '1985-89'
elif domain == 2:
return '1990-94'
elif domain == 3:
return '1995-99'
elif domain == 4:
return '2000-04'
elif domain == 5:
return '2005-09'
elif domain == 6:
return '2010-14'
elif 'parties' in name:
if domain == 1:
return '1948-56'
elif domain == 2:
return '1960-68'
elif domain == 3:
return '1972-80'
elif domain == 4:
return '1984-92'
elif domain == 5:
return '1996-2004'
elif domain == 6:
return '2008-16'
def domain2month(domain, name=None):
if domain == 1:
return 'Jan-Mar'
elif domain == 2:
return 'Apr-Jun'
elif domain == 3:
return 'Jul-Sep'
else:
return 'Oct-Dec'
def cross_test_domain_clf(dataset, domain2label, data_name=None, balance=False, binary=False, ):
"""
Train on one domain, test on others
:return:
"""
uniq_domains = sorted(list(set([item[-2] for item in dataset])))
results = DataFrame([[0.0]*len(uniq_domains)]*len(uniq_domains),
index=[domain2label(item, data_name) for item in uniq_domains],
columns=[domain2label(item, data_name) for item in uniq_domains])
print(uniq_domains)
# loop through each domain
for domain in uniq_domains:
# build train_data
train_x = []
train_y = []
for item in dataset:
if domain == item[-2]:
train_x.append(item[0])
train_y.append(item[-1])
# build vectorizer and encoder
label_encoder = LabelEncoder()
if len(dataset) > 15469: # this number is length of "./yelp/yelp_Hotels_year_sample.tsv" - 1000
if not binary:
vectorizer = TfidfVectorizer(min_df=2, tokenizer=lambda x: x.split())
else:
vectorizer = TfidfVectorizer(min_df=2, tokenizer=lambda x: x.split(),
binary=True, use_idf=False, smooth_idf=False)
else:
if not binary:
vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=2)
else:
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 3),
binary=True, use_idf=False, smooth_idf=False)
# encode the data
train_y = label_encoder.fit_transform(train_y)
train_x = vectorizer.fit_transform(train_x)
# balance
if balance:
random_sampler = RandomOverSampler(random_state=0)
train_x, train_y = random_sampler.fit_sample(train_x, train_y)
# build classifier
clf = model_helper.build_lr_clf()
clf.fit(train_x, train_y)
# instead of skipping self-domain, we take the 5-fold cross-validation for this domain
results[domain2label(domain, data_name)][domain2label(domain, data_name)] = np.mean(
cross_val_score(model_helper.build_lr_clf(),
train_x, train_y, cv=5,
scoring='f1_weighted')
)
train_x = None
train_y = None
# test and evaluation
for test_domain in [item for item in uniq_domains if item != domain]:
if int(test_domain) == int(domain):
continue
test_x = []
test_y = []
for item in dataset:
if test_domain == item[-2]:
test_x.append(item[0])
test_y.append(item[-1])
# encode the data
test_y = label_encoder.transform(test_y)
test_x = vectorizer.transform(test_x)
tmp_result = str(f1_score(y_true=test_y, y_pred=clf.predict(test_x), average='weighted'))
# results[domain][test_domain] = str(f1_score(y_true=test_y, y_pred=clf.predict(test_x), average='weighted'))
# print(str(domain)+','+str(test_domain)+','+str(f1_score(y_true=test_y, y_pred=clf.predict(test_x), average='weighted')))
results[domain2label(test_domain, data_name)][domain2label(domain, data_name)] = tmp_result
test_x = None
test_y = None
# pickle.dump(results, open('cross_test_domain_results_'+str(balance)+'.pkl', 'wb'))
print(results)
return results
def viz_perform(df, title, outpath='./image/output.pdf'):
"""
Heatmap visualization
:param df: an instance of pandas DataFrame
:return:
"""
a4_dims = (11.7, 11.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.set(font_scale=1.2)
viz_plot = sns.heatmap(df, annot=True, cbar=False, ax=ax, annot_kws={"size": 24}, cmap="YlGnBu", vmin=df.values.min(), fmt='.3f')
plt.xticks(rotation=20, fontsize=25)
plt.xlabel('Train', fontsize=25)
plt.ylabel('Test', fontsize=25)
plt.title(title, fontsize=25)
viz_plot.get_figure().savefig(outpath, format='pdf')
plt.close()
if __name__ == '__main__':
"""
"""
# parser = argparse.ArgumentParser()
# parser.add_argument('--month', default=None,
# type=str, help='The path raw csv or tsv file')
# parser.add_argument('--year', default=None,
# type=str, help='The path raw csv or tsv file')
# parser.add_argument('--output', default='vaccine',
# type=str, help='data source name')
# args = parser.parse_args()
# for is_binary in [True, False]:
# # on month
# if args.month:
# dataset = data_helper.load_data(args.month)
# # test on balanced data
# print('Test on balanced data')
# test_balance = cross_test_domain_clf(dataset, balance=True, binary=is_binary)
#
# print('Test on unbalanced data')
# test_unbalance = cross_test_domain_clf(dataset, balance=False, binary=is_binary)
#
# viz_perform(test_balance, './image/'+args.output+'/cross_clf_balance_month_'+str(is_binary)+'.png')
# viz_perform(test_unbalance, './image/'+args.output+'/cross_clf_unbalance_month_'+str(is_binary)+'.png')
#
# # on year
# if args.year:
# dataset = data_helper.load_data(args.year)
# # test on balanced data
# print('Test on balanced data')
# test_balance = cross_test_domain_clf(dataset, balance=True, binary=is_binary)
#
# print('Test on unbalanced data')
# test_unbalance = cross_test_domain_clf(dataset, balance=False, binary=is_binary)
#
# viz_perform(test_balance, './image/'+args.output+'/cross_clf_balance_year_'+str(is_binary)+'.png')
# viz_perform(test_unbalance, './image/'+args.output+'/cross_clf_unbalance_year_'+str(is_binary)+'.png')
file_list = [
('./data/vaccine/vaccine_month_sample.tsv', './data/vaccine/vaccine_year_sample.tsv', 'vaccine', 'Twitter data - vaccine'),
('./data/amazon/amazon_month_sample.tsv', './data/amazon/amazon_year_sample.tsv', 'amazon', 'Reviews data - music'),# './data/amazon/amazon_review_month_sample.tsv'
('./data/yelp/yelp_Hotels_month_sample.tsv', './data/yelp/yelp_Hotels_year_sample.tsv', 'yelp_hotel', 'Reviews data - hotels'),
(None, './data/parties/parties_year_sample.tsv', 'parties', 'Politics - US political data'),
('./data/economy/economy_month_sample.tsv', './data/economy/economy_year_sample.tsv', 'economy', 'News data - economy'),
('./data/yelp/yelp_Restaurants_month_sample.tsv', './data/yelp/yelp_Restaurants_year_sample.tsv', 'yelp_rest', 'Reviews data - restaurants'), # './data/yelp/yelp_Restaurants_month_sample.tsv'
]
for pair in file_list:
print(pair)
for is_binary in [False]: # True, skip binary currently
# on month
month_file = pair[0]
year_file = pair[1]
output = pair[2]
if month_file:
dataset = data_helper.load_data(month_file)
# test on balanced data
print('Test on balanced data')
test_balance = cross_test_domain_clf(dataset, domain2month, data_name=None, balance=True, binary=is_binary)
test_balance.to_csv('./tmp/' + output+ '_month.tsv', sep='\t')
viz_perform(test_balance, pair[3],'./image/' + output + '/cross_clf_balance_month_' + str(is_binary) + '.pdf')
test_balance = None
# print('Test on unbalanced data')
# test_unbalance = cross_test_domain_clf(dataset, domain2month, data_name=None, balance=False, binary=is_binary)
# viz_perform(test_unbalance, pair[3], './image/'+output+'/cross_clf_unbalance_month_'+str(is_binary)+'.pdf')
# test_unbalance = None
# dataset = None
# on year
if year_file:
dataset = data_helper.load_data(year_file)
# test on balanced data
print('Test on balanced data')
test_balance = cross_test_domain_clf(dataset, domain2year, data_name=output, balance=True, binary=is_binary)
test_balance.to_csv('./tmp/' + output+ '_year.tsv', sep='\t')
viz_perform(test_balance, pair[3], './image/' + output + '/cross_clf_balance_year_' + str(is_binary) + '.pdf')
test_balance = None
# print('Test on unbalanced data')
# test_unbalance = cross_test_domain_clf(dataset, domain2year, data_name=output, balance=False, binary=is_binary)
# viz_perform(test_unbalance, pair[3], './image/'+output+'/cross_clf_unbalance_year_'+str(is_binary)+'.pdf')
test_unbalance = None
| 39.992958
| 199
| 0.588044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,639
| 0.408435
|
b209d756a7a9dd9b0a6aa608dc616fb5501e9ff4
| 219
|
py
|
Python
|
01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | 8
|
2018-10-01T17:35:57.000Z
|
2022-02-01T08:12:12.000Z
|
01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | null | null | null |
01 - Expressions, variables and assignments/exercises/perimeter-of-rectangle.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | 6
|
2018-07-22T19:15:21.000Z
|
2022-02-05T07:54:58.000Z
|
"""
Prints the perimeter, in inches, of a rectangle
with sides of length 4 and 7 inches.
"""
# Rectangle perimeter formula
length = 4
width = 7
perimeter = 2 * length + 2 * width  # expected: 2*4 + 2*7 = 22 inches
# Output
print(perimeter)
| 18.25
| 59
| 0.726027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 141
| 0.643836
|
b20a4404ed9e15d9b510a71724e0f8e9d7cc1046
| 645
|
py
|
Python
|
tools/leetcode.128.Longest Consecutive Sequence/leetcode.128.Longest Consecutive Sequence.submission1.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | 4
|
2015-10-10T00:30:55.000Z
|
2020-07-27T19:45:54.000Z
|
tools/leetcode.128.Longest Consecutive Sequence/leetcode.128.Longest Consecutive Sequence.submission1.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
tools/leetcode.128.Longest Consecutive Sequence/leetcode.128.Longest Consecutive Sequence.submission1.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
class Solution:
# @param {integer[]} nums
# @return {integer}
def longestConsecutive(self, nums):
if not nums:
return 0
result = 0
hashtable = set(nums)
while hashtable:
i = hashtable.pop()
cnt = 1
temp = i
while temp+1 in hashtable:
cnt += 1
temp += 1
hashtable.remove(temp)
temp = i
while temp-1 in hashtable:
cnt += 1
temp -=1
hashtable.remove(temp)
result = max(result, cnt)
return result
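# Illustrative check (the input below is chosen for illustration and is not part of
# the original submission):
#   Solution().longestConsecutive([100, 4, 200, 1, 3, 2])  # -> 4 (the run 1, 2, 3, 4)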
| 645
| 645
| 0.432558
| 645
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 625
| 0.968992
|
b20aba712b1ab01e3fb65465b63bc20687698132
| 123
|
py
|
Python
|
x_3_4.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
x_3_4.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | 1
|
2021-11-13T08:03:04.000Z
|
2021-11-13T08:03:04.000Z
|
x_3_4.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
# x_3_4
#
# Import only the floor function from the math module and do a round-down (floor) calculation
from statistics import mean
data = [7, 4, 3, 9]
print(mean(data))
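# What the header comment asks for would look roughly like this (a sketch added for
# illustration only, not part of the original exercise file):
#   from math import floor
#   print(floor(mean(data)))  # the mean is 5.75, floored to 5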
| 12.3
| 43
| 0.739837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.614973
|
b20c24ef9d6d64b2c1eb48b70a055569f3cf0291
| 690
|
py
|
Python
|
2018/21/reverse_engineered.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
2018/21/reverse_engineered.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
2018/21/reverse_engineered.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
def simulate(reg_0, find_non_loop):
seen = set()
c = 0
last_unique_c = -1
while True:
a = c | 65536
c = reg_0
while True:
c = (((c + (a & 255)) & 16777215) * 65899) & 16777215
if a < 256:
if find_non_loop:
return c
else:
if c not in seen:
seen.add(c)
last_unique_c = c
break
else:
return last_unique_c
else:
a //= 256
print(simulate(7041048, True))
print(simulate(7041048, False))
| 22.258065
| 65
| 0.401449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.031884
|
b20ca1a11af5328342bece8c8b28ae8ca5c425a2
| 7,025
|
py
|
Python
|
pybilt/lipid_grid/lipid_grid_curv.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-07-29T16:21:53.000Z
|
2022-02-02T11:44:57.000Z
|
pybilt/lipid_grid/lipid_grid_curv.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-05-15T09:30:05.000Z
|
2021-07-19T16:49:59.000Z
|
pybilt/lipid_grid/lipid_grid_curv.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 9
|
2019-08-12T11:14:45.000Z
|
2020-12-22T18:22:55.000Z
|
'''
Classes and functions to implement gridding and curvature correlation analysis for lipid bilayers.
The gridding and anlaysis procedures are based on
the decription given in section "Correlation between bilayer surface curvature and the
clustering of lipid molecules" of Koldso H, Shorthouse D, He lie J, Sansom MSP (2014)
Lipid Clustering Correlates with Membrane Curvature as Revealed by Molecular Simulations of
Complex Lipid Bilayers. PLoS Comput Biol 10(10): e1003911. doi:10.1371/journal.pcbi.1003911
However, this implementation currently uses the z position (or normal position) of the lipids' centers of mass, while
their implementaion uses "the z coordinate of the interface between the head groups of the
lipids (excluding the current species being calculated and tails in
that box."
'''
import numpy as np
from six.moves import range
class LipidGrid_2d(object):
def __init__(self, com_frame, com_frame_indices,plane,nxbins=20,nybins=20):
#store the frame and leaflet
self.frame = com_frame
#self.leaflet = ms_leaflet
#get the x and y indices
ix = plane[0]
iy = plane[1]
iz = [i for i in [0,1,2] if i not in plane][0]
#get the box dimemsions
box = com_frame.box
boxx = box[ix]
boxy = box[iy]
#box_com = com_frame.mem_com
#box_com_x = box_com[ix]
#box_com_y = box_com[iy]
#save the numbers of bins
self.x_nbins = nxbins
self.y_nbins = nybins
#initialize the edges of the and centers of the gridpoints
# x
#self.x_min = -box_com_x
#self.x_max = boxx - box_com_x
self.x_min = 0.0
self.x_max = boxx
self.x_edges = np.linspace(self.x_min,self.x_max,(nxbins+1),endpoint=True)
self.x_incr = self.x_edges[1]-self.x_edges[0]
x_incr_h = self.x_incr/2.0
self.x_centers = np.zeros(nxbins)
self.x_nedges = len(self.x_edges)
for i in range(1,self.x_nedges):
j=i-1
self.x_centers[j]=self.x_edges[j]+x_incr_h
# y
#self.y_min = -box_com_y
#self.y_max = boxy - box_com_y
self.y_min = 0.0
self.y_max = boxy
self.y_edges = np.linspace(self.y_min,self.y_max,(nybins+1),endpoint=True)
self.y_incr = self.y_edges[1]-self.y_edges[0]
y_incr_h = self.y_incr/2.0
self.y_centers = np.zeros(nybins)
self.y_nedges = len(self.y_edges)
for i in range(1,self.y_nedges):
j=i-1
self.y_centers[j]=self.y_edges[j]+y_incr_h
self.x_length = self.x_max-self.x_min
self.y_length = self.y_max-self.y_min
# get the lipid indices for this leaflet
indices = com_frame_indices
#now assign lipids to the gridpoints
self.lipid_grid = []
#cx = 0
#print self.x_edges
mx_x = -1000.0
mn_x = 1000.0
for cx in range(len(self.x_edges)-1):
self.lipid_grid.append([])
x_lower = self.x_edges[cx]
x_upper = self.x_edges[cx+1]
#print "x_upper ",x_upper, " x_lower ",x_lower
for cy in range(len(self.y_edges)-1):
self.lipid_grid[cx].append([])
y_lower = self.y_edges[cy]
y_upper = self.y_edges[cy+1]
#check lipid COMs
for i in indices:
xi = com_frame.lipidcom[i].com[ix]
yi = com_frame.lipidcom[i].com[iy]
zi = com_frame.lipidcom[i].com_unwrap[iz]
x_box = xi > x_lower and xi < x_upper
y_box = yi > y_lower and yi < y_upper
if xi < mn_x:
mn_x = xi
if xi > mx_x:
mx_x = xi
if x_box and y_box:
#add to this grid
self.lipid_grid[cx][cy].append((i, com_frame.lipidcom[i].type, zi))
    def get_index_at(self, ix, iy):
        # each grid box is a plain list of (index, type, z) tuples, so collect the
        # first element of each tuple rather than using array-style slicing
        return np.array([entry[0] for entry in self.lipid_grid[ix][iy]])
    def get_z_at(self, ix, iy):
        return np.array([entry[2] for entry in self.lipid_grid[ix][iy]])
class LipidGrids(object):
def __init__(self, com_frame, leaflets,plane,nxbins=3,nybins=3):
#store the frame and leaflet
self.frame = com_frame
self.leaflets = leaflets
self.plane = plane
self.norm = [i for i in [0,1,2] if i not in plane][0]
self.nbins_x = nxbins
self.nbins_y = nybins
self.leaf_grid = {}
self.myframe = com_frame.mdnumber
#initialize the grids
#upper
upper_indices = leaflets['upper'].get_member_indices()
self.leaf_grid['upper'] = LipidGrid_2d(com_frame,upper_indices,plane,nxbins=nxbins,nybins=nybins)
#lower
lower_indices = leaflets['lower'].get_member_indices()
self.leaf_grid['lower'] = LipidGrid_2d(com_frame,lower_indices,plane,nxbins=nxbins,nybins=nybins)
return
def norm_displacement_cross_correlation(self):
output = dict()
for leaf in self.leaflets.keys():
output[leaf] = dict()
ll_types = self.leaflets[leaf].get_group_names()
for l_type in ll_types:
#loop over grid boxes
count = []
z_vals = []
n_box = 0.0
for xb in self.leaf_grid[leaf].lipid_grid:
for yb in xb:
box_count = 0
box_z_vals = []
for lipid in yb:
# print(lipid)
lipid_type = lipid[1]
lipid_z = lipid[2]
#print "lipid_z: ",lipid_z
if lipid_type == l_type:
box_count+=1
else:
box_z_vals.append(lipid_z)
n_box+=1
if len(box_z_vals) > 0:
#n_box+=1.0
box_z_avg = box_z_vals[0]
if len(box_z_vals) > 1:
box_z_avg = np.array(box_z_vals).mean()
count.append(float(box_count))
z_vals.append(box_z_avg)
cross_corr = 0.0
if len(count) > 1 and len(z_vals) >1:
count = np.array(count)
z_vals = np.array(z_vals)
count_mean = count.mean()
count_std = count.std()
z_mean = z_vals.mean()
z_std = z_vals.std()
cross_sum = np.dot(count-count_mean, z_vals-z_mean)
cross_corr = cross_sum/(count_std*z_std*n_box)
if np.isnan(cross_corr):
cross_corr = 0.0
output[leaf][l_type] = cross_corr
#quit()
return output
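# A minimal usage sketch tying these classes to the module docstring above (the
# com_frame and leaflets objects are assumed to come from the surrounding pybilt
# COM-frame pipeline and are not constructed here; 'POPC' is an illustrative name):
#   grids = LipidGrids(com_frame, leaflets, plane=[0, 1], nxbins=20, nybins=20)
#   corr = grids.norm_displacement_cross_correlation()
#   # corr['upper']['POPC'] -> correlation between POPC counts per grid box and the
#   # normal displacement of the other lipids in that box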
| 39.914773
| 117
| 0.540641
| 6,157
| 0.876441
| 0
| 0
| 0
| 0
| 0
| 0
| 1,540
| 0.219217
|
b20cc44e10c5f1d7b1d539469ba4792e3e3334fc
| 492
|
py
|
Python
|
security.py
|
Raghav714/intruder-alarm
|
c27825e5b483b6dc18704e0da76500b348174432
|
[
"MIT"
] | 4
|
2018-10-02T06:37:50.000Z
|
2021-10-31T16:41:59.000Z
|
security.py
|
Raghav714/intruder-alarm
|
c27825e5b483b6dc18704e0da76500b348174432
|
[
"MIT"
] | null | null | null |
security.py
|
Raghav714/intruder-alarm
|
c27825e5b483b6dc18704e0da76500b348174432
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import pygame
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
pygame.mixer.init()
pygame.mixer.music.load("1.mp3")
while(1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
flag = np.std(fgmask)
if flag>50:
print("some one came")
pygame.mixer.music.play()
cv2.imshow('fgmask',frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
pygame.mixer.music.stop()
break
cap.release()
cv2.destroyAllWindows()
| 20.5
| 43
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.075203
|
b20cdd9f8c550b03afcaa9236a4a608b7379d8bd
| 453
|
py
|
Python
|
pygeos/measurements.py
|
jorisvandenbossche/pygeos
|
0a25af4ae1c96d11752318d2755f4f3342611b17
|
[
"BSD-3-Clause"
] | null | null | null |
pygeos/measurements.py
|
jorisvandenbossche/pygeos
|
0a25af4ae1c96d11752318d2755f4f3342611b17
|
[
"BSD-3-Clause"
] | null | null | null |
pygeos/measurements.py
|
jorisvandenbossche/pygeos
|
0a25af4ae1c96d11752318d2755f4f3342611b17
|
[
"BSD-3-Clause"
] | null | null | null |
from . import ufuncs
__all__ = ["area", "distance", "length", "hausdorff_distance"]
def area(geometries):
return ufuncs.area(geometries)
def distance(a, b):
return ufuncs.distance(a, b)
def length(geometries):
return ufuncs.length(geometries)
def hausdorff_distance(a, b, densify=None):
if densify is None:
return ufuncs.hausdorff_distance(a, b)
else:
return ufuncs.haussdorf_distance_densify(a, b, densify)
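# Usage sketch (point geometries would normally be built with a pygeos constructor
# such as pygeos.points; the values below are illustrative):
#   a, b = points(0, 0), points(3, 4)
#   distance(a, b)            # -> 5.0
#   hausdorff_distance(a, b)  # -> 5.0 (for single points it equals the distance)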
| 19.695652
| 63
| 0.697572
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.09713
|
b20ea0b58e52db3ee0246fdb58558d2834cf2129
| 9,539
|
py
|
Python
|
naff/models/naff/extension.py
|
Discord-Snake-Pit/dis_snek
|
45748467838b31d871a7166dbeb3aaa238ad94e3
|
[
"MIT"
] | 64
|
2021-10-12T15:31:36.000Z
|
2022-03-29T18:25:47.000Z
|
naff/models/naff/extension.py
|
Discord-Snake-Pit/dis_snek
|
45748467838b31d871a7166dbeb3aaa238ad94e3
|
[
"MIT"
] | 166
|
2021-10-10T16:27:52.000Z
|
2022-03-30T09:04:54.000Z
|
naff/models/naff/extension.py
|
Discord-Snake-Pit/dis_snek
|
45748467838b31d871a7166dbeb3aaa238ad94e3
|
[
"MIT"
] | 34
|
2021-10-10T13:26:41.000Z
|
2022-03-23T13:59:35.000Z
|
import asyncio
import inspect
import logging
from typing import Awaitable, List, TYPE_CHECKING, Callable, Coroutine, Optional
import naff.models.naff as naff
from naff.client.const import logger_name, MISSING
from naff.client.utils.misc_utils import wrap_partial
from naff.models.naff.tasks import Task
if TYPE_CHECKING:
from naff.client import Client
from naff.models.naff import AutoDefer, BaseCommand, Listener
from naff.models.naff import Context
log = logging.getLogger(logger_name)
__all__ = ("Extension",)
class Extension:
"""
    A class that allows you to separate your commands and listeners into separate files. Extensions require an entrypoint in the same file called `setup`; this function allows the client to load the Extension.
??? Hint "Example Usage:"
```python
class ExampleExt(Extension):
def __init__(self, bot):
print("Extension Created")
@prefixed_command()
            async def some_command(self, ctx):
                await ctx.send(f"I was sent from an extension called {self.name}")
```
Attributes:
bot Client: A reference to the client
name str: The name of this Extension (`read-only`)
description str: A description of this Extension
extension_checks str: A list of checks to be ran on any command in this extension
extension_prerun List: A list of coroutines to be run before any command in this extension
extension_postrun List: A list of coroutines to be run after any command in this extension
"""
bot: "Client"
name: str
extension_name: str
description: str
extension_checks: List
extension_prerun: List
extension_postrun: List
extension_error: Optional[Callable[..., Coroutine]]
_commands: List
_listeners: List
auto_defer: "AutoDefer"
def __new__(cls, bot: "Client", *args, **kwargs) -> "Extension":
new_cls = super().__new__(cls)
new_cls.bot = bot
new_cls.name = cls.__name__
new_cls.extension_checks = []
new_cls.extension_prerun = []
new_cls.extension_postrun = []
new_cls.extension_error = None
new_cls.auto_defer = MISSING
new_cls.description = kwargs.get("Description", None)
if not new_cls.description:
new_cls.description = inspect.cleandoc(cls.__doc__) if cls.__doc__ else None
# load commands from class
new_cls._commands = []
new_cls._listeners = []
for _name, val in inspect.getmembers(
new_cls, predicate=lambda x: isinstance(x, (naff.BaseCommand, naff.Listener, Task))
):
if isinstance(val, naff.BaseCommand):
val.extension = new_cls
val = wrap_partial(val, new_cls)
if not isinstance(val, naff.PrefixedCommand) or not val.is_subcommand:
# we do not want to add prefixed subcommands
new_cls._commands.append(val)
if isinstance(val, naff.ModalCommand):
bot.add_modal_callback(val)
elif isinstance(val, naff.ComponentCommand):
bot.add_component_callback(val)
elif isinstance(val, naff.InteractionCommand):
bot.add_interaction(val)
else:
bot.add_prefixed_command(val)
elif isinstance(val, naff.Listener):
val = wrap_partial(val, new_cls)
bot.add_listener(val)
new_cls.listeners.append(val)
elif isinstance(val, Task):
wrap_partial(val, new_cls)
log.debug(
f"{len(new_cls._commands)} commands and {len(new_cls.listeners)} listeners"
f" have been loaded from `{new_cls.name}`"
)
new_cls.extension_name = inspect.getmodule(new_cls).__name__
new_cls.bot.ext[new_cls.name] = new_cls
if hasattr(new_cls, "async_start"):
if inspect.iscoroutinefunction(new_cls.async_start):
bot.async_startup_tasks.append(new_cls.async_start())
else:
raise TypeError("async_start is a reserved method and must be a coroutine")
return new_cls
@property
def __name__(self) -> str:
return self.name
@property
def commands(self) -> List["BaseCommand"]:
"""Get the commands from this Extension."""
return self._commands
@property
def listeners(self) -> List["Listener"]:
"""Get the listeners from this Extension."""
return self._listeners
def drop(self) -> None:
"""Called when this Extension is being removed."""
for func in self._commands:
if isinstance(func, naff.ModalCommand):
for listener in func.listeners:
# noinspection PyProtectedMember
self.bot._modal_callbacks.pop(listener)
elif isinstance(func, naff.ComponentCommand):
for listener in func.listeners:
# noinspection PyProtectedMember
self.bot._component_callbacks.pop(listener)
elif isinstance(func, naff.InteractionCommand):
for scope in func.scopes:
if self.bot.interactions.get(scope):
self.bot.interactions[scope].pop(func.resolved_name, [])
elif isinstance(func, naff.PrefixedCommand):
if not func.is_subcommand:
self.bot.prefixed_commands.pop(func.name, None)
for alias in func.aliases:
self.bot.prefixed_commands.pop(alias, None)
for func in self.listeners:
self.bot.listeners[func.event].remove(func)
self.bot.ext.pop(self.name, None)
        log.debug(f"{self.name} has been dropped")
def add_ext_auto_defer(self, ephemeral: bool = False, time_until_defer: float = 0.0) -> None:
"""
        Add an auto defer for all commands in this extension.
Args:
ephemeral: Should the command be deferred as ephemeral
time_until_defer: How long to wait before deferring automatically
"""
self.auto_defer = naff.AutoDefer(enabled=True, ephemeral=ephemeral, time_until_defer=time_until_defer)
def add_ext_check(self, coroutine: Callable[["Context"], Awaitable[bool]]) -> None:
"""
Add a coroutine as a check for all commands in this extension to run. This coroutine must take **only** the parameter `context`.
??? Hint "Example Usage:"
```python
def __init__(self, bot):
self.add_ext_check(self.example)
@staticmethod
async def example(context: Context):
if context.author.id == 123456789:
return True
return False
```
Args:
coroutine: The coroutine to use as a check
"""
if not asyncio.iscoroutinefunction(coroutine):
raise TypeError("Check must be a coroutine")
if not self.extension_checks:
self.extension_checks = []
self.extension_checks.append(coroutine)
def add_extension_prerun(self, coroutine: Callable[..., Coroutine]) -> None:
"""
Add a coroutine to be run **before** all commands in this Extension.
Note:
Pre-runs will **only** be run if the commands checks pass
??? Hint "Example Usage:"
```python
def __init__(self, bot):
self.add_extension_prerun(self.example)
            async def example(self, ctx: Context):
                await ctx.send("I ran first")
```
Args:
coroutine: The coroutine to run
"""
if not asyncio.iscoroutinefunction(coroutine):
raise TypeError("Callback must be a coroutine")
if not self.extension_prerun:
self.extension_prerun = []
self.extension_prerun.append(coroutine)
def add_extension_postrun(self, coroutine: Callable[..., Coroutine]) -> None:
"""
Add a coroutine to be run **after** all commands in this Extension.
??? Hint "Example Usage:"
```python
def __init__(self, bot):
self.add_extension_postrun(self.example)
            async def example(self, ctx: Context):
                await ctx.send("I ran last")
```
Args:
coroutine: The coroutine to run
"""
if not asyncio.iscoroutinefunction(coroutine):
raise TypeError("Callback must be a coroutine")
if not self.extension_postrun:
self.extension_postrun = []
self.extension_postrun.append(coroutine)
def set_extension_error(self, coroutine: Callable[..., Coroutine]) -> None:
"""
Add a coroutine to handle any exceptions raised in this extension.
??? Hint "Example Usage:"
```python
def __init__(self, bot):
                self.set_extension_error(self.example)
            ```
        Args:
coroutine: The coroutine to run
"""
if not asyncio.iscoroutinefunction(coroutine):
raise TypeError("Callback must be a coroutine")
if self.extension_error:
log.warning("Extension error callback has been overridden!")
self.extension_error = coroutine
| 35.726592
| 200
| 0.605095
| 9,007
| 0.944229
| 0
| 0
| 341
| 0.035748
| 0
| 0
| 3,799
| 0.39826
|
b20eabd7816b307c80c7a57deaf784b914a0c831
| 2,619
|
py
|
Python
|
model/State.py
|
BrandonTheBuilder/thermawesome
|
b2f2cb95e1181f05a112193be11baa18e10d39b1
|
[
"MIT"
] | null | null | null |
model/State.py
|
BrandonTheBuilder/thermawesome
|
b2f2cb95e1181f05a112193be11baa18e10d39b1
|
[
"MIT"
] | null | null | null |
model/State.py
|
BrandonTheBuilder/thermawesome
|
b2f2cb95e1181f05a112193be11baa18e10d39b1
|
[
"MIT"
] | null | null | null |
from CoolProp import CoolProp as CP
class State(object):
"""
The state of a fluid is defined with two unique intensive properties
this class keeps track of the state of the fluid and solves for all other
intensive properties.
"""
def __init__(self, fluid, **kwargs):
"""
fluid: The type of fluid this is.
flowRate: The mass flow rate of the stream, kg/s
kwargs: intensive properties to define the state.
The Properties that we are interested in are:
h, specific Enthalpy
u, specific Internal Energy
v, specific Volume
s, specific Entropy
m, specific mass flow rate
"""
self.defined = False
self.fluid = fluid
self.properties = dict()
if kwargs is not None:
self.properties.update(kwargs)
def define(self, **kwargs):
"""
        Define the fluid state based on the input properties
"""
#Make a list of defined properties
inputProp = []
if kwargs is not None:
self.properties.update(kwargs)
for key in self.properties.keys():
inputProp.extend([key.capitalize(), self.properties[key]])
inputProp.append(self.fluid)
try:
self.properties.update(
T = CP.PropsSI('T', *inputProp),
P = CP.PropsSI('P', *inputProp),
h = CP.PropsSI('H', *inputProp),
s = CP.PropsSI('S',*inputProp),
u = CP.PropsSI('U', *inputProp),
v = 1/CP.PropsSI('D', *inputProp))
self.defined = True
except Exception as ex:
self.defined = False
            print(ex)
return self.defined
def exergy_f(self, t0, p0):
deadState = State('Water', T=t0, P=p0)
if deadState.define():
self._exergy_f = ((self.properties['h'] - deadState.properties['h'])
-t0*(self.properties['s']-deadState.properties['s']))
return self._exergy_f
else:
return False
def exergy(self, t0, p0):
deadState = State('Water', T=t0, P=p0)
if deadState.define():
self._exergy = ((self.properties['u'] - deadState.properties['u'])
+p0*(self.properties['v'] - deadState.properties['v'])
-t0*(self.properties['s']-deadState.properties['s']))
return self._exergy
else:
return False
def __add__(self, other):
pass
def isDefined(self):
pass
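# A minimal usage sketch (assumes CoolProp is installed; the numbers below are
# illustrative, not taken from any particular model):
#   inlet = State('Water', T=300.0, P=101325.0)
#   if inlet.define():
#       print(inlet.properties['h'])                   # specific enthalpy, J/kg
#       print(inlet.exergy_f(t0=298.15, p0=101325.0))  # flow exergy vs. dead state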
| 33.576923
| 82
| 0.544101
| 2,577
| 0.983963
| 0
| 0
| 0
| 0
| 0
| 0
| 762
| 0.290951
|
b20ed9c65d8b7c88f2047aafe3f3e3d7c3016629
| 2,401
|
py
|
Python
|
dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py
|
data61/Openboard
|
aaf7ef49e05c0771094efc6be811c6ae88055252
|
[
"Apache-2.0"
] | 2
|
2017-08-29T23:05:51.000Z
|
2019-04-02T21:11:35.000Z
|
dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py
|
data61/Openboard
|
aaf7ef49e05c0771094efc6be811c6ae88055252
|
[
"Apache-2.0"
] | 1
|
2019-04-02T21:11:26.000Z
|
2019-04-03T15:12:57.000Z
|
dashboard_api/widget_def/migrations/0059_auto_20160701_0929.py
|
data61/Openboard
|
aaf7ef49e05c0771094efc6be811c6ae88055252
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-30 23:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('widget_def', '0058_auto_20160617_1124'),
]
operations = [
migrations.AlterUniqueTogether(
name='geodatasetdeclaration',
unique_together=set([]),
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='dataset',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='frequency',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='location',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='theme',
),
migrations.RemoveField(
model_name='location',
name='geo_window',
),
migrations.AlterUniqueTogether(
name='widgetdeclaration',
unique_together=set([]),
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='definition',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='frequency',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='location',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='theme',
),
migrations.AddField(
model_name='graphdefinition',
name='cluster_label',
field=models.CharField(default=b'cluster', help_text=b'Not used for line graphs', max_length=120),
),
migrations.AddField(
model_name='graphdefinition',
name='dataset_label',
field=models.CharField(default=b'dataset', max_length=120),
),
migrations.DeleteModel(
name='Frequency',
),
migrations.DeleteModel(
name='GeoDatasetDeclaration',
),
migrations.DeleteModel(
name='Location',
),
migrations.DeleteModel(
name='Theme',
),
migrations.DeleteModel(
name='WidgetDeclaration',
),
]
| 28.247059
| 110
| 0.549354
| 2,244
| 0.934611
| 0
| 0
| 0
| 0
| 0
| 0
| 597
| 0.248646
|
b74658fcd0b086ae391a31278701946a2e7748a0
| 7,649
|
py
|
Python
|
ngraph/python/tests/test_ngraph/test_ops_reshape.py
|
mnosov/openvino
|
c52c4916be0369f092f7da6c162b6c61c37c08d7
|
[
"Apache-2.0"
] | null | null | null |
ngraph/python/tests/test_ngraph/test_ops_reshape.py
|
mnosov/openvino
|
c52c4916be0369f092f7da6c162b6c61c37c08d7
|
[
"Apache-2.0"
] | 21
|
2021-02-16T13:02:05.000Z
|
2022-02-21T13:05:06.000Z
|
ngraph/python/tests/test_ngraph/test_ops_reshape.py
|
mmakridi/openvino
|
769bb7709597c14debdaa356dd60c5a78bdfa97e
|
[
"Apache-2.0"
] | null | null | null |
# ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
)
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957),
pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957),
pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_40957),
pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_40957),
pytest.param(np.int64, np.int64(-1234567), marks=xfail_issue_40957),
pytest.param(np.uint8, np.uint8(63), marks=xfail_issue_40957),
pytest.param(np.uint16, np.uint16(12345), marks=xfail_issue_40957),
pytest.param(np.uint32, np.uint32(123456), marks=xfail_issue_40957),
pytest.param(np.uint64, np.uint64(1234567), marks=xfail_issue_40957),
],
)
def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type",
[
pytest.param(np.float32, marks=xfail_issue_40957),
pytest.param(np.float64, marks=xfail_issue_40957),
],
)
def test_constant_from_float_array(val_type):
np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, range_start, range_end",
[
(np.int8, -8, 8),
(np.int16, -64, 64),
(np.int32, -1024, 1024),
(np.int64, -16383, 16383),
(np.uint8, 0, 8),
(np.uint16, 0, 64),
(np.uint32, 0, 1024),
(np.uint64, 0, 16383),
],
)
def test_constant_from_integer_array(val_type, range_start, range_end):
np.random.seed(133391)
input_data = np.array(
np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type
)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
def test_broadcast_numpy():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter)
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_broadcast_bidirectional():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL")
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_gather():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_axes = np.array([1], np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ng.gather, input_indices, input_axes)
assert np.allclose(result, expected)
def test_transpose():
input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(
(3, 3, 224, 224)
)
input_order = np.array([0, 2, 3, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.transpose, input_order)
expected = np.transpose(input_tensor, input_order)
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."
)
def test_tile():
input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3))
repeats = np.array([2, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.tile, repeats)
expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3))
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'"
)
def test_strided_slice():
input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
begin = np.array([1, 0], dtype=np.int32)
end = np.array([0, 0], dtype=np.int32)
strides = np.array([1, 1], dtype=np.int32)
begin_mask = np.array([0, 0, 0], dtype=np.int32)
end_mask = np.array([0, 0, 0], dtype=np.int32)
new_axis_mask = np.array([0, 1, 0], dtype=np.int32)
shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32)
ellipsis_mask = np.array([0, 0, 0], dtype=np.int32)
result = run_op_node(
[input_tensor],
ng.strided_slice,
begin,
end,
strides,
begin_mask,
end_mask,
new_axis_mask,
shrink_axis_mask,
ellipsis_mask,
)
expected = np.array(
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32
).reshape((1, 3, 4))
assert np.allclose(result, expected)
def test_reshape_v1():
A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24))
shape = np.array([0, -1, 4], dtype=np.int32)
special_zero = True
expected_shape = np.array([2, 150, 4])
expected = np.reshape(A, expected_shape)
result = run_op_node([A], ng.reshape, shape, special_zero)
assert np.allclose(result, expected)
def test_shape_of():
input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
result = run_op_node([input_tensor], ng.shape_of)
assert np.allclose(result, [3, 3])
| 32.969828
| 109
| 0.651458
| 0
| 0
| 0
| 0
| 3,913
| 0.51157
| 0
| 0
| 1,074
| 0.140411
|
b746b8cda074f334edc7ccba71a84d7a2cd55be1
| 1,980
|
py
|
Python
|
malwarescan/wsclient.py
|
lbahtarliev/MalwareScan
|
495e2fd3ceb3498c651ddd360a4cc2eb9571a10b
|
[
"Unlicense"
] | 3
|
2018-12-06T03:09:16.000Z
|
2021-02-25T01:13:05.000Z
|
malwarescan/wsclient.py
|
lbahtarliev/MalwareScan
|
495e2fd3ceb3498c651ddd360a4cc2eb9571a10b
|
[
"Unlicense"
] | 9
|
2018-12-10T18:44:14.000Z
|
2019-02-06T21:13:31.000Z
|
malwarescan/wsclient.py
|
lbahtarliev/MalwareScan
|
495e2fd3ceb3498c651ddd360a4cc2eb9571a10b
|
[
"Unlicense"
] | 4
|
2019-06-04T13:46:24.000Z
|
2021-02-25T02:23:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import ssl
import click
from json.decoder import JSONDecodeError
from websocket import WebSocketException
from websocket import WebSocketConnectionClosedException
from websocket import create_connection
from datetime import datetime as dtime
from .app import create_app
flask_app = create_app()
def websocket_scan_thread(ws):
from mods.api.tasks import eval_result
while True:
msg = ""
try:
msg = ws.recv()
# print(dtime.now(), "[RESPONSE]", type(msg))
msg = json.loads(msg)
# print(dtime.now(), "[PARSED]", msg)
eval_result.apply_async(args=(msg,))
print(
dtime.now(),
"[!] [{server_time}] Result for file with SHA1 Hash: {sha1} | cas id: {id}"
.format(**msg))
except (KeyError, JSONDecodeError, WebSocketException, OSError,
WebSocketConnectionClosedException) as e:
print(dtime.now(), "[ERROR]: %s" % (e))
sys.exit(1)
pass
@click.command(
context_settings=dict(ignore_unknown_options=True, help_option_names=[]))
def run_wsclient():
"""Websocket listener for evaluation results"""
CAS_API = flask_app.config['CAS_API']
secure_prefix = "s"
token = CAS_API['token']
print(f"Loading token from app.config... {token}")
host = CAS_API['host']
print(f"Loading host from app.config... {host}")
headers = {'X-API-TOKEN': token}
# Subscribe to the websocket publisher.
url = "ws%s://%s/rapi/ws/cas_task" % (secure_prefix, host)
try:
ws = create_connection(
url, sslopt={"cert_reqs": ssl.CERT_NONE}, header=headers)
except (OSError,) as e:
print(dtime.now(), "[ERROR]: %s" % (e))
sys.exit(2)
pass
thread = websocket_scan_thread(ws)
thread.start()
| 33
| 92
| 0.59899
| 0
| 0
| 0
| 0
| 838
| 0.423232
| 0
| 0
| 481
| 0.242929
|