Dataset schema (one record per row, columns in this order):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
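Each row below is one record in the order of the columns above; content holds the filtered source file and original_content the unfiltered one. A minimal inspection sketch, assuming purely for illustration that this split has been exported to a local rows.parquet file (no storage location is stated anywhere in this dump):

import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("rows.parquet")

# How much text the filtering removed from each file (derived column, defined here only).
df["removed_chars"] = df["original_content"].str.len() - df["content"].str.len()
print(df[["max_stars_repo_path", "size", "avg_line_length", "removed_chars"]].head())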
843a7a0fcaaeddc92d334ae668dee6b9974e0a0d
| 92
|
py
|
Python
|
ASGama CTF/[RE] xor in reverse/solver.py
|
bemrdo/CTF-2019
|
424512f7c43278d72091aa737da78907c14f9fc1
|
[
"MIT"
] | null | null | null |
ASGama CTF/[RE] xor in reverse/solver.py
|
bemrdo/CTF-2019
|
424512f7c43278d72091aa737da78907c14f9fc1
|
[
"MIT"
] | null | null | null |
ASGama CTF/[RE] xor in reverse/solver.py
|
bemrdo/CTF-2019
|
424512f7c43278d72091aa737da78907c14f9fc1
|
[
"MIT"
] | 1
|
2020-03-14T07:24:12.000Z
|
2020-03-14T07:24:12.000Z
|
s = "a)))KkFmQ*wFz)TixK*||"
flag = ''
for i in s:
flag += chr(ord(i) ^ 25)
print flag
| 11.5
| 28
| 0.51087
|
s = "a)))KkFmQ*wFz)TixK*||"
flag = ''
for i in s:
flag += chr(ord(i) ^ 25)
print flag
| 0
| 0
| 0
|
9a4954bf539c0495ed9582a2a437a584b81eafba
| 1,537
|
py
|
Python
|
ELDAmwl/tests/test_factory.py
|
actris-scc/ELDAmwl
|
c4d8426e6609a00837779a80d4acd39c580a0178
|
[
"MIT"
] | 1
|
2021-12-06T09:48:07.000Z
|
2021-12-06T09:48:07.000Z
|
ELDAmwl/tests/test_factory.py
|
actris-scc/ELDAmwl
|
c4d8426e6609a00837779a80d4acd39c580a0178
|
[
"MIT"
] | null | null | null |
ELDAmwl/tests/test_factory.py
|
actris-scc/ELDAmwl
|
c4d8426e6609a00837779a80d4acd39c580a0178
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for Signals"""
from ELDAmwl.bases.factory import BaseOperation
from ELDAmwl.bases.factory import BaseOperationFactory
from ELDAmwl.component.registry import Registry
from unittest.mock import patch
import unittest
DB_DATA = [
('TestA', OperationA),
('TestB', OperationB),
]
| 26.964912
| 109
| 0.7365
|
# -*- coding: utf-8 -*-
"""Tests for Signals"""
from ELDAmwl.bases.factory import BaseOperation
from ELDAmwl.bases.factory import BaseOperationFactory
from ELDAmwl.component.registry import Registry
from unittest.mock import patch
import unittest
class Factory(BaseOperationFactory):
pass
class OperationA(BaseOperation):
pass
class OperationB(BaseOperation):
pass
DB_DATA = [
('TestA', OperationA),
('TestB', OperationB),
]
def test_factory_registration():
registry = Registry()
registry.register_class(Factory, 'TestA', OperationA)
registry.register_class(Factory, 'TestB', OperationB)
assert len(registry.factory_registry[Factory.name].registry) == 2
assert registry.get_factory_registration(Factory).find_class_by_name('TestA') == OperationA # noqa E501
assert registry.get_factory_registration(Factory).find_class_by_name('TestB') == OperationB # noqa E501
assert registry.find_class_by_name(Factory, 'TestA') == OperationA
assert registry.find_class_by_name(Factory, 'TestB') == OperationB
class TestFactory(unittest.TestCase):
@patch.object(Factory, 'get_classname_from_db')
def test_factory(self, mock_get_classname_from_db):
from ELDAmwl.component.registry import registry
for klass_name, klass in DB_DATA:
registry.register_class(Factory, klass_name, klass)
for klass_name, klass in DB_DATA:
mock_get_classname_from_db.return_value = klass_name
assert Factory()().__class__ == klass
| 940
| 159
| 115
|
bb519dce1d09797f72dd8f2de1841806604a851b
| 3,797
|
py
|
Python
|
pygtkweb/demos/012-label.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-09-19T09:14:16.000Z
|
2018-09-19T09:14:16.000Z
|
pygtkweb/demos/012-label.py
|
andreyvit/pyjamas
|
1154abe3340a84dba7530b8174aaddecfc1a0944
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
pygtkweb/demos/012-label.py
|
andreyvit/pyjamas
|
1154abe3340a84dba7530b8174aaddecfc1a0944
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-08-13T20:32:25.000Z
|
2019-08-13T20:32:25.000Z
|
#!/usr/bin/env python
# example label.py
import pygtk
pygtk.require('2.0')
import gtk
if __name__ == "__main__":
Labels()
main()
| 39.552083
| 82
| 0.520411
|
#!/usr/bin/env python
# example label.py
import pygtk
pygtk.require('2.0')
import gtk
class Labels:
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("destroy", lambda w: gtk.main_quit())
self.window.set_title("Label")
vbox = gtk.VBox(False, 5)
hbox = gtk.HBox(False, 5)
self.window.add(hbox)
hbox.pack_start(vbox, False, False, 0)
self.window.set_border_width(5)
frame = gtk.Frame("Normal Label")
label = gtk.Label("This is a Normal label")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Multi-line Label")
label = gtk.Label("This is a Multi-line label.\nSecond line\n"
"Third line")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Left Justified Label")
label = gtk.Label("This is a Left-Justified\n"
"Multi-line label.\nThird line")
label.set_justify(gtk.JUSTIFY_LEFT)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Right Justified Label")
label = gtk.Label("This is a Right-Justified\nMulti-line label.\n"
"Fourth line, (j/k)")
label.set_justify(gtk.JUSTIFY_RIGHT)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
vbox = gtk.VBox(False, 5)
hbox.pack_start(vbox, False, False, 0)
frame = gtk.Frame("Line wrapped label")
label = gtk.Label("This is an example of a line-wrapped label. It "
"should not be taking up the entire "
"width allocated to it, but automatically "
"wraps the words to fit. "
"The time has come, for all good men, to come to "
"the aid of their party. "
"The sixth sheik's six sheep's sick.\n"
" It supports multiple paragraphs correctly, "
"and correctly adds "
"many extra spaces. ")
label.set_line_wrap(True)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Filled, wrapped label")
label = gtk.Label("This is an example of a line-wrapped, filled label. "
"It should be taking "
"up the entire width allocated to it. "
"Here is a sentence to prove "
"my point. Here is another sentence. "
"Here comes the sun, do de do de do.\n"
" This is a new paragraph.\n"
" This is another newer, longer, better "
"paragraph. It is coming to an end, "
"unfortunately.")
label.set_justify(gtk.JUSTIFY_FILL)
label.set_line_wrap(True)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Underlined label")
label = gtk.Label("This label is underlined!\n"
"This one is underlined in quite a funky fashion")
label.set_justify(gtk.JUSTIFY_LEFT)
label.set_pattern(
"_________________________ _ _________ _ ______ __ _______ ___")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
self.window.show_all ()
def main():
gtk.main()
return 0
if __name__ == "__main__":
Labels()
main()
| 3,593
| -8
| 72
|
8f3cea5c3a663cdfa9d722dafbf2c0cf1621974b
| 188
|
py
|
Python
|
3day/for10.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/for10.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/for10.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
# for10.py
# [ input variable for extracted variable in target if condition ]
# a: numbers from 1 to 10
a = [ i for i in range(1,11) ]
print(a)
# b: numbers from 1 to 10
b = [ i+1 for i in range(10) ]
print(b)
print(id(a) == id(b))
| 13.428571
| 31
| 0.526596
|
# for10.py
# [ input variable for extracted variable in target if condition ]
# a: numbers from 1 to 10
a = [ i for i in range(1,11) ]
print(a)
# b: numbers from 1 to 10
b = [ i+1 for i in range(10) ]
print(b)
print(id(a) == id(b))
| 0
| 0
| 0
|
1534a129dbfddf083511bb0726870718c439eedb
| 1,417
|
py
|
Python
|
plot.py
|
yy-zhou/SVM-Spam-SMS-classifier
|
e6ac70be8fa54f7e7ab4fead06489d4d70985dd3
|
[
"MIT"
] | 1
|
2019-02-21T15:42:03.000Z
|
2019-02-21T15:42:03.000Z
|
plot.py
|
yy-zhou/SVM-Spam-SMS-classifier
|
e6ac70be8fa54f7e7ab4fead06489d4d70985dd3
|
[
"MIT"
] | null | null | null |
plot.py
|
yy-zhou/SVM-Spam-SMS-classifier
|
e6ac70be8fa54f7e7ab4fead06489d4d70985dd3
|
[
"MIT"
] | null | null | null |
__author__ = 'BorisMirage'
# --- coding:utf-8 ---
'''
Create by BorisMirage
File Name: plot
Create Time: 2018-12-02 14:45
'''
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
if __name__ == '__main__':
pass
| 25.763636
| 75
| 0.542696
|
__author__ = 'BorisMirage'
# --- coding:utf-8 ---
'''
Create by BorisMirage
File Name: plot
Create Time: 2018-12-02 14:45
'''
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
def svm_plot(x, y):
def get_data(x, y):
# clf = svm.SVC(kernel='linear', C=10)
n_samples, n_features = len(x), len(x[0])
return x, y, n_samples, n_features
def plot_embedding(data, label, title):
x_min, x_max = np.min(data, 0), np.max(data, 0)
data = (data - x_min) / (x_max - x_min)
fig = plt.figure()
ax = plt.subplot(111)
for i in range(data.shape[0]):
plt.text(data[i, 0], data[i, 1], str(label[i]),
color=plt.cm.Set1(label[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
plt.title(title)
return fig
def main():
data, label, n_samples, n_features = get_data(x, y)
print('Computing t-SNE embedding')
tsne = TSNE(n_components=3, init='pca', random_state=0)
t0 = time()
result = tsne.fit_transform(data)
fig = plot_embedding(result, label,
't-SNE embedding of the messages (time %.2fs)'
% (time() - t0))
plt.show()
main()
if __name__ == '__main__':
pass
| 1,119
| 0
| 23
|
0ebace1dbbd279ebfafd87a38a3b7827f59351b3
| 283
|
py
|
Python
|
build_bridge.py
|
AaronC81/delta-pico
|
08a3dae3c8dbae3db45b8434351b4ac0abc5f1da
|
[
"MIT"
] | 2
|
2021-12-22T23:37:30.000Z
|
2022-03-10T01:22:00.000Z
|
build_bridge.py
|
AaronC81/delta-pico
|
08a3dae3c8dbae3db45b8434351b4ac0abc5f1da
|
[
"MIT"
] | null | null | null |
build_bridge.py
|
AaronC81/delta-pico
|
08a3dae3c8dbae3db45b8434351b4ac0abc5f1da
|
[
"MIT"
] | null | null | null |
import subprocess, os
print("Building Rust component...")
# "cargo build" the bridge project
root_dir = os.path.dirname(os.path.realpath(__file__))
bridge_dir = os.path.join(root_dir, "rust")
subprocess.check_output(["cargo", "build", "--release"], cwd=bridge_dir)
print("Done!")
| 25.727273
| 72
| 0.727915
|
import subprocess, os
print("Building Rust component...")
# "cargo build" the bridge project
root_dir = os.path.dirname(os.path.realpath(__file__))
bridge_dir = os.path.join(root_dir, "rust")
subprocess.check_output(["cargo", "build", "--release"], cwd=bridge_dir)
print("Done!")
| 0
| 0
| 0
|
6f6bfd6faba641ab09d6d1307be60d49c893fb56
| 338
|
py
|
Python
|
stereo/preprocess/__init__.py
|
leying95/stereopy
|
1580a88a091a2ebc0f177ea73409e2c4b4dd4c7e
|
[
"MIT"
] | null | null | null |
stereo/preprocess/__init__.py
|
leying95/stereopy
|
1580a88a091a2ebc0f177ea73409e2c4b4dd4c7e
|
[
"MIT"
] | null | null | null |
stereo/preprocess/__init__.py
|
leying95/stereopy
|
1580a88a091a2ebc0f177ea73409e2c4b4dd4c7e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
"""
@author: Ping Qiu qiuping1@genomics.cn
@last modified by: Ping Qiu
@file:__init__.py.py
@time:2021/03/05
"""
from .filter import filter_cells, filter_genes, filter_coordinates
from .normalize import Normalizer, normalize_total, normalize_zscore_disksmooth, quantile_norm
from .qc import cal_qc
| 28.166667
| 94
| 0.786982
|
#!/usr/bin/env python3
# coding: utf-8
"""
@author: Ping Qiu qiuping1@genomics.cn
@last modified by: Ping Qiu
@file:__init__.py.py
@time:2021/03/05
"""
from .filter import filter_cells, filter_genes, filter_coordinates
from .normalize import Normalizer, normalize_total, normalize_zscore_disksmooth, quantile_norm
from .qc import cal_qc
| 0
| 0
| 0
|
ae639af7b2deedc5fc4c667db8000d51c3ec0348
| 12,355
|
py
|
Python
|
tests/ut/python/dataset/test_datasets_usps.py
|
httpsgithu/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | 1
|
2022-03-30T03:43:29.000Z
|
2022-03-30T03:43:29.000Z
|
tests/ut/python/dataset/test_datasets_usps.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_datasets_usps.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test USPS dataset operators
"""
import os
from typing import cast
import matplotlib.pyplot as plt
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testUSPSDataset"
WRONG_DIR = "../data/dataset/testMnistData"
def load_usps(path, usage):
"""
load USPS data
"""
assert usage in ["train", "test"]
if usage == "train":
data_path = os.path.realpath(os.path.join(path, "usps"))
elif usage == "test":
data_path = os.path.realpath(os.path.join(path, "usps.t"))
with open(data_path, 'r') as f:
raw_data = [line.split() for line in f.readlines()]
tmp_list = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
images = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16, 1))
images = ((cast(np.ndarray, images) + 1) / 2 * 255).astype(dtype=np.uint8)
labels = [int(d[0]) - 1 for d in raw_data]
return images, labels
def visualize_dataset(images, labels):
"""
Helper function to visualize the dataset samples
"""
num_samples = len(images)
for i in range(num_samples):
plt.subplot(1, num_samples, i + 1)
plt.imshow(images[i].squeeze(), cmap=plt.cm.gray)
plt.title(labels[i])
plt.show()
def test_usps_content_check():
"""
Validate USPSDataset image readings
"""
logger.info("Test USPSDataset Op with content check")
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=10, shuffle=False)
images, labels = load_usps(DATA_DIR, "train")
num_iter = 0
# in this example, each dictionary has keys "image" and "label"
for i, data in enumerate(train_data.create_dict_iterator(num_epochs=1, output_numpy=True)):
for m in range(16):
for n in range(16):
assert (data["image"][m, n, 0] != 0 or images[i][m, n, 0] != 255) and \
(data["image"][m, n, 0] != 255 or images[i][m, n, 0] != 0)
assert (data["image"][m, n, 0] == images[i][m, n, 0]) or\
(data["image"][m, n, 0] == images[i][m, n, 0] + 1) or\
(data["image"][m, n, 0] + 1 == images[i][m, n, 0])
np.testing.assert_array_equal(data["label"], labels[i])
num_iter += 1
assert num_iter == 3
test_data = ds.USPSDataset(DATA_DIR, "test", num_samples=3, shuffle=False)
images, labels = load_usps(DATA_DIR, "test")
num_iter = 0
# in this example, each dictionary has keys "image" and "label"
for i, data in enumerate(test_data.create_dict_iterator(num_epochs=1, output_numpy=True)):
for m in range(16):
for n in range(16):
if (data["image"][m, n, 0] == 0 and images[i][m, n, 0] == 255) or\
(data["image"][m, n, 0] == 255 and images[i][m, n, 0] == 0):
assert False
if (data["image"][m, n, 0] != images[i][m, n, 0]) and\
(data["image"][m, n, 0] != images[i][m, n, 0] + 1) and\
(data["image"][m, n, 0] + 1 != images[i][m, n, 0]):
assert False
np.testing.assert_array_equal(data["label"], labels[i])
num_iter += 1
assert num_iter == 3
def test_usps_basic():
"""
Validate USPSDataset
"""
logger.info("Test USPSDataset Op")
# case 1: test loading whole dataset
train_data = ds.USPSDataset(DATA_DIR, "train")
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 3
test_data = ds.USPSDataset(DATA_DIR, "test")
num_iter = 0
for _ in test_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 3
# case 2: test num_samples
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=2)
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
# case 3: test repeat
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=2)
train_data = train_data.repeat(5)
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 10
# case 4: test batch with drop_remainder=False
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3)
assert train_data.get_dataset_size() == 3
assert train_data.get_batch_size() == 1
train_data = train_data.batch(batch_size=2) # drop_remainder is default to be False
assert train_data.get_batch_size() == 2
assert train_data.get_dataset_size() == 2
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
# case 5: test batch with drop_remainder=True
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3)
assert train_data.get_dataset_size() == 3
assert train_data.get_batch_size() == 1
train_data = train_data.batch(batch_size=2, drop_remainder=True) # the rest of incomplete batch will be dropped
assert train_data.get_dataset_size() == 1
assert train_data.get_batch_size() == 2
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 1
def test_usps_exception():
"""
Test error cases for USPSDataset
"""
error_msg_3 = "num_shards is specified and currently requires shard_id as well"
with pytest.raises(RuntimeError, match=error_msg_3):
ds.USPSDataset(DATA_DIR, "train", num_shards=10)
ds.USPSDataset(DATA_DIR, "test", num_shards=10)
error_msg_4 = "shard_id is specified but num_shards is not"
with pytest.raises(RuntimeError, match=error_msg_4):
ds.USPSDataset(DATA_DIR, "train", shard_id=0)
ds.USPSDataset(DATA_DIR, "test", shard_id=0)
error_msg_5 = "Input shard_id is not within the required interval"
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=5, shard_id=-1)
ds.USPSDataset(DATA_DIR, "test", num_shards=5, shard_id=-1)
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=5, shard_id=5)
ds.USPSDataset(DATA_DIR, "test", num_shards=5, shard_id=5)
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=2, shard_id=5)
ds.USPSDataset(DATA_DIR, "test", num_shards=2, shard_id=5)
error_msg_6 = "num_parallel_workers exceeds"
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=0)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=0)
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=256)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=256)
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=-2)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=-2)
error_msg_7 = "Argument shard_id"
with pytest.raises(TypeError, match=error_msg_7):
ds.USPSDataset(DATA_DIR, "train", num_shards=2, shard_id="0")
ds.USPSDataset(DATA_DIR, "test", num_shards=2, shard_id="0")
error_msg_8 = "invalid input shape"
with pytest.raises(RuntimeError, match=error_msg_8):
train_data = ds.USPSDataset(DATA_DIR, "train")
train_data = train_data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
for _ in train_data.__iter__():
pass
test_data = ds.USPSDataset(DATA_DIR, "test")
test_data = test_data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
for _ in test_data.__iter__():
pass
error_msg_9 = "usps does not exist or is a directory"
with pytest.raises(RuntimeError, match=error_msg_9):
train_data = ds.USPSDataset(WRONG_DIR, "train")
for _ in train_data.__iter__():
pass
error_msg_10 = "usps.t does not exist or is a directory"
with pytest.raises(RuntimeError, match=error_msg_10):
test_data = ds.USPSDataset(WRONG_DIR, "test")
for _ in test_data.__iter__():
pass
def test_usps_visualize(plot=False):
"""
Visualize USPSDataset results
"""
logger.info("Test USPSDataset visualization")
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3, shuffle=False)
num_iter = 0
image_list, label_list = [], []
for item in train_data.create_dict_iterator(num_epochs=1, output_numpy=True):
image = item["image"]
label = item["label"]
image_list.append(image)
label_list.append("label {}".format(label))
assert isinstance(image, np.ndarray)
assert image.shape == (16, 16, 1)
assert image.dtype == np.uint8
assert label.dtype == np.uint32
num_iter += 1
assert num_iter == 3
if plot:
visualize_dataset(image_list, label_list)
test_data = ds.USPSDataset(DATA_DIR, "test", num_samples=3, shuffle=False)
num_iter = 0
image_list, label_list = [], []
for item in test_data.create_dict_iterator(num_epochs=1, output_numpy=True):
image = item["image"]
label = item["label"]
image_list.append(image)
label_list.append("label {}".format(label))
assert isinstance(image, np.ndarray)
assert image.shape == (16, 16, 1)
assert image.dtype == np.uint8
assert label.dtype == np.uint32
num_iter += 1
assert num_iter == 3
if plot:
visualize_dataset(image_list, label_list)
def test_usps_usage():
"""
Validate USPSDataset image readings
"""
logger.info("Test USPSDataset usage flag")
assert test_config("train") == 3
assert test_config("test") == 3
assert "usage is not within the valid set of ['train', 'test', 'all']" in test_config("invalid")
assert "Argument usage with value ['list'] is not of type [<class 'str'>]" in test_config(["list"])
# change this directory to the folder that contains all USPS files
all_files_path = None
# the following tests on the entire datasets
if all_files_path is not None:
assert test_config("train", all_files_path) == 3
assert test_config("test", all_files_path) == 3
assert ds.USPSDataset(all_files_path, usage="train").get_dataset_size() == 3
assert ds.USPSDataset(all_files_path, usage="test").get_dataset_size() == 3
if __name__ == '__main__':
test_usps_content_check()
test_usps_basic()
test_usps_exception()
test_usps_visualize(plot=True)
test_usps_usage()
| 39.983819
| 117
| 0.630676
|
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test USPS dataset operators
"""
import os
from typing import cast
import matplotlib.pyplot as plt
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testUSPSDataset"
WRONG_DIR = "../data/dataset/testMnistData"
def load_usps(path, usage):
"""
load USPS data
"""
assert usage in ["train", "test"]
if usage == "train":
data_path = os.path.realpath(os.path.join(path, "usps"))
elif usage == "test":
data_path = os.path.realpath(os.path.join(path, "usps.t"))
with open(data_path, 'r') as f:
raw_data = [line.split() for line in f.readlines()]
tmp_list = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
images = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16, 1))
images = ((cast(np.ndarray, images) + 1) / 2 * 255).astype(dtype=np.uint8)
labels = [int(d[0]) - 1 for d in raw_data]
return images, labels
def visualize_dataset(images, labels):
"""
Helper function to visualize the dataset samples
"""
num_samples = len(images)
for i in range(num_samples):
plt.subplot(1, num_samples, i + 1)
plt.imshow(images[i].squeeze(), cmap=plt.cm.gray)
plt.title(labels[i])
plt.show()
def test_usps_content_check():
"""
Validate USPSDataset image readings
"""
logger.info("Test USPSDataset Op with content check")
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=10, shuffle=False)
images, labels = load_usps(DATA_DIR, "train")
num_iter = 0
# in this example, each dictionary has keys "image" and "label"
for i, data in enumerate(train_data.create_dict_iterator(num_epochs=1, output_numpy=True)):
for m in range(16):
for n in range(16):
assert (data["image"][m, n, 0] != 0 or images[i][m, n, 0] != 255) and \
(data["image"][m, n, 0] != 255 or images[i][m, n, 0] != 0)
assert (data["image"][m, n, 0] == images[i][m, n, 0]) or\
(data["image"][m, n, 0] == images[i][m, n, 0] + 1) or\
(data["image"][m, n, 0] + 1 == images[i][m, n, 0])
np.testing.assert_array_equal(data["label"], labels[i])
num_iter += 1
assert num_iter == 3
test_data = ds.USPSDataset(DATA_DIR, "test", num_samples=3, shuffle=False)
images, labels = load_usps(DATA_DIR, "test")
num_iter = 0
# in this example, each dictionary has keys "image" and "label"
for i, data in enumerate(test_data.create_dict_iterator(num_epochs=1, output_numpy=True)):
for m in range(16):
for n in range(16):
if (data["image"][m, n, 0] == 0 and images[i][m, n, 0] == 255) or\
(data["image"][m, n, 0] == 255 and images[i][m, n, 0] == 0):
assert False
if (data["image"][m, n, 0] != images[i][m, n, 0]) and\
(data["image"][m, n, 0] != images[i][m, n, 0] + 1) and\
(data["image"][m, n, 0] + 1 != images[i][m, n, 0]):
assert False
np.testing.assert_array_equal(data["label"], labels[i])
num_iter += 1
assert num_iter == 3
def test_usps_basic():
"""
Validate USPSDataset
"""
logger.info("Test USPSDataset Op")
# case 1: test loading whole dataset
train_data = ds.USPSDataset(DATA_DIR, "train")
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 3
test_data = ds.USPSDataset(DATA_DIR, "test")
num_iter = 0
for _ in test_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 3
# case 2: test num_samples
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=2)
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
# case 3: test repeat
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=2)
train_data = train_data.repeat(5)
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 10
# case 4: test batch with drop_remainder=False
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3)
assert train_data.get_dataset_size() == 3
assert train_data.get_batch_size() == 1
train_data = train_data.batch(batch_size=2) # drop_remainder is default to be False
assert train_data.get_batch_size() == 2
assert train_data.get_dataset_size() == 2
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
# case 5: test batch with drop_remainder=True
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3)
assert train_data.get_dataset_size() == 3
assert train_data.get_batch_size() == 1
train_data = train_data.batch(batch_size=2, drop_remainder=True) # the rest of incomplete batch will be dropped
assert train_data.get_dataset_size() == 1
assert train_data.get_batch_size() == 2
num_iter = 0
for _ in train_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 1
def test_usps_exception():
"""
Test error cases for USPSDataset
"""
error_msg_3 = "num_shards is specified and currently requires shard_id as well"
with pytest.raises(RuntimeError, match=error_msg_3):
ds.USPSDataset(DATA_DIR, "train", num_shards=10)
ds.USPSDataset(DATA_DIR, "test", num_shards=10)
error_msg_4 = "shard_id is specified but num_shards is not"
with pytest.raises(RuntimeError, match=error_msg_4):
ds.USPSDataset(DATA_DIR, "train", shard_id=0)
ds.USPSDataset(DATA_DIR, "test", shard_id=0)
error_msg_5 = "Input shard_id is not within the required interval"
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=5, shard_id=-1)
ds.USPSDataset(DATA_DIR, "test", num_shards=5, shard_id=-1)
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=5, shard_id=5)
ds.USPSDataset(DATA_DIR, "test", num_shards=5, shard_id=5)
with pytest.raises(ValueError, match=error_msg_5):
ds.USPSDataset(DATA_DIR, "train", num_shards=2, shard_id=5)
ds.USPSDataset(DATA_DIR, "test", num_shards=2, shard_id=5)
error_msg_6 = "num_parallel_workers exceeds"
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=0)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=0)
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=256)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=256)
with pytest.raises(ValueError, match=error_msg_6):
ds.USPSDataset(DATA_DIR, "train", shuffle=False, num_parallel_workers=-2)
ds.USPSDataset(DATA_DIR, "test", shuffle=False, num_parallel_workers=-2)
error_msg_7 = "Argument shard_id"
with pytest.raises(TypeError, match=error_msg_7):
ds.USPSDataset(DATA_DIR, "train", num_shards=2, shard_id="0")
ds.USPSDataset(DATA_DIR, "test", num_shards=2, shard_id="0")
error_msg_8 = "invalid input shape"
with pytest.raises(RuntimeError, match=error_msg_8):
train_data = ds.USPSDataset(DATA_DIR, "train")
train_data = train_data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
for _ in train_data.__iter__():
pass
test_data = ds.USPSDataset(DATA_DIR, "test")
test_data = test_data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
for _ in test_data.__iter__():
pass
error_msg_9 = "usps does not exist or is a directory"
with pytest.raises(RuntimeError, match=error_msg_9):
train_data = ds.USPSDataset(WRONG_DIR, "train")
for _ in train_data.__iter__():
pass
error_msg_10 = "usps.t does not exist or is a directory"
with pytest.raises(RuntimeError, match=error_msg_10):
test_data = ds.USPSDataset(WRONG_DIR, "test")
for _ in test_data.__iter__():
pass
def test_usps_visualize(plot=False):
"""
Visualize USPSDataset results
"""
logger.info("Test USPSDataset visualization")
train_data = ds.USPSDataset(DATA_DIR, "train", num_samples=3, shuffle=False)
num_iter = 0
image_list, label_list = [], []
for item in train_data.create_dict_iterator(num_epochs=1, output_numpy=True):
image = item["image"]
label = item["label"]
image_list.append(image)
label_list.append("label {}".format(label))
assert isinstance(image, np.ndarray)
assert image.shape == (16, 16, 1)
assert image.dtype == np.uint8
assert label.dtype == np.uint32
num_iter += 1
assert num_iter == 3
if plot:
visualize_dataset(image_list, label_list)
test_data = ds.USPSDataset(DATA_DIR, "test", num_samples=3, shuffle=False)
num_iter = 0
image_list, label_list = [], []
for item in test_data.create_dict_iterator(num_epochs=1, output_numpy=True):
image = item["image"]
label = item["label"]
image_list.append(image)
label_list.append("label {}".format(label))
assert isinstance(image, np.ndarray)
assert image.shape == (16, 16, 1)
assert image.dtype == np.uint8
assert label.dtype == np.uint32
num_iter += 1
assert num_iter == 3
if plot:
visualize_dataset(image_list, label_list)
def test_usps_usage():
"""
Validate USPSDataset image readings
"""
logger.info("Test USPSDataset usage flag")
def test_config(usage, path=None):
path = DATA_DIR if path is None else path
try:
data = ds.USPSDataset(path, usage=usage, shuffle=False)
num_rows = 0
for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
num_rows += 1
except (ValueError, TypeError, RuntimeError) as e:
return str(e)
return num_rows
assert test_config("train") == 3
assert test_config("test") == 3
assert "usage is not within the valid set of ['train', 'test', 'all']" in test_config("invalid")
assert "Argument usage with value ['list'] is not of type [<class 'str'>]" in test_config(["list"])
# change this directory to the folder that contains all USPS files
all_files_path = None
# the following tests on the entire datasets
if all_files_path is not None:
assert test_config("train", all_files_path) == 3
assert test_config("test", all_files_path) == 3
assert ds.USPSDataset(all_files_path, usage="train").get_dataset_size() == 3
assert ds.USPSDataset(all_files_path, usage="test").get_dataset_size() == 3
if __name__ == '__main__':
test_usps_content_check()
test_usps_basic()
test_usps_exception()
test_usps_visualize(plot=True)
test_usps_usage()
| 398
| 0
| 29
|
ec4aea568eff55f9b12d7d6ccea094cfc76818c4
| 919
|
py
|
Python
|
apps/plot.py
|
JongGuk/Raman_Mapping
|
e4b0fb44b8077a2a9c7965132794757a0079965e
|
[
"MIT"
] | null | null | null |
apps/plot.py
|
JongGuk/Raman_Mapping
|
e4b0fb44b8077a2a9c7965132794757a0079965e
|
[
"MIT"
] | null | null | null |
apps/plot.py
|
JongGuk/Raman_Mapping
|
e4b0fb44b8077a2a9c7965132794757a0079965e
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from pandas import DataFrame
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
data = {'Raman Shift': [-464, -460, -455, -450, -445],
'Intensity1': [745, 752, 746, 740, 750], 'Intensity2': [734, 745, 768, 763, 755]
} # take in the sliced-out data
df = DataFrame(data) # build a DataFrame from data
df.set_index('Raman Shift', inplace=True) # use the Raman Shift column of the DataFrame as the x axis (index)
root = tk.Tk() # open a window with tkinter
figure = plt.Figure(figsize=(5,4), dpi=100) # size of the window the graph goes into
ax = figure.add_subplot(111) # plot the graph and adjust the x,y axis ranges (auto if the range is omitted)
ax.set_title('Raman spectrum at selected point')
line = FigureCanvasTkAgg(figure, root) # draw the Figure and display it in root
line.get_tk_widget().pack() # pack() sets alignment/fill of the graph widget
#df.plot(~~~) # a particular series can be selected as df.<name>
df.Intensity2.plot(kind='line', ax=ax, color='r', marker='o', fontsize=10)
root.mainloop() # refresh
| 34.037037
| 89
| 0.687704
|
import tkinter as tk
from pandas import DataFrame
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
data = {'Raman Shift': [-464, -460, -455, -450, -445],
'Intensity1': [745, 752, 746, 740, 750], 'Intensity2': [734, 745, 768, 763, 755]
} # take in the sliced-out data
df = DataFrame(data) # build a DataFrame from data
df.set_index('Raman Shift', inplace=True) # use the Raman Shift column of the DataFrame as the x axis (index)
root = tk.Tk() # open a window with tkinter
figure = plt.Figure(figsize=(5,4), dpi=100) # size of the window the graph goes into
ax = figure.add_subplot(111) # plot the graph and adjust the x,y axis ranges (auto if the range is omitted)
ax.set_title('Raman spectrum at selected point')
line = FigureCanvasTkAgg(figure, root) # draw the Figure and display it in root
line.get_tk_widget().pack() # pack() sets alignment/fill of the graph widget
#df.plot(~~~) # a particular series can be selected as df.<name>
df.Intensity2.plot(kind='line', ax=ax, color='r', marker='o', fontsize=10)
root.mainloop() # refresh
| 0
| 0
| 0
|
59035eabd3ac8998d45ef40fc66c22001078419d
| 5,075
|
py
|
Python
|
openpose-app/MotionMeasure/OpenposeLogParser.py
|
B-C-WANG/AI-Apps
|
305d1960ec2b84081228543bf819deff694fddd2
|
[
"MIT"
] | 7
|
2018-11-10T09:15:29.000Z
|
2021-06-05T01:54:45.000Z
|
openpose-app/MotionMeasure/OpenposeLogParser.py
|
B-C-WANG/AI-Apps
|
305d1960ec2b84081228543bf819deff694fddd2
|
[
"MIT"
] | null | null | null |
openpose-app/MotionMeasure/OpenposeLogParser.py
|
B-C-WANG/AI-Apps
|
305d1960ec2b84081228543bf819deff694fddd2
|
[
"MIT"
] | 4
|
2019-06-22T03:26:46.000Z
|
2020-05-17T11:40:22.000Z
|
# encoding: utf-8
import json
import numpy as np
import matplotlib.pyplot as plt
import os
import queue
import _thread
import traceback
point_name = [
"Nose",
"Neck",
"RShoulder",
"RElbow",
"RWrist",
"LShoulder",
"LElbow",
"LWrist",
"MidHip",
"RHip",
"RKnee",
"RAnkle",
"LHip",
"LKnee",
"LAnkle",
"REye",
"LEye",
"REar",
"LEar",
"LBigToe",
"LSmallToe",
"LHeel",
"RBigToe",
"RSmallToe",
"RHeel",
"Background"
]
if __name__ == '__main__':
while 1:
for i in OpenposeJsonParser().stream_update_point_change_data_in_the_dir("G:\openpose\output",sum=True):
print(i)
| 25.248756
| 129
| 0.521576
|
# encoding: utf-8
import json
import numpy as np
import matplotlib.pyplot as plt
import os
import queue
import _thread
import traceback
point_name = [
"Nose",
"Neck",
"RShoulder",
"RElbow",
"RWrist",
"LShoulder",
"LElbow",
"LWrist",
"MidHip",
"RHip",
"RKnee",
"RAnkle",
"LHip",
"LKnee",
"LAnkle",
"REye",
"LEye",
"REar",
"LEar",
"LBigToe",
"LSmallToe",
"LHeel",
"RBigToe",
"RSmallToe",
"RHeel",
"Background"
]
class OpenposeJsonParser():
def __init__(self):
pass
def get_pose2d_state_of_first_people(self,json_file):
'''
get all body points of first people:
- all points will minus the position of point 1 to set 1 as center
(1 is "Neck")
to avoid the change of distance to camera
the distance of two points will be scaled by distance of 2-5
2-5 is RShoulder and LShoulder, the distance can not change much with body
in this way, all point distance is use 1 as center
and use distance of 2-5 as 1,
so it can be used to compare between two frame
'''
try:
with open(json_file, "r") as f:
data = json.load(f)
people = data["people"]
people = people[0]
pose2d = people["pose_keypoints_2d"]
pose2d = np.array(pose2d).reshape(-1,3)
#print(pose2d) # x y and confidence
coord = pose2d[:,:2]
center_pos = coord[1]
if (center_pos[0] < [0.1,0.1]).any():
# return false if can not detect center point
return False
if (coord[2] < [0.1,0.1]).any() or (coord[5] < [0.1,0.1]).any():
# return false if can not detect 2 5 point
return False
# set the position of [0,0] to center position so that will be 9 after minus
coord[(coord[:,:2] <[0.1,0.1]).any(axis=1)] = center_pos
# set center position
coord = coord - center_pos
# reset
coord = - coord
# scale according to refer_distance
refer_distance = np.sqrt(np.sum(np.square(coord[2]-coord[5])))
#print(refer_distance)
coord = coord / refer_distance
data ={}
#print(coord)
for i in range(coord.shape[0]):
if (np.abs(coord[i,:]) < ([0.0001,0.0001])).any():
data[point_name[i]] = False
else:
data[point_name[i]] = coord[i,:]
# finally add center_position, this center_position is the total_move of the body, its absolute value is meanningless
data[point_name[1]] = center_pos / refer_distance
return data
except:
# if met error, all set False
info = {}
for i in point_name[:-1]:
info[i] = False
return info
def get_point_change_data(self,last_state,now_state,sum=False):
try:
# all points move distance, related to energy people use
info = {}
for name in point_name[:-1]:# background not included
if now_state is bool:
raise ValueError()
if isinstance(now_state[name],bool) or isinstance(last_state[name],bool):
info[name] = False
else:
info[name] = np.sqrt(np.sum(np.square(last_state[name] - now_state[name])))
if sum == False:
return info
else:
value = 0
for i in info:
value += 0 if info[i] == False else abs(info[i])
return value
except:
traceback.print_exc()
if sum==False:
info = {}
for i in point_name:
info[i] = 0
return info
else:
return 0
def stream_update_point_change_data_in_the_dir(self,json_file_dir,sum=False):
last_state = None
while last_state is None:
file = os.listdir(json_file_dir)
for i in file:
if i.endswith(".json"):
file_path = json_file_dir + "/" + i
last_state = self.get_pose2d_state_of_first_people(file_path)
os.remove(file_path)
break
while 1:
file = os.listdir(json_file_dir)
for i in file:
if i.endswith(".json"):
file_path = json_file_dir + "/" + i
now_state = self.get_pose2d_state_of_first_people(file_path)
os.remove(file_path)
yield self.get_point_change_data(last_state,now_state,sum=sum)
break
if __name__ == '__main__':
while 1:
for i in OpenposeJsonParser().stream_update_point_change_data_in_the_dir("G:\openpose\output",sum=True):
print(i)
| 1,901
| 2,527
| 23
|
0449817e405dd949fcb83eafc1beb5ae393f23f2
| 47,889
|
py
|
Python
|
experimental/eidelyur/variabledNLsimulation_v2.py
|
radiasoft/rsnibo
|
c2040f2ec21bbc2701a5968c6f2d3e3e0d31f81d
|
[
"Apache-2.0"
] | null | null | null |
experimental/eidelyur/variabledNLsimulation_v2.py
|
radiasoft/rsnibo
|
c2040f2ec21bbc2701a5968c6f2d3e3e0d31f81d
|
[
"Apache-2.0"
] | null | null | null |
experimental/eidelyur/variabledNLsimulation_v2.py
|
radiasoft/rsnibo
|
c2040f2ec21bbc2701a5968c6f2d3e3e0d31f81d
|
[
"Apache-2.0"
] | 1
|
2019-04-26T22:58:22.000Z
|
2019-04-26T22:58:22.000Z
|
#
# This script develops the script 'variabledNLsimulation_v1.py' (Yury Eidelman)
#
# Started at June 28, 2019
#
# The three laws to change the strengths 't' of all nonlinear lenses are implemented.
# From initial value t_i to final value t_f during N steps these laws are as follows.
# 1) Linear: for step number n
# t(n) = t_0 + (t_f-t_0)*n/(N-1) for n = 0,1,...,N-1 .
# 2) Parabolic: for step number n
# t(n) = t_0 + (t_f-t_0)*n^2/(N-1)^2 for n = 0,1,...,N-1 .
# 3) Smooth sign-function: for step number n
# t(n) = .5*(t_0+t_f) + .5*(t_f-t_0)*tanh(x(n)), where
# x(n) = (6*n-3*(N-1))/(N-1) for n=0,1,...,N-1 .
# In this approach x(0) = -3., x(N-1) = 3.; so, tanh(3.) = - tanh(-3.) = .9951
#
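# A minimal standalone sketch of the three ramp laws above (added for illustration only;
# the helper name and its 'law' argument are hypothetical, not part of the original script;
# it relies on the 'import numpy as np' a few lines below):
def ramp_strengths(t_0, t_f, N, law='linear'):
    """Return the lens strength t(n) for n = 0,1,...,N-1 under the chosen ramp law."""
    n = np.arange(N, dtype=float)
    if law == 'linear':
        return t_0 + (t_f - t_0) * n / (N - 1)
    if law == 'parabolic':
        return t_0 + (t_f - t_0) * n**2 / (N - 1)**2
    # smooth sign-function: x(n) runs from -3 to +3, and tanh(3.) = -tanh(-3.) = .9951
    x = (6.0 * n - 3.0 * (N - 1)) / (N - 1)
    return 0.5 * (t_0 + t_f) + 0.5 * (t_f - t_0) * np.tanh(x)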
import synergia
import os, sys
import inspect
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import gridspec
import rssynergia
from rssynergia.base_diagnostics import lfplot
from rssynergia.base_diagnostics import plotbeam
from rssynergia.base_diagnostics import pltbunch
#
# Output attributes of 'generate_lens' method:
#
# same as output of 'NonlinearInsertion'class and as well:
# s_vals (ndArray): coordinates of the center of each nonlinear lens (float ndArray, m);
# knll (ndArray): "strength" of each nonlinear lens (float ndArray, m);
# cnll (ndArray): aperture parameters for each nonlinear lens (float ndArray, m^1/2).
#
# Pickle helper is not necessary but is retained for this example
#
# Definition of class to ramp nonlinear lens
#
# Args of 'Ramp_actions' method are:
# 'type' - type of magnification (1 - relative, 2 - absolute),
# 'stepNumber' - current step of magnification,
# 'strengthLens' - set of strengthes 't' of central lens of the nonlinear insertion for all steps of
# magnification (relative magnification) or set of strengthes 't' of all lenses for
# current step (absolute magnification),
# 'updateOutputFlag' - flag to output the strength of one of nonlinear lens after it's magnification
# for current step,
# controlName - name of lens with maximal strength to use in output for checking of process
# of magnification.
#
#
# The arguments to __init__ are what the Ramp_actions instance is initialized with:
#
# Main method 'simulation'
#
#
# End of main method 'simulation'
#
#========================================================
fileIOTA = ".../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx"
# fileIOTA = ".../ioptics/ioptics/lattices/Iota8-4/lattice_8-4_1IO_nll_forTest.madx"
print "\nIOTA Nonlinear lattice: {} \n".format(fileIOTA)
lattice = synergia.lattice.MadX_reader().get_lattice("iota", \
"../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx")
# --------- Games -----------------------------
# indices = np.argsort(knllLenses)
# print "indices = ",indices
# for n in range(nLenses+1):
# print n,") name after sorting is ",nameLenses[indices[n]]
# for n in range(nLenses+1):
# print n,") knll after sorting is ",knllLenses[indices[n]]
# for n in range(nLenses+1):
# print n,") place after sorting is ",placeLenses[indices[n]]
# ----------- End of games --------------------
stepperCrrnt = synergia.simulation.Independent_stepper_elements(lattice,2,3)
lattice_simulator_Crrnt = stepperCrrnt.get_lattice_simulator()
# To recognize attributes of 'bunchParticles':
# printAttributes(lattice_simulator_Crrnt,'lattice_simulator_Crrnt','stepperCrrnt.get_lattice_simulator()')
# slicesHelp = lattice_simulator_Crrnt.get_slices()
# To recognize attributes of 'slicesHelp':
# printAttributes(slicesHelp,'slicesHelp','lattice_simulator_Crrnt.get_slices()')
# Bunch:
bunch_origin = synergia.optics.generate_matched_bunch_transverse(lattice_simulator_Crrnt, 1e-6, \
1e-6, 1e-3, 1e-4, 1e9, 1000, seed=1234)
#
# To compare two methods for drawing of the particles distributions:
#
loclTitle = "\nThese distributions were constructed using \
'synergia.optics.generated_matched_bunch_transverse' method"
loclTitle += "\nand plotted using two methods - 'pltbunch.plot_bunch' from the code synergia"
loclTitle += "\nand 'plotcoordDistr' from this script (to verify method 'plotcoordDistr'):"
print loclTitle
pltbunch.plot_bunch(bunch_origin)
# Distributions X-Y, X-X', Y-Y' using method 'plotcoordDistr':
bunchParticles = bunch_origin.get_local_particles()
# To recognize attributes of 'bunchParticles':
# printAttributes(bunchParticles,'bunchParticles', 'bunch.get_local_particles()')
plotcoordDistr(bunchParticles)
selection = 'loop'
while selection == 'loop':
simulation()
selection = raw_input("\nTo continue the simulation ('yes' or 'no'):")
print'Your selection is ',selection
if selection == 'yes':
selection = 'loop'
# if selection == 'no':
# exit(0)
| 44.756075
| 145
| 0.628098
|
#
# This script develops the script 'variabledNLsimulation_v1.py' (Yury Eidelman)
#
# Started at June 28, 2019
#
# The three laws to change the strengths 't' of all nonlinear lenses are implemented.
# From initial value t_i to final value t_f during N steps these laws are as follows.
# 1) Linear: for step number n
# t(n) = t_0 + (t_f-t_0)*n/(N-1) for n = 0,1,...,N-1 .
# 2) Parabolic: for step number n
# t(n) = t_0 + (t_f-t_0)*n^2/(N-1)^2 for n = 0,1,...,N-1 .
# 3) Smooth sign-function: for step number n
# t(n) = .5*(t_0+t_f) + .5*(t_f-t_0)*tanh(x(n)), where
# x(n) = (6*n-3*(N-1))/(N-1) for n=0,1,...,N-1 .
# In this approach x(0) = -3., x(N-1) = 3.; so, tanh(3.) = - tanh(-3.) = .9951
#
import synergia
import os, sys
import inspect
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import gridspec
import rssynergia
from rssynergia.base_diagnostics import lfplot
from rssynergia.base_diagnostics import plotbeam
from rssynergia.base_diagnostics import pltbunch
def plotcoordDistr(bunchParticles):
#
# Plot X-X', Y-Y', and X-Y distributions for 'bunchParticles'
#
# bunchParticles is a 'bunch' object;
# particles is 2D array: (numberOfParticles,(x,x',y,y',s,dp(?),ID);
#
numbPartcls = bunchParticles.shape[0]
particles = bunchParticles.real
newCoordinates = np.zeros((6,numbPartcls))
for k in range(numbPartcls):
for j in range(6):
newCoordinates[j,k] = 1.e3*particles[k,j] # Units: mm and mrad
xmax = 1.15*np.max(abs(newCoordinates[0,:]))
xpmax = 1.15*np.max(abs(newCoordinates[1,:]))
ymax = 1.15*np.max(abs(newCoordinates[2,:]))
ypmax = 1.15*np.max(abs(newCoordinates[3,:]))
meanX = np.mean(newCoordinates[0,:])
meanPX = np.mean(newCoordinates[1,:])
stdX = np.std(newCoordinates[0,:])
stdPX = np.std(newCoordinates[1,:])
meanY = np.mean(newCoordinates[2,:])
meanPY = np.mean(newCoordinates[3,:])
stdY = np.std(newCoordinates[2,:])
stdPY = np.std(newCoordinates[3,:])
# Another way - use gridspec
fig = plt.figure(figsize=(15,5))
gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
ax0 = plt.subplot(gs[0])
plt.plot(newCoordinates[0,:],newCoordinates[2,:],'.',color='k')
x0Title = "X,mm: <> = {:.3f} +- {:.3f}\nY,mm: <> = {:.3f} +- {:.3f}".format(meanX,stdX,meanY,stdY)
ax0.set_title(x0Title,color='m',fontsize='16')
ax0.set_xlim([-xmax,xmax])
ax0.set_ylim([-ymax,ymax])
ax0.set_xlabel('X, mm',color='m',fontsize='14')
ax0.set_ylabel('Y, mm',color='m',fontsize='14')
ax0.grid(True)
ax1 = plt.subplot(gs[1])
plt.plot(newCoordinates[0,:],newCoordinates[1,:],'.',color='b')
x1Title = "X,mm: <> = {:.3f} +- {:.3f}\nX\',mrad: <> = {:.3f} +- {:.3f}".format(meanX,stdX,meanPX,stdPX)
ax1.set_title(x1Title,color='m',fontsize='16')
ax1.set_xlim([-xmax,xmax])
ax1.set_ylim([-xpmax,xpmax])
ax1.set_xlabel('X, mm',color='m',fontsize='14')
ax1.set_ylabel('X\', mrad',color='m',fontsize='14')
ax1.grid(True)
ax2 = plt.subplot(gs[2])
plt.plot(newCoordinates[2,:],newCoordinates[3,:],'.',color='r')
x2Title = "Y,mm: <> = {:.3f} +- {:.3f}\nY\',mrad: <> = {:.3f} +- {:.3f}".format(meanY,stdY,meanPY,stdPY)
ax2.set_title(x2Title,color='m',fontsize='16')
ax2.set_xlim([-ymax,ymax])
ax2.set_ylim([-ypmax,ypmax])
ax2.set_xlabel('Y, mm',color='m',fontsize='14')
ax2.set_ylabel('Y\', mrad',color='m',fontsize='14')
ax2.grid(True)
# fig.canvas.set_window_title('Synergia Phase Space Distribution')
fig.tight_layout()
plt.show()
return
def plotTracks(tracksCoords,numberTracks):
#
# Plot 'numberTracks' tracks from 'tracksCoords'
#
# tracksCoords is 3D array: (totalTurns,particles,(x,y))
#
# print "numberTracks = ",numberTracks
trackColor = ['r','b','k','m','g']
numbPoints = tracksCoords.shape[0]
# print "numbPoints = ",numbPoints
xmax = 1.15*np.max(np.max(abs(tracksCoords[:,0:numberTracks,0])))
ymax = 1.15*np.max(np.max(abs(tracksCoords[:,0:numberTracks,1])))
turn = np.arange(0,numbPoints)
# Another way - use gridspec
fig = plt.figure(figsize=(15,5))
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1])
ax0 = plt.subplot(gs[0])
for prtcl in range(numberTracks):
plt.plot(turn,tracksCoords[0:numbPoints,prtcl,0],'.-',color=trackColor[prtcl])
# x0Title = "X,mm: <> = {:.3f} +- {:.3f}\nY,mm: <> = {:.3f} +- {:.3f}".format(meanX,stdX,meanY,stdY)
# ax0.set_title(x0Title,color='m',fontsize='16')
ax0.set_ylim([-xmax,xmax])
ax0.set_xlabel('Turn',color='m',fontsize='14')
ax0.set_ylabel('X, mm',color='m',fontsize='14')
ax0.grid(True)
ax1 = plt.subplot(gs[1])
for prtcl in range(numberTracks):
plt.plot(turn,tracksCoords[0:numbPoints,prtcl,1],'.-',color=trackColor[prtcl])
# x0Title = "X,mm: <> = {:.3f} +- {:.3f}\nY,mm: <> = {:.3f} +- {:.3f}".format(meanX,stdX,meanY,stdY)
# ax0.set_title(x0Title,color='m',fontsize='16')
ax1.set_ylim([-ymax,ymax])
ax1.set_xlabel('Turn',color='m',fontsize='14')
ax1.set_ylabel('Y, mm',color='m',fontsize='14')
ax1.grid(True)
# fig.canvas.set_window_title('Synergia Phase Space Distribution')
fig.tight_layout()
plt.show()
return
def plotParamLens(s_center,knll,cnll,title0,title1):
#
# Plot distribution of the strength 'knll' of the nonlinear lens inside
# nonlinear insertion:
#
knll_plot = np.zeros(len(knll))
for n in range(len(knll)):
knll_plot[n]=1.e6*knll[n]
# Another way - use gridspec
fig = plt.figure(figsize=(15,5))
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1])
ax0 = plt.subplot(gs[0])
plt.plot(s_center,knll_plot,'-x',color='r')
ax0.set_xlabel('s, m',color='m',fontsize=14)
ax0.set_ylabel('10^6 * knll, m',color='m',fontsize=14)
ax0.set_title(title0,color='m',fontsize=16)
ax0.grid(True)
ax1 = plt.subplot(gs[1])
plt.plot(s_center,cnll,'-x',color='r')
ax1.set_xlabel('s, m',color='m',fontsize=14)
ax1.set_ylabel('cnll, m^1/2',color='m',fontsize=14)
ax1.set_title(title1,color='m',fontsize=16)
ax1.grid(True)
fig.tight_layout()
plt.show()
return
def printAttributes(object,name,title):
#
# List of all attributes of 'object' for checking:
#
attrList = inspect.getmembers(object)
strTitle = "\nattrList ("+name+" = "+title+"):\n{}\n"
print strTitle.format(attrList)
def tracksCoords(bunchParticles):
#
# Preparation of the track coordinates:
#
# 'bunchParticle' is a 'bunch' object;
# 'particles' is 2D array: (numberParticles,(x,x',y,y',s,dE,ID));
#
numbPartcls = bunchParticles.shape[0]
particles = bunchParticles.real
trackCoordinates = np.zeros((numbPartcls,2))
for prtcl in range(numbPartcls):
trackCoordinates[prtcl,0] = 1.e3*particles[prtcl,0] # x, mm
trackCoordinates[prtcl,1] = 1.e3*particles[prtcl,2] # y, mm
# if prtcl < 3:
# print "Particle {}: x = {} mm, y = {} mm". \
# format(prtcl,trackCoordinates[prtcl,0],trackCoordinates[prtcl,1])
return trackCoordinates
class NonlinearInsertion(object):
#
# Generation of the nonlinear lenses as set of segments of the nonlinear insertion
#
# Source:
# 1) Nonlinear Accelerator Lattices with One and Two Analytic Invariants.
# V. Danilov and S. Nagaitsev. Phys. Rev. ST Accel. Beams 13, 084002 (2010);
# https://journals.aps.org/prab/pdf/10.1103/PhysRevSTAB.13.084002.
# 2) Complex Representation of Potentials and Fields for the Nonlinear
# Magnetic Insert of the Integrable Optics Test Accelerator.
# Chad Mitchell. March 2017; https://esholarship.org/uc/item/7dt4t236.
# 3) Madx CERN User Guide. Chapter 10.10 - Nonlinear Lens with Elliptical Potential.
# http://mad.web.cern.ch/mad/
#
# Input args:
# length: the length of the nonlinear insertion (float, m);
# phase: the phase advance modulo 2pi through the nonlinear insertion;
# t: the strength parameter for center of the insertion (float, dimensionless,
# defaults to 0.1);
# c: the aperture parameter for center of the insertion
# (float, m^1/2, is defined by poles in the x-axis, defaults to 0.01);
# num_lens: the number of nonlinear lenses as segments of the insertion (int, defaults to 20).
#
# Output attributes are the same as input one.
#
def __init__(self, length, phase, t = 0.1, c = 0.01, num_lens = 20):
self.length = length
self.phase = phase
self.t = t
self._c = c
self.num_lens = num_lens
# print "Input data for NonlinearInsertion:\nlength = ",self.length,", phase = ",self.phase, \
# ", t = ",self.t,", c = ",self.c,", num_lens = ",self.num_lens
# Aperture parameter c must be positive:
@property
def c(self):
return self._c
@c.setter
def c(self, cval):
if cval < 0:
raise ValueError("Aperture parameter c must be positive")
self._c = c
#
# Output attributes of 'generate_lens' method:
#
# same as output of 'NonlinearInsertion'class and as well:
# s_vals (ndArray): coordinates of the center of each nonlinear lens (float ndArray, m);
# knll (ndArray): "strength" of each nonlinear lens (float ndArray, m);
# cnll (ndArray): aperture parameters for each nonlinear lens (float ndArray, m^1/2).
#
def generate_lens(self,flag):
indxShift = self.num_lens-2*((self.num_lens+1)/2)+1
# Focal length f0 of the insertion (m):
f0 = self.length/4.0*(1.0+1.0/np.tan(np.pi*self.phase)**2)
# print "f0 = ",f0
# Coordinates s_vals of the center of each nonlinear lens (m):
first_lens = .5*(self.length/self.num_lens)
last_lens = self.length - first_lens
s_vals = np.linspace(first_lens,last_lens,self.num_lens)
self.s_vals = s_vals
# Set the structural beta-function of the nonlinear magnet (m):
beta_n = self.length*(1.-s_vals*(self.length-s_vals)/self.length/f0)/ \
np.sqrt(1.0-(1.0-self.length/2.0/f0)**2)
# self.betas = beta_n
cnll = self.c*np.sqrt(beta_n)
knn = self.t*self.length/self.num_lens/beta_n**2
knll = knn*cnll**2
# Sequence of lenses start from the minimal value of knll (flag = 1):
self.cnll = cnll
self.knll = knll
# Sequence of lenses start from the maximal value of knll (flag = 2):
if flag == 2:
cnll_help = []
knll_help = []
indxMax = 0
for n in range(self.num_lens-1):
if knll[n] < knll[n+1]:
indxMax = n+1
else:
break
for n in range (self.num_lens):
if n <= indxMax:
cnll_help.append(float(cnll[indxMax-n]))
knll_help.append(float(knll[indxMax-n]))
else:
cnll_help.append(float(cnll[n-indxMax-indxShift]))
knll_help.append(float(knll[n-indxMax-indxShift]))
self.cnll = cnll_help
self.knll = knll_help
return self
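# Illustrative usage sketch of NonlinearInsertion (kept as comments; the length and phase
# values here are hypothetical, not taken from the original run):
# insertion = NonlinearInsertion(length=1.8, phase=0.3, t=0.1, c=0.01, num_lens=20).generate_lens(1)
# plotParamLens(insertion.s_vals, insertion.knll, insertion.cnll,
#               'knll of nonlinear lenses', 'cnll of nonlinear lenses')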
# Pickle helper is not necessary but is retained for this example
#
class Pickle_helper:
__getstate_manages_dict__ = 1
def __init__(self, *args):
self.args = args
def __getinitargs__(self):
return self.args
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
# Definition of class to ramp nonlinear lens
class Ramp_actions(synergia.simulation.Propagate_actions, Pickle_helper):
#
# Args of 'Ramp_actions' method are:
# 'type' - type of magnification (1 - relative, 2 - absolute),
# 'stepNumber' - current step of magnification,
    # 'strengthLens' - set of strengths 't' of the central lens of the nonlinear insertion for all steps of
    #                  magnification (relative magnification) or set of strengths 't' of all lenses for
    #                  the current step (absolute magnification),
    # 'updateOutputFlag' - flag to output the strength of one of the nonlinear lenses after its magnification
    #                      for the current step,
# controlName - name of lens with maximal strength to use in output for checking of process
# of magnification.
#
#
# The arguments to __init__ are what the Ramp_actions instance is initialized with:
def __init__(self, type,stepNumber,strengthLens,outputFlag,controlName):
selfObject = synergia.simulation.Propagate_actions.__init__(self)
# To recognize attributes of 'selfObject':
# printAttributes(selfObject,'selfObject','synergia.simulation.Propagate_actions.__init__(self)')
# Pickling the arguments to the initializer allows the module to resume
# after checkpointing. They should be in the same order as the arguments to __init__.
Pickle_helper.__init__(self, type,stepNumber,strengthLens,outputFlag,controlName)
self.type = type
self.stepNumber = stepNumber
self.strengthLens = strengthLens
self.outputFlag = outputFlag
self.controlName = controlName
def turn_end_action(self, stepper, bunch, turn_num):
#---------------------------
# For checking:
# testObject = stepper.get_lattice_simulator().get_lattice()
# To recognize attributes of 'testObject':
# printAttributes(testObject,'testObject','stepper.get_lattice_simulator().get_lattice()')
# print "testName = '{}'".format(testObject.get_name())
#---------------------------
# Relative magnification:
if self.type == 1:
if self.stepNumber == 0:
self.multiplier = self.strengthLens[0]
print "Initialization lattice (relative magnification): Step ",self.stepNumber, \
", multiplier = ",self.multiplier
else:
self.multiplier = self.strengthLens[self.stepNumber]/self.strengthLens[self.stepNumber-1]
# Output title for checking of variables update:
print "Modified lattice (relative magnification): Step ",self.stepNumber, \
", multiplier = ",self.multiplier
for element in stepper.get_lattice_simulator().get_lattice().get_elements():
# To recognize attributes of 'element':
# printAttributes(element,'element', \
# 'stepper.get_lattice_simulator().get_lattice().get_elements()')
if element.get_type() == "nllens":
old_knll = element.get_double_attribute("knll")
new_knll = self.multiplier*old_knll
element.set_double_attribute("knll", new_knll)
                    # Output for checking the variables update; only the control lens (maximal strength) is reported:
if ((self.outputFlag == 1) and (element.get_name() == self.controlName)):
print element.get_name(),": knll=",old_knll," --> ",new_knll
# Absolute magnification:
if self.type == 2:
# Output title for checking of variables update:
print "Modified lattice (absolute magnification): Step ",self.stepNumber
crrntLens = 0
for element in stepper.get_lattice_simulator().get_lattice().get_elements():
# To recognize attributes of 'element':
# printAttributes(element,'element', \
# 'stepper.get_lattice_simulator().get_lattice().get_elements()')
if element.get_type() == "nllens":
old_knll = element.get_double_attribute("knll")
new_knll = self.strengthLens[crrntLens]
element.set_double_attribute("knll", new_knll)
crrntLens += 1
                    # Output for checking the variables update; only the control lens (maximal strength) is reported:
if ((self.outputFlag == 1) and (element.get_name() == self.controlName)):
print element.get_name(),": knll=",old_knll," --> ",new_knll
stepper.get_lattice_simulator().update()
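#
# Illustrative sketch (not called anywhere): constructing a 'Ramp_actions' instance for the
# relative type of magnification. The values below (5 steps from 1.0 to 2.0) and the control
# lens name 'n.11' are assumptions for illustration only; in the real run they are prepared
# inside the 'simulation' method.
def example_ramp_actions():
    strengthSteps = np.linspace(1., 2., 5)
    # type = 1 (relative), stepNumber = 0, output of the control lens enabled:
    return Ramp_actions(1, 0, strengthSteps, 1, 'n.11')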
def t_on_knll_function(l0,mu0,cval,lensNumb):
#
    # "Reverse" dependence of the dimensionless strength 't' of the central nonlinear lens on the
    # parameter 'knll' of this lens
#
nPoints = 50
knll = np.zeros(nPoints)
t = np.zeros(nPoints)
knll_logMin = math.log10(1.e-7)
knll_logMax = math.log10(1.e-4)
# Focal length f0 of the insertion (m):
f0 = l0/4.0*(1.0+1.0/np.tan(np.pi*mu0)**2)
# print "f0 = ",f0," m"
# Coordinate of the centers of the nonlinear lenses (m):
first_lens_center = .5*(l0/lensNumb)
last_lens_center = l0 - first_lens_center
s_vals = np.linspace(first_lens_center,last_lens_center,lensNumb)
# print "s_val =",s_vals
    # Coordinate of the center of the nonlinear lens in the middle of the nonlinear insertion (m):
    s_center = s_vals[(lensNumb+1)/2]
# Structural beta-function in the nonlinear magnet (m):
beta_center = l0*(1.-s_center*(l0-s_center)/l0/f0)/np.sqrt(1.0-(1.0-l0/2.0/f0)**2)
cnll_center = cval*np.sqrt(beta_center)
# print "s_center = ",s_center," m, beta_center = ",beta_center," m, cnll_center = ",cnll_center," m"
for n in range(nPoints):
knll_log10 = knll_logMin + n*(knll_logMax - knll_logMin)/nPoints
knll[n] = math.pow(10.,knll_log10)
t[n] = knll[n]*beta_center**2/(l0/lensNumb*cnll_center**2)
fig_10 = plt.figure(figsize=(15,5))
gs_10 = gridspec.GridSpec(1, 2, width_ratios=[1,1])
ax_10 = plt.subplot(gs_10[0])
# plt.semilogx(knll,t,'-x',color='r')
plt.loglog(knll,t,'-x',color='r')
ax_10.set_xlabel('knnl, m',color='m',fontsize=14)
    ax_10.set_ylabel('Strength Parameter of the central lens, t',color='m',fontsize=14)
# Attempt to change number of grid lines:
# start, end = ax_10.get_xlim()
# ax_10.xaxis.set_ticks(np.arange(start, end, (end-start)/30))
title_t = "Nonlinear Insertion ({} lenses): L={:.2f} m, phase= {:.2f}, c={:.2f} m^1/2". \
format(lensNumb,l0, mu0, cval)
ax_10.set_title(title_t,color='m',fontsize=14)
ax_10.grid(True)
fig_10.tight_layout()
plt.show()
return
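#
# Illustrative sketch of the forward relation inverted above: for the central lens
# knll = t*(L/N)*(cnll/beta)^2 with cnll = c*sqrt(beta), as in 'generate_lens'. The default
# arguments (L = 1.8 m, mu0 = 0.3, c = 0.01 m^1/2, N = 20, t = 0.1) and the helper name
# 'example_t_to_knll' are assumptions for illustration only.
def example_t_to_knll(l0 = 1.8, mu0 = 0.3, cval = 0.01, lensNumb = 20, t = 0.1):
    f0 = l0/4.0*(1.0+1.0/np.tan(np.pi*mu0)**2)
    s_vals = np.linspace(.5*(l0/lensNumb), l0-.5*(l0/lensNumb), lensNumb)
    s_center = s_vals[(lensNumb+1)/2]
    beta_center = l0*(1.-s_center*(l0-s_center)/l0/f0)/np.sqrt(1.0-(1.0-l0/2.0/f0)**2)
    cnll_center = cval*np.sqrt(beta_center)
    return l0/lensNumb*t*(cnll_center/beta_center)**2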
def lawsMagnification(t_i,t_f,steps):
# For relative magnification: t_i = 1., t_f = magnification:
#
# Three laws of magnification are in use
#
# 1) Linear: for step number n
# t(n) = t_i + (t_f-t_i)*n/(N-1) for n = 0,1,...,N-1 .
tLin = np.zeros(steps)
for n in range(steps):
tLin[n] = t_i+n*(t_f-t_i)/(steps-1)
# 2) Parabolic: for step number n
# t(n) = t_i + (t_f-t_i)*n^2/(N-1)^2 for n = 0,1,...,N-1 .
tPar= np.zeros(steps)
for n in range(steps):
tPar[n] = t_i+n**2*(t_f-t_i)/(steps-1)**2
# 3) Smooth sign-function: for step number n
# t(n) = .5*(t_f+t_i) + .5*(t_f-t_i)*tanh(x(n)), where
# x(n) = (6*n-3*(N-1))/(N-1) for n=0,1,...,N-1 .
# In this approach x(0) = -3., x(N-1) = 3.; so, tanh(3.) = - tanh(-3.) = .9951
tSSF= np.zeros(steps)
for n in range(steps):
x = (6.*n-3.*(steps-1))/(steps-1)
tSSF[n] = .5*(t_f+t_i)+.5*(t_f-t_i)*np.tanh(x)
# Plotting all cases:
step = range(steps)
tMin = .975*min(tLin)
tMax = 1.025*max(tLin)
fig = plt.figure(figsize=(15,5))
gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
ax0 = plt.subplot(gs[0])
plt.plot(step,tLin,'-x',color='r')
x0Title = 'Linear Magnification'
ax0.set_title(x0Title,color='m',fontsize='16')
ax0.set_xlim([-1,steps+1])
ax0.set_ylim([tMin,tMax])
ax0.set_xlabel('Step n',color='m',fontsize='14')
ax0.set_ylabel('t',color='m',fontsize='14')
ax0.grid(True)
ax1 = plt.subplot(gs[1])
plt.plot(step,tPar,'-x',color='r')
x1Title = 'Parabolic Magnification'
ax1.set_title(x1Title,color='m',fontsize='16')
ax1.set_xlim([-1,steps+1])
ax1.set_ylim([tMin,tMax])
ax1.set_xlabel('Step n',color='m',fontsize='14')
ax1.set_ylabel('t',color='m',fontsize='14')
ax1.grid(True)
ax2 = plt.subplot(gs[2])
plt.plot(step,tSSF,'-x',color='r')
x2Title = 'Smooth Sign-function Magnification'
ax2.set_title(x2Title,color='m',fontsize='16')
ax2.set_xlim([-1,steps+1])
ax2.set_ylim([tMin,tMax])
ax2.set_xlabel('Step n',color='m',fontsize='14')
ax2.set_ylabel('t',color='m',fontsize='14')
ax2.grid(True)
fig.tight_layout()
plt.show()
selection = int(raw_input("\nYour selection of the law magnification \
\n(1 - linear, 2 - parabolic, 3 - smooth sign-function; -1 - exit): "))
return selection
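#
# Illustrative sketch of the three laws above without plotting or interactive input; it returns
# the three arrays so they can be inspected directly. The default values t_i = 1.0, t_f = 2.0,
# steps = 10 and the helper name are assumptions for illustration only.
def example_magnification_laws(t_i = 1.0, t_f = 2.0, steps = 10):
    n = np.arange(steps)
    tLin = t_i + n*(t_f-t_i)/(steps-1)
    tPar = t_i + n**2*(t_f-t_i)/(steps-1)**2
    x = (6.*n-3.*(steps-1))/(steps-1)
    tSSF = .5*(t_f+t_i) + .5*(t_f-t_i)*np.tanh(x)
    return tLin, tPar, tSSF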
#
# Main method 'simulation'
#
def simulation():
#
# Main predefined parameters of the nonlinear insertion:
insrtn_l0 = 1.8 # total length, m
insrtn_mu0 = .3 # phase, rad (/2pi)
insrtn_c = .01 # aperture factor, m^(1/2)
num_lens = 20 # number of lens inside insertion
#
# Interactive input of the parameters for simulation:
#
particlesInBunch = int(raw_input('\nTotal number of particles (= -1 to interrupt simulation):'))
if particlesInBunch == -1:
return
    totalTurns = int(raw_input('\nTotal number of turns (= -1 to interrupt simulation):'))
if totalTurns == -1:
return
updateAfterTurns = int(raw_input( \
    '\nPeriodicity (in turns) to update the parameters and distribution plots \n(nonlinear structure; = -1 to interrupt simulation):'))
if updateAfterTurns == -1:
return
    stepsInMgnfctn = int(totalTurns/updateAfterTurns)
print "steps for magnification: ",stepsInMgnfctn
updateOutputFlag = int(raw_input('\nupdateOutputFlag (0 - no, 1 - yes, -1 - to interrupt simulation):'))
if updateOutputFlag == -1:
return
magnificationType = int(raw_input( \
'\nMagnification type \n(1 - relative, 2 - absolute, 0 - to interrupt simulation):'))
if magnificationType == 0:
return
else:
if magnificationType == 1:
mgnfctnFctr = float(raw_input( \
"\nFactor of relative magnification (RM) of the strength 't' of all (!) nonlinear lenses \n (RM = t_f/t_i; -1. - to interrupt simulation):"))
if mgnfctnFctr == -1.:
return
else:
t_i = 1.
t_f = mgnfctnFctr
else:
print "\nInformation for help (20 nonlinear lenses inside of the insertion): \n"
t_on_knll_function(insrtn_l0,insrtn_mu0,insrtn_c,20)
t_i = float(raw_input( \
"\nInitial value 't_i' of the strength of the central (!) nonlinear lens \n (-1.- to interrupt simulation):"))
if t_i == -1.:
return
t_f = float(raw_input( \
"\nFinal value 't_f' of the strength of nonlinear lens \n (-1.- to interrupt simulation):"))
if t_f == -1.:
return
print ""
law = lawsMagnification(t_i,t_f,stepsInMgnfctn)
    print 'Your selection of the law of magnification: ', law
if law == -1:
return
# Input data for simulation:
print "\n################################################################\n###"
print "### Parameters for simulation:\n###"
print "### Particles in the bunch = ",particlesInBunch
print "### Total number of turns = ",totalTurns
print "### Periodicity (in turns) to update the parameters = ",updateAfterTurns
print "### magnificationType = ",magnificationType
if magnificationType == 1:
print "### Factor of relative magnification (RM) = ",mgnfctnFctr
if magnificationType == 2:
print "### For absolute magnification (AM) initial value t_i = ",t_i
print "### For absolute magnification (AM) final value t_f = ",t_f
laws = ['linear', 'parabolic', 'smooth sign-function']
print "### Law of magnification: ",laws[law-1]
print "### Steps in magnification: ",stepsInMgnfctn
print "###\n### Predefined parameters for nonlinear insertion:\n###"
print "### Length = ",insrtn_l0," m"
print "### Phase = ",insrtn_mu0," rad (/2pi)"
print "### Aperture factor = ",insrtn_c," m^(1/2)"
print "### Number of lens inside insertion = ",num_lens
print "###\n################################################################"
#
# For relative type of maginfication (magnificationType = 1):
#
if magnificationType == 1:
#
# t_i = 1. and t_f is total factor of magnification.
        # So, the 1D-array 'strengthLens[0:stepsInMgnfctn]' describes the current value of the
        # strength (knll) of a lens for the current step n; the 1D-array 'magnifications[0:stepsInMgnfctn]'
        # describes the magnification factor used to pass from old_knll_value = knll[n-1] to
        # new_knll_value = knll[n] on step n:
        #               new_knll_value = magnifications[n]*old_knll_value .
        # The factor 'magnifications' is the same for all lenses of the nonlinear insertion!
        # (A standalone sketch of this bookkeeping is given after the end of 'simulation'.)
#
strengthLens = np.zeros(stepsInMgnfctn)
magnifications = np.zeros(stepsInMgnfctn)
totalMgnfcn = 1.
#
# For absolute magnification (magnificationType = 2):
#
if magnificationType == 2:
#
        # parameters t_i and t_f characterize only the central lens of the nonlinear insertion.
        # So, the strengths 't' of all other lenses must be recalculated according to the
        # distribution of the beta-function inside the insertion by using the method 'generate_lens'.
        # The 1D-array 'strengthLens[0:stepsInMgnfctn]' describes the value of the strength
        # of the central lens only, for the current step n.
#
strengthLens = np.zeros(stepsInMgnfctn)
for n in range(stepsInMgnfctn):
if law == 1:
# 1) Linear: for step number n
# t(n) = t_i + (t_f-t_i)*n/(N-1) for n = 0,1,...,N-1 .
strengthLens[n] = t_i+n*(t_f-t_i)/(stepsInMgnfctn-1)
elif law == 2:
# 2) Parabolic: for step number n
# t(n) = t_i + (t_f-t_i)*n^2/(N-1)^2 for n = 0,1,...,N-1 .
strengthLens[n] = t_i+n**2*(t_f-t_i)/(stepsInMgnfctn-1)**2
elif law == 3:
# 3) Smooth sign-function: for step number n
# t(n) = .5*(t_i+t_f) + .5*(t_f-t_i)*tanh(x(n)), where
# x(n) = (6*n-3*(N-1))/(N-1) for n=0,1,...,N-1 .
# In this approach x(0) = -3., x(N-1) = 3.; so, tanh(3.) = - tanh(-3.) = .9951
x = (6.*n-3.*(stepsInMgnfctn-1))/(stepsInMgnfctn-1)
strengthLens[n] = .5*(t_i+t_f)+.5*(t_f-t_i)*np.tanh(x)
if magnificationType == 1:
if n == 0:
print "\nRelative magnification:"
magnifications[n] = strengthLens[n]
else:
magnifications[n] = strengthLens[n]/strengthLens[n-1]
print " magnifications[{}] = {}".format(n,magnifications[n])
totalMgnfcn *= magnifications[n]
if n == stepsInMgnfctn-1:
print "Total relative magnification (RM) will be = ",totalMgnfcn
if magnificationType == 2:
if n == 0:
                print \
"\nStrengths 't' and corresponding values 'knll' of the central lens for absolute magnification:"
            # Calculate the value 'knll' which corresponds to the current value of the strength 't' = strengthLens[n]:
f0Crrnt = insrtn_l0/4.0*(1.0+1.0/np.tan(np.pi*insrtn_mu0)**2)
first_lens_center = .5*(insrtn_l0/num_lens)
last_lens_center = insrtn_l0 - first_lens_center
            # Coordinates of the centers of the nonlinear lenses in the nonlinear insertion (m):
s_vals = np.linspace(first_lens_center,last_lens_center,num_lens)
# print "s_val =",s_vals
            # Coordinate of the center of the nonlinear lens in the middle of the nonlinear insertion (m):
s_center = s_vals[(num_lens+1)/2]
# Structural beta-function of the nonlinear magnet (m):
beta_center = insrtn_l0*(1.-s_center*(insrtn_l0-s_center)/insrtn_l0/f0Crrnt)/ \
np.sqrt(1.0-(1.0-insrtn_l0/2.0/f0Crrnt)**2)
cnll_center = insrtn_c*np.sqrt(beta_center)
# print "s_center = ",s_center," m, beta_center = ",beta_center, \
# " m, cnll_center = ",cnll_center," m"
knll_center = insrtn_l0/num_lens*strengthLens[n]*(cnll_center/beta_center)**2
            print " t[{}] = {} ==> knll = {} m".format(n,strengthLens[n],knll_center)
#
# Simulated lattice:
#
fileIOTA = ".../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx"
print "\nIOTA Nonlinear lattice: {} \n".format(fileIOTA)
lattice = synergia.lattice.MadX_reader().get_lattice("iota", \
"../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx")
# To recognize attributes of 'lattice':
# printAttributes(lattice,'lattice','synergia.lattice.MadX_reader().get_lattice')
# fileIOTA = ".../ioptics/ioptics/lattices/Iota8-4/lattice_8-4_1IO_nll_forTest.madx"
# print "\nIOTA Nonlinear lattice: {} \n".format(fileIOTA)
# lattice = synergia.lattice.MadX_reader().get_lattice("iota", \
# "../ioptics/ioptics/lattices/Iota8-4/lattice_8-4_1IO_nll_forTest.madx")
# For checking only:
# k = 0
# for elem in lattice.get_elements():
# if k == 0:
# printAttributes(elem,'elem','lattice.get_elements')
# k += 1
# if elem.get_type() == 'nllens':
# elem.set_string_attribute("extractor_type", "chef_propagate")
# else:
# elem.set_string_attribute("extractor_type", "chef_map")
# print "elem ({}): name = {}, type = {}, stringAttrbt ={}". \
# format(k,elem.get_name(),elem.get_type(),elem.get_string_attribute("extractor_type"))
knllLenses = []
nameLenses = []
placeLenses = []
numberLenses = 0
for element in lattice.get_elements():
if element.get_type() == 'nllens':
knllLenses.append(float(element.get_double_attribute("knll")))
nameLenses.append(element.get_name())
placeLenses.append(int(numberLenses))
numberLenses += 1
num_lens = numberLenses # number of lens inside insertion
# print "placeLenses: ",placeLenses
# print "nameLenses: ",nameLenses
# print "knllLenses: ",knllLenses
# print "Number of lenses: ",numberLenses
# Name of lens with maximal strength to use in output for checking of process of magnification
controlName = nameLenses[np.argmax(knllLenses)]
# print "controlName: ",controlName
startSequenceLenses = 1 # First lens has minimal knll
if knllLenses[0] > knllLenses[1]:
startSequenceLenses = 2 # First lens has maximal knll
# print "startSequenceLenses = ",startSequenceLenses
# Original version:
# lattice_simulator = synergia.simulation.Lattice_simulator(lattice, 2)
# Bunch:
# bunch = synergia.optics.generate_matched_bunch_transverse(lattice_simulator, 1e-6, \
# 1e-6, 1e-3, 1e-4, 1e9, 10000, seed=1234)
# YuE version:
stepperCrrnt = synergia.simulation.Independent_stepper_elements(lattice,2,3)
lattice_simulator_Crrnt = stepperCrrnt.get_lattice_simulator()
# Bunch:
bunch_origin = synergia.optics.generate_matched_bunch_transverse( \
lattice_simulator_Crrnt, 1e-6, 1e-6, 1e-3, 1e-4, 1e9, particlesInBunch, seed=1234)
# For checking:
# To recognize attributes of 'bunch_origin':
# printAttributes(bunch_origin,'bunch_origin','synergia.optics.generate_matched_bunch_transverse')
# particlesTmp = bunch_origin.get_local_particles()
# To recognize attributes of 'particlesTmp':
# printAttributes(particlesTmp,'particlesTmp','bunch_origin.get_local_particles')
# 'particlesCrrnt' is 2D array: (numberoFParticle,(x,x',y,y',s,dE,ID));
# particlesCrrnt = particlesTmp.real
# print " particlesCrrnt:"
# for prtcl in range(5):
# print "x (m) for particle {}: {}".format(prtcl,particlesCrrnt[prtcl,0])
# print "y (m) for particle {}: {}".format(prtcl,particlesCrrnt[prtcl,2])
# print "s (m) for particle {}: {}".format(prtcl,particlesCrrnt[prtcl,4])
# End of checking
#-------------------------------------------------
# For checking only:
#
# 1) Attributes:
# printAttributes(bunch,'bunch','synergia.optics.generate_matched_bunch_transverse')
# 2) Distributions X-Y, X-X', Y-Y' using method 'pltbunch.plot_bunch':
loclTitle = "\nThese distributions were constructed using "
    loclTitle += "'synergia.optics.generate_matched_bunch_transverse' method:\n"
print loclTitle
pltbunch.plot_bunch(bunch_origin)
# 3) Distributions X-Y, X-X', Y-Y' using method 'plotcoordDistr':
bunchParticles = bunch_origin.get_local_particles()
# To recognize attributes of 'bunchParticles':
# printAttributes(bunchParticles,'bunchParticles', 'bunch.get_local_particles()')
plotcoordDistr(bunchParticles)
#--------------------------------------------------
# Steppers (YuE: both case 'splitoperator' and 'independent' work properly!):
# stepper = 'splitoperator'
stepper = 'independent'
if stepper == "splitoperator":
# Use the Split operator stepper with a dummy collective operator (with evenly-spaced steps)
no_op = synergia.simulation.Dummy_collective_operator("stub")
stepper = synergia.simulation.Split_operator_stepper(
lattice_simulator_Crrnt, no_op, 4)
elif stepper == "independent":
# Use the Independent particle stepper (by element)
stepper = synergia.simulation.Independent_stepper_elements(
lattice_simulator_Crrnt, 4)
else:
sys.stderr.write("fodo.py: stepper must be either 'independent' or 'splitoperator'\n")
exit(1)
# Bunch simulator:
bunch_simulator = synergia.simulation.Bunch_simulator(bunch_origin)
    # These diagnostics are not used:
# Diagnostics:
# diagnostic_flag = 'None'
# for part in range(0, 0):
# bunch_simulator.add_per_step(synergia.bunch.Diagnostics_track("step_track_%02d.h5" % part,
# part))
# if diagnostic_flag == 'step_full2':
# bunch_simulator.add_per_step(synergia.bunch.Diagnostics_full2("step_full2.h5"))
# if diagnostic_flag == 'step_particles':
# bunch_simulator.add_per_step(synergia.bunch.Diagnostics_particles("step_particles.h5"))
# for part in range(0, 0):
# bunch_simulator.add_per_turn(synergia.bunch.Diagnostics_track("turn_track_%02d.h5" % part,
# part))
# if diagnostic_flag == 'turn_full2':
# bunch_simulator.add_per_turn(synergia.bunch.Diagnostics_full2("turn_full2.h5"))
# if diagnostic_flag == 'turn_particles':
# bunch_simulator.add_per_turn(synergia.bunch.Diagnostics_particles("turn_particles.h5"))
#---------------------------
# Propagate
#---------------------------
# Ramp action is instantiated and passed to the propagator instance during the propagate method
print "\n-------------------\n"
print " Nonlinear parameters will be CHANGED after each {} turns".format(updateAfterTurns)
print "\n-------------------\n"
    # Re-setting the original 'bunch_origin' object, because it was changed (for some unknown reason)
# while pulling a 'bunch' object through a fixed number of turns in a linear structure
bunch_origin = synergia.optics.generate_matched_bunch_transverse(lattice_simulator_Crrnt, 1e-6, \
1e-6, 1e-3, 1e-4, 1e9, particlesInBunch, seed=1234)
# For checking (to verify that particles from "old" and "new" 'bunch_origin' objects are the same):
# particlesOrg4 = bunch_origin.get_local_particles()
# To recognize attributes of 'particlesOrg2':
# printAttributes(particlesOrg4,'particlesOrg4','bunch_origin.get_local_particles')
# End of checking (result: particles in both "old" and "new" objects are the same!)
bunch = bunch_origin
# For checking:
# particlesTmp2 = bunch.get_local_particles()
# To recognize attributes of 'particlesTmp2':
# printAttributes(particlesTmp2,'particlesTmp2','bunch.get_local_particles')
# particlesCrrnt2 = particlesTmp2.real
# print " particlesCrrnt (again for nonlinear):"
# for prtcl in range(5):
# print "x (m) for particle {}: {}".format(prtcl,particlesCrrnt2[prtcl,0])
# print "y (m) for particle {}: {}".format(prtcl,particlesCrrnt2[prtcl,2])
# End of checking
bunch_simulator = synergia.simulation.Bunch_simulator(bunch)
propagator = synergia.simulation.Propagator(stepper)
# propagator.set_checkpoint_period(0)
# propagator.set_checkpoint_with_xml(True)
# tracksNonLinear is 3D array: (totalTurns,bunchParticles,(x,y))
tracksNonLinear = np.zeros((totalTurns,particlesInBunch,2))
nUpdate = 1
stepOfMgnfcn = 1
totalTimeCPU = 0.
for turnCrrnt in range(totalTurns):
timeStart = os.times()
#
# Without of initialization:
# propagatorCrrnt = propagator.propagate(bunch_simulator, 1, 1, 0)
# To recognize attributes of 'propagatorCrrnt':
# printAttributes(propagatorCrrnt,'propagatorCrrnt', \
# 'propagator.propagate(bunch_simulator, 1, 1, 0)')
if turnCrrnt == 0:
#------------------
# Initialization of the lattice before first turn:
#
if magnificationType == 1:
ramp_actions = Ramp_actions(magnificationType,0,strengthLens, \
updateOutputFlag,controlName)
if magnificationType == 2:
dataInsertion = \
NonlinearInsertion(insrtn_l0, insrtn_mu0, strengthLens[stepOfMgnfcn], \
insrtn_c, num_lens).generate_lens(startSequenceLenses)
knll_lens = dataInsertion.knll
ramp_actions = Ramp_actions(magnificationType,0,knll_lens, \
updateOutputFlag,controlName)
propagatorCrrnt = propagator.propagate(bunch_simulator, ramp_actions, 1, 1, 0)
#
# End of initialization of the lattice before first turn
#------------------
        # bunchParticles is 2D array: (numberParticles,(x,x',y,y',s,dE,ID))
bunchParticles = bunch.get_local_particles()
# coordsTracks is 2D array: (bunchParticles,(x,y))
coordsTracks = tracksCoords(bunchParticles)
numbPartcls = bunchParticles.shape[0]
for prtcl in range(numbPartcls):
for k in range(2):
tracksNonLinear[turnCrrnt,prtcl,k] = coordsTracks[prtcl,k]
# if prtcl < 3:
# print "tracksNonLinear (turn {}) for particle {}: x = {} mm, y = {} mm". \
# format(turnCrrnt,prtcl,tracksNonLinear[turnCrrnt,prtcl,0], \
# tracksNonLinear[turnCrrnt,prtcl,1])
turnNumber = turnCrrnt+1
timeEnd = os.times()
timeOfTurn = float(timeEnd[0] - timeStart[0]) # CPU time in seconds
totalTimeCPU += timeOfTurn
print ('Turn %3d is completed (CPU time = %6.3f seconds)' % (turnNumber, timeOfTurn))
if turnCrrnt == totalTurns-1:
break
sys.stdout.flush()
if nUpdate == updateAfterTurns:
timeStart = os.times()
print "\n"
plotcoordDistr(bunchParticles)
#== #
#== # Possibility for future to redefine parameters "in-fly" of simulation:
#== #
#== if updateInsideSmlnFlag == 1:
            #== print "Old multiplier for knl = {}".format(knlMultiplier)
            #== # Multiplier 'knlMultiplier' is the same for all nonlinear lenses:
            #== knlMultiplier = float(raw_input('\nNew multiplier for knl:'))
            #== print "Old multiplier for cnll = {}".format(cnllMultiplier)
            #== # IF NEEDED: multiplier 'cnllMultiplier' is the same for all nonlinear lenses:
            #== cnllMultiplier = float(raw_input('\nNew multiplier for cnll:'))
if magnificationType == 1:
#
# Relative magnification - for current step 'stepOfMgnfcn' > 1 multipliers for all lenses are the same
# and equal to ratio strengthLens[stepOfMgnfcn]/strengthLens[stepOfMgnfcn-1] (except the first step):
#
if stepOfMgnfcn == 0:
knlMultiplier = strengthLens[stepOfMgnfcn]
else:
knlMultiplier = strengthLens[stepOfMgnfcn]/strengthLens[stepOfMgnfcn-1]
# print "Step for relative magnification ",stepOfMgnfcn,": knlMultiplier = ",knlMultiplier
#
            # REMINDER regarding the 'Ramp_actions' class!
#
# Args are:
# magnificationType - type of magnification (1 - relative, 2 - absolute),
# stepOfMgnfcn - current step of magnification,
            #   strengthLens - set of strengths 't' of the central lens of the nonlinear insertion for all steps of
            #                  magnification (relative magnification) or set of strengths 't' of all lenses for
            #                  the current step (absolute magnification),
            #   updateOutputFlag - flag to output the strength of one of the nonlinear lenses after its magnification
            #                      for the current step,
# controlName - name of lens with maximal strength to use in output for checking of process of
# magnification.
#
ramp_actions = Ramp_actions(magnificationType,stepOfMgnfcn,strengthLens, \
updateOutputFlag,controlName)
if magnificationType == 2:
#
# Absolute magnification - for current step stepOfMgnfcn the strength 't' for central nonlinear lens
# equals strengthLens[stepOfMgnfcn]
#
#
            # REMINDER regarding the 'NonlinearInsertion' class!
#
# Input args:
# length: the length of the nonlinear insertion (float, m);
# phase: the phase advance modulo 2pi through the nonlinear insertion;
# t: the strength parameter for center of the insertion (float, dimensionless, defaults to 0.1);
# c: the aperture parameter for center of the insertion
# (float, m^1/2, is defined by poles in the x-axis, defaults to 0.01);
            #    num_lens: the number of nonlinear lenses that make up the insertion (int, defaults to 20).
#
# Output attributes are the same as input one.
#
#
            # REMINDER regarding the 'generate_lens' method!
#
# Input arg:
            #   startSequenceLenses - flag describing the distribution of the 'knll' parameter over the lenses
            # (1 - the nonlinear insertion in the *.madx description of the IOTA ring starts from the lens with minimal strength,
            #  2 - the nonlinear insertion in the *.madx description of the IOTA ring starts from the lens with maximal strength).
#
# Output attributes:
#
            #   same as the output of the 'NonlinearInsertion' class, plus:
# s_vals (ndArray) - coordinates of the center of each nonlinear lens (float ndArray, m);
# knll (ndArray) - "strength" of each nonlinear lens (float ndArray, m);
# cnll (ndArray) - aperture parameters for each nonlinear lens (float ndArray, m^1/2).
#
dataInsertion = \
NonlinearInsertion(insrtn_l0, insrtn_mu0, strengthLens[stepOfMgnfcn], insrtn_c, num_lens). \
generate_lens(startSequenceLenses)
coords_lens = dataInsertion.s_vals
knll_lens = dataInsertion.knll
cnll_lens = dataInsertion.cnll
# if stepOfMgnfcn > 0:
# print "Step for absolute magnification ",stepOfMgnfcn, \
# ": for central lens current 't' = ",strengthLens[stepOfMgnfcn]
# print "coords_lens = ",coords_lens
# print "knll_lens = ",knll_lens
# print "cnll_lens = ",cnll_lens
# title_k for knll-plot, title_c - for cnll-plot:
title_k = "Nonlinear Insertion: L={:.1f}m, phase= {:.2f}, t={:.4f}, c={:.2f}m^1/2". \
format(insrtn_l0, insrtn_mu0, strengthLens[stepOfMgnfcn], insrtn_c)
# print "title_k = ",title_k
title_c = title_k
# print "title_c = ",title_c
plotParamLens(coords_lens,knll_lens,cnll_lens,title_k,title_c)
# print "Step ",stepOfMgnfcn,": knll = ",knll_lens
ramp_actions = Ramp_actions(magnificationType,stepOfMgnfcn,knll_lens, \
updateOutputFlag,controlName)
stepOfMgnfcn += 1
nUpdate = 0
print "\n After {} turns:\n".format(turnNumber)
propagatorCrrnt = propagator.propagate(bunch_simulator, ramp_actions, 1, 1, 0)
timeEnd = os.times()
timeUpdateAndPlot = float(timeEnd[0] - timeStart[0]) # CPU time in seconds
totalTimeCPU += timeUpdateAndPlot
print ('\nUpdate and plotting are completed (CPU time = %6.3f seconds)\n' % timeUpdateAndPlot)
nUpdate += 1
# for prtcl in range(5):
# print "x (mm) for particle {}: {}".format(prtcl,tracksNonLinear[:,prtcl,0])
# print "y (mm) for particle {}: {}".format(prtcl,tracksNonLinear[:,prtcl,1])
print "\n\n Final results: \n\n"
plotcoordDistr(bunchParticles)
plotTracks(tracksNonLinear,5)
print ('\nFor %5d turns CPU time = %6.3f seconds\n' % (totalTurns, totalTimeCPU))
return
#
# End of main method 'simulation'
#
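#
# Illustrative sketch of the bookkeeping used inside 'simulation' for the relative type of
# magnification: the per-step multipliers are ratios of successive strengths, so their product
# reproduces the full factor t_f (with t_i = 1). The linear law and the default values
# (t_f = 2.0, 5 steps) are assumptions for illustration only.
def example_relative_multipliers(t_f = 2.0, steps = 5):
    strengthLens = [1.+n*(t_f-1.)/(steps-1) for n in range(steps)]
    magnifications = [strengthLens[0]]
    for n in range(1, steps):
        magnifications.append(strengthLens[n]/strengthLens[n-1])
    total = 1.
    for factor in magnifications:
        total *= factor
    return magnifications, total   # 'total' equals t_f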
#========================================================
fileIOTA = ".../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx"
# fileIOTA = ".../ioptics/ioptics/lattices/Iota8-4/lattice_8-4_1IO_nll_forTest.madx"
print "\nIOTA Nonlinear lattice: {} \n".format(fileIOTA)
lattice = synergia.lattice.MadX_reader().get_lattice("iota", \
"../ioptics/ioptics/lattices/Iota8-2/lattice_1IO_nll_center.madx")
# --------- Games -----------------------------
# indices = np.argsort(knllLenses)
# print "indices = ",indices
# for n in range(nLenses+1):
# print n,") name after sorting is ",nameLenses[indices[n]]
# for n in range(nLenses+1):
# print n,") knll after sorting is ",knllLenses[indices[n]]
# for n in range(nLenses+1):
# print n,") place after sorting is ",placeLenses[indices[n]]
# ----------- End of games --------------------
stepperCrrnt = synergia.simulation.Independent_stepper_elements(lattice,2,3)
lattice_simulator_Crrnt = stepperCrrnt.get_lattice_simulator()
# To recognize attributes of 'lattice_simulator_Crrnt':
# printAttributes(lattice_simulator_Crrnt,'lattice_simulator_Crrnt','stepperCrrnt.get_lattice_simulator()')
# slicesHelp = lattice_simulator_Crrnt.get_slices()
# To recognize attributes of 'slicesHelp':
# printAttributes(slicesHelp,'slicesHelp','lattice_simulator_Crrnt.get_slices()')
# Bunch:
bunch_origin = synergia.optics.generate_matched_bunch_transverse(lattice_simulator_Crrnt, 1e-6, \
1e-6, 1e-3, 1e-4, 1e9, 1000, seed=1234)
#
# To compare two methods for drawing of the particles distributions:
#
loclTitle = "\nThese distributions were constructed using \
'synergia.optics.generate_matched_bunch_transverse' method"
loclTitle += "\nand plotted using two methods - 'pltbunch.plot_bunch' from the code synergia"
loclTitle += "\nand 'plotcoordDistr' from this script (to verify method 'plotcoordDistr'):"
print loclTitle
pltbunch.plot_bunch(bunch_origin)
# Distributions X-Y, X-X', Y-Y' using method 'plotcoordDistr':
bunchParticles = bunch_origin.get_local_particles()
# To recognize attributes of 'bunchParticles':
# printAttributes(bunchParticles,'bunchParticles', 'bunch.get_local_particles()')
plotcoordDistr(bunchParticles)
selection = 'loop'
while selection == 'loop':
simulation()
selection = raw_input("\nTo continue the simulation ('yes' or 'no'):")
    print 'Your selection is ', selection
if selection == 'yes':
selection = 'loop'
# if selection == 'no':
# exit(0)
| 40,746
| 1,792
| 339
|
2e52a259e89d5bda865dcd429e1f812444177e48
| 311
|
py
|
Python
|
tools/ExperimentServerTester/src/script.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 15
|
2015-03-12T12:15:41.000Z
|
2021-12-20T17:53:24.000Z
|
tools/ExperimentServerTester/src/script.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 44
|
2015-01-07T09:22:05.000Z
|
2017-01-31T22:44:21.000Z
|
tools/ExperimentServerTester/src/script.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 22
|
2015-01-13T13:55:48.000Z
|
2021-12-16T17:07:00.000Z
|
import time
connect("127.0.0.1", "10039", "weblab")
#test_me("hello")
start_experiment()
send_file("script.py", "A script file")
response = send_command("Test Command")
print "The response is: %s" % response
msg_box("Test Message", "test")
time.sleep(2)
dispose()
disconnect()
| 13.521739
| 40
| 0.633441
|
import time
connect("127.0.0.1", "10039", "weblab")
#test_me("hello")
start_experiment()
send_file("script.py", "A script file")
response = send_command("Test Command")
print "The response is: %s" % response
msg_box("Test Message", "test")
time.sleep(2)
dispose()
disconnect()
| 0
| 0
| 0
|
48cabd08bfb5c7aca6c826bcc5b96062a846eb14
| 3,887
|
py
|
Python
|
source/vsm-dashboard/vsm_dashboard/dashboards/vsm/poolsmanagement/form.py
|
ramkrsna/virtual-storage-manager
|
78125bfb4dd4d78ff96bc3274c8919003769c545
|
[
"Apache-2.0"
] | 172
|
2015-01-07T08:40:17.000Z
|
2019-02-18T07:01:11.000Z
|
source/vsm-dashboard/vsm_dashboard/dashboards/vsm/poolsmanagement/form.py
|
ramkrsna/virtual-storage-manager
|
78125bfb4dd4d78ff96bc3274c8919003769c545
|
[
"Apache-2.0"
] | 83
|
2015-03-06T07:47:03.000Z
|
2018-07-05T15:10:19.000Z
|
source/vsm-dashboard/vsm_dashboard/dashboards/vsm/poolsmanagement/form.py
|
ramkrsna/virtual-storage-manager
|
78125bfb4dd4d78ff96bc3274c8919003769c545
|
[
"Apache-2.0"
] | 125
|
2015-01-05T12:22:15.000Z
|
2019-02-18T07:01:39.000Z
|
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the"License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.core import validators
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.validators import validate_port_range
# from horizon.utils import fields
import logging
from vsm_dashboard.api import vsm as vsm_api
from vsm_dashboard.utils.validators import validate_pool_name
LOG = logging.getLogger(__name__)
| 40.915789
| 137
| 0.654747
|
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the"License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.core import validators
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.validators import validate_port_range
# from horizon.utils import fields
import logging
from vsm_dashboard.api import vsm as vsm_api
from vsm_dashboard.utils.validators import validate_pool_name
LOG = logging.getLogger(__name__)
class CreateErasureCodedPool(forms.SelfHandlingForm):
failure_url = 'horizon:vsm:poolsmanagement:index'
name = forms.CharField(label=_("Pool name"),
max_length=255,
min_length=1,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validate_pool_name])
tag = forms.CharField(label=_("Tag"),
max_length=16,
min_length=1,
error_messages={
'required': _('This field is required.'),})
storage_group = forms.ChoiceField(label=_('Storage Group'))
ec_profile = forms.ChoiceField(label=_('Erasure Coded Profile'))
ec_failure_domain = forms.ChoiceField(label=_('Erasure Coded Failure Domain'))
def __init__(self, request, *args, **kwargs):
super(CreateErasureCodedPool, self).__init__(request, *args, **kwargs)
storage_group_list = []
ec_profile_list = []
ec_failure_domain_list = [('osd', "OSD (default)"), ("zone", "Zone"), ('host', "Host")]
ec_profiles = vsm_api.ec_profiles(self.request)
for k, v in enumerate(ec_profiles):
ec_profile_list.append((v['id'], v['name']))
try:
rsp, group_list= vsm_api.get_storage_group_list(self.request)
for key in group_list:
storage_group_list.append((key, group_list[key]))
except:
msg = _('Failed to get storage_group_list.')
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return False
self.fields['storage_group'].choices = storage_group_list
self.fields['ec_profile'].choices = ec_profile_list
self.fields['ec_failure_domain'].choices = ec_failure_domain_list
def handle(self, request, data):
pass
class RemoveCacheTier(forms.SelfHandlingForm):
failure_url = 'horizon:vsm:poolsmanagement:index'
cache_tier_pool = forms.ChoiceField(label=_('Cache Tier Pool'), required=False)
def __init__(self, request, *args, **kwargs):
super(RemoveCacheTier, self).__init__(request, *args, **kwargs)
cache_tier_pool_list = [('',"Select a Cache Tier Pool")]
pools = vsm_api.pool_status(request)
cache_tier_pool_list += [(pool.pool_id, pool.name) for pool in pools if str(pool.cache_tier_status).startswith("Cache pool for")]
self.fields['cache_tier_pool'].choices = cache_tier_pool_list
def handle(self, request, data):
pass
| 1,477
| 1,279
| 46
|
f1c1a2b775dd8f42670af033d3b68469cf42d8f7
| 4,515
|
py
|
Python
|
icaldump/crawler.py
|
adrien-f/ical-dumper
|
0dc597c77017c59041ae78d3c69854e40019a863
|
[
"MIT"
] | null | null | null |
icaldump/crawler.py
|
adrien-f/ical-dumper
|
0dc597c77017c59041ae78d3c69854e40019a863
|
[
"MIT"
] | null | null | null |
icaldump/crawler.py
|
adrien-f/ical-dumper
|
0dc597c77017c59041ae78d3c69854e40019a863
|
[
"MIT"
] | null | null | null |
import arrow
import requests
from arrow import Arrow
from bs4 import BeautifulSoup
from collections import defaultdict
from icalendar import Calendar, Event, vText, vCalAddress
from hashlib import md5
import json
| 48.031915
| 222
| 0.578295
|
import arrow
import requests
from arrow import Arrow
from bs4 import BeautifulSoup
from collections import defaultdict
from icalendar import Calendar, Event, vText, vCalAddress
from hashlib import md5
import json
class Crawler(object):
def __init__(self, username, password, root_path):
self.username = username
self.password = password
self.root_path = root_path
def _auth(self):
req = requests.post('{}/login_form'.format(self.root_path), {
'form.submitted': 1,
'came_from': self.root_path,
'js_enabled': 0,
'cookies_enabled': None,
'login_name': None,
'pwd_empty': 0,
'__ac_name': self.username,
'__ac_password': self.password,
'submit': 'Se connecter'
}, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'})
req.raise_for_status()
if ('login_form' in req.url):
raise Exception('Could not authenticate user {}.'.format(self.username))
else:
self.cookies = req.history[0].cookies
def _fetch_calendar(self, date):
req = requests.get('{}/emploi_du_temps'.format(self.root_path), {
'date': date.format('MM/DD/YYYY')
}, cookies=self.cookies, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'})
req.raise_for_status()
return req.text
def _parse_calendar(self, calendar, week):
bs = BeautifulSoup(calendar, 'html.parser')
day_offset_map = {}
planning = defaultdict(list)
for day in bs.body.find(id='DivBody').find_all('div', class_='Jour'):
style = day['style'][:-1]
rules = dict(item.strip().split(':') for item in style.split(';'))
left = int(float(rules['left'][:-1]))
if not (100 < left < 190):
continue
day_text = day.find('td').text
day_offset_map[left] = arrow.get(day_text, 'dddd D MMMM', locale='fr_FR').replace(year=week.year).isoformat()
for case in bs.body.find(id='DivBody').select('.Case'):
if "Pas de cours cette semaine" in case.text:
continue
style = case['style'][:-1]
rules = dict(item.strip().split(':') for item in style.split(';'))
left = int(float(rules.get('left', '0.0%')[:-1]))
if not (100 < left < 190):
continue
planning[day_offset_map[left]].append({
'name': case.find('td', class_='TCase').text.title(),
'teacher': list(case.find('td', class_='TCProf').strings)[0].title(),
'group': list(case.find('td', class_='TCProf').strings)[1],
'time': case.find('td', class_='TChdeb').text,
'room': case.find('td', class_='TCSalle').text
})
return planning
def crawl(self, start, end):
self._auth()
planning = {}
for r in arrow.Arrow.span_range('week', start, end):
print('Fetching calendar for week {}'.format(r[0].format('YYYY-MM-DD')))
calendar = self._fetch_calendar(r[0])
planning = {**planning, **self._parse_calendar(calendar, r[0])}
return planning, self._build_ical(planning)
def _build_ical(self, planning):
c = Calendar()
for day, courses in planning.items():
for course in courses:
event = Event()
start_time, end_time = course['time'].split(' - ')
event.add('uid', md5("{course[name]}.{course[teacher]}.{course[teacher]}.{start_time}.{end_time}.{day}".format(course=course, start_time=start_time, end_time=end_time, day=day).encode('utf-8')).hexdigest())
event.add('location', course['room'])
event.add('summary', course['name'])
event.add('dtstart', arrow.get(day).clone().replace(hours=int(start_time.split(':')[0]), minutes=int(start_time.split(':')[1])).datetime)
event.add('dtend', arrow.get(day).clone().replace(hours=int(end_time.split(':')[0]), minutes=int(end_time.split(':')[1])).datetime)
event.add('description', "Prof: {}\nGroupe: {}".format(course['teacher'], course['group']))
c.add_component(event)
return c.to_ical()
| 4,108
| 1
| 185
|
47a36abef43918b317e5caf6a9faf6952120ecad
| 178
|
py
|
Python
|
recipe/run_test.py
|
AnacondaRecipes/pyproj-feedstock
|
5000f3a702d692f508b8994ae84a7e8f7d55fe57
|
[
"BSD-3-Clause"
] | 4
|
2019-04-15T22:42:28.000Z
|
2021-11-09T11:29:36.000Z
|
recipe/run_test.py
|
AnacondaRecipes/pyproj-feedstock
|
5000f3a702d692f508b8994ae84a7e8f7d55fe57
|
[
"BSD-3-Clause"
] | 116
|
2016-03-05T08:22:09.000Z
|
2022-03-28T21:28:40.000Z
|
recipe/run_test.py
|
AnacondaRecipes/pyproj-feedstock
|
5000f3a702d692f508b8994ae84a7e8f7d55fe57
|
[
"BSD-3-Clause"
] | 15
|
2016-03-03T06:34:09.000Z
|
2022-03-18T13:19:21.000Z
|
import os
import sys
import pyproj
from pyproj import Proj
Proj(init="epsg:4269")
# Test pyproj_datadir.
if not os.path.isdir(pyproj.datadir.get_data_dir()):
sys.exit(1)
| 13.692308
| 52
| 0.741573
|
import os
import sys
import pyproj
from pyproj import Proj
Proj(init="epsg:4269")
# Test pyproj_datadir.
if not os.path.isdir(pyproj.datadir.get_data_dir()):
sys.exit(1)
| 0
| 0
| 0
|
66127ade5069a08d2d95e3b0c7cbd430fd9d7d41
| 2,462
|
py
|
Python
|
xontrib_term_integrations/kitty_completions.py
|
jnoortheen/xontrib-term-integrations
|
3c0f29835fb79a521a5d603d862387dcda93c959
|
[
"MIT"
] | 4
|
2022-01-14T08:03:13.000Z
|
2022-03-27T15:26:07.000Z
|
xontrib_term_integrations/kitty_completions.py
|
jnoortheen/xontrib-iterm2
|
3c0f29835fb79a521a5d603d862387dcda93c959
|
[
"MIT"
] | 4
|
2022-01-08T08:01:03.000Z
|
2022-03-07T18:53:26.000Z
|
xontrib_term_integrations/kitty_completions.py
|
jnoortheen/xontrib-term-integrations
|
3c0f29835fb79a521a5d603d862387dcda93c959
|
[
"MIT"
] | null | null | null |
"""Completers for the kitty terminal."""
import contextlib
import os
import subprocess
from xonsh.built_ins import XSH
from xonsh.completers.tools import RichCompletion, contextual_command_completer
from xonsh.parsers.completion_context import CommandContext
def generate_completions_from_string(output: str):
"""Rich completion from multi-line string, each line representing a completion."""
if output:
lines = output.strip().splitlines(keepends=False)
# if there is a single completion candidate then maybe it is over
append_space = len(lines) == 1
for line in lines:
comp = create_rich_completion(line, append_space)
yield comp
@contextual_command_completer
def xonsh_complete(ctx: CommandContext):
    """Completes the kitty command line."""
if not ctx.completing_command("kitty"):
return None
# like fish's
# commandline --tokenize --cut-at-cursor --current-process
tokens = [arg.raw_value for arg in ctx.args[: ctx.arg_index]]
# it already filters by prefix, just return it
return get_completions(*tokens, ctx.prefix)
if __name__ == "__main__":
# small testing won't hurt
from xonsh.main import setup
setup()
print(list(get_completions("kitty", "-")))
print(list(get_completions("kitty", "--")))
print(list(get_completions("kitty", "--d")))
| 28.298851
| 86
| 0.655971
|
"""Completers for the kitty terminal."""
import contextlib
import os
import subprocess
from xonsh.built_ins import XSH
from xonsh.completers.tools import RichCompletion, contextual_command_completer
from xonsh.parsers.completion_context import CommandContext
def create_rich_completion(line: str, append_space=False):
line = line.strip()
if "\t" in line:
cmd, desc = map(str.strip, line.split("\t", maxsplit=1))
else:
cmd, desc = line, ""
# special treatment for path completions.
# not appending space even if it is a single candidate.
if cmd.endswith(os.pathsep):
append_space = False
return RichCompletion(
cmd,
description=desc,
append_space=append_space,
)
def generate_completions_from_string(output: str):
"""Rich completion from multi-line string, each line representing a completion."""
if output:
lines = output.strip().splitlines(keepends=False)
# if there is a single completion candidate then maybe it is over
append_space = len(lines) == 1
for line in lines:
comp = create_rich_completion(line, append_space)
yield comp
def run_subproc(exe: str, *tokens: "str"):
env = XSH.env.detype()
with contextlib.suppress(FileNotFoundError):
proc = subprocess.Popen(
[exe, "+complete", "fish2"],
stderr=subprocess.DEVNULL,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
text=True,
)
out, _ = proc.communicate(input="\n".join(tokens))
return out
def get_completions(*args):
if not args:
return
exe = args[0]
output = run_subproc(exe, *args)
return generate_completions_from_string(output)
@contextual_command_completer
def xonsh_complete(ctx: CommandContext):
    """Completes the kitty command line."""
if not ctx.completing_command("kitty"):
return None
# like fish's
# commandline --tokenize --cut-at-cursor --current-process
tokens = [arg.raw_value for arg in ctx.args[: ctx.arg_index]]
# it already filters by prefix, just return it
return get_completions(*tokens, ctx.prefix)
if __name__ == "__main__":
# small testing won't hurt
from xonsh.main import setup
setup()
print(list(get_completions("kitty", "-")))
print(list(get_completions("kitty", "--")))
print(list(get_completions("kitty", "--d")))
| 1,022
| 0
| 69
|
4b0e96bdf51100d375dbba004ff29b2e3d770875
| 9,381
|
py
|
Python
|
enhterm/provider/parser/argparser/__init__.py
|
pyl1b/enhterm
|
b4eacc25ef1bdfecab9a662b5269d016070d4e6b
|
[
"MIT"
] | null | null | null |
enhterm/provider/parser/argparser/__init__.py
|
pyl1b/enhterm
|
b4eacc25ef1bdfecab9a662b5269d016070d4e6b
|
[
"MIT"
] | null | null | null |
enhterm/provider/parser/argparser/__init__.py
|
pyl1b/enhterm
|
b4eacc25ef1bdfecab9a662b5269d016070d4e6b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Contains the definition of the ArgParser class.
"""
import logging
from argparse import ArgumentParser, ArgumentError, Namespace
import shlex
from enhterm.command import Command
from enhterm.command.error import ErrorCommand
from enhterm.command.noop import NoOpCommand
from enhterm.command.text import TextCommand
from enhterm.impl.p2p.p2p_provider import RemoteProvider
from enhterm.provider import Provider
from enhterm.provider.parser import Parser
from enhterm.provider.queue_provider import QueueProvider
from enhterm.provider.text_provider import TextProvider
logger = logging.getLogger('et.argparser')
class ArgParseCommand(Command):
"""
A command returned by our parser.
"""
def __init__(self, parsed=None, *args, **kwargs):
""" Constructor. """
super().__init__(*args, **kwargs)
self.parsed = parsed
if parsed is not None:
self.call_me = parsed.func
del parsed.__dict__['func']
if hasattr(parsed, 'command'):
# Because we set the dest parameter to 'command' a
# command attribute is set, with the value of the
# name of the subparser.
self.command_name = parsed.command
del parsed.__dict__['command']
else:
# When a subparser was not set or was set but without
# dest argument.
self.command_name = None
else:
self.command_name = None
self.call_me = None
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgParseCommand()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgParseCommand()'
def execute(self):
"""
Called by the command loop to do some work.
The return value will be deposited by the command loop it into
the `result` member.
"""
return self.call_me(command=self, **self.parsed.__dict__)
def encode(self):
"""
Called when a class instance needs to be serialized.
.. note:
The `result` and `uuid` members should not be serialized
in case of :class:`~Command`.
"""
return self.command_name, self.parsed.__dict__
def decode(self, raw_data):
"""
Apply raw data to this instance.
It is asserted that correct class has already been constructed
and that it has `result` and `uuid` members set in case of
:class:`~Command`..
Raises:
DecodeError:
The implementation should raise this class or a
subclass of it.
Arguments:
raw_data (bytes):
The data to apply.
"""
assert len(raw_data) == 2
self.command_name, self.parsed = raw_data
self.parsed = Namespace(**self.parsed)
@classmethod
def class_id(cls):
"""
A unique identifier of the class.
This value is used as a key when a constructor needs to
be associated with a string
(see :class:`enhterm.ser_deser.dsds.DictSerDeSer`).
"""
return "argparse"
class ParserError(Exception):
""" Hops the exceptions back to :meth:`~parse`."""
pass
class NoOpError(Exception):
""" :meth:`~parse` should return a :class:`~NoOpCommand`."""
pass
class ArgParser(ArgumentParser, Parser):
"""
Parser that uses argparse library to interpret the text.
Note the two functions of this class: an `enhterm` parser
and :class:`argparse.ArgumentParser`.
The usual use of this parser is through subparsers that implement commands.
>>> from enhterm.provider.parser.argparser import ArgParser
>>> testee = ArgParser()
>>> subparsers = testee.add_subparsers(
>>> title="commands", dest="command", help="commands")
>>> def do_add(command, arguments):
>>> return sum(arguments.integers)
>>> parser_add = subparsers.add_parser('add')
>>> parser_add.add_argument(
>>> 'integers', metavar='int', nargs='+', type=int,
>>> help='an integer to be summed')
>>> parser_add.set_defaults(func=do_add)
>>> testee.parse('add -h')
>>> result = testee.parse('add 1 2 3')
>>> exec_result = result.execute()
A simpler variant is:
>>> from enhterm.provider.parser.argparser import ArgParser
>>> testee = ArgParser()
Attributes:
"""
def __init__(self, *args, **kwargs):
"""
Constructor.
Arguments:
"""
provider = kwargs.pop('provider', None)
super().__init__(*args, **kwargs)
assert provider is not None, "The provider must be set and kept " \
"the same for the lifetime of the parser"
self.provider = provider
self.prog = ''
self._subparser_action = None
self.prefix = ''
self.suffix = ''
def add_subparsers(self, **kwargs):
"""
Monkey-patch add_parser method.
Parsers created by the sub-parser have same class as
the main parser (in our case the class:`~ArgParser` class).
Because we want messages printed by the argparse library
to go through our watchers, we want to set the parser
so it is available in :meth:`~_print_message`.
This is because we don't want to ask the user to place
this argument themselves each time they create the parser.
"""
result = super().add_subparsers(**kwargs)
previous_method = result.add_parser
result.add_parser = monkey_patch
return result
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgParser()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgParser()'
@property
def parse(self, text):
"""
Convert a text into a command.
Arguments:
text (str):
The text to parse. This should be a full command.
Returns:
Command
The command that resulted from parsing the text.
If the parsing was unsuccessful the method may return either
:class:`~NoOpCommand' to keep using the provider or `None` to
uninstall it.
"""
try:
if text.startswith('wrap-commands') or text.startswith('wcs ') or text == 'wcs':
args = self.parse_args(shlex.split(text))
else:
args = self.parse_args(shlex.split(f'{self.prefix}{text}{self.suffix}'))
return ArgParseCommand(parsed=args)
except ParserError as exc:
message = str(exc)
self.provider.term.error(message)
return ErrorCommand(message=message)
except NoOpError:
return NoOpCommand()
def error(self, message):
"""
The parser has encountered an error while interpreting the input.
This method, according to argparse specs, should not return.
We raise a custom exception that is caught in :meth:`~parse`
and we pass along the error message.
"""
raise ParserError(message)
def exit(self, status=0, message=None):
""" Trap any exits left out by other code (help, version). """
raise NoOpError
class ArgparseRemoteProvider(RemoteProvider):
"""
A provider that simply takes the text and creates a text command for it.
"""
def __init__(self, parser=None, *args, **kwargs):
"""
Constructor.
"""
super().__init__(*args, **kwargs)
if parser:
self.parser = parser
parser.provider = self
else:
self.parser = ArgParser(provider=self)
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgparseRemoteProvider()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgparseRemoteProvider()'
def enqueue_command(self, command):
""" Adds a command to the internal list. """
assert isinstance(command, TextCommand)
new_command = self.parser.parse(command.content)
new_command.provider = self
new_command.uuid = command.uuid
self.queue.put(new_command)
return new_command
| 31.908163
| 92
| 0.606865
|
# -*- coding: utf-8 -*-
"""
Contains the definition of the ArgParser class.
"""
import logging
from argparse import ArgumentParser, ArgumentError, Namespace
import shlex
from enhterm.command import Command
from enhterm.command.error import ErrorCommand
from enhterm.command.noop import NoOpCommand
from enhterm.command.text import TextCommand
from enhterm.impl.p2p.p2p_provider import RemoteProvider
from enhterm.provider import Provider
from enhterm.provider.parser import Parser
from enhterm.provider.queue_provider import QueueProvider
from enhterm.provider.text_provider import TextProvider
logger = logging.getLogger('et.argparser')
class ArgParseCommand(Command):
"""
A command returned by our parser.
"""
def __init__(self, parsed=None, *args, **kwargs):
""" Constructor. """
super().__init__(*args, **kwargs)
self.parsed = parsed
if parsed is not None:
self.call_me = parsed.func
del parsed.__dict__['func']
if hasattr(parsed, 'command'):
# Because we set the dest parameter to 'command' a
# command attribute is set, with the value of the
# name of the subparser.
self.command_name = parsed.command
del parsed.__dict__['command']
else:
# When a subparser was not set or was set but without
# dest argument.
self.command_name = None
else:
self.command_name = None
self.call_me = None
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgParseCommand()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgParseCommand()'
def execute(self):
"""
Called by the command loop to do some work.
The return value will be deposited by the command loop it into
the `result` member.
"""
return self.call_me(command=self, **self.parsed.__dict__)
def encode(self):
"""
Called when a class instance needs to be serialized.
.. note:
The `result` and `uuid` members should not be serialized
in case of :class:`~Command`.
"""
return self.command_name, self.parsed.__dict__
def decode(self, raw_data):
"""
Apply raw data to this instance.
It is asserted that correct class has already been constructed
and that it has `result` and `uuid` members set in case of
:class:`~Command`..
Raises:
DecodeError:
The implementation should raise this class or a
subclass of it.
Arguments:
raw_data (bytes):
The data to apply.
"""
assert len(raw_data) == 2
self.command_name, self.parsed = raw_data
self.parsed = Namespace(**self.parsed)
@classmethod
def class_id(cls):
"""
A unique identifier of the class.
This value is used as a key when a constructor needs to
be associated with a string
(see :class:`enhterm.ser_deser.dsds.DictSerDeSer`).
"""
return "argparse"
class ParserError(Exception):
""" Hops the exceptions back to :meth:`~parse`."""
pass
class NoOpError(Exception):
""" :meth:`~parse` should return a :class:`~NoOpCommand`."""
pass
class ArgParser(ArgumentParser, Parser):
"""
Parser that uses argparse library to interpret the text.
Note the two functions of this class: an `enhterm` parser
and :class:`argparse.ArgumentParser`.
The usual use of this parser is through subparsers that implement commands.
>>> from enhterm.provider.parser.argparser import ArgParser
>>> testee = ArgParser()
>>> subparsers = testee.add_subparsers(
>>> title="commands", dest="command", help="commands")
>>> def do_add(command, arguments):
>>> return sum(arguments.integers)
>>> parser_add = subparsers.add_parser('add')
>>> parser_add.add_argument(
>>> 'integers', metavar='int', nargs='+', type=int,
>>> help='an integer to be summed')
>>> parser_add.set_defaults(func=do_add)
>>> testee.parse('add -h')
>>> result = testee.parse('add 1 2 3')
>>> exec_result = result.execute()
A simpler variant is:
>>> from enhterm.provider.parser.argparser import ArgParser
>>> testee = ArgParser()
Attributes:
"""
def __init__(self, *args, **kwargs):
"""
Constructor.
Arguments:
"""
provider = kwargs.pop('provider', None)
super().__init__(*args, **kwargs)
assert provider is not None, "The provider must be set and kept " \
"the same for the lifetime of the parser"
self.provider = provider
self.prog = ''
self._subparser_action = None
self.prefix = ''
self.suffix = ''
def add_subparsers(self, **kwargs):
"""
Monkey-patch add_parser method.
Parsers created by the sub-parser have same class as
the main parser (in our case the class:`~ArgParser` class).
Because we want messages printed by the argparse library
        to go through our watchers, we want to set the provider
so it is available in :meth:`~_print_message`.
This is because we don't want to ask the user to place
this argument themselves each time they create the parser.
"""
result = super().add_subparsers(**kwargs)
previous_method = result.add_parser
def monkey_patch(*args, **my_kw_args):
return previous_method(*args, **my_kw_args,
provider=self.provider)
result.add_parser = monkey_patch
return result
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgParser()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgParser()'
@property
def subparsers(self):
if self._subparser_action is None:
self._subparser_action = self.add_subparsers(
title="commands", dest="command", help="commands")
return self._subparser_action
def add_parser(self, *args, **kwargs):
return self.subparsers.add_parser(*args, **kwargs)
def parse(self, text):
"""
Convert a text into a command.
Arguments:
text (str):
The text to parse. This should be a full command.
Returns:
Command
The command that resulted from parsing the text.
If the parsing was unsuccessful the method may return either
:class:`~NoOpCommand' to keep using the provider or `None` to
uninstall it.
"""
try:
if text.startswith('wrap-commands') or text.startswith('wcs ') or text == 'wcs':
args = self.parse_args(shlex.split(text))
else:
args = self.parse_args(shlex.split(f'{self.prefix}{text}{self.suffix}'))
return ArgParseCommand(parsed=args)
except ParserError as exc:
message = str(exc)
self.provider.term.error(message)
return ErrorCommand(message=message)
except NoOpError:
return NoOpCommand()
def error(self, message):
"""
The parser has encountered an error while interpreting the input.
This method, according to argparse specs, should not return.
We raise a custom exception that is caught in :meth:`~parse`
and we pass along the error message.
"""
raise ParserError(message)
def exit(self, status=0, message=None):
""" Trap any exits left out by other code (help, version). """
raise NoOpError
def print_usage(self, file=None):
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
self._print_message(self.format_help(), file)
def _print_message(self, message, file=None):
if message:
assert file is None
self.provider.term.info(message)
class ArgparseRemoteProvider(RemoteProvider):
"""
A provider that simply takes the text and creates a text command for it.
"""
def __init__(self, parser=None, *args, **kwargs):
"""
Constructor.
"""
super().__init__(*args, **kwargs)
if parser:
self.parser = parser
parser.provider = self
else:
self.parser = ArgParser(provider=self)
def __str__(self):
""" Represent this object as a human-readable string. """
return 'ArgparseRemoteProvider()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'ArgparseRemoteProvider()'
def enqueue_command(self, command):
""" Adds a command to the internal list. """
assert isinstance(command, TextCommand)
new_command = self.parser.parse(command.content)
new_command.provider = self
new_command.uuid = command.uuid
self.queue.put(new_command)
return new_command
| 667
| 0
| 165
|
3c90085df15908d2e744ebdb67e564f8b9b0393f
| 7,665
|
py
|
Python
|
farabio/models/segmentation/linknet.py
|
tuttelikz/farabi
|
5b65cdf39ceecbd69ae759d030b132ee74661b48
|
[
"Apache-2.0"
] | 53
|
2021-04-06T17:57:12.000Z
|
2022-03-07T17:45:45.000Z
|
farabio/models/segmentation/linknet.py
|
tuttelikz/farabi
|
5b65cdf39ceecbd69ae759d030b132ee74661b48
|
[
"Apache-2.0"
] | 1
|
2022-03-07T19:48:44.000Z
|
2022-03-07T19:49:47.000Z
|
farabio/models/segmentation/linknet.py
|
tuttelikz/farabi
|
5b65cdf39ceecbd69ae759d030b132ee74661b48
|
[
"Apache-2.0"
] | 2
|
2021-12-06T14:42:44.000Z
|
2021-12-07T11:33:14.000Z
|
"""LinkNet
Paper: https://arxiv.org/pdf/1707.03718
Adapted from: https://github.com/qubvel/segmentation_models.pytorch/blob/master/segmentation_models_pytorch/linknet/model.py
Copyright 2021 | farabio
"""
from typing import List, Optional, Union, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from farabio.models.segmentation.base import SegModel, SegmentationHead
from farabio.models.segmentation.backbones._backbones import get_backbone
from farabio.models.segmentation.blocks import Conv2dReLU
from farabio.utils.helpers import get_num_parameters
__all__ = [
'Linknet', 'linknet_vgg11', 'linknet_vgg11_bn', 'linknet_vgg13', 'linknet_vgg13_bn',
'linknet_vgg16', 'linknet_vgg16_bn', 'linknet_vgg19', 'linknet_vgg19_bn', 'linknet_mobilenetv2',
'linknet_resnet18', 'linknet_resnet34', 'linknet_resnet50', 'linknet_resnet101', 'linknet_resnet152'
]
# test()
| 33.915929
| 124
| 0.663405
|
"""LinkNet
Paper: https://arxiv.org/pdf/1707.03718
Adapted from: https://github.com/qubvel/segmentation_models.pytorch/blob/master/segmentation_models_pytorch/linknet/model.py
Copyright 2021 | farabio
"""
from typing import List, Optional, Union, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from farabio.models.segmentation.base import SegModel, SegmentationHead
from farabio.models.segmentation.backbones._backbones import get_backbone
from farabio.models.segmentation.blocks import Conv2dReLU
from farabio.utils.helpers import get_num_parameters
__all__ = [
'Linknet', 'linknet_vgg11', 'linknet_vgg11_bn', 'linknet_vgg13', 'linknet_vgg13_bn',
'linknet_vgg16', 'linknet_vgg16_bn', 'linknet_vgg19', 'linknet_vgg19_bn', 'linknet_mobilenetv2',
'linknet_resnet18', 'linknet_resnet34', 'linknet_resnet50', 'linknet_resnet101', 'linknet_resnet152'
]
class Linknet(SegModel):
def __init__(
self,
in_channels: int = 3,
out_channels: int = 1,
encoder_name: str = "resnet34",
encoder_depth: int = 5,
decoder_use_bn: bool = True,
decoder_attention_type: Optional[str] = None,
activation: Optional[Union[str, callable]] = None
):
super().__init__()
self.encoder = get_backbone(
encoder_name,
in_channels = in_channels,
depth = encoder_depth,
)
self.decoder = LinknetDecoder(
encoder_channels = self.encoder.out_channels,
n_blocks = encoder_depth,
prefinal_channels=32,
use_bn = decoder_use_bn
)
self.seg_head = SegmentationHead(
in_channels=32,
out_channels=out_channels,
activation=activation,
kernel_size=1
)
self.class_head = None
self.name = "linknet-{}".format(encoder_name)
self.init()
class LinknetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
n_blocks=5,
prefinal_channels=32,
use_bn = True
):
super().__init__()
encoder_channels = encoder_channels[1:]
encoder_channels = encoder_channels[::-1]
channels = list(encoder_channels) + [prefinal_channels]
self.blocks = nn.ModuleList([
DecoderBlock(channels[i], channels[i+1], use_bn=use_bn)
for i in range(n_blocks)
])
def forward(self, *features):
features = features[1:]
features = features[::-1]
x = features[0]
skips = features[1:]
for i, decoder_block in enumerate(self.blocks):
skip = skips[i] if i < len(skips) else None
x = decoder_block(x, skip)
return x
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=True):
super().__init__()
self.block = nn.Sequential(
Conv2dReLU(in_channels, in_channels // 4, kernel_size=1, use_bn=use_bn),
TransposeX2(in_channels // 4, in_channels // 4, use_bn=use_bn),
Conv2dReLU(in_channels // 4, out_channels, kernel_size=1, use_bn=use_bn)
)
def forward(self, x, skip=None):
x = self.block(x)
if skip is not None:
x = x + skip
return x
class TransposeX2(nn.Sequential):
def __init__(self, in_channels, out_channels, use_bn=True):
super().__init__()
layers = [
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True)
]
if use_bn:
layers.insert(1, nn.BatchNorm2d(out_channels))
super().__init__(*layers)
def _linknet(
backbone: str = "resnet18",
in_channels = 3,
out_channels = 1,
**kwargs: Any
) -> Linknet:
model = Linknet(
encoder_name=backbone,
in_channels=in_channels,
out_channels=out_channels
)
return model
def linknet_vgg11(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg11", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg11_bn(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg11_bn", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg13(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg13", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg13_bn(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg13_bn", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg16(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg16", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg16_bn(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg16_bn", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg19(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg19", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_vgg19_bn(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="vgg19_bn", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_mobilenetv2(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="mobilenet_v2", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_resnet18(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="resnet18", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_resnet34(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="resnet34", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_resnet50(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="resnet50", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_resnet101(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="resnet101", in_channels=in_channels, out_channels=out_channels, **kwargs)
def linknet_resnet152(in_channels=3, out_channels=1, **kwargs: Any) -> Linknet:
return _linknet(backbone="resnet152", in_channels=in_channels, out_channels=out_channels, **kwargs)
def test():
x = torch.randn(4, 3, 256, 256)
tests = {
"linknet_vgg11": linknet_vgg11(),
"linknet_vgg11_bn": linknet_vgg11_bn(),
"linknet_vgg13": linknet_vgg13(),
"linknet_vgg13_bn": linknet_vgg13_bn(),
"linknet_vgg16": linknet_vgg16(),
"linknet_vgg16_bn": linknet_vgg16_bn(),
"linknet_vgg19": linknet_vgg19(),
"linknet_vgg19_bn": linknet_vgg19_bn(),
"linknet_mobilenetv2": linknet_mobilenetv2(),
"linknet_resnet18": linknet_resnet18(),
"linknet_resnet34": linknet_resnet34(),
"linknet_resnet50": linknet_resnet50(),
"linknet_resnet101": linknet_resnet101(),
"linknet_resnet152": linknet_resnet152(),
}
for key, value in tests.items():
model = tests[key]
y = model(x)
print("Model name: ", model.name)
print("Trainable parameters: ", get_num_parameters(model))
print("in shape: ", x.shape, ", out shape: ", y.shape)
# test()
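A short usage sketch mirroring test() above, assuming farabio and its backbones are installed; the expected output shape is an assumption based on the x2 transposed-convolution decoder blocks.

model = linknet_resnet18(in_channels=3, out_channels=1)
model.eval()
x = torch.randn(1, 3, 256, 256)   # dummy RGB batch
with torch.no_grad():
    y = model(x)
print(model.name, y.shape)        # mask expected at the input resolution, e.g. (1, 1, 256, 256)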
| 6,071
| 35
| 630
|
ab8cb05d5c02d84b5dee7c2f53886dd4b6bffb7a
| 971
|
py
|
Python
|
students/k3342/laboratory_works/Frolov_Alex/laboratory_work_2/lab2_app/migrations/0002_auto_20200613_2256.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 10
|
2020-03-20T09:06:12.000Z
|
2021-07-27T13:06:02.000Z
|
students/k3342/laboratory_works/Frolov_Alex/laboratory_work_2/lab2_app/migrations/0002_auto_20200613_2256.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 134
|
2020-03-23T09:47:48.000Z
|
2022-03-12T01:05:19.000Z
|
students/k3342/laboratory_works/Frolov_Alex/laboratory_work_2/lab2_app/migrations/0002_auto_20200613_2256.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 71
|
2020-03-20T12:45:56.000Z
|
2021-10-31T19:22:25.000Z
|
# Generated by Django 3.0.5 on 2020-06-13 19:56
from django.db import migrations, models
| 40.458333
| 376
| 0.527291
|
# Generated by Django 3.0.5 on 2020-06-13 19:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lab2_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='nclass',
name='letter',
            field=models.CharField(blank=True, choices=[('А', 'А'), ('Б', 'Б'), ('В', 'В'), ('Г', 'Г')], max_length=2, verbose_name='Буква'),
),
migrations.AlterField(
model_name='timetable',
name='lesson',
            field=models.CharField(blank=True, choices=[('1-8:30-9:10', '1-8:30-9:10'), ('2-9:20-10:00', '2-9:20-10:00'), ('3-10:10-10:50', '3-10:10-10:50'), ('4-11:00-11:40', '4-11:00-11:40'), ('5-12:10-12:50', '5-12:10-12:50'), ('6-13:10-13:50', '6-13:10-13:50'), ('7-14:00-14:40', '7-14:00-14:40'), ('8-14:45-15:25', '8-14:45-15:25')], max_length=50, verbose_name='Урок'),
),
]
| 0
| 868
| 25
|
98e6d816ecc49f008ca2ce95cf7c99f5e3356e23
| 94
|
py
|
Python
|
trading_bot/tools/setting_parameters.py
|
ArthurBernard/Strategy_Manager
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 6
|
2020-02-24T02:19:30.000Z
|
2021-12-19T03:03:11.000Z
|
trading_bot/tools/setting_parameters.py
|
ArthurBernard/Strategy_Manager
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 1
|
2020-06-17T03:29:14.000Z
|
2020-06-17T04:45:34.000Z
|
trading_bot/tools/setting_parameters.py
|
ArthurBernard/Trading_Bot
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 1
|
2019-01-02T16:00:07.000Z
|
2019-01-02T16:00:07.000Z
|
#!/usr/bin/env python3
# coding: utf-8
# Import built-in packages
# Import external packages
| 15.666667
| 26
| 0.734043
|
#!/usr/bin/env python3
# coding: utf-8
# Import built-in packages
# Import external packages
| 0
| 0
| 0
|
facf09de719c86989c7c7380adeb080e99663302
| 1,685
|
py
|
Python
|
chwall/fetcher/local.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | 4
|
2019-11-02T12:22:48.000Z
|
2022-01-07T11:40:40.000Z
|
chwall/fetcher/local.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | 1
|
2022-03-29T18:44:47.000Z
|
2022-03-30T07:04:54.000Z
|
chwall/fetcher/local.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | null | null | null |
import os
import glob
from chwall.utils import get_logger
import gettext
# Uncomment the following line during development.
# Please, be cautious to NOT commit the following line uncommented.
# gettext.bindtextdomain("chwall", "./locale")
gettext.textdomain("chwall")
_ = gettext.gettext
logger = get_logger(__name__)
| 27.622951
| 67
| 0.511573
|
import os
import glob
from chwall.utils import get_logger
import gettext
# Uncomment the following line during development.
# Please, be cautious to NOT commit the following line uncommented.
# gettext.bindtextdomain("chwall", "./locale")
gettext.textdomain("chwall")
_ = gettext.gettext
logger = get_logger(__name__)
def fetch_pictures(config):
conf = config.get("local", {})
paths = conf.get("paths", [])
include_fav = conf.get("favorites", True)
fav_dir = config["general"]["favorites_path"]
try:
if os.path.exists(fav_dir) and include_fav:
paths.insert(0, fav_dir)
except PermissionError as e:
logger.error(e)
if len(paths) == 0:
return {}
pictures = {}
for path in paths:
path = os.path.expanduser(path)
try:
for ext in ["jpg", "jpeg", "png"]:
glob_path = "{}/*.{}".format(path, ext)
for f in glob.iglob(glob_path, recursive=True):
pictures[f] = {
"image": f,
"type": "local",
"url": f,
"copyright": _("Local wallpaper")
}
except PermissionError as e:
logger.error(e)
return pictures
def preferences():
return {
"name": _("Local files"),
"options": {
"paths": {
"widget": "list",
"label": _("Wallpaper repositories")
},
"favorites": {
"label": _("Include favorites wallpapers"),
"widget": "toggle",
"default": True
}
}
}
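A hedged usage sketch for fetch_pictures(); the config dict below is hypothetical but follows exactly the keys the function reads ('general'/'favorites_path' plus the 'local' section).

config = {
    "general": {"favorites_path": "~/.cache/chwall/favorites"},        # placeholder path
    "local": {"paths": ["~/Pictures/wallpapers"], "favorites": False},
}
pictures = fetch_pictures(config)
for image_path, meta in pictures.items():
    print(image_path, meta["copyright"])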
| 1,316
| 0
| 46
|
170f178a212883d5eb070081e64ebf6154b68374
| 7,190
|
py
|
Python
|
python/server.py
|
air01a/esp32_rekognition
|
1e91a7ae8898e765c27153d4aedf4eef82a8e275
|
[
"MIT"
] | null | null | null |
python/server.py
|
air01a/esp32_rekognition
|
1e91a7ae8898e765c27153d4aedf4eef82a8e275
|
[
"MIT"
] | null | null | null |
python/server.py
|
air01a/esp32_rekognition
|
1e91a7ae8898e765c27153d4aedf4eef82a8e275
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
TOKEN = ""
TIMER = 30
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import shutil
import mimetypes
import re
from io import BytesIO
import time
from AWSRekognition import AWSRekognition
import re
import np
import cv2
if __name__ == '__main__':
run()
| 27.760618
| 91
| 0.668846
|
#!/usr/bin/env python3
TOKEN = ""
TIMER = 30
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import shutil
import mimetypes
import re
from io import BytesIO
import time
from AWSRekognition import AWSRekognition
import re
import np
import cv2
class SimpleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# check for security token
def secure(self):
global TOKEN
self.cookie='?'
headers = self.headers.get('Authorization')
if headers==None:
print(str(self.path))
if str(self.path).find(TOKEN)!=-1:
self.cookie='?id=' + TOKEN
return True
if headers == TOKEN:
return True
self.send_response(503)
self.end_headers()
return False
#Manage GET
def do_GET(self):
if not self.secure():
return False
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
# Manage HEAD
def do_HEAD(self):
if not self.secure():
return False
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
    # Manage POST to get FILE
def do_POST(self):
if not self.secure():
return False
"""Serve a POST request."""
r, info = self.deal_post_data()
print((r, info, "by: ", self.client_address))
f = BytesIO()
f.write(str(TIMER).encode())
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
# Use AWS Reko to draw bbox around people on the frame
def getHuman(self,frame):
aws = AWSRekognition()
res = aws.labelDetection(frame)
print(res)
file_bytes = np.asarray(bytearray(frame), dtype=np.uint8)
img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
for box in res:
cat,prob,x,y,w,h,module = box
if cat=='Person':
cv2.rectangle(img, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 0))
name=str(time.time())+".jpg"
cv2.imwrite(name, img)
# Get file in POST DATA the Ugly way
def deal_post_data(self):
content_type = self.headers['content-type']
if not content_type:
return (False, "Content-Type header doesn't contain boundary")
boundary = content_type.split("=")[1].encode()
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(r'Content-Disposition.*name="imageFile"; filename="(.*)"', line.decode())
if not fn:
return (False, "Can't find out file name...")
path = self.translate_path(self.path)
fn = os.path.join(path, fn[0])
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
out = BytesIO()
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith(b'\r'):
preline = preline[0:-1]
out.write(preline)
self.getHuman(out.getvalue())
return (True, "File '%s' upload success!" % fn)
else:
out.write(preline)
preline = line
        return (False, "Unexpected end of data.")
# Send header to get and head request
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
# List files in current directory and encapsulate the result in html
def list_directory(self, path):
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = BytesIO()
displaypath = cgi.escape(urllib.parse.unquote(self.path))
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write(("<html>\n<title>Directory listing for %s</title>\n" % displaypath).encode())
f.write(("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath).encode())
f.write(b"<hr>\n")
f.write(b"<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
f.write(b"<input name=\"imageFile\" type=\"file\"/>")
f.write(b"<input type=\"submit\" value=\"upload\"/></form>\n")
f.write(b"<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(('<li><a href="%s">%s</a>\n'
% (urllib.parse.quote(linkname)+self.cookie, cgi.escape(displayname))).encode())
f.write(b"</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = [_f for _f in words if _f]
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def run(HandlerClass = SimpleHTTPRequestHandler,ServerClass = http.server.HTTPServer):
server_address = ('0.0.0.0', 8081)
httpd = ServerClass(server_address,HandlerClass)
httpd.serve_forever()
if __name__ == '__main__':
run()
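A hedged client-side sketch for the handler above, assuming the server runs locally on port 8081 and TOKEN has been set to a non-empty value; the token and file name are placeholders.

import requests  # third-party HTTP client, used here only for illustration

TOKEN = "my-secret-token"            # must match the TOKEN configured in the server script
with open("frame.jpg", "rb") as fh:
    # secure() compares the Authorization header with TOKEN and
    # deal_post_data() expects a multipart field named "imageFile".
    resp = requests.post(
        "http://localhost:8081/",
        headers={"Authorization": TOKEN},
        files={"imageFile": fh},
    )
print(resp.status_code, resp.text)   # on success the body carries the TIMER value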
| 5,885
| 872
| 47
|
1622590d2431eaa2a1f8c59dc73a7726022c9d22
| 800
|
py
|
Python
|
tests/data/generator/test_sea_generator.py
|
trajkova-elena/scikit-multiflow
|
dd372c677a97346a9c60cd25b45b350e0fd83d3c
|
[
"BSD-3-Clause"
] | 1
|
2020-10-14T10:36:28.000Z
|
2020-10-14T10:36:28.000Z
|
tests/data/generator/test_sea_generator.py
|
trajkova-elena/scikit-multiflow
|
dd372c677a97346a9c60cd25b45b350e0fd83d3c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/data/generator/test_sea_generator.py
|
trajkova-elena/scikit-multiflow
|
dd372c677a97346a9c60cd25b45b350e0fd83d3c
|
[
"BSD-3-Clause"
] | 3
|
2020-10-02T08:36:52.000Z
|
2020-10-21T10:50:20.000Z
|
import os
import numpy as np
from skmultiflow.data.generator.sea_generator import SEAGenerator
| 36.363636
| 125
| 0.72875
|
import os
import numpy as np
from skmultiflow.data.generator.sea_generator import SEAGenerator
def test_sea_generator(test_path):
stream = SEAGenerator(classification_function=2, random_state=112, balance_classes=False, noise_percentage=0.28)
# Load test data corresponding to first 10 instances
test_file = os.path.join(test_path, 'sea_stream.npz')
data = np.load(test_file)
X_expected = data['X']
y_expected = data['y']
for j in range(0,10):
X, y = stream.next_sample()
assert np.alltrue(np.isclose(X, X_expected[j]))
assert np.alltrue(np.isclose(y[0], y_expected[j]))
expected_info = "SEAGenerator(balance_classes=False, classification_function=2, noise_percentage=0.28, random_state=112)"
assert stream.get_info() == expected_info
| 681
| 0
| 23
|
68597cccb9af19fe7498a4081ce24a2e1772b686
| 300
|
py
|
Python
|
web/visualizer.py
|
jagsgill/410project
|
f3e28f796ff89aa43c48cd8e18ad0ad412335263
|
[
"MIT"
] | null | null | null |
web/visualizer.py
|
jagsgill/410project
|
f3e28f796ff89aa43c48cd8e18ad0ad412335263
|
[
"MIT"
] | null | null | null |
web/visualizer.py
|
jagsgill/410project
|
f3e28f796ff89aa43c48cd8e18ad0ad412335263
|
[
"MIT"
] | null | null | null |
import json
import flask
import os
app = flask.Flask(__name__)
@app.route("/")
if __name__ == "__main__":
begin()
| 15
| 53
| 0.673333
|
import json
import flask
import os
app = flask.Flask(__name__)
@app.route("/")
def index():
return flask.render_template("index.html")
def begin():
port = 8080
os.system("open http://localhost:{0}/".format(port))
app.debug = True
app.run(port=port)
if __name__ == "__main__":
begin()
| 134
| 0
| 45
|
9fef173258aa5bfe192c3ee01893f01998735937
| 597
|
py
|
Python
|
src/__init__.py
|
scott-currie/stock_portfolio
|
568d581ed277d1999563f4da427cc6b4fa5d387b
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
scott-currie/stock_portfolio
|
568d581ed277d1999563f4da427cc6b4fa5d387b
|
[
"MIT"
] | 6
|
2020-03-24T16:39:37.000Z
|
2021-09-08T01:06:58.000Z
|
src/__init__.py
|
scott-currie/stock_portfolio
|
568d581ed277d1999563f4da427cc6b4fa5d387b
|
[
"MIT"
] | null | null | null |
from flask import Flask
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# `flask run` - runs application on local server
app = Flask(__name__, static_url_path='', static_folder='static',
instance_relative_config=True)
DATABASE_URL = os.environ.get('DATABASE_URL')
if os.environ.get('TESTING') == 'True':
DATABASE_URL = os.environ.get('TEST_DATABASE_URL')
app.config.from_mapping(
SECRET_KEY=os.environ.get('SECRET_KEY'),
SQLALCHEMY_DATABASE_URI=DATABASE_URL,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
from . import routes, models, exceptions, auth
| 24.875
| 65
| 0.743719
|
from flask import Flask
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# `flask run` - runs application on local server
app = Flask(__name__, static_url_path='', static_folder='static',
instance_relative_config=True)
DATABASE_URL = os.environ.get('DATABASE_URL')
if os.environ.get('TESTING') == 'True':
DATABASE_URL = os.environ.get('TEST_DATABASE_URL')
app.config.from_mapping(
SECRET_KEY=os.environ.get('SECRET_KEY'),
SQLALCHEMY_DATABASE_URI=DATABASE_URL,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
from . import routes, models, exceptions, auth
| 0
| 0
| 0
|
6189469b12c3351f8e07df13c8b4ead29dc48169
| 1,376
|
py
|
Python
|
utils/jsdati.py
|
bynil/v2ex-crawler
|
c3ceefba9b330e1356259433e67633bf5d5da956
|
[
"MIT"
] | 15
|
2017-10-09T13:15:27.000Z
|
2020-06-30T23:42:54.000Z
|
utils/jsdati.py
|
bynil/v2ex-crawler
|
c3ceefba9b330e1356259433e67633bf5d5da956
|
[
"MIT"
] | 2
|
2019-10-21T15:03:29.000Z
|
2021-06-02T03:11:54.000Z
|
utils/jsdati.py
|
bynil/v2ex-crawler
|
c3ceefba9b330e1356259433e67633bf5d5da956
|
[
"MIT"
] | 3
|
2017-10-17T02:52:26.000Z
|
2019-07-05T02:54:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: gexiao
# Created on 2018-05-07 22:04
import logging
import requests
import base64
SERVER_HOST = 'https://v2-api.jsdama.com/upload'
SOFTWARE_ID = 9487
SOFTWARE_SECRET = 'nb4GHmdsPxzbcB7iIrU36JPI73HOjUyUEnq3pkob'
| 31.272727
| 112
| 0.590843
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: gexiao
# Created on 2018-05-07 22:04
import logging
import requests
import base64
SERVER_HOST = 'https://v2-api.jsdama.com/upload'
SOFTWARE_ID = 9487
SOFTWARE_SECRET = 'nb4GHmdsPxzbcB7iIrU36JPI73HOjUyUEnq3pkob'
class JsdatiApi():
def __init__(self, username, password):
self.username = username
self.password = password
def decode_image_bin_content(self, content, type):
filedata = base64.b64encode(content).decode('ascii')
payload = {'softwareId': SOFTWARE_ID,
'softwareSecret': SOFTWARE_SECRET,
'username': self.username,
'password': self.password,
'captchaData': filedata,
                   'captchaType': 1017, # 8 or more characters, letters or digits
}
headers = {
'Accept-Encoding': "application/json, text/javascript, */*; q=0.01",
'Content-Type': "application/json",
}
response = requests.request("POST", SERVER_HOST, json=payload, headers=headers)
res = response.json()
# {"code":0,"data":{"recognition":"NDSBJCSY","captchaId":"20180507:000000000016483190234"},"message":""}
if res['code'] == 0:
return res['data']['recognition']
else:
logging.error(res)
return res['code']
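A hedged usage sketch for JsdatiApi; the credentials and image path are placeholders, and note that the type argument is accepted but not used by decode_image_bin_content.

api = JsdatiApi('your-username', 'your-password')   # placeholder jsdama credentials
with open('captcha.png', 'rb') as fh:
    content = fh.read()
result = api.decode_image_bin_content(content, type=None)
print(result)   # recognised text on success, numeric error code on failure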
| 1,056
| -3
| 77
|
21cd0b478e379ec3b6523625ca68a49624cc6e66
| 2,786
|
py
|
Python
|
rdchiral/utils.py
|
Furuidemu/retrosim
|
21f5449f617a93f2a64e927fcde224b298327727
|
[
"MIT"
] | 65
|
2020-06-27T04:28:21.000Z
|
2022-03-30T11:18:22.000Z
|
template/rdchiral/utils.py
|
sw32-seo/GTA
|
86b102a14b78f6c8b50d742a56445c748e59b51e
|
[
"MIT"
] | 15
|
2020-07-07T13:17:05.000Z
|
2022-03-22T12:52:29.000Z
|
template/rdchiral/utils.py
|
sw32-seo/GTA
|
86b102a14b78f6c8b50d742a56445c748e59b51e
|
[
"MIT"
] | 14
|
2020-06-30T09:22:13.000Z
|
2022-03-30T11:18:28.000Z
|
from __future__ import print_function
PLEVEL = 0
def parity4(data):
'''
Thanks to http://www.dalkescientific.com/writings/diary/archive/2016/08/15/fragment_parity_calculation.html
'''
if data[0] < data[1]:
if data[2] < data[3]:
if data[0] < data[2]:
if data[1] < data[2]:
return 0 # (0, 1, 2, 3)
else:
if data[1] < data[3]:
return 1 # (0, 2, 1, 3)
else:
return 0 # (0, 3, 1, 2)
else:
if data[0] < data[3]:
if data[1] < data[3]:
return 0 # (1, 2, 0, 3)
else:
return 1 # (1, 3, 0, 2)
else:
return 0 # (2, 3, 0, 1)
else:
if data[0] < data[3]:
if data[1] < data[2]:
if data[1] < data[3]:
return 1 # (0, 1, 3, 2)
else:
return 0 # (0, 2, 3, 1)
else:
return 1 # (0, 3, 2, 1)
else:
if data[0] < data[2]:
if data[1] < data[2]:
return 1 # (1, 2, 3, 0)
else:
return 0 # (1, 3, 2, 0)
else:
return 1 # (2, 3, 1, 0)
else:
if data[2] < data[3]:
if data[0] < data[3]:
if data[0] < data[2]:
return 1 # (1, 0, 2, 3)
else:
if data[1] < data[2]:
return 0 # (2, 0, 1, 3)
else:
return 1 # (2, 1, 0, 3)
else:
if data[1] < data[2]:
return 1 # (3, 0, 1, 2)
else:
if data[1] < data[3]:
return 0 # (3, 1, 0, 2)
else:
return 1 # (3, 2, 0, 1)
else:
if data[0] < data[2]:
if data[0] < data[3]:
return 0 # (1, 0, 3, 2)
else:
if data[1] < data[3]:
return 1 # (2, 0, 3, 1)
else:
return 0 # (2, 1, 3, 0)
else:
if data[1] < data[2]:
if data[1] < data[3]:
return 0 # (3, 0, 2, 1)
else:
return 1 # (3, 1, 2, 0)
else:
return 0 # (3, 2, 1, 0)
| 33.97561
| 111
| 0.298277
|
from __future__ import print_function
PLEVEL = 0
def vprint(level, txt, *args):
if PLEVEL >= level:
print(txt.format(*args))
def parity4(data):
'''
Thanks to http://www.dalkescientific.com/writings/diary/archive/2016/08/15/fragment_parity_calculation.html
'''
if data[0] < data[1]:
if data[2] < data[3]:
if data[0] < data[2]:
if data[1] < data[2]:
return 0 # (0, 1, 2, 3)
else:
if data[1] < data[3]:
return 1 # (0, 2, 1, 3)
else:
return 0 # (0, 3, 1, 2)
else:
if data[0] < data[3]:
if data[1] < data[3]:
return 0 # (1, 2, 0, 3)
else:
return 1 # (1, 3, 0, 2)
else:
return 0 # (2, 3, 0, 1)
else:
if data[0] < data[3]:
if data[1] < data[2]:
if data[1] < data[3]:
return 1 # (0, 1, 3, 2)
else:
return 0 # (0, 2, 3, 1)
else:
return 1 # (0, 3, 2, 1)
else:
if data[0] < data[2]:
if data[1] < data[2]:
return 1 # (1, 2, 3, 0)
else:
return 0 # (1, 3, 2, 0)
else:
return 1 # (2, 3, 1, 0)
else:
if data[2] < data[3]:
if data[0] < data[3]:
if data[0] < data[2]:
return 1 # (1, 0, 2, 3)
else:
if data[1] < data[2]:
return 0 # (2, 0, 1, 3)
else:
return 1 # (2, 1, 0, 3)
else:
if data[1] < data[2]:
return 1 # (3, 0, 1, 2)
else:
if data[1] < data[3]:
return 0 # (3, 1, 0, 2)
else:
return 1 # (3, 2, 0, 1)
else:
if data[0] < data[2]:
if data[0] < data[3]:
return 0 # (1, 0, 3, 2)
else:
if data[1] < data[3]:
return 1 # (2, 0, 3, 1)
else:
return 0 # (2, 1, 3, 0)
else:
if data[1] < data[2]:
if data[1] < data[3]:
return 0 # (3, 0, 2, 1)
else:
return 1 # (3, 1, 2, 0)
else:
return 0 # (3, 2, 1, 0)
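A few spot checks for parity4(): it returns 0 for even permutations of four distinct comparable values and 1 for odd ones, so swapping any two entries flips the result.

assert parity4((0, 1, 2, 3)) == 0   # identity permutation is even
assert parity4((1, 0, 2, 3)) == 1   # a single swap is odd
assert parity4((1, 0, 3, 2)) == 0   # two swaps are even again
assert parity4((3, 2, 1, 0)) == 0   # reversing four elements is two swaps, hence even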
| 66
| 0
| 22
|
c60aab25d55598eaca48d8eaa95733efdd745194
| 1,897
|
py
|
Python
|
Third course/5th semester/Analysis of algorithms course/Lab3 - Sorting/main1.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | null | null | null |
Third course/5th semester/Analysis of algorithms course/Lab3 - Sorting/main1.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | null | null | null |
Third course/5th semester/Analysis of algorithms course/Lab3 - Sorting/main1.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | 7
|
2020-12-04T07:26:46.000Z
|
2022-03-08T17:47:47.000Z
|
from sort import *
import time
import random
n1 = int(input("Size\nFrom: "))
n2 = int(input("To: "))
h = int(input("Step:"))
if n1 > n2 or n2 == n1 or h == 0:
print("Wrong input")
exit()
else:
result = measure_time(get_best_array, get_best_array, mysort_quick_middle, n1, n2 + 1, h, 100)
print("\n", result, "\n")
result = measure_time(get_worst_array, get_best_array, mysort_quick_end, n1, n2 + 1, h, 100)
print("\n", result, "\n")
result = measure_time(get_random_array, get_random_array, mysort_quick_middle, n1, n2 + 1, h, 100)
print("\n", result, "\n")
| 22.583333
| 103
| 0.530311
|
from sort import *
import time
import random
def get_random_array(n):
array = []
for i in range(n):
array.append(random.randint(0, 20000))
return array
def get_best_array(n):
array = []
for i in range(n):
array.append(i)
return array
def get_worst_array(n):
array = []
for i in range(n):
array.append(n - i)
return array
def get_calc_time(func, arr):
t2 = time.process_time()
func(arr)
t1 = time.process_time() - t2
return t1
def measure_time(get_array, get_array_quick, func, n1, n2, st, it):
t_bubble = []
t_shell = []
t_quick = []
for n in range(n1, n2, st):
print(n, ' ', time.time())
t = 0
for i in range(it):
arr = get_array(n)
t += get_calc_time(mysort_bubble, arr)
t_bubble.append(t / it)
t = 0
for i in range(it):
arr = get_array(n)
t += get_calc_time(mysort_insert, arr)
t_shell.append(t / it)
t = 0
for i in range(it):
arr = get_array_quick(n)
t += get_calc_time(func, arr)
t_quick.append(t / it)
return (t_bubble, t_shell, t_quick)
n1 = int(input("Size\nFrom: "))
n2 = int(input("To: "))
h = int(input("Step:"))
if n1 > n2 or n2 == n1 or h == 0:
print("Wrong input")
exit()
else:
result = measure_time(get_best_array, get_best_array, mysort_quick_middle, n1, n2 + 1, h, 100)
print("\n", result, "\n")
result = measure_time(get_worst_array, get_best_array, mysort_quick_end, n1, n2 + 1, h, 100)
print("\n", result, "\n")
result = measure_time(get_random_array, get_random_array, mysort_quick_middle, n1, n2 + 1, h, 100)
print("\n", result, "\n")
| 1,152
| 0
| 125
|
09f55b7870106786dba51122424f11feffb40feb
| 1,680
|
py
|
Python
|
flocker/common/_era.py
|
stackriot/flocker
|
eaa586248986d7cd681c99c948546c2b507e44de
|
[
"Apache-2.0"
] | 2,690
|
2015-01-02T11:12:11.000Z
|
2022-03-15T15:41:51.000Z
|
flocker/common/_era.py
|
stackriot/flocker
|
eaa586248986d7cd681c99c948546c2b507e44de
|
[
"Apache-2.0"
] | 2,102
|
2015-01-02T18:49:40.000Z
|
2021-01-21T18:49:47.000Z
|
flocker/common/_era.py
|
stackriot/flocker
|
eaa586248986d7cd681c99c948546c2b507e44de
|
[
"Apache-2.0"
] | 333
|
2015-01-10T01:44:01.000Z
|
2022-03-08T15:03:04.000Z
|
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Era information for Flocker nodes.
Every time a node reboots it gets a new, globally unique era.
"""
import sys
from uuid import UUID
from zope.interface import implementer
from twisted.internet.defer import succeed
from twisted.python.filepath import FilePath
from twisted.python.usage import Options
from twisted.python.runtime import platform
from ..common.script import (
ICommandLineScript, flocker_standard_options, FlockerScriptRunner,
)
_BOOT_ID = FilePath(b"/proc/sys/kernel/random/boot_id")
def get_era():
"""
:return UUID: A node- and boot-specific globally unique id.
"""
return UUID(hex=_BOOT_ID.getContent().strip())
@flocker_standard_options
class EraOptions(Options):
"""
Command line options for ``flocker-node-era``.
"""
    longdesc = (
        "Print the current node's era to stdout. The era is a unique"
        " identifier per reboot per node, and can be used to discover the"
        " current node's state safely using Flocker's REST API.\n"
    )
synopsis = "Usage: flocker-node-era"
@implementer(ICommandLineScript)
class EraScript(object):
"""
Output the era to stdout.
"""
def era_main():
"""
Entry point for ``flocker-node-era`` command-line tool.
"""
return FlockerScriptRunner(
script=EraScript(),
options=EraOptions(),
logging=False).main()
| 24.347826
| 73
| 0.682738
|
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Era information for Flocker nodes.
Every time a node reboots it gets a new, globally unique era.
"""
import sys
from uuid import UUID
from zope.interface import implementer
from twisted.internet.defer import succeed
from twisted.python.filepath import FilePath
from twisted.python.usage import Options
from twisted.python.runtime import platform
from ..common.script import (
ICommandLineScript, flocker_standard_options, FlockerScriptRunner,
)
_BOOT_ID = FilePath(b"/proc/sys/kernel/random/boot_id")
def get_era():
"""
:return UUID: A node- and boot-specific globally unique id.
"""
return UUID(hex=_BOOT_ID.getContent().strip())
@flocker_standard_options
class EraOptions(Options):
"""
Command line options for ``flocker-node-era``.
"""
    longdesc = (
        "Print the current node's era to stdout. The era is a unique"
        " identifier per reboot per node, and can be used to discover the"
        " current node's state safely using Flocker's REST API.\n"
    )
synopsis = "Usage: flocker-node-era"
@implementer(ICommandLineScript)
class EraScript(object):
"""
Output the era to stdout.
"""
def main(self, reactor, options):
if not platform.isLinux():
raise SystemExit("flocker-node-era only works on Linux.")
sys.stdout.write(str(get_era()))
sys.stdout.flush()
return succeed(None)
def era_main():
"""
Entry point for ``flocker-node-era`` command-line tool.
"""
return FlockerScriptRunner(
script=EraScript(),
options=EraOptions(),
logging=False).main()
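A minimal usage sketch (Linux only, matching the check in EraScript.main); the import path is assumed from the file location flocker/common/_era.py.

from flocker.common._era import get_era

# Reads /proc/sys/kernel/random/boot_id, so the value is stable until the next reboot.
print(get_era())   # a uuid.UUID that differs per node and per boot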
| 214
| 0
| 26
|
82e9d12b07897f4af449eb7e6367c29c879bc2f3
| 1,801
|
py
|
Python
|
book/_build/jupyter_execute/pandas/Week 4-Introduction to Data Science[Coursera].py
|
hossainlab/dsnotes
|
fee64e157f45724bba1f49ad1b186dcaaf1e6c02
|
[
"CC0-1.0"
] | null | null | null |
book/_build/jupyter_execute/pandas/Week 4-Introduction to Data Science[Coursera].py
|
hossainlab/dsnotes
|
fee64e157f45724bba1f49ad1b186dcaaf1e6c02
|
[
"CC0-1.0"
] | null | null | null |
book/_build/jupyter_execute/pandas/Week 4-Introduction to Data Science[Coursera].py
|
hossainlab/dsnotes
|
fee64e157f45724bba1f49ad1b186dcaaf1e6c02
|
[
"CC0-1.0"
] | null | null | null |
# Distributions in Pandas
import pandas as pd
import numpy as np
np.random.binomial(1, 0.5)
np.random.binomial(1000, 0.5)/1000
chance_of_tornado = 0.01/100
np.random.binomial(100000, chance_of_tornado)
chance_of_tornado = 0.01
tornado_events = np.random.binomial(1, chance_of_tornado, 1000000)
two_days_in_a_row = 0
for j in range(1,len(tornado_events)-1):
if tornado_events[j]==1 and tornado_events[j-1]==1:
two_days_in_a_row+=1
print('{} tornadoes back to back in {} years'.format(two_days_in_a_row, 1000000/365))
np.random.uniform(0, 1)
np.random.normal(0.75)
Formula for standard deviation
$$\sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2}$$
distribution = np.random.normal(0.75,size=1000)
np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution))
np.std(distribution)
import scipy.stats as stats
stats.kurtosis(distribution)
stats.skew(distribution)
chi_squared_df2 = np.random.chisquare(2, size=10000)
stats.skew(chi_squared_df2)
chi_squared_df5 = np.random.chisquare(5, size=10000)
stats.skew(chi_squared_df5)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
output = plt.hist([chi_squared_df2,chi_squared_df5], bins=50, histtype='step',
label=['2 degrees of freedom','5 degrees of freedom'])
plt.legend(loc='upper right')
# Hypothesis Testing
df = pd.read_csv('grades.csv')
df.head()
len(df)
early = df[df['assignment1_submission'] <= '2015-12-31']
late = df[df['assignment1_submission'] > '2015-12-31']
early.mean()
late.mean()
from scipy import stats
stats.ttest_ind?
stats.ttest_ind(early['assignment1_grade'], late['assignment1_grade'])
stats.ttest_ind(early['assignment2_grade'], late['assignment2_grade'])
stats.ttest_ind(early['assignment3_grade'], late['assignment3_grade'])
| 22.797468
| 85
| 0.7407
|
# Distributions in Pandas
import pandas as pd
import numpy as np
np.random.binomial(1, 0.5)
np.random.binomial(1000, 0.5)/1000
chance_of_tornado = 0.01/100
np.random.binomial(100000, chance_of_tornado)
chance_of_tornado = 0.01
tornado_events = np.random.binomial(1, chance_of_tornado, 1000000)
two_days_in_a_row = 0
for j in range(1,len(tornado_events)-1):
if tornado_events[j]==1 and tornado_events[j-1]==1:
two_days_in_a_row+=1
print('{} tornadoes back to back in {} years'.format(two_days_in_a_row, 1000000/365))
np.random.uniform(0, 1)
np.random.normal(0.75)
Formula for standard deviation
$$\sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2}$$
distribution = np.random.normal(0.75,size=1000)
np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution))
np.std(distribution)
import scipy.stats as stats
stats.kurtosis(distribution)
stats.skew(distribution)
chi_squared_df2 = np.random.chisquare(2, size=10000)
stats.skew(chi_squared_df2)
chi_squared_df5 = np.random.chisquare(5, size=10000)
stats.skew(chi_squared_df5)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
output = plt.hist([chi_squared_df2,chi_squared_df5], bins=50, histtype='step',
label=['2 degrees of freedom','5 degrees of freedom'])
plt.legend(loc='upper right')
# Hypothesis Testing
df = pd.read_csv('grades.csv')
df.head()
len(df)
early = df[df['assignment1_submission'] <= '2015-12-31']
late = df[df['assignment1_submission'] > '2015-12-31']
early.mean()
late.mean()
from scipy import stats
stats.ttest_ind?
stats.ttest_ind(early['assignment1_grade'], late['assignment1_grade'])
stats.ttest_ind(early['assignment2_grade'], late['assignment2_grade'])
stats.ttest_ind(early['assignment3_grade'], late['assignment3_grade'])
| 0
| 0
| 0
|
514f367de86a238caeba42c73f5c4c8ed5711914
| 207
|
py
|
Python
|
jarbas/settings_unit_tests.py
|
mazulo/serenata-de-amor
|
d5f6feb97f1bbd748fda6e99fe07a47c52db3fa6
|
[
"MIT"
] | null | null | null |
jarbas/settings_unit_tests.py
|
mazulo/serenata-de-amor
|
d5f6feb97f1bbd748fda6e99fe07a47c52db3fa6
|
[
"MIT"
] | null | null | null |
jarbas/settings_unit_tests.py
|
mazulo/serenata-de-amor
|
d5f6feb97f1bbd748fda6e99fe07a47c52db3fa6
|
[
"MIT"
] | null | null | null |
from jarbas.settings import *
MIGRATION_MODULES = DisableMigrations()
| 15.923077
| 39
| 0.700483
|
from jarbas.settings import *
class DisableMigrations:
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
MIGRATION_MODULES = DisableMigrations()
| 55
| 3
| 76
|
129442cfda6260b8d0649c7091329a38a7fc5a11
| 5,173
|
py
|
Python
|
sdk/communication/azure-communication-identity/samples/identity_samples.py
|
abhahn/azure-sdk-for-python
|
09521dfb517e0859ec961cae006fb728d787b565
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/communication/azure-communication-identity/samples/identity_samples.py
|
rakshith91/azure-sdk-for-python
|
3c4f2575d31260fa1bda870b04e34c082ac5702b
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-identity/samples/identity_samples.py
|
rakshith91/azure-sdk-for-python
|
3c4f2575d31260fa1bda870b04e34c082ac5702b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: identity_sample.py
DESCRIPTION:
These samples demonstrate creating a user, issuing a token, revoking a token and deleting a user.
///authenticating a client via a connection string
USAGE:
python identity_samples.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
"""
import os
if __name__ == '__main__':
sample = CommunicationIdentityClientSamples()
sample.create_user()
sample.create_user_with_token()
sample.get_token()
sample.revoke_tokens()
sample.delete_user()
| 46.603604
| 107
| 0.7023
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: identity_sample.py
DESCRIPTION:
These samples demonstrate creating a user, issuing a token, revoking a token and deleting a user.
///authenticating a client via a connection string
USAGE:
python identity_samples.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
"""
import os
class CommunicationIdentityClientSamples(object):
def __init__(self):
self.connection_string = os.getenv('AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING')
self.endpoint = os.getenv('AZURE_COMMUNICATION_SERVICE_ENDPOINT')
self.client_id = os.getenv('AZURE_CLIENT_ID')
self.client_secret = os.getenv('AZURE_CLIENT_SECRET')
self.tenant_id = os.getenv('AZURE_TENANT_ID')
def get_token(self):
from azure.communication.identity import (
CommunicationIdentityClient,
CommunicationTokenScope
)
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
identity_client = CommunicationIdentityClient(self.endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
user = identity_client.create_user()
print("Getting token for: " + user.identifier)
tokenresponse = identity_client.get_token(user, scopes=[CommunicationTokenScope.CHAT])
print("Token issued with value: " + tokenresponse.token)
def revoke_tokens(self):
from azure.communication.identity import (
CommunicationIdentityClient,
CommunicationTokenScope
)
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
identity_client = CommunicationIdentityClient(self.endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
user = identity_client.create_user()
tokenresponse = identity_client.get_token(user, scopes=[CommunicationTokenScope.CHAT])
print("Revoking token: " + tokenresponse.token)
identity_client.revoke_tokens(user)
print(tokenresponse.token + " revoked successfully")
def create_user(self):
from azure.communication.identity import CommunicationIdentityClient
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
identity_client = CommunicationIdentityClient(self.endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
print("Creating new user")
user = identity_client.create_user()
print("User created with id:" + user.identifier)
def create_user_with_token(self):
from azure.communication.identity import (
CommunicationIdentityClient,
CommunicationTokenScope
)
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
identity_client = CommunicationIdentityClient(self.endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
print("Creating new user with token")
user, tokenresponse = identity_client.create_user_with_token(scopes=[CommunicationTokenScope.CHAT])
print("User created with id:" + user.identifier)
print("Token issued with value: " + tokenresponse.token)
def delete_user(self):
from azure.communication.identity import CommunicationIdentityClient
if self.client_id is not None and self.client_secret is not None and self.tenant_id is not None:
from azure.identity import DefaultAzureCredential
identity_client = CommunicationIdentityClient(self.endpoint, DefaultAzureCredential())
else:
identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
user = identity_client.create_user()
print("Deleting user: " + user.identifier)
identity_client.delete_user(user)
print(user.identifier + " deleted")
if __name__ == '__main__':
sample = CommunicationIdentityClientSamples()
sample.create_user()
sample.create_user_with_token()
sample.get_token()
sample.revoke_tokens()
sample.delete_user()
| 4,003
| 28
| 185
|
d0ae7e9e7b74ad5927d28b3655f5e9408cd4c60e
| 497
|
py
|
Python
|
receiver.py
|
rasathus/raspberrylogger
|
f2084b67b679523b6c0ec1f436a5fcad6f104aaa
|
[
"MIT"
] | null | null | null |
receiver.py
|
rasathus/raspberrylogger
|
f2084b67b679523b6c0ec1f436a5fcad6f104aaa
|
[
"MIT"
] | null | null | null |
receiver.py
|
rasathus/raspberrylogger
|
f2084b67b679523b6c0ec1f436a5fcad6f104aaa
|
[
"MIT"
] | null | null | null |
'''
Created on 1 Dec 2012
@author: Jeremy
'''
import serial
import sys
import rt
import time
s = serial.Serial(sys.argv[1],115200,timeout=15)
t = time.time()
c = 0
RT = rt.RaceTech(s)
RT.run(decode)
| 17.75
| 76
| 0.565392
|
'''
Created on 1 Dec 2012
@author: Jeremy
'''
import serial
import sys
import rt
import time
s = serial.Serial(sys.argv[1],115200,timeout=15)
t = time.time()
c = 0
def decode(header,length,msg,cs,variable_length):
# print header,length,msg,cs
global c,t
c += 1
if c > 999:
d = time.time() - t
        print('Received %d messages in %.3f seconds (%.3f mps)' % (c,d,c/d))
c = 0
t = time.time()
RT = rt.RaceTech(s)
RT.run(decode)
| 252
| 0
| 25
|
e3ee2b35688dd42c5efe6d2b57469efb051f11a0
| 670
|
py
|
Python
|
project/urls.py
|
dmitrytk/takkand.pw
|
162fd5bb0e58c419977e06ce4633177918bd6f61
|
[
"MIT"
] | null | null | null |
project/urls.py
|
dmitrytk/takkand.pw
|
162fd5bb0e58c419977e06ce4633177918bd6f61
|
[
"MIT"
] | 10
|
2021-03-18T23:07:30.000Z
|
2022-03-12T00:13:17.000Z
|
project/urls.py
|
dmitrytk/takkand.pw
|
162fd5bb0e58c419977e06ce4633177918bd6f61
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from pages import views
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')), # allauth
path('', include('pages.urls')), # Home and tools pages
path('db/', include('db.urls')), # Oil field and well database
path('accounts/', include('users.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler404 = views.handler404
handler500 = views.handler500
| 39.411765
| 81
| 0.632836
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from pages import views
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')), # allauth
path('', include('pages.urls')), # Home and tools pages
path('db/', include('db.urls')), # Oil field and well database
path('accounts/', include('users.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler404 = views.handler404
handler500 = views.handler500
| 0
| 0
| 0
|
5f78fda933058c7a4fb151a47534efae216f1181
| 2,805
|
py
|
Python
|
app/Http/Controllers/machinelearning.py
|
ChrisFodor333/early_warning
|
b506e6ddaa50912f3cc5b58ee73de8a463879716
|
[
"MIT"
] | null | null | null |
app/Http/Controllers/machinelearning.py
|
ChrisFodor333/early_warning
|
b506e6ddaa50912f3cc5b58ee73de8a463879716
|
[
"MIT"
] | null | null | null |
app/Http/Controllers/machinelearning.py
|
ChrisFodor333/early_warning
|
b506e6ddaa50912f3cc5b58ee73de8a463879716
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
import pandas as pd
import sys
df = pd.read_csv('https://raw.githubusercontent.com/ChrisFodor333/early_warning/main/assets/machine.csv',header = 0);
df = df.dropna();
#df.head(20);
from sklearn.model_selection import train_test_split
data = df
X = data[['altman', 'in05', 'quicktest','bonity','taffler','binkert']]
y = data['result']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
pd.options.mode.chained_assignment = None
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
data["result"] = labelencoder.fit_transform(data["result"])
type = pd.DataFrame({'result': ['No Financial Distress', 'First Degree Financial Distress ', 'Second Degree Financial Distress', 'Third Degree Financial Distress']})
data = create_dummies(data,"result")
# Aby nevypรญsal warningy
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
# Vlastnosti pred strednou normalizรกciou
vlastnosti_pred = X_train
# Strednรก normalizรกcia pre rรฝchlejลกรญ classifier
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
#Transformรกcia dรกt
X_train_array = sc.fit_transform(X_train.values)
# Priradรญm ลกkรกlovanรฉ รบdaje do DataFrame a pouลพijem argumenty indexu a stฤบpcov, aby som zachoval svoje pรดvodnรฉ indexy a nรกzvy stฤบpcov:
X_train = pd.DataFrame(X_train_array, index=X_train.index, columns=X_train.columns)
# Vycentrovanรฉ testovacie dรกta na trรฉnovacรญch dรกtach
X_test_array = sc.transform(X_test.values)
X_test = pd.DataFrame(X_test_array, index=X_test.index, columns=X_test.columns)
# import modelu MLP
from sklearn.neural_network import MLPClassifier
# Inicializovanie perceptrรณnu
mlp = MLPClassifier(hidden_layer_sizes =(100,),solver='adam',learning_rate_init= 0.01, max_iter=500)
# Natrรฉnovaลฅ model
mlp.fit(X_train, y_train)
# Vรฝstupy
MLPClassifier (activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=10, learning_rate='constant',
learning_rate_init=0.01, max_iter=1000, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=None,
shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,
verbose=False, warm_start=False)
# Read the six ratios from the command line as floats (they arrive as strings).
altman = float(sys.argv[1])
in05 = float(sys.argv[2])
qt = float(sys.argv[3])
bonity = float(sys.argv[4])
taffler = float(sys.argv[5])
binkert = float(sys.argv[6])
# Scale the new sample with the scaler fitted on the training data before predicting.
X_new = sc.transform(np.array([[altman, in05, qt, bonity, taffler, binkert]]))
print(mlp.predict(X_new), mlp.predict_proba(X_new) * 100)
| 31.166667
| 165
| 0.775045
|
#!/usr/bin/python
import numpy as np
import pandas as pd
import sys
df = pd.read_csv('https://raw.githubusercontent.com/ChrisFodor333/early_warning/main/assets/machine.csv',header = 0);
df = df.dropna();
#df.head(20);
from sklearn.model_selection import train_test_split
data = df
X = data[['altman', 'in05', 'quicktest','bonity','taffler','binkert']]
y = data['result']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
pd.options.mode.chained_assignment = None
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
data["result"] = labelencoder.fit_transform(data["result"])
type = pd.DataFrame({'result': ['No Financial Distress', 'First Degree Financial Distress ', 'Second Degree Financial Distress', 'Third Degree Financial Distress']})
def create_dummies(df,column_name):
dummies = pd.get_dummies(df[column_name],prefix=column_name)
df = pd.concat([df,dummies],axis=1)
return df
data = create_dummies(data,"result")
# Suppress warnings
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
# Features before mean normalization
vlastnosti_pred = X_train
# Mean normalization for a faster classifier
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Transform the data
X_train_array = sc.fit_transform(X_train.values)
# Assign the scaled data to a DataFrame, using the index and columns arguments to keep the original indices and column names:
X_train = pd.DataFrame(X_train_array, index=X_train.index, columns=X_train.columns)
# Center the test data using the statistics fitted on the training data
X_test_array = sc.transform(X_test.values)
X_test = pd.DataFrame(X_test_array, index=X_test.index, columns=X_test.columns)
# Import the MLP model
from sklearn.neural_network import MLPClassifier
# Initialize the perceptron
mlp = MLPClassifier(hidden_layer_sizes=(100,), solver='adam', learning_rate_init=0.01, max_iter=500)
# Train the model
mlp.fit(X_train, y_train)
# Vรฝstupy
MLPClassifier (activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=10, learning_rate='constant',
learning_rate_init=0.01, max_iter=1000, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=None,
shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,
verbose=False, warm_start=False)
# Read the six ratios from the command line as floats (they arrive as strings).
altman = float(sys.argv[1])
in05 = float(sys.argv[2])
qt = float(sys.argv[3])
bonity = float(sys.argv[4])
taffler = float(sys.argv[5])
binkert = float(sys.argv[6])
# Scale the new sample with the scaler fitted on the training data before predicting.
X_new = sc.transform(np.array([[altman, in05, qt, bonity, taffler, binkert]]))
print(mlp.predict(X_new), mlp.predict_proba(X_new) * 100)
| 133
| 0
| 23
|
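The script above fits a StandardScaler on the training split and then an MLPClassifier on the scaled features; below is a hedged sketch of the same steps bundled into a scikit-learn Pipeline (same estimator settings as the script, X_train/y_train as produced by its train_test_split), which keeps the fit-on-train / transform-on-test ordering automatic.
# Sketch: the scaling + MLP steps from the script above wrapped in a single Pipeline.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier

model = Pipeline([
    ('scale', StandardScaler()),  # statistics are learned from the training data only
    ('mlp', MLPClassifier(hidden_layer_sizes=(100,), solver='adam',
                          learning_rate_init=0.01, max_iter=500)),
])
# model.fit(X_train, y_train)                     # scales and trains in one call
# probabilities = model.predict_proba(X_test) * 100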
ed370fff408ba9f8a4761235130117e6135851e7
| 215
|
py
|
Python
|
python/hello_world.py
|
alanverdugo/travis_github_pages
|
5a7aefc4ac09e27e9a4214469c64262e62458553
|
[
"Apache-2.0"
] | null | null | null |
python/hello_world.py
|
alanverdugo/travis_github_pages
|
5a7aefc4ac09e27e9a4214469c64262e62458553
|
[
"Apache-2.0"
] | null | null | null |
python/hello_world.py
|
alanverdugo/travis_github_pages
|
5a7aefc4ac09e27e9a4214469c64262e62458553
|
[
"Apache-2.0"
] | 1
|
2020-01-30T16:49:12.000Z
|
2020-01-30T16:49:12.000Z
|
#!/usr/bin/python
"""Sample program."""
def hello_world():
"""Print a message to stdout."""
print("Hello, world!")
def return_true():
"""You can rent this space for only $5 a week."""
return True
| 17.916667
| 53
| 0.609302
|
#!/usr/bin/python
"""Sample program."""
def hello_world():
"""Print a message to stdout."""
print("Hello, world!")
def return_true():
"""You can rent this space for only $5 a week."""
return True
| 0
| 0
| 0
|
867226d0bb3ecd16b14fbd99c31bdc8838ca1406
| 1,616
|
py
|
Python
|
allaccess/tests/test_backends.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
allaccess/tests/test_backends.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
allaccess/tests/test_backends.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import authenticate
from .base import AllAccessTestCase
class AuthBackendTestCase(AllAccessTestCase):
"Custom contrib.auth backend tests."
def test_successful_authenticate(self):
"User successfully authenticated."
provider = self.access.provider
identifier = self.access.identifier
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, self.user, "Correct user was not returned.")
def test_provider_name(self):
"Match on provider name as a string."
provider = self.access.provider.name
identifier = self.access.identifier
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, self.user, "Correct user was not returned.")
def test_failed_authentication(self):
"No matches found for the provider/id pair."
provider = self.access.provider
identifier = self.access.identifier
self.access.delete()
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, None, "No user should be returned.")
def test_match_no_user(self):
"Matched access is not associated with a user."
self.access.user = None
self.access.save()
user = authenticate(provider=self.access.provider, identifier=self.access.identifier)
self.assertEqual(user, None, "No user should be returned.")
def test_performance(self):
"Only one query should be required to get the user."
with self.assertNumQueries(1):
authenticate(provider=self.access.provider, identifier=self.access.identifier)
| 44.888889
| 87
| 0.778465
|
from django.contrib.auth import authenticate
from .base import AllAccessTestCase
class AuthBackendTestCase(AllAccessTestCase):
"Custom contrib.auth backend tests."
def setUp(self):
self.user = self.create_user()
self.access = self.create_access(user=self.user)
def test_successful_authenticate(self):
"User successfully authenticated."
provider = self.access.provider
identifier = self.access.identifier
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, self.user, "Correct user was not returned.")
def test_provider_name(self):
"Match on provider name as a string."
provider = self.access.provider.name
identifier = self.access.identifier
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, self.user, "Correct user was not returned.")
def test_failed_authentication(self):
"No matches found for the provider/id pair."
provider = self.access.provider
identifier = self.access.identifier
self.access.delete()
user = authenticate(provider=provider, identifier=identifier)
self.assertEqual(user, None, "No user should be returned.")
def test_match_no_user(self):
"Matched access is not associated with a user."
self.access.user = None
self.access.save()
user = authenticate(provider=self.access.provider, identifier=self.access.identifier)
self.assertEqual(user, None, "No user should be returned.")
def test_performance(self):
"Only one query should be required to get the user."
with self.assertNumQueries(1):
authenticate(provider=self.access.provider, identifier=self.access.identifier)
| 79
| 0
| 23
|
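The tests above drive a provider/identifier authentication backend that is not included in this record; the sketch below only illustrates the kind of backend they imply (the AccountAccess model location and field lookups are assumptions, not django-all-access's actual code).
# Hypothetical sketch of a provider/identifier auth backend (names assumed, see note above).
from django.contrib.auth import get_user_model

class ProviderAccessBackend:
    def authenticate(self, request=None, provider=None, identifier=None):
        from allaccess.models import AccountAccess  # assumed model location
        try:
            # select_related keeps this to a single query, as the performance test expects
            access = AccountAccess.objects.select_related('user').get(
                provider__name=str(provider),  # accepts a Provider instance or its name
                identifier=identifier)
        except AccountAccess.DoesNotExist:
            return None
        return access.user  # None when the access is not linked to a user

    def get_user(self, user_id):
        UserModel = get_user_model()
        try:
            return UserModel.objects.get(pk=user_id)
        except UserModel.DoesNotExist:
            return None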
e1ac703da42b95f543e965d75dca58d86d5ede31
| 717
|
py
|
Python
|
src/_stories/shortcuts.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
src/_stories/shortcuts.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
src/_stories/shortcuts.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
from _stories.mounted import ClassMountedStory
| 23.9
| 52
| 0.60251
|
from _stories.mounted import ClassMountedStory
def contract_in(cls, *args):
def setter(contract):
for attrname in dir(cls):
attribute = getattr(cls, attrname)
if type(attribute) is ClassMountedStory:
attribute.contract(contract)
return contract
if args:
return setter(*args)
else:
return setter
def failures_in(cls, *args):
def setter(failures):
for attrname in dir(cls):
attribute = getattr(cls, attrname)
if type(attribute) is ClassMountedStory:
attribute.failures(failures)
return failures
if args:
return setter(*args)
else:
return setter
| 622
| 0
| 46
|
6766ab8b4ecce2efa50640fdfa43566253393b40
| 466
|
py
|
Python
|
mundo_1/ex017.py
|
tseiiti/curso_em_video
|
59565ce809c1f025fb41ab69de3b8c5b53c8f7b2
|
[
"MIT"
] | null | null | null |
mundo_1/ex017.py
|
tseiiti/curso_em_video
|
59565ce809c1f025fb41ab69de3b8c5b53c8f7b2
|
[
"MIT"
] | null | null | null |
mundo_1/ex017.py
|
tseiiti/curso_em_video
|
59565ce809c1f025fb41ab69de3b8c5b53c8f7b2
|
[
"MIT"
] | null | null | null |
from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''CHALLENGE 017:
Write a program that reads the lengths of the opposite leg and the adjacent leg of a
right triangle, then computes and prints the length of the hypotenuse.
''')
from math import hypot
n1 = float(input('Opposite leg: '))
n2 = float(input('Adjacent leg: '))
#print('The hypotenuse is {}'.format((n1 ** 2 + n2 ** 2) ** 0.5))
print('The hypotenuse is {}'.format(hypot(n1, n2)))
| 31.066667
| 84
| 0.684549
|
from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''CHALLENGE 017:
Write a program that reads the lengths of the opposite leg and the adjacent leg of a
right triangle, then computes and prints the length of the hypotenuse.
''')
from math import hypot
n1 = float(input('Opposite leg: '))
n2 = float(input('Adjacent leg: '))
#print('The hypotenuse is {}'.format((n1 ** 2 + n2 ** 2) ** 0.5))
print('The hypotenuse is {}'.format(hypot(n1, n2)))
| 0
| 0
| 0
|
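math.hypot used above and the commented-out manual formula agree; a quick sanity check with a 3-4-5 triangle:
# Both forms give the same hypotenuse for a 3-4-5 right triangle.
from math import hypot
assert hypot(3.0, 4.0) == (3.0 ** 2 + 4.0 ** 2) ** 0.5 == 5.0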
3889096aea39db5a3ddcfe2ddce7a298cdf60775
| 696
|
py
|
Python
|
scripts/dmg_settings.py
|
magicien/JoyfulPlayer
|
a06e684bd37f387a977427a83f21b07f567f7f09
|
[
"MIT"
] | 1
|
2020-07-04T18:38:54.000Z
|
2020-07-04T18:38:54.000Z
|
scripts/dmg_settings.py
|
magicien/JoyfulPlayer
|
a06e684bd37f387a977427a83f21b07f567f7f09
|
[
"MIT"
] | null | null | null |
scripts/dmg_settings.py
|
magicien/JoyfulPlayer
|
a06e684bd37f387a977427a83f21b07f567f7f09
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import biplist
import os.path
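# Note: the 'defines' mapping used below is injected by dmgbuild when it executes this
# settings file (values supplied on its command line with -D key=value); it is not
# defined or imported in this module.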
app = defines.get('app', './dmg/JoyfulPlayer.app')
appname = os.path.basename(app)
# Basics
format = defines.get('format', 'UDZO')
size = defines.get('size', None)
files = [ app ]
icon_locations = {
appname: (160, 160),
}
# Window configuration
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
window_rect = ((322, 331), (320, 362))
default_view = 'icon_view'
# Icon view configuration
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 100
scroll_position = (0, 0)
label_pos = 'bottom'
text_size = 12
icon_size = 164
| 16.571429
| 50
| 0.712644
|
from __future__ import unicode_literals
import biplist
import os.path
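# Note: the 'defines' mapping used below is injected by dmgbuild when it executes this
# settings file (values supplied on its command line with -D key=value); it is not
# defined or imported in this module.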
app = defines.get('app', './dmg/JoyfulPlayer.app')
appname = os.path.basename(app)
# Basics
format = defines.get('format', 'UDZO')
size = defines.get('size', None)
files = [ app ]
icon_locations = {
appname: (160, 160),
}
# Window configuration
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
window_rect = ((322, 331), (320, 362))
default_view = 'icon_view'
# Icon view configuration
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 100
scroll_position = (0, 0)
label_pos = 'bottom'
text_size = 12
icon_size = 164
| 0
| 0
| 0
|
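A settings file like the one above is consumed by dmgbuild rather than run directly; the sketch below shows one plausible way to drive it from Python. The build_dmg call and its keyword names are an assumption about dmgbuild's API, so check the dmgbuild documentation before relying on them.
# Sketch: building the DMG from the settings file above (dmgbuild API names assumed).
import dmgbuild

dmgbuild.build_dmg(
    'JoyfulPlayer.dmg',                       # output disk image
    'JoyfulPlayer',                           # volume name
    settings_file='scripts/dmg_settings.py',
    defines={'app': './dmg/JoyfulPlayer.app', 'format': 'UDZO'},  # exposed as the defines mapping above
)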
d976a1a0f0149c3c740a9169eca118f522e6a8b3
| 916
|
py
|
Python
|
examples/gps/mf_lqr.py
|
JoeMWatson/trajopt
|
8b98718721e0c373cd7dc01a35f42447c1134713
|
[
"MIT"
] | 1
|
2019-10-17T08:42:17.000Z
|
2019-10-17T08:42:17.000Z
|
examples/gps/mf_lqr.py
|
JoeMWatson/trajopt
|
8b98718721e0c373cd7dc01a35f42447c1134713
|
[
"MIT"
] | null | null | null |
examples/gps/mf_lqr.py
|
JoeMWatson/trajopt
|
8b98718721e0c373cd7dc01a35f42447c1134713
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename: mf_lqr.py
# @Date: 2019-06-16-18-38
# @Author: Hany Abdulsamad
# @Contact: hany@robot-learning.de
import gym
from trajopt.gps import MFGPS
# lqr task
env = gym.make('LQR-TO-v0')
env._max_episode_steps = 100
alg = MFGPS(env, nb_steps=100,
kl_bound=10.,
init_ctl_sigma=50.,
activation=range(100))
# run gps
trace = alg.run(nb_episodes=10, nb_iter=5)
# plot dists
alg.plot()
# execute and plot
nb_episodes = 25
data = alg.sample(nb_episodes, stoch=False)
import matplotlib.pyplot as plt
plt.figure()
for k in range(alg.nb_xdim):
plt.subplot(alg.nb_xdim + alg.nb_udim, 1, k + 1)
plt.plot(data['x'][k, ...])
for k in range(alg.nb_udim):
plt.subplot(alg.nb_xdim + alg.nb_udim, 1, alg.nb_xdim + k + 1)
plt.plot(data['u'][k, ...])
plt.show()
# plot objective
plt.figure()
plt.plot(trace)
plt.show()
| 18.693878
| 66
| 0.649563
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename: mf_lqr.py
# @Date: 2019-06-16-18-38
# @Author: Hany Abdulsamad
# @Contact: hany@robot-learning.de
import gym
from trajopt.gps import MFGPS
# lqr task
env = gym.make('LQR-TO-v0')
env._max_episode_steps = 100
alg = MFGPS(env, nb_steps=100,
kl_bound=10.,
init_ctl_sigma=50.,
activation=range(100))
# run gps
trace = alg.run(nb_episodes=10, nb_iter=5)
# plot dists
alg.plot()
# execute and plot
nb_episodes = 25
data = alg.sample(nb_episodes, stoch=False)
import matplotlib.pyplot as plt
plt.figure()
for k in range(alg.nb_xdim):
plt.subplot(alg.nb_xdim + alg.nb_udim, 1, k + 1)
plt.plot(data['x'][k, ...])
for k in range(alg.nb_udim):
plt.subplot(alg.nb_xdim + alg.nb_udim, 1, alg.nb_xdim + k + 1)
plt.plot(data['u'][k, ...])
plt.show()
# plot objective
plt.figure()
plt.plot(trace)
plt.show()
| 0
| 0
| 0
|
ea039db56cbfde50039819cf3b9a23da17aaa55f
| 1,089
|
py
|
Python
|
L.I.S.A/client.py
|
malnou-org/malnou
|
7ebe565f5df6058bbb9b0991b4d20e2cb79cda65
|
[
"MIT"
] | 8
|
2019-07-17T13:15:55.000Z
|
2021-11-08T09:34:04.000Z
|
L.I.S.A/client.py
|
PotatoSpudowski/malnou
|
7ebe565f5df6058bbb9b0991b4d20e2cb79cda65
|
[
"MIT"
] | null | null | null |
L.I.S.A/client.py
|
PotatoSpudowski/malnou
|
7ebe565f5df6058bbb9b0991b4d20e2cb79cda65
|
[
"MIT"
] | 1
|
2019-08-25T09:17:40.000Z
|
2019-08-25T09:17:40.000Z
|
import time
import sys
import uuid
import argparse
import ibmiotf.device
import wiotp.sdk.device
from configparser import ConfigParser
| 25.928571
| 107
| 0.662994
|
import time
import sys
import uuid
import argparse
import ibmiotf.device
import wiotp.sdk.device
from configparser import ConfigParser
def commandProcessor(cmd):
print("Command received: %s" % cmd.data)
def myOnPublishCallback():
print("Confirmed event received by IoTF\n")
def sendToCloud(data):
authMethod = None
cfg = ConfigParser()
cfg.read('device.cfg')
deviceOptions = {
"identity": {"orgId": cfg.get('device', 'org'),
"typeId": cfg.get('device', 'type'),
"deviceId": cfg.get('device', 'id')},
"auth": {"token": cfg.get('device', 'auth-token')},
}
deviceCli = wiotp.sdk.device.DeviceClient(deviceOptions)
deviceCli.commandCallback = commandProcessor
# Connect and send datapoint(s) into the cloud
deviceCli.connect()
success = deviceCli.publishEvent("Child_screening", "json", data, qos=0, onPublish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| 883
| 0
| 69
|
f5c3e4a7036aaa2b68eeebe3f97522092aaea427
| 2,062
|
py
|
Python
|
lonely-lemmings/earlyinternet/gifapp/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 40
|
2020-08-02T07:38:22.000Z
|
2021-07-26T01:46:50.000Z
|
lonely-lemmings/earlyinternet/gifapp/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 134
|
2020-07-31T12:15:45.000Z
|
2020-12-13T04:42:19.000Z
|
lonely-lemmings/earlyinternet/gifapp/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 101
|
2020-07-31T12:00:47.000Z
|
2021-11-01T09:06:58.000Z
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import editorviews
from . import userviews
urlpatterns = [
# editor paths
path("project", editorviews.render_all_projects, name="projects"),
path("project/create", editorviews.parse_new_project_request, name="new"),
path("project/<str:project_name>", editorviews.paint, name="paint"),
path("project/<str:project_name>/save", editorviews.parse_save_request, name="save"),
path("project/<str:project_name>/render", editorviews.parse_render_request, name="render"),
path("project/<str:project_name>/view", editorviews.parse_view_request, name="view"),
path("project/<str:project_name>/publish", editorviews.parse_post_request, name="publish"),
path("project/<str:project_name>/load", editorviews.parse_image_request, name="images"),
path("project/<str:user>/<str:project_name>/detail", userviews.detail, name="project-detail"),
path("project/<str:user>/<str:project_name>/comment", userviews.submit_comment, name="submit-comment"),
path("", userviews.home, name="home"),
# user authentication paths
path("login/", auth_views.LoginView.as_view(template_name='login.html'), name="login"),
path("logout/", auth_views.LogoutView.as_view(template_name='logout.html'), name="logout"),
path("register/", userviews.register, name="register"),
# password reset paths
path("password_reset/", auth_views.PasswordResetView.as_view(template_name='password_reset.html'),
name='password_reset'),
path("password_reset/done", auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'),
name='password_reset_done'),
path("password_reset/confirm",
auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'),
name='password_reset_confirm'),
path("password_reset/complete",
auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_complete.html'),
name='password_reset_complete'),
]
| 52.871795
| 115
| 0.739088
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import editorviews
from . import userviews
urlpatterns = [
# editor paths
path("project", editorviews.render_all_projects, name="projects"),
path("project/create", editorviews.parse_new_project_request, name="new"),
path("project/<str:project_name>", editorviews.paint, name="paint"),
path("project/<str:project_name>/save", editorviews.parse_save_request, name="save"),
path("project/<str:project_name>/render", editorviews.parse_render_request, name="render"),
path("project/<str:project_name>/view", editorviews.parse_view_request, name="view"),
path("project/<str:project_name>/publish", editorviews.parse_post_request, name="publish"),
path("project/<str:project_name>/load", editorviews.parse_image_request, name="images"),
path("project/<str:user>/<str:project_name>/detail", userviews.detail, name="project-detail"),
path("project/<str:user>/<str:project_name>/comment", userviews.submit_comment, name="submit-comment"),
path("", userviews.home, name="home"),
# user authentication paths
path("login/", auth_views.LoginView.as_view(template_name='login.html'), name="login"),
path("logout/", auth_views.LogoutView.as_view(template_name='logout.html'), name="logout"),
path("register/", userviews.register, name="register"),
# password reset paths
path("password_reset/", auth_views.PasswordResetView.as_view(template_name='password_reset.html'),
name='password_reset'),
path("password_reset/done", auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'),
name='password_reset_done'),
path("password_reset/confirm",
auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'),
name='password_reset_confirm'),
path("password_reset/complete",
auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_complete.html'),
name='password_reset_complete'),
]
| 0
| 0
| 0
|
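Because every route above is named, other views and templates can resolve them by name instead of hard-coding paths; a small sketch using Django's reverse() inside a configured project (the example project and user names are invented):
# Sketch: resolving the named routes above by name (example arguments are invented).
from django.urls import reverse

paint_url = reverse('paint', kwargs={'project_name': 'my-first-gif'})
detail_url = reverse('project-detail',
                     kwargs={'user': 'lemming', 'project_name': 'my-first-gif'})
# In a template: {% url 'publish' project_name=project.name %}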
5e365b996113e0816d0acd5b8b5838506b5170ec
| 1,400
|
py
|
Python
|
src/mdscripts/mdpmaker/mdpmaker.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
src/mdscripts/mdpmaker/mdpmaker.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
src/mdscripts/mdpmaker/mdpmaker.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
from PyQt5 import QtWidgets
from .pages import IntroPage, EMPage, SimTypePage, IntegratorPage, \
NeighbourSearchPage, FreqControlPage, CoulombPage, \
VdWPage, EwaldPage, ThermostatPage, EndPage
| 34.146341
| 74
| 0.622857
|
from PyQt5 import QtWidgets
from .pages import IntroPage, EMPage, SimTypePage, IntegratorPage, \
NeighbourSearchPage, FreqControlPage, CoulombPage, \
VdWPage, EwaldPage, ThermostatPage, EndPage
class MDPWizard(QtWidgets.QWizard):
def __init__(self, parent=None):
super().__init__(parent)
self._pages = []
self.setupUi(self)
def setupUi(self, Wizard):
self.setButtonText(QtWidgets.QWizard.CustomButton1, 'Load MDP...')
self.customButtonClicked.connect(self.onCustomButtonClicked)
for pageclass in [
IntroPage, SimTypePage, EMPage, IntegratorPage,
NeighbourSearchPage,
FreqControlPage, CoulombPage, VdWPage, EwaldPage,
ThermostatPage, EndPage]:
page = pageclass()
self._pages.append(page)
self.setPage(page.pageID, page)
def onCustomButtonClicked(self, which: int):
if which == QtWidgets.QWizard.CustomButton1:
# open new file
filename, fltr = QtWidgets.QFileDialog.getOpenFileUrl(
parent=self, caption='Load an MDP file', directory='',
filter='MDP files (*.mdp);;All files (*)',
initialFilter='MDP files (*.mdp)')
if filename:
self.loadMDP(filename)
def loadMDP(filename):
pass
def saveMDP(filename):
pass
| 1,025
| 14
| 157
|
f7728f245f6643550200ae56f152b0dcaf86976e
| 14,468
|
py
|
Python
|
sidpy/hdf/dtype_utils.py
|
ziatdinovmax/sidpy
|
299147bfc22741b5170aa00e92b34159dfc910c5
|
[
"MIT"
] | null | null | null |
sidpy/hdf/dtype_utils.py
|
ziatdinovmax/sidpy
|
299147bfc22741b5170aa00e92b34159dfc910c5
|
[
"MIT"
] | null | null | null |
sidpy/hdf/dtype_utils.py
|
ziatdinovmax/sidpy
|
299147bfc22741b5170aa00e92b34159dfc910c5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Utilities for transforming and validating data types
Given that many of the data transformations involve copying the data, they should
ideally happen in a lazy manner to avoid memory issues.
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, absolute_import, unicode_literals, print_function
import sys
from warnings import warn
import h5py
import numpy as np
import dask.array as da
__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',
'stack_real_to_complex', 'validate_dtype', 'is_complex_dtype',
'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real']
from sidpy.hdf.hdf_utils import lazy_load_array
if sys.version_info.major == 3:
unicode = str
def flatten_complex_to_real(dataset, lazy=False):
"""
Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.
Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)
Parameters
----------
dataset : array-like or :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Dataset of complex data type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
"""
if not isinstance(dataset, (h5py.Dataset, np.ndarray, da.core.Array)):
raise TypeError('dataset should either be a h5py.Dataset or numpy / dask array')
if not is_complex_dtype(dataset.dtype):
raise TypeError("Expected a complex valued dataset")
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
axis = xp.array(dataset).ndim - 1
if axis == -1:
return xp.hstack([xp.real(dataset), xp.imag(dataset)])
else: # along the last axis
return xp.concatenate([xp.real(dataset), xp.imag(dataset)], axis=axis)
def flatten_compound_to_real(dataset, lazy=False):
"""
Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form
a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components
will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real
valued. ie - this function does not handle structured dtypes having complex values
Parameters
----------
dataset : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Numpy array that is a structured array or a :class:`h5py.Dataset` of compound dtype
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
"""
if isinstance(dataset, h5py.Dataset):
if len(dataset.dtype) == 0:
raise TypeError("Expected compound h5py dataset")
if lazy:
xp = da
dataset = lazy_load_array(dataset)
else:
xp = np
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
return xp.concatenate([xp.array(dataset[name]) for name in dataset.dtype.names], axis=len(dataset.shape) - 1)
elif isinstance(dataset, (np.ndarray, da.core.Array)):
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
if len(dataset.dtype) == 0:
raise TypeError("Expected structured array")
if dataset.ndim > 0:
return xp.concatenate([dataset[name] for name in dataset.dtype.names], axis=dataset.ndim - 1)
else:
return xp.hstack([dataset[name] for name in dataset.dtype.names])
elif isinstance(dataset, np.void):
return np.hstack([dataset[name] for name in dataset.dtype.names])
else:
raise TypeError('Datatype {} not supported'.format(type(dataset)))
def flatten_to_real(ds_main, lazy=False):
"""
Flattens complex / compound / real valued arrays to real valued arrays
Parameters
----------
ds_main : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Compound, complex or real valued numpy array or HDF5 dataset
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_main : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
Array raveled to a float data type
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray, da.core.Array)):
ds_main = np.array(ds_main)
if is_complex_dtype(ds_main.dtype):
return flatten_complex_to_real(ds_main, lazy=lazy)
elif len(ds_main.dtype) > 0:
return flatten_compound_to_real(ds_main, lazy=lazy)
else:
return ds_main
def get_compound_sub_dtypes(struct_dtype):
"""
Returns a dictionary of the dtypes of each of the fields in the given structured array dtype
Parameters
----------
struct_dtype : :class:`numpy.dtype`
dtype of a structured array
Returns
-------
dtypes : dict
Dictionary whose keys are the field names and values are the corresponding dtypes
"""
if not isinstance(struct_dtype, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
dtypes = dict()
for field_name in struct_dtype.fields:
dtypes[field_name] = struct_dtype.fields[field_name][0]
return dtypes
def check_dtype(h5_dset):
"""
Checks the datatype of the input HDF5 dataset and provides the appropriate
function calls to convert it to a float
Parameters
----------
h5_dset : :class:`h5py.Dataset`
Dataset of interest
Returns
-------
func : callable
function that will convert the dataset to a float
is_complex : bool
is the input dataset complex?
is_compound : bool
is the input dataset compound?
n_features : Unsigned int
Unsigned integer - the length of the 2nd dimension of the data after `func` is called on it
type_mult : Unsigned int
multiplier that converts from the typesize of the input :class:`~numpy.dtype` to the
typesize of the data after func is run on it
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
is_complex = False
is_compound = False
in_dtype = h5_dset.dtype
# TODO: avoid assuming 2d shape - why does one even need n_samples!? We only care about the last dimension!
n_features = h5_dset.shape[-1]
if is_complex_dtype(h5_dset.dtype):
is_complex = True
new_dtype = np.real(h5_dset[0, 0]).dtype
type_mult = new_dtype.itemsize * 2
func = flatten_complex_to_real
n_features *= 2
elif len(h5_dset.dtype) > 0:
"""
Some form of structured numpy is in use
We only support real scalars for the component types at the current time
"""
is_compound = True
# TODO: Avoid hard-coding to float32
new_dtype = np.float32
type_mult = len(in_dtype) * new_dtype(0).itemsize
func = flatten_compound_to_real
n_features *= len(in_dtype)
else:
if h5_dset.dtype not in [np.float32, np.float64]:
new_dtype = np.float32
else:
new_dtype = h5_dset.dtype.type
type_mult = new_dtype(0).itemsize
func = new_dtype
return func, is_complex, is_compound, n_features, type_mult
def stack_real_to_complex(ds_real, lazy=False):
"""
Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, 2 x features],
where the first half of the features are the real component and the
second half contains the imaginary components
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
2D complex array arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, da.core.Array, h5py.Dataset)):
if not isinstance(ds_real, (tuple, list)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
if is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:
raise ValueError("Last dimension must be even sized")
half_point = ds_real.shape[-1] // 2
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy and not isinstance(ds_real, da.core.Array):
ds_real = lazy_load_array(ds_real)
return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]
def stack_real_to_compound(ds_real, compound_type, lazy=False):
"""
Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, features]
compound_type : :class:`numpy.dtype`
Target complex data-type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
N-dimensional complex-valued array arranged as [sample, features]
"""
if lazy or isinstance(ds_real, da.core.Array):
raise NotImplementedError('Lazy operation not available due to absence of Dask support')
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, (list, tuple)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
elif is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if not isinstance(compound_type, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
new_spec_length = ds_real.shape[-1] / len(compound_type)
if new_spec_length % 1:
raise ValueError('Provided compound type was not compatible by number of elements')
new_spec_length = int(new_spec_length)
new_shape = list(ds_real.shape) # Make mutable
new_shape[-1] = new_spec_length
xp = np
kwargs = {}
"""
if isinstance(ds_real, h5py.Dataset) and not lazy:
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy:
xp = da
ds_real = lazy_load_array(ds_real)
kwargs = {'chunks': 'auto'}
"""
ds_compound = xp.empty(new_shape, dtype=compound_type, **kwargs)
for name_ind, name in enumerate(compound_type.names):
i_start = name_ind * new_spec_length
i_end = (name_ind + 1) * new_spec_length
ds_compound[name] = ds_real[..., i_start:i_end]
return ds_compound.squeeze()
def stack_real_to_target_dtype(ds_real, new_dtype, lazy=False):
"""
Transforms real data into the target dtype
Parameters
----------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array` or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset
new_dtype : :class:`numpy.dtype`
Target data-type
Returns
----------
ret_val : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
N-dimensional array of the target data-type
"""
if is_complex_dtype(new_dtype):
return stack_real_to_complex(ds_real, lazy=lazy)
try:
if len(new_dtype) > 0:
return stack_real_to_compound(ds_real, new_dtype, lazy=lazy)
except TypeError:
return new_dtype(ds_real)
# catching all other cases, such as np.dtype('<f4')
return new_dtype.type(ds_real)
def validate_dtype(dtype):
"""
Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.
Raises a type error if invalid. Returns True if the object passed the tests
Parameters
----------
dtype : object
Object that is hopefully a :class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
status : bool
True if the object was a valid data-type
"""
if isinstance(dtype, (h5py.Datatype, np.dtype)):
pass
elif isinstance(np.dtype(dtype), np.dtype):
# This should catch all those instances when dtype is something familiar like - np.float32
pass
else:
raise TypeError('dtype should either be a numpy or h5py dtype')
return True
def is_complex_dtype(dtype):
"""
Checks if the provided dtype is a complex dtype
Parameters
----------
dtype : object
Object that is a class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
is_complex : bool
True if the dtype was a complex dtype. Else returns False
"""
validate_dtype(dtype)
    if dtype in [complex, np.complex64, np.complex128]:  # builtin complex (the np.complex alias was removed in recent NumPy)
return True
return False
| 35.99005
| 143
| 0.661736
|
# -*- coding: utf-8 -*-
"""
Utilities for transforming and validating data types
Given that many of the data transformations involve copying the data, they should
ideally happen in a lazy manner to avoid memory issues.
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, absolute_import, unicode_literals, print_function
import sys
from warnings import warn
import h5py
import numpy as np
import dask.array as da
__all__ = ['flatten_complex_to_real', 'get_compound_sub_dtypes', 'flatten_compound_to_real', 'check_dtype',
'stack_real_to_complex', 'validate_dtype', 'is_complex_dtype',
'stack_real_to_compound', 'stack_real_to_target_dtype', 'flatten_to_real']
from sidpy.hdf.hdf_utils import lazy_load_array
if sys.version_info.major == 3:
unicode = str
def flatten_complex_to_real(dataset, lazy=False):
"""
Stacks the real values followed by the imaginary values in the last dimension of the given N dimensional matrix.
Thus a complex matrix of shape (2, 3, 5) will turn into a matrix of shape (2, 3, 10)
Parameters
----------
dataset : array-like or :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Dataset of complex data type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
"""
if not isinstance(dataset, (h5py.Dataset, np.ndarray, da.core.Array)):
raise TypeError('dataset should either be a h5py.Dataset or numpy / dask array')
if not is_complex_dtype(dataset.dtype):
raise TypeError("Expected a complex valued dataset")
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
axis = xp.array(dataset).ndim - 1
if axis == -1:
return xp.hstack([xp.real(dataset), xp.imag(dataset)])
else: # along the last axis
return xp.concatenate([xp.real(dataset), xp.imag(dataset)], axis=axis)
def flatten_compound_to_real(dataset, lazy=False):
"""
Flattens the individual components in a structured array or compound valued hdf5 dataset along the last axis to form
a real valued array. Thus a compound h5py.Dataset or structured numpy matrix of shape (2, 3, 5) having 3 components
will turn into a real valued matrix of shape (2, 3, 15), assuming that all the sub-dtypes of the matrix are real
valued. ie - this function does not handle structured dtypes having complex values
Parameters
----------
dataset : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Numpy array that is a structured array or a :class:`h5py.Dataset` of compound dtype
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
-------
retval : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
real valued dataset
"""
if isinstance(dataset, h5py.Dataset):
if len(dataset.dtype) == 0:
raise TypeError("Expected compound h5py dataset")
if lazy:
xp = da
dataset = lazy_load_array(dataset)
else:
xp = np
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
return xp.concatenate([xp.array(dataset[name]) for name in dataset.dtype.names], axis=len(dataset.shape) - 1)
elif isinstance(dataset, (np.ndarray, da.core.Array)):
if isinstance(dataset, da.core.Array):
lazy = True
xp = np
if lazy:
dataset = lazy_load_array(dataset)
xp = da
if len(dataset.dtype) == 0:
raise TypeError("Expected structured array")
if dataset.ndim > 0:
return xp.concatenate([dataset[name] for name in dataset.dtype.names], axis=dataset.ndim - 1)
else:
return xp.hstack([dataset[name] for name in dataset.dtype.names])
elif isinstance(dataset, np.void):
return np.hstack([dataset[name] for name in dataset.dtype.names])
else:
raise TypeError('Datatype {} not supported'.format(type(dataset)))
def flatten_to_real(ds_main, lazy=False):
"""
Flattens complex / compound / real valued arrays to real valued arrays
Parameters
----------
ds_main : :class:`numpy.ndarray`, or :class:`h5py.Dataset`, or :class:`dask.array.core.Array`
Compound, complex or real valued numpy array or HDF5 dataset
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_main : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
Array raveled to a float data type
"""
if not isinstance(ds_main, (h5py.Dataset, np.ndarray, da.core.Array)):
ds_main = np.array(ds_main)
if is_complex_dtype(ds_main.dtype):
return flatten_complex_to_real(ds_main, lazy=lazy)
elif len(ds_main.dtype) > 0:
return flatten_compound_to_real(ds_main, lazy=lazy)
else:
return ds_main
def get_compound_sub_dtypes(struct_dtype):
"""
Returns a dictionary of the dtypes of each of the fields in the given structured array dtype
Parameters
----------
struct_dtype : :class:`numpy.dtype`
dtype of a structured array
Returns
-------
dtypes : dict
Dictionary whose keys are the field names and values are the corresponding dtypes
"""
if not isinstance(struct_dtype, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
dtypes = dict()
for field_name in struct_dtype.fields:
dtypes[field_name] = struct_dtype.fields[field_name][0]
return dtypes
def check_dtype(h5_dset):
"""
Checks the datatype of the input HDF5 dataset and provides the appropriate
function calls to convert it to a float
Parameters
----------
h5_dset : :class:`h5py.Dataset`
Dataset of interest
Returns
-------
func : callable
function that will convert the dataset to a float
is_complex : bool
is the input dataset complex?
is_compound : bool
is the input dataset compound?
n_features : Unsigned int
Unsigned integer - the length of the 2nd dimension of the data after `func` is called on it
type_mult : Unsigned int
multiplier that converts from the typesize of the input :class:`~numpy.dtype` to the
typesize of the data after func is run on it
"""
if not isinstance(h5_dset, h5py.Dataset):
raise TypeError('h5_dset should be a h5py.Dataset object')
is_complex = False
is_compound = False
in_dtype = h5_dset.dtype
# TODO: avoid assuming 2d shape - why does one even need n_samples!? We only care about the last dimension!
n_features = h5_dset.shape[-1]
if is_complex_dtype(h5_dset.dtype):
is_complex = True
new_dtype = np.real(h5_dset[0, 0]).dtype
type_mult = new_dtype.itemsize * 2
func = flatten_complex_to_real
n_features *= 2
elif len(h5_dset.dtype) > 0:
"""
Some form of structured numpy is in use
We only support real scalars for the component types at the current time
"""
is_compound = True
# TODO: Avoid hard-coding to float32
new_dtype = np.float32
type_mult = len(in_dtype) * new_dtype(0).itemsize
func = flatten_compound_to_real
n_features *= len(in_dtype)
else:
if h5_dset.dtype not in [np.float32, np.float64]:
new_dtype = np.float32
else:
new_dtype = h5_dset.dtype.type
type_mult = new_dtype(0).itemsize
func = new_dtype
return func, is_complex, is_compound, n_features, type_mult
def stack_real_to_complex(ds_real, lazy=False):
"""
Puts the real and imaginary sections of the provided matrix (in the last axis) together to make complex matrix
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, 2 x features],
where the first half of the features are the real component and the
second half contains the imaginary components
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
2D complex array arranged as [sample, features]
"""
if not isinstance(ds_real, (np.ndarray, da.core.Array, h5py.Dataset)):
if not isinstance(ds_real, (tuple, list)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
if is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if ds_real.shape[-1] / 2 != ds_real.shape[-1] // 2:
raise ValueError("Last dimension must be even sized")
half_point = ds_real.shape[-1] // 2
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy and not isinstance(ds_real, da.core.Array):
ds_real = lazy_load_array(ds_real)
return ds_real[..., :half_point] + 1j * ds_real[..., half_point:]
def stack_real_to_compound(ds_real, compound_type, lazy=False):
"""
Converts a real-valued dataset to a compound dataset (along the last axis) of the provided compound d-type
Parameters
------------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array`, or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset where data arranged as [instance, features]
compound_type : :class:`numpy.dtype`
Target complex data-type
lazy : bool, optional. Default = False
If set to True, will use lazy Dask arrays instead of in-memory numpy arrays
Returns
----------
ds_compound : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
N-dimensional complex-valued array arranged as [sample, features]
"""
if lazy or isinstance(ds_real, da.core.Array):
raise NotImplementedError('Lazy operation not available due to absence of Dask support')
if not isinstance(ds_real, (np.ndarray, h5py.Dataset)):
if not isinstance(ds_real, (list, tuple)):
raise TypeError("Expected at least an iterable like a list or tuple")
ds_real = np.array(ds_real)
if len(ds_real.dtype) > 0:
raise TypeError("Array cannot have a compound dtype")
elif is_complex_dtype(ds_real.dtype):
raise TypeError("Array cannot have complex dtype")
if not isinstance(compound_type, np.dtype):
raise TypeError('Provided object must be a structured array dtype')
new_spec_length = ds_real.shape[-1] / len(compound_type)
if new_spec_length % 1:
raise ValueError('Provided compound type was not compatible by number of elements')
new_spec_length = int(new_spec_length)
new_shape = list(ds_real.shape) # Make mutable
new_shape[-1] = new_spec_length
xp = np
kwargs = {}
"""
if isinstance(ds_real, h5py.Dataset) and not lazy:
warn('HDF5 datasets will be loaded as Dask arrays in the future. ie - kwarg lazy will default to True in future releases of sidpy')
if isinstance(ds_real, da.core.Array):
lazy = True
if lazy:
xp = da
ds_real = lazy_load_array(ds_real)
kwargs = {'chunks': 'auto'}
"""
ds_compound = xp.empty(new_shape, dtype=compound_type, **kwargs)
for name_ind, name in enumerate(compound_type.names):
i_start = name_ind * new_spec_length
i_end = (name_ind + 1) * new_spec_length
ds_compound[name] = ds_real[..., i_start:i_end]
return ds_compound.squeeze()
def stack_real_to_target_dtype(ds_real, new_dtype, lazy=False):
"""
Transforms real data into the target dtype
Parameters
----------
ds_real : :class:`numpy.ndarray`, :class:`dask.array.core.Array` or :class:`h5py.Dataset`
n dimensional real-valued numpy array or HDF5 dataset
new_dtype : :class:`numpy.dtype`
Target data-type
Returns
----------
ret_val : :class:`numpy.ndarray` or :class:`dask.array.core.Array`
N-dimensional array of the target data-type
"""
if is_complex_dtype(new_dtype):
return stack_real_to_complex(ds_real, lazy=lazy)
try:
if len(new_dtype) > 0:
return stack_real_to_compound(ds_real, new_dtype, lazy=lazy)
except TypeError:
return new_dtype(ds_real)
# catching all other cases, such as np.dtype('<f4')
return new_dtype.type(ds_real)
def validate_dtype(dtype):
"""
Checks the provided object to ensure that it is a valid dtype that can be written to an HDF5 file.
Raises a type error if invalid. Returns True if the object passed the tests
Parameters
----------
dtype : object
Object that is hopefully a :class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
status : bool
True if the object was a valid data-type
"""
if isinstance(dtype, (h5py.Datatype, np.dtype)):
pass
elif isinstance(np.dtype(dtype), np.dtype):
# This should catch all those instances when dtype is something familiar like - np.float32
pass
else:
raise TypeError('dtype should either be a numpy or h5py dtype')
return True
def is_complex_dtype(dtype):
"""
Checks if the provided dtype is a complex dtype
Parameters
----------
dtype : object
Object that is a class:`h5py.Datatype`, or :class:`numpy.dtype` object
Returns
-------
is_complex : bool
True if the dtype was a complex dtype. Else returns False
"""
validate_dtype(dtype)
    if dtype in [complex, np.complex64, np.complex128]:  # builtin complex (the np.complex alias was removed in recent NumPy)
return True
return False
| 0
| 0
| 0
|
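A quick round trip with the helpers above (in-memory NumPy path, lazy=False): flatten_complex_to_real stacks the real parts followed by the imaginary parts along the last axis, and stack_real_to_complex reassembles them, so a (2, 3) complex array becomes (2, 6) real and back. Assumes sidpy is installed.
# Round-trip sketch for the complex <-> real helpers defined above.
import numpy as np
from sidpy.hdf.dtype_utils import flatten_complex_to_real, stack_real_to_complex

z = np.arange(6).reshape(2, 3).astype(np.complex64) + 1j   # (2, 3) complex
flat = flatten_complex_to_real(z)                          # (2, 6): [real | imag] on the last axis
back = stack_real_to_complex(flat)                         # (2, 3) complex again
assert flat.shape == (2, 6) and np.allclose(back, z)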
e1ac5baef00b06d774dac67914421c5c10c7f2b8
| 4,347
|
py
|
Python
|
artellapipe/libs/usd/core/usdview.py
|
ArtellaPipe/artellapipe-libs-usd
|
20b89bceca730aa961cc10a98ee6b94e09908d80
|
[
"MIT"
] | 1
|
2021-12-01T12:06:59.000Z
|
2021-12-01T12:06:59.000Z
|
artellapipe/libs/usd/core/usdview.py
|
ArtellaPipe/artellapipe-libs-usd
|
20b89bceca730aa961cc10a98ee6b94e09908d80
|
[
"MIT"
] | null | null | null |
artellapipe/libs/usd/core/usdview.py
|
ArtellaPipe/artellapipe-libs-usd
|
20b89bceca730aa961cc10a98ee6b94e09908d80
|
[
"MIT"
] | 1
|
2022-01-04T09:00:21.000Z
|
2022-01-04T09:00:21.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains functions related with Pixar USD usdview application
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import os
import sys
import logging
import subprocess
from artellapipe.libs.usd.core import usdpaths
LOGGER = logging.getLogger('artellapipe-libs-usd')
def get_usd_view_path():
"""
Returns path to USD view executable
:return: str
"""
platform_path = usdpaths.get_platform_path()
usd_view_path = os.path.join(platform_path, 'pixar', 'bin', 'usdview')
return usd_view_path
def open_usd_file(usd_file_path):
"""
Opens given USD file in USD viewer (usdview)
:param usd_file_path: str
:return: bool
"""
if not usd_file_path or not os.path.isfile(usd_file_path):
LOGGER.warning('Given USD file path does not exists: {}!'.format(usd_file_path))
return False
usd_view_path = get_usd_view_path()
if not os.path.exists(usd_view_path):
LOGGER.warning(
'usdview path does not exists: {}. Impossible to open USD file!'.format(usd_view_path))
return False
usd_view_python_libs_path = get_usd_view_python_libs_path()
if not os.path.isdir(usd_view_python_libs_path):
LOGGER.warning(
            'No usdview Python libs directory found; usdview may fail to open or usdview OpenGL may be disabled')
usd_view_python_libs_path = None
pixar_usd_binaries_path = usdpaths.get_pixar_usd_binaries_path()
if not pixar_usd_binaries_path:
LOGGER.warning(
'No Pixar USD binaries path found: "{}". Impossible to launch usdview'.format(pixar_usd_binaries_path))
return False
pixar_usd_libraries_path = usdpaths.get_pixar_usd_libraries_path()
if not pixar_usd_libraries_path:
LOGGER.warning(
'No Pixar USD libraries path found: "{}". Impossible to launch usdview'.format(pixar_usd_libraries_path))
return False
# Dictionary that contains the environment configuration that will be used by usdview instance
usd_view_env = dict()
usd_view_env['PATH'] = r'{}{}{}'.format(pixar_usd_binaries_path, os.pathsep, pixar_usd_libraries_path)
pixar_usd_python_libs_path = usdpaths.get_pixar_usd_python_libs_path()
if pixar_usd_python_libs_path and os.path.isdir(pixar_usd_python_libs_path):
if usd_view_python_libs_path and os.path.isdir(usd_view_python_libs_path):
usd_view_env['PYTHONPATH'] = r'{}{}{}'.format(
pixar_usd_python_libs_path, os.pathsep, usd_view_python_libs_path)
else:
usd_view_env['PYTHONPATH'] = r'{}'.format(pixar_usd_python_libs_path)
else:
if usd_view_python_libs_path and os.path.isdir(usd_view_python_libs_path):
usd_view_env['PYTHONPATH'] = r'{}'.format(usd_view_python_libs_path)
usd_view_plugins_path = get_usd_view_plugins_path()
    if usd_view_plugins_path and os.path.isdir(usd_view_plugins_path):
usd_view_env['PYTHONPATH'] += r'{}{}'.format(os.pathsep, usd_view_plugins_path)
for name in os.listdir(usd_view_plugins_path):
plugin_path = os.path.join(usd_view_plugins_path, name)
if not os.path.isdir(plugin_path):
continue
if usd_view_env.get('PXR_PLUGINPATH_NAME', None):
usd_view_env['PXR_PLUGINPATH_NAME'] += r'{}{}'.format(os.pathsep, plugin_path)
else:
usd_view_env['PXR_PLUGINPATH_NAME'] = r'{}'.format(plugin_path)
p = subprocess.Popen(
['python.exe', usd_view_path, usd_file_path], env=usd_view_env)
# output, error = p.communicate()
# if error:
# LOGGER.error('>>> usdview: {}'.format(error))
return True
| 34.228346
| 117
| 0.700023
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains functions related with Pixar USD usdview application
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import os
import sys
import logging
import subprocess
from artellapipe.libs.usd.core import usdpaths
LOGGER = logging.getLogger('artellapipe-libs-usd')
def get_usd_view_path():
"""
Returns path to USD view executable
:return: str
"""
platform_path = usdpaths.get_platform_path()
usd_view_path = os.path.join(platform_path, 'pixar', 'bin', 'usdview')
return usd_view_path
def get_usd_view_python_libs_path():
externals_path = usdpaths.get_usd_externals_path()
if sys.version[0] == '2':
usd_view_py_libs_path = os.path.join(externals_path, 'python', '2')
else:
usd_view_py_libs_path = os.path.join(externals_path, 'python', '3')
return usd_view_py_libs_path
def get_usd_view_plugins_path():
plugins_path = usdpaths.get_usd_plugins_path()
return os.path.join(plugins_path, 'usdview')
def open_usd_file(usd_file_path):
"""
Opens given USD file in USD viewer (usdview)
:param usd_file_path: str
:return: bool
"""
if not usd_file_path or not os.path.isfile(usd_file_path):
LOGGER.warning('Given USD file path does not exists: {}!'.format(usd_file_path))
return False
usd_view_path = get_usd_view_path()
if not os.path.exists(usd_view_path):
LOGGER.warning(
'usdview path does not exists: {}. Impossible to open USD file!'.format(usd_view_path))
return False
usd_view_python_libs_path = get_usd_view_python_libs_path()
if not os.path.isdir(usd_view_python_libs_path):
LOGGER.warning(
            'No usdview Python libs directory found; usdview may fail to open or usdview OpenGL may be disabled')
usd_view_python_libs_path = None
pixar_usd_binaries_path = usdpaths.get_pixar_usd_binaries_path()
if not pixar_usd_binaries_path:
LOGGER.warning(
'No Pixar USD binaries path found: "{}". Impossible to launch usdview'.format(pixar_usd_binaries_path))
return False
pixar_usd_libraries_path = usdpaths.get_pixar_usd_libraries_path()
if not pixar_usd_libraries_path:
LOGGER.warning(
'No Pixar USD libraries path found: "{}". Impossible to launch usdview'.format(pixar_usd_libraries_path))
return False
# Dictionary that contains the environment configuration that will be used by usdview instance
usd_view_env = dict()
usd_view_env['PATH'] = r'{}{}{}'.format(pixar_usd_binaries_path, os.pathsep, pixar_usd_libraries_path)
pixar_usd_python_libs_path = usdpaths.get_pixar_usd_python_libs_path()
if pixar_usd_python_libs_path and os.path.isdir(pixar_usd_python_libs_path):
if usd_view_python_libs_path and os.path.isdir(usd_view_python_libs_path):
usd_view_env['PYTHONPATH'] = r'{}{}{}'.format(
pixar_usd_python_libs_path, os.pathsep, usd_view_python_libs_path)
else:
usd_view_env['PYTHONPATH'] = r'{}'.format(pixar_usd_python_libs_path)
else:
if usd_view_python_libs_path and os.path.isdir(usd_view_python_libs_path):
usd_view_env['PYTHONPATH'] = r'{}'.format(usd_view_python_libs_path)
usd_view_plugins_path = get_usd_view_plugins_path()
    if usd_view_plugins_path and os.path.isdir(usd_view_plugins_path):
usd_view_env['PYTHONPATH'] += r'{}{}'.format(os.pathsep, usd_view_plugins_path)
for name in os.listdir(usd_view_plugins_path):
plugin_path = os.path.join(usd_view_plugins_path, name)
if not os.path.isdir(plugin_path):
continue
if usd_view_env.get('PXR_PLUGINPATH_NAME', None):
usd_view_env['PXR_PLUGINPATH_NAME'] += r'{}{}'.format(os.pathsep, plugin_path)
else:
usd_view_env['PXR_PLUGINPATH_NAME'] = r'{}'.format(plugin_path)
p = subprocess.Popen(
['python.exe', usd_view_path, usd_file_path], env=usd_view_env)
# output, error = p.communicate()
# if error:
# LOGGER.error('>>> usdview: {}'.format(error))
return True
| 411
| 0
| 46
|
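Typical use of the module above from pipeline code is simply to hand it a USD file path; a minimal sketch (the file path is a placeholder):
# Sketch: launching usdview on a stage via the helper above (path is a placeholder).
from artellapipe.libs.usd.core import usdview

if not usdview.open_usd_file('/projects/shots/sh010/layout.usda'):
    print('usdview could not be launched; see the artellapipe-libs-usd log warnings')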
3d6edba4947aa5c21d770035d86b0d082f1079c6
| 1,509
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/LCS_FORMED_CONSECUTIVE_SEGMENTS_LEAST_LENGTH_K.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/LCS_FORMED_CONSECUTIVE_SEGMENTS_LEAST_LENGTH_K.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/LCS_FORMED_CONSECUTIVE_SEGMENTS_LEAST_LENGTH_K.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#TOFILL
if __name__ == '__main__':
param = [
(4,'aggayxysdfa','aggajxaaasdfa',),
(2,'55571659965107','390286654154',),
(3,'01011011100','0000110001000',),
(5,'aggasdfa','aggajasdfaxy',),
(2,'5710246551','79032504084062',),
(3,'0100010','10100000',),
(3,'aabcaaaa','baaabcd',),
(1,'1219','3337119582',),
(2,'111000011','011',),
(2,'wiC oD','csiuGOUwE',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 35.928571
| 91
| 0.473161
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( k , s1 , s2 ) :
n = len ( s1 )
m = len ( s2 )
lcs = [ [ 0 for x in range ( m + 1 ) ] for y in range ( n + 1 ) ]
cnt = [ [ 0 for x in range ( m + 1 ) ] for y in range ( n + 1 ) ]
for i in range ( 1 , n + 1 ) :
for j in range ( 1 , m + 1 ) :
lcs [ i ] [ j ] = max ( lcs [ i - 1 ] [ j ] , lcs [ i ] [ j - 1 ] )
if ( s1 [ i - 1 ] == s2 [ j - 1 ] ) :
cnt [ i ] [ j ] = cnt [ i - 1 ] [ j - 1 ] + 1 ;
if ( cnt [ i ] [ j ] >= k ) :
for a in range ( k , cnt [ i ] [ j ] + 1 ) :
lcs [ i ] [ j ] = max ( lcs [ i ] [ j ] , lcs [ i - a ] [ j - a ] + a )
return lcs [ n ] [ m ]
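# Note: f_gold computes the longest common subsequence of s1 and s2 that is built
# only from consecutive matching segments of length >= k.
# A hedged sanity check with a trivial case (k = 1 reduces to the ordinary LCS):
#   f_gold(1, 'abc', 'abc') == 3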
#TOFILL
if __name__ == '__main__':
param = [
(4,'aggayxysdfa','aggajxaaasdfa',),
(2,'55571659965107','390286654154',),
(3,'01011011100','0000110001000',),
(5,'aggasdfa','aggajasdfaxy',),
(2,'5710246551','79032504084062',),
(3,'0100010','10100000',),
(3,'aabcaaaa','baaabcd',),
(1,'1219','3337119582',),
(2,'111000011','011',),
(2,'wiC oD','csiuGOUwE',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 675
| 0
| 22
|
af7b906d9f975113497674a1301d8a1da8b05a40
| 13,694
|
py
|
Python
|
TD3.py
|
NamKim88/PGPS
|
51617637cde9b46a6f9fe0ae8c418cb60b0cf15a
|
[
"MIT"
] | 2
|
2020-10-06T12:11:25.000Z
|
2021-09-03T08:57:06.000Z
|
TD3.py
|
NamKim88/PGPS
|
51617637cde9b46a6f9fe0ae8c418cb60b0cf15a
|
[
"MIT"
] | null | null | null |
TD3.py
|
NamKim88/PGPS
|
51617637cde9b46a6f9fe0ae8c418cb60b0cf15a
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import copy
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as FF
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
USE_CUDA = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
Device = torch.device("cuda" if USE_CUDA else "cpu")
| 39.578035
| 108
| 0.635607
|
import numpy as np
import os
import copy
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as FF
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
USE_CUDA = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
Device = torch.device("cuda" if USE_CUDA else "cpu")
class RLNN(nn.Module):
def __init__(self, args):
super(RLNN, self).__init__()
self.args = args
self.nonlinearity_actor = args.nonlinearity_actor
self.nonlinearity_critic = args.nonlinearity_critic
def set_params(self, params):
cpt = 0
for param in self.parameters():
tmp = np.product(param.size())
param.data.copy_(torch.from_numpy(params[cpt:cpt + tmp]).view(param.size()).to(Device))
cpt += tmp
def get_params(self):
return copy.deepcopy(np.hstack([v.cpu().data.numpy().flatten() for v in self.parameters()]))
def get_grads(self):
return copy.deepcopy(np.hstack([v.grad.cpu().data.numpy().flatten() for v in self.parameters()]))
def get_size(self):
return self.get_params().shape[0]
def load_model(self, filename, net_name):
if filename is None: return
params = np.load('{}/{}.npy'.format(filename, net_name))
self.set_params(params)
def save_model(self, output, net_name):
params = self.get_params()
np.save('{}/{}.npy'.format(output, net_name), params)
class Actor(RLNN):
def __init__(self, args, state_dim, action_dim, max_action, hidden1_node=400, hidden2_node=300):
super(Actor, self).__init__(args)
self.l1 = nn.Linear(state_dim, hidden1_node)
self.l2 = nn.Linear(hidden1_node, hidden2_node)
self.l3 = nn.Linear(hidden2_node, action_dim)
self.max_action = max_action
self.to(Device)
def forward(self, state):
# Relu was used in original TD3
if self.nonlinearity_actor == "relu":
a = FF.relu(self.l1(state))
a = FF.relu(self.l2(a))
a = torch.tanh(self.l3(a))
# Elu was used in CERL
elif self.nonlinearity_actor == "elu":
a = FF.elu(self.l1(state))
a = FF.elu(self.l2(a))
a = torch.tanh(self.l3(a))
# Tanh was used in ERL, CEM-RL, and PDERL, this is basic setting
else:
a = torch.tanh(self.l1(state))
a = torch.tanh(self.l2(a))
a = torch.tanh(self.l3(a))
return self.max_action * a
def select_action(self, state):
# Input state is np.array(), therefore, convert np.array() to tensor
state = FloatTensor(state).unsqueeze(0)
# Get action from current policy
action = self.forward(state)
# Must be env.step(np.array* or lis*), therefore, convert tensor to np.array()
return action.cpu().data.numpy().flatten()
class Critic(RLNN):
def __init__(self, args, state_dim, action_dim, hidden1_node=400, hidden2_node=300):
super(Critic, self).__init__(args)
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, hidden1_node)
self.l2 = nn.Linear(hidden1_node, hidden2_node)
self.l3 = nn.Linear(hidden2_node, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, hidden1_node)
self.l5 = nn.Linear(hidden1_node, hidden2_node)
self.l6 = nn.Linear(hidden2_node, 1)
self.to(Device)
def forward(self, state, action):
# The input of critic-Q is [state, action]
sa = torch.cat([state, action], 1)
# Relu was used in original TD3
if self.nonlinearity_critic == "relu":
q1 = FF.relu(self.l1(sa))
q1 = FF.relu(self.l2(q1))
q2 = FF.relu(self.l4(sa))
q2 = FF.relu(self.l5(q2))
# Elu was used in ERL, CERL, and PDERL
elif self.nonlinearity_critic == "elu":
q1 = FF.elu(self.l1(sa))
q1 = FF.elu(self.l2(q1))
q2 = FF.elu(self.l4(sa))
q2 = FF.elu(self.l5(q2))
# Leaky_relu was used in CEM-RL, this is basic setting
else:
q1 = FF.leaky_relu(self.l1(sa))
q1 = FF.leaky_relu(self.l2(q1))
q2 = FF.leaky_relu(self.l4(sa))
q2 = FF.leaky_relu(self.l5(q2))
q1 = self.l3(q1)
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
if self.nonlinearity_critic == "relu":
q1 = FF.relu(self.l1(sa))
q1 = FF.relu(self.l2(q1))
elif self.nonlinearity_critic == "elu":
q1 = FF.elu(self.l1(sa))
q1 = FF.elu(self.l2(q1))
else:
q1 = FF.leaky_relu(self.l1(sa))
q1 = FF.leaky_relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class TD3(object):
def __init__(self, state_dim, action_dim, max_action, args):
# Parameters about the neural net structure of critic and actor
self.args = args
self.max_action = max_action
# Training batch size
self.batch_size = args.batch_size
self.discount = args.discount
self.tau = args.tau
# Action noise is added in the action of target Q
self.policy_noise = args.policy_noise
self.noise_clip = args.noise_clip
# Parameters for Asynchronous update frequency
self.total_iterC = 0
self.total_iterA = 0
self.policy_freq = args.policy_freq
# Guided Beta
self.guided_beta = args.guided_beta
# Define critics and actors
self.critic = Critic(args, state_dim, action_dim, self.args.h1_critic, self.args.h2_critic)
self.actor = Actor(args, state_dim, action_dim, max_action, self.args.h1_actor, self.args.h2_actor)
self.critic_target = copy.deepcopy(self.critic)
self.actor_target = copy.deepcopy(self.actor)
# Define optimizer in which Adam is used
self.critic_optimizer = Adam(self.critic.parameters(), lr=args.critic_lr, weight_decay=args.l2_rate)
self.actor_optimizer = Adam(self.actor.parameters(), lr=args.actor_lr, weight_decay=args.l2_rate)
def select_action(self, state):
# Call the select_action function of actor
return self.actor.select_action(state)
def train(self, replay_buffer):
self.total_iterC += 1
# Sample mini-batch from replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)
# Define target_Q used to estimate critic loss (=TD error)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
# Calculate the target_Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current_Q value
current_Q1, current_Q2 = self.critic(state, action)
# Calculate critic loss (=difference between target_Q and current_Q)
critic_loss = FF.mse_loss(current_Q1, target_Q) + FF.mse_loss(current_Q2, target_Q)
# Optimize the critic parameters
self.critic_optimizer.zero_grad()
critic_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), 10)
self.critic_optimizer.step()
if self.total_iterC % self.policy_freq == 0:
self.total_iterA += 1
# Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor parameters
self.actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), 10)
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def train_guided(self, replay_buffer, guided_param):
self.total_iterC += 1
state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)
with torch.no_grad():
noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
current_Q1, current_Q2 = self.critic(state, action)
critic_loss = FF.mse_loss(current_Q1, target_Q) + FF.mse_loss(current_Q2, target_Q)
self.critic_optimizer.zero_grad()
critic_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), 10)
self.critic_optimizer.step()
if self.total_iterC % self.policy_freq == 0:
self.total_iterA += 1
with torch.no_grad():
guided_actor = copy.deepcopy(self.actor)
guided_actor.set_params(guided_param)
distance = ((self.actor(state) - guided_actor(state)) ** 2).mean()
actor_loss = -self.critic.Q1(state, self.actor(state)).mean() + self.guided_beta * distance
self.actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), 10)
self.actor_optimizer.step()
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def train_critic(self, replay_buffer):
self.total_iterC += 1
state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)
with torch.no_grad():
noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
current_Q1, current_Q2 = self.critic(state, action)
critic_loss = FF.mse_loss(current_Q1, target_Q) + FF.mse_loss(current_Q2, target_Q)
self.critic_optimizer.zero_grad()
critic_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), 10)
self.critic_optimizer.step()
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def train_actor(self, replay_buffer):
self.total_iterA += 1
state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), 10)
self.actor_optimizer.step()
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def train_actor_guided(self, replay_buffer, guided_param):
self.total_iterA += 1
state, action, next_state, reward, not_done = replay_buffer.sample(self.batch_size)
with torch.no_grad():
guided_actor = copy.deepcopy(self.actor)
guided_actor.set_params(guided_param)
distance = ((self.actor(state) - guided_actor(state)) ** 2).mean()
actor_loss = -self.critic.Q1(state, self.actor(state)).mean() + self.guided_beta * distance
self.actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), 10)
self.actor_optimizer.step()
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, filename):
        # Save flat parameter vectors, matching RLNN.get_params()/set_params()
        np.save(filename + "_critic.npy", self.critic.get_params())
        np.save(filename + "_actor.npy", self.actor.get_params())
    def load(self, filename):
        params_critic = np.load(filename + "_critic.npy")
        self.critic.set_params(params_critic)
        self.critic_target = copy.deepcopy(self.critic)
        params_actor = np.load(filename + "_actor.npy")
        self.actor.set_params(params_actor)
self.actor_target = copy.deepcopy(self.actor)
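# Minimal training-loop sketch, assuming an `args` namespace, a Gym-style `env`,
# and a `replay_buffer` with a sample(batch_size) method exist elsewhere in the
# project (all three names are hypothetical here):
#
#   agent = TD3(state_dim, action_dim, max_action, args)
#   for step in range(total_steps):
#       action = agent.select_action(state)   # numpy action suitable for env.step()
#       # ...store (state, action, next_state, reward, not_done) in replay_buffer...
#       agent.train(replay_buffer)            # critic update; actor update every policy_freq calls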
| 12,669
| -7
| 682
|
13bdfd3b8751d680236d3414ac983cb03ec44f16
| 3,724
|
py
|
Python
|
QuickPotato/harness/results.py
|
JoeyHendricks/QuickPotato
|
5e33e64d77997b00a43f5573353138436b1f1a34
|
[
"MIT"
] | 130
|
2020-11-19T00:19:53.000Z
|
2022-01-18T21:16:40.000Z
|
QuickPotato/harness/results.py
|
JoeyHendricks/QuickPotato
|
5e33e64d77997b00a43f5573353138436b1f1a34
|
[
"MIT"
] | 16
|
2020-11-22T14:27:11.000Z
|
2022-01-19T17:38:57.000Z
|
QuickPotato/harness/results.py
|
JoeyHendricks/QuickPotato
|
5e33e64d77997b00a43f5573353138436b1f1a34
|
[
"MIT"
] | 11
|
2020-12-02T08:36:46.000Z
|
2021-12-27T06:52:23.000Z
|
from QuickPotato.database.queries import Crud
| 28.868217
| 89
| 0.584855
|
from QuickPotato.database.queries import Crud
class BoundariesTestEvidence(Crud):
def __init__(self):
super(BoundariesTestEvidence, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.verification_name = None
self.status = None
self.value = None
self.boundary = None
def save(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"verification_name": self.verification_name,
"status": self.status,
"value": self.value,
"boundary": self.boundary
}
return self.insert_boundaries_test_evidence(
database_name=self.database_name,
payload=payload
)
class RegressionTestEvidence(Crud):
def __init__(self):
super(RegressionTestEvidence, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.verification_name = None
self.status = None
self.value = None
self.critical_value = None
def save_test_evidence(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"verification_name": self.verification_name,
"status": self.status,
"value": self.value,
"critical_value": self.critical_value
}
return self.insert_regression_test_evidence(
database_name=self.database_name,
payload=payload
)
class TestReport(Crud):
def __init__(self):
super(TestReport, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.status = None
self.boundaries_breached = None
self.regression_found = None
def save(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"status": self.status,
"boundaries_breached": self.boundaries_breached,
"regression_found": self.regression_found
}
if self.check_if_test_id_exists_in_test_report(self.database_name, self.test_id):
# Update existing test results
return self.update_results_in_test_report(
database_name=self.database_name,
test_id=self.test_id,
payload=payload
)
else:
# Insert new test results
return self.insert_results_into_test_report(
database_name=self.database_name,
payload=payload
)
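# Minimal usage sketch, assuming a Crud backend/database is configured; the field
# values below are hypothetical:
#
#   report = TestReport()
#   report.database_name = "example_db"
#   report.test_id = "run-001"
#   report.test_case_name = "my_test_case"
#   report.status = True
#   report.save()   # inserts a new row, or updates the existing row with the same test_id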
| 1,007
| 2,599
| 69
|
ebc77182ef16795e237af1a79e03276e443b251d
| 276
|
py
|
Python
|
chill/examples/chill/testcases/peel12.script.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | 5
|
2019-05-20T03:35:41.000Z
|
2021-09-16T22:22:13.000Z
|
chill/examples/chill/testcases/peel12.script.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | null | null | null |
chill/examples/chill/testcases/peel12.script.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | null | null | null |
#
# example from CHiLL manual page 13
#
# peel 4 statements from the END of innermost loop
#
from chill import *
source('peel9101112.c')
destination('peel12modified.c')
procedure('mm')
loop(0)
peel(1,2,-4) # statement 1, loop 2 (middle, for j), 4 statements from END
| 15.333333
| 74
| 0.699275
|
#
# example from CHiLL manual page 13
#
# peel 4 statements from the END of innermost loop
#
from chill import *
source('peel9101112.c')
destination('peel12modified.c')
procedure('mm')
loop(0)
peel(1,2,-4) # statement 1, loop 2 (middle, for j), 4 statements from END
| 0
| 0
| 0
|
eb7a380c79b2a724dacaac572d80cf9e40cec552
| 5,408
|
py
|
Python
|
Main crawler/novel/spiders/crawler.py
|
phantom0174/Novel_Crawler
|
9a38ec46bb6d0963ba3d80ae99e7b4f9ff7c15a0
|
[
"MIT"
] | 1
|
2022-03-19T13:20:00.000Z
|
2022-03-19T13:20:00.000Z
|
Main crawler/novel/spiders/crawler.py
|
phantom0174/Light-Novel-crawler
|
9a38ec46bb6d0963ba3d80ae99e7b4f9ff7c15a0
|
[
"MIT"
] | null | null | null |
Main crawler/novel/spiders/crawler.py
|
phantom0174/Light-Novel-crawler
|
9a38ec46bb6d0963ba3d80ae99e7b4f9ff7c15a0
|
[
"MIT"
] | null | null | null |
import scrapy
from opencc import OpenCC
import os
all = [[]]
del(all[0])
| 37.041096
| 225
| 0.514238
|
import scrapy
from opencc import OpenCC
import os
all = [[]]
del(all[0])
class ncrawler(scrapy.Spider):
name = 'n'
#Things of web links
start_urls = [input('Input the url of the book :')]
domain = str()
domain_set = bool(0)
#Crawler mode set
parse_mode = int(1)
next_chapter = int(0)
#Index of txt inputing
cur_book_chapter_count = int(0)
cur_book = int(0)
#Temp
book_text = []
#Book basic info
book_name_tw = str()
book_ccount = []
title_order_tw = []
chapter_name_tw = []
chapter_links = []
def parse(self, response):
if(ncrawler.domain_set == bool(0) and ncrawler.start_urls[0][-1] == 'm'):
ncrawler.domain = ncrawler.start_urls[0].split('i')[0]
if (ncrawler.parse_mode == 1):
book_name_link = response.xpath('//*[@id="title"]/text()')
order_links = response.xpath('//td[contains(@class,"css")]')
book_name = book_name_link.get()
#Partial basic info
class_order = []
title_order = []
chapter_name = []
chapter_partial_links = []
for i in order_links:
spec_class = i.xpath('@class').get()
if (spec_class == 'ccss' and i.xpath('string()').get() != '\xa0'):
class_order.append(spec_class)
chapter_name.append(i.xpath('a/text()').extract())
chapter_partial_links.append(i.css('a::attr(href)').extract())
elif (spec_class == 'vcss'):
class_order.append(spec_class)
title_order.append(i.xpath('text()').get())
class_order.append('vcss')
# find the chapter count of each book
chapter_count = int(0)
for i in class_order:
if (i != 'vcss'):
chapter_count += 1
else:
ncrawler.book_ccount.append(chapter_count)
chapter_count = 0
ncrawler.book_ccount.remove(0)
class_order.clear()
# translate
cc = OpenCC('s2tw')
ncrawler.book_name_tw = cc.convert(book_name)
#
for i in title_order:
ncrawler.title_order_tw.append(cc.convert(i))
for i in range(len(chapter_name)):
ncrawler.chapter_name_tw.append(cc.convert(str(chapter_name[i])))
title_order.clear()
chapter_name.clear()
#Specific character removal
            ncrawler.book_name_tw = ncrawler.book_name_tw.replace('\\','_').replace('/','_').replace(':','：').replace('*','＊').replace('?','？').replace('"','_').replace('<','＜').replace('>','＞').replace('|','｜')
            for i in range(len(ncrawler.title_order_tw)):
                ncrawler.title_order_tw[i] = ncrawler.title_order_tw[i].replace('\\','_').replace('/','_').replace(':','：').replace('*','＊').replace('?','？').replace('"','_').replace('<','＜').replace('>','＞').replace('|','｜')
#
for i in chapter_partial_links:
ncrawler.chapter_links.append(ncrawler.domain + i[0])
ncrawler.parse_mode = 2
elif(ncrawler.parse_mode == 2):
cc = OpenCC('s2tw')
chapter_title = cc.convert(response.xpath('//*[@id="title"]/text()').get())
            if(chapter_title[-3:-1] != '插圖'):
text_links = response.xpath('//*[@id="content"]/text()')
inner_string = str()
for i in text_links:
inner_string = inner_string + i.get()
ncrawler.book_text.append(inner_string)
ncrawler.cur_book_chapter_count += 1
if(ncrawler.cur_book_chapter_count == ncrawler.book_ccount[ncrawler.cur_book]):
temp = []
for i in ncrawler.book_text:
temp.append(i)
all.append(temp)
ncrawler.book_text.clear()
ncrawler.cur_book += 1
ncrawler.cur_book_chapter_count = 0
ncrawler.next_chapter += 1
if(ncrawler.next_chapter > (len(ncrawler.chapter_links) - 1)):
chapter_title_count = int(0)
for i in range(len(ncrawler.title_order_tw)):
folder = 'D:\\' + ncrawler.book_name_tw
if not os.path.isdir(folder):
os.mkdir(folder)
path = str(folder + '\\' + str(i + 1) + ncrawler.title_order_tw[i] + '.txt')
f = open(path, 'w', encoding='UTF-8')
for j in range(len(all[i])):
                        if (ncrawler.chapter_name_tw[chapter_title_count][2:-2] != '插圖'):
chapter_head = str('//' + ncrawler.chapter_name_tw[chapter_title_count][2:-2])
f.write(chapter_head)
cc = OpenCC('s2tw')
for k in all[i][j]:
f.write(cc.convert(k))
chapter_title_count += 1
f.close()
if(ncrawler.parse_mode == 2 and ncrawler.next_chapter <= len(ncrawler.chapter_links) - 1):
yield scrapy.Request(url=ncrawler.chapter_links[ncrawler.next_chapter], callback=self.parse)
| 4,838
| 506
| 23
|
2a562ab23ac47bf79051fcbd2ddf912e304dc587
| 663
|
py
|
Python
|
examples/django_example/example/urls.py
|
hhru/python_social_auth
|
56945b8a031f276f4415a92a9ca4b7d61e951b12
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T07:12:28.000Z
|
2015-11-05T07:12:28.000Z
|
examples/django_example/example/urls.py
|
JasonSanford/python-social-auth
|
2034a4390f785639c99fc05a0b747739e6d297fd
|
[
"BSD-3-Clause"
] | null | null | null |
examples/django_example/example/urls.py
|
JasonSanford/python-social-auth
|
2034a4390f785639c99fc05a0b747739e6d297fd
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'example.app.views.home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^signup-email/', 'example.app.views.signup_email'),
url(r'^email-sent/', 'example.app.views.validation_sent'),
url(r'^login/$', 'example.app.views.home'),
url(r'^logout/$', 'example.app.views.logout'),
url(r'^done/$', 'example.app.views.done', name='done'),
url(r'^email/$', 'example.app.views.require_email', name='require_email'),
url(r'', include('social.apps.django_app.urls', namespace='social'))
)
| 36.833333
| 78
| 0.665158
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'example.app.views.home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^signup-email/', 'example.app.views.signup_email'),
url(r'^email-sent/', 'example.app.views.validation_sent'),
url(r'^login/$', 'example.app.views.home'),
url(r'^logout/$', 'example.app.views.logout'),
url(r'^done/$', 'example.app.views.done', name='done'),
url(r'^email/$', 'example.app.views.require_email', name='require_email'),
url(r'', include('social.apps.django_app.urls', namespace='social'))
)
| 0
| 0
| 0
|
1acfbcde022326d208f6aa498eaf13494d09b493
| 188
|
py
|
Python
|
test_examples/python_2.py
|
Tejas-P-Herle/Python_Language_Converter
|
f349659a7fcc980d31ddf58f38b35a4aae28561b
|
[
"MIT"
] | 3
|
2018-05-09T14:06:55.000Z
|
2019-04-10T22:53:42.000Z
|
test_examples/python_2.py
|
Tejas-P-Herle/PLC
|
f349659a7fcc980d31ddf58f38b35a4aae28561b
|
[
"MIT"
] | null | null | null |
test_examples/python_2.py
|
Tejas-P-Herle/PLC
|
f349659a7fcc980d31ddf58f38b35a4aae28561b
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 14.461538
| 26
| 0.478723
|
def main():
value = 1
if value == 0:
print("False")
elif value == 1:
print("True")
else:
print("Undefined")
if __name__ == "__main__":
main()
| 126
| 0
| 22
|
b2974283a5479f9baea3ad417a3eb2cb36194a6b
| 1,308
|
py
|
Python
|
apps/api/auth/decorators.py
|
Praetorian-Defence/praetorian-api
|
181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4
|
[
"MIT"
] | 2
|
2020-06-29T15:12:04.000Z
|
2020-10-13T14:18:21.000Z
|
apps/api/auth/decorators.py
|
Praetorian-Defence/praetorian-api
|
181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4
|
[
"MIT"
] | 10
|
2021-01-04T11:33:38.000Z
|
2021-05-07T10:23:48.000Z
|
apps/api/auth/decorators.py
|
zurek11/praetorian-api
|
181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4
|
[
"MIT"
] | null | null | null |
from functools import wraps
from http import HTTPStatus
from django.utils.translation import gettext as _
from apps.api.errors import ApiException
def signature_exempt(view_func):
"""Mark a view function as being exempt from signature and apikey check."""
wrapped_view.signature_exempt = True
return wraps(view_func)(wrapped_view)
| 33.538462
| 113
| 0.716361
|
from functools import wraps
from http import HTTPStatus
from django.utils.translation import gettext as _
from apps.api.errors import ApiException
def token_required(func):
@wraps(func)
def inner(request, *args, **kwargs):
if not hasattr(request, 'user') or not request.user or not request.user.is_authenticated:
raise ApiException(request, _("Invalid or missing credentials"), status_code=HTTPStatus.UNAUTHORIZED)
elif request.user.is_2fa and not request.token.active_2fa:
raise ApiException(request, _("Invalid or missing credentials"), status_code=HTTPStatus.UNAUTHORIZED)
return func(request, *args, **kwargs)
return inner
def superuser_required(func):
@wraps(func)
def inner(request, *args, **kwargs):
if not request.user.is_authenticated or not request.user.is_superuser:
raise ApiException(request, _('User is unauthorized.'), status_code=HTTPStatus.FORBIDDEN)
return func(request, *args, **kwargs)
return inner
def signature_exempt(view_func):
"""Mark a view function as being exempt from signature and apikey check."""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.signature_exempt = True
return wraps(view_func)(wrapped_view)
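# Minimal usage sketch; the views and the JsonResponse import are hypothetical and
# only illustrate how these decorators are applied to Django views:
#
#   from django.http import JsonResponse
#
#   @token_required
#   def my_view(request):
#       return JsonResponse({'ok': True})
#
#   @signature_exempt
#   def health_check(request):
#       return JsonResponse({'status': 'up'})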
| 887
| 0
| 72
|
944fa87a20a65b14f77e66c05ef217e835e0f5ea
| 644
|
py
|
Python
|
qv/pages.py
|
Furuneko/otree_quadratic_voting
|
30ec9002d03153a6b22c9f1eedfe242b199c1255
|
[
"MIT"
] | 1
|
2020-02-22T21:26:12.000Z
|
2020-02-22T21:26:12.000Z
|
qv/pages.py
|
Furuneko/otree_quadratic_voting
|
30ec9002d03153a6b22c9f1eedfe242b199c1255
|
[
"MIT"
] | null | null | null |
qv/pages.py
|
Furuneko/otree_quadratic_voting
|
30ec9002d03153a6b22c9f1eedfe242b199c1255
|
[
"MIT"
] | null | null | null |
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
page_sequence = [
QV
]
| 28
| 89
| 0.614907
|
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class QV(Page):
form_model = 'player'
form_fields = ['q' + str(n) for n in range(1, len(Constants.questions)+1)] + \
['q' + str(n) + '_agree' for n in range(1, len(Constants.questions)+1)]
def vars_for_template(self):
return {
'title': self.session.vars['survey_title'],
'num_questions': len(Constants.questions),
'questions': self.participant.vars['questions'],
'credits': Constants.vote_credits
}
page_sequence = [
QV
]
| 252
| 220
| 23
|
26a2050769dd854ae446a223707a3abffc1952e2
| 1,832
|
py
|
Python
|
random-priority-raffle.py
|
greghaskins/random-priority-raffle
|
15cb8425a13adcd5c9b56dc3fff63646060c83a4
|
[
"MIT"
] | null | null | null |
random-priority-raffle.py
|
greghaskins/random-priority-raffle
|
15cb8425a13adcd5c9b56dc3fff63646060c83a4
|
[
"MIT"
] | null | null | null |
random-priority-raffle.py
|
greghaskins/random-priority-raffle
|
15cb8425a13adcd5c9b56dc3fff63646060c83a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import random
from pprint import pprint
import yaml
import raffle
# ------------------------
# Command-line interface
# ------------------------
USAGE = f"""
Usage: {os.path.basename(__file__)} config_file [random_seed]
config_file (required):
Raffle configuration file in YAML format. See config.sample.yaml
for an example.
random_seed (optional):
An optional seed value to use for the underlying random number
generator. Use this parameter for greater control and repeatable
results. If not specified, the random number generator will use
cryptographic random values provided by the operating system.
"""
try:
with open(sys.argv[1], 'r') as config_file:
configuration = yaml.safe_load(config_file)
random_seed = sys.argv[2] if len(sys.argv) > 2 else None
except (IndexError, IOError, yaml.parser.ParserError) as e:
sys.stderr.write(USAGE)
raise e
try:
prizes = configuration['prizes']
entries = configuration['entries']
preferences = configuration['preferences']
except KeyError as e:
sys.stderr.write(f"Invalid configuration file: {repr(e)}\n")
sys.exit(1)
if random_seed:
print(f"Using random seed: {random_seed}")
random_source = random.Random(random_seed)
else:
print("Using system random number generator")
random_source = random.SystemRandom()
print("Running raffle with configuration:")
pprint(configuration)
results = raffle.raffle(prizes, entries, preferences, random_source)
leftover_prizes = list(prizes)
print("=" * 78)
print("Results:\n")
for i, (participant, prize) in enumerate(results):
print(f"{i + 1}: {participant} -> {prize}")
leftover_prizes.remove(prize)
print("=" * 78)
print("Leftover prizes:\n")
pprint(leftover_prizes)
| 26.171429
| 72
| 0.691048
|
#!/usr/bin/env python3
import os
import sys
import random
from pprint import pprint
import yaml
import raffle
# ------------------------
# Command-line interface
# ------------------------
USAGE = f"""
Usage: {os.path.basename(__file__)} config_file [random_seed]
config_file (required):
Raffle configuration file in YAML format. See config.sample.yaml
for an example.
random_seed (optional):
An optional seed value to use for the underlying random number
generator. Use this parameter for greater control and repeatable
results. If not specified, the random number generator will use
cryptographic random values provided by the operating system.
"""
try:
with open(sys.argv[1], 'r') as config_file:
configuration = yaml.safe_load(config_file)
random_seed = sys.argv[2] if len(sys.argv) > 2 else None
except (IndexError, IOError, yaml.parser.ParserError) as e:
sys.stderr.write(USAGE)
raise e
try:
prizes = configuration['prizes']
entries = configuration['entries']
preferences = configuration['preferences']
except KeyError as e:
sys.stderr.write(f"Invalid configuration file: {repr(e)}\n")
sys.exit(1)
if random_seed:
print(f"Using random seed: {random_seed}")
random_source = random.Random(random_seed)
else:
print("Using system random number generator")
random_source = random.SystemRandom()
print("Running raffle with configuration:")
pprint(configuration)
results = raffle.raffle(prizes, entries, preferences, random_source)
leftover_prizes = list(prizes)
print("=" * 78)
print("Results:\n")
for i, (participant, prize) in enumerate(results):
print(f"{i + 1}: {participant} -> {prize}")
leftover_prizes.remove(prize)
print("=" * 78)
print("Leftover prizes:\n")
pprint(leftover_prizes)
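# Example invocation mirroring the USAGE text above (the seed value is arbitrary):
#
#   python3 random-priority-raffle.py config.sample.yaml 42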
| 0
| 0
| 0
|
211a957319f53b0ed619653772aec6e92b690c12
| 294
|
py
|
Python
|
notaso/comments/admin.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 11
|
2017-03-16T21:47:51.000Z
|
2021-11-30T12:38:59.000Z
|
notaso/comments/admin.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 43
|
2015-01-13T14:14:48.000Z
|
2021-12-29T14:21:25.000Z
|
notaso/comments/admin.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 5
|
2015-09-27T15:05:36.000Z
|
2019-05-14T17:09:06.000Z
|
from django.contrib import admin
from .models import Comment
admin.site.register(Comment, CommentAdmin)
| 24.5
| 77
| 0.755102
|
from django.contrib import admin
from .models import Comment
class CommentAdmin(admin.ModelAdmin):
list_display = ("created_at", "body", "professor", "created_by")
search_fields = ["body", "professor__first_name", "professor__last_name"]
admin.site.register(Comment, CommentAdmin)
| 0
| 163
| 23
|
57f61a27645356777c47ccb25a3133113ccffa1b
| 734
|
py
|
Python
|
output_newtargs/cp/pandas_merge_CP_bglf4_scores_newinformers_pkis1.py
|
SpencerEricksen/informers
|
5fd3934f5789c371026fc9eece1846ff1294122b
|
[
"MIT"
] | null | null | null |
output_newtargs/cp/pandas_merge_CP_bglf4_scores_newinformers_pkis1.py
|
SpencerEricksen/informers
|
5fd3934f5789c371026fc9eece1846ff1294122b
|
[
"MIT"
] | 1
|
2019-01-15T22:17:25.000Z
|
2019-01-16T12:14:39.000Z
|
output_newtargs/cp/pandas_merge_CP_bglf4_scores_newinformers_pkis1.py
|
SpencerEricksen/informers
|
5fd3934f5789c371026fc9eece1846ff1294122b
|
[
"MIT"
] | 1
|
2019-01-15T12:36:15.000Z
|
2019-01-15T12:36:15.000Z
|
#!/home/ssericksen/anaconda2/bin/python2.7
import pandas as pd
import numpy as np
# load Ching-Pei's compound scores for BGLF4 with PKIS1
df1 = pd.read_csv('bglf4_pkis1', sep=" ")
df1.set_index('fid', inplace=True)
df1.columns = ['BGLF4']
df1.index.rename('molid', inplace=True)
df1.index = df1.index.map(str)
# load informer list as dataframe
df2 = pd.read_csv('new_pkis1_informers_CP.csv', header=None)
df2.set_index(0, inplace=True)
df2.index.rename('molid', inplace=True)
df2.columns = ['BGLF4']
df2.index = df2.index.map(str)
# merge dataframes
df3 = pd.concat( [df1, df2], axis=0 )
print("duplicated indices: {}".format(df3.duplicated().sum()))
# check duplicates for PKIS1 molid '11959682'
print( df3.loc['11959682'] )
| 27.185185
| 64
| 0.723433
|
#!/home/ssericksen/anaconda2/bin/python2.7
import pandas as pd
import numpy as np
# load Ching-Pei's compound scores for BGLF4 with PKIS1
df1 = pd.read_csv('bglf4_pkis1', sep=" ")
df1.set_index('fid', inplace=True)
df1.columns = ['BGLF4']
df1.index.rename('molid', inplace=True)
df1.index = df1.index.map(str)
# load informer list as dataframe
df2 = pd.read_csv('new_pkis1_informers_CP.csv', header=None)
df2.set_index(0, inplace=True)
df2.index.rename('molid', inplace=True)
df2.columns = ['BGLF4']
df2.index = df2.index.map(str)
# merge dataframes
df3 = pd.concat( [df1, df2], axis=0 )
print("duplicated indices: {}".format(df3.duplicated().sum()))
# check duplicates for PKIS1 molid '11959682'
print( df3.loc['11959682'] )
| 0
| 0
| 0
|
54ba5ac4b0ad8172cb60636d5bb318552a5456aa
| 3,169
|
py
|
Python
|
fwenchino/quique.py
|
fwenchino/lambdata-fwenchino
|
f5bfecbd858086df90af4c7280162928ef615a4f
|
[
"MIT"
] | 1
|
2019-09-04T15:24:32.000Z
|
2019-09-04T15:24:32.000Z
|
fwenchino/quique.py
|
fwenchino/lambdata-fwenchino
|
f5bfecbd858086df90af4c7280162928ef615a4f
|
[
"MIT"
] | null | null | null |
fwenchino/quique.py
|
fwenchino/lambdata-fwenchino
|
f5bfecbd858086df90af4c7280162928ef615a4f
|
[
"MIT"
] | 1
|
2019-08-14T15:46:45.000Z
|
2019-08-14T15:46:45.000Z
|
import pandas as pd
import numpy as np
| 30.76699
| 87
| 0.535185
|
import pandas as pd
import numpy as np
def null_report(df):
total = df.isnull().sum()
perc = total / df.isnull().count() * 100
tt = pd.concat([total, perc], axis=1, keys=['Total', 'Percent'])
types = []
for col in df.columns:
dtypeimport pandas as pd
import numpy as np
class null_report():
"""This class provides dataframe NaN reporting functionality in tidy form
"""
def generate_report(self):
total = self.isnull().sum()
perc = total / self.isnull().count() * 100
new_frame = pd.concat([total, perc], axis=1, keys=['Total', 'Percent'])
types = []
for col in self.columns:
dtype = str(self[col].dtype)
types.append(dtype)
new_frame['Types'] = types
return np.transpose(new_frame)
def train_val_test_split(df):
train, val, test = np.split(df.sample(frac=1), [int(.6
* len(df)), int(.8 * len(df))])
return train, val, test
def add_list_to_df(df, lst):
"""This function takes a dataframe and a list,
then adds the list to the dataframe as a new column
"""
s = pd.Series(lst)
return pd.concat([df, s], axis=1)
def simple_confusion_matrix(y_true, y_pred):
y_true = pd.Series(y_true, name='True')
y_pred = pd.Series(y_pred, name='Predicted')
return pd.crosstab(y_true, y_pred, margins=True)
def show_full_frames():
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def split_datetime(df, col):
df[col] = df[col].to_datetime()
df['month'] = df[col].dt.month
df['year'] = df[col].dt.year
df['day'] = df[col].dt.day
= str(df[col].dtype)
types.append(dtype)
tt['Types'] = types
return np.transpose(tt)
def train_val_test_split(df):
(train, val, test) = np.split(df.sample(frac=1), [int(.6
* len(df)), int(.8 * len(df))])
return (train, val, test)
class complex_number:
def __init__(self, r=0, i=0):
self.real = r
self.imag = i
def getData(self):
        print('{0}+{1}j'.format(self.real, self.imag))
def add_list_to_df(df, lst):
s = pd.Series(lst)
return pd.concat(df, s)
def simple_confusion_matrix(y_true, y_pred):
y_true = pd.Series(y_true, name='True')
y_pred = pd.Series(y_pred, name='Predicted')
return pd.crosstab(y_true, y_pred, margins=True)
def show_full_frames():
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def split_datetime(df, col):
df[col] = df[col].to_datetime()
df['month'] = df[col].dt.month
df['year'] = df[col].dt.year
df['day'] = df[col].dt.day
| 2,908
| 0
| 215
|
360ce2242fc5b5c05d51c1ed4b51c81b17534215
| 8,395
|
py
|
Python
|
pytomato/conversion_af_er.py
|
robsonzagrejr/pytomato
|
3da3d9557f398a7ce2f3f8741c7cd70de9bfe05f
|
[
"MIT"
] | 2
|
2021-02-25T14:29:13.000Z
|
2021-04-12T02:53:55.000Z
|
pytomato/conversion_af_er.py
|
robsonzagrejr/pytomato
|
3da3d9557f398a7ce2f3f8741c7cd70de9bfe05f
|
[
"MIT"
] | null | null | null |
pytomato/conversion_af_er.py
|
robsonzagrejr/pytomato
|
3da3d9557f398a7ce2f3f8741c7cd70de9bfe05f
|
[
"MIT"
] | null | null | null |
letters = 'abcdefghijklmnopqrstuvwxyz'
numbers = '0123456789'
"""Teste
Main created to test the functions.
"""
if __name__ == '__main__':
print(er_to_afd('[J-M1-9]abc'))
# er_to_afd('a(a|b)*a')
# er_to_afd('aa*(bb*aa*b)*')
| 28.361486
| 94
| 0.517213
|
class Node:
def __init__(self, left=None,right=None,data=None,father=None,first=None,fulfilled=None):
self.left = left
self.right = right
self.data = data
self.father = father
self.is_first_of_chain = first
self.fulfilled = fulfilled
self.lastpos = None
self.firstpos = None
self.nullable = None
def post_order(self, root):
res = []
if root:
res = self.post_order(root.left)
res = res + self.post_order(root.right)
res.append(root)
return res
def render_tree(er):
string = er.replace(' ','')[::-1]
tree = Node()
last = tree
idx = 0
while(idx < len(string)):
char = string[idx]
if char == '#':
last.data = '#'
last.father = Node(left=last)
last.is_first_of_chain = True
tree = last
else:
last, idx = add_node(idx,string,last)
idx = idx+1
return tree.father.left
def add_node(idx, string, node):
char = string[idx]
if idx+1 < len(string) and string[idx+1] == '\\':
idx += 1
new = concat(Node(data=char),node)
return new.left, idx
if char == ')':
idx = idx+1
char = string[idx]
new_node = Node(data=char, first=True)
new_node.father = Node(left=new_node)
while(not string[idx] == '('):
new_node, idx = add_node(idx+1, string, new_node)
n = new_node
while(n.data):
n = n.father
n = n.left
if not node.data == '*':
new = concat(n,node)
new.left.fulfilled = True
return new.left, idx
else:
node.left = n
node.father.fulfilled = True
return node.father, idx
if char == '(':
return node, idx
if char == '|':
n = node
while(not n.is_first_of_chain):
n = node.father
new = Node(right=n,data='|', father=n.father)
n.father.left = new
n.father = new
return new, idx
if node.fulfilled:
new = concat(Node(data=char),node)
return new.left, idx
if node.data == '|':
node.left = Node(data=char, first=True, father=node)
return node.left, idx
if node.data == '*':
node.left = Node(data=char,father=node)
node.fulfilled = True
return node, idx
new = concat(Node(data=char),node)
return new.left, idx
def concat(node1,node2):
if node2.is_first_of_chain:
is_first = True
node2.is_first_of_chain = False
else:
is_first = False
new = Node(right=node2, data='concat', father=node2.father, first=is_first)
node1.father = new
node2.father.left = new
new.left = node1
return new
def define_nodes(tree):
nodes = tree.post_order(tree)
count = 1
nodes_idx = dict()
for n in nodes:
if n.data == '|':
n.nullable = n.left.nullable or n.right.nullable
n.firstpos = n.left.firstpos | n.right.firstpos
n.lastpos = n.left.lastpos | n.right.lastpos
elif n.data == 'concat':
n.nullable = n.left.nullable and n.right.nullable
if n.left.nullable:
n.firstpos = n.left.firstpos | n.right.firstpos
else:
n.firstpos = n.left.firstpos
if n.right.nullable:
n.lastpos = n.left.lastpos | n.right.lastpos
else:
n.lastpos = n.right.lastpos
elif n.data == '*':
n.nullable = True
n.firstpos = n.left.firstpos
n.lastpos = n.left.lastpos
else:
if n.data == '&':
n.nullable = True
n.firstpos = set()
n.lastpos = set()
else:
n.nullable = False
n.firstpos = set([count])
n.lastpos = set([count])
nodes_idx[f'{count}'] = n.data
count = count + 1
return count-1, nodes_idx
def define_followpos(tree, n_nodes):
nodes = tree.post_order(tree)
followpos = dict()
for idx in range(n_nodes):
followpos[f'{idx+1}'] = set()
for n in nodes:
if n.data == 'concat':
for lastpos_node in n.left.lastpos:
followpos[str(lastpos_node)] = followpos[str(lastpos_node)] | n.right.firstpos
if n.data == '*':
for firstpos_node in n.lastpos:
followpos[str(firstpos_node)] = followpos[str(firstpos_node)] | n.firstpos
return followpos, tree.firstpos
def afd(followpos, nodes_idx, initial_state):
union = dict()
states = list()
states.append(initial_state)
visited_states = list()
automata = dict()
idx = -1
while(not len(states) == 0):
state = states.pop()
visited_states.append(state)
for pos in state:
node = nodes_idx.get(str(pos))
if not node == '#':
if not union.__contains__(node):
union[node] = set(followpos.get(str(pos)))
else:
union[node] = union.get(node) | set(followpos.get(str(pos)))
for s in union.items():
if visited_states.count(s[1]) == 0:
states.append(s[1])
if automata.get(str(state)):
automata[str(state)]['states'] = union.copy()
else:
idx += 1
automata[str(state)] = {'states': union.copy(), 'name': f'q{idx}'}
union.clear()
return automata
def format_afd(automata, initial_state, final, alphabet):
initial_state = [str(i) for i in initial_state]
afd = dict()
afd['n_estados'] = len(automata)
afd['inicial'] = "{" + ', '.join(initial_state) + "}"
afd['inicial'] = automata[afd['inicial']]['name']
afd['aceitacao'] = list()
afd['alfabeto'] = list(alphabet)
afd['transicoes'] = dict()
for transiction in automata:
trans = automata.get(transiction)
if transiction.find(final) >= 0:
afd.get('aceitacao').append(trans['name'])
t = dict()
for a in alphabet:
tr = trans['states'].get(a)
if (tr):
t[a] = [automata.get(f'{tr}')['name']]
#else:
# t[a] = []
afd.get('transicoes')[automata.get(transiction)['name']] = t
return afd
letters = 'abcdefghijklmnopqrstuvwxyz'
numbers = '0123456789'
def transform_suffix(suffix):
string = ''
is_until = False
for c in suffix:
if c == '-':
is_until = True
else:
if is_until:
if c.isnumeric():
s = numbers.split(string[-1])[1]
else:
if c.isupper():
s = letters.upper().split(string[-1])[1]
else:
s = letters.split(string[-1])[1]
string += s.split(c)[0] + c
is_until = False
else:
string += c
str_ref = ''
for c in string:
str_ref += c + '|'
return '(' + str_ref[0:-1] + ')'
def refatorate_regex(string):
preffix = ''
is_bracket = False
for c in string:
if c == '[':
is_bracket = True
suffix = ''
elif c == ']':
is_bracket = False
preffix += transform_suffix(suffix)
else:
if is_bracket:
suffix += c
else:
preffix += c
return preffix + '#'
def er_to_afd(string):
string = refatorate_regex(string)
tree = render_tree(string)
n_nodes, nodes_idx = define_nodes(tree)
followpos, initial_state = define_followpos(tree, n_nodes)
automata = afd(followpos,nodes_idx,initial_state)
final = [item[0] for item in list(nodes_idx.items()) if item[1] == '#'][0]
alphabet = set([item[1] for item in list(nodes_idx.items()) if not item[1] == '#'])
return format_afd(automata, initial_state, final, alphabet)
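# Note: er_to_afd() returns a dict describing the resulting DFA, using the keys
# built in format_afd(): 'n_estados', 'inicial', 'aceitacao', 'alfabeto' and
# 'transicoes'.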
"""Teste
Main created to test the functions.
"""
if __name__ == '__main__':
print(er_to_afd('[J-M1-9]abc'))
# er_to_afd('a(a|b)*a')
# er_to_afd('aa*(bb*aa*b)*')
| 7,851
| -10
| 313
|
4efe3582bfe30a1821bdbca1585fa90da4e93489
| 18,842
|
py
|
Python
|
accelerator_abstract/models/base_core_profile.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 6
|
2017-06-14T19:34:01.000Z
|
2020-03-08T07:16:59.000Z
|
accelerator_abstract/models/base_core_profile.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 160
|
2017-06-20T17:12:13.000Z
|
2022-03-30T13:53:12.000Z
|
accelerator_abstract/models/base_core_profile.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from datetime import datetime
from decimal import Decimal
from pytz import utc
import swapper
from django.conf import settings
from django.core.validators import (
RegexValidator,
MaxLengthValidator,
)
from django.db import models
from django.db.models import Q
from sorl.thumbnail import ImageField
from django.utils.safestring import mark_safe
from accelerator_abstract.models.accelerator_model import AcceleratorModel
from accelerator_abstract.models.base_user_role import (
BaseUserRole,
)
from accelerator_abstract.models.base_base_profile import (
EXPERT_USER_TYPE,
)
from accelerator_abstract.models.base_user_utils import (
has_staff_clearance,
)
from accelerator_abstract.models.base_program import (
ACTIVE_PROGRAM_STATUS,
ENDED_PROGRAM_STATUS,
)
INVITED_JUDGE_ALERT = (
"<h4>{first_name}, we would like to invite you to be a judge at "
"MassChallenge!</h4>"
"<p> </p>"
"<p>{round_name} judging occurs from {start_date} to {end_date}! "
"Of all our potential judges, we would like you, {first_name}, "
"to take part."
"</p><p> </p>"
'<p><a class="btn btn-primary" href="/expert/commitments/">Click '
"here to tell us your availability"
"</a></p> <p> </p>"
)
MENTOR_TYPE_HELPTEXT = (
"Allowed Values: "
"F - Functional Expert, "
"P - Partner, "
"T - Technical, "
"E - Entrepreneur, "
"N - Once accepted, now rejected, "
"X - Not Accepted as a Mentor (may still be a judge)")
JUDGE_TYPE_HELPTEXT = (
"Allowed Values: "
"1 - Round 1 Judge, "
"2 - Round 2 Judge, "
"3 - Pre-final Judge, "
"4 - Final Judge, "
"0 - Once Accepted, now rejected, "
"X - Not Accepted as a Judge (May still be a mentor)")
IDENTITY_HELP_TEXT_VALUE = (mark_safe(
'Select as many options as you feel best represent your identity. '
'Please press and hold Control (CTRL) on PCs or '
'Command (⌘) on Macs to select multiple options'))
JUDGE_FIELDS_TO_LABELS = {'desired_judge_label': 'Desired Judge',
'confirmed_judge_label': 'Judge'}
BIO_MAX_LENGTH = 7500
PRIVACY_CHOICES = (("staff", "MC Staff Only"),
("finalists and staff", "Finalists and MC Staff"),
("public", "All Users"),)
BASE_INTEREST = "I would like to participate in MassChallenge %s"
BASE_TOPIC = ("Please describe the topic(s) you would be available "
"to speak%s about%s")
REF_BY_TEXT = ("If someone referred you to MassChallenge, please provide "
"their name (and organization if relevant). Otherwise, please "
"indicate how you learned about the opportunity to participate "
"at MassChallenge (helps us understand the effectiveness of "
"our outreach programs).")
OTHER_EXPERTS_TEXT = ("We're always looking for more great experts to join "
"the MassChallenge community and program. We welcome "
"the names and contact info (email) of individuals you "
"think could be great additions to the program, as well "
"as how you think they might want to be involved "
"(Judge, Mentor, etc.) Also, please encourage these "
"individuals to fill out their own Expert Profile.")
| 37.608782
| 79
| 0.635495
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from datetime import datetime
from decimal import Decimal
from pytz import utc
import swapper
from django.conf import settings
from django.core.validators import (
RegexValidator,
MaxLengthValidator,
)
from django.db import models
from django.db.models import Q
from sorl.thumbnail import ImageField
from django.utils.safestring import mark_safe
from accelerator_abstract.models.accelerator_model import AcceleratorModel
from accelerator_abstract.models.base_user_role import (
BaseUserRole,
)
from accelerator_abstract.models.base_base_profile import (
EXPERT_USER_TYPE,
)
from accelerator_abstract.models.base_user_utils import (
has_staff_clearance,
)
from accelerator_abstract.models.base_program import (
ACTIVE_PROGRAM_STATUS,
ENDED_PROGRAM_STATUS,
)
INVITED_JUDGE_ALERT = (
"<h4>{first_name}, we would like to invite you to be a judge at "
"MassChallenge!</h4>"
"<p> </p>"
"<p>{round_name} judging occurs from {start_date} to {end_date}! "
"Of all our potential judges, we would like you, {first_name}, "
"to take part."
"</p><p> </p>"
'<p><a class="btn btn-primary" href="/expert/commitments/">Click '
"here to tell us your availability"
"</a></p> <p> </p>"
)
MENTOR_TYPE_HELPTEXT = (
"Allowed Values: "
"F - Functional Expert, "
"P - Partner, "
"T - Technical, "
"E - Entrepreneur, "
"N - Once accepted, now rejected, "
"X - Not Accepted as a Mentor (may still be a judge)")
JUDGE_TYPE_HELPTEXT = (
"Allowed Values: "
"1 - Round 1 Judge, "
"2 - Round 2 Judge, "
"3 - Pre-final Judge, "
"4 - Final Judge, "
"0 - Once Accepted, now rejected, "
"X - Not Accepted as a Judge (May still be a mentor)")
IDENTITY_HELP_TEXT_VALUE = (mark_safe(
'Select as many options as you feel best represent your identity. '
'Please press and hold Control (CTRL) on PCs or '
'Command (⌘) on Macs to select multiple options'))
JUDGE_FIELDS_TO_LABELS = {'desired_judge_label': 'Desired Judge',
'confirmed_judge_label': 'Judge'}
BIO_MAX_LENGTH = 7500
PRIVACY_CHOICES = (("staff", "MC Staff Only"),
("finalists and staff", "Finalists and MC Staff"),
("public", "All Users"),)
BASE_INTEREST = "I would like to participate in MassChallenge %s"
BASE_TOPIC = ("Please describe the topic(s) you would be available "
"to speak%s about%s")
REF_BY_TEXT = ("If someone referred you to MassChallenge, please provide "
"their name (and organization if relevant). Otherwise, please "
"indicate how you learned about the opportunity to participate "
"at MassChallenge (helps us understand the effectiveness of "
"our outreach programs).")
OTHER_EXPERTS_TEXT = ("We're always looking for more great experts to join "
"the MassChallenge community and program. We welcome "
"the names and contact info (email) of individuals you "
"think could be great additions to the program, as well "
"as how you think they might want to be involved "
"(Judge, Mentor, etc.) Also, please encourage these "
"individuals to fill out their own Expert Profile.")
class BaseCoreProfile(AcceleratorModel):
user = models.OneToOneField(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
gender_identity = models.ManyToManyField(
swapper.get_model_name(
AcceleratorModel.Meta.app_label, 'GenderChoices'),
help_text=IDENTITY_HELP_TEXT_VALUE,
blank=True
)
gender_self_description = models.TextField(blank=True, default="")
phone = models.CharField(
verbose_name="Phone",
max_length=20,
validators=[RegexValidator(
regex='^[0-9x.+() -]+$',
message='Digits and +()-.x only')],
blank=True)
linked_in_url = models.URLField(
verbose_name="LinkedIn profile URL",
blank=True)
facebook_url = models.URLField(
verbose_name="Facebook profile URL",
blank=True)
twitter_handle = models.CharField(
verbose_name="Twitter handle",
max_length=40,
blank=True)
personal_website_url = models.URLField(
verbose_name="Website URL",
max_length=255,
blank=True)
landing_page = models.CharField(
verbose_name="Current landing page within the site",
validators=[RegexValidator(
"^[^:]*$",
"Must be a page within the site"), ],
max_length=200,
blank=True)
image = ImageField(
upload_to='profile_pics',
verbose_name="Profile Picture",
help_text="Suggested size: <400px on the short side",
blank=True)
drupal_id = models.IntegerField(blank=True, null=True)
drupal_creation_date = models.DateTimeField(blank=True, null=True)
drupal_last_login = models.DateTimeField(blank=True, null=True)
interest_categories = models.ManyToManyField(
to=swapper.get_model_name(AcceleratorModel.Meta.app_label,
'InterestCategory'),
blank=True)
users_last_activity = models.DateTimeField(blank=True, null=True)
current_program = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
'Program'),
blank=True,
null=True,
on_delete=models.CASCADE,
)
program_families = models.ManyToManyField(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
'ProgramFamily'),
help_text="Which of our Program Families would you like to be "
"involved with?",
related_name="interested_%(class)s",
blank=True
)
user_type = None
default_page = "member_homepage"
newsletter_sender = models.BooleanField(default=False)
birth_year = models.DateField(blank=True, null=True)
ethno_racial_identification = models.ManyToManyField(
swapper.get_model_name(
AcceleratorModel.Meta.app_label, 'EthnoRacialIdentity'
),
blank=True,
help_text=IDENTITY_HELP_TEXT_VALUE
)
authorization_to_share_ethno_racial_identity = models.BooleanField(
default=False,
)
bio = models.TextField(blank=True,
default="",
validators=[MaxLengthValidator(BIO_MAX_LENGTH)])
title = models.CharField(
max_length=255,
blank=True,
verbose_name="Professional Title")
company = models.CharField(
max_length=255,
blank=True,
verbose_name="Company Name")
expert_category = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
"ExpertCategory"),
verbose_name="I primarily consider myself a(n)",
related_name="%(class)s_experts",
blank=True, null=True, # added
on_delete=models.CASCADE)
primary_industry = models.ForeignKey(
settings.MPTT_SWAPPABLE_INDUSTRY_MODEL,
verbose_name="Primary Industry",
related_name="%(class)s_experts",
limit_choices_to={'level__exact': 0},
null=True,
blank=True,
on_delete=models.CASCADE)
additional_industries = models.ManyToManyField(
settings.MPTT_SWAPPABLE_INDUSTRY_MODEL,
verbose_name="Additional Industries",
help_text=(mark_safe(
'You may select up to 5 related industries. To select multiple '
'industries, please press and hold Control (CTRL) on PCs or '
'Command (⌘) on Macs.')),
related_name="%(class)s_secondary_experts",
blank=True,
)
functional_expertise = models.ManyToManyField(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
'FunctionalExpertise'),
verbose_name="Functional Expertise",
related_name="%(class)s_experts",
blank=True)
public_website_consent = models.BooleanField(
verbose_name="Public Website Consent",
blank=False,
null=False,
default=False)
privacy_email = models.CharField(
max_length=64,
verbose_name="Privacy - Email",
choices=PRIVACY_CHOICES,
blank=True,
default=PRIVACY_CHOICES[1][0])
privacy_phone = models.CharField(
max_length=64,
verbose_name="Privacy - Phone",
choices=PRIVACY_CHOICES,
blank=True,
default=PRIVACY_CHOICES[1][0])
privacy_web = models.CharField(
max_length=64,
verbose_name="Privacy - Web",
choices=PRIVACY_CHOICES,
blank=True,
default=PRIVACY_CHOICES[1][0])
home_program_family = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
"ProgramFamily"),
verbose_name="Home Program Family",
blank=True,
null=True,
on_delete=models.CASCADE)
judge_interest = models.BooleanField(
verbose_name="Judge",
help_text=BASE_INTEREST % 'as a Judge',
default=False)
mentor_interest = models.BooleanField(
verbose_name="Mentor",
help_text=BASE_INTEREST % 'as a Mentor',
default=False)
speaker_interest = models.BooleanField(
verbose_name="Speaker",
help_text=BASE_INTEREST % 'as a Speaker',
default=False)
speaker_topics = models.TextField(
verbose_name="Speaker Topics",
help_text=BASE_TOPIC % ('', ''),
blank=True)
office_hours_interest = models.BooleanField(
verbose_name="Office Hours",
help_text=BASE_INTEREST % 'by holding Office Hours',
default=False)
office_hours_topics = models.TextField(
verbose_name="Office Hour Topics",
help_text=BASE_TOPIC % (' to Finalists', ' during Office Hours'),
blank=True)
referred_by = models.TextField(
max_length=500,
blank=True,
help_text=REF_BY_TEXT)
other_potential_experts = models.TextField(
max_length=500,
blank=True,
help_text=OTHER_EXPERTS_TEXT)
salutation = models.CharField(
max_length=255,
blank=True)
mentor_type = models.CharField(
max_length=1,
blank=True,
help_text=MENTOR_TYPE_HELPTEXT,
verbose_name="Mentor Type")
judge_type = models.CharField(
max_length=1,
blank=True,
help_text=JUDGE_TYPE_HELPTEXT,
verbose_name="Judge Type")
public_website_consent_checked = models.BooleanField(
verbose_name="Public Website Consent Check",
blank=False,
null=False,
default=False)
mentoring_specialties = models.ManyToManyField(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
'MentoringSpecialties'),
verbose_name="Mentoring Specialties",
help_text='Hold down "Control", or "Command" on a Mac,'
' to select more than one.',
related_name="%(class)s_experts",
blank=True)
expert_group = models.CharField(
verbose_name="Expert Group",
max_length=10,
blank=True)
reliability = models.DecimalField(
max_digits=3,
decimal_places=2,
default=Decimal("1.00"),
blank=True,
null=True)
internal_notes = models.TextField(
max_length=500,
blank=True,
help_text="Internal notes only for use by MassChallenge Staff "
"(not visible to Expert)")
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_coreprofile'
abstract = True
def __str__(self):
identifier = self.full_name()
ptype = ''
if self.user_type is not None:
ptype = ("%s " % self.user_type).title()
return "%sProfile for %s" % (ptype, identifier)
def full_name(self):
return self.user.full_name()
def image_url(self):
if str(self.image):
return self.image.storage.url(
self.image.name)
else:
return ""
def is_judge(self, *args, **kwargs):
"""prevent attribute errors on subclasses
"""
return False
def is_program_graduate(self, program=None):
""" This checks if the user is an alumni or graduate of the program
"""
qs = self.user.programrolegrant_set.filter(
program_role__user_role__name=BaseUserRole.FINALIST,
program_role__program__program_status=ENDED_PROGRAM_STATUS)
if program:
qs = qs.filter(program_role__program=program)
return qs.exists()
def is_alum_in_residence(self, program=None):
qs = self.user.programrolegrant_set.filter(
program_role__user_role__name=BaseUserRole.AIR)
if program:
qs = qs.filter(program_role__program=program)
return qs.exists()
def is_mentor(self, program=None):
"""If program is specified, is the expert a mentor in that program.
Otherwise, is the expert a mentor in any program.
"""
if program:
return self.user.programrolegrant_set.filter(
program_role__program__exact=program,
program_role__user_role__name=BaseUserRole.MENTOR).exists()
else:
return self.user.programrolegrant_set.filter(
program_role__user_role__name=BaseUserRole.MENTOR).exists()
def user_roles(self):
return set([prg.program_role.user_role
for prg in self.user.programrolegrant_set.all()
if prg.program_role.user_role is not None])
def is_office_hour_holder(self):
user_role_names = set([ur.name for ur in self.user_roles()])
return len(user_role_names.intersection(
BaseUserRole.OFFICE_HOUR_ROLES)) > 0
def is_partner(self):
PartnerTeamMember = swapper.load_model(
'accelerator', 'PartnerTeamMember')
return PartnerTeamMember.objects.filter(
team_member=self.user).exists()
def is_partner_admin(self):
PartnerTeamMember = swapper.load_model(
'accelerator', 'PartnerTeamMember')
return PartnerTeamMember.objects.filter(
team_member=self.user,
partner_administrator=True).exists()
def get_active_alerts(self, page=None):
"""no op
"""
return []
def _get_staff_landing_page(self):
if has_staff_clearance(self.user):
return '/staff'
def role_based_landing_page(self, exclude_role_names=[]):
if self.user_type.upper() == EXPERT_USER_TYPE:
return "/dashboard/expert/overview/"
JudgingRound = swapper.load_model(AcceleratorModel.Meta.app_label,
"JudgingRound")
UserRole = swapper.load_model(
'accelerator', 'UserRole')
now = utc.localize(datetime.now())
active_judging_round_labels = JudgingRound.objects.filter(
end_date_time__gt=now,
is_active=True).values_list("confirmed_judge_label",
flat=True)
active_judge_grants = Q(
program_role__user_role__name=UserRole.JUDGE,
program_role__user_label_id__in=active_judging_round_labels)
desired_judging_round_labels = JudgingRound.objects.filter(
end_date_time__gt=now).values_list("desired_judge_label",
flat=True)
desired_judge_grants = Q(
program_role__user_role__name=UserRole.DESIRED_JUDGE,
program_role__user_label__in=desired_judging_round_labels
)
active_mentor_grants = Q(
program_role__user_role__name=UserRole.MENTOR,
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
)
REMAINING_ROLES = UserRole.objects.exclude(
name__in=[UserRole.JUDGE,
UserRole.DESIRED_JUDGE,
UserRole.MENTOR]).values_list("name", flat=True)
remaining_grants = Q(
program_role__user_role__name__in=REMAINING_ROLES,
program_role__user_role__isnull=False,
program_role__landing_page__isnull=False)
query = self.user.programrolegrant_set.filter(
active_judge_grants |
desired_judge_grants |
active_mentor_grants |
remaining_grants).exclude(
program_role__landing_page="").exclude(
program_role__landing_page__isnull=True)
if exclude_role_names:
query = query.exclude(
program_role__user_role__name__in=exclude_role_names)
grant = query.order_by("-program_role__program__end_date",
"program_role__user_role__sort_order"
).first()
if grant:
return grant.program_role.landing_page
return self.default_page
def calc_landing_page(self):
return (
self._get_staff_landing_page() or
self.role_based_landing_page())
def check_landing_page(self):
page = self.landing_page or self.calc_landing_page()
if page == "/":
return self.default_page
return page
def first_startup(self, statuses=[]):
startup_memberships = self.user.startupteammember_set.order_by(
'-startup__created_datetime')
if statuses:
startup_memberships = startup_memberships.filter(
startup__startupstatus__program_startup_status__in=statuses)
if startup_memberships:
return startup_memberships.first().startup
return None
def interest_category_names(self):
return [interest.name for interest in self.interest_categories.all()]
def program_family_names(self):
return [pf.name for pf in self.program_families.all()]
def confirmed_mentor_programs(self):
return list(self.user.programrolegrant_set.filter(
program_role__user_role__name=BaseUserRole.MENTOR).values_list(
'program_role__program__name', flat=True))
    def confirmed_mentor_program_families_all(self):
return list(self.user.programrolegrant_set.filter(
program_role__user_role__name=BaseUserRole.MENTOR).values_list(
"program_role__program__program_family__name", flat=True
).distinct())
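# --- Editor's illustrative sketch (hypothetical, not part of this model) ---
# check_landing_page()/calc_landing_page() above reduce to a fallback chain:
# explicit landing_page -> staff page -> highest-priority role grant's page ->
# default_page, with a bare "/" treated as "no page".  A standalone rendition:
def resolve_landing_page(explicit, is_staff, role_page, default_page="member_homepage"):
    page = explicit or ("/staff" if is_staff else (role_page or default_page))
    return default_page if page == "/" else page
# e.g. resolve_landing_page("", False, "/dashboard/expert/overview/")
#      -> "/dashboard/expert/overview/"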
| 5,235
| 10,152
| 23
|
53efe294a1220ff799829ac423b0c9a968ee15e5
| 2,527
|
py
|
Python
|
game.py
|
AILab-FOI/MMO-IF
|
74a633bb7687ffdca8b3043046b0c572d5cc2969
|
[
"MIT"
] | null | null | null |
game.py
|
AILab-FOI/MMO-IF
|
74a633bb7687ffdca8b3043046b0c572d5cc2969
|
[
"MIT"
] | null | null | null |
game.py
|
AILab-FOI/MMO-IF
|
74a633bb7687ffdca8b3043046b0c572d5cc2969
|
[
"MIT"
] | null | null | null |
import re
import asyncio
import pexpect as px
import sys
from glulxe.interface import i7Game
from avatar import Avatar
GAME_FILE_NAME = "rooms.gblorb"
game = None
current_location = None
EXIT_COMMANDS = ["quit", "exit"]
ROOM_SELECTION_PATTERN = 'You entered (.*) room'
MESSAGE_PARAMS_PATTERN = r'@([^\s]+) (.*)'
agent = None
if __name__ == "__main__":
if len(sys.argv) == 3:
jid = sys.argv[1]
password = sys.argv[2]
loop = asyncio.get_event_loop()
loop.run_until_complete(main(jid, password))
| 24.066667
| 90
| 0.663633
|
import re
import asyncio
import pexpect as px
import sys
from glulxe.interface import i7Game
from avatar import Avatar
GAME_FILE_NAME = "rooms.gblorb"
game = None
current_location = None
EXIT_COMMANDS = ["quit", "exit"]
ROOM_SELECTION_PATTERN = 'You entered (.*) room'
MESSAGE_PARAMS_PATTERN = r'@([^\s]+) (.*)'
agent = None
def get_room_name(response):
if match := re.search(ROOM_SELECTION_PATTERN, response, re.IGNORECASE):
return match.group(1)
return None
def get_message_params(response):
if match := re.search(MESSAGE_PARAMS_PATTERN, response, re.IGNORECASE):
receiver = match.group(1)
message = match.group(2)
if not receiver is None and not message is None:
return (receiver, message)
return None
async def change_location(response):
location = get_room_name(response)
global current_location
    if location is not None and location != current_location:
current_location = location
loop = asyncio.get_event_loop()
loop.create_task(agent.send_location(location))
await asyncio.sleep(1)
async def send_message_to_player(command):
try:
(player, message) = get_message_params(command)
await agent.send_msg(player, message)
except:
pass
async def process_command(command):
# is communication
if command.startswith('@'):
await send_message_to_player(command)
return
output = game.next(command)
print(output)
# location change
if 'west' in command or 'east' in command or 'north' in command or 'south' in command:
await change_location(output)
async def start_agent(jid, password):
global agent
agent = Avatar(
jid,
password
)
agent.start()
# wait for agent to start up
await asyncio.sleep(2)
async def start_game():
global game
game = i7Game(GAME_FILE_NAME, interactive=False)
intro = game.intro()
print(intro)
await change_location(intro)
async def main(jid, password):
await start_agent(jid, password)
await start_game()
loop = asyncio.get_event_loop()
while True:
cmd = input('--> ')
if cmd in EXIT_COMMANDS:
break
loop.create_task(process_command(cmd.lower()))
await asyncio.sleep(1)
if __name__ == "__main__":
if len(sys.argv) == 3:
jid = sys.argv[1]
password = sys.argv[2]
loop = asyncio.get_event_loop()
loop.run_until_complete(main(jid, password))
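# --- Editor's note (illustrative): behaviour of the helpers above ---
# get_room_name("You entered kitchen room")       -> "kitchen"
# get_message_params("@ana meet me at the gate")  -> ("ana", "meet me at the gate")
# Commands starting with '@' are routed to send_message_to_player(); everything
# else goes to the interactive-fiction interpreter via game.next(command), and
# movement commands (north/south/east/west) additionally trigger change_location().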
| 1,803
| 0
| 184
|
91b93cc884ac8c0ea566b0f4ff9cf827afc1c82d
| 605
|
py
|
Python
|
solvers/brick_heads/instructions/gen_instructions.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | 1
|
2022-03-20T10:23:38.000Z
|
2022-03-20T10:23:38.000Z
|
solvers/brick_heads/instructions/gen_instructions.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | null | null | null |
solvers/brick_heads/instructions/gen_instructions.py
|
Anthony102899/Lego-ImageGenerator
|
52b19c8bb20f77a3394675e7c037c943a50c1e15
|
[
"Unlicense"
] | null | null | null |
from bricks_modeling.file_IO.model_writer import write_bricks_to_file_with_steps, write_model_to_file
from util.debugger import MyDebugger
from bricks_modeling.file_IO.model_reader import read_model_from_file, read_bricks_from_file
'''
We assume the following information is provided:
1) assembly order
2) grouping
3) default camera view
'''
if __name__ == "__main__":
debugger = MyDebugger("brick_heads")
file_path = r"data/full_models/steped_talor.ldr"
model = read_model_from_file(file_path, read_fake_bricks=True)
write_model_to_file(model, debugger.file_path(f"complete_full.ldr"))
| 35.588235
| 101
| 0.813223
|
from bricks_modeling.file_IO.model_writer import write_bricks_to_file_with_steps, write_model_to_file
from util.debugger import MyDebugger
from bricks_modeling.file_IO.model_reader import read_model_from_file, read_bricks_from_file
'''
We assume the following information is provided:
1) assembly order
2) grouping
3) default camera view
'''
if __name__ == "__main__":
debugger = MyDebugger("brick_heads")
file_path = r"data/full_models/steped_talor.ldr"
model = read_model_from_file(file_path, read_fake_bricks=True)
write_model_to_file(model, debugger.file_path(f"complete_full.ldr"))
| 0
| 0
| 0
|
c4854e4f5f1c246890c85558e1848fb6019895b8
| 2,047
|
py
|
Python
|
sweeper/cloud/base/cloud_provider.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/cloud/base/cloud_provider.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
sweeper/cloud/base/cloud_provider.py
|
dominoFire/sweeper
|
26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9
|
[
"MIT"
] | null | null | null |
from sweeper.cloud import resource_config_combinations
class CloudProvider:
"""
A CloudProvider object represents a Cloud Computing service
that sweeper can manage in order to execute a workflow in this
cloud base
"""
def __init__(self):
"""
Default constructor. You should overwrite all of this
class for creating a new Cloud base
"""
self.name = "Base Cloud Provider"
"""Name of the cloud base"""
def create_vm(self, name, config, **kwargs):
"""
Creates a virtual machine in the cloud base service
"""
raise NotImplementedError("You must implement create_vm")
def delete_vm(self, name):
"""
Deletes the named virtual machine provided by this CloudProvider
:param name: Name of the cloud resource to delete from this cloud base
:return: None
"""
raise NotImplementedError("You must implement delete_vm")
def get_config(self, config_name):
"""
        Return the configuration with the given name
        :param config_name: Name of the configuration provided by this cloud base
        :return: a ResourceConfig object
"""
raise NotImplementedError("You must implement get_config")
def list_configs(self):
"""
List all available configurations provided by this cloud base
:return: A list of ResourceConfig Objects
"""
raise NotImplementedError("You must implement list_configs")
# NOTE: We assume Method create_instance is implemented in each Cloud Provider Class
# but, I can't find a way to create an interface for such static method
def possible_configs(self, num):
"""
Returns all possible combinations of VM resources
that has the number of :num: resources required.
You should call this method from the implementation classes
"""
configs = self.list_configs()
combs = resource_config_combinations(num, configs)
return combs
| 31.984375
| 88
| 0.656571
|
from sweeper.cloud import resource_config_combinations
class CloudProvider:
"""
A CloudProvider object represents a Cloud Computing service
that sweeper can manage in order to execute a workflow in this
cloud base
"""
def __init__(self):
"""
Default constructor. You should overwrite all of this
class for creating a new Cloud base
"""
self.name = "Base Cloud Provider"
"""Name of the cloud base"""
def create_vm(self, name, config, **kwargs):
"""
Creates a virtual machine in the cloud base service
"""
raise NotImplementedError("You must implement create_vm")
def delete_vm(self, name):
"""
Deletes the named virtual machine provided by this CloudProvider
:param name: Name of the cloud resource to delete from this cloud base
:return: None
"""
raise NotImplementedError("You must implement delete_vm")
def get_config(self, config_name):
"""
        Return the configuration with the given name
        :param config_name: Name of the configuration provided by this cloud base
        :return: a ResourceConfig object
"""
raise NotImplementedError("You must implement get_config")
def list_configs(self):
"""
List all available configurations provided by this cloud base
:return: A list of ResourceConfig Objects
"""
raise NotImplementedError("You must implement list_configs")
# NOTE: We assume Method create_instance is implemented in each Cloud Provider Class
# but, I can't find a way to create an interface for such static method
def possible_configs(self, num):
"""
Returns all possible combinations of VM resources
that has the number of :num: resources required.
You should call this method from the implementation classes
"""
configs = self.list_configs()
combs = resource_config_combinations(num, configs)
return combs
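# --- Editor's illustrative sketch (hypothetical, not part of sweeper) ---
# A minimal in-memory subclass showing how the abstract interface above is
# meant to be filled in.  FakeResourceConfig and the config names are invented
# here purely for illustration; a real backend would return the project's own
# ResourceConfig objects so that possible_configs() keeps working.
class FakeResourceConfig:
    def __init__(self, name, cores, ram_gb):
        self.name = name
        self.cores = cores
        self.ram_gb = ram_gb
class InMemoryProvider(CloudProvider):
    def __init__(self):
        CloudProvider.__init__(self)
        self.name = "In-memory test provider"
        self._configs = {
            "small": FakeResourceConfig("small", 1, 2),
            "large": FakeResourceConfig("large", 4, 8),
        }
        self._vms = {}
    def create_vm(self, name, config, **kwargs):
        self._vms[name] = config
    def delete_vm(self, name):
        self._vms.pop(name, None)
    def get_config(self, config_name):
        return self._configs[config_name]
    def list_configs(self):
        return list(self._configs.values())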
| 0
| 0
| 0
|
406fc37da635061f79eb792d85f802ecf740e1cf
| 800
|
py
|
Python
|
data/Stats.py
|
T-amairi/IOTA
|
f7a212be681a002413219adca56f69bcdfbe8d17
|
[
"MIT"
] | 3
|
2021-06-28T19:42:11.000Z
|
2021-08-11T08:23:10.000Z
|
data/Stats.py
|
T-amairi/IOTA
|
f7a212be681a002413219adca56f69bcdfbe8d17
|
[
"MIT"
] | null | null | null |
data/Stats.py
|
T-amairi/IOTA
|
f7a212be681a002413219adca56f69bcdfbe8d17
|
[
"MIT"
] | 1
|
2022-03-21T14:12:07.000Z
|
2022-03-21T14:12:07.000Z
|
import re
import glob
import os
path = r".\data\log"
os.chdir(path)
t = []
logs = glob.glob("log*.txt")
Nbrun = len(logs)
for log in logs:
l = open(log,'r')
m = re.findall("(?<=Elapsed: )(.*?)(?=s)",l.read())
if float(m[-1]) > 0:
t.append(float(m[-1]))
l.close()
if t:
t = float(sum(t)/len(t))
print("Average time of execution:",t,"seconds")
path = r"..\tracking"
os.chdir(path)
TipsFile = glob.glob("Number*.txt")
NbModule = 0
for file in TipsFile:
NbTips = 0
Nbrun = 0
f = open(file,'r')
for line in f.readlines():
NbTips += int(line)
Nbrun += 1
NbTips = NbTips/Nbrun
print("Average number of tips for NodeModule[" + str(NbModule) + "]:",NbTips)
NbModule += 1
f.close()
| 19.512195
| 82
| 0.535
|
import re
import glob
import os
path = r".\data\log"
os.chdir(path)
t = []
logs = glob.glob("log*.txt")
Nbrun = len(logs)
for log in logs:
l = open(log,'r')
m = re.findall("(?<=Elapsed: )(.*?)(?=s)",l.read())
if float(m[-1]) > 0:
t.append(float(m[-1]))
l.close()
if t:
t = float(sum(t)/len(t))
print("Average time of execution:",t,"seconds")
path = r"..\tracking"
os.chdir(path)
TipsFile = glob.glob("Number*.txt")
NbModule = 0
for file in TipsFile:
NbTips = 0
Nbrun = 0
f = open(file,'r')
for line in f.readlines():
NbTips += int(line)
Nbrun += 1
NbTips = NbTips/Nbrun
print("Average number of tips for NodeModule[" + str(NbModule) + "]:",NbTips)
NbModule += 1
f.close()
| 0
| 0
| 0
|
35fa41d37e98a2e529dec8561a025c496d6009c4
| 12,922
|
py
|
Python
|
lib/surface/topic/filters.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/topic/filters.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/topic/filters.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource filters supplementary help."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.calliope import base
from googlecloudsdk.core.resource import resource_topics
class Filters(base.TopicCommand):
"""Resource filters supplementary help."""
detailed_help = {
'DESCRIPTION':
textwrap.dedent("""\
{description}
+
Note: Depending on the specific server API, filtering may be done
entirely by the client, entirely by the server, or by a combination
of both.
### Filter Expressions
A filter expression is a Boolean function that selects the resources
to print from a list of resources. Expressions are composed
of terms connected by logic operators.
*LogicOperator*::
Logic operators must be in uppercase: *AND*, *OR*, *NOT*.
Additionally, expressions containing both *AND* and *OR* must be
parenthesized to disambiguate precedence.
*NOT* _term-1_:::
True if _term-1_ is False, otherwise False.
_term-1_ *AND* _term-2_:::
True if both _term-1_ and _term-2_ are true.
_term-1_ *OR* _term-2_:::
True if at least one of _term-1_ or _term-2_ is true.
_term-1_ _term-2_:::
Term conjunction (implicit *AND*) is True if both _term-1_
and _term-2_ are true. Conjunction has lower precedence than *OR*.
*Terms*::
A term is a _key_ _operator_ _value_ tuple, where _key_ is a dotted
name that evaluates to the value of a resource attribute, and _value_
may be:
*number*::: integer or floating point numeric constant
*unquoted literal*::: character sequence terminated by space, ( or )
*quoted literal*::: _"..."_ or _'...'_
Most filter expressions need to be quoted in shell commands. If you
use _'...'_ shell quotes then use _"..."_ filter string literal quotes
and vice versa.
Quoted literals will be interpreted as string values, even when the
value could also be a valid number. For example, 'key:1e9' will be
interpreted as a key named 'key' with the string value '1e9', rather
than with the float value of one billion expressed in scientific
notation.
*Operator Terms*::
_key_ *:* _simple-pattern_:::
*:* operator evaluation is changing for consistency across Google
APIs. The current default is deprecated and will be dropped shortly.
A warning will be displayed when a --filter expression would return
different matches using both the deprecated and new implementations.
+
The current deprecated default is True if _key_ contains
_simple-pattern_. The match is case insensitive. It allows one
```*``` that matches any sequence of 0 or more characters.
If ```*``` is specified then the match is anchored, meaning all
characters from the beginning and end of the value must match.
+
The new implementation is True if _simple-pattern_ matches any
_word_ in _key_. Words are locale specific but typically consist of
alpha-numeric characters. Non-word characters that do not appear in
_simple-pattern_ are ignored. The matching is anchored and case
insensitive. An optional trailing ```*``` does a word prefix match.
+
Use _key_```:*``` to test if _key_ is defined and
```-```_key_```:*``` to test if _key_ is undefined.
_key_ *:(* _simple-pattern_ ... *)*:::
True if _key_ matches any _simple-pattern_ in the
(space, tab, newline, comma) separated list.
_key_ *=* _value_:::
True if _key_ is equal to _value_, or [deprecated] equivalent to *:*
with the exception that the trailing ```*``` prefix match is not
supported.
+
For historical reasons, this operation currently behaves differently
for different Google APIs. For many APIs, this is True if key is
equal to value. For a few APIs, this is currently equivalent to *:*,
with the exception that the trailing ```*``` prefix match is not
supported. However, this behaviour is being phased out, and use of
```=``` for those APIs is deprecated; for those APIs, if you want
matching, you should use ```:``` instead of ```=```, and if you want
to test for equality, you can use
_key_ <= _value_ AND _key_ >= _value_.
_key_ *=(* _value_ ... *)*:::
True if _key_ is equal to any _value_ in the
(space, tab, newline, *,*) separated list.
_key_ *!=* _value_:::
True if _key_ is not _value_. Equivalent to
-_key_=_value_ and NOT _key_=_value_.
_key_ *<* _value_:::
True if _key_ is less than _value_. If both _key_ and
_value_ are numeric then numeric comparison is used, otherwise
lexicographic string comparison is used.
_key_ *<=* _value_:::
True if _key_ is less than or equal to _value_. If both
_key_ and _value_ are numeric then numeric comparison is used,
otherwise lexicographic string comparison is used.
_key_ *>=* _value_:::
True if _key_ is greater than or equal to _value_. If
both _key_ and _value_ are numeric then numeric comparison is used,
otherwise lexicographic string comparison is used.
_key_ *>* _value_:::
True if _key_ is greater than _value_. If both _key_ and
_value_ are numeric then numeric comparison is used, otherwise
lexicographic string comparison is used.
_key_ *~* _value_:::
True if _key_ contains a match for the RE (regular expression) pattern
_value_.
_key_ *!*~ _value_:::
True if _key_ does not contain a match for the RE (regular expression)
pattern _value_.
For more about regular expression syntax, see:
https://docs.python.org/3/library/re.html#re-syntax which follows the
PCRE dialect.
### Determine which fields are available for filtering
In order to build filters, it is often helpful to review some
representative fields returned from commands. One simple way to do
this is to add `--format=yaml --limit=1` to a command. With these
flags, a single record is returned and its full contents are displayed
as a YAML document. For example, a list of project fields could be
generated by running:
$ gcloud projects list --format=yaml --limit=1
This might display the following data:
```yaml
createTime: '2021-02-10T19:19:49.242Z'
lifecycleState: ACTIVE
name: MyProject
parent:
id: '123'
type: folder
projectId: my-project
projectNumber: '456'
```
Using this data, one way of filtering projects is by their parent's ID
by specifying ``parent.id'' as the _key_.
### Filter on a custom or nested list in response
By default the filter expression operates on root-level resources.
In order to filter on a nested list (not at the root level of the JSON),
one can use the `--flatten` flag to provide the `resource-key` to
list. For example, to list members under `my-project` that have an
editor role, one can run:
$ gcloud projects get-iam-policy cloudsdktest --flatten=bindings --filter=bindings.role:roles/editor --format='value(bindings.members)'
""").format(
description=resource_topics.ResourceDescription('filter')),
'EXAMPLES':
textwrap.dedent("""\
List all Google Compute Engine instance resources:
$ gcloud compute instances list
List Compute Engine instance resources that have machineType
*f1-micro*:
$ gcloud compute instances list --filter="machineType:f1-micro"
List Compute Engine instance resources using a regular expression for
zone *us* and not MachineType *f1-micro*:
$ gcloud compute instances list --filter="zone ~ us AND -machineType:f1-micro"
List Compute Engine instance resources with tag *my-tag*:
$ gcloud compute instances list --filter="tags.items=my-tag"
List Compute Engine instance resources with tag *my-tag* or
*my-other-tag*:
$ gcloud compute instances list --filter="tags.items=(my-tag,my-other-tag)"
List Compute Engine instance resources with tag *my-tag* and
*my-other-tag*:
$ gcloud compute instances list --filter="tags.items=my-tag AND tags.items=my-other-tag"
List Compute Engine instance resources which either have tag *my-tag*
but not *my-other-tag* or have tag *alternative-tag*:
$ gcloud compute instances list --filter="(tags.items=my-tag AND -tags.items=my-other-tag) OR tags.items=alternative-tag"
List Compute Engine instance resources which contain the key *fingerprint*
in the *metadata* object:
$ gcloud compute instances list --limit=1 --filter="metadata.list(show="keys"):fingerprint"
List Compute Engine instance resources with label *my-label* with any
value:
$ gcloud compute instances list --filter="labels.my-label:*"
List Container Registry images that have a tag with the value
'30e5504145':
$ gcloud container images list-tags --filter="'tags:30e5504145'"
The last example encloses the filter expression in single quotes
because the value '30e5504145' could be interpreted as a number in
scientific notation.
List in JSON format those projects where the labels match specific
values (e.g. label.env is 'test' and label.version is alpha):
$ gcloud projects list --format="json" --filter="labels.env=test AND labels.version=alpha"
List projects that were created on and after a specific date:
$ gcloud projects list --format="table(projectNumber,projectId,createTime)" --filter="createTime>=2018-01-15"
List projects that were created on and after a specific date and time
and sort from oldest to newest (with dates and times listed according
to the local timezone):
$ gcloud projects list --format="table(projectNumber,projectId,createTime.date(tz=LOCAL))" --filter="createTime>=2018-01-15T12:00:00" --sort-by=createTime
List projects that were created within the last two weeks, using
ISO8601 durations:
$ gcloud projects list --format="table(projectNumber,projectId,createTime)" --filter="createTime>-P2W"
For more about ISO8601 durations, see: https://en.wikipedia.org/wiki/ISO_8601
+
The table below shows examples of pattern matching if used with
the `:` operator:
PATTERN | VALUE | MATCHES | DEPRECATED_MATCHES
--- | --- | --- | ---
abc* | abcpdqxyz | True | True
abc | abcpdqxyz | False | True
pdq* | abcpdqxyz | False | False
pdq | abcpdqxyz | False | True
xyz* | abcpdqxyz | False | False
xyz | abcpdqxyz | False | True
* | abcpdqxyz | True | True
* | (None) | False | False
* | ('') | False | False
* | (otherwise) | True | True
abc* | abc.pdq.xyz | True | True
abc | abc.pdq.xyz | True | True
abc.pdq | abc.pdq.xyz | True | True
pdq* | abc.pdq.xyz | True | False
pdq | abc.pdq.xyz | True | True
pdq.xyz | abc.pdq.xyz | True | True
xyz* | abc.pdq.xyz | True | False
xyz | abc.pdq.xyz | True | True
"""),
}
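# --- Editor's illustrative sketch (hypothetical, not gcloud's implementation) ---
# A toy, client-side rendition of three operators documented above (=, ~ and the
# anchored word match of :), applied to one hand-written resource dict, just to
# make the described semantics concrete.
import re as _re
_resource = {"name": "MyProject", "labels.env": "test", "zone": "us-central1-a"}
def _op_eq(key, value):
    return str(_resource.get(key)) == value
def _op_regex(key, pattern):
    return _re.search(pattern, str(_resource.get(key, ""))) is not None
def _op_has(key, simple_pattern):
    # ':' matches whole words, case-insensitively; a trailing '*' is a prefix match.
    words = _re.split(r"\W+", str(_resource.get(key, "")).lower())
    target = simple_pattern.lower()
    if target.endswith("*"):
        return any(w.startswith(target[:-1]) for w in words)
    return target in words
# _op_eq("labels.env", "test") -> True;  _op_regex("zone", "us") -> True
# _op_has("zone", "central1*") -> True;  _op_has("name", "myproject") -> True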
| 39.76
| 166
| 0.627225
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource filters supplementary help."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.calliope import base
from googlecloudsdk.core.resource import resource_topics
class Filters(base.TopicCommand):
"""Resource filters supplementary help."""
detailed_help = {
'DESCRIPTION':
textwrap.dedent("""\
{description}
+
Note: Depending on the specific server API, filtering may be done
entirely by the client, entirely by the server, or by a combination
of both.
### Filter Expressions
A filter expression is a Boolean function that selects the resources
to print from a list of resources. Expressions are composed
of terms connected by logic operators.
*LogicOperator*::
Logic operators must be in uppercase: *AND*, *OR*, *NOT*.
Additionally, expressions containing both *AND* and *OR* must be
parenthesized to disambiguate precedence.
*NOT* _term-1_:::
True if _term-1_ is False, otherwise False.
_term-1_ *AND* _term-2_:::
True if both _term-1_ and _term-2_ are true.
_term-1_ *OR* _term-2_:::
True if at least one of _term-1_ or _term-2_ is true.
_term-1_ _term-2_:::
Term conjunction (implicit *AND*) is True if both _term-1_
and _term-2_ are true. Conjunction has lower precedence than *OR*.
*Terms*::
A term is a _key_ _operator_ _value_ tuple, where _key_ is a dotted
name that evaluates to the value of a resource attribute, and _value_
may be:
*number*::: integer or floating point numeric constant
*unquoted literal*::: character sequence terminated by space, ( or )
*quoted literal*::: _"..."_ or _'...'_
Most filter expressions need to be quoted in shell commands. If you
use _'...'_ shell quotes then use _"..."_ filter string literal quotes
and vice versa.
Quoted literals will be interpreted as string values, even when the
value could also be a valid number. For example, 'key:1e9' will be
interpreted as a key named 'key' with the string value '1e9', rather
than with the float value of one billion expressed in scientific
notation.
*Operator Terms*::
_key_ *:* _simple-pattern_:::
*:* operator evaluation is changing for consistency across Google
APIs. The current default is deprecated and will be dropped shortly.
A warning will be displayed when a --filter expression would return
different matches using both the deprecated and new implementations.
+
The current deprecated default is True if _key_ contains
_simple-pattern_. The match is case insensitive. It allows one
```*``` that matches any sequence of 0 or more characters.
If ```*``` is specified then the match is anchored, meaning all
characters from the beginning and end of the value must match.
+
The new implementation is True if _simple-pattern_ matches any
_word_ in _key_. Words are locale specific but typically consist of
alpha-numeric characters. Non-word characters that do not appear in
_simple-pattern_ are ignored. The matching is anchored and case
insensitive. An optional trailing ```*``` does a word prefix match.
+
Use _key_```:*``` to test if _key_ is defined and
```-```_key_```:*``` to test if _key_ is undefined.
_key_ *:(* _simple-pattern_ ... *)*:::
True if _key_ matches any _simple-pattern_ in the
(space, tab, newline, comma) separated list.
_key_ *=* _value_:::
True if _key_ is equal to _value_, or [deprecated] equivalent to *:*
with the exception that the trailing ```*``` prefix match is not
supported.
+
For historical reasons, this operation currently behaves differently
for different Google APIs. For many APIs, this is True if key is
equal to value. For a few APIs, this is currently equivalent to *:*,
with the exception that the trailing ```*``` prefix match is not
supported. However, this behaviour is being phased out, and use of
```=``` for those APIs is deprecated; for those APIs, if you want
matching, you should use ```:``` instead of ```=```, and if you want
to test for equality, you can use
_key_ <= _value_ AND _key_ >= _value_.
_key_ *=(* _value_ ... *)*:::
True if _key_ is equal to any _value_ in the
(space, tab, newline, *,*) separated list.
_key_ *!=* _value_:::
True if _key_ is not _value_. Equivalent to
-_key_=_value_ and NOT _key_=_value_.
_key_ *<* _value_:::
True if _key_ is less than _value_. If both _key_ and
_value_ are numeric then numeric comparison is used, otherwise
lexicographic string comparison is used.
_key_ *<=* _value_:::
True if _key_ is less than or equal to _value_. If both
_key_ and _value_ are numeric then numeric comparison is used,
otherwise lexicographic string comparison is used.
_key_ *>=* _value_:::
True if _key_ is greater than or equal to _value_. If
both _key_ and _value_ are numeric then numeric comparison is used,
otherwise lexicographic string comparison is used.
_key_ *>* _value_:::
True if _key_ is greater than _value_. If both _key_ and
_value_ are numeric then numeric comparison is used, otherwise
lexicographic string comparison is used.
_key_ *~* _value_:::
True if _key_ contains a match for the RE (regular expression) pattern
_value_.
_key_ *!*~ _value_:::
True if _key_ does not contain a match for the RE (regular expression)
pattern _value_.
For more about regular expression syntax, see:
https://docs.python.org/3/library/re.html#re-syntax which follows the
PCRE dialect.
### Determine which fields are available for filtering
In order to build filters, it is often helpful to review some
representative fields returned from commands. One simple way to do
this is to add `--format=yaml --limit=1` to a command. With these
flags, a single record is returned and its full contents are displayed
as a YAML document. For example, a list of project fields could be
generated by running:
$ gcloud projects list --format=yaml --limit=1
This might display the following data:
```yaml
createTime: '2021-02-10T19:19:49.242Z'
lifecycleState: ACTIVE
name: MyProject
parent:
id: '123'
type: folder
projectId: my-project
projectNumber: '456'
```
Using this data, one way of filtering projects is by their parent's ID
by specifying ``parent.id'' as the _key_.
### Filter on a custom or nested list in response
By default the filter expression operates on root-level resources.
In order to filter on a nested list (not at the root level of the JSON),
one can use the `--flatten` flag to provide the `resource-key` to
list. For example, to list members under `my-project` that have an
editor role, one can run:
$ gcloud projects get-iam-policy cloudsdktest --flatten=bindings --filter=bindings.role:roles/editor --format='value(bindings.members)'
""").format(
description=resource_topics.ResourceDescription('filter')),
'EXAMPLES':
textwrap.dedent("""\
List all Google Compute Engine instance resources:
$ gcloud compute instances list
List Compute Engine instance resources that have machineType
*f1-micro*:
$ gcloud compute instances list --filter="machineType:f1-micro"
List Compute Engine instance resources using a regular expression for
zone *us* and not MachineType *f1-micro*:
$ gcloud compute instances list --filter="zone ~ us AND -machineType:f1-micro"
List Compute Engine instance resources with tag *my-tag*:
$ gcloud compute instances list --filter="tags.items=my-tag"
List Compute Engine instance resources with tag *my-tag* or
*my-other-tag*:
$ gcloud compute instances list --filter="tags.items=(my-tag,my-other-tag)"
List Compute Engine instance resources with tag *my-tag* and
*my-other-tag*:
$ gcloud compute instances list --filter="tags.items=my-tag AND tags.items=my-other-tag"
List Compute Engine instance resources which either have tag *my-tag*
but not *my-other-tag* or have tag *alternative-tag*:
$ gcloud compute instances list --filter="(tags.items=my-tag AND -tags.items=my-other-tag) OR tags.items=alternative-tag"
List Compute Engine instance resources which contain the key *fingerprint*
in the *metadata* object:
$ gcloud compute instances list --limit=1 --filter="metadata.list(show="keys"):fingerprint"
List Compute Engine instance resources with label *my-label* with any
value:
$ gcloud compute instances list --filter="labels.my-label:*"
List Container Registry images that have a tag with the value
'30e5504145':
$ gcloud container images list-tags --filter="'tags:30e5504145'"
The last example encloses the filter expression in single quotes
because the value '30e5504145' could be interpreted as a number in
scientific notation.
List in JSON format those projects where the labels match specific
values (e.g. label.env is 'test' and label.version is alpha):
$ gcloud projects list --format="json" --filter="labels.env=test AND labels.version=alpha"
List projects that were created on and after a specific date:
$ gcloud projects list --format="table(projectNumber,projectId,createTime)" --filter="createTime>=2018-01-15"
List projects that were created on and after a specific date and time
and sort from oldest to newest (with dates and times listed according
to the local timezone):
$ gcloud projects list --format="table(projectNumber,projectId,createTime.date(tz=LOCAL))" --filter="createTime>=2018-01-15T12:00:00" --sort-by=createTime
List projects that were created within the last two weeks, using
ISO8601 durations:
$ gcloud projects list --format="table(projectNumber,projectId,createTime)" --filter="createTime>-P2W"
For more about ISO8601 durations, see: https://en.wikipedia.org/wiki/ISO_8601
+
The table below shows examples of pattern matching if used with
the `:` operator:
PATTERN | VALUE | MATCHES | DEPRECATED_MATCHES
--- | --- | --- | ---
abc* | abcpdqxyz | True | True
abc | abcpdqxyz | False | True
pdq* | abcpdqxyz | False | False
pdq | abcpdqxyz | False | True
xyz* | abcpdqxyz | False | False
xyz | abcpdqxyz | False | True
* | abcpdqxyz | True | True
* | (None) | False | False
* | ('') | False | False
* | (otherwise) | True | True
abc* | abc.pdq.xyz | True | True
abc | abc.pdq.xyz | True | True
abc.pdq | abc.pdq.xyz | True | True
pdq* | abc.pdq.xyz | True | False
pdq | abc.pdq.xyz | True | True
pdq.xyz | abc.pdq.xyz | True | True
xyz* | abc.pdq.xyz | True | False
xyz | abc.pdq.xyz | True | True
"""),
}
| 0
| 0
| 0
|
e7ea76e3a00992a63464a4b9c3737bee379992e0
| 471
|
py
|
Python
|
ai2_replication/tables.py
|
georgetown-cset/ai-definitions-for-policymaking
|
667e928c8bb30f6e02696ac71081c6bae4096f50
|
[
"ADSL"
] | 1
|
2020-06-24T20:45:03.000Z
|
2020-06-24T20:45:03.000Z
|
ai2_replication/tables.py
|
georgetown-cset/ai-definitions-for-policymaking
|
667e928c8bb30f6e02696ac71081c6bae4096f50
|
[
"ADSL"
] | null | null | null |
ai2_replication/tables.py
|
georgetown-cset/ai-definitions-for-policymaking
|
667e928c8bb30f6e02696ac71081c6bae4096f50
|
[
"ADSL"
] | null | null | null |
from bq import create_client, read_sql, query
DATASET = 'ai2_replication'
client = create_client()
make_table('institutions')
make_table('paper_authors_w_countries')
make_table('language')
make_table('ai_papers_any_author')
make_table('paper_author_institution')
make_table('oecd_comparison')
| 24.789474
| 65
| 0.757962
|
from bq import create_client, read_sql, query
DATASET = 'ai2_replication'
client = create_client()
def make_table(table, **kw):
sql = read_sql(f'../ai2_replication/{table}.sql')
job = query(sql, table, dataset=DATASET, truncate=True, **kw)
return job.result()
make_table('institutions')
make_table('paper_authors_w_countries')
make_table('language')
make_table('ai_papers_any_author')
make_table('paper_author_institution')
make_table('oecd_comparison')
| 151
| 0
| 23
|
93de89bf39c112dd5fd852b80fc612aaf44d3160
| 3,477
|
py
|
Python
|
app01/models.py
|
xinxinliang/ksDjango
|
0c0f4a5842cf225e77035b716979fcf9b8d03311
|
[
"Apache-2.0"
] | 13
|
2021-03-11T12:35:29.000Z
|
2022-02-25T02:22:47.000Z
|
app01/models.py
|
xinxinliang/ksDjango
|
0c0f4a5842cf225e77035b716979fcf9b8d03311
|
[
"Apache-2.0"
] | 1
|
2021-11-04T03:02:10.000Z
|
2021-11-04T03:02:10.000Z
|
app01/models.py
|
xinxinliang/ksDjango
|
0c0f4a5842cf225e77035b716979fcf9b8d03311
|
[
"Apache-2.0"
] | 4
|
2021-06-12T19:27:01.000Z
|
2022-02-04T05:13:54.000Z
|
from django.db import models
from datetime import datetime
# Create your models here.
| 37.387097
| 119
| 0.708944
|
from django.db import models
from datetime import datetime
# Create your models here.
class UserTitle(models.Model):
    # female is F, male is M
    GENDER = [
        (0,"unknown"),
        (1,"male"),
        (2,"female")
    ]
    STATE = [
        (0,"0 first crawl"),
        (1,"1ksVideo"),
        (2,"1ksLive"),
        (3,"2ksVideo+ksLive"),
        (4,"3videoMP4"),
        (5,"4vieo+liveMP4")
    ]
    USERIMG = "https://tx2.a.yximgs.com/uhead/AB/2020/08/17/09/BMjAyMDA4MTcwOTM2MDNfMjQ0NzAyMDZfMV9oZDM4Nl8xODU=_s.jpg"
    userID = models.CharField(max_length=256,unique=True,verbose_name="user id")
    userName = models.CharField(max_length=256,verbose_name="user name")
    createTime = models.DateTimeField(default=datetime.now,verbose_name="creation time")
    stateUser = models.IntegerField(choices=STATE,verbose_name="user info state",default=0)
    ksID = models.CharField(max_length=128,verbose_name="Kuaishou id",default="xxxxxxxxxxxxxx")
    user_text = models.CharField(max_length=2560,verbose_name="user description",default="xxxxxxxxxxxxx")
    gender = models.IntegerField(choices=GENDER,verbose_name="gender",default=0)
    fan = models.CharField(max_length=32,verbose_name="fan count",default="-1")
    xinzuo = models.CharField(max_length=32,verbose_name="zodiac sign",default="unknown")
    cityName = models.CharField(max_length=32,verbose_name="address",default="unknown")
    follow = models.CharField(max_length=32,verbose_name="number of follows",default="-1")
    photo = models.CharField(max_length=32,verbose_name="number of works",default="-1")
    userImg = models.CharField(max_length=256,verbose_name="image URL",default=USERIMG)
    def __str__(self):
        return self.userName
    class Meta:
        verbose_name = verbose_name_plural = "user ID and name"
class UserVideo(models.Model):
    STATE = [
        (1,"default ksVideo"),
        (2,"ksVideo+ksLive")
    ]
    # when the referenced user is deleted, this row is deleted as well
    theUser = models.ForeignKey(UserTitle,on_delete=models.CASCADE)
    videoID = models.CharField(max_length=128,default="xxxxxxxxxxxxxx",verbose_name="video id")
    caption = models.CharField(max_length=512,default="N/A",verbose_name="video description")
    coversUrl = models.CharField(max_length=512,default="xxxxxxxxxxx",verbose_name="video cover")
    videoPath = models.CharField(max_length=512,default="xxxxxxxxxxxxx",verbose_name="video URL")
    realLikeCount = models.CharField(max_length=64,default="xxxxxxxxxxx",verbose_name="exact like count")
    animatedCoverUrl = models.CharField(max_length=512,default="xxxxxxxx",verbose_name="animated cover")
    stateVideo = models.IntegerField(choices=STATE,default=1,verbose_name="state")
    displayView = models.CharField(max_length=64,default="-1",verbose_name="play count")
    displayComment = models.CharField(max_length=64,default="-1",verbose_name="comment count")
    def __str__(self):
        return self.videoID
    class Meta:
        verbose_name = verbose_name_plural = "video info"
class UserPhoto(models.Model):
    thephotoUser = models.ForeignKey(UserTitle,on_delete=models.CASCADE)
    photoID = models.CharField(max_length=128,verbose_name="album id",default="xxxxxxxx")
    caption = models.CharField(max_length=512,verbose_name="album description",default="N/A")
    displayView = models.CharField(max_length=32,verbose_name="play count",default="-1")
    displayLike = models.CharField(max_length=32,verbose_name="like count",default="-1")
    displayComment = models.CharField(max_length=32,verbose_name="comment count",default="-1")
    imgUrls = models.CharField(max_length=5000,default=" ")
    def __str__(self):
        return self.photoID
    class Meta:
        verbose_name = verbose_name_plural = "album info"
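# --- Editor's illustrative usage sketch (hypothetical; assumes a configured ---
# --- Django project with these models migrated) ---
#   user = UserTitle.objects.get(ksID="some-kuaishou-id")
#   video_ids = UserVideo.objects.filter(theUser=user).values_list("videoID", flat=True)
#   album_count = UserPhoto.objects.filter(thephotoUser=user).count()
# The two ForeignKey(..., on_delete=models.CASCADE) fields mean deleting the
# UserTitle row removes its UserVideo and UserPhoto rows as well.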
| 77
| 3,525
| 69
|
8826c003f0775c51783e8ad89aa5dd24eeb638cd
| 1,710
|
py
|
Python
|
instruments/common/example/image-classification/train_imagenet.py
|
All-less/mxnet-speculative-synchronization
|
f9220e4d8451768eeee3e680bb0b2edf8f91b9f3
|
[
"MIT"
] | 6
|
2017-12-09T06:36:20.000Z
|
2019-10-09T07:59:18.000Z
|
instruments/common/example/image-classification/train_imagenet.py
|
All-less/mxnet-speculative-synchronization
|
f9220e4d8451768eeee3e680bb0b2edf8f91b9f3
|
[
"MIT"
] | null | null | null |
instruments/common/example/image-classification/train_imagenet.py
|
All-less/mxnet-speculative-synchronization
|
f9220e4d8451768eeee3e680bb0b2edf8f91b9f3
|
[
"MIT"
] | 2
|
2019-12-27T12:24:08.000Z
|
2019-12-27T12:26:32.000Z
|
import os
import argparse
import logging
role = os.getenv('DMLC_ROLE').upper()
if role == 'WORKER':
role = 'Worker' # backward compatibility
rank = os.getenv('DMLC_{}_ID'.format(role.upper()))
logging.basicConfig(level=logging.INFO, format='%(asctime)s {0}[{1}] %(message)s'.format(role, rank))
from common import find_mxnet, data, fit
import mxnet as mx
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser(description="train imagenet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fit.add_fit_args(parser)
data.add_data_args(parser)
data.add_data_aug_args(parser)
# use a large aug level
data.set_data_aug_level(parser, 3)
parser.set_defaults(
# network
network = 'resnet',
num_layers = 18,
# data
data_train = '/home/ubuntu/ILSVRC2012/ILSVRC2012_dataset_train.rec', # ALL DATA MUST BE PLACED IN A FOLDER
data_val = '/home/ubuntu/ILSVRC2012/ILSVRC2012_dataset_val.rec', # INSTEAD OF A BUCKET
num_classes = 1000,
num_examples = 281167,
image_shape = '3,224,224',
min_random_scale = 1, # if input image has min size k, suggest to use
# 256.0/x, e.g. 0.533 for 480
# train
lr = 0.03,
num_epochs = 80,
lr_step_epochs = '30,60',
disp_batches = 1
)
args = parser.parse_args()
# load network
from importlib import import_module
net = import_module('symbols.'+args.network)
sym = net.get_symbol(**vars(args))
# train
fit.fit(args, sym, data.get_rec_iter)
| 35.625
| 120
| 0.609942
|
import os
import argparse
import logging
role = os.getenv('DMLC_ROLE').upper()
if role == 'WORKER':
role = 'Worker' # backward compatibility
rank = os.getenv('DMLC_{}_ID'.format(role.upper()))
logging.basicConfig(level=logging.INFO, format='%(asctime)s {0}[{1}] %(message)s'.format(role, rank))
from common import find_mxnet, data, fit
import mxnet as mx
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser(description="train imagenet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fit.add_fit_args(parser)
data.add_data_args(parser)
data.add_data_aug_args(parser)
# use a large aug level
data.set_data_aug_level(parser, 3)
parser.set_defaults(
# network
network = 'resnet',
num_layers = 18,
# data
data_train = '/home/ubuntu/ILSVRC2012/ILSVRC2012_dataset_train.rec', # ALL DATA MUST BE PLACED IN A FOLDER
data_val = '/home/ubuntu/ILSVRC2012/ILSVRC2012_dataset_val.rec', # INSTEAD OF A BUCKET
num_classes = 1000,
num_examples = 281167,
image_shape = '3,224,224',
min_random_scale = 1, # if input image has min size k, suggest to use
# 256.0/x, e.g. 0.533 for 480
# train
lr = 0.03,
num_epochs = 80,
lr_step_epochs = '30,60',
disp_batches = 1
)
args = parser.parse_args()
# load network
from importlib import import_module
net = import_module('symbols.'+args.network)
sym = net.get_symbol(**vars(args))
# train
fit.fit(args, sym, data.get_rec_iter)
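# --- Editor's note (illustrative, hypothetical launch command) ---
# The script expects the DMLC_ROLE / DMLC_<ROLE>_ID environment variables that a
# DMLC/ps-lite style launcher sets for each scheduler, server and worker process,
# e.g. something along the lines of:
#   python tools/launch.py -n 2 -s 2 python train_imagenet.py --kv-store dist_sync
# (the launcher path and flags depend on the MXNet checkout being used).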
| 0
| 0
| 0
|
4848485d15db0cd77a31069526785a7cedbeb90e
| 26,456
|
py
|
Python
|
transitfeed/loader.py
|
opentransitmap/transitfeed
|
65d9a789dd8f58ffcb1a158a1807e1ee74b688ee
|
[
"Apache-2.0"
] | null | null | null |
transitfeed/loader.py
|
opentransitmap/transitfeed
|
65d9a789dd8f58ffcb1a158a1807e1ee74b688ee
|
[
"Apache-2.0"
] | null | null | null |
transitfeed/loader.py
|
opentransitmap/transitfeed
|
65d9a789dd8f58ffcb1a158a1807e1ee74b688ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import codecs
import csv
import os
import re
import zipfile
from . import gtfsfactoryuser
from . import problems
from . import util
from .compat import StringIO
| 40.329268
| 88
| 0.570759
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import codecs
import csv
import os
import re
import zipfile
from . import gtfsfactoryuser
from . import problems
from . import util
from .compat import StringIO
class Loader:
def __init__(
self,
feed_path=None,
schedule=None,
problems=problems.default_problem_reporter,
extra_validation=False,
load_stop_times=True,
memory_db=True,
zip=None,
check_duplicate_trips=False,
gtfs_factory=None,
):
"""Initialize a new Loader object.
Args:
feed_path: string path to a zip file or directory
schedule: a Schedule object or None to have one created
problems: a ProblemReporter object, the default reporter raises an
exception for each problem
extra_validation: True if you would like extra validation
load_stop_times: load the stop_times table, used to speed load time when
times are not needed. The default is True.
memory_db: if creating a new Schedule object use an in-memory sqlite
database instead of creating one in a temporary file
zip: a zipfile.ZipFile object, optionally used instead of path
"""
if gtfs_factory is None:
gtfs_factory = gtfsfactoryuser.GtfsFactoryUser().GetGtfsFactory()
if not schedule:
schedule = gtfs_factory.Schedule(
problem_reporter=problems,
memory_db=memory_db,
check_duplicate_trips=check_duplicate_trips,
)
self._extra_validation = extra_validation
self._schedule = schedule
self._problems = problems
self._path = feed_path
self._zip = zip
self._load_stop_times = load_stop_times
self._gtfs_factory = gtfs_factory
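    # --- Editor's note: illustrative use of this constructor (hypothetical paths) ---
    #   loader = Loader("feed.zip", problems=problems.default_problem_reporter,
    #                   extra_validation=True)
    #   schedule = loader.Load()
    # Load(), defined later in this class, drives the per-file parsing via the
    # _ReadCsvDict/_ReadCSV helpers below.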
def _DetermineFormat(self):
"""Determines whether the feed is in a form that we understand, and
if so, returns True."""
if self._zip:
# If zip was passed to __init__ then path isn't used
assert not self._path
return True
if not isinstance(self._path, basestring) and hasattr(self._path, "read"):
# A file-like object, used for testing with a StringIO file
self._zip = zipfile.ZipFile(self._path, mode="r")
return True
if not os.path.exists(self._path):
self._problems.FeedNotFound(self._path)
return False
if self._path.endswith(".zip"):
try:
self._zip = zipfile.ZipFile(self._path, mode="r")
except IOError: # self._path is a directory
pass
except zipfile.BadZipfile:
self._problems.UnknownFormat(self._path)
return False
if not self._zip and not os.path.isdir(self._path):
self._problems.UnknownFormat(self._path)
return False
return True
def _GetFileNames(self):
"""Returns a list of file names in the feed."""
if self._zip:
return self._zip.namelist()
else:
return os.listdir(self._path)
def _CheckFileNames(self):
filenames = self._GetFileNames()
known_filenames = self._gtfs_factory.GetKnownFilenames()
for feed_file in filenames:
if feed_file not in known_filenames:
if not feed_file.startswith("."):
# Don't worry about .svn files and other hidden files
# as this will break the tests.
self._problems.UnknownFile(feed_file)
def _GetUtf8Contents(self, file_name):
"""Check for errors in file_name and return a string for csv reader."""
contents = self._FileContents(file_name)
if not contents: # Missing file
return
# Check for errors that will prevent csv.reader from working
if len(contents) >= 2 and contents[0:2] in (
codecs.BOM_UTF16_BE,
codecs.BOM_UTF16_LE,
):
self._problems.FileFormat("appears to be encoded in utf-16", (file_name,))
# Convert and continue, so we can find more errors
contents = codecs.getdecoder("utf-16")(contents)[0].encode("utf-8")
null_index = contents.find("\0")
if null_index != -1:
# It is easier to get some surrounding text than calculate the exact
# row_num
m = re.search(r".{,20}\0.{,20}", contents, re.DOTALL)
self._problems.FileFormat(
'contains a null in text "%s" at byte %d'
% (codecs.getencoder("string_escape")(m.group()), null_index + 1),
(file_name,),
)
return
# strip out any UTF-8 Byte Order Marker (otherwise it'll be
# treated as part of the first column name, causing a mis-parse)
contents = contents.lstrip(codecs.BOM_UTF8)
return contents
def _ReadCsvDict(self, file_name, cols, required, deprecated):
"""Reads lines from file_name, yielding a dict of unicode values."""
assert file_name.endswith(".txt")
table_name = file_name[0:-4]
contents = self._GetUtf8Contents(file_name)
if not contents:
return
eol_checker = util.EndOfLineChecker(
StringIO(contents), file_name, self._problems
)
# The csv module doesn't provide a way to skip trailing space, but when I
# checked 15/675 feeds had trailing space in a header row and 120 had spaces
# after fields. Space after header fields can cause a serious parsing
        # problem, so warn. Space after body fields can cause problems for time,
        # integer and id fields; they will be validated at higher levels.
reader = csv.reader(eol_checker, skipinitialspace=True)
raw_header = next(reader)
header_occurrences = util.defaultdict(lambda: 0)
header = []
valid_columns = [] # Index into raw_header and raw_row
for i, h in enumerate(raw_header):
h_stripped = h.strip()
if not h_stripped:
self._problems.CsvSyntax(
description="The header row should not contain any blank values. "
"The corresponding column will be skipped for the "
"entire file.",
context=(file_name, 1, [""] * len(raw_header), raw_header),
type=problems.TYPE_ERROR,
)
continue
elif h != h_stripped:
self._problems.CsvSyntax(
description="The header row should not contain any "
"space characters.",
context=(file_name, 1, [""] * len(raw_header), raw_header),
type=problems.TYPE_WARNING,
)
header.append(h_stripped)
valid_columns.append(i)
header_occurrences[h_stripped] += 1
for name, count in header_occurrences.items():
if count > 1:
self._problems.DuplicateColumn(
header=name, file_name=file_name, count=count
)
self._schedule._table_columns[table_name] = header
# check for unrecognized columns, which are often misspellings
header_context = (file_name, 1, [""] * len(header), header)
valid_cols = cols + [deprecated_name for (deprecated_name, _) in deprecated]
unknown_cols = set(header) - set(valid_cols)
if len(unknown_cols) == len(header):
self._problems.CsvSyntax(
description="The header row did not contain any known column "
"names. The file is most likely missing the header row "
"or not in the expected CSV format.",
context=(file_name, 1, [""] * len(raw_header), raw_header),
type=problems.TYPE_ERROR,
)
else:
for col in unknown_cols:
# this is provided in order to create a nice colored list of
# columns in the validator output
self._problems.UnrecognizedColumn(file_name, col, header_context)
# check for missing required columns
missing_cols = set(required) - set(header)
for col in missing_cols:
# this is provided in order to create a nice colored list of
# columns in the validator output
self._problems.MissingColumn(file_name, col, header_context)
# check for deprecated columns
for (deprecated_name, new_name) in deprecated:
if deprecated_name in header:
self._problems.DeprecatedColumn(
file_name, deprecated_name, new_name, header_context
)
        line_num = 1  # First line read by next(reader) above
for raw_row in reader:
line_num += 1
if len(raw_row) == 0: # skip extra empty lines in file
continue
if len(raw_row) > len(raw_header):
self._problems.OtherProblem(
"Found too many cells (commas) in line "
'%d of file "%s". Every row in the file '
"should have the same number of cells as "
"the header (first line) does." % (line_num, file_name),
(file_name, line_num),
type=problems.TYPE_WARNING,
)
if len(raw_row) < len(raw_header):
self._problems.OtherProblem(
"Found missing cells (commas) in line "
'%d of file "%s". Every row in the file '
"should have the same number of cells as "
"the header (first line) does." % (line_num, file_name),
(file_name, line_num),
type=problems.TYPE_WARNING,
)
# raw_row is a list of raw bytes which should be valid utf-8. Convert each
# valid_columns of raw_row into Unicode.
valid_values = []
unicode_error_columns = [] # index of valid_values elements with an error
for i in valid_columns:
try:
valid_values.append(raw_row[i].decode("utf-8"))
except UnicodeDecodeError:
# Replace all invalid characters with REPLACEMENT CHARACTER (U+FFFD)
valid_values.append(
codecs.getdecoder("utf8")(raw_row[i], errors="replace")[0]
)
unicode_error_columns.append(len(valid_values) - 1)
except IndexError:
break
# The error report may contain a dump of all values in valid_values so
# problems can not be reported until after converting all of raw_row to
# Unicode.
for i in unicode_error_columns:
self._problems.InvalidValue(
header[i],
valid_values[i],
"Unicode error",
(file_name, line_num, valid_values, header),
)
# We strip ALL whitespace from around values. This matches the behavior
# of both the Google and OneBusAway GTFS parser.
valid_values = [value.strip() for value in valid_values]
d = dict(zip(header, valid_values))
yield (d, line_num, header, valid_values)
# TODO: Add testing for this specific function
def _ReadCSV(self, file_name, cols, required, deprecated):
"""Reads lines from file_name, yielding a list of unicode values
corresponding to the column names in cols."""
contents = self._GetUtf8Contents(file_name)
if not contents:
return
eol_checker = util.EndOfLineChecker(
StringIO(contents), file_name, self._problems
)
reader = csv.reader(eol_checker) # Use excel dialect
header = next(reader)
        header = [x.strip() for x in header]  # trim any whitespace
header_occurrences = util.defaultdict(lambda: 0)
for column_header in header:
header_occurrences[column_header] += 1
for name, count in header_occurrences.items():
if count > 1:
self._problems.DuplicateColumn(
header=name, file_name=file_name, count=count
)
# check for unrecognized columns, which are often misspellings
header_context = (file_name, 1, [""] * len(header), header)
valid_cols = cols + [deprecated_name for (deprecated_name, _) in deprecated]
unknown_cols = set(header).difference(set(valid_cols))
for col in unknown_cols:
# this is provided in order to create a nice colored list of
# columns in the validator output
self._problems.UnrecognizedColumn(file_name, col, header_context)
# check for missing required columns
col_index = [-1] * len(cols)
for i in range(len(cols)):
if cols[i] in header:
col_index[i] = header.index(cols[i])
elif cols[i] in required:
self._problems.MissingColumn(file_name, cols[i], header_context)
# check for deprecated columns
for (deprecated_name, new_name) in deprecated:
if deprecated_name in header:
self._problems.DeprecatedColumn(
file_name, deprecated_name, new_name, header_context
)
row_num = 1
for row in reader:
row_num += 1
if len(row) == 0: # skip extra empty lines in file
continue
if len(row) > len(header):
self._problems.OtherProblem(
"Found too many cells (commas) in line "
'%d of file "%s". Every row in the file '
"should have the same number of cells as "
"the header (first line) does." % (row_num, file_name),
(file_name, row_num),
type=problems.TYPE_WARNING,
)
if len(row) < len(header):
self._problems.OtherProblem(
"Found missing cells (commas) in line "
'%d of file "%s". Every row in the file '
"should have the same number of cells as "
"the header (first line) does." % (row_num, file_name),
(file_name, row_num),
type=problems.TYPE_WARNING,
)
result = [None] * len(cols)
unicode_error_columns = [] # A list of column numbers with an error
for i in range(len(cols)):
ci = col_index[i]
if ci >= 0:
if len(row) <= ci: # handle short CSV rows
result[i] = u""
else:
try:
result[i] = row[ci].decode("utf-8").strip()
except UnicodeDecodeError:
# Replace all invalid characters with
# REPLACEMENT CHARACTER (U+FFFD)
result[i] = codecs.getdecoder("utf8")(
row[ci], errors="replace"
)[0].strip()
unicode_error_columns.append(i)
for i in unicode_error_columns:
self._problems.InvalidValue(
cols[i],
result[i],
"Unicode error",
(file_name, row_num, result, cols),
)
yield (result, row_num, cols)
def _HasFile(self, file_name):
"""Returns True if there's a file in the current feed with the
given file_name in the current feed."""
if self._zip:
return file_name in self._zip.namelist()
else:
file_path = os.path.join(self._path, file_name)
return os.path.exists(file_path) and os.path.isfile(file_path)
def _FileContents(self, file_name):
results = None
if self._zip:
try:
results = self._zip.read(file_name)
            except KeyError:  # file not found in archive
self._problems.MissingFile(file_name)
return None
else:
try:
data_file = open(os.path.join(self._path, file_name), "rb")
results = data_file.read()
except IOError: # file not found
self._problems.MissingFile(file_name)
return None
if not results:
self._problems.EmptyFile(file_name)
return results
def _LoadFeed(self):
loading_order = self._gtfs_factory.GetLoadingOrder()
for filename in loading_order:
if not self._gtfs_factory.IsFileRequired(filename) and not self._HasFile(
filename
):
pass # File is not required, and feed does not have it.
else:
object_class = self._gtfs_factory.GetGtfsClassByFileName(filename)
for (d, row_num, header, row) in self._ReadCsvDict(
filename,
object_class._FIELD_NAMES,
object_class._REQUIRED_FIELD_NAMES,
object_class._DEPRECATED_FIELD_NAMES,
):
self._problems.SetFileContext(filename, row_num, row, header)
instance = object_class(field_dict=d)
instance.SetGtfsFactory(self._gtfs_factory)
if not instance.ValidateBeforeAdd(self._problems):
continue
instance.AddToSchedule(self._schedule, self._problems)
instance.ValidateAfterAdd(self._problems)
self._problems.ClearContext()
def _LoadCalendar(self):
file_name = "calendar.txt"
file_name_dates = "calendar_dates.txt"
if not self._HasFile(file_name) and not self._HasFile(file_name_dates):
self._problems.MissingFile(file_name)
return
# map period IDs to (period object, (file_name, row_num, row, cols))
periods = {}
service_period_class = self._gtfs_factory.ServicePeriod
# process calendar.txt
if self._HasFile(file_name):
has_useful_contents = False
for (row, row_num, cols) in self._ReadCSV(
file_name,
service_period_class._FIELD_NAMES,
service_period_class._REQUIRED_FIELD_NAMES,
service_period_class._DEPRECATED_FIELD_NAMES,
):
context = (file_name, row_num, row, cols)
self._problems.SetFileContext(*context)
period = service_period_class(field_list=row)
if period.service_id in periods:
self._problems.DuplicateID("service_id", period.service_id)
else:
periods[period.service_id] = (period, context)
self._problems.ClearContext()
# process calendar_dates.txt
if self._HasFile(file_name_dates):
# ['service_id', 'date', 'exception_type']
for (row, row_num, cols) in self._ReadCSV(
file_name_dates,
service_period_class._FIELD_NAMES_CALENDAR_DATES,
service_period_class._REQUIRED_FIELD_NAMES_CALENDAR_DATES,
service_period_class._DEPRECATED_FIELD_NAMES_CALENDAR_DATES,
):
context = (file_name_dates, row_num, row, cols)
self._problems.SetFileContext(*context)
service_id = row[0]
period = None
if service_id in periods:
period = periods[service_id][0]
else:
period = service_period_class(service_id)
periods[period.service_id] = (period, context)
exception_type = row[2]
if exception_type == u"1":
period.SetDateHasService(row[1], True, self._problems)
elif exception_type == u"2":
period.SetDateHasService(row[1], False, self._problems)
else:
self._problems.InvalidValue("exception_type", exception_type)
self._problems.ClearContext()
# Now insert the periods into the schedule object, so that they're
# validated with both calendar and calendar_dates info present
for period, context in periods.values():
self._problems.SetFileContext(*context)
self._schedule.AddServicePeriodObject(period, self._problems)
self._problems.ClearContext()
def _LoadShapes(self):
file_name = "shapes.txt"
if not self._HasFile(file_name):
return
shapes = {} # shape_id to shape object
shape_class = self._gtfs_factory.Shape
for (d, row_num, header, row) in self._ReadCsvDict(
file_name,
shape_class._FIELD_NAMES,
shape_class._REQUIRED_FIELD_NAMES,
shape_class._DEPRECATED_FIELD_NAMES,
):
file_context = (file_name, row_num, row, header)
self._problems.SetFileContext(*file_context)
shapepoint = self._gtfs_factory.ShapePoint(field_dict=d)
if not shapepoint.ParseAttributes(self._problems):
continue
if shapepoint.shape_id in shapes:
shape = shapes[shapepoint.shape_id]
else:
shape = shape_class(shapepoint.shape_id)
shape.SetGtfsFactory(self._gtfs_factory)
shapes[shapepoint.shape_id] = shape
shape.AddShapePointObjectUnsorted(shapepoint, self._problems)
self._problems.ClearContext()
        for shape_id, shape in list(shapes.items()):  # copy: entries are deleted below
self._schedule.AddShapeObject(shape, self._problems)
del shapes[shape_id]
def _LoadStopTimes(self):
stop_time_class = self._gtfs_factory.StopTime
for (row, row_num, cols) in self._ReadCSV(
"stop_times.txt",
stop_time_class._FIELD_NAMES,
stop_time_class._REQUIRED_FIELD_NAMES,
stop_time_class._DEPRECATED_FIELD_NAMES,
):
file_context = ("stop_times.txt", row_num, row, cols)
self._problems.SetFileContext(*file_context)
(
trip_id,
arrival_time,
departure_time,
stop_id,
stop_sequence,
stop_headsign,
pickup_type,
drop_off_type,
shape_dist_traveled,
timepoint,
) = row
try:
sequence = int(stop_sequence)
except (TypeError, ValueError):
self._problems.InvalidValue(
"stop_sequence", stop_sequence, "This should be a number."
)
continue
if sequence < 0:
self._problems.InvalidValue(
"stop_sequence", sequence, "Sequence numbers should be 0 or higher."
)
if stop_id not in self._schedule.stops:
self._problems.InvalidValue(
"stop_id", stop_id, "This value wasn't defined in stops.txt"
)
continue
stop = self._schedule.stops[stop_id]
if trip_id not in self._schedule.trips:
self._problems.InvalidValue(
"trip_id", trip_id, "This value wasn't defined in trips.txt"
)
continue
trip = self._schedule.trips[trip_id]
# If self._problems.Report returns then StopTime.__init__ will return
# even if the StopTime object has an error. Thus this code may add a
# StopTime that didn't validate to the database.
# Trip.GetStopTimes then tries to make a StopTime from the invalid data
# and calls the problem reporter for errors. An ugly solution is to
# wrap problems and a better solution is to move all validation out of
# __init__. For now make sure Trip.GetStopTimes gets a problem reporter
# when called from Trip.Validate.
stop_time = stop_time_class(
self._problems,
stop,
arrival_time,
departure_time,
stop_headsign,
pickup_type,
drop_off_type,
shape_dist_traveled,
stop_sequence=sequence,
timepoint=timepoint,
)
trip._AddStopTimeObjectUnordered(stop_time, self._schedule)
self._problems.ClearContext()
# stop_times are validated in Trip.ValidateChildren, called by
# Schedule.Validate
def Load(self):
self._problems.ClearContext()
if not self._DetermineFormat():
return self._schedule
self._CheckFileNames()
self._LoadCalendar()
self._LoadShapes()
self._LoadFeed()
if self._load_stop_times:
self._LoadStopTimes()
if self._zip:
self._zip.close()
self._zip = None
if self._extra_validation:
self._schedule.Validate(self._problems, validate_children=False)
return self._schedule
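# A minimal usage sketch of this loader (illustrative only: the constructor
# arguments are assumed from typical transitfeed-style usage and are not shown
# in this file; "sample-feed.zip" is a placeholder path).
if __name__ == "__main__":
    import transitfeed

    problems = transitfeed.ProblemReporter()
    loader = transitfeed.Loader(
        "sample-feed.zip", problems=problems, extra_validation=True
    )
    schedule = loader.Load()
    print("loaded %d stops and %d trips" % (len(schedule.stops), len(schedule.trips)))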
| 9,843
| 15,785
| 23
|
5ba01580c6da41b657c147d3afe1463ceba3337a
| 4,050
|
py
|
Python
|
sitetree/tests/conftest.py
|
jonkiparsky/django-sitetree
|
4b9ab29ee7c26c20cd7711b8261cc1cadd8c4e50
|
[
"BSD-3-Clause"
] | null | null | null |
sitetree/tests/conftest.py
|
jonkiparsky/django-sitetree
|
4b9ab29ee7c26c20cd7711b8261cc1cadd8c4e50
|
[
"BSD-3-Clause"
] | null | null | null |
sitetree/tests/conftest.py
|
jonkiparsky/django-sitetree
|
4b9ab29ee7c26c20cd7711b8261cc1cadd8c4e50
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from pytest_djangoapp import configure_djangoapp_plugin
pytest_plugins = configure_djangoapp_plugin(
extend_INSTALLED_APPS=[
'django.contrib.admin',
],
)
@pytest.fixture
def build_tree():
"""Builds a sitetree from dict definition.
Returns items indexed by urls.
Example:
items_map = build_tree(
{'alias': 'mytree'},
[{
'title': 'one', 'url': '/one/', 'children': [
{'title': 'subone', 'url': '/subone/'}
]
}]
)
"""
from sitetree.models import Tree, TreeItem
from django.contrib.auth.models import Permission
return build
@pytest.fixture
| 37.850467
| 114
| 0.465679
|
import pytest
from pytest_djangoapp import configure_djangoapp_plugin
pytest_plugins = configure_djangoapp_plugin(
extend_INSTALLED_APPS=[
'django.contrib.admin',
],
)
@pytest.fixture
def build_tree():
"""Builds a sitetree from dict definition.
Returns items indexed by urls.
Example:
items_map = build_tree(
{'alias': 'mytree'},
[{
'title': 'one', 'url': '/one/', 'children': [
{'title': 'subone', 'url': '/subone/'}
]
}]
)
"""
from sitetree.models import Tree, TreeItem
from django.contrib.auth.models import Permission
def build(tree_dict, items):
def attach_items(tree, items, parent=None):
for item_dict in items:
children = item_dict.pop('children', [])
access_permissions = item_dict.pop('access_permissions', [])
item = TreeItem(**item_dict)
item.tree = tree
item.parent = parent
item.save()
for permission in access_permissions:
item.access_permissions.add(Permission.objects.get(codename=permission))
items_map['%s' % item.url] = item
children and attach_items(tree, children, parent=item)
items_map = {}
tree = Tree(**tree_dict)
tree.save()
attach_items(tree, items)
return items_map
return build
@pytest.fixture
def common_tree(build_tree):
items = build_tree(
{'alias': 'mytree'},
[{
'title': 'Home', 'url': '/home/', 'children': [
{'title': 'Users', 'url': '/users/', 'children': [
{'title': 'Moderators', 'url': '/users/moderators/'},
{'title': 'Ordinary', 'url': '/users/ordinary/'},
{'title': 'Hidden', 'hidden': True, 'url': '/users/hidden/'},
]},
{'title': 'Articles', 'url': '/articles/', 'children': [
{'title': 'About cats', 'url': '/articles/cats/', 'children': [
{'title': 'Good', 'url': '/articles/cats/good/'},
{'title': 'Bad', 'url': '/articles/cats/bad/'},
{'title': 'Ugly', 'url': '/articles/cats/ugly/'},
]},
{'title': 'About dogs', 'url': '/articles/dogs/'},
{'title': 'About mice', 'inmenu': False, 'url': '/articles/mice/'},
]},
{'title': 'Contacts', 'inbreadcrumbs': False, 'url': '/contacts/', 'children': [
{'title': 'Russia', 'url': '/contacts/russia/',
'hint': 'The place', 'description': 'Russian Federation', 'children': [
{'title': 'Web', 'alias': 'ruweb', 'url': '/contacts/russia/web/', 'children': [
{'title': 'Public {{ subtitle }}', 'url': '/contacts/russia/web/public/'},
{'title': 'Private',
'url': '/contacts/russia/web/private/',
'hint': 'Private Area Hint',
'description': 'Private Area Description',
},
]},
{'title': 'Postal', 'insitetree': False, 'url': '/contacts/russia/postal/'},
]},
{'title': 'Australia', 'urlaspattern': True, 'url': 'contacts_australia australia_var',
'children': [
{'title': 'Alice Springs', 'access_loggedin': True, 'url': '/contacts/australia/alice/'},
{'title': 'Darwin', 'access_guest': True, 'url': '/contacts/australia/darwin/'},
]},
{'title': 'China', 'urlaspattern': True, 'url': 'contacts_china china_var'},
]},
]
}]
)
items[''] = items['/home/']
return items
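# Hedged sketch of a test consuming the fixtures above (the test name and the
# assertions are illustrative and not part of this file).
def test_common_tree_lookup(common_tree):
    home = common_tree['/home/']
    assert home.title == 'Home'
    # '' was registered above as an alias for the home item
    assert common_tree[''] is home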
| 3,293
| 0
| 49
|
bf1414b0416a3eb72adcb754f86a3570bc77a1ae
| 433
|
py
|
Python
|
code/tools/pull_sz_starts.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
code/tools/pull_sz_starts.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
code/tools/pull_sz_starts.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
import numpy as np
| 28.866667
| 57
| 0.639723
|
import numpy as np
def pull_sz_starts(patient, metadata):
assert(patient in metadata)
sz_names = metadata[patient]["Events"]["Ictal"]
sz_starts = []
for sz_name in sz_names:
if patient == "HUP111":
if 'D01' in sz_names[sz_name]['iEEG_record']:
continue
sz_starts.append(sz_names[sz_name]["SeizureEEC"])
sz_starts = np.array(sz_starts)
return np.unique(sz_starts)
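# Illustrative only: a metadata dict with the shape implied by the lookups in
# pull_sz_starts (patient name and numbers are invented).
example_metadata = {
    "HUP999": {
        "Events": {
            "Ictal": {
                "Seizure 1": {"SeizureEEC": 1200.0, "iEEG_record": "HUP999_D02"},
                "Seizure 2": {"SeizureEEC": 5400.0, "iEEG_record": "HUP999_D02"},
            }
        }
    }
}
print(pull_sz_starts("HUP999", example_metadata))  # -> [1200. 5400.]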
| 392
| 0
| 23
|
7ebda6c3eb3ba2e6b4feae34ffc9e247ff699693
| 1,930
|
py
|
Python
|
kaggle/machine-learning/underfitting_and_overfitting.py
|
matteougolotti/ML
|
759eff5f5bcaa41e9ff19a2d78869bd4b968324d
|
[
"MIT"
] | null | null | null |
kaggle/machine-learning/underfitting_and_overfitting.py
|
matteougolotti/ML
|
759eff5f5bcaa41e9ff19a2d78869bd4b968324d
|
[
"MIT"
] | null | null | null |
kaggle/machine-learning/underfitting_and_overfitting.py
|
matteougolotti/ML
|
759eff5f5bcaa41e9ff19a2d78869bd4b968324d
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
iowa_file_path = 'rain.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE: {:,.0f}".format(val_mae))
# Find best tree depth to reduce overfitting and underfitting
candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
# Write loop to find the ideal tree size from candidate_max_leaf_nodes
candidate = 0
min_mae = get_mae(candidate_max_leaf_nodes[0], train_X, val_X, train_y, val_y)
for i in range(len(candidate_max_leaf_nodes)):
n = candidate_max_leaf_nodes[i]
mae = get_mae(n, train_X, val_X, train_y, val_y)
if mae < min_mae:
min_mae = mae
candidate = i
# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)
best_tree_size = candidate_max_leaf_nodes[candidate]
print(candidate)
# Final optimized model
final_model = DecisionTreeRegressor(max_leaf_nodes = 100, random_state = 0)
final_model.fit(X, y)
| 33.275862
| 103
| 0.765285
|
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(train_X, train_y)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return(mae)
iowa_file_path = 'rain.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE: {:,.0f}".format(val_mae))
# Find best tree depth to reduce overfitting and underfitting
candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
# Write loop to find the ideal tree size from candidate_max_leaf_nodes
candidate = 0
min_mae = get_mae(candidate_max_leaf_nodes[0], train_X, val_X, train_y, val_y)
for i in range(len(candidate_max_leaf_nodes)):
n = candidate_max_leaf_nodes[i]
mae = get_mae(n, train_X, val_X, train_y, val_y)
if mae < min_mae:
min_mae = mae
candidate = i
# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)
best_tree_size = candidate_max_leaf_nodes[candidate]
print(candidate)
# Final optimized model
final_model = DecisionTreeRegressor(max_leaf_nodes = 100, random_state = 0)
final_model.fit(X, y)
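# Illustrative follow-up (not in the source, which hardcodes max_leaf_nodes=100):
# the size found by the loop above could be reused directly when retraining.
tuned_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=0)
tuned_model.fit(X, y)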
| 253
| 0
| 23
|
b7c8dadccc4c73a9db593b8f6745709e72ed05ab
| 790
|
py
|
Python
|
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | null | null | null |
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | null | null | null |
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | 1
|
2020-05-21T15:13:36.000Z
|
2020-05-21T15:13:36.000Z
|
from turtle import *
# Fractals
if __name__ == '__main__':
draw_fractal(5, 90, 10, 'FX', 'X', 'X+YF+', 'Y', '-FX-Y')
| 25.483871
| 98
| 0.517722
|
from turtle import *
# Fractals
def draw_fractal(length, angle, level, initial_state, target, replacement, target2, replacement2):
state = initial_state
for counter in range(level):
state2 = ''
for character in state:
if character == target:
state2 += replacement
elif character == target2:
state2 += replacement2
else:
state2 += character
state = state2
# draw
for character in state:
if character == 'F':
forward(length)
elif character == '+':
right(angle)
elif character == '-':
left(angle)
if __name__ == '__main__':
draw_fractal(5, 90, 10, 'FX', 'X', 'X+YF+', 'Y', '-FX-Y')
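    # The call above uses the classic dragon-curve rules (start 'FX', X -> X+YF+,
    # Y -> -FX-Y, 90 degrees). As an illustrative alternative not taken from the
    # source, a Levy-C-style curve uses a single rule at 45 degrees; the second
    # rule slot is filled with an unused dummy symbol:
    # draw_fractal(5, 45, 10, 'F', 'F', '+F--F+', 'X', 'X')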
| 637
| 0
| 22
|
0889099ad9836adbaca7686283915041684afcf0
| 146
|
py
|
Python
|
fileconversions/conversions/jpeg_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
fileconversions/conversions/jpeg_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
fileconversions/conversions/jpeg_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
from .command_conversion import CommandConversion
| 20.857143
| 49
| 0.780822
|
from .command_conversion import CommandConversion
class JpegToPdf(CommandConversion):
command_name = 'convert'
output_extension = 'pdf'
| 0
| 72
| 23
|
76b48b0af6e5bf545ed6ea33494e598053b496cb
| 1,989
|
py
|
Python
|
Doc2Vector/data/datamake.py
|
sladesha/algorithm
|
3ade2e7fd4a7b3acb6eb4f99ef81227ba51569e4
|
[
"MIT"
] | 520
|
2018-02-07T05:48:49.000Z
|
2022-03-07T02:03:06.000Z
|
Doc2Vector/data/datamake.py
|
WADRHAW/deep_learning
|
3ade2e7fd4a7b3acb6eb4f99ef81227ba51569e4
|
[
"MIT"
] | 15
|
2019-02-20T15:11:11.000Z
|
2022-02-10T00:39:57.000Z
|
Doc2Vector/data/datamake.py
|
WADRHAW/deep_learning
|
3ade2e7fd4a7b3acb6eb4f99ef81227ba51569e4
|
[
"MIT"
] | 251
|
2017-12-28T09:46:13.000Z
|
2022-03-20T13:39:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/10 3:23 PM
# @Author : Slade
# @File : datamake.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import numpy as np
flags = tf.app.flags
flags.DEFINE_string("input_dir", "./data/", "input dir")
flags.DEFINE_string("output_dir", "./text/data/", "output dir")
FLAGS = flags.FLAGS
# Example input line: a whitespace-tokenized Chinese sentence, mapped to the word IDs below
# 23 1023 94 782 4234 10304
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 32.080645
| 114
| 0.604827
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/10 3:23 PM
# @Author : Slade
# @File : datamake.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import numpy as np
flags = tf.app.flags
flags.DEFINE_string("input_dir", "./data/", "input dir")
flags.DEFINE_string("output_dir", "./text/data/", "output dir")
FLAGS = flags.FLAGS
# Example input line: a whitespace-tokenized Chinese sentence, mapped to the word IDs below
# 23 1023 94 782 4234 10304
def gen_tfrecords(in_file):
basename = os.path.basename(in_file) + ".tfrecord"
out_file = os.path.join(FLAGS.output_dir, basename)
tfrecord_out = tf.python_io.TFRecordWriter(out_file)
with open(in_file) as fi:
idx = 0
for line in fi:
fields = line.strip().split(' ')
for i in range(len(fields)):
content = np.array(fields[max(0, i - 2):i] + fields[i + 1:min(i + 3, len(fields))])
target = np.array([fields[i]])
feature = {
"context_word": tf.train.Feature(int64_list=tf.train.Int64List(value=content.astype(np.int))),
"target_word": tf.train.Feature(int64_list=tf.train.Int64List(value=target.astype(np.int))),
"cate_id": tf.train.Feature(int64_list=tf.train.Int64List(value=[idx]))
}
idx += 1
# serialized to Example
example = tf.train.Example(features=tf.train.Features(feature=feature))
print(example)
serialized = example.SerializeToString()
print(serialized)
print(".....")
tfrecord_out.write(serialized)
    # data packaging complete
    tfrecord_out.close()
def main(_):
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
gen_tfrecords(FLAGS.input_dir + "test.txt")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
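# Hedged sketch of reading the records written by gen_tfrecords (TF1-style API;
# the output file name is assumed from the flags above). Each record holds a
# window of up to two IDs either side of the target word.
# for rec in tf.python_io.tf_record_iterator("./text/data/test.txt.tfrecord"):
#     ex = tf.train.Example()
#     ex.ParseFromString(rec)
#     print(ex.features.feature["context_word"].int64_list.value,
#           ex.features.feature["target_word"].int64_list.value)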
| 1,365
| 0
| 45
|
c777c78c953cfbfd3c1f69bdc31089c5cd84467c
| 3,743
|
py
|
Python
|
tests/fixtures.py
|
WilmerLab/mofun
|
ec95f2c4455a37ff73d0f595b56f4a246924c2dd
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
WilmerLab/mofun
|
ec95f2c4455a37ff73d0f595b56f4a246924c2dd
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
WilmerLab/mofun
|
ec95f2c4455a37ff73d0f595b56f4a246924c2dd
|
[
"MIT"
] | null | null | null |
from math import sqrt
from pathlib import Path
import ase.io
import numpy as np
from numpy.linalg import norm
from numpy.testing import assert_equal as np_assert_equal
import pytest
from pytest import approx
import tests
from mofun import Atoms
from mofun.helpers import typekey
sqrt2_2 = sqrt(2) / 2
sqrt3_2 = sqrt(3) / 2
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 34.027273
| 118
| 0.674058
|
from math import sqrt
from pathlib import Path
import ase.io
import numpy as np
from numpy.linalg import norm
from numpy.testing import assert_equal as np_assert_equal
import pytest
from pytest import approx
import tests
from mofun import Atoms
from mofun.helpers import typekey
sqrt2_2 = sqrt(2) / 2
sqrt3_2 = sqrt(3) / 2
def random_positions(num):
return np.random.rand(num, 3) * 100
def assert_topo(topo, expected_topo, types=None, expected_types=None, coeffs=None, expected_coeffs=None):
# check right atoms are part of the topo
sorted_topo = sorted([typekey(t) for t in topo])
sorted_expected_topo = sorted([typekey(t) for t in expected_topo])
np_assert_equal(sorted_topo, sorted_expected_topo)
# check types are mapped (assume coeffs are ordered the same!)
if types is not None and expected_types is not None:
sorted_topo_w_types = sorted([(*typekey(t), types[i]) for i, t in enumerate(topo)])
sorted_expected_topo_w_types = sorted([(*typekey(t), expected_types[i]) for i, t in enumerate(expected_topo)])
np_assert_equal(sorted_topo_w_types, sorted_expected_topo_w_types)
# check coeffs for each type are equal
if coeffs is not None and expected_coeffs is not None:
np_assert_equal(coeffs, expected_coeffs)
def assert_benzene(coords):
# incomplete sample
p = coords
assert norm(p[0] - p[1]) == approx(2.42, 5e-2)
assert norm(p[0] - p[3]) == approx(1.40, 5e-2)
assert norm(p[0] - p[4]) == approx(2.79, 5e-2)
assert norm(p[5] - p[8]) == approx(0.93, 5e-2)
@pytest.fixture
def linear_cnnc():
yield Atoms(elements='CNNC', positions=[(0., 0., 0), (1.0, 0., 0.), (2.0, 0., 0.), (3.0, 0., 0.)],
bonds=[(0,1), (1,2), (2,3)], bond_types=[0] * 3,
angles=[(0,1,2), (1,2,3)], angle_types=[0,0],
dihedrals=[(0,1,2,3)], dihedral_types=[0], cell=15*np.identity(3))
@pytest.fixture
def octane():
# CH3 CH2 CH2 CH2 CH2 CH2 CH2 CH3 #
with Path("tests/molecules/octane.xyz") as path:
structure = Atoms.from_ase_atoms(ase.io.read(path))
structure.cell = 60 * np.identity(3)
structure.translate((30., 30., 30.))
yield structure
@pytest.fixture
def half_octane():
# CH3 CH2 CH2 CH2 #
with Path("tests/molecules/half_octane.xyz") as path:
structure = Atoms.from_ase_atoms(ase.io.read(path))
structure.cell = 60 * np.identity(3)
structure.translate((30., 30., 30.))
yield structure
@pytest.fixture
def hkust1_cif():
with Path("tests/hkust-1/hkust-1-with-bonds.cif") as path:
yield Atoms.load_p1_cif(path)
@pytest.fixture
def hkust1_3x3x3_xyz():
with Path("tests/hkust-1/hkust-1-3x3x3.xyz") as path:
structure = Atoms.from_ase_atoms(ase.io.read(path))
structure.cell = 79.0290 * np.identity(3)
yield structure
@pytest.fixture
def hkust1_3x3x3_cif():
with Path("tests/hkust-1/hkust-1-3x3x3.cif") as path:
yield Atoms.load_p1_cif(path)
@pytest.fixture
def benzene():
with Path("tests/molecules/benzene.xyz") as path:
yield Atoms.from_ase_atoms(ase.io.read(path))
@pytest.fixture
def uio66_linker_no_bonds():
with Path("tests/uio66/uio66-linker-no-bonds.lmpdat").open() as fd:
yield Atoms.load_lmpdat(fd, atom_format="atomic")
@pytest.fixture
def uio66_linker_some_bonds():
# this was a modified UIO-66-F linker with bonds defined for the C-F bond. The F's have been
# replaced by H's.
with Path("tests/uio66/uio66-linker.lmpdat").open() as fd:
yield Atoms.load_lmpdat(fd, atom_format="atomic")
@pytest.fixture
def uio66_linker_cml():
with Path("tests/uio66/uio66-linker.cml") as path:
yield Atoms.load_cml(path)
| 2,956
| 0
| 289
|
9588a3d4cd7e7a5db5bcec909837d679078f7fb9
| 2,535
|
py
|
Python
|
src/scs_core/position/nmea/nmea_report.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 3
|
2019-03-12T01:59:58.000Z
|
2020-09-12T07:27:42.000Z
|
src/scs_core/position/nmea/nmea_report.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 1
|
2018-04-20T07:58:38.000Z
|
2021-03-27T08:52:45.000Z
|
src/scs_core/position/nmea/nmea_report.py
|
seoss/scs_core
|
0d4323c5697a39eb44a887f179ba5dca3716c1d2
|
[
"MIT"
] | 4
|
2017-09-29T13:08:43.000Z
|
2019-10-09T09:13:58.000Z
|
"""
Created on 31 Dec 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
A helper class for validating and preparing GPS module output strings.
https://www.nmea.org
https://en.wikipedia.org/wiki/NMEA_0183
"""
# --------------------------------------------------------------------------------------------------------------------
class NMEAReport(object):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
# ----------------------------------------------------------------------------------------------------------------
@classmethod
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, fields):
"""
Constructor
"""
self.__fields = fields
# ----------------------------------------------------------------------------------------------------------------
@property
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
| 26.40625
| 118
| 0.390138
|
"""
Created on 31 Dec 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
A helper class for validating and preparing GPS module output strings.
https://www.nmea.org
https://en.wikipedia.org/wiki/NMEA_0183
"""
# --------------------------------------------------------------------------------------------------------------------
class NMEAReport(object):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def checksum(cls, text):
cs = 0
for c in text[1:]:
cs ^= ord(c)
return cs
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, line):
main = line.strip().split("*")
if len(main) != 2:
raise ValueError("malformed line:%s" % (line.strip()))
fields = [item.strip() for item in main[0].split(",")]
cs = int(main[1], 16)
if cs != cls.checksum(main[0]):
raise ValueError("invalid checksum:%s" % (line.strip()))
return NMEAReport(fields)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, fields):
"""
Constructor
"""
self.__fields = fields
def __len__(self):
return len(self.__fields)
# ----------------------------------------------------------------------------------------------------------------
@property
def message_id(self):
return self.str(0) if len(self) > 0 else None
# ----------------------------------------------------------------------------------------------------------------
def int(self, index):
number_str = self.str(index)
number = None if number_str is None else int(number_str)
return number
def float(self, index, precision):
index_str = self.str(index)
number = None if index_str is None else float(index_str)
if number is None:
return None
return round(number, precision)
def str(self, index):
return self.__fields[index] if len(self.__fields[index]) > 0 else None
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "NMEAReport:{fields:%s}" % self.__fields
| 1,032
| 0
| 213
|
8376ebe3fb4d0496496aef10e2331b23800d0f80
| 2,280
|
py
|
Python
|
dojo/db_migrations/0063_jira_refactor.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,772
|
2018-01-22T23:32:15.000Z
|
2022-03-31T14:49:33.000Z
|
dojo/db_migrations/0063_jira_refactor.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 3,461
|
2018-01-20T19:12:28.000Z
|
2022-03-31T17:14:39.000Z
|
dojo/db_migrations/0063_jira_refactor.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,173
|
2018-01-23T07:10:23.000Z
|
2022-03-31T14:40:43.000Z
|
# Generated by Django 2.2.16 on 2020-11-07 11:31
from django.db import migrations, models
import django.db.models.deletion
| 35.076923
| 159
| 0.60307
|
# Generated by Django 2.2.16 on 2020-11-07 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dojo', '0062_add_vuln_id_from_tool'),
]
operations = [
migrations.DeleteModel(
name='JIRA_Clone',
),
migrations.DeleteModel(
name='JIRA_Details_Cache',
),
migrations.RenameModel(
old_name='JIRA_PKey',
new_name='JIRA_Project',
),
migrations.AddField(
model_name='jira_issue',
name='jira_change',
field=models.DateTimeField(help_text='The date the linked Jira issue was last modified.', null=True, verbose_name='Jira last update'),
),
migrations.AddField(
model_name='jira_issue',
name='jira_creation',
field=models.DateTimeField(help_text='The date a Jira issue was created from this finding.', null=True, verbose_name='Jira creation'),
),
migrations.RenameModel(
old_name='JIRA_Conf',
new_name='JIRA_Instance',
),
migrations.RenameField(
model_name='jira_project',
old_name='conf',
new_name='jira_instance',
),
migrations.AddField(
model_name='jira_issue',
name='jira_project',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='dojo.JIRA_Project'),
),
migrations.AddField(
model_name='JIRA_Project',
name='engagement',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.Engagement'),
),
migrations.AlterField(
model_name='JIRA_Project',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.Product'),
),
migrations.AlterField(
model_name='jira_project',
name='jira_instance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.JIRA_Instance', verbose_name='JIRA Instance'),
),
]
| 0
| 2,132
| 23
|
f2e3923b50c3ce6f9c16b2637347a6d6f8f1281d
| 584
|
py
|
Python
|
chaoshi/chaoshi/items.py
|
basicworld/scrapy.com.jd
|
414a1827909c514dddedb552b8732d5b07a5d441
|
[
"MIT"
] | null | null | null |
chaoshi/chaoshi/items.py
|
basicworld/scrapy.com.jd
|
414a1827909c514dddedb552b8732d5b07a5d441
|
[
"MIT"
] | null | null | null |
chaoshi/chaoshi/items.py
|
basicworld/scrapy.com.jd
|
414a1827909c514dddedb552b8732d5b07a5d441
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
| 24.333333
| 51
| 0.690068
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ChaoshiCategoryItem(scrapy.Item):
# define the fields for your item here like:
    cateUrl = scrapy.Field()  # category URL
    cateName = scrapy.Field()  # category name (second-level category)
class ChaoshiGoodsItem(scrapy.Item):
# define the fields for your item here like:
goodsName = scrapy.Field()
goodsId = scrapy.Field()
goodsUrl = scrapy.Field()
goodsPrice = scrapy.Field()
goodsPicUrl = scrapy.Field()
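# Hedged sketch of populating an item in a spider callback (the CSS selector and
# function name are placeholders, not taken from this project).
def parse_goods(response):
    item = ChaoshiGoodsItem()
    item['goodsName'] = response.css('div.sku-name::text').get()
    item['goodsUrl'] = response.url
    yield item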
| 0
| 396
| 46
|
d33b2a5160487e545d49f24a69782cecb3600af5
| 215
|
py
|
Python
|
Tests/Test_add_new_movie.py
|
agafonovOleg402Targeting/Se-python-16
|
bc0bf860f470d0c325ee8bb9aaae4059352fb18a
|
[
"Apache-2.0"
] | null | null | null |
Tests/Test_add_new_movie.py
|
agafonovOleg402Targeting/Se-python-16
|
bc0bf860f470d0c325ee8bb9aaae4059352fb18a
|
[
"Apache-2.0"
] | null | null | null |
Tests/Test_add_new_movie.py
|
agafonovOleg402Targeting/Se-python-16
|
bc0bf860f470d0c325ee8bb9aaae4059352fb18a
|
[
"Apache-2.0"
] | null | null | null |
from conftest import app
from model.User import User
| 23.888889
| 33
| 0.725581
|
from conftest import app
from model.User import User
def test_add_new_movie(app):
app.login(User.Admin())
assert app.is_logged_in()
app.add_new_movie()
app.logout()
assert app.is_not_logged_in()
| 140
| 0
| 23
|
3c8d880715812d51e091ed9db6fa6f0f5b6498ad
| 27,654
|
py
|
Python
|
tests/unittests/BuscoConfig_unittests.py
|
aglabx/aglab_busco
|
a6f763e044cf649d82bc40b45b1c67c7dc09ee38
|
[
"MIT"
] | null | null | null |
tests/unittests/BuscoConfig_unittests.py
|
aglabx/aglab_busco
|
a6f763e044cf649d82bc40b45b1c67c7dc09ee38
|
[
"MIT"
] | null | null | null |
tests/unittests/BuscoConfig_unittests.py
|
aglabx/aglab_busco
|
a6f763e044cf649d82bc40b45b1c67c7dc09ee38
|
[
"MIT"
] | null | null | null |
import unittest
from busco import BuscoConfig
import shutil
import os
from unittest.mock import Mock
from unittest.mock import patch, call
| 42.349158
| 88
| 0.658567
|
import unittest
from busco import BuscoConfig
import shutil
import os
from unittest.mock import Mock
from unittest.mock import patch, call
class TestBuscoConfig(unittest.TestCase):
maxDiff = None
def setUp(self):
self.maxDiff = None
self.base_config = "config/config.ini"
self.params = {
"auto-lineage": False,
"auto-lineage-euk": False,
"auto-lineage-prok": False,
"config_file": None,
"cpu": None,
"evalue": None,
"force": False,
"help": "==SUPPRESS==",
"in": None,
"limit": None,
"lineage_dataset": None,
"list_datasets": "==SUPPRESS==",
"mode": None,
"offline": False,
"out": None,
"out_path": None,
"quiet": False,
"restart": False,
"metaeuk_parameters": None,
"metaeuk_rerun_parameters": None,
"use_augustus": False,
"augustus_parameters": None,
"augustus_species": None,
"long": False,
"datasets_version": None,
"download_base_url": None,
"download_path": None,
"update-data": False,
"version": "==SUPPRESS==",
"tar": False,
}
self.test_params = {
"in": "input_test",
"out": "output_test",
"mode": "mode_test",
}
self.config_structure = {
"augustus": ["path", "command"],
"busco_run": [
"in",
"out",
"out_path",
"mode",
"auto-lineage",
"auto-lineage-prok",
"auto-lineage-euk",
"cpu",
"force",
"restart",
"download_path",
"datasets_version",
"quiet",
"offline",
"long",
"augustus_parameters",
"augustus_species",
"download_base_url",
"lineage_dataset",
"update-data",
"metaeuk_parameters",
"metaeuk_rerun_parameters",
"evalue",
"limit",
"use_augustus",
"batch_mode",
"tar",
],
"etraining": ["path", "command"],
"gff2gbSmallDNA.pl": ["path", "command"],
"hmmsearch": ["path", "command"],
"makeblastdb": ["path", "command"],
"metaeuk": ["path", "command"],
"new_species.pl": ["path", "command"],
"optimize_augustus.pl": ["path", "command"],
"prodigal": ["path", "command"],
"sepp": ["path", "command"],
"tblastn": ["path", "command"],
}
def test_read_config_file(self):
config = BuscoConfig.BaseConfig()
config.conf_file = self.base_config
config._load_config_file()
self.assertIn("busco_run", config.sections())
def test_read_config_file_ioerror(self):
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BaseConfig()
config.conf_file = "/path/not/found"
config._load_config_file()
def test_read_config_file_parseerror(self):
config_path = "tests/config_parseerror_test.ini"
test_config_contents = "in=input_file\n"
with open(config_path, "w") as f:
f.write(test_config_contents)
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BaseConfig()
config.conf_file = config_path
config._load_config_file()
os.remove(config_path)
def test_read_config_file_duplicateerror(self):
config_path = "tests/config_duplicate_test.ini"
test_config_contents = "[busco_run]\n" "in=input_file\n" "in=input_file\n"
with open(config_path, "w") as f:
f.write(test_config_contents)
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BaseConfig()
config.conf_file = config_path
config._load_config_file()
os.remove(config_path)
def test_config_update_args_bool(self):
update_params = {
"force": True,
"offline": True,
"quiet": True,
"restart": True,
}
config = BuscoConfig.BuscoConfigMain(self.base_config, update_params)
config.configure()
self.assertEqual(
update_params,
{key: config.getboolean("busco_run", key) for key in update_params.keys()},
)
def test_config_update_args_nonbool(self):
update_params = {
"cpu": "10",
"evalue": "0.01",
"in": "input_file",
"limit": "1",
"lineage_dataset": "test",
"mode": "test",
"out": "test",
"out_path": "test",
}
config = BuscoConfig.BuscoConfigMain(self.base_config, update_params)
config.configure()
self.assertEqual(
update_params,
{key: config.get("busco_run", key) for key in update_params.keys()},
)
def test_config_default_params(self):
correct_default_params = {
"auto-lineage": False,
"auto-lineage-euk": False,
"auto-lineage-prok": False,
"cpu": "1",
"datasets_version": "odb10",
"download_base_url": "https://busco-data.ezlab.org/v5/data/",
"download_path": os.path.join(os.getcwd(), "busco_downloads"),
"evalue": "0.001",
"force": False,
"limit": "3",
"long": False,
"offline": False,
"out_path": os.getcwd(),
"quiet": False,
"restart": False,
"update-data": False,
"use_augustus": False,
}
config = BuscoConfig.BuscoConfigMain(self.base_config, {})
config.configure()
config_default_filled = {
key: config.get("busco_run", key) for key in correct_default_params
}
self.assertEqual(
{key: str(val) for key, val in correct_default_params.items()},
config_default_filled,
)
@patch(
"busco.BuscoConfig.BuscoConfigMain.getboolean",
side_effect=[True, False, False, True, False],
)
def test_config_auto_lineage_settings(self, *args):
for _ in range(2):
config = BuscoConfig.BuscoConfigMain(self.base_config, {})
config.configure()
self.assertEqual(config.get("busco_run", "auto-lineage"), "True")
@patch("busco.BuscoConfig.BuscoConfigMain.getboolean", return_value=True)
def test_config_auto_lineage_both_selected_warning(self, *args):
with self.assertLogs(BuscoConfig.logger, "WARNING"):
config = BuscoConfig.BuscoConfigMain(self.base_config, {})
config.configure()
self.assertEqual(config.get("busco_run", "auto-lineage-euk"), "False")
self.assertEqual(config.get("busco_run", "auto-lineage-prok"), "False")
def test_mandatory_keys_check_log(self):
with self.assertLogs(BuscoConfig.logger, 20):
params_test = {"in": "input_file", "out": "output_name", "mode": "genome"}
config = BuscoConfig.BuscoConfigMain(self.base_config, params_test)
config.configure()
config._check_mandatory_keys_exist()
def test_mandatory_keys_check_missing_param_in(self):
with self.assertRaises(BuscoConfig.BatchFatalError):
params_test = {"out": "output_name", "mode": "genome"}
config = BuscoConfig.BuscoConfigMain(self.base_config, params_test)
config.configure()
config._check_mandatory_keys_exist()
def test_mandatory_keys_check_missing_param_mode(self):
with self.assertRaises(BuscoConfig.BatchFatalError):
params_test = {"in": "input_file", "out": "output_name"}
config = BuscoConfig.BuscoConfigMain(self.base_config, params_test)
config.configure()
config._check_mandatory_keys_exist()
def test_mandatory_keys_check_missing_param_out(self):
with self.assertRaises(BuscoConfig.BatchFatalError):
params_test = {"in": "input_file", "mode": "genome"}
config = BuscoConfig.BuscoConfigMain(self.base_config, params_test)
config.configure()
config._check_mandatory_keys_exist()
def test_previous_run_check_without_existing_run(self):
output_dir = os.path.join(os.getcwd(), self.test_params["out"])
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
self.assertIsNone(config._check_no_previous_run())
def test_previous_run_check_with_existing_run_no_force(self):
previous_run_name = "test_busco_run_dir"
os.makedirs(previous_run_name, exist_ok=True)
self.test_params["out"] = previous_run_name
self.test_params["force"] = "False"
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_no_previous_run()
shutil.rmtree(previous_run_name)
def test_previous_run_check_with_existing_run_with_force_and_log(self):
previous_run_name = "test_busco_run_dir"
os.makedirs(previous_run_name, exist_ok=True)
self.test_params["out"] = previous_run_name
self.test_params["force"] = "True"
with self.assertLogs(BuscoConfig.logger, 20):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_no_previous_run()
self.assertFalse(os.path.exists(previous_run_name))
try: # In case of test failure, remove tmp folder anyway
shutil.rmtree(previous_run_name)
except FileNotFoundError:
pass
def test_previous_run_check_without_existing_run_and_restart(self):
self.test_params["restart"] = "True"
with self.assertLogs(BuscoConfig.logger, "WARNING"):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_no_previous_run()
self.assertEqual(config.getboolean("busco_run", "restart"), False)
def test_previous_run_check_with_existing_run_and_restart(self):
previous_run_name = "test_busco_run_dir"
os.makedirs(previous_run_name, exist_ok=True)
self.test_params.update({"out": previous_run_name, "restart": True})
with self.assertLogs(BuscoConfig.logger, "INFO"):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_no_previous_run()
self.assertEqual(config.getboolean("busco_run", "restart"), True)
shutil.rmtree(previous_run_name)
def test_create_required_paths(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.main_out = os.path.join(
config.get("busco_run", "out_path"), config.get("busco_run", "out")
)
config._create_required_paths()
output_dir = os.path.join(os.getcwd(), self.test_params["out"])
self.assertTrue(os.path.exists(output_dir))
shutil.rmtree(output_dir)
def test_config_structure(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
self.assertEqual(
set(config.PERMITTED_OPTIONS), set(self.config_structure["busco_run"])
)
def test_catch_disallowed_keys(self):
for section_name in self.config_structure:
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.set(section_name, "forbidden_option", "forbidden_value")
config._check_allowed_keys()
def test_out_value_check_invalid(self):
for str_format in ["/path/to/output", "output/"]:
self.test_params["out"] = str_format
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_out_value()
def test_out_value_check_valid(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
self.assertIsNone(config._check_out_value())
def test_limit_value_out_of_range(self):
for lim_val in [-1, 0, 25]:
self.test_params["limit"] = lim_val
with self.assertRaises(BuscoConfig.BatchFatalError):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_limit_value()
def test_limit_value_within_range(self):
for lim_val in [1, 20]:
self.test_params["limit"] = lim_val
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
self.assertIsNone(config._check_limit_value())
def test_evalue_nondefault(self):
self.test_params["evalue"] = 1
with self.assertLogs(BuscoConfig.logger, level="WARNING"):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_evalue()
@patch("__main__.BuscoConfig_unittests.BuscoConfig.logger.warning")
def test_evalue_default(self, mock_logger):
self.test_params["evalue"] = 0.001
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._check_evalue()
mock_logger.assert_not_called()
def test_expand_all_paths_tilde(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.set("busco_run", "download_path", "~/test_download_path")
config._expand_all_paths()
self.assertEqual(
config.get("busco_run", "download_path"),
os.path.expanduser("~/test_download_path"),
)
def test_expand_all_paths_relative_path_current_dir(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.set("busco_run", "out_path", "./test_out_path")
config._expand_all_paths()
self.assertEqual(
config.get("busco_run", "out_path"), os.path.abspath("./test_out_path")
)
def test_expand_all_paths_relative_path_parent_dir(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.set("busco_run", "in", "../test_input_file")
config._expand_all_paths()
self.assertEqual(
config.get("busco_run", "in"), os.path.abspath("../test_input_file")
)
def test_expand_all_paths_hmmsearch(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.set("hmmsearch", "path", "~/test_hmmsearch_path")
config._expand_all_paths()
self.assertEqual(
config.get("hmmsearch", "path"), os.path.expanduser("~/test_hmmsearch_path")
)
@patch(
"__main__.BuscoConfig_unittests.BuscoConfig.os.path.isdir", return_value=True
)
def test_batch_mode_true(self, *args):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.set = Mock()
config._check_batch_mode()
calls = [call("busco_run", "batch_mode", "True")]
config.set.assert_has_calls(calls)
@patch(
"__main__.BuscoConfig_unittests.BuscoConfig.os.path.isdir", return_value=False
)
@patch(
"__main__.BuscoConfig_unittests.BuscoConfig.os.path.isfile", return_value=True
)
def test_batch_mode_false_with_file(self, *args):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.set = Mock()
config._check_batch_mode()
@patch(
"__main__.BuscoConfig_unittests.BuscoConfig.os.path.isdir", return_value=False
)
@patch(
"__main__.BuscoConfig_unittests.BuscoConfig.os.path.isfile", return_value=False
)
def test_batch_mode_false_with_error(self, *args):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.set = Mock()
with self.assertRaises(BuscoConfig.BatchFatalError):
config._check_batch_mode()
def test_required_input_exists_false(self):
input_filename = "test_input_file"
if os.path.exists(input_filename):
os.remove(input_filename)
self.test_params["in"] = input_filename
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
with self.assertRaises(BuscoConfig.BatchFatalError):
config._check_required_input_exists()
@patch("__main__.BuscoConfig_unittests.BuscoConfig.BuscoDownloadManager")
def test_downloader_initialized(self, mock_downloader):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config._init_downloader()
mock_downloader.assert_called()
@patch("__main__.BuscoConfig_unittests.BuscoConfig.PrettyLog")
def test_log_config(self, mock_pretty_log):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
with self.assertLogs(BuscoConfig.logger, level="DEBUG"):
config.log_config()
mock_pretty_log.assert_called()
@patch.object(BuscoConfig.BuscoConfigMain, "log_config")
@patch.object(BuscoConfig.BuscoConfigMain, "_init_downloader")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_batch_mode")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_required_input_exists")
@patch.object(BuscoConfig.BuscoConfigMain, "_expand_all_paths")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_evalue")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_limit_value")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_out_value")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_allowed_keys")
@patch.object(BuscoConfig.BuscoConfigMain, "_create_required_paths")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_no_previous_run")
@patch.object(BuscoConfig.BuscoConfigMain, "_check_mandatory_keys_exist")
def test_validation(
self,
mock_check_mandatory_keys,
mock_check_no_previous_run,
mock_create_required_paths,
mock_check_allowed_keys,
mock_check_out_value,
mock_check_limit_value,
mock_check_evalue,
mock_expand_all_paths,
mock_check_input,
mock_check_batch,
mock_init_downloader,
mock_log_config,
):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.validate()
mock_check_mandatory_keys.assert_called()
mock_check_no_previous_run.assert_called()
mock_create_required_paths.assert_called()
mock_check_allowed_keys.assert_called()
mock_check_out_value.assert_called()
mock_check_limit_value.assert_called()
mock_check_evalue.assert_called()
mock_expand_all_paths.assert_called()
mock_check_input.assert_called()
mock_check_batch.assert_called()
mock_init_downloader.assert_called()
mock_log_config.assert_called()
def test_check_lineage_present_false(self):
try:
del self.test_params["lineage_dataset"] # just in case, probably redundant
except KeyError:
pass
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
self.assertFalse(config.check_lineage_present())
def test_check_lineage_present_true_with_dataset_version_correct(self):
self.test_params["lineage_dataset"] = "test_dataset_odb10"
self.test_params["datasets_version"] = "odb10"
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.check_lineage_present()
self.assertEqual(
config.get("busco_run", "datasets_version"),
self.test_params["datasets_version"],
)
def test_check_lineage_present_true_with_dataset_version_mismatch(self):
self.test_params["lineage_dataset"] = "test_dataset_odb10"
self.test_params["datasets_version"] = "odb11"
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
with self.assertLogs(BuscoConfig.logger, level="WARNING"):
config.check_lineage_present()
self.assertEqual(
config.get("busco_run", "datasets_version"),
self.test_params["lineage_dataset"].split("_")[-1],
)
def test_check_lineage_present_true_with_odb_missing(self):
self.test_params["lineage_dataset"] = "test_dataset"
self.test_params["datasets_version"] = "odb10"
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
config.check_lineage_present()
self.assertEqual(
config.get("busco_run", "lineage_dataset"),
"{}_{}".format(
self.test_params["lineage_dataset"],
self.test_params["datasets_version"],
),
)
def test_check_lineage_present_true_with_invalid_dataset_version(self):
self.test_params["lineage_dataset"] = "test_dataset"
self.test_params["datasets_version"] = "odb11"
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
with self.assertRaises(BuscoConfig.BatchFatalError):
config.check_lineage_present()
def test_set_results_dirname(self):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
test_dataset_path = "/path/to/lineage_dataset"
with patch("busco.BuscoConfig.BuscoConfig.set"):
config.set_results_dirname(test_dataset_path)
config.set.assert_called_with(
"busco_run", "lineage_results_dir", "run_lineage_dataset"
)
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfigAuto.set_results_dirname")
@patch("busco.BuscoConfig.BuscoConfig")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
def test_autoconfig_init_propagates_mainconfig(self, mock_propagate, *args):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
BuscoConfig.BuscoConfigAuto(config, None)
mock_propagate.assert_called_with(config)
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfig")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.set_results_dirname")
def test_autoconfig_init_sets_results_dirname(self, mock_set_dirname, *args):
BuscoConfig.BuscoConfigAuto(None, "lineage")
mock_set_dirname.assert_called_with("lineage")
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset")
@patch("busco.BuscoConfig.BuscoConfig")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
def test_autoconfig_init_creates_paths(self, mock_create_paths, *args):
BuscoConfig.BuscoConfigAuto(None, None)
mock_create_paths.assert_called()
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
@patch("busco.BuscoConfig.BuscoConfig")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.set_results_dirname")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
def test_autoconfig_init_downloads_lineage(self, mock_download_lineage, *args):
BuscoConfig.BuscoConfigAuto(None, "lineage")
mock_download_lineage.assert_called_with("lineage")
@patch("busco.BuscoConfig.BuscoConfig")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.set_results_dirname")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
def test_autoconfig_init_loads_lineage_config(self, mock_load_dataset, *args):
BuscoConfig.BuscoConfigAuto(None, None)
mock_load_dataset.assert_called()
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset")
@patch("busco.BuscoConfig.BuscoConfig.__init__")
def test_autoconfig_init_calls_super(self, mock_config_parent, *args):
BuscoConfig.BuscoConfigAuto(None, None)
mock_config_parent.assert_called()
@patch("busco.BuscoConfig.BuscoConfigAuto._create_required_paths")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
def test_propagate_config(self, *args):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.params)
config.configure()
config.downloader = Mock()
autoconfig = BuscoConfig.BuscoConfigAuto(config, "test")
autoconfig._propagate_config(config)
self.assertEqual(autoconfig, config)
@patch("busco.BuscoConfig.BuscoConfigAuto.load_dataset_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.download_lineage_file")
@patch("busco.BuscoConfig.BuscoConfigAuto._propagate_config")
@patch("busco.BuscoConfig.BuscoConfigAuto.set_results_dirname")
@patch("busco.BuscoConfig.BuscoConfigAuto.get", return_value="test")
@patch("busco.BuscoConfig.BuscoConfig._create_required_paths")
def test_autolineage_create_path_method_calls_parent(
self, mock_create_paths, *args
):
config = BuscoConfig.BuscoConfigMain(self.base_config, self.test_params)
config.configure()
BuscoConfig.BuscoConfigAuto(config, None)
mock_create_paths.assert_called_with("test/auto_lineage")
def tearDown(self):
self.test_params = {}
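# A minimal sketch of how this test module could be run (assumptions: the file is
# named BuscoConfig_unittests.py and ends with a unittest.main() guard, as the
# "__main__.BuscoConfig_unittests" patch targets suggest):
#
#   python BuscoConfig_unittests.py
#   python -m pytest BuscoConfig_unittests.py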
| 21,546
| 5,945
| 23
|
be1e37cc8ea7da820395a7b6bd5ced6c48fb173b
| 1,366
|
py
|
Python
|
migrations/versions/8fde055f9d29_add_driver_switch_activity_status.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | null | null | null |
migrations/versions/8fde055f9d29_add_driver_switch_activity_status.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | 8
|
2021-04-19T17:47:55.000Z
|
2022-02-16T17:40:18.000Z
|
migrations/versions/8fde055f9d29_add_driver_switch_activity_status.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | null | null | null |
"""Add driver switch activity status
Revision ID: 8fde055f9d29
Revises: 8fe63e4276dc
Create Date: 2020-02-15 16:46:48.890628
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "8fde055f9d29"
down_revision = "8fe63e4276dc"
branch_labels = None
depends_on = None
| 23.551724
| 62
| 0.590044
|
"""Add driver switch activity status
Revision ID: 8fde055f9d29
Revises: 8fe63e4276dc
Create Date: 2020-02-15 16:46:48.890628
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "8fde055f9d29"
down_revision = "8fe63e4276dc"
branch_labels = None
depends_on = None
def upgrade():
op.drop_constraint("activityvalidationstatus", "activity")
op.alter_column(
"activity",
"validation_status",
type_=sa.Enum(
"no_activity_switch",
"driver_switch",
"unauthorized_submitter",
"conflicting_with_history",
"validated",
"pending",
"rejected",
name="activityvalidationstatus",
native_enum=False,
),
nullable=False,
)
# ### end Alembic commands ###
def downgrade():
op.drop_constraint("activityvalidationstatus", "activity")
op.alter_column(
"activity",
"validation_status",
type_=sa.Enum(
"no_activity_switch",
"unauthorized_submitter",
"conflicting_with_history",
"validated",
"pending",
"rejected",
name="activityvalidationstatus",
native_enum=False,
),
nullable=False,
)
# ### end Alembic commands ###
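# Usage sketch (standard Alembic CLI, not part of the migration file):
#
#   alembic upgrade head     # runs upgrade(), adding 'driver_switch' to the enum
#   alembic downgrade -1     # runs downgrade(), removing it again
#
# Because the Enum is declared with native_enum=False, the allowed values live in
# the 'activityvalidationstatus' CHECK constraint on a VARCHAR column, which is
# why that constraint is dropped explicitly before the column type is altered.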
| 1,001
| 0
| 46
|
a5ae2c8bc00df24165fd508fc5d03ce301c458c9
| 11,501
|
py
|
Python
|
uweclang/corpus/manager.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
uweclang/corpus/manager.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
uweclang/corpus/manager.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
"""UWEC Language Tools manager module
Provides functions for defining and managing a corpus.
"""
# Python 3 forward compatability imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
import hashlib
import uweclang.batch
from itertools import chain
# Import async module.
import trollius as asyncio
from trollius import From
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
def _default_filter(meta_data):
"""The default meta data filter which accepts all files. """
return True
def default_metadata_function(filename):
"""A function producing a dictionary of metadata for a given file. This
is the default implementation producing the file name, location, extension,
and file size.
Arguments:
filename (str): The name of the file.
Returns:
None: If the file path is invalid.
(dict): A dictionary containing the metadata.
"""
if not (filename and os.path.isfile(filename)):
return None
metadata = dict()
# Collect basic metadata:
metadata['filename'] = os.path.basename(filename)
metadata['location'] = os.path.abspath(filename)
ext = uweclang.split_ext(filename)
metadata['base'] = ext[0]
metadata['extension'] = ext[1]
metadata['size'] = os.path.getsize(filename)
# Get word count:
# with open(os.path.abspath(filename), 'r') as f:
# words = 0
# buf_size = 1024 * 1024
# read_f = f.read # loop optimization
# buf = read_f(buf_size)
# while buf:
# try:
# words += buf.count('/')
# buf = read_f(buf_size)
# except UnicodeDecodeError as e:
# pass # Skip decode error?
metadata['word_count'] = 0#words
return metadata
def get_file_md5(filename):
"""Returns the MD5 hash of the given file.
"""
block_size = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(block_size)
return hasher.hexdigest()
class Corpus(object):
"""A corpus object for managing a collection of tagged text files.
Attributes:
file_metadata (dict): A dictionary containing corpus meta data for
files indexed by ID.
"""
def add_files(self,
search_locations,
extensions=None,
recursive=False):
"""Searches for files in the given locations and adds them to the
corpus.
Arguments:
search_locations ([str]): A list of files and directories to search.
extensions ([str]): The file extensions to find in directories.
Defaults to None, which will find all files.
recursive: (bool): Whether to search directories recursively.
Note: Files given in search_locations that do not have the specified
extensions will be included in the output. The extensions argument
        only affects files in the directories given.
"""
log = logging.getLogger('uweclang.corpus.manager')
files = uweclang.get_files(search_locations,
extensions=extensions,
recursive=recursive)
self._file_count += files[1]
for f in files[0]:
log.debug('Adding file %s', str(f))
# Get file meta data:
self.file_metadata[self._current_id] = self._meta_op(f)
meta = self.file_metadata[self._current_id]
meta['corpus_id'] = self._current_id
# meta['MD5'] = get_file_md5(f)
# Get file count:
self._word_count += meta['word_count']
# Set next file ID:
self._current_id += 1
# Log File add.
log.info('Adding %s files to corpus.', self._file_count)
    @property
    def word_count(self):
        return self._word_count
    @property
    def file_count(self):
        return self._file_count
def get_file_ids(self, predicate=None):
"""Returns a list of file ids in the corpus.
Arguments:
predicate (dict -> bool): A predicate for selecting files based on
metadata. Only file ids satisfying the predicate will be
returned.
"""
if predicate:
return (k for k in self.file_metadata.keys()
                    if predicate(self.file_metadata[k]))
else:
return self.file_metadata.keys()
def get_file_text(self, file_id):
"""Returns the tagged text of the file given by its ID."""
if not self.file_metadata.get(file_id):
return None
with open(self.file_metadata[file_id]['location'], 'r') as f:
return f.read()
def file_modified(self, file_id):
        Returns true if the file's MD5 hash has changed since it was added
to the corpus.
"""
if not self.file_metadata.get(file_id):
return None
md5 = get_file_md5(self.file_metadata[file_id]['location'])
return md5 != self.file_metadata[file_id]['MD5']
def get_file_metadata(self, file_id):
"""Returns the text of the file associated with the given file_id."""
return self.file_metadata.get(file_id)
def get_id_for_file(self, filename):
"""Returns the id of the given file in the corpus or None if it is not
present.
"""
for k, v in self.file_metadata.items():
if v['location'] == os.path.abspath(filename):
return k
return None
def files(self, meta_filter=None, exclude_modified=False):
"""Returns an iterator over the metadata and text of each file in the
corpus.
"""
meta_filter = meta_filter or _default_filter
for x in self.get_file_ids():
if (meta_filter(self.get_file_metadata(x))
and not (exclude_modified and self.file_modified(x))):
yield (self.get_file_metadata(x), self.get_file_text(x))
def execute_queries(
self,
queries,
definitions=None,
meta_filter=None,
exclude_modified=False):
"""Runs the given queries on the corpus asynchronously.
Arguments:
queries ([Query]): The queries to run.
definitions (dict): A dictionary defining query terms.
meta_filter (dict -> bool): A function taking file meta data and
returning whether the file should be queried.
exclude_modified (bool): Whether to exclude modified files from
the query.
Returns:
[Result]: An iterator producing the results of the query.
"""
log = logging.getLogger('uweclang.corpus.manager')
results = []
# Get filtered files from corpus.
try:
files = self.files(
meta_filter=meta_filter,
exclude_modified=exclude_modified)
except Exception as e:
raise CorpusException(e)
try:
log.debug('Executing query batch.')
for index, (meta, tagged) in enumerate(files):
# Extract TaggedToken list from file.
text = list(chain.from_iterable(uweclang.read_tagged_string(tagged)))
# Execute search.
for i, query in enumerate(queries):
log.debug('Running query #%d on file #%d', i, index)
res = query.match(text, source_id=index, definitions=definitions)
if res:
results.append(res)
return chain.from_iterable(results)
except Exception as e:
raise QueryExecutionError(e)
def execute_queries_async(
self,
queries,
definitions=None,
meta_filter=None,
exclude_modified=False):
"""Runs the given queries on the corpus asynchronously.
Arguments:
queries ([Query]): The queries to run.
definitions (dict): A dictionary defining query terms.
meta_filter (dict -> bool): A function taking file meta data and
returning whether the file should be queried.
exclude_modified (bool): Whether to exclude modified files from
the query.
Returns:
[Result]: An iterator producing the results of the query.
"""
log = logging.getLogger('uweclang.corpus.manager')
results = []
# Get filtered files from corpus.
try:
files = self.files(
meta_filter=meta_filter,
exclude_modified=exclude_modified)
except Exception as e:
raise CorpusException(e)
status = {
'completed' : 0,
'total': 0,
} # Dictionary needed since `nonlocal` is not in Python 2.7.
log.debug('Executing query batch (async.)')
# Function for searching a single file.
# Worker function for running a file search.
# Create asynchronous task list.
loop = asyncio.get_event_loop()
tasks = []
for index, (meta, tagged) in enumerate(files):
log.debug('Added task %d', index)
tasks.append(asyncio.ensure_future(worker(meta, tagged, index)))
# Run tasks.
status['total'] = len(tasks)
log.info('Starting %d tasks.', status['total'])
data = loop.run_until_complete(asyncio.gather(*tuple(tasks)))
# Shutdown event loop and logger.
loop.close()
logging.shutdown()
results = (task.result() for task in tasks if task.result())
return chain.from_iterable(results)
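# A minimal usage sketch (assumption, not part of the module): build a corpus from
# a directory of tagged files and inspect it; the directory name and extension are
# placeholders.
#
#   corpus = Corpus(['tagged_files/'], extensions=['.tag'], recursive=True)
#   print(corpus.file_count, 'files,', corpus.word_count, 'words')
#   meta = corpus.get_file_metadata(0)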
| 33.144092
| 85
| 0.589949
|
"""UWEC Language Tools manager module
Provides functions for defining and managing a corpus.
"""
# Python 3 forward compatability imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
import hashlib
import uweclang.batch
from itertools import chain
# Import async module.
import trollius as asyncio
from trollius import From
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
def _default_filter(meta_data):
"""The default meta data filter which accepts all files. """
return True
def default_metadata_function(filename):
"""A function producing a dictionary of metadata for a given file. This
is the default implementation producing the file name, location, extension,
and file size.
Arguments:
filename (str): The name of the file.
Returns:
None: If the file path is invalid.
(dict): A dictionary containing the metadata.
"""
if not (filename and os.path.isfile(filename)):
return None
metadata = dict()
# Collect basic metadata:
metadata['filename'] = os.path.basename(filename)
metadata['location'] = os.path.abspath(filename)
ext = uweclang.split_ext(filename)
metadata['base'] = ext[0]
metadata['extension'] = ext[1]
metadata['size'] = os.path.getsize(filename)
# Get word count:
# with open(os.path.abspath(filename), 'r') as f:
# words = 0
# buf_size = 1024 * 1024
# read_f = f.read # loop optimization
# buf = read_f(buf_size)
# while buf:
# try:
# words += buf.count('/')
# buf = read_f(buf_size)
# except UnicodeDecodeError as e:
# pass # Skip decode error?
metadata['word_count'] = 0#words
return metadata
def get_file_md5(filename):
"""Returns the MD5 hash of the given file.
"""
block_size = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(block_size)
return hasher.hexdigest()
class Corpus(object):
"""A corpus object for managing a collection of tagged text files.
Attributes:
file_metadata (dict): A dictionary containing corpus meta data for
files indexed by ID.
"""
def __init__(self,
search_locations=[],
extensions=None,
recursive=False,
meta_op=default_metadata_function):
# Save metadata function.
self._meta_op = meta_op
self.file_metadata = dict()
self._current_id = 0
self._file_count = 0
self._word_count = 0
self.add_files(search_locations,
extensions=extensions,
recursive=recursive)
def add_files(self,
search_locations,
extensions=None,
recursive=False):
"""Searches for files in the given locations and adds them to the
corpus.
Arguments:
search_locations ([str]): A list of files and directories to search.
extensions ([str]): The file extensions to find in directories.
Defaults to None, which will find all files.
recursive: (bool): Whether to search directories recursively.
Note: Files given in search_locations that do not have the specified
extensions will be included in the output. The extensions argument
        only affects files in the directories given.
"""
log = logging.getLogger('uweclang.corpus.manager')
files = uweclang.get_files(search_locations,
extensions=extensions,
recursive=recursive)
self._file_count += files[1]
for f in files[0]:
log.debug('Adding file %s', str(f))
# Get file meta data:
self.file_metadata[self._current_id] = self._meta_op(f)
meta = self.file_metadata[self._current_id]
meta['corpus_id'] = self._current_id
# meta['MD5'] = get_file_md5(f)
# Get file count:
self._word_count += meta['word_count']
# Set next file ID:
self._current_id += 1
# Log File add.
log.info('Adding %s files to corpus.', self._file_count)
@property
def word_count(self):
return self._word_count
@property
def file_count(self):
return self._file_count
def get_file_ids(self, predicate=None):
"""Returns a list of file ids in the corpus.
Arguments:
predicate (dict -> bool): A predicate for selecting files based on
metadata. Only file ids satisfying the predicate will be
returned.
"""
if predicate:
return (k for k in self.file_metadata.keys()
                    if predicate(self.file_metadata[k]))
else:
return self.file_metadata.keys()
def get_file_text(self, file_id):
"""Returns the tagged text of the file given by its ID."""
if not self.file_metadata.get(file_id):
return None
with open(self.file_metadata[file_id]['location'], 'r') as f:
return f.read()
def file_modified(self, file_id):
        Returns true if the file's MD5 hash has changed since it was added
to the corpus.
"""
if not self.file_metadata.get(file_id):
return None
md5 = get_file_md5(self.file_metadata[file_id]['location'])
return md5 != self.file_metadata[file_id]['MD5']
def get_file_metadata(self, file_id):
"""Returns the text of the file associated with the given file_id."""
return self.file_metadata.get(file_id)
def get_id_for_file(self, filename):
"""Returns the id of the given file in the corpus or None if it is not
present.
"""
for k, v in self.file_metadata.items():
if v['location'] == os.path.abspath(filename):
return k
return None
def files(self, meta_filter=None, exclude_modified=False):
"""Returns an iterator over the metadata and text of each file in the
corpus.
"""
meta_filter = meta_filter or _default_filter
for x in self.get_file_ids():
if (meta_filter(self.get_file_metadata(x))
and not (exclude_modified and self.file_modified(x))):
yield (self.get_file_metadata(x), self.get_file_text(x))
def execute_queries(
self,
queries,
definitions=None,
meta_filter=None,
exclude_modified=False):
"""Runs the given queries on the corpus asynchronously.
Arguments:
queries ([Query]): The queries to run.
definitions (dict): A dictionary defining query terms.
meta_filter (dict -> bool): A function taking file meta data and
returning whether the file should be queried.
exclude_modified (bool): Whether to exclude modified files from
the query.
Returns:
[Result]: An iterator producing the results of the query.
"""
log = logging.getLogger('uweclang.corpus.manager')
results = []
# Get filtered files from corpus.
try:
files = self.files(
meta_filter=meta_filter,
exclude_modified=exclude_modified)
except Exception as e:
raise CorpusException(e)
try:
log.debug('Executing query batch.')
for index, (meta, tagged) in enumerate(files):
# Extract TaggedToken list from file.
text = list(chain.from_iterable(uweclang.read_tagged_string(tagged)))
# Execute search.
for i, query in enumerate(queries):
log.debug('Running query #%d on file #%d', i, index)
res = query.match(text, source_id=index, definitions=definitions)
if res:
results.append(res)
return chain.from_iterable(results)
except Exception as e:
raise QueryExecutionError(e)
def execute_queries_async(
self,
queries,
definitions=None,
meta_filter=None,
exclude_modified=False):
"""Runs the given queries on the corpus asynchronously.
Arguments:
queries ([Query]): The queries to run.
definitions (dict): A dictionary defining query terms.
meta_filter (dict -> bool): A function taking file meta data and
returning whether the file should be queried.
exclude_modified (bool): Whether to exclude modified files from
the query.
Returns:
[Result]: An iterator producing the results of the query.
"""
log = logging.getLogger('uweclang.corpus.manager')
results = []
# Get filtered files from corpus.
try:
files = self.files(
meta_filter=meta_filter,
exclude_modified=exclude_modified)
except Exception as e:
raise CorpusException(e)
status = {
'completed' : 0,
'total': 0,
} # Dictionary needed since `nonlocal` is not in Python 2.7.
log.debug('Executing query batch (async.)')
# Function for searching a single file.
def query_file(meta, tagged, index):
results = []
# Extract TaggedToken list from file.
text = list(chain.from_iterable(uweclang.read_tagged_string(tagged)))
# Execute search.
try:
for i, query in enumerate(queries):
res = query.match(text, source_id=index, definitions=definitions)
if res:
results.extend(res)
except Exception as e:
raise QueryExecutionError(e)
# Update status variables.
status['completed'] += 1
log.debug('Completed file %d', index)
percent = int(status['completed'] / status['total'] * 100)
log.info('%d%% complete', percent)
return results
# Worker function for running a file search.
@asyncio.coroutine
def worker(meta, tagged, index):
log.debug('Starting file %d', index)
return loop.run_in_executor(None, query_file, meta, tagged, index)
# Create asynchronous task list.
loop = asyncio.get_event_loop()
tasks = []
for index, (meta, tagged) in enumerate(files):
log.debug('Added task %d', index)
tasks.append(asyncio.ensure_future(worker(meta, tagged, index)))
# Run tasks.
status['total'] = len(tasks)
log.info('Starting %d tasks.', status['total'])
data = loop.run_until_complete(asyncio.gather(*tuple(tasks)))
# Shutdown event loop and logger.
loop.close()
logging.shutdown()
results = (task.result() for task in tasks if task.result())
return chain.from_iterable(results)
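# A minimal usage sketch (assumption, not part of the module): the directory,
# extension and query list are placeholders, and query objects are assumed to
# provide the .match(text, source_id=..., definitions=...) interface used above.
if __name__ == '__main__':
    corpus = Corpus(['tagged_files/'], extensions=['.tag'], recursive=True)
    print('corpus size:', corpus.file_count, 'files')
    # execute_queries_async drives its own event loop internally, so it is
    # called exactly like the synchronous execute_queries.
    for result in corpus.execute_queries([], definitions={}):
        print(result)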
| 1,461
| 0
| 138
|
75dfa67ec64313cca39da2e97c93a3e2e3458650
| 4,731
|
py
|
Python
|
galaxy/python/GalaxySpectrumVVDS.py
|
AndresSixtos/pyeBOSS
|
4750908c8bc409633bef8f790133e3a1f3f0c9e4
|
[
"CC0-1.0"
] | 1
|
2017-05-23T13:03:27.000Z
|
2017-05-23T13:03:27.000Z
|
galaxy/python/GalaxySpectrumVVDS.py
|
AndresSixtos/pyeBOSS
|
4750908c8bc409633bef8f790133e3a1f3f0c9e4
|
[
"CC0-1.0"
] | null | null | null |
galaxy/python/GalaxySpectrumVVDS.py
|
AndresSixtos/pyeBOSS
|
4750908c8bc409633bef8f790133e3a1f3f0c9e4
|
[
"CC0-1.0"
] | 2
|
2017-09-26T11:17:30.000Z
|
2021-09-14T06:09:18.000Z
|
"""
.. class:: GalaxySpectrumVVDS
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
The class GalaxySpectrumVVDS is dedicated to handling VVDS spectra
"""
from os.path import join
import os
import numpy as n
import astropy.io.fits as fits
import glob
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
from LineFittingLibrary import *
lfl = LineFittingLibrary()
from filterList import *
from lineListAir import *
class GalaxySpectrumVVDS:
"""
	Loads the environment proper to the vvds survey.
	Two modes of operation: flux calibration or line fitting
:param catalog_entry: an entry of the vvds catalog
:param calibration: if the class is loaded with intention of flux calibrating the vvds data.
:param lineFits: if the class is loaded with intention of fitting line fluxes on the vvds spectra.
"""
def openObservedSpectrum(self):
"""
		reads a VVDS spectrum
returns the wavelength, the flux and the error on the flux and two arrays for masking purpose
"""
spL=glob.glob(join(self.vvds_spectra_dir,"sc_*" + str(self.catalog_entry['NUM']) + "*atm_clean.fits"))
#print spL
if len(spL)==1 :
specFileName=spL[0]
spectraHDU=fits.open(specFileName)
wl=spectraHDU[0].header['CRVAL1'] + spectraHDU[0].header['CDELT1'] * n.arange(2,spectraHDU[0].header['NAXIS1']+2)
fl=spectraHDU[0].data[0]
noiseFileName=glob.glob(join(self.vvds_spectra_dir,"sc_*"+str(self.catalog_entry['NUM'])+"*noise.fits"))[0]
noiseHDU=fits.open(noiseFileName)
flErr=noiseHDU[0].data[0]
self.wavelength,self.fluxl,self.fluxlErr=wl,fl,flErr
else :
self.wavelength,self.fluxl,self.fluxlErr= [-1,-1.],[-1,-1.],[-1,-1.]
def plotFit(self, outputFigureNameRoot, ymin = 1e-19, ymax = 1e-17):
"""
Plots the spectrum and the line fits in a few figures
"""
ok = (self.fluxl >0 ) & (self.fluxl > 1.2* self.fluxlErr)
p.figure(1,(12,4))
p.axes([0.1,0.2,0.85,0.75])
p.errorbar(self.wavelength[ok],self.fluxl[ok]/self.catalog_entry['fo'],yerr = self.fluxlErr[ok]/self.catalog_entry['fo'], linewidth=1, alpha= 0.4, label='spectrum')
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.savefig( outputFigureNameRoot + "-all.png" )
p.clf()
a0_1 = (1+self.catalog_entry['Z'])*O2_3727
a0_2 = (1+self.catalog_entry['Z'])*O2_3729
continu= self.catalog_entry['O2_3728_continu']
aas =n.arange(self.catalog_entry['O2_3728_a0']-70, self.catalog_entry['O2_3728_a0']+70,0.1)
flMod=lambda aa,sigma,F0,sh :continu+ lfl.gaussianLineNC(aa,sigma,(1-sh)*F0,a0_1)+lfl.gaussianLineNC(aa,sigma,sh*F0,a0_2)
model = flMod(aas, self.catalog_entry['O2_3728_sigma'], self.catalog_entry['O2_3728_flux'],0.58 )# self.catalog_entry['O2_3728_share'])
p.figure(2,(4,4))
p.axes([0.21,0.2,0.78,0.7])
p.errorbar(self.wavelength,self.fluxl/self.catalog_entry['fo'],yerr = self.fluxlErr/self.catalog_entry['fo'])
p.plot(aas, model/self.catalog_entry['fo'],'g',label='model', lw=2)
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
p.xlim(( self.catalog_entry['O2_3728_a0']-100, self.catalog_entry['O2_3728_a0']+100))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.title('[OII] 3727')
p.savefig( outputFigureNameRoot + "-O2_3728.png")
p.clf()
a0 = self.catalog_entry['O3_5007_a0']
continu= self.catalog_entry['O3_5007_continu']
aas =n.arange(self.catalog_entry['O3_5007_a0']-70, self.catalog_entry['O3_5007_a0']+70,0.1)
flMod=lambda aa,sigma,F0: lfl.gaussianLine(aa,sigma,F0,a0,continu)
model = flMod(aas, self.catalog_entry['O3_5007_sigma'], self.catalog_entry['O3_5007_flux'])
p.figure(2,(4,4))
p.axes([0.21,0.2,0.78,0.7])
p.errorbar(self.wavelength,self.fluxl/self.catalog_entry['fo'],yerr = self.fluxlErr/self.catalog_entry['fo'])
p.plot(aas, model/self.catalog_entry['fo'],'g',label='model', lw =2)
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
p.xlim(( self.catalog_entry['O3_5007_a0']-100, self.catalog_entry['O3_5007_a0']+100))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.title('[OIII] 5007')
p.savefig( outputFigureNameRoot + "-O3_5007.png")
p.clf()
| 39.756303
| 166
| 0.695413
|
"""
.. class:: GalaxySpectrumVVDS
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
The class GalaxySpectrumVVDS is dedicated to handling VVDS spectra
"""
from os.path import join
import os
import numpy as n
import astropy.io.fits as fits
import glob
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
from LineFittingLibrary import *
lfl = LineFittingLibrary()
from filterList import *
from lineListAir import *
class GalaxySpectrumVVDS:
"""
	Loads the environment proper to the vvds survey.
	Two modes of operation: flux calibration or line fitting
:param catalog_entry: an entry of the vvds catalog
:param calibration: if the class is loaded with intention of flux calibrating the vvds data.
:param lineFits: if the class is loaded with intention of fitting line fluxes on the vvds spectra.
"""
def __init__(self,catalog_entry,lineFits=False):
self.catalog_entry=catalog_entry
self.database_dir = os.environ['DATA_DIR']
self.vvds_dir = join(self.database_dir,"VVDS")
self.vvds_catalog_dir = join(self.vvds_dir,"catalogs")
self.vvds_spectra_dir = join(self.vvds_dir,"spectra")
def openObservedSpectrum(self):
"""
		reads a VVDS spectrum
returns the wavelength, the flux and the error on the flux and two arrays for masking purpose
"""
spL=glob.glob(join(self.vvds_spectra_dir,"sc_*" + str(self.catalog_entry['NUM']) + "*atm_clean.fits"))
#print spL
if len(spL)==1 :
specFileName=spL[0]
spectraHDU=fits.open(specFileName)
wl=spectraHDU[0].header['CRVAL1'] + spectraHDU[0].header['CDELT1'] * n.arange(2,spectraHDU[0].header['NAXIS1']+2)
fl=spectraHDU[0].data[0]
noiseFileName=glob.glob(join(self.vvds_spectra_dir,"sc_*"+str(self.catalog_entry['NUM'])+"*noise.fits"))[0]
noiseHDU=fits.open(noiseFileName)
flErr=noiseHDU[0].data[0]
self.wavelength,self.fluxl,self.fluxlErr=wl,fl,flErr
else :
self.wavelength,self.fluxl,self.fluxlErr= [-1,-1.],[-1,-1.],[-1,-1.]
def plotFit(self, outputFigureNameRoot, ymin = 1e-19, ymax = 1e-17):
"""
Plots the spectrum and the line fits in a few figures
"""
ok = (self.fluxl >0 ) & (self.fluxl > 1.2* self.fluxlErr)
p.figure(1,(12,4))
p.axes([0.1,0.2,0.85,0.75])
p.errorbar(self.wavelength[ok],self.fluxl[ok]/self.catalog_entry['fo'],yerr = self.fluxlErr[ok]/self.catalog_entry['fo'], linewidth=1, alpha= 0.4, label='spectrum')
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.savefig( outputFigureNameRoot + "-all.png" )
p.clf()
a0_1 = (1+self.catalog_entry['Z'])*O2_3727
a0_2 = (1+self.catalog_entry['Z'])*O2_3729
continu= self.catalog_entry['O2_3728_continu']
aas =n.arange(self.catalog_entry['O2_3728_a0']-70, self.catalog_entry['O2_3728_a0']+70,0.1)
flMod=lambda aa,sigma,F0,sh :continu+ lfl.gaussianLineNC(aa,sigma,(1-sh)*F0,a0_1)+lfl.gaussianLineNC(aa,sigma,sh*F0,a0_2)
model = flMod(aas, self.catalog_entry['O2_3728_sigma'], self.catalog_entry['O2_3728_flux'],0.58 )# self.catalog_entry['O2_3728_share'])
p.figure(2,(4,4))
p.axes([0.21,0.2,0.78,0.7])
p.errorbar(self.wavelength,self.fluxl/self.catalog_entry['fo'],yerr = self.fluxlErr/self.catalog_entry['fo'])
p.plot(aas, model/self.catalog_entry['fo'],'g',label='model', lw=2)
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
p.xlim(( self.catalog_entry['O2_3728_a0']-100, self.catalog_entry['O2_3728_a0']+100))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.title('[OII] 3727')
p.savefig( outputFigureNameRoot + "-O2_3728.png")
p.clf()
a0 = self.catalog_entry['O3_5007_a0']
continu= self.catalog_entry['O3_5007_continu']
aas =n.arange(self.catalog_entry['O3_5007_a0']-70, self.catalog_entry['O3_5007_a0']+70,0.1)
flMod=lambda aa,sigma,F0: lfl.gaussianLine(aa,sigma,F0,a0,continu)
model = flMod(aas, self.catalog_entry['O3_5007_sigma'], self.catalog_entry['O3_5007_flux'])
p.figure(2,(4,4))
p.axes([0.21,0.2,0.78,0.7])
p.errorbar(self.wavelength,self.fluxl/self.catalog_entry['fo'],yerr = self.fluxlErr/self.catalog_entry['fo'])
p.plot(aas, model/self.catalog_entry['fo'],'g',label='model', lw =2)
p.xlabel('wavelength [A]')
p.ylabel(r'f$_\lambda$ [erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]')
p.yscale('log')
p.ylim((ymin, ymax))
p.xlim(( self.catalog_entry['O3_5007_a0']-100, self.catalog_entry['O3_5007_a0']+100))
gl = p.legend(loc=0,fontsize=12)
gl.set_frame_on(False)
p.title('[OIII] 5007')
p.savefig( outputFigureNameRoot + "-O3_5007.png")
p.clf()
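# A minimal usage sketch (assumption, not part of the module): 'entry' is a row of
# the VVDS catalog carrying at least the 'NUM', 'Z', 'fo' and *_a0/_sigma/_flux/
# _continu line-fit columns used above, and the DATA_DIR environment variable must
# point at the directory containing the VVDS data tree.
#
#   spec = GalaxySpectrumVVDS(entry)
#   spec.openObservedSpectrum()
#   spec.plotFit('vvds_' + str(entry['NUM']))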
| 269
| 0
| 23
|
4d85537526d1f1be42b55ee7c3665cfaba14c3d0
| 460
|
py
|
Python
|
setup.py
|
EBjerrum/RAscore
|
d7430abeeb4246bcd9d2314e5ca9e00963dfb7ba
|
[
"MIT"
] | null | null | null |
setup.py
|
EBjerrum/RAscore
|
d7430abeeb4246bcd9d2314e5ca9e00963dfb7ba
|
[
"MIT"
] | null | null | null |
setup.py
|
EBjerrum/RAscore
|
d7430abeeb4246bcd9d2314e5ca9e00963dfb7ba
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name="RAscore", # Replace with your own username
version="2020.9",
author="Reymond Group/Molecular AI AstraZeneca",
author_email="amol.thakkar@dcb.unibe.ch",
license="MIT",
description="Computation of retrosynthetic accessibility from machine learening of CASP predictions",
url="https://github.com/reymond-group/RAscore",
packages=setuptools.find_packages(),
python_requires='>=3.7',
)
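# Installation sketch (standard setuptools workflow, not part of the file):
#   pip install .        # or: pip install -e . for an editable install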
| 32.857143
| 105
| 0.728261
|
import setuptools
setuptools.setup(
name="RAscore", # Replace with your own username
version="2020.9",
author="Reymond Group/Molecular AI AstraZeneca",
author_email="amol.thakkar@dcb.unibe.ch",
license="MIT",
description="Computation of retrosynthetic accessibility from machine learening of CASP predictions",
url="https://github.com/reymond-group/RAscore",
packages=setuptools.find_packages(),
python_requires='>=3.7',
)
| 0
| 0
| 0
|
8ad32c038b411b3dc200c3cd070929e827a76ab5
| 3,559
|
py
|
Python
|
tests/epyccel/test_epyccel_complex_func.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 206
|
2018-06-28T00:28:47.000Z
|
2022-03-29T05:17:03.000Z
|
tests/epyccel/test_epyccel_complex_func.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 670
|
2018-07-23T11:02:24.000Z
|
2022-03-30T07:28:05.000Z
|
tests/epyccel/test_epyccel_complex_func.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 19
|
2019-09-19T06:01:00.000Z
|
2022-03-29T05:17:06.000Z
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
import numpy as np
import pytest
from numpy.random import rand, randint
import modules.complex_func as mod
from pyccel.epyccel import epyccel
@pytest.mark.parametrize("f", [ mod.create_complex_literal__int_int,
mod.create_complex_literal__int_float,
mod.create_complex_literal__int_complex,
mod.create_complex_literal__float_int,
mod.create_complex_literal__float_float,
mod.create_complex_literal__float_complex,
mod.create_complex_literal__complex_int,
mod.create_complex_literal__complex_float,
mod.create_complex_literal__complex_complex,
                                mod.cast_complex_literal] )
def test_create_complex_literal(f, language):
    f_epyc = epyccel(f, language = language)
    assert f_epyc() == f()
| 31.776786
| 71
| 0.676595
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
import numpy as np
import pytest
from numpy.random import rand, randint
import modules.complex_func as mod
from pyccel.epyccel import epyccel
@pytest.mark.parametrize("f", [ mod.create_complex_literal__int_int,
mod.create_complex_literal__int_float,
mod.create_complex_literal__int_complex,
mod.create_complex_literal__float_int,
mod.create_complex_literal__float_float,
mod.create_complex_literal__float_complex,
mod.create_complex_literal__complex_int,
mod.create_complex_literal__complex_float,
mod.create_complex_literal__complex_complex,
mod.cast_complex_literal] )
def test_create_complex_literal(f, language):
f_epyc = epyccel(f, language = language)
assert f_epyc() == f()
def test_create_complex_var__int_int(language):
f = mod.create_complex_var__int_int
f_epyc = epyccel(f, language = language)
a = randint(100)
b = randint(100)
assert f_epyc(a,b) == f(a,b)
def test_create_complex_var__int_complex(language):
f = mod.create_complex_var__int_complex
f_epyc = epyccel(f, language = language)
a = randint(100)
b = complex(randint(100), randint(100))
assert f_epyc(a,b) == f(a,b)
def test_create_complex_var__complex_float(language):
f = mod.create_complex_var__complex_float
f_epyc = epyccel(f, language = language)
a = complex(randint(100), randint(100))
b = rand()*100
assert f_epyc(a,b) == f(a,b)
def test_create_complex_var__complex_complex(language):
f = mod.create_complex_var__complex_complex
f_epyc = epyccel(f, language = language)
a = complex(randint(100), randint(100))
b = complex(randint(100), randint(100))
assert f_epyc(a,b) == f(a,b)
def test_create_complex__int_int(language):
f = mod.create_complex__int_int
f_epyc = epyccel(f, language = language)
a = randint(100)
assert f_epyc(a) == f(a)
def test_create_complex_0__int_int(language):
f = mod.create_complex_0__int_int
f_epyc = epyccel(f, language = language)
a = randint(100)
assert f_epyc(a) == f(a)
def test_create_complex__float_float(language):
f = mod.create_complex__float_float
f_epyc = epyccel(f, language = language)
a = rand()*100
assert f_epyc(a) == f(a)
def test_create_complex_0__float_float(language):
f = mod.create_complex_0__float_float
f_epyc = epyccel(f, language = language)
a = rand()*100
assert f_epyc(a) == f(a)
def test_create_complex__complex_complex(language):
f = mod.create_complex__complex_complex
f_epyc = epyccel(f, language = language)
a = complex(randint(100), randint(100))
assert f_epyc(a) == f(a)
def test_cast_complex_1(language):
f = mod.cast_complex_1
f_epyc = epyccel(f, language = language)
a = np.complex64(complex(randint(100), randint(100)))
assert np.isclose(f_epyc(a), f(a), rtol = 1e-7, atol = 1e-8)
def test_cast_complex_2(language):
f = mod.cast_complex_2
f_epyc = epyccel(f, language = language)
a = np.complex128(complex(randint(100), randint(100)))
assert f_epyc(a) == f(a)
def test_cast_float_complex(language):
f = mod.cast_float_complex
f_epyc = epyccel(f, language = language)
a = rand()*100
b = complex(randint(100), randint(100))
assert f_epyc(a,b) == f(a,b)
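# A minimal standalone sketch of the epyccel workflow exercised above (the function
# name, its annotations and the chosen backend are illustrative assumptions):
#
#   from pyccel.epyccel import epyccel
#
#   def add_complex(a : complex, b : complex):
#       return a + b
#
#   add_fast = epyccel(add_complex, language='c')
#   assert add_fast(1 + 2j, 3 + 4j) == add_complex(1 + 2j, 3 + 4j)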
| 2,372
| 0
| 298
|
edbc7186a89a966ff7d588e9b0e0a99d5c18903d
| 1,180
|
py
|
Python
|
setup.py
|
glanyx/segachan
|
b7694cc44e7ac0a261d8f3412347c50b8026fd6f
|
[
"MIT"
] | null | null | null |
setup.py
|
glanyx/segachan
|
b7694cc44e7ac0a261d8f3412347c50b8026fd6f
|
[
"MIT"
] | null | null | null |
setup.py
|
glanyx/segachan
|
b7694cc44e7ac0a261d8f3412347c50b8026fd6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from sweeperbot._version import __version__
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
setup(
name="sweeperbot",
version=__version__,
description="Test",
long_description=readme + "\n\n" + history,
author="Glanyx",
author_email="mikekornet@live.co.uk",
url="https://github.com/glanyx/segachan/",
entry_points={"console_scripts": ["sweeperbot=sweeperbot.launch:main"]},
include_package_data=True,
license="GNU General Public License v3",
zip_safe=False,
keywords=[
"sweeperbot",
"sweeper",
"bot",
"discord",
"benedict",
"benedict 9940",
"segachan",
],
classifiers=[
"Development Status :: 2- Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
)
| 27.44186
| 76
| 0.622881
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from sweeperbot._version import __version__
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
setup(
name="sweeperbot",
version=__version__,
description="Test",
long_description=readme + "\n\n" + history,
author="Glanyx",
author_email="mikekornet@live.co.uk",
url="https://github.com/glanyx/segachan/",
entry_points={"console_scripts": ["sweeperbot=sweeperbot.launch:main"]},
include_package_data=True,
license="GNU General Public License v3",
zip_safe=False,
keywords=[
"sweeperbot",
"sweeper",
"bot",
"discord",
"benedict",
"benedict 9940",
"segachan",
],
classifiers=[
"Development Status :: 2- Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
)
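# Installation sketch (standard setuptools workflow, not part of the file): after
# `pip install .`, the console_scripts entry point above exposes a `sweeperbot`
# command that calls sweeperbot.launch:main.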
| 0
| 0
| 0
|
ed7a72592c78b45f52182233b37b4c42c9305353
| 65,295
|
py
|
Python
|
myigbot.py
|
Afafabdb/MyIGBot
|
43a5c24993598d6827a735e620acb2a41e9fbbd0
|
[
"MIT"
] | 91
|
2020-11-14T15:13:06.000Z
|
2021-07-27T18:14:45.000Z
|
myigbot.py
|
Afafabdb/MyIGBot
|
43a5c24993598d6827a735e620acb2a41e9fbbd0
|
[
"MIT"
] | 25
|
2020-11-16T21:52:39.000Z
|
2021-05-04T20:53:24.000Z
|
myigbot.py
|
Afafabdb/MyIGBot
|
43a5c24993598d6827a735e620acb2a41e9fbbd0
|
[
"MIT"
] | 18
|
2020-11-17T14:30:32.000Z
|
2021-07-16T22:23:21.000Z
|
import requests
import os
from datetime import datetime
import json
from bs4 import BeautifulSoup as bs
import time
import random
import string
| 48.510401
| 448
| 0.524665
|
import requests
import os
from datetime import datetime
import json
from bs4 import BeautifulSoup as bs
import time
import random
import string
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class MyIGBot:
def __init__(self, username, password, use_cookie = True, proxy=None):
self.username = username
self.password = password
self.use_cookie = use_cookie
self.proxy = proxy
self.path = os.getcwd()
if use_cookie == False or os.path.exists(self.path+f'//cookie_{self.username}.bot') == False:
link = 'https://www.instagram.com/'
login_url = 'https://www.instagram.com/accounts/login/ajax/'
time_now = int(datetime.now().timestamp())
response = requests.get(link, proxies=self.proxy)
try:
csrf = response.cookies['csrftoken']
except:
letters = string.ascii_lowercase
csrf = ''.join(random.choice(letters) for i in range(8))
payload = {
'username': self.username,
'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time_now}:{self.password}',
'queryParams': {},
'optIntoOneTap': 'false'
}
login_header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://www.instagram.com/accounts/login/",
"x-csrftoken": csrf
}
login_response = requests.post(login_url, data=payload, headers=login_header, proxies=self.proxy)
json_data = json.loads(login_response.text)
cookies = login_response.cookies
cookie_jar = cookies.get_dict()
try:
self.csrf_token = cookie_jar['csrftoken']
except:
self.csrf_token = csrf
try:
if json_data["authenticated"]:
pass
else:
print(bcolors.FAIL+"[โ] Login Failed!"+bcolors.ENDC, login_response.text)
quit()
except KeyError:
try:
if json_data["two_factor_required"]:
self.ig_nrcb = cookie_jar['ig_nrcb']
self.ig_did = cookie_jar['ig_did']
self.mid = cookie_jar['mid']
otp = input(bcolors.OKBLUE+'[!] Two Factor Auth. Detected! Enter Code Here: '+bcolors.ENDC)
twofactor_url = 'https://www.instagram.com/accounts/login/ajax/two_factor/'
twofactor_payload = {
'username': self.username,
'verificationCode': otp,
'identifier': json_data["two_factor_info"]["two_factor_identifier"],
'queryParams': {}
}
twofactor_header = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded",
"cookie": 'ig_did='+self.ig_did+'; ig_nrcb='+self.ig_nrcb+'; csrftoken='+self.csrf_token+'; mid='+self.mid,
"origin": "https://www.instagram.com",
"referer": "https://www.instagram.com/accounts/login/two_factor?next=%2F",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "0",
"x-instagram-ajax": "00c4537694a4",
"x-requested-with": "XMLHttpRequest"
}
login_response = requests.post(twofactor_url, data=twofactor_payload, headers=twofactor_header, proxies=self.proxy)
try:
if login_response.headers['Set-Cookie'] != 0:
pass
except:
try:
if json_data["message"]=="checkpoint_required":
self.ig_nrcb = cookie_jar['ig_nrcb']
self.ig_did = cookie_jar['ig_did']
self.mid = cookie_jar['mid']
url='https://www.instagram.com'+json_data['checkpoint_url']
header = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded",
"cookie": 'ig_did='+self.ig_did+'; ig_nrcb='+self.ig_nrcb+'; csrftoken='+self.csrf_token+'; mid='+self.mid,
"origin": "https://www.instagram.com",
"referer": 'https://instagram.com'+json_data['checkpoint_url'],
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "0",
"x-instagram-ajax": "e8e20d8ba618",
"x-requested-with": "XMLHttpRequest"
}
                                    code=input(bcolors.OKBLUE+json.loads(requests.post(url, headers=header, data={'choice': '1'}, proxies=self.proxy).text)['extraData']['content'][1]['text']+' > '+bcolors.ENDC)
                                    if json.loads(requests.post(url, headers=header, data={'security_code': code}, proxies=self.proxy).text)['type']=='CHALLENGE_REDIRECTION':
login_response = requests.post(login_url, data=payload, headers=login_header, proxies=self.proxy)
else:
print(bcolors.FAIL+'[โ] Login Failed!'+bcolors.ENDC)
quit()
except:
print(bcolors.FAIL+'[โ] Login Failed!'+bcolors.ENDC)
quit()
except KeyError:
try:
if json_data["message"]=="checkpoint_required":
self.ig_nrcb = cookie_jar['ig_nrcb']
self.ig_did = cookie_jar['ig_did']
self.mid = cookie_jar['mid']
url='https://www.instagram.com'+json_data['checkpoint_url']
header = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded",
"cookie": 'ig_did='+self.ig_did+'; ig_nrcb='+self.ig_nrcb+'; csrftoken='+self.csrf_token+'; mid='+self.mid,
"origin": "https://www.instagram.com",
"referer": 'https://instagram.com'+json_data['checkpoint_url'],
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "0",
"x-instagram-ajax": "e8e20d8ba618",
"x-requested-with": "XMLHttpRequest"
}
                            code=input(bcolors.OKBLUE+json.loads(requests.post(url, headers=header, data={'choice': '1'}, proxies=self.proxy).text)['extraData']['content'][1]['text']+' > '+bcolors.ENDC)
                            if json.loads(requests.post(url, headers=header, data={'security_code': code}, proxies=self.proxy).text)['type']=='CHALLENGE_REDIRECTION':
login_response = requests.post(login_url, data=payload, headers=login_header, proxies=self.proxy)
else:
print(bcolors.FAIL+'[โ] Login Failed!'+bcolors.ENDC)
quit()
except:
print(bcolors.FAIL+'[โ] Login Failed!'+bcolors.ENDC)
quit()
self.sessionid = login_response.headers['Set-Cookie'].split('sessionid=')[1].split(';')[0]
self.userId = login_response.headers['Set-Cookie'].split('ds_user_id=')[1].split(';')[0]
self.cookie = "sessionid=" + self.sessionid + "; csrftoken=" + self.csrf_token + "; ds_user_id=" + self.userId + ";"
create_cookie = open(self.path+f'//cookie_{self.username}.bot', 'w+', encoding='utf-8')
create_cookie.write(self.cookie)
create_cookie.close()
self.session = requests.session()
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.sessionid)
self.session.cookies.set_cookie(cookie_obj)
elif os.path.exists(self.path+f'//cookie_{self.username}.bot'):
try:
read_cookie = open(self.path+f'//cookie_{self.username}.bot', 'r', encoding='utf-8')
self.cookie = read_cookie.read()
read_cookie.close()
homelink = 'https://www.instagram.com/op/'
self.session = requests.session()
self.sessionid = self.cookie.split('=')[1].split(';')[0]
self.csrf_token = self.cookie.split('=')[2].split(';')[0]
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.sessionid)
self.session.cookies.set_cookie(cookie_obj)
login_response = self.session.get(homelink, proxies=self.proxy)
time.sleep(1)
soup = bs(login_response.text, 'html.parser')
soup.find("strong", {"class": "-cx-PRIVATE-NavBar__username -cx-PRIVATE-NavBar__username__"}).get_text()
except AttributeError:
print(bcolors.FAIL+"[โ] Login Failed! Cookie file is corupted!"+bcolors.ENDC)
os.remove(self.path+f'//cookie_{self.username}.bot')
print(bcolors.WARNING+"[-] Deleted Corupted Cookie File! Try Again!"+bcolors.ENDC)
quit()
def already_liked(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
resp = self.session.get(post_link, proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
data_script = str(scripts[15])
time.sleep(1)
try:
shortcode = post_link.split('/p/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
except:
shortcode = post_link.split('/tv/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
data_object = data_script.replace(");</script>", '')
data_json = json.loads(data_object)
liked = data_json["graphql"]["shortcode_media"]["viewer_has_liked"]
return bool(liked)
def like(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
try:
if self.already_liked(post_link) == False:
resp = self.session.get(post_link, proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
data_script = str(scripts[15])
time.sleep(1)
try:
shortcode = post_link.split('/p/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
except:
shortcode = post_link.split('/tv/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
data_object = data_script.replace(");</script>", '')
data_json = json.loads(data_object)
id_post = data_json["graphql"]["shortcode_media"]["id"]
url_post = f"https://www.instagram.com/web/likes/{id_post}/like/"
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-length": "0",
"content-type": "application/x-www-form-urlencoded",
"cookie": self.cookie,
"origin": "https://www.instagram.com",
"referer": post_link,
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
response = requests.request("POST", url_post, headers=headers, proxies=self.proxy)
if response.status_code != 200:
return response.status_code
else:
return 208
except:
return 403
return 200
def unlike(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
try:
if self.already_liked(post_link) == True:
resp = self.session.get(post_link, proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
data_script = str(scripts[15])
time.sleep(1)
try:
shortcode = post_link.split('/p/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
except:
shortcode = post_link.split('/tv/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
data_object = data_script.replace(");</script>", '')
data_json = json.loads(data_object)
id_post = data_json["graphql"]["shortcode_media"]["id"]
url_post = f"https://www.instagram.com/web/likes/{id_post}/unlike/"
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-length": "0",
"content-type": "application/x-www-form-urlencoded",
"cookie": self.cookie,
"origin": "https://www.instagram.com",
"referer": post_link,
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
response = requests.request("POST", url_post, headers=headers, proxies=self.proxy)
if response.status_code != 200:
return response.status_code
else:
return 208
except:
return 403
return 200
def like_recent(self, username):
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
try:
shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']["edge_owner_to_timeline_media"]["edges"][0]["node"]["shortcode"]
return self.like('https://www.instagram.com/p/'+shortcode+'/')
except IndexError:
return 404
except KeyError:
return 404
def comment(self, post_link, comment_text):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
try:
resp = self.session.get(post_link, proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
data_script = str(scripts[15])
time.sleep(1)
try:
shortcode = post_link.split('/p/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
except:
shortcode = post_link.split('/tv/')[1].replace('/', '')
data_script = data_script.replace(
f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
data_object = data_script.replace(");</script>", '')
data_json = json.loads(data_object)
id_post = data_json["graphql"]["shortcode_media"]["id"]
url_post = f"https://www.instagram.com/web/comments/{id_post}/add/"
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-length": "39",
"content-type": "application/x-www-form-urlencoded",
"cookie": self.cookie,
"origin": "https://www.instagram.com",
"referer": post_link,
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
response = requests.request("POST", url_post, headers=headers, data=f"comment_text={comment_text}&replied_to_comment_id=".encode('utf-8'), proxies=self.proxy)
if response.status_code != 200:
return response.status_code
except:
return 403
return 200
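# Comment on a user's most recent post (shortcode read from window._sharedData).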
def comment_recent(self, username, comment_text):
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
try:
shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']["edge_owner_to_timeline_media"]["edges"][0]["node"]["shortcode"]
return self.comment('https://www.instagram.com/p/'+shortcode+'/', comment_text)
except IndexError:
return 404
except KeyError:
return 404
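# Return True if the logged-in account already follows the given user (followed_by_viewer flag from the profile page).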
def already_followed(self, username):
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
followed = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['followed_by_viewer']
return bool(followed)
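# Follow a user: look up the profile id in window._sharedData and POST to /web/friendships/<id>/follow/ (208 = already followed).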
def follow(self, username):
try:
if self.already_followed(username) == False:
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
url_page = f"https://www.instagram.com/web/friendships/{id_page}/follow/"
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '0',
'content-type': 'application/x-www-form-urlencoded',
'cookie': self.cookie,
"origin": "https://www.instagram.com",
"referer": f"https://www.instagram.com/{username}/",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
response = requests.request("POST", url_page, headers=headers, proxies=self.proxy)
if response.status_code == 200:
return 200
else:
return response.status_code
else:
return 208
except KeyError:
return 404
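# Unfollow a user via /web/friendships/<id>/unfollow/ (208 = not currently followed).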
def unfollow(self, username):
try:
if self.already_followed(username) == True:
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
url_page = f"https://www.instagram.com/web/friendships/{id_page}/unfollow/"
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '0',
'content-type': 'application/x-www-form-urlencoded',
'cookie': self.cookie,
"origin": "https://www.instagram.com",
"referer": f"https://www.instagram.com/{username}/",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
response = requests.request("POST", url_page, headers=headers, proxies=self.proxy)
if response.status_code == 200:
return 200
else:
return response.status_code
else:
return 208
except KeyError:
return 404
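# Mark every current story of a user as seen by POSTing each reel item to /stories/reel/seen.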
def story_view(self, username):
try:
resp = self.session.get('https://www.instagram.com/'+username+'/', proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
try:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
return 404
page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
surl = f'https://www.instagram.com/graphql/query/?query_hash=c9c56db64beb4c9dea2d17740d0259d9&variables=%7B%22reel_ids%22%3A%5B%22{page_id}%22%5D%2C%22tag_names%22%3A%5B%5D%2C%22location_ids%22%3A%5B%5D%2C%22highlight_reel_ids%22%3A%5B%5D%2C%22precomposed_overlay%22%3Afalse%2C%22show_story_viewer_list%22%3Atrue%2C%22story_viewer_fetch_count%22%3A50%2C%22story_viewer_cursor%22%3A%22%22%2C%22stories_video_dash_manifest%22%3Afalse%7D'
resp = self.session.get(surl, proxies=self.proxy)
time.sleep(1)
soup = bs(resp.text, 'html.parser')
data_json = json.loads(str(soup))
story_count = len(data_json["data"]["reels_media"][0]["items"])
for i in range(0, story_count):
id_story = data_json["data"]["reels_media"][0]["items"][i]['id']
taken_at_timestamp = data_json["data"]["reels_media"][0]["items"][i]['taken_at_timestamp']
stories_page = f"https://www.instagram.com/stories/reel/seen"
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '127',
'content-type': 'application/x-www-form-urlencoded',
'cookie': self.cookie,
"origin": "https://www.instagram.com",
"referer": f"https://www.instagram.com/stories/{username}/{id_story}/",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
data = {
'reelMediaId': id_story,
'reelMediaOwnerId': page_id,
'reelId': page_id,
'reelMediaTakenAt': taken_at_timestamp,
'viewSeenAt': taken_at_timestamp
}
requests.request("POST", stories_page, headers=headers, data=data, proxies=self.proxy)
except IndexError:
return 404
except KeyError:
return 404
return 200
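# Upload an image post: push the bytes to rupload_igphoto, then finalize with /create/configure/ and the caption.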
def upload_post(self, image_path, caption=''):
micro_time = int(datetime.now().timestamp())
headers = {
"content-type": "image / jpg",
"content-length": "1",
"X-Entity-Name": f"fb_uploader_{micro_time}",
"Offset": "0",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"x-entity-length": "1",
"X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}',
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "1217981644879628",
"cookie": self.cookie
}
upload_response = requests.post(f'https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}',
data=open(image_path, "rb"), headers=headers, proxies=self.proxy)
json_data = json.loads(upload_response.text)
upload_id = json_data['upload_id']
if json_data["status"] == "ok":
url = "https://www.instagram.com/create/configure/"
payload = 'upload_id=' + upload_id + '&caption=' + caption + '&usertags=&custom_accessibility_caption=&retry_timeout='
headers = {
'authority': 'www.instagram.com',
'x-ig-www-claim': 'hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3',
'x-instagram-ajax': 'adb961e446b7-hot',
'content-type': 'application/x-www-form-urlencoded',
'accept': '*/*',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'x-csrftoken': self.csrf_token,
'x-ig-app-id': '1217981644879628',
'origin': 'https://www.instagram.com',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.instagram.com/create/details/',
'accept-language': 'en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7',
'cookie': self.cookie
}
response = requests.request("POST", url, headers=headers, data=payload, proxies=self.proxy)
json_data = json.loads(response.text)
if json_data["status"] == "ok":
return 200
else:
return 400
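# Upload an image as a story: same rupload_igphoto flow, finalized with /create/configure_to_story/.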
def upload_story(self, image_path):
micro_time = int(datetime.now().timestamp())
headers = {
"content-type": "image / jpg",
"content-length": "1",
"X-Entity-Name": f"fb_uploader_{micro_time}",
"Offset": "0",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"x-entity-length": "1",
"X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}',
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "1217981644879628",
"cookie": self.cookie
}
upload_response = requests.post(f'https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}',
data=open(image_path, "rb"), headers=headers, proxies=self.proxy)
json_data = json.loads(upload_response.text)
upload_id = json_data['upload_id']
if json_data["status"] == "ok":
url = "https://www.instagram.com/create/configure_to_story/"
payload = 'upload_id=' + upload_id + '&caption=&usertags=&custom_accessibility_caption=&retry_timeout='
headers = {
'authority': 'www.instagram.com',
'x-ig-www-claim': 'hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3',
'x-instagram-ajax': 'adb961e446b7-hot',
'content-type': 'application/x-www-form-urlencoded',
'accept': '*/*',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'x-csrftoken': self.csrf_token,
'x-ig-app-id': '1217981644879628',
'origin': 'https://www.instagram.com',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.instagram.com/create/details/',
'accept-language': 'en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7',
'cookie': self.cookie
}
response = requests.request("POST", url, headers=headers, data=payload, proxies=self.proxy)
json_data = json.loads(response.text)
if json_data["status"] == "ok":
return 200
else:
return 400
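# Return up to `limit` post links for a hashtag via the GraphQL hashtag query.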
def hashtag_posts(self, hashtag, limit=20):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=9b498c08113f1e09617a1703c22b2f32&variables=%7B%22tag_name%22%3A%22{hashtag}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
post_count = len(json.loads(response)['data']['hashtag']['edge_hashtag_to_media']['edges'])
if limit > post_count:
limit = post_count
links=[]
for i in range(0, limit):
links.append('https://instagram.com/p/'+json.loads(response)['data']['hashtag']['edge_hashtag_to_media']['edges'][i]['node']['shortcode'])
return links
def location_posts(self, location_url, limit=20):
id_location = location_url.split('/locations/')[1].split('/')[0]
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=36bd0f2bf5911908de389b8ceaa3be6d&variables=%7B%22id%22%3A%22{id_location}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
post_count = len(json.loads(response)['data']['location']['edge_location_to_media']['edges'])
if limit > post_count:
limit = post_count
links=[]
for i in range(0, limit):
links.append('https://instagram.com/p/'+json.loads(response)['data']['location']['edge_location_to_media']['edges'][i]['node']['shortcode'])
return links
def user_posts_count(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
post_count = json.loads(response)['graphql']['user']['edge_owner_to_timeline_media']['count']
return post_count
def user_followers_count(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
followers_count = json.loads(response)['graphql']['user']['edge_followed_by']['count']
return followers_count
def user_follow_count(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
follow_count = json.loads(response)['graphql']['user']['edge_follow']['count']
return follow_count
def like_count(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
like_count = json.loads(response)['graphql']['shortcode_media']['edge_media_preview_like']['count']
return like_count
def comment_count(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
comment_count = json.loads(response)['graphql']['shortcode_media']['edge_media_preview_comment']['count']
return comment_count
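# Collect up to `limit` post links from a user's timeline, following end_cursor pagination in batches of 50.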
def user_posts(self, username, limit=50):
posts_have = self.user_posts_count(username)
if posts_have < limit:
limit=posts_have
limit_k=limit
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
user_id = json.loads(response)['graphql']['user']['id']
links=[]
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=003056d32c2554def87228bc3fd9668a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
post_count = len(json.loads(response)['data']['user']['edge_owner_to_timeline_media']['edges'])
if limit > post_count:
limit = post_count
for i in range(0, limit):
links.append('https://instagram.com/p/'+json.loads(response)['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['shortcode'])
if limit_k > 50:
limit = limit_k - 50
limit_k = limit
while limit_k > 0:
try:
after = json.loads(response)['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=003056d32c2554def87228bc3fd9668a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy).text
post_count = len(json.loads(response)['data']['user']['edge_owner_to_timeline_media']['edges'])
if limit > post_count:
limit = post_count
limit_k -= limit
for i in range(0, limit):
links.append('https://instagram.com/p/'+json.loads(response)['data']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['shortcode'])
limit = limit_k
except:
break
return links
def user_follows(self, username, limit=49):
followed = self.user_follow_count(username)
if followed < limit:
limit=followed
limit_k=limit
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
user_id = json.loads(response)['graphql']['user']['id']
usernames=[]
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=d04b0a864b4b54837c0d870b0e77e076&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
follow_count = len(json.loads(response)['data']['user']['edge_follow']['edges'])
if limit > follow_count:
limit = follow_count
for i in range(0, limit):
usernames.append(json.loads(response)['data']['user']['edge_follow']['edges'][i]['node']['username'])
if limit_k > 49:
limit = limit_k - 49
limit_k = limit
while limit_k > 0:
try:
after = json.loads(response)['data']['user']['edge_follow']['page_info']['end_cursor']
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=d04b0a864b4b54837c0d870b0e77e076&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy).text
follow_count = len(json.loads(response)['data']['user']['edge_follow']['edges'])
if limit > follow_count:
limit = follow_count
limit_k -= limit
for i in range(0, limit):
usernames.append(json.loads(response)['data']['user']['edge_follow']['edges'][i]['node']['username'])
limit = limit_k
except:
break
return usernames
def user_followers(self, username, limit=49):
follower = self.user_followers_count(username)
if follower < limit:
limit=follower
limit_k=limit
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
user_id = json.loads(response)['graphql']['user']['id']
usernames=[]
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
follower_count = len(json.loads(response)['data']['user']['edge_followed_by']['edges'])
if limit > follower_count:
limit = follower_count
for i in range(0, limit):
usernames.append(json.loads(response)['data']['user']['edge_followed_by']['edges'][i]['node']['username'])
if limit_k > 49:
limit = limit_k - 49
limit_k = limit
while limit_k > 0:
try:
after = json.loads(response)['data']['user']['edge_followed_by']['page_info']['end_cursor']
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy).text
follower_count = len(json.loads(response)['data']['user']['edge_followed_by']['edges'])
if limit > follower_count:
limit = follower_count
limit_k -= limit
for i in range(0, limit):
usernames.append(json.loads(response)['data']['user']['edge_followed_by']['edges'][i]['node']['username'])
limit = limit_k
except:
break
return usernames
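# Return usernames that liked a post, paginating the edge_liked_by GraphQL query.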
def post_likers(self, post_link, limit=50):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
likers = self.like_count(post_link)
if likers < limit:
limit=likers
limit_k=limit
headers = self._get_headers()
shortcode = post_link.split('/p/')[1].replace('/', '')
usernames=[]
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=d5d763b1e2acf209d62d22d184488e57&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
like_count = len(json.loads(response)['data']['shortcode_media']['edge_liked_by']['edges'])
if limit > like_count:
limit = like_count
for i in range(0, limit):
usernames.append(json.loads(response)['data']['shortcode_media']['edge_liked_by']['edges'][i]['node']['username'])
if limit_k > 50:
limit = limit_k - 50
limit_k = limit
while limit_k > 0:
try:
after = json.loads(response)['data']['shortcode_media']['edge_liked_by']['page_info']['end_cursor']
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=d5d763b1e2acf209d62d22d184488e57&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy).text
like_count = len(json.loads(response)['data']['shortcode_media']['edge_liked_by']['edges'])
if limit > like_count:
limit = like_count
limit_k -= limit
for i in range(0, limit):
usernames.append(json.loads(response)['data']['shortcode_media']['edge_liked_by']['edges'][i]['node']['username'])
limit = limit_k
except:
break
return usernames
def post_commenters(self, post_link, limit=50):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
commenters = self.comment_count(post_link)
if commenters < limit:
limit=commenters
limit_k=limit
headers = self._get_headers()
shortcode = post_link.split('/p/')[1].replace('/', '')
usernames=[]
response = self.session.get(f'https://www.instagram.com/graphql/query/?query_hash=bc3296d1ce80a24b1b6e40b1e72903f5&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A{limit}%7D', headers=headers, proxies=self.proxy).text
comment_count = len(json.loads(response)['data']['shortcode_media']['edge_media_to_parent_comment']['edges'])
if limit > comment_count:
limit = comment_count
for i in range(0, limit):
usernames.append(json.loads(response)['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['owner']['username'])
if limit_k > 50:
limit = limit_k - 50
limit_k = limit
while limit_k > 0:
try:
response = self.session.get('https://www.instagram.com/graphql/query/?query_hash=bc3296d1ce80a24b1b6e40b1e72903f5&variables={%22shortcode%22:%22'+shortcode+'%22,%22first%22:50,%22after%22:'+json.dumps(json.loads(response)['data']['shortcode_media']['edge_media_to_parent_comment']['page_info']['end_cursor'])+'}', headers=headers, proxies=self.proxy).text
comment_count = len(json.loads(response)['data']['shortcode_media']['edge_media_to_parent_comment']['edges'])
if limit > comment_count:
limit = comment_count
limit_k -= limit
for i in range(0, limit):
usernames.append(json.loads(response)['data']['shortcode_media']['edge_media_to_parent_comment']['edges'][i]['node']['owner']['username'])
limit = limit_k
except:
break
return usernames
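# Return the post links currently shown on the logged-in account's home feed.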
def feed_posts(self):
headers = self._get_headers()
response = self.session.get('https://www.instagram.com/graphql/query/?query_hash=c699b185975935ae2a457f24075de8c7', headers=headers, proxies=self.proxy).text
post_count = len(json.loads(response)['data']['user']['edge_web_feed_timeline']['edges'])
feed_posts = []
for i in range(0, post_count):
feed_posts.append('https://instagram.com/p/'+json.loads(response)['data']['user']['edge_web_feed_timeline']['edges'][i]['node']['shortcode'])
return feed_posts
def post_owner(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
owner = json.loads(response)['graphql']['shortcode_media']['owner']['username']
return owner
def post_caption(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
caption = json.loads(response)['graphql']['shortcode_media']['edge_media_to_caption']['edges'][0]['node']['text']
return caption
def post_location(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
location = {"id": json.loads(response)['graphql']['shortcode_media']['location']['id'], "name": json.loads(response)['graphql']['shortcode_media']['location']['name']}
return location
def post_hashtags(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
hashtag_filter = self.post_caption(post_link).replace('\n', ' ').split()
hashtags=[]
for hashtag in hashtag_filter:
if hashtag.startswith('#'):
hashtags.append(hashtag)
return hashtags
def post_tagged_user(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
tagged_users = []
try:
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
tag_count = len(json.loads(response)['graphql']['shortcode_media']['edge_sidecar_to_children']['edges'][0]['node']['edge_media_to_tagged_user']['edges'])
for i in range(0, tag_count):
tagged_users.append(json.loads(response)['graphql']['shortcode_media']['edge_sidecar_to_children']['edges'][0]['node']['edge_media_to_tagged_user']['edges'][i]['node']['user']['username'])
except:
try:
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
tag_count = len(json.loads(response)['graphql']['shortcode_media']['edge_media_to_tagged_user']['edges'])
for i in range(0, tag_count):
tagged_users.append(json.loads(response)['graphql']['shortcode_media']['edge_media_to_tagged_user']['edges'][i]['node']['user']['username'])
except:
pass
return tagged_users
def post_time(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
time = {"timestamp": json.loads(response)['graphql']['shortcode_media']['taken_at_timestamp'], "datetime": str(datetime.fromtimestamp(json.loads(response)['graphql']['shortcode_media']['taken_at_timestamp']))}
return time
def post_type(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
if bool(json.loads(response)['graphql']['shortcode_media']['is_video']):
post_type='video'
else:
post_type='picture'
return post_type
def video_views_count(self, post_link):
if post_link.find('/tv/') != -1:
post_link = post_link.replace('/tv/', '/p/')
try:
post_link = post_link.replace(post_link.split('/p/')[1].split('/')[1], '')
except:
pass
if self.post_type(post_link) == 'video':
headers = self._get_headers()
if post_link[-1] == '/':
post_link = post_link[:-1]
response = self.session.get(f'{post_link}/?__a=1', headers=headers, proxies=self.proxy).text
view_count = json.loads(response)['graphql']['shortcode_media']['video_view_count']
return view_count
def followed_by_me(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
followed_by_viewer = bool(json.loads(response)['graphql']['user']['followed_by_viewer'])
return followed_by_viewer
def follows_me(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
follows_viewer = bool(json.loads(response)['graphql']['user']['follows_viewer'])
return follows_viewer
def user_external_url(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
url = json.loads(response)['graphql']['user']['external_url']
return url
def verified_user(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
is_verified = bool(json.loads(response)['graphql']['user']['is_verified'])
return is_verified
def private_user(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
is_private = bool(json.loads(response)['graphql']['user']['is_private'])
return is_private
def user_bio(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
bio = json.loads(response)['graphql']['user']['biography']
return bio
def user_dp(self, username):
headers = self._get_headers()
response = self.session.get(f'https://www.instagram.com/{username}/?__a=1', headers=headers, proxies=self.proxy).text
dp_url = json.loads(response)['graphql']['user']['profile_pic_url_hd']
return dp_url
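# Build the shared request headers (cookie, CSRF token, app id); entries in `options` override the defaults.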
def _get_headers(self, options=None):
if options is None:
options = dict()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-length": "0",
"content-type": "application/x-www-form-urlencoded",
"cookie": self.cookie,
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
"x-instagram-ajax": "d3d3aea32e75",
"x-requested-with": "XMLHttpRequest"
}
for key, value in options.items():
headers[key] = value
return headers
| 63,636
| 207
| 1,294
|
7f9cb787068686be642ce592396f41b89b8d5767
| 545
|
py
|
Python
|
app/test/test1.py
|
saint816/fishbook
|
80a4b563a05086c85eb347286d28bb0e6258ff1c
|
[
"MIT"
] | null | null | null |
app/test/test1.py
|
saint816/fishbook
|
80a4b563a05086c85eb347286d28bb0e6258ff1c
|
[
"MIT"
] | null | null | null |
app/test/test1.py
|
saint816/fishbook
|
80a4b563a05086c85eb347286d28bb0e6258ff1c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test1
Description : multi-threading implementation
Author : pengsheng
date: 2019-04-20
-------------------------------------------------
"""
import threading
new_thread = threading.Thread(target=worker, name='new_thread')
new_thread.start()
# Make fuller use of the CPU's performance (thread execution is asynchronous)
# Asynchronous programming is mostly used to solve performance problems; for ordinary problems, use synchronous code where it suffices
t = threading.current_thread()
print(t.getName())
| 21.8
| 63
| 0.53578
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test1
Description : multi-threading implementation
Author : pengsheng
date: 2019-04-20
-------------------------------------------------
"""
import threading
def worker():
print('i am thread')
t = threading.current_thread()
print(t.getName())
new_thread = threading.Thread(target=worker, name='new_thread')
new_thread.start()
# Make fuller use of the CPU's performance (thread execution is asynchronous)
# Asynchronous programming is mostly used to solve performance problems; for ordinary problems, use synchronous code where it suffices
t = threading.current_thread()
print(t.getName())
| 75
| 0
| 23
|
535a5f8a51e655f145cc0b06696fd8a683da4409
| 221
|
py
|
Python
|
__init__.py
|
klonuo/SublimeJEDI
|
ee58759cbbfbd052bd0a972b85b1666e0e1cb6e9
|
[
"MIT"
] | 1
|
2016-09-20T20:50:53.000Z
|
2016-09-20T20:50:53.000Z
|
__init__.py
|
klonuo/SublimeJEDI
|
ee58759cbbfbd052bd0a972b85b1666e0e1cb6e9
|
[
"MIT"
] | null | null | null |
__init__.py
|
klonuo/SublimeJEDI
|
ee58759cbbfbd052bd0a972b85b1666e0e1cb6e9
|
[
"MIT"
] | null | null | null |
# fix absolute imports on ST3
# TODO: remove
#import sys
#import os
#sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
try:
from sublime_jedi import *
except ImportError:
from .sublime_jedi import *
| 20.090909
| 63
| 0.733032
|
# fix absolute imports on ST3
# TODO: remove
#import sys
#import os
#sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
try:
from sublime_jedi import *
except ImportError:
from .sublime_jedi import *
| 0
| 0
| 0
|
667544762c302b9391cb48414210868246d7d11a
| 9,969
|
py
|
Python
|
oras/content/file.py
|
vsoch/oras-python
|
45374c6187b98f171f85dffd75a31877b6ec12ce
|
[
"MIT"
] | 1
|
2021-12-06T08:54:51.000Z
|
2021-12-06T08:54:51.000Z
|
oras/content/file.py
|
vsoch/oras-python
|
45374c6187b98f171f85dffd75a31877b6ec12ce
|
[
"MIT"
] | 1
|
2021-11-28T18:59:21.000Z
|
2021-11-28T18:59:21.000Z
|
oras/content/file.py
|
vsoch/oras-python
|
45374c6187b98f171f85dffd75a31877b6ec12ce
|
[
"MIT"
] | null | null | null |
__author__ = "Vanessa Sochat"
__copyright__ = "Copyright 2021, Vanessa Sochat"
__license__ = "MPL 2.0"
import os
import time
import tarfile
import tempfile
import sys
from oras.logger import logger
import oras.utils as utils
import oras.defaults as defaults
from .const import TempFilePattern, AnnotationUnpack, AnnotationDigest
from .utils import resolve_name, tar_directory
from .readerat import sizeReaderAt
from .opts import CdWriterOpts, WithOutputHash
from .iowriter import IoContentWriter
import opencontainers.image.v1.annotations as annotations
import opencontainers.image.v1.descriptor as descriptor
class FileStore:
"""
A FileStore provides content from the file system
"""
def map_path(self, name, path):
"""
Map a name to a path
"""
path = self.resolve_path(path)
self.path_map[name] = path
return path
def resolve_path(self, name):
"""
Return the path by name
"""
path = self.path_map.get(name)
if path or (path and os.path.isabs(path)):
return path
return os.path.join(self.root, path)
def set(self, desc):
"""
Save a descriptor to the map.
"""
self.descriptor[desc.Digest.value] = desc
def add(self, name, media_type, path):
"""
Add a file reference
"""
path = path or name
path = self.map_path(name, path)
if os.path.isdir(path):
desc = self.descriptor_from_dir(name, media_type, path)
elif os.path.isfile(path):
desc = self.descriptor_from_file(media_type, path)
else:
logger.exit("%s is not a valid path." % path)
desc.Annotations[annotations.AnnotationTitle] = name
self.set(desc)
return desc
def descriptor_from_file(self, media_type, path):
"""
Get a descriptor from file.
"""
if not os.path.exists(path):
logger.exit("%s does not exist." % path)
try:
digest = utils.get_file_hash(path)
except:
logger.exit("Cannot calculate digest for %s" % path)
if not media_type:
media_type = defaults.DefaultBlobMediaType
stat = os.stat(path)
return descriptor.Descriptor(mediaType=media_type, digest=digest, size=stat.st_size)
def descriptor_from_dir(self, name, media_type, root):
"""
Get a descriptor from a director
"""
name = self.map_path(name, self.temp_file().name)  # map the directory name to a temporary tarball path
# Compress directory to tmpfile
tar = tar_directory(root, name, strip_times=self.reproducible)
# Get digest
digest = "sha256:%s" % utils.get_file_hash(tar)
# generate descriptor
if not media_type:
media_type = defaults.DefaultBlobMediaType
info = os.stat(tar)
# Question: what is the difference between AnnotationDigest and digest?
annotations = {"AnnotationDigest": digest, "AnnotationUnpack": True}
return descriptor.Descriptor(mediaType=media_type, digest=digest,size=info.st_size, annotations=annotations)
def temp_file(self):
"""
Create and store a temporary file
"""
filen = tempfile.NamedTemporaryFile(prefix=TempFilePattern)
self.tmp_files[filen.name] = filen
return filen
def close(self):
"""Close frees up resources used by the file store
"""
for name, filen in self.tmp_files.items():
filen.close()
if os.path.exists(name):
os.remove(name)
def set(self, desc):
"""
Set an OCI descriptor
"""
self.descriptor[desc.Digest] = desc
def get(self, desc):
"""
Get an OCI descriptor
"""
value = self.descriptor.get(desc.Digest)
if not value:
return descriptor.Descriptor()
return value
def reader_at(self, desc):
"""ReaderAt provides contents
"""
desc = self.get(desc)
if not desc:
sys.exit("Could not find descriptor.")
name = resolve_name(desc)
if not name:
sys.exit("Cannot resolve name for %s" % desc)
path = self.resolve_path(name)
fileo = open(path, 'r')
return sizeReaderAt(fileo, desc.size)
def writer(self, opts):
"""Writer begins or resumes the active writer identified by desc
"""
wopts = CdWriterOpts()
wopts.update(opts)
desc = wopts.Desc
name = resolve_name(desc)
# if we were not told to ignore NoName, then return an error
if not name and not self.ignore_no_name:
sys.exit("Cannot resolve name for %s" % desc)
elif not name and self.ignore_no_name:
# just return a nil writer - we do not want to calculate the hash, so just use
# whatever was passed in the descriptor
return IoContentWriter(WithOutputHash(desc.Digest))
path = self.resolve_write_path(name)
filen, after_commit = self.create_write_path(path, desc, name)
now = time.time()
# STOPPED HERE need to find content.Status
# a plain dict stands in for Go's content.Status struct for now
status = {
    "Ref": name,
    "Total": desc.Size,
    "StartedAt": now,
    "UpdatedAt": now,
}
return FileWriter(store=self, fileh=filen, desc=desc, status=status, after_commit=after_commit)
def resolve_write_path(self, name):
"""Resolve the write path
"""
path = self.resolve_path(name)
if not self.allow_path_traversal_on_write:
base = os.path.abspath(self.root)
target = os.path.abspath(path)
rel = os.path.relpath(base, target)
if rel.startswith("../") or rel == "..":
return ""
if self.disable_overwrite:
print("NEED TO CHECK OVERWRITE")
# TODO what do we want to check here, if writable?
#if os.stat(path)
# if _, err := os.Stat(path); err == nil {
# return "", ErrOverwriteDisallowed
# } else if !os.IsNotExist(err) {
# return "", err
return path
def create_write_path(self, path, desc, prefix):
"""
Create a write path?
"""
value = desc.Annotations.get(AnnotationUnpack)
if not value:
os.makedirs(os.path.dirname(path))
filen = open(path, 'w')  # create the target file and return the open handle
return filen, None
os.makedirs(path)
filen = tempfile.mkstemp()[1]
checksum = desc.Annotations.get(AnnotationDigest)
return filen, after_commit
class FileWriter:
def __init__(self, store, fileh, desc, status, after_commit, digester=None):
self.store = store # *FileStore
self.file = fileh # *os.File
self.desc = desc # ocispec.Descriptor
self.status = status # content.Status
self.after_commit = after_commit # func()
self.digester = digester or digest.Canonical.Digester() # TODO what is this?
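# NOTE: everything below is the untranslated Go fileWriter implementation, kept verbatim and not yet ported to Python.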
func (w *fileWriter) Status() (content.Status, error) {
return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *fileWriter) Digest() digest.Digest {
return w.digester.Digest()
}
// Write p to the transaction.
func (w *fileWriter) Write(p []byte) (n int, err error) {
n, err = w.file.Write(p)
w.digester.Hash().Write(p[:n])
w.status.Offset += int64(len(p))
w.status.UpdatedAt = time.Now()
return n, err
}
func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
var base content.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return err
}
}
if w.file == nil {
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
}
file := w.file
w.file = nil
if err := file.Sync(); err != nil {
file.Close()
return errors.Wrap(err, "sync failed")
}
fileInfo, err := file.Stat()
if err != nil {
file.Close()
return errors.Wrap(err, "stat failed")
}
if err := file.Close(); err != nil {
return errors.Wrap(err, "failed to close file")
}
if size > 0 && size != fileInfo.Size() {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size)
}
if dgst := w.digester.Digest(); expected != "" && expected != dgst {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
}
w.store.set(w.desc)
if w.afterCommit != nil {
return w.afterCommit()
}
return nil
}
// Close the writer, flushing any unwritten data and leaving the progress in
// tact.
func (w *fileWriter) Close() error {
if w.file == nil {
return nil
}
w.file.Sync()
err := w.file.Close()
w.file = nil
return err
}
func (w *fileWriter) Truncate(size int64) error {
if size != 0 {
return ErrUnsupportedSize
}
w.status.Offset = 0
w.digester.Hash().Reset()
if _, err := w.file.Seek(0, io.SeekStart); err != nil {
return err
}
return w.file.Truncate(0)
}
| 28.812139
| 117
| 0.611395
|
__author__ = "Vanessa Sochat"
__copyright__ = "Copyright 2021, Vanessa Sochat"
__license__ = "MPL 2.0"
import os
import time
import tarfile
import tempfile
import sys
from oras.logger import logger
import oras.utils as utils
import oras.defaults as defaults
from .const import TempFilePattern, AnnotationUnpack, AnnotationDigest
from .utils import resolve_name, tar_directory
from .readerat import sizeReaderAt
from .opts import CdWriterOpts, WithOutputHash
from .iowriter import IoContentWriter
import opencontainers.image.v1.annotations as annotations
import opencontainers.image.v1.descriptor as descriptor
class FileStore:
"""
A FileStore provides content from the file system
"""
def __init__(self, **kwargs):
self.root = kwargs.get("root")
self.descriptor = kwargs.get('descriptor', {})
self.path_map = kwargs.get("path_map", {})
self.tmp_files = kwargs.get("tmp_files", {})
self.ignore_no_name = kwargs.get("ignore_no_name", False)
self.disable_overwrite = kwargs.get("disable_overwrite", False)
self.allow_path_traversal_on_write = kwargs.get("allow_path_traversal_on_write", False)
self.reproducible = kwargs.get("reproducible", False)
def map_path(self, name, path):
"""
Map a name to a path
"""
path = self.resolve_path(path)
self.path_map[name] = path
return path
def resolve_path(self, name):
"""
Return the path by name
"""
path = self.path_map.get(name)
if path or (path and os.path.isabs(path)):
return path
return os.path.join(self.root, path)
def set(self, desc):
"""
Save a descriptor to the map.
"""
self.descriptor[desc.Digest.value] = desc
def add(self, name, media_type, path):
"""
Add a file reference
"""
path = path or name
path = self.map_path(name, path)
if os.path.isdir(path):
desc = self.descriptor_from_dir(name, media_type, path)
elif os.path.isfile(path):
desc = self.descriptor_from_file(media_type, path)
else:
logger.exit("%s is not a valid path." % path)
desc.Annotations[annotations.AnnotationTitle] = name
self.set(desc)
return desc
def descriptor_from_file(self, media_type, path):
"""
Get a descriptor from file.
"""
if not os.path.exists(path):
logger.exit("%s does not exist." % path)
try:
digest = utils.get_file_hash(path)
except:
logger.exit("Cannot calculate digest for %s" % path)
if not media_type:
media_type = defaults.DefaultBlobMediaType
stat = os.stat(path)
return descriptor.Descriptor(mediaType=media_type, digest=digest, size=stat.st_size)
def descriptor_from_dir(self, name, media_type, root):
"""
Get a descriptor from a director
"""
name = self.map_path(name, self.temp_file().name)  # map the directory name to a temporary tarball path
# Compress directory to tmpfile
tar = tar_directory(root, name, strip_times=self.reproducible)
# Get digest
digest = "sha256:%s" % utils.get_file_hash(tar)
# generate descriptor
if not media_type:
media_type = defaults.DefaultBlobMediaType
info = os.stat(tar)
# Question: what is the difference between AnnotationDigest and digest?
annotations = {"AnnotationDigest": digest, "AnnotationUnpack": True}
return descriptor.Descriptor(mediaType=media_type, digest=digest,size=info.st_size, annotations=annotations)
def temp_file(self):
"""
Create and store a temporary file
"""
filen = tempfile.NamedTemporaryFile(prefix=TempFilePattern)
self.tmp_files[filen.name] = filen
return filen
def close(self):
"""Close frees up resources used by the file store
"""
for name, filen in self.tmp_files.items():
filen.close()
if os.path.exists(name):
os.remove(name)
def set(self, desc):
"""
Set an OCI descriptor
"""
self.descriptor[desc.Digest] = desc
def get(self, desc):
"""
Get an OCI descriptor
"""
value = self.descriptor.get(desc.Digest)
if not value:
return descriptor.Descriptor()
return value
def reader_at(self, desc):
"""ReaderAt provides contents
"""
desc = self.get(desc)
if not desc:
sys.exit("Could not find descriptor.")
name = resolve_name(desc)
if not name:
sys.exit("Cannot resolve name for %s" % desc)
path = self.resolve_path(name)
fileo = open(path, 'r')
return sizeReaderAt(fileo, desc.size)
def writer(self, opts):
"""Writer begins or resumes the active writer identified by desc
"""
wopts = CdWriterOpts()
wopts.update(opts)
desc = wopts.Desc
name = resolve_name(desc)
# if we were not told to ignore NoName, then return an error
if not name and not self.ignore_no_name:
sys.exit("Cannot resolve name for %s" % desc)
elif not name and self.ignore_no_name:
# just return a nil writer - we do not want to calculate the hash, so just use
# whatever was passed in the descriptor
return IoContentWriter(WithOutputHash(desc.Digest))
path = self.resolve_write_path(name)
filen, after_commit = self.create_write_path(path, desc, name)
now = time.time()
# STOPPED HERE need to find content.Status
# a plain dict stands in for Go's content.Status struct for now
status = {
    "Ref": name,
    "Total": desc.Size,
    "StartedAt": now,
    "UpdatedAt": now,
}
return FileWriter(store=self, fileh=filen, desc=desc, status=status, after_commit=after_commit)
def resolve_write_path(self, name):
"""Resolve the write path
"""
path = self.resolve_path(name)
if not self.allow_path_traversal_on_write:
base = os.path.abspath(self.root)
target = os.path.abspath(path)
rel = os.path.relpath(base, target)
if rel.startswith("../") or rel == "..":
return ""
if self.disable_overwrite:
print("NEED TO CHECK OVERWRITE")
# TODO what do we want to check here, if writable?
#if os.stat(path)
# if _, err := os.Stat(path); err == nil {
# return "", ErrOverwriteDisallowed
# } else if !os.IsNotExist(err) {
# return "", err
return path
def create_write_path(self, path, desc, prefix):
"""
Create a write path?
"""
value = desc.Annotations.get(AnnotationUnpack)
if not value:
os.makedirs(os.path.dirname(path))
filen = open(path, 'w')  # create the target file and return the open handle
return filen, None
os.makedirs(path)
filen = tempfile.mkstemp()[1]
checksum = desc.Annotations.get(AnnotationDigest)
def after_commit():
return extract_tar_gzip(path, prefix, filen, checksum)
return filen, after_commit
class FileWriter:
def __init__(self, store, fileh, desc, status, after_commit, digester=None):
self.store = store # *FileStore
self.file = fileh # *os.File
self.desc = desc # ocispec.Descriptor
self.status = status # content.Status
self.after_commit = after_commit # func()
self.digester = digester or digest.Canonical.Digester() # TODO what is this?
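# NOTE: everything below is the untranslated Go fileWriter implementation, kept verbatim and not yet ported to Python.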
func (w *fileWriter) Status() (content.Status, error) {
return w.status, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *fileWriter) Digest() digest.Digest {
return w.digester.Digest()
}
// Write p to the transaction.
func (w *fileWriter) Write(p []byte) (n int, err error) {
n, err = w.file.Write(p)
w.digester.Hash().Write(p[:n])
w.status.Offset += int64(len(p))
w.status.UpdatedAt = time.Now()
return n, err
}
func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
var base content.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return err
}
}
if w.file == nil {
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
}
file := w.file
w.file = nil
if err := file.Sync(); err != nil {
file.Close()
return errors.Wrap(err, "sync failed")
}
fileInfo, err := file.Stat()
if err != nil {
file.Close()
return errors.Wrap(err, "stat failed")
}
if err := file.Close(); err != nil {
return errors.Wrap(err, "failed to close file")
}
if size > 0 && size != fileInfo.Size() {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size)
}
if dgst := w.digester.Digest(); expected != "" && expected != dgst {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
}
w.store.set(w.desc)
if w.afterCommit != nil {
return w.afterCommit()
}
return nil
}
// Close the writer, flushing any unwritten data and leaving the progress in
// tact.
func (w *fileWriter) Close() error {
if w.file == nil {
return nil
}
w.file.Sync()
err := w.file.Close()
w.file = nil
return err
}
func (w *fileWriter) Truncate(size int64) error {
if size != 0 {
return ErrUnsupportedSize
}
w.status.Offset = 0
w.digester.Hash().Reset()
if _, err := w.file.Seek(0, io.SeekStart); err != nil {
return err
}
return w.file.Truncate(0)
}
| 568
| 0
| 65
|
230b134009ad25e00f5dd4e42fe32cc5038b7a5c
| 4,180
|
py
|
Python
|
time_series_experiments/nbeats/blocks/_base.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | null | null | null |
time_series_experiments/nbeats/blocks/_base.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | 4
|
2020-10-11T15:14:48.000Z
|
2022-02-10T02:28:07.000Z
|
time_series_experiments/nbeats/blocks/_base.py
|
vikua/time-series-experiments
|
2f9d3fa842866c39c8c1a9906c8c5d4870a6f7da
|
[
"MIT"
] | null | null | null |
from tensorflow import keras
from tensorflow.keras import backend as K
| 34.262295
| 88
| 0.583493
|
from tensorflow import keras
from tensorflow.keras import backend as K
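# Fully connected N-BEATS block: several dense layers built by hand, followed by two theta projections (backcast and forecast).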
class Block(keras.layers.Layer):
def __init__(
self,
units,
theta_units,
layers=4,
stack_id=0,
activation="relu",
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super(Block, self).__init__(**kwargs)
self.units = units
self.theta_units = theta_units
self.layers = layers
self.stack_id = stack_id
self.activation = keras.activations.get(activation)
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.weigts = {}
self.biases = {}
self.theta_b_W = None
self.theta_f_W = None
def build(self, input_shape):
super(Block, self).build(input_shape)
input_dim = input_shape[-1]
for i in range(self.layers):
W = self.add_weight(
name="W_stack_{}_layer_{}".format(self.stack_id, i),
shape=(input_dim, self.units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
b = self.add_weight(
name="b_stack_{}_layer_{}".format(self.stack_id, i),
shape=(self.units,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
)
self.weigts[i] = W
self.biases[i] = b
input_dim = self.units
self.theta_b_W = self.add_weight(
name="stack_{}_theta_b_W".format(self.stack_id),
shape=(self.units, self.theta_units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
self.theta_f_W = self.add_weight(
name="stack_{}_theta_f_W".format(self.stack_id),
shape=(self.units, self.theta_units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
def call(self, inputs):
outputs = inputs
for i in range(self.layers):
outputs = K.dot(outputs, self.weigts[i])
outputs = K.bias_add(outputs, self.biases[i], data_format="channels_last")
outputs = self.activation(outputs)
theta_b_output = K.dot(outputs, self.theta_b_W)
theta_f_output = K.dot(outputs, self.theta_f_W)
return theta_b_output, theta_f_output
def get_config(self):
config = super(Block, self).get_config()
config.update(
{
"stack_id": self.stack_id,
"units": self.units,
"layers": self.layers,
"activation": keras.activations.serialize(self.activation),
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(self.bias_initializer),
"kernel_regularizer": keras.regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": keras.regularizers.serialize(self.bias_regularizer),
"kernel_constraint": keras.constraints.serialize(
self.kernel_constraint
),
"bias_constraint": keras.constraints.serialize(self.bias_constraint),
}
)
return config
| 3,967
| 11
| 130
|
0f501a353e1da1c5ed4afdd4c955cf72bd3a1124
| 5,069
|
py
|
Python
|
cail/utils.py
|
Stanford-ILIAD/Confidence-Aware-Imitation-Learning
|
1d8af0e4ab87a025885133a2384d5a937329b2f5
|
[
"MIT"
] | 16
|
2021-10-30T15:19:37.000Z
|
2022-03-23T12:57:49.000Z
|
cail/utils.py
|
syzhang092218-source/Confidence-Aware-Imitation-Learning
|
1d8af0e4ab87a025885133a2384d5a937329b2f5
|
[
"MIT"
] | null | null | null |
cail/utils.py
|
syzhang092218-source/Confidence-Aware-Imitation-Learning
|
1d8af0e4ab87a025885133a2384d5a937329b2f5
|
[
"MIT"
] | 2
|
2021-11-29T11:28:16.000Z
|
2022-03-06T14:12:47.000Z
|
import numpy as np
import torch
import torch.nn as nn
import time
from tqdm import tqdm
from .buffer import Buffer
from .algo.base import Expert
from .env import NormalizedEnv
def soft_update(target, source, tau):
"""Soft update for SAC"""
for t, s in zip(target.parameters(), source.parameters()):
t.data.mul_(1.0 - tau)
t.data.add_(tau * s.data)
def disable_gradient(network: nn.Module):
"""Disable the gradients of parameters in the network"""
for param in network.parameters():
param.requires_grad = False
def add_random_noise(action, std):
"""Add random noise to the action"""
action += np.random.randn(*action.shape) * std
return action.clip(-1.0, 1.0)
def collect_demo(
env: NormalizedEnv,
algo: Expert,
buffer_size: int,
device: torch.device,
std: float,
p_rand: float,
seed: int = 0
):
"""
Collect demonstrations using the well-trained policy
Parameters
----------
env: NormalizedEnv
environment to collect demonstrations
algo: Expert
well-trained algorithm used to collect demonstrations
buffer_size: int
size of the buffer, also the number of s-a pairs in the demonstrations
device: torch.device
cpu or cuda
std: float
        standard deviation added to the policy
p_rand: float
with probability of p_rand, the policy will act randomly
seed: int
random seed
Returns
-------
buffer: Buffer
buffer of demonstrations
mean_return: float
average episode reward
"""
env.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
buffer = Buffer(
buffer_size=buffer_size,
state_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
device=device
)
total_return = 0.0
num_steps = []
num_episodes = 0
state = env.reset()
t = 0
episode_return = 0.0
episode_steps = 0
for _ in tqdm(range(1, buffer_size + 1)):
t += 1
if np.random.rand() < p_rand:
action = env.action_space.sample()
else:
action = algo.exploit(state)
action = add_random_noise(action, std)
next_state, reward, done, _ = env.step(action)
mask = True if t == env.max_episode_steps else done
buffer.append(state, action, reward, mask, next_state)
episode_return += reward
episode_steps += 1
if done or t == env.max_episode_steps:
num_episodes += 1
total_return += episode_return
state = env.reset()
t = 0
episode_return = 0.0
num_steps.append(episode_steps)
episode_steps = 0
state = next_state
mean_return = total_return / num_episodes
print(f'Mean return of the expert is {mean_return}')
print(f'Max episode steps is {np.max(num_steps)}')
print(f'Min episode steps is {np.min(num_steps)}')
return buffer, mean_return
def evaluation(
env: NormalizedEnv,
algo: Expert,
episodes: int,
render: bool,
seed: int = 0,
delay: float = 0.03
):
"""
Evaluate the well-trained policy
Parameters
----------
env: NormalizedEnv
environment to evaluate the policy
algo: Expert
well-trained policy to be evaluated
episodes: int
number of episodes used in evaluation
render: bool
render the environment or not
seed: int
random seed
delay: float
number of seconds to delay while rendering, in case the agent moves too fast
Returns
-------
mean_return: float
average episode reward
"""
env.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
total_return = 0.0
num_episodes = 0
num_steps = []
state = env.reset()
t = 0
episode_return = 0.0
episode_steps = 0
while num_episodes < episodes:
t += 1
action = algo.exploit(state)
next_state, reward, done, _ = env.step(action)
episode_return += reward
episode_steps += 1
state = next_state
if render:
env.render()
time.sleep(delay)
if done or t == env.max_episode_steps:
num_episodes += 1
total_return += episode_return
state = env.reset()
t = 0
episode_return = 0.0
num_steps.append(episode_steps)
episode_steps = 0
mean_return = total_return / num_episodes
print(f'Mean return of the policy is {mean_return}')
print(f'Max episode steps is {np.max(num_steps)}')
print(f'Min episode steps is {np.min(num_steps)}')
return mean_return
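# A minimal, self-contained sketch of the two small helpers above (illustrative
# values only; relies on this file's own imports: numpy as np, torch.nn as nn).
# soft_update() moves each target parameter a fraction tau toward the source;
# add_random_noise() perturbs an action and clips it back into [-1, 1].
if __name__ == '__main__':
    source_net = nn.Linear(4, 2)
    target_net = nn.Linear(4, 2)
    soft_update(target_net, source_net, tau=0.005)  # target <- 0.995*target + 0.005*source
    noisy = add_random_noise(np.array([0.9, -0.2]), std=0.1)
    print(noisy)  # always stays within [-1, 1]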
| 25.994872
| 85
| 0.583547
|
import numpy as np
import torch
import torch.nn as nn
import time
from tqdm import tqdm
from .buffer import Buffer
from .algo.base import Expert
from .env import NormalizedEnv
def soft_update(target, source, tau):
"""Soft update for SAC"""
for t, s in zip(target.parameters(), source.parameters()):
t.data.mul_(1.0 - tau)
t.data.add_(tau * s.data)
def disable_gradient(network: nn.Module):
"""Disable the gradients of parameters in the network"""
for param in network.parameters():
param.requires_grad = False
def add_random_noise(action, std):
"""Add random noise to the action"""
action += np.random.randn(*action.shape) * std
return action.clip(-1.0, 1.0)
def collect_demo(
env: NormalizedEnv,
algo: Expert,
buffer_size: int,
device: torch.device,
std: float,
p_rand: float,
seed: int = 0
):
"""
Collect demonstrations using the well-trained policy
Parameters
----------
env: NormalizedEnv
environment to collect demonstrations
algo: Expert
well-trained algorithm used to collect demonstrations
buffer_size: int
size of the buffer, also the number of s-a pairs in the demonstrations
device: torch.device
cpu or cuda
std: float
        standard deviation added to the policy
p_rand: float
with probability of p_rand, the policy will act randomly
seed: int
random seed
Returns
-------
buffer: Buffer
buffer of demonstrations
mean_return: float
average episode reward
"""
env.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
buffer = Buffer(
buffer_size=buffer_size,
state_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
device=device
)
total_return = 0.0
num_steps = []
num_episodes = 0
state = env.reset()
t = 0
episode_return = 0.0
episode_steps = 0
for _ in tqdm(range(1, buffer_size + 1)):
t += 1
if np.random.rand() < p_rand:
action = env.action_space.sample()
else:
action = algo.exploit(state)
action = add_random_noise(action, std)
next_state, reward, done, _ = env.step(action)
mask = True if t == env.max_episode_steps else done
buffer.append(state, action, reward, mask, next_state)
episode_return += reward
episode_steps += 1
if done or t == env.max_episode_steps:
num_episodes += 1
total_return += episode_return
state = env.reset()
t = 0
episode_return = 0.0
num_steps.append(episode_steps)
episode_steps = 0
state = next_state
mean_return = total_return / num_episodes
print(f'Mean return of the expert is {mean_return}')
print(f'Max episode steps is {np.max(num_steps)}')
print(f'Min episode steps is {np.min(num_steps)}')
return buffer, mean_return
def evaluation(
env: NormalizedEnv,
algo: Expert,
episodes: int,
render: bool,
seed: int = 0,
delay: float = 0.03
):
"""
Evaluate the well-trained policy
Parameters
----------
env: NormalizedEnv
environment to evaluate the policy
algo: Expert
well-trained policy to be evaluated
episodes: int
number of episodes used in evaluation
render: bool
render the environment or not
seed: int
random seed
delay: float
number of seconds to delay while rendering, in case the agent moves too fast
Returns
-------
mean_return: float
average episode reward
"""
env.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
total_return = 0.0
num_episodes = 0
num_steps = []
state = env.reset()
t = 0
episode_return = 0.0
episode_steps = 0
while num_episodes < episodes:
t += 1
action = algo.exploit(state)
next_state, reward, done, _ = env.step(action)
episode_return += reward
episode_steps += 1
state = next_state
if render:
env.render()
time.sleep(delay)
if done or t == env.max_episode_steps:
num_episodes += 1
total_return += episode_return
state = env.reset()
t = 0
episode_return = 0.0
num_steps.append(episode_steps)
episode_steps = 0
mean_return = total_return / num_episodes
print(f'Mean return of the policy is {mean_return}')
print(f'Max episode steps is {np.max(num_steps)}')
print(f'Min episode steps is {np.min(num_steps)}')
return mean_return
| 0
| 0
| 0
|
83cf34bf170321319bfa3699b032ea060d189625
| 4,204
|
py
|
Python
|
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/forward_rate_agreement_test.py
|
DevarakondaV/tf-quant-finance
|
4502b843ca138c2ae8ad77978a2cf52fa38dbbe5
|
[
"Apache-2.0"
] | 1
|
2021-04-22T15:48:38.000Z
|
2021-04-22T15:48:38.000Z
|
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/forward_rate_agreement_test.py
|
dsdinter/tf-quant-finance
|
b2b27e682cc091d251a53515fef96b14812acb1c
|
[
"Apache-2.0"
] | null | null | null |
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/forward_rate_agreement_test.py
|
dsdinter/tf-quant-finance
|
b2b27e682cc091d251a53515fef96b14812acb1c
|
[
"Apache-2.0"
] | 1
|
2021-01-30T09:32:16.000Z
|
2021-01-30T09:32:16.000Z
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for forward rate agreement."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
framework = tff.experimental.pricing_platform.framework
business_days = framework.core.business_days
currencies = framework.core.currencies
daycount_conventions = framework.core.daycount_conventions
interpolation_method = framework.core.interpolation_method
instrument_protos = tff.experimental.pricing_platform.instrument_protos
date_pb2 = instrument_protos.date
decimal_pb2 = instrument_protos.decimal
period_pb2 = instrument_protos.period
fra_pb2 = instrument_protos.forward_rate_agreement
rate_instruments = tff.experimental.pricing_platform.framework.rate_instruments
forward_rate_agreement = rate_instruments.forward_rate_agreement
market_data = tff.experimental.pricing_platform.framework.market_data
DayCountConventions = daycount_conventions.DayCountConventions
BusinessDayConvention = business_days.BusinessDayConvention
RateIndex = instrument_protos.rate_indices.RateIndex
Currency = currencies.Currency
@test_util.run_all_in_graph_and_eager_modes
if __name__ == "__main__":
tf.test.main()
| 41.623762
| 95
| 0.74215
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for forward rate agreement."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
framework = tff.experimental.pricing_platform.framework
business_days = framework.core.business_days
currencies = framework.core.currencies
daycount_conventions = framework.core.daycount_conventions
interpolation_method = framework.core.interpolation_method
instrument_protos = tff.experimental.pricing_platform.instrument_protos
date_pb2 = instrument_protos.date
decimal_pb2 = instrument_protos.decimal
period_pb2 = instrument_protos.period
fra_pb2 = instrument_protos.forward_rate_agreement
rate_instruments = tff.experimental.pricing_platform.framework.rate_instruments
forward_rate_agreement = rate_instruments.forward_rate_agreement
market_data = tff.experimental.pricing_platform.framework.market_data
DayCountConventions = daycount_conventions.DayCountConventions
BusinessDayConvention = business_days.BusinessDayConvention
RateIndex = instrument_protos.rate_indices.RateIndex
Currency = currencies.Currency
@test_util.run_all_in_graph_and_eager_modes
class ForwardRateAgreementTest(tf.test.TestCase):
def test_from_proto_price(self):
fra_1 = fra_pb2.ForwardRateAgreement(
short_position=True,
fixing_date=date_pb2.Date(year=2021, month=5, day=21),
currency=Currency.USD(),
fixed_rate=decimal_pb2.Decimal(nanos=31340000),
notional_amount=decimal_pb2.Decimal(units=10000),
daycount_convention=DayCountConventions.ACTUAL_360(),
business_day_convention=BusinessDayConvention.MODIFIED_FOLLOWING(),
floating_rate_term=fra_pb2.FloatingRateTerm(
floating_rate_type=RateIndex(type="LIBOR_3M"),
term=period_pb2.Period(type="MONTH", amount=3)),
settlement_days=2)
fra_2 = fra_pb2.ForwardRateAgreement(
short_position=False,
fixing_date=date_pb2.Date(year=2021, month=5, day=21),
currency=Currency.USD(),
fixed_rate=decimal_pb2.Decimal(nanos=31340000),
notional_amount=decimal_pb2.Decimal(units=10000),
daycount_convention=DayCountConventions.ACTUAL_365(),
business_day_convention=BusinessDayConvention.MODIFIED_FOLLOWING(),
floating_rate_term=fra_pb2.FloatingRateTerm(
floating_rate_type=RateIndex(type="LIBOR_3M"),
term=period_pb2.Period(type="MONTH", amount=3)),
settlement_days=2)
date = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],
[2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]
discount = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,
0.72494879, 0.37602059]
market_data_dict = {"USD": {
"risk_free_curve":
{"dates": date, "discounts": discount},
"LIBOR_3M":
{"dates": date, "discounts": discount},}}
valuation_date = [(2020, 2, 8)]
market = market_data.MarketDataDict(valuation_date, market_data_dict)
fra_portfolio = forward_rate_agreement.ForwardRateAgreement.from_protos(
[fra_1, fra_2, fra_1])
with self.subTest("Batching"):
self.assertLen(fra_portfolio, 2)
price1 = fra_portfolio[0].price(market)
expected1 = np.array([4.05463257, 4.05463257])
with self.subTest("PriceBatch"):
self.assertAllClose(price1, expected1)
price2 = fra_portfolio[1].price(market)
expected2 = np.array([-5.10228969])
with self.subTest("PriceSingle"):
self.assertAllClose(price2, expected2)
if __name__ == "__main__":
tf.test.main()
| 2,302
| 28
| 47
|
24437d77fe7c0ec9561b24914a72b35bfd70e5ea
| 7,035
|
py
|
Python
|
.template_simulation/collect_charges.py
|
lukaselflein/sarah_folderstructure
|
a725271db3d8b5b28b24918b3daf0942fa04dcd8
|
[
"MIT"
] | null | null | null |
.template_simulation/collect_charges.py
|
lukaselflein/sarah_folderstructure
|
a725271db3d8b5b28b24918b3daf0942fa04dcd8
|
[
"MIT"
] | 28
|
2019-03-29T13:34:57.000Z
|
2019-07-04T09:27:07.000Z
|
.template_simulation/collect_charges.py
|
lukaselflein/sarah_folderstructure
|
a725271db3d8b5b28b24918b3daf0942fa04dcd8
|
[
"MIT"
] | null | null | null |
""" Extract charges obtained via HORTON and Bader.
Copyright 2019 Simulation Lab
University of Freiburg
Author: Lukas Elflein <elfleinl@cs.uni-freiburg.de>
"""
import os
import pandas as pd
def create_dir(path='./plotting'):
"""Create new folder for pictures if it does not exist yet."""
if os.path.isdir(path):
return path
os.makedirs(path)
return path
def collect_bader():
"""Find charges and put them in one dataframe."""
# Initialize collection data frame
coll_df = None
# Crawl the directory structure
for subdir, dirs, files in sorted(os.walk('./')):
# Exclude template folders from search
if 'template' in subdir or 'exclude' in subdir:
continue
# Select the folders with cost function
if 'bader_charges' in subdir:
print('Moving to {}'.format(subdir))
# Extract timestamp
time = os.path.split(subdir)[0].replace('./', '').replace('_ps_snapshot', '')
time = int(time)
# Use the first charge file to come across as a template
df = pd.read_csv(os.path.join(subdir, 'bader_charges.csv'), sep=r',\s*',
engine='python')
df['timestamp'] = time
if coll_df is None:
coll_df = df
else:
coll_df = coll_df.append(df)
# The table still contains redundant hydrogen atoms: 1CD3... 2CB3
# Delete everything containing '1C' or '2C'
# print(coll_df[coll_df.atom.str.contains(r'[1-2]C')])
coll_df = coll_df.drop(coll_df[coll_df.atom.str.contains(r'[1-2]C')].index)
print('All collected. Transforming wide to long format ...')
# Transform the wide format into a long format version (for easier plotting)
coll_df = coll_df.rename({'q': 'bader'}, axis=1)
coll_df = pd.melt(coll_df, id_vars=['atom', 'residue', 'timestamp'],
value_vars=['bader'])
coll_df = coll_df.rename({'value': 'charge', 'variable': 'Calculation Variant'},
axis=1)
return coll_df
def collect_horton():
"""Find charges and put them in one dataframe."""
# Initialize collection data frame
coll_df = None
# Crawl the directory structure
for subdir, dirs, files in sorted(os.walk('./')):
# Exclude template folders from search
if 'template' in subdir or 'exclude' in subdir or 'sweep' in subdir:
continue
# Select the folders with cost function
if 'horton_cost_function' in subdir:
print('Moving to {}'.format(subdir))
# Extract timestamp
time = os.path.split(subdir)[0].replace('./', '').replace('_ps_snapshot', '')
time = time.replace('/4_horton_cost_function', '')
time = int(time)
# Use the first charge file to come across as a template
df = pd.read_csv(os.path.join(subdir, 'fitted_point_charges.csv'))
df['timestamp'] = time
if coll_df is None:
coll_df = df
else:
coll_df = coll_df.append(df)
print('All collected. Transforming wide to long format ...')
# Transform the wide format into a long format version (for easier plotting)
coll_df = coll_df.rename({'q': 'constrained', 'q_unconstrained': 'unconstrained'}, axis=1)
coll_df = pd.melt(coll_df, id_vars=['atom', 'residue', 'timestamp'],
value_vars=['constrained', 'unconstrained'])
coll_df = coll_df.rename({'value': 'charge', 'variable': 'Calculation Variant'}, axis=1)
return coll_df
def extract_init_charges(rtp_path, df):
"""Extract charges from rtp file"""
atom_names = df.atom.unique()
residuum_names = df.residue.unique()
charges = pd.DataFrame()
with open(rtp_path, 'r') as rtp_file:
        print('Successfully loaded topology file {}'.format(rtp_path))
rtp_text = rtp_file.readlines()
current_residuum = None
for line in rtp_text:
# atom names are only unique inside one residuum
# Thus, specify which res we are currently in
for residuum in residuum_names:
if residuum in line:
current_residuum = residuum
break
# Now, we can look up the atom name in the charge table.
# First, select the lines with exactly one atom name
for atom_name in atom_names:
# Select lines with at least one atom name
if atom_name in line[0:7]:
second_entry = line[8:18].replace('+', '')
second_entry = second_entry.replace('-', '').strip()
# Select lines with no atom name in second column
if not second_entry in atom_names:
q_value = float(line[24:34].strip(' '))
charges = charges.append({'atom': atom_name,
'residue': current_residuum,
'q_init': q_value},
ignore_index=True)
return charges
def collect_average():
"""Put averaged charegs in a dataframe."""
# Read charges from averaged cost function
input_path = './horton_charges/fitted_point_charges.csv'
avg_df = pd.read_csv(input_path)
# Rename columns for consistency
avg_df = avg_df.rename({'q': 'averaged cost function'}, axis=1)
# Transform to long format
avg_df = pd.melt(avg_df, id_vars=['atom', 'residue'], value_vars=['averaged cost function'])
avg_df = avg_df.rename({'value': 'charge', 'variable': 'Calculation Variant'}, axis=1)
return avg_df
def main():
"""Collect charges and save them to .csv file"""
# Collect averaged charges
avg_df = collect_average()
print(avg_df.loc[avg_df.atom == 'NA2'])
# Collect all horton charges
print('Collecting HORTON charges ...')
horton_df = collect_horton()
print(horton_df.loc[horton_df.atom == 'NA2'])
# Collect all bader charges
print('Collecting Bader charges ...')
bader_df = collect_bader()
# Paste everything into single dataframe
print('Combining different charges into one table ... ')
constr_df = horton_df.loc[horton_df['Calculation Variant'] == 'constrained']
unconstr_df = horton_df.loc[horton_df['Calculation Variant'] == 'unconstrained']
collect_df = avg_df
collect_df = collect_df.append(constr_df, sort=False)
collect_df = collect_df.append(unconstr_df, sort=False)
collect_df = collect_df.append(bader_df, sort=False)
create_dir(path='./plotting')
collect_df.to_csv('./plotting/all_charges.csv')
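# A small, self-contained sketch of the wide-to-long transform performed by
# collect_horton() and collect_bader(): identifier columns are kept and the
# charge columns are stacked into 'Calculation Variant' / 'charge'. The atom,
# residue and charge values below are made up for illustration only.
def _melt_example():
    """Illustrative only: mirrors the pd.melt + rename pattern used above."""
    demo = pd.DataFrame({'atom': ['NA2'], 'residue': ['terB'], 'timestamp': [100],
                         'constrained': [0.41], 'unconstrained': [0.38]})
    long_df = pd.melt(demo, id_vars=['atom', 'residue', 'timestamp'],
                      value_vars=['constrained', 'unconstrained'])
    return long_df.rename({'value': 'charge', 'variable': 'Calculation Variant'}, axis=1)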
if __name__ == '__main__':
main()
| 39.971591
| 102
| 0.585785
|
""" Extract charges obtained via HORTON and Bader.
Copyright 2019 Simulation Lab
University of Freiburg
Author: Lukas Elflein <elfleinl@cs.uni-freiburg.de>
"""
import os
import pandas as pd
def create_dir(path='./plotting'):
"""Create new folder for pictures if it does not exist yet."""
if os.path.isdir(path):
return path
os.makedirs(path)
return path
def collect_bader():
"""Find charges and put them in one dataframe."""
# Initialize collection data frame
coll_df = None
# Crawl the directory structure
for subdir, dirs, files in sorted(os.walk('./')):
# Exclude template folders from search
if 'template' in subdir or 'exclude' in subdir:
continue
# Select the folders with cost function
if 'bader_charges' in subdir:
print('Moving to {}'.format(subdir))
# Extract timestamp
time = os.path.split(subdir)[0].replace('./', '').replace('_ps_snapshot', '')
time = int(time)
# Use the first charge file to come across as a template
df = pd.read_csv(os.path.join(subdir, 'bader_charges.csv'), sep=r',\s*',
engine='python')
df['timestamp'] = time
if coll_df is None:
coll_df = df
else:
coll_df = coll_df.append(df)
# The table still contains redundant hydrogen atoms: 1CD3... 2CB3
# Delete everything containing '1C' or '2C'
# print(coll_df[coll_df.atom.str.contains(r'[1-2]C')])
coll_df = coll_df.drop(coll_df[coll_df.atom.str.contains(r'[1-2]C')].index)
print('All collected. Transforming wide to long format ...')
# Transform the wide format into a long format version (for easier plotting)
coll_df = coll_df.rename({'q': 'bader'}, axis=1)
coll_df = pd.melt(coll_df, id_vars=['atom', 'residue', 'timestamp'],
value_vars=['bader'])
coll_df = coll_df.rename({'value': 'charge', 'variable': 'Calculation Variant'},
axis=1)
return coll_df
def collect_horton():
"""Find charges and put them in one dataframe."""
# Initialize collection data frame
coll_df = None
# Crawl the directory structure
for subdir, dirs, files in sorted(os.walk('./')):
# Exclude template folders from search
if 'template' in subdir or 'exclude' in subdir or 'sweep' in subdir:
continue
# Select the folders with cost function
if 'horton_cost_function' in subdir:
print('Moving to {}'.format(subdir))
# Extract timestamp
time = os.path.split(subdir)[0].replace('./', '').replace('_ps_snapshot', '')
time = time.replace('/4_horton_cost_function', '')
time = int(time)
# Use the first charge file to come across as a template
df = pd.read_csv(os.path.join(subdir, 'fitted_point_charges.csv'))
df['timestamp'] = time
if coll_df is None:
coll_df = df
else:
coll_df = coll_df.append(df)
print('All collected. Transforming wide to long format ...')
# Transform the wide format into a long format version (for easier plotting)
coll_df = coll_df.rename({'q': 'constrained', 'q_unconstrained': 'unconstrained'}, axis=1)
coll_df = pd.melt(coll_df, id_vars=['atom', 'residue', 'timestamp'],
value_vars=['constrained', 'unconstrained'])
coll_df = coll_df.rename({'value': 'charge', 'variable': 'Calculation Variant'}, axis=1)
return coll_df
def extract_init_charges(rtp_path, df):
"""Extract charges from rtp file"""
atom_names = df.atom.unique()
residuum_names = df.residue.unique()
charges = pd.DataFrame()
with open(rtp_path, 'r') as rtp_file:
        print('Successfully loaded topology file {}'.format(rtp_path))
rtp_text = rtp_file.readlines()
current_residuum = None
for line in rtp_text:
# atom names are only unique inside one residuum
# Thus, specify which res we are currently in
for residuum in residuum_names:
if residuum in line:
current_residuum = residuum
break
# Now, we can look up the atom name in the charge table.
# First, select the lines with exactly one atom name
for atom_name in atom_names:
# Select lines with at least one atom name
if atom_name in line[0:7]:
second_entry = line[8:18].replace('+', '')
second_entry = second_entry.replace('-', '').strip()
# Select lines with no atom name in second column
if not second_entry in atom_names:
q_value = float(line[24:34].strip(' '))
charges = charges.append({'atom': atom_name,
'residue': current_residuum,
'q_init': q_value},
ignore_index=True)
return charges
def collect_average():
"""Put averaged charegs in a dataframe."""
# Read charges from averaged cost function
input_path = './horton_charges/fitted_point_charges.csv'
avg_df = pd.read_csv(input_path)
# Rename columns for consistency
avg_df = avg_df.rename({'q': 'averaged cost function'}, axis=1)
# Transform to long format
avg_df = pd.melt(avg_df, id_vars=['atom', 'residue'], value_vars=['averaged cost function'])
avg_df = avg_df.rename({'value': 'charge', 'variable': 'Calculation Variant'}, axis=1)
return avg_df
def main():
"""Collect charges and save them to .csv file"""
# Collect averaged charges
avg_df = collect_average()
print(avg_df.loc[avg_df.atom == 'NA2'])
# Collect all horton charges
print('Collecting HORTON charges ...')
horton_df = collect_horton()
print(horton_df.loc[horton_df.atom == 'NA2'])
# Collect all bader charges
print('Collecting Bader charges ...')
bader_df = collect_bader()
# Paste everything into single dataframe
print('Combining different charges into one table ... ')
constr_df = horton_df.loc[horton_df['Calculation Variant'] == 'constrained']
unconstr_df = horton_df.loc[horton_df['Calculation Variant'] == 'unconstrained']
collect_df = avg_df
collect_df = collect_df.append(constr_df, sort=False)
collect_df = collect_df.append(unconstr_df, sort=False)
collect_df = collect_df.append(bader_df, sort=False)
create_dir(path='./plotting')
collect_df.to_csv('./plotting/all_charges.csv')
if __name__ == '__main__':
main()
| 0
| 0
| 0
|
453864fe3cdf4c08c938afaf223db5f6a52e6a03
| 8,391
|
py
|
Python
|
main.py
|
foorschtbar/speedtest_ookla-to-influxdb
|
901b69fe57f314150a8383e2db2814f3dc7a5674
|
[
"MIT"
] | null | null | null |
main.py
|
foorschtbar/speedtest_ookla-to-influxdb
|
901b69fe57f314150a8383e2db2814f3dc7a5674
|
[
"MIT"
] | null | null | null |
main.py
|
foorschtbar/speedtest_ookla-to-influxdb
|
901b69fe57f314150a8383e2db2814f3dc7a5674
|
[
"MIT"
] | null | null | null |
import os
import time
import json
import datetime
import subprocess
from pythonping import ping
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
from multiprocessing import Process
# InfluxDB Settings
NAMESPACE = os.getenv('NAMESPACE', 'None')
DB_URL = os.getenv('INFLUX_DB_URL', 'http://localhost')
DB_TOKEN = os.getenv('INFLUX_DB_TOKEN', 'my-token')
DB_ORG = os.getenv('INFLUX_DB_ORG', 'my-org')
DB_BUCKET = os.getenv('INFLUX_DB_BUCKET', 'my-bucket')
DB_TAGS = os.getenv('INFLUX_DB_TAGS', None)
PING_TARGETS = os.getenv('PING_TARGETS', '1.1.1.1, 8.8.8.8')
# Speedtest Settings
# Time between tests (in minutes, converts to seconds).
TEST_INTERVAL = int(os.getenv('SPEEDTEST_INTERVAL', '5')) * 60
# Time before retrying a failed Speedtest (in minutes, converts to seconds).
TEST_FAIL_INTERVAL = int(os.getenv('SPEEDTEST_FAIL_INTERVAL', '5')) * 60
# Specific server ID
SERVER_ID = os.getenv('SPEEDTEST_SERVER_ID', '')
# Time between ping tests (in seconds).
PING_INTERVAL = int(os.getenv('PING_INTERVAL', '5'))
with InfluxDBClient(url=DB_URL, token=DB_TOKEN, org=DB_ORG) as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
pass
# time.sleep(TEST_FAIL_INTERVAL)
if __name__ == '__main__':
print('Speedtest CLI data logger to InfluxDB started...')
main()
| 36.324675
| 474
| 0.546538
|
import os
import time
import json
import datetime
import subprocess
from pythonping import ping
from influxdb_client import InfluxDBClient
from influxdb_client.client.exceptions import InfluxDBError  # needed by the except clauses below
from influxdb_client.client.write_api import SYNCHRONOUS
from multiprocessing import Process
# InfluxDB Settings
NAMESPACE = os.getenv('NAMESPACE', 'None')
DB_URL = os.getenv('INFLUX_DB_URL', 'http://localhost')
DB_TOKEN = os.getenv('INFLUX_DB_TOKEN', 'my-token')
DB_ORG = os.getenv('INFLUX_DB_ORG', 'my-org')
DB_BUCKET = os.getenv('INFLUX_DB_BUCKET', 'my-bucket')
DB_TAGS = os.getenv('INFLUX_DB_TAGS', None)
PING_TARGETS = os.getenv('PING_TARGETS', '1.1.1.1, 8.8.8.8')
# Speedtest Settings
# Time between tests (in minutes, converts to seconds).
TEST_INTERVAL = int(os.getenv('SPEEDTEST_INTERVAL', '5')) * 60
# Time before retrying a failed Speedtest (in minutes, converts to seconds).
TEST_FAIL_INTERVAL = int(os.getenv('SPEEDTEST_FAIL_INTERVAL', '5')) * 60
# Specific server ID
SERVER_ID = os.getenv('SPEEDTEST_SERVER_ID', '')
# Time between ping tests (in seconds).
PING_INTERVAL = int(os.getenv('PING_INTERVAL', '5'))
with InfluxDBClient(url=DB_URL, token=DB_TOKEN, org=DB_ORG) as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
pass
def init_db():
pass
def pkt_loss(data):
if 'packetLoss' in data.keys():
return int(data['packetLoss'])
else:
return 0
def tag_selection(data):
tags = DB_TAGS
options = {}
    # tag_switch takes in _data and attaches CLI output to more readable ids
tag_switch = {
'namespace': NAMESPACE,
'isp': data['isp'],
'interface': data['interface']['name'],
'internal_ip': data['interface']['internalIp'],
'interface_mac': data['interface']['macAddr'],
'vpn_enabled': (False if data['interface']['isVpn'] == 'false' else True),
'external_ip': data['interface']['externalIp'],
'server_id': data['server']['id'],
'server_name': data['server']['name'],
'server_location': data['server']['location'],
'server_country': data['server']['country'],
'server_host': data['server']['host'],
'server_port': data['server']['port'],
'server_ip': data['server']['ip'],
'speedtest_id': data['result']['id'],
'speedtest_url': data['result']['url']
}
if tags is None:
tags = 'namespace'
elif '*' in tags:
return tag_switch
else:
tags = 'namespace, ' + tags
tags = tags.split(',')
for tag in tags:
# split the tag string, strip and add selected tags to {options} with corresponding tag_switch data
tag = tag.strip()
options[tag] = tag_switch[tag]
return options
def format_for_influx(data):
# There is additional data in the speedtest-cli output but it is likely not necessary to store.
influx_data = [
{
'measurement': 'ping',
'time': data['timestamp'],
'fields': {
'jitter': data['ping']['jitter'],
'latency': data['ping']['latency']
}
},
{
'measurement': 'download',
'time': data['timestamp'],
'fields': {
# Byte to Megabit
'bandwidth': data['download']['bandwidth'] / 125000,
'bytes': data['download']['bytes'],
'elapsed': data['download']['elapsed']
}
},
{
'measurement': 'upload',
'time': data['timestamp'],
'fields': {
# Byte to Megabit
'bandwidth': data['upload']['bandwidth'] / 125000,
'bytes': data['upload']['bytes'],
'elapsed': data['upload']['elapsed']
}
},
{
'measurement': 'packetLoss',
'time': data['timestamp'],
'fields': {
'packetLoss': pkt_loss(data)
}
},
{
'measurement': 'speeds',
'time': data['timestamp'],
'fields': {
'jitter': data['ping']['jitter'],
'latency': data['ping']['latency'],
'packetLoss': pkt_loss(data),
# Byte to Megabit
'bandwidth_down': data['download']['bandwidth'] / 125000,
'bytes_down': data['download']['bytes'],
'elapsed_down': data['download']['elapsed'],
# Byte to Megabit
'bandwidth_up': data['upload']['bandwidth'] / 125000,
'bytes_up': data['upload']['bytes'],
'elapsed_up': data['upload']['elapsed']
}
}
]
tags = tag_selection(data)
if tags is not None:
for measurement in influx_data:
measurement['tags'] = tags
return influx_data
def speedtest():
if not SERVER_ID:
speedtest = subprocess.run(
["speedtest", "--accept-license", "--accept-gdpr", "-f", "json"], capture_output=True)
print("Automatic server choice")
else:
speedtest = subprocess.run(
["speedtest", "--accept-license", "--accept-gdpr", "-f", "json", "--server-id=" + SERVER_ID], capture_output=True)
print("Manual server choice : ID = " + SERVER_ID)
if speedtest.returncode == 0: # Speedtest was successful.
print("Speedtest Successful :")
data_json = json.loads(speedtest.stdout)
print("time: " + str(data_json['timestamp']) + " - ping: " + str(data_json['ping']['latency']) + " ms - download: " + str(data_json['download']['bandwidth']/125000) + " Mb/s - upload: " + str(data_json['upload']['bandwidth'] / 125000) + " Mb/s - isp: " + data_json['isp'] + " - ext. IP: " + data_json['interface']['externalIp'] + " - server id: " + str(data_json['server']['id']) + " (" + data_json['server']['name'] + " @ " + data_json['server']['location'] + ")")
data = format_for_influx(data_json)
try:
write_api.write(bucket=DB_BUCKET, record=data)
print("Speedtest data written to DB successfully")
except InfluxDBError as e:
print("Speedtest data write failed.")
else: # Speedtest failed.
print("Speedtest Failed :")
print(speedtest.stderr)
print(speedtest.stdout)
# time.sleep(TEST_FAIL_INTERVAL)
def pingtest():
timestamp = datetime.datetime.utcnow()
for target in PING_TARGETS.split(','):
target = target.strip()
pingtest = ping(target, verbose=False, timeout=1, count=1, size=128)
data = [
{
'measurement': 'pings',
'time': timestamp,
'tags': {
'namespace': NAMESPACE,
'target' : target
},
'fields': {
'success' : int(pingtest._responses[0].error_message is None),
'rtt': float(0 if pingtest._responses[0].error_message is not None else pingtest.rtt_avg_ms)
}
}
]
try:
write_api.write(bucket=DB_BUCKET, record=data)
print("Ping data written to DB successfully")
except InfluxDBError as e:
print("Ping data write failed.")
def main():
pPing = Process(target=pingtest)
pSpeed = Process(target=speedtest)
init_db() # Setup the database if it does not already exist.
loopcount = 0
while (1): # Run a Speedtest and send the results to influxDB indefinitely.
if loopcount == 0 or loopcount % PING_INTERVAL == 0:
if pPing.is_alive():
pPing.terminate()
pPing = Process(target=pingtest)
pPing.start()
if loopcount == 0 or loopcount % TEST_INTERVAL == 0:
if pSpeed.is_alive():
pSpeed.terminate()
pSpeed = Process(target=speedtest)
pSpeed.start()
if loopcount % ( PING_INTERVAL * TEST_INTERVAL ) == 0:
loopcount = 0
time.sleep(1)
loopcount += 1
if __name__ == '__main__':
print('Speedtest CLI data logger to InfluxDB started...')
main()
| 6,815
| 0
| 175
|
0fd7d1fa2baad176d6e5962f6138008014fa633a
| 2,444
|
py
|
Python
|
Cnc-Calculators-V.2/Moduler/ra.py
|
UniQueKakarot/Redesigned_Cnc-Calculators
|
0ec83234444ecb5765b14bf77782f99e432b5473
|
[
"MIT"
] | null | null | null |
Cnc-Calculators-V.2/Moduler/ra.py
|
UniQueKakarot/Redesigned_Cnc-Calculators
|
0ec83234444ecb5765b14bf77782f99e432b5473
|
[
"MIT"
] | 1
|
2021-06-02T00:32:00.000Z
|
2021-06-02T00:32:00.000Z
|
Cnc-Calculators-V.2/Moduler/ra.py
|
UniQueKakarot/Redesigned_Cnc-Calculators
|
0ec83234444ecb5765b14bf77782f99e432b5473
|
[
"MIT"
] | null | null | null |
""" This module contains the RA calculator """
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.properties import StringProperty
from Moduler.customwidgets import MyLabel
from Moduler.customwidgets import MyTextInput
from Moduler.datasaving import SurfaceRaData
Builder.load_string(
"""
<BoxLayout>:
orientation: 'horizontal'
<MyTextInput>:
<Ra>:
feed: feed
nr: nr
cols: 1
padding: 10
spacing: 10
BoxLayout:
size_hint_y: None
height: "40dp"
Label:
text: "Feedrate: "
MyTextInput:
id: feed
hint_text: "mm/o"
multiline: False
write_tab: False
on_text_validate: root.calc()
BoxLayout:
size_hint_y: None
height: "40dp"
Label:
text: "Nose Radius: "
MyTextInput:
id: nr
hint_text: "mm"
multiline: False
write_tab: False
on_text_validate: root.calc()
BoxLayout:
size_hint_y: None
height: "40dp"
Button:
text: "Calculate!"
on_press: root.calc()
BoxLayout:
#size_hint_y: None
#height: "200dp"
Label:
BoxLayout:
size_hint_y: None
height: "40dp"
MyLabel:
text: "Ra: "
bcolor: [1, 1, 1, 0.15]
MyLabel:
text: root.surface_ra
bcolor: [1, 1, 1, 0.15]
"""
)
class Ra(GridLayout):
""" Main class for the RA module """
surface_ra = StringProperty()
def calc(self):
""" Calculating RA """
try:
feed = self.feed.text
feed = feed.replace(',', '.')
feed = float(feed)
except ValueError:
pass
try:
nose_radius = self.nr.text
nose_radius = nose_radius.replace(',', '.')
nose_radius = float(nose_radius)
except ValueError:
pass
try:
result = ((feed**2) / (nose_radius*24)) * 1000
result = round(result, 2)
except(TypeError, ZeroDivisionError):
result = "Please input values"
self.surface_ra = str(result)
SurfaceRaData("Database.xlsx").filesave(self.feed.text,
self.nr.text,
result)
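# Worked example of the formula in Ra.calc() above (illustrative numbers only):
#     Ra [um] = feed**2 / (nose_radius * 24) * 1000,  with feed and radius in mm
# e.g. feed = 0.2 mm/rev and nose radius = 0.8 mm give
#     0.2**2 / (0.8 * 24) * 1000 = 0.04 / 19.2 * 1000 ≈ 2.08 um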
| 21.068966
| 63
| 0.51473
|
""" This module contains the RA calculator """
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.properties import StringProperty
from Moduler.customwidgets import MyLabel
from Moduler.customwidgets import MyTextInput
from Moduler.datasaving import SurfaceRaData
Builder.load_string(
"""
<BoxLayout>:
orientation: 'horizontal'
<MyTextInput>:
<Ra>:
feed: feed
nr: nr
cols: 1
padding: 10
spacing: 10
BoxLayout:
size_hint_y: None
height: "40dp"
Label:
text: "Feedrate: "
MyTextInput:
id: feed
hint_text: "mm/o"
multiline: False
write_tab: False
on_text_validate: root.calc()
BoxLayout:
size_hint_y: None
height: "40dp"
Label:
text: "Nose Radius: "
MyTextInput:
id: nr
hint_text: "mm"
multiline: False
write_tab: False
on_text_validate: root.calc()
BoxLayout:
size_hint_y: None
height: "40dp"
Button:
text: "Calculate!"
on_press: root.calc()
BoxLayout:
#size_hint_y: None
#height: "200dp"
Label:
BoxLayout:
size_hint_y: None
height: "40dp"
MyLabel:
text: "Ra: "
bcolor: [1, 1, 1, 0.15]
MyLabel:
text: root.surface_ra
bcolor: [1, 1, 1, 0.15]
"""
)
class Ra(GridLayout):
""" Main class for the RA module """
surface_ra = StringProperty()
def calc(self):
""" Calculating RA """
try:
feed = self.feed.text
feed = feed.replace(',', '.')
feed = float(feed)
except ValueError:
pass
try:
nose_radius = self.nr.text
nose_radius = nose_radius.replace(',', '.')
nose_radius = float(nose_radius)
except ValueError:
pass
try:
result = ((feed**2) / (nose_radius*24)) * 1000
result = round(result, 2)
except(TypeError, ZeroDivisionError):
result = "Please input values"
self.surface_ra = str(result)
SurfaceRaData("Database.xlsx").filesave(self.feed.text,
self.nr.text,
result)
| 0
| 0
| 0
|
babbc8cc7067faba7f4cecd9fb2dba005c06f6f1
| 209
|
py
|
Python
|
nhdpy/__init__.py
|
jsta/nhdpy
|
38f52a68907e4d838715c77b18e61450eb775c72
|
[
"MIT"
] | null | null | null |
nhdpy/__init__.py
|
jsta/nhdpy
|
38f52a68907e4d838715c77b18e61450eb775c72
|
[
"MIT"
] | 8
|
2020-11-12T16:42:23.000Z
|
2021-03-04T19:00:09.000Z
|
nhdpy/__init__.py
|
jsta/nhdpy
|
38f52a68907e4d838715c77b18e61450eb775c72
|
[
"MIT"
] | null | null | null |
"""Top-level package for nhdpy."""
__author__ = """Jemma Stachelek"""
__email__ = 'stachel2@msu.edu'
__version__ = '0.1.0'
from .nhdpy import nhd_get
from .nhdpy import nhd_list
from .nhdpy import nhd_load
| 19
| 34
| 0.727273
|
"""Top-level package for nhdpy."""
__author__ = """Jemma Stachelek"""
__email__ = 'stachel2@msu.edu'
__version__ = '0.1.0'
from .nhdpy import nhd_get
from .nhdpy import nhd_list
from .nhdpy import nhd_load
| 0
| 0
| 0
|
df287b191ac5a2dd737815fd551244686d241923
| 25,180
|
py
|
Python
|
mmdet/core/loss/losses.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 62
|
2020-04-15T09:01:23.000Z
|
2022-02-24T04:27:52.000Z
|
mmdet/core/loss/losses.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 10
|
2020-04-15T09:05:19.000Z
|
2022-01-04T08:05:59.000Z
|
mmdet/core/loss/losses.py
|
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
|
67b8955eb59137590dbadc6aac45529ae9459e4a
|
[
"Apache-2.0"
] | 10
|
2020-04-28T06:58:09.000Z
|
2021-11-18T00:57:34.000Z
|
# TODO merge naive and weighted loss.
import numpy as np
import torch
import torch.nn.functional as F
from ..bbox import bbox_overlaps
from ...ops import sigmoid_focal_loss
from ..bbox.transforms import delta2bbox
# added by Shengkai Wu
# implement iou_balanced cross entropy loss.
def iou_balanced_cross_entropy(pred, label, weight, iou, eta = 1.5, avg_factor=None, reduce=True):
"""
    iou_balanced cross entropy loss to make the training process focus more on positives with higher
iou.
    :param pred: tensor of shape (batch*num_samples, num_class)
:param label: tensor of shape (batch*num_samples), store gt labels such as
0, 1, 2, 80 for corresponding class(0 represent background).
:param weight: tensor of shape (batch*num_samples), 1 for all the elements;
:param iou: tensor of shape (batch*num_samples), iou between predicted boxes and corresponding ground
truth boxes for positives and 0 for negatives.
:param eta: control to which extent the training process focuses on the positives with high iou.
:param avg_factor:
:param reduce:
:return:
"""
# avg_factor = batch*num_samples
# if avg_factor is None:
# avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.cross_entropy(pred, label, reduction='none')
target = iou.new_zeros(iou.size(0))
# target_1 = iou.new_zeros(iou.size(0))
# the way to get the indexes of positive example may be wrong; is it right?
# pos_inds_1 = label > 0
# target_1[pos_inds_1] = 1
# modify the way to get the indexes
pos_inds = (label > 0).nonzero().view(-1)
# pos_inds = (label >= 1).nonzero().view(-1)
target[pos_inds] = 1.0
# pos_inds_test = target.nonzero().view(-1)
method_1 = True
normalization = True
method_2 = False
threshold = 0.66
# threshold = torch.min(iou[pos_inds]).item()
method_3 = False
target = target.type_as(pred)
if method_1:
if normalization:
iou_weights = (1 - target) + (target * iou).pow(eta)
# normalized to keep the sum of loss for positive examples unchanged;
raw2 = raw1*iou_weights
normalizer = (raw1 * target).sum() / ((raw2 * target).sum() + 1e-6)
normalized_iou_weights = (1 - target) + (target * iou).pow(eta) * normalizer
normalized_iou_weights = normalized_iou_weights.detach()
raw = raw1*normalized_iou_weights
else:
weight_pos = 1.8
iou_weights = (1 - target) + (target * iou).pow(eta)*weight_pos
iou_weights = iou_weights.detach()
raw = raw1*iou_weights
elif method_2:
iou_weights = (1 - target) + (target*(1 + (iou - threshold))).pow(eta)
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
elif method_3:
ones_weight = iou.new_ones(iou.size(0))
iou_weights_1 = torch.where(iou > threshold, 1.0 + (iou - threshold), ones_weight)
# iou_weights = (1 - target) + (target * iou_weights_1).pow(eta)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
# raw = (raw1 * iou_weights +raw1)/2
# print('test_loss')
if avg_factor is None:
# avg_factor = max(torch.sum(normalized_iou_weights).float().item(), 1.)
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
def consistent_loss(pred, label, weight, iou, avg_factor=None, reduce=True):
"""
    :param pred: tensor of shape (batch*num_samples, num_class)
:param label: tensor of shape (batch*num_samples), store gt labels such as
0, 1, 2, 80 for corresponding class(0 represent background).
:param weight: tensor of shape (batch*num_samples), 1 for all the elements;
:param iou: tensor of shape (batch*num_samples), iou between proposals and corresponding ground
truth boxes for positives and 0 for negatives.
:param avg_factor:
:param reduce:
:return:
"""
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.cross_entropy(pred, label, reduction='none')
target = iou.new_zeros(iou.size(0))
pos_inds = (label > 0).nonzero().view(-1)
target[pos_inds] = 1.0
threshold = 0.5
ones_weight = iou.new_ones(iou.size(0))
iou_weights_1 = torch.where(iou > threshold, 1.0 + (iou - threshold), ones_weight)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
def iou_balanced_binary_cross_entropy(pred, label, weight, iou, eta = 1.5, avg_factor=None, reduce=True):
"""
:param pred: tensor of shape (num_examples, 1)
:param label: tensor of shape (num_examples, 1)
:param weight: tensor of shape (num_examples, 1)
:param iou: tensor of shape (num_examples), containing the iou for all the regressed
positive examples.
:param eta:
:param avg_factor:
:return:
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.binary_cross_entropy_with_logits(pred, label.float(),reduction='none')
target = label.new_zeros(label.size())
# target_1 = iou.new_zeros(iou.size(0))
    # the way to get the indexes of positive example may be wrong; is it right?
# pos_inds_1 = label > 0
# target_1[pos_inds_1] = 1
# modify the way to get the indexes
# label_squeeze = torch.squeeze(label)
# pos_inds = (label > 0).nonzero().view(-1)
# print('the size of label is ', label.size())
pos_inds = (label > 0).nonzero()
# print('the size of label_squeeze is ', label_squeeze.size())
target[pos_inds] = 1
# print('the num of positive examples is', torch.sum(target))
# print('the num of positive examples for target_1 is', torch.sum(target_1))
normalization = True
if normalization:
target = target.type_as(pred)
iou = iou.unsqueeze(-1)
# print('the size of target is ', target.size())
# print('the size of iou is ', iou.size())
# print('the size of iou_1 is ', iou_1.size())
iou_weights = (1 - target) + (target * iou).pow(eta)
# print('the size of iou_weights is ', iou_weights.size())
# print('the size of raw1 is ', raw1.size())
# iou_weights.unsqueeze(1)
# normalized to keep the sum of loss for positive examples unchanged;
raw2 = raw1 * iou_weights
normalizer = (raw1 * target).sum() / ((raw2 * target).sum() + 1e-6)
normalized_iou_weights = (1 - target) + (target * iou).pow(eta) * normalizer
normalized_iou_weights = normalized_iou_weights.detach()
raw = raw1 * normalized_iou_weights
else:
target = target.type_as(pred)
weight_pos = 1.8
iou_weights = (1 - target) + (target * iou).pow(eta) * weight_pos
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
# return F.binary_cross_entropy_with_logits(
# pred, label.float(), weight.float(),
# reduction='sum')[None] / avg_factor
# Known from the definition of weight in file anchor_target.py,
# all the elements of tensor 'weight' are 1.
# added by Shengkai Wu
# The focal loss is only computed for negative examples, and the standard binary cross
# entropy loss is computed for the positive examples. This is designed to investigate
# whether hard example mining for positive examples is beneficial for the performance.
def weighted_sigmoid_focal_loss(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
avg_factor=None,
num_classes=80):
"""
note that
:param pred: tensor of shape (batch*A*width*height, num_class)
:param target: tensor of shape (batch*A*width*height, num_class), only the element for the
positive labels are 1.
:param weight: tensor of shape (batch*A*width*height, num_class), 1 for pos and neg, 0 for the others
:param gamma:
:param alpha:
:param avg_factor:
:param num_classes:
:return:
"""
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6
return py_sigmoid_focal_loss(
pred, target, weight, gamma=gamma, alpha=alpha,
reduction='sum')[None] / avg_factor
# added by Shengkai Wu
# iou-balanced classification loss is designed to strengthen the correlation between classificaiton and
# localization task. The goal is to make that the detections with high IOU with the ground truth boxes also have
# high classification scores.
def iou_balanced_sigmoid_focal_loss(pred,
target,
weight,
iou,
gamma=2.0,
alpha=0.25,
eta=1.5,
avg_factor=None,
num_classes=80):
"""
:param pred: tensor of shape (batch*A*width*height, num_class)
:param target: tensor of shape (batch*A*width*height, num_class), only the positive label is
assigned 1, 0 for others.
:param weight: tensor of shape (batch*A*width*height, num_class), 1 for pos and neg, 0 for the others.
:param iou: tensor of shape (batch*A*width*height), store the iou between predicted boxes and its
corresponding ground truth boxes for the positives and the iou between the predicted boxes and
anchors for negatives.
:param gamma:
:param alpha:
:param eta: control the suppression for the positives of low iou.
:param avg_factor: num_positive_samples. If None,
:param num_classes:
:return:
"""
# if avg_factor is None:
# avg_factor = torch.sum(target).float().item() + 1e-6
# use_diff_thr = True
# pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
loss1 = py_sigmoid_focal_loss(
pred, target, weight, gamma=gamma, alpha=alpha,
reduction='none')
IoU_balanced_Cls = True
threshold = 0.5
if IoU_balanced_Cls:
# compute the normalized weights so that the loss produced by the positive examples
# doesn't change.
iou_expanded = iou.view(-1, 1).expand(-1, target.size()[1])
iou_weights = (1 - target) + (target * iou_expanded).pow(eta)
# iou_weights = iou_weights.detach()
loss2 = loss1*iou_weights
normalizer = (loss1*target).sum()/((loss2*target).sum()+1e-6)
# normalizer = 2.1
normalized_iou_weights = (1-target) + (target*iou_expanded).pow(eta)*normalizer
normalized_iou_weights = normalized_iou_weights.detach()
loss = loss1*normalized_iou_weights
# print('test')
else:
# consistent loss
iou_expanded = iou.view(-1, 1).expand(-1, target.size()[1])
ones_weight = iou_expanded.new_ones(iou_expanded.size())
# print('ones_weight.size() is ', ones_weight.size())
iou_weights_1 = torch.where(iou_expanded > threshold, 1.0 + (iou_expanded - threshold), ones_weight)
# iou_weights = (1 - target) + (target * iou_weights_1).pow(eta)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
# loss = loss1 * iou_weights
balance_factor = 0.6
loss = loss1*balance_factor + loss1 * iou_weights*(1-balance_factor)
return torch.sum(loss)[None] / avg_factor
# Known from the definition of weight in file anchor_target.py,
# the elements of tensor 'weight' for positive proposals are one.
# added by Shengkai Wu
# implement the focal loss for localization task.
def weighted_iou_balanced_smoothl1(pred, target, iou, weight, beta=1.0, delta=1.5, avg_factor=None):
"""
:param pred: tensor of shape (batch*A*width*height, 4) or (batch*num_pos, 4)
:param target: tensor of shape (batch*A*width*height, 4), store the parametrized coordinates of target boxes
for the positive anchors and other values are set to be 0. Or tensor of shape (batch*num_pos, 4)
:param iou: tensor of shape (batch*A*width*height)Or tensor of shape (batch*num_pos), store the iou between
predicted boxes and its corresponding groundtruth boxes for the positives and the iou between the predicted
boxes and anchors for negatives.
:param weight: tensor of shape (batch*A*width*height, 4), only the weights for positive anchors are set to
be 1 and other values are set to be 0. Or tensor of shape (batch*num_pos, 4), all the elements are 1.
:param beta:
:param delta: control the suppression for the outliers.
:param avg_factor:
:return:
"""
# the pred and target are transformed to image domain and represented by top-left and bottom-right corners.
assert pred.size() == target.size() and target.numel() > 0
# ignore the positive examples of which the iou after regression is smaller
# than 0.5;
ignore_outliers = False
iou_threshold = 0.5
if ignore_outliers:
filter = iou.new_zeros(iou.size())
filter_extend = filter.view(-1, 1).expand(-1, 4)
ind = (iou >= iou_threshold).nonzero()
filter[ind] = 1
iou = iou * filter
iou_expanded = iou.view(-1, 1).expand(-1, 4)
iou_weight = weight * iou_expanded.pow(delta)
iou_weight = iou_weight.detach()
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
loss1 = smooth_l1_loss(pred, target, beta, reduction='none')
loss2 = loss1*iou_weight
# loss2 = loss1 *filter_extend
return torch.sum(loss2)[None] / avg_factor
def weighted_iou_regression_loss(iou_pred, iou_target, weight, avg_factor=None):
"""
:param iou_pred: tensor of shape (batch*A*width*height) or (batch*num_pos)
:param iou_target: tensor of shape (batch*A*width*height)Or tensor of shape (batch*num_pos), store the iou between
predicted boxes and its corresponding groundtruth boxes for the positives and the iou between the predicted
boxes and anchors for negatives.
:param weight: tensor of shape (batch*A*width*height) or (batch*num_pos), 1 for positives and 0 for negatives and neutrals.
:param avg_factor:
:return:
"""
# iou_pred_sigmoid = iou_pred.sigmoid()
# iou_target = iou_target.detach()
# L2 loss.
# loss = torch.pow((iou_pred_sigmoid - iou_target), 2)*weight
# Binary cross-entropy loss for the positive examples
loss = F.binary_cross_entropy_with_logits(iou_pred, iou_target, reduction='none')* weight
return torch.sum(loss)[None] / avg_factor
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3, reduction='mean'):
"""Improving Object Localization with Fitness NMS and Bounded IoU Loss,
https://arxiv.org/abs/1711.00164.
Args:
pred (tensor): Predicted bboxes.
target (tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
reduction (str): Reduction type.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0] + 1
pred_h = pred[:, 3] - pred[:, 1] + 1
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0] + 1
target_h = target[:, 3] - target[:, 1] + 1
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).view(loss_dx.size(0), -1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
def accuracy(pred, target, topk=1):
"""
:param pred: (batch*num_sample, C)
:param target: (batch*num_sample)
:param topk:
:return:
"""
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, 1, True, True) # (batch*num_sample, 1)
pred_label = pred_label.t() # (1, batch*num_sample)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) # (1, batch*num_sample)
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
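# A small numeric sketch of the IoU-balanced weighting used above (illustrative
# values only; relies on this file's `torch` import). Positives are re-weighted
# by IoU**eta and then rescaled so the summed positive loss is unchanged, which
# shifts emphasis toward well-localized positives.
if __name__ == '__main__':
    loss = torch.tensor([1.0, 1.0, 1.0])    # per-example classification loss
    target = torch.tensor([1.0, 1.0, 0.0])  # 1 = positive, 0 = negative
    iou = torch.tensor([0.9, 0.5, 0.0])
    eta = 1.5
    weights = (1 - target) + (target * iou).pow(eta)
    normalizer = (loss * target).sum() / ((loss * weights * target).sum() + 1e-6)
    balanced = (1 - target) + (target * iou).pow(eta) * normalizer
    print(balanced)  # ~[1.414, 0.586, 1.000]; positive losses still sum to 2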
| 38.918083
| 127
| 0.61668
|
# TODO merge naive and weighted loss.
import numpy as np
import torch
import torch.nn.functional as F
from ..bbox import bbox_overlaps
from ...ops import sigmoid_focal_loss
from ..bbox.transforms import delta2bbox
def weighted_nll_loss(pred, label, weight, avg_factor=None):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw = F.nll_loss(pred, label, reduction='none')
return torch.sum(raw * weight)[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw = F.cross_entropy(pred, label, reduction='none')
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
# added by Shengkai Wu
# implement iou_balanced cross entropy loss.
def iou_balanced_cross_entropy(pred, label, weight, iou, eta = 1.5, avg_factor=None, reduce=True):
"""
    iou_balanced cross entropy loss to make the training process focus more on positives with higher
iou.
    :param pred: tensor of shape (batch*num_samples, num_class)
:param label: tensor of shape (batch*num_samples), store gt labels such as
0, 1, 2, 80 for corresponding class(0 represent background).
:param weight: tensor of shape (batch*num_samples), 1 for all the elements;
:param iou: tensor of shape (batch*num_samples), iou between predicted boxes and corresponding ground
truth boxes for positives and 0 for negatives.
:param eta: control to which extent the training process focuses on the positives with high iou.
:param avg_factor:
:param reduce:
:return:
"""
# avg_factor = batch*num_samples
# if avg_factor is None:
# avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.cross_entropy(pred, label, reduction='none')
target = iou.new_zeros(iou.size(0))
# target_1 = iou.new_zeros(iou.size(0))
    # the way to get the indexes of positive examples may be wrong; is it right?
    # pos_inds_1 = label > 0
    # target_1[pos_inds_1] = 1
    # modified way to get the indexes
pos_inds = (label > 0).nonzero().view(-1)
# pos_inds = (label >= 1).nonzero().view(-1)
target[pos_inds] = 1.0
# pos_inds_test = target.nonzero().view(-1)
method_1 = True
normalization = True
method_2 = False
threshold = 0.66
# threshold = torch.min(iou[pos_inds]).item()
method_3 = False
target = target.type_as(pred)
if method_1:
if normalization:
iou_weights = (1 - target) + (target * iou).pow(eta)
# normalized to keep the sum of loss for positive examples unchanged;
raw2 = raw1*iou_weights
normalizer = (raw1 * target).sum() / ((raw2 * target).sum() + 1e-6)
normalized_iou_weights = (1 - target) + (target * iou).pow(eta) * normalizer
normalized_iou_weights = normalized_iou_weights.detach()
raw = raw1*normalized_iou_weights
else:
weight_pos = 1.8
iou_weights = (1 - target) + (target * iou).pow(eta)*weight_pos
iou_weights = iou_weights.detach()
raw = raw1*iou_weights
elif method_2:
iou_weights = (1 - target) + (target*(1 + (iou - threshold))).pow(eta)
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
elif method_3:
ones_weight = iou.new_ones(iou.size(0))
iou_weights_1 = torch.where(iou > threshold, 1.0 + (iou - threshold), ones_weight)
# iou_weights = (1 - target) + (target * iou_weights_1).pow(eta)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
# raw = (raw1 * iou_weights +raw1)/2
# print('test_loss')
if avg_factor is None:
# avg_factor = max(torch.sum(normalized_iou_weights).float().item(), 1.)
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
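# Illustrative usage sketch (added for clarity; not part of the original module). It shows
# that the normalization branch re-weights each positive by iou**eta while keeping the
# summed loss over positives roughly unchanged. All shapes and values below are made up.
def _demo_iou_balanced_cross_entropy():
    torch.manual_seed(0)
    num_samples, num_classes = 8, 5
    pred = torch.randn(num_samples, num_classes)
    label = torch.randint(0, num_classes, (num_samples,))
    weight = torch.ones(num_samples)
    # IoU is non-zero only where label > 0 (positives), mirroring the docstring above.
    iou = torch.rand(num_samples) * (label > 0).float()
    return iou_balanced_cross_entropy(pred, label, weight, iou, eta=1.5)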
def consistent_loss(pred, label, weight, iou, avg_factor=None, reduce=True):
"""
    :param pred: tensor of shape (batch*num_samples, num_class)
    :param label: tensor of shape (batch*num_samples), stores gt labels such as
          0, 1, 2, ..., 80 for the corresponding class (0 represents background).
:param weight: tensor of shape (batch*num_samples), 1 for all the elements;
:param iou: tensor of shape (batch*num_samples), iou between proposals and corresponding ground
truth boxes for positives and 0 for negatives.
:param avg_factor:
:param reduce:
:return:
"""
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.cross_entropy(pred, label, reduction='none')
target = iou.new_zeros(iou.size(0))
pos_inds = (label > 0).nonzero().view(-1)
target[pos_inds] = 1.0
threshold = 0.5
ones_weight = iou.new_ones(iou.size(0))
iou_weights_1 = torch.where(iou > threshold, 1.0 + (iou - threshold), ones_weight)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
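# Illustrative sketch (added; not in the original module). With the fixed threshold of 0.5
# inside consistent_loss, a positive with IoU 0.8 has its cross-entropy term scaled by
# 1 + (0.8 - 0.5) = 1.3, while positives with IoU <= 0.5 and all negatives keep weight 1.
def _demo_consistent_loss():
    pred = torch.tensor([[2.0, 0.5, -1.0],
                         [0.1, 1.5, 0.3]])
    label = torch.tensor([1, 0])          # one positive (class 1), one background sample
    weight = torch.ones(2)
    iou = torch.tensor([0.8, 0.0])        # IoU is only meaningful for the positive
    return consistent_loss(pred, label, weight, iou)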
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
# print('test')
return F.binary_cross_entropy_with_logits(
pred, label.float(), weight.float(),
reduction='sum')[None] / avg_factor
def iou_balanced_binary_cross_entropy(pred, label, weight, iou, eta=1.5, avg_factor=None, reduce=True):
"""
:param pred: tensor of shape (num_examples, 1)
:param label: tensor of shape (num_examples, 1)
:param weight: tensor of shape (num_examples, 1)
:param iou: tensor of shape (num_examples), containing the iou for all the regressed
positive examples.
:param eta:
:param avg_factor:
:return:
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw1 = F.binary_cross_entropy_with_logits(pred, label.float(),reduction='none')
target = label.new_zeros(label.size())
# target_1 = iou.new_zeros(iou.size(0))
    # the way to get the indexes of positive examples may be wrong; is it right?
# pos_inds_1 = label > 0
# target_1[pos_inds_1] = 1
# modify the way to get the indexes
# label_squeeze = torch.squeeze(label)
# pos_inds = (label > 0).nonzero().view(-1)
# print('the size of label is ', label.size())
pos_inds = (label > 0).nonzero()
# print('the size of label_squeeze is ', label_squeeze.size())
target[pos_inds] = 1
# print('the num of positive examples is', torch.sum(target))
# print('the num of positive examples for target_1 is', torch.sum(target_1))
normalization = True
if normalization:
target = target.type_as(pred)
iou = iou.unsqueeze(-1)
# print('the size of target is ', target.size())
# print('the size of iou is ', iou.size())
# print('the size of iou_1 is ', iou_1.size())
iou_weights = (1 - target) + (target * iou).pow(eta)
# print('the size of iou_weights is ', iou_weights.size())
# print('the size of raw1 is ', raw1.size())
# iou_weights.unsqueeze(1)
# normalized to keep the sum of loss for positive examples unchanged;
raw2 = raw1 * iou_weights
normalizer = (raw1 * target).sum() / ((raw2 * target).sum() + 1e-6)
normalized_iou_weights = (1 - target) + (target * iou).pow(eta) * normalizer
normalized_iou_weights = normalized_iou_weights.detach()
raw = raw1 * normalized_iou_weights
else:
target = target.type_as(pred)
weight_pos = 1.8
iou_weights = (1 - target) + (target * iou).pow(eta) * weight_pos
iou_weights = iou_weights.detach()
raw = raw1 * iou_weights
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
# return F.binary_cross_entropy_with_logits(
# pred, label.float(), weight.float(),
# reduction='sum')[None] / avg_factor
# Known from the definition of weight in file anchor_target.py,
# all the elements of tensor 'weight' are 1.
def py_sigmoid_focal_loss(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
reduction='mean'):
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
weight = weight * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * weight
# the value of reduction_enum is decided by arg 'reduction'
# none: 0, mean:1, sum: 2
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
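# Sanity-check sketch (added; not part of the original module). For a single positive logit
# this focal loss reduces to the textbook form -alpha * (1 - p)**gamma * log(p); the numbers
# below are arbitrary and only illustrate that equivalence.
def _demo_py_sigmoid_focal_loss():
    pred = torch.tensor([[1.2]])          # one anchor, one class
    target = torch.tensor([[1.0]])        # positive example
    weight = torch.ones_like(pred)
    gamma, alpha = 2.0, 0.25
    loss = py_sigmoid_focal_loss(pred, target, weight, gamma, alpha, reduction='sum')
    p = torch.sigmoid(pred)
    manual = -alpha * (1 - p) ** gamma * torch.log(p)
    assert torch.allclose(loss, manual.sum())
    return loss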
# added by Shengkai Wu
# The focal loss is only computed for negative examples, and the standard binary cross
# entropy loss is computed for the positive examples. This is designed to investigate
# whether hard example mining for positive examples is beneficial for the performance.
def py_sigmoid_focal_loss_for_negatives(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
reduction='mean'):
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = target + pred_sigmoid * (1 - target)
weight = (alpha*target + (1 - alpha) * (1 - target)) * weight
weight = weight * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none') * weight
# the value of reduction_enum is decided by arg 'reduction'
# none: 0, mean:1, sum: 2
# print("only compute the focal loss for negative examples")
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weighted_sigmoid_focal_loss(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
avg_factor=None,
num_classes=80):
"""
    :param pred: tensor of shape (batch*A*width*height, num_class)
    :param target: tensor of shape (batch*A*width*height, num_class), only the elements for the
          positive labels are 1.
    :param weight: tensor of shape (batch*A*width*height, num_class), 1 for pos and neg, 0 for the others.
:param gamma:
:param alpha:
:param avg_factor:
:param num_classes:
:return:
"""
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6
return py_sigmoid_focal_loss(
pred, target, weight, gamma=gamma, alpha=alpha,
reduction='sum')[None] / avg_factor
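# Usage sketch (added for illustration). Shapes follow the docstring above: num_classes
# channels per location with binary (one-hot style) targets. Values are random placeholders.
def _demo_weighted_sigmoid_focal_loss(num_anchors=6, num_classes=80):
    torch.manual_seed(0)
    pred = torch.randn(num_anchors, num_classes)
    target = torch.zeros(num_anchors, num_classes)
    target[0, 3] = 1                      # mark one anchor as positive for class 3
    weight = torch.ones(num_anchors, num_classes)
    return weighted_sigmoid_focal_loss(pred, target, weight, num_classes=num_classes)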
# added by Shengkai Wu
# The IoU-balanced classification loss is designed to strengthen the correlation between the
# classification and localization tasks. The goal is that detections with high IoU with the
# ground truth boxes also have high classification scores.
def iou_balanced_sigmoid_focal_loss(pred,
target,
weight,
iou,
gamma=2.0,
alpha=0.25,
eta=1.5,
avg_factor=None,
num_classes=80):
"""
:param pred: tensor of shape (batch*A*width*height, num_class)
:param target: tensor of shape (batch*A*width*height, num_class), only the positive label is
assigned 1, 0 for others.
:param weight: tensor of shape (batch*A*width*height, num_class), 1 for pos and neg, 0 for the others.
:param iou: tensor of shape (batch*A*width*height), store the iou between predicted boxes and its
corresponding ground truth boxes for the positives and the iou between the predicted boxes and
anchors for negatives.
:param gamma:
:param alpha:
    :param eta: controls the suppression of the positives with low IoU.
    :param avg_factor: number of positive samples; must be provided, since the fallback that
          would recompute it is commented out below and the final division would fail on None.
:param num_classes:
:return:
"""
# if avg_factor is None:
# avg_factor = torch.sum(target).float().item() + 1e-6
# use_diff_thr = True
# pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
loss1 = py_sigmoid_focal_loss(
pred, target, weight, gamma=gamma, alpha=alpha,
reduction='none')
IoU_balanced_Cls = True
threshold = 0.5
if IoU_balanced_Cls:
# compute the normalized weights so that the loss produced by the positive examples
# doesn't change.
iou_expanded = iou.view(-1, 1).expand(-1, target.size()[1])
iou_weights = (1 - target) + (target * iou_expanded).pow(eta)
# iou_weights = iou_weights.detach()
loss2 = loss1*iou_weights
normalizer = (loss1*target).sum()/((loss2*target).sum()+1e-6)
# normalizer = 2.1
normalized_iou_weights = (1-target) + (target*iou_expanded).pow(eta)*normalizer
normalized_iou_weights = normalized_iou_weights.detach()
loss = loss1*normalized_iou_weights
# print('test')
else:
# consistent loss
iou_expanded = iou.view(-1, 1).expand(-1, target.size()[1])
ones_weight = iou_expanded.new_ones(iou_expanded.size())
# print('ones_weight.size() is ', ones_weight.size())
iou_weights_1 = torch.where(iou_expanded > threshold, 1.0 + (iou_expanded - threshold), ones_weight)
# iou_weights = (1 - target) + (target * iou_weights_1).pow(eta)
iou_weights = (1 - target) + target * iou_weights_1
iou_weights = iou_weights.detach()
# loss = loss1 * iou_weights
balance_factor = 0.6
loss = loss1*balance_factor + loss1 * iou_weights*(1-balance_factor)
return torch.sum(loss)[None] / avg_factor
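# Usage sketch (added). Note that avg_factor has to be passed explicitly here because the
# fallback above is commented out; a common choice is the number of positive anchors.
# All values below are illustrative only.
def _demo_iou_balanced_sigmoid_focal_loss(num_anchors=6, num_classes=80):
    torch.manual_seed(0)
    pred = torch.randn(num_anchors, num_classes)
    target = torch.zeros(num_anchors, num_classes)
    target[0, 3] = 1                      # one positive anchor
    weight = torch.ones(num_anchors, num_classes)
    iou = torch.zeros(num_anchors)
    iou[0] = 0.7                          # IoU of the regressed box for the positive
    num_pos = int(target.sum().item())
    return iou_balanced_sigmoid_focal_loss(pred, target, weight, iou,
                                           avg_factor=max(num_pos, 1))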
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, reduction='mean')[None]
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
# the value of reduction_enum is decided by arg 'reduction'
# none: 0, mean:1, sum: 2
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
# Known from the definition of weight in file anchor_target.py,
# the elements of tensor 'weight' for positive proposals are one.
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
loss = smooth_l1_loss(pred, target, beta, reduction='none')
# print('the size of pred is ', pred.size())
# print('the size of target is ', target.size())
# print('the size of weight is', weight.size())
return torch.sum(loss * weight)[None] / avg_factor
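# Usage sketch (added). The weight tensor zeroes out non-positive rows, so the default
# avg_factor equals the number of positive boxes (sum(weight > 0) / 4). Values are illustrative.
def _demo_weighted_smoothl1():
    pred = torch.tensor([[0.1, 0.2, -0.3, 0.4],
                         [0.0, 0.0, 0.0, 0.0]])
    target = torch.tensor([[0.0, 0.1, -0.2, 0.5],
                           [0.0, 0.0, 0.0, 0.0]])
    weight = torch.tensor([[1.0, 1.0, 1.0, 1.0],    # positive box
                           [0.0, 0.0, 0.0, 0.0]])   # ignored box
    return weighted_smoothl1(pred, target, weight, beta=1.0)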
# added by Shengkai Wu
# implement the focal loss for localization task.
def weighted_iou_balanced_smoothl1(pred, target, iou, weight, beta=1.0, delta=1.5, avg_factor=None):
"""
:param pred: tensor of shape (batch*A*width*height, 4) or (batch*num_pos, 4)
:param target: tensor of shape (batch*A*width*height, 4), store the parametrized coordinates of target boxes
for the positive anchors and other values are set to be 0. Or tensor of shape (batch*num_pos, 4)
    :param iou: tensor of shape (batch*A*width*height) or (batch*num_pos), stores the IoU between the
          predicted boxes and their corresponding ground truth boxes for the positives, and the IoU between
          the predicted boxes and anchors for the negatives.
    :param weight: tensor of shape (batch*A*width*height, 4), only the weights for positive anchors are set to
          be 1 and other values are set to be 0. Or tensor of shape (batch*num_pos, 4), all the elements are 1.
    :param beta:
    :param delta: controls the suppression of the outliers.
:param avg_factor:
:return:
"""
# the pred and target are transformed to image domain and represented by top-left and bottom-right corners.
assert pred.size() == target.size() and target.numel() > 0
# ignore the positive examples of which the iou after regression is smaller
# than 0.5;
ignore_outliers = False
iou_threshold = 0.5
if ignore_outliers:
filter = iou.new_zeros(iou.size())
filter_extend = filter.view(-1, 1).expand(-1, 4)
ind = (iou >= iou_threshold).nonzero()
filter[ind] = 1
iou = iou * filter
iou_expanded = iou.view(-1, 1).expand(-1, 4)
iou_weight = weight * iou_expanded.pow(delta)
iou_weight = iou_weight.detach()
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
loss1 = smooth_l1_loss(pred, target, beta, reduction='none')
loss2 = loss1*iou_weight
# loss2 = loss1 *filter_extend
return torch.sum(loss2)[None] / avg_factor
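# Usage sketch (added). Compared with weighted_smoothl1 above, each positive box's regression
# loss is additionally scaled by iou**delta, so well-localized boxes contribute relatively
# more to the total loss. Values are illustrative.
def _demo_weighted_iou_balanced_smoothl1():
    pred = torch.tensor([[0.1, 0.2, -0.3, 0.4]])
    target = torch.tensor([[0.0, 0.1, -0.2, 0.5]])
    weight = torch.ones(1, 4)
    iou = torch.tensor([0.8])             # IoU of the decoded box with its gt box
    return weighted_iou_balanced_smoothl1(pred, target, iou, weight, delta=1.5)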
def weighted_iou_regression_loss(iou_pred, iou_target, weight, avg_factor=None):
"""
:param iou_pred: tensor of shape (batch*A*width*height) or (batch*num_pos)
    :param iou_target: tensor of shape (batch*A*width*height) or (batch*num_pos), stores the IoU between the
          predicted boxes and their corresponding ground truth boxes for the positives, and the IoU between
          the predicted boxes and anchors for the negatives.
:param weight: tensor of shape (batch*A*width*height) or (batch*num_pos), 1 for positives and 0 for negatives and neutrals.
:param avg_factor:
:return:
"""
# iou_pred_sigmoid = iou_pred.sigmoid()
# iou_target = iou_target.detach()
# L2 loss.
# loss = torch.pow((iou_pred_sigmoid - iou_target), 2)*weight
# Binary cross-entropy loss for the positive examples
    loss = F.binary_cross_entropy_with_logits(iou_pred, iou_target, reduction='none') * weight
return torch.sum(loss)[None] / avg_factor
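# Usage sketch (added). The IoU-prediction branch is trained with BCE against the measured IoU
# of each positive box; avg_factor must be supplied (e.g. the number of positives) because
# there is no fallback when it is None. Values are illustrative.
def _demo_weighted_iou_regression_loss():
    iou_pred = torch.tensor([0.3, -1.2, 0.8])     # raw logits from the IoU head
    iou_target = torch.tensor([0.9, 0.0, 0.6])    # measured IoUs (0 for negatives)
    weight = torch.tensor([1.0, 0.0, 1.0])        # only positives contribute
    num_pos = int((weight > 0).sum().item())
    return weighted_iou_regression_loss(iou_pred, iou_target, weight,
                                        avg_factor=max(num_pos, 1))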
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='none'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
return loss
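# Sanity-check sketch (added). The balanced L1 loss chooses b so that the non-linear branch
# and the linear branch of torch.where join continuously at diff == beta; this helper only
# verifies that continuity numerically for the default hyper-parameters.
def _demo_balanced_l1_continuity(beta=1.0, alpha=0.5, gamma=1.5):
    b = np.e ** (gamma / alpha) - 1
    diff = torch.tensor([beta])
    left = alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff
    right = gamma * diff + gamma / b - alpha * beta
    assert torch.allclose(left, right, atol=1e-5)
    return left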
def weighted_balanced_l1_loss(pred,
target,
weight,
beta=1.0,
alpha=0.5,
gamma=1.5,
avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
loss = balanced_l1_loss(pred, target, beta, alpha, gamma, reduction='none')
return torch.sum(loss * weight)[None] / avg_factor
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3, reduction='mean'):
"""Improving Object Localization with Fitness NMS and Bounded IoU Loss,
https://arxiv.org/abs/1711.00164.
Args:
pred (tensor): Predicted bboxes.
target (tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
reduction (str): Reduction type.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0] + 1
pred_h = pred[:, 3] - pred[:, 1] + 1
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0] + 1
target_h = target[:, 3] - target[:, 1] + 1
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).view(loss_dx.size(0), -1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
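# Usage sketch (added). For identical predicted and target boxes the bounded IoU loss is
# numerically (almost) zero, and shifting the prediction makes it positive. Box coordinates
# below are arbitrary (x1, y1, x2, y2) values.
def _demo_bounded_iou_loss():
    target = torch.tensor([[10.0, 10.0, 50.0, 60.0]])
    exact = bounded_iou_loss(target.clone(), target, reduction='sum')
    shifted = bounded_iou_loss(target + 2.0, target, reduction='sum')
    assert exact.item() < 1e-6 and shifted.item() > 0.0
    return shifted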
def weighted_iou_loss(pred,
target,
weight,
style='naive',
beta=0.2,
eps=1e-3,
avg_factor=None):
if style not in ['bounded', 'naive']:
raise ValueError('Only support bounded iou loss and naive iou loss.')
inds = torch.nonzero(weight[:, 0] > 0)
if avg_factor is None:
avg_factor = inds.numel() + 1e-6
if inds.numel() > 0:
inds = inds.squeeze(1)
else:
return (pred * weight).sum()[None] / avg_factor
if style == 'bounded':
loss = bounded_iou_loss(
pred[inds], target[inds], beta=beta, eps=eps, reduction='sum')
else:
loss = iou_loss(pred[inds], target[inds], reduction='sum')
loss = loss[None] / avg_factor
return loss
def accuracy(pred, target, topk=1):
"""
:param pred: (batch*num_sample, C)
:param target: (batch*num_sample)
:param topk:
:return:
"""
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, 1, True, True) # (batch*num_sample, 1)
pred_label = pred_label.t() # (1, batch*num_sample)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) # (1, batch*num_sample)
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
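# Usage sketch (added). With an int topk the function returns a single percentage tensor,
# with a tuple it returns a list. In this toy case the second sample is misclassified at
# top-1 (50%) but recovered at top-2 (100%).
def _demo_accuracy():
    pred = torch.tensor([[0.1, 0.9, 0.0],
                         [0.8, 0.1, 0.3]])
    target = torch.tensor([1, 2])
    top1 = accuracy(pred, target, topk=1)
    top1_top2 = accuracy(pred, target, topk=(1, 2))
    return top1, top1_top2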
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
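# Illustrative sketch (added). The binary expansion maps label k (k >= 1) to channel k - 1,
# so background (label 0) produces an all-zero row, and the weights are broadcast across the
# label channels.
def _demo_expand_binary_labels():
    labels = torch.tensor([0, 2, 1])
    label_weights = torch.tensor([1.0, 1.0, 1.0])
    bin_labels, bin_weights = _expand_binary_labels(labels, label_weights, label_channels=3)
    # bin_labels == [[0, 0, 0], [0, 1, 0], [1, 0, 0]]
    return bin_labels, bin_weights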
def iou_loss(pred_bboxes, target_bboxes, reduction='mean'):
ious = bbox_overlaps(pred_bboxes, target_bboxes, is_aligned=True)
loss = -ious.log()
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
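# Usage sketch (added). The naive IoU loss is -log(IoU) over aligned box pairs, so a perfect
# overlap gives 0 and, by the formula, a half overlap would give -log(0.5) ~= 0.693. The box
# below is an arbitrary (x1, y1, x2, y2) example.
def _demo_iou_loss():
    pred_bboxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    target_bboxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    return iou_loss(pred_bboxes, target_bboxes, reduction='none')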
| 6,879
| 0
| 296
|