hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c91e1c1cf29e15371a4ee6e5142850adc128722
| 1,657
|
py
|
Python
|
utilities/visualisation/log_file_plotter.py
|
bootml/agent
|
84235db931d6e4ef956962961c619994898ebdd5
|
[
"Apache-2.0"
] | null | null | null |
utilities/visualisation/log_file_plotter.py
|
bootml/agent
|
84235db931d6e4ef956962961c619994898ebdd5
|
[
"Apache-2.0"
] | null | null | null |
utilities/visualisation/log_file_plotter.py
|
bootml/agent
|
84235db931d6e4ef956962961c619994898ebdd5
|
[
"Apache-2.0"
] | 1
|
2018-09-27T14:31:41.000Z
|
2018-09-27T14:31:41.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'cnheider'
import csv

import matplotlib.pyplot as plt

import utilities as U

# Apply the base poster style first, then the headline style on top so the
# latter's settings win. (Alternatives: 'bmh', 'ggplot'.)
plot_style = 'fivethirtyeight'
plt.style.use('seaborn-poster')
plt.style.use(plot_style)

# Force a pure-white canvas for axes, figures, patches and saved output.
_WHITE = '#ffffff'
for _rc_key in ('axes.edgecolor', 'axes.facecolor',
                'figure.facecolor',
                'patch.edgecolor', 'patch.facecolor',
                'savefig.edgecolor', 'savefig.facecolor'):
    plt.rcParams[_rc_key] = _WHITE
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16

# set up matplotlib; interactive mode keeps figures responsive.
is_ipython = 'inline' in plt.get_backend()
plt.ion()
def simple_plot(file_path, name='Statistic Name'):
    """Plot a space-delimited CSV log file as a single line chart.

    The first field of each row is parsed as a float and accumulated in a
    ``U.StatisticAggregator`` whose ``values`` are then plotted.
    """
    aggregator = U.StatisticAggregator()
    with open(file_path, 'r') as log_file:
        for row in csv.reader(log_file, delimiter=' ', quotechar='|'):
            aggregator.append(float(row[0]))
    plt.plot(aggregator.values)
    plt.title(name)
    plt.show()
if __name__ == '__main__':
    from tkinter import Tk
    from tkinter.filedialog import askopenfilename

    # Hide the root window: we only want the native file-open dialog.
    Tk().withdraw()
    file_path = askopenfilename()
    file_name = file_path.split('/')[-1]
    simple_plot(file_path, file_name)
| 25.106061
| 101
| 0.703681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 765
| 0.461678
|
0c91f8beba1444262adffec26a3344ef48edb987
| 435
|
py
|
Python
|
ex3.py
|
SuPoPoo/python-exercise
|
601b87c38c0090406cf532d2f9676b18650a0e0f
|
[
"MIT"
] | null | null | null |
ex3.py
|
SuPoPoo/python-exercise
|
601b87c38c0090406cf532d2f9676b18650a0e0f
|
[
"MIT"
] | null | null | null |
ex3.py
|
SuPoPoo/python-exercise
|
601b87c38c0090406cf532d2f9676b18650a0e0f
|
[
"MIT"
] | null | null | null |
# Exercise 3: arithmetic and comparison operators, shown via print.
print("I will now count my chickens:")
print("Hens", 25 + 30 / 6)
print("Roosters", 100 - 25 * 3 % 4)

print("How I will count the eggs:")
print(3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)

print("Is it true that 3+2<5-7?")
print(3 + 2 < 5 - 7)
print("What is 3+2?", 3 + 2)
print("What is 5-7?", 5 - 7)
print("Oh,that's why it's false")

print("How about some more.")
print("Is it greater?", 5 > -2)
print("Is it greater or equal?", 5 >= -2)
print("Is it less or equal?", 5 <= -2)
| 16.730769
| 38
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.551724
|
0c92ac74a41c6d27584a5d7d7e530d1077058597
| 2,385
|
py
|
Python
|
datasets/few_shot_test_pickle.py
|
PengWan-Yang/few-shot-transformer
|
c055239061744124c72960420cd4037495952b6d
|
[
"Apache-2.0"
] | 4
|
2022-02-06T19:51:19.000Z
|
2022-03-15T21:19:23.000Z
|
datasets/few_shot_test_pickle.py
|
PengWan-Yang/few-shot-transformer
|
c055239061744124c72960420cd4037495952b6d
|
[
"Apache-2.0"
] | 1
|
2022-02-06T20:00:15.000Z
|
2022-02-06T20:00:15.000Z
|
datasets/few_shot_test_pickle.py
|
PengWan-Yang/few-shot-transformer
|
c055239061744124c72960420cd4037495952b6d
|
[
"Apache-2.0"
] | null | null | null |
import pickle

# Path prefixes rewritten throughout this script.
_OLD_PREFIX = '/home/tao/dataset/v1-3/train_val_frames_3'
_OLD_TRAIN_PREFIX = 'dataset/activitynet13/train_val_frames_3'
_NEW_PREFIX = 'datasets/activitynet13'


def _load(path):
    """Unpickle *path*; unlike the original, the file handle is closed."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)


def _rewrite(video, old, new):
    """Redirect a clip dict's fg/bg frame directories from *old* to *new*."""
    video['fg_name'] = video['fg_name'].replace(old, new)
    video['bg_name'] = video['bg_name'].replace(old, new)


_few_shot_pickle_file = 'few_shot_test_data.pkl'
data_few_shot = _load(_few_shot_pickle_file)
_few_shot_pickle_file = 'few_shot_val_data.pkl'
data_val = _load(_few_shot_pickle_file)
_few_shot_pickle_file = 'few_shot_train_data.pkl'
data_train = _load(_few_shot_pickle_file)

# The original script aborted here with `raise 1`, itself a bug (TypeError:
# exceptions must derive from BaseException) but it still stopped execution
# before any pickle was rewritten. Preserve that deliberate stop with a
# clear message instead of a confusing TypeError.
raise SystemExit('few_shot_test_pickle: stopping before rewriting any pickles; '
                 'remove this guard to run the path-rewriting steps below')

# ---- unreachable below the guard; kept from the original for reference ----

# modify validation data
# NOTE(review): _few_shot_pickle_file still holds 'few_shot_train_data.pkl'
# here, so this dump would overwrite the *train* pickle with the loaded test
# data — confirm intent before removing the guard above.
for _list in data_few_shot:
    for _video in _list:
        _rewrite(_video, _OLD_PREFIX, _NEW_PREFIX)
pickle.dump(data_few_shot, open(_few_shot_pickle_file, "wb"))
print("done")

# modify testing data
_few_shot_pickle_file = 'few_shot_test_data.pkl'
data_few_shot = _load(_few_shot_pickle_file)
for _list in data_few_shot:
    for _video in _list:
        _rewrite(_video, _OLD_PREFIX, _NEW_PREFIX)
pickle.dump(data_few_shot, open(_few_shot_pickle_file, "wb"))
print("done")

# modify training data (a dict keyed by class; also assigns fresh query ids)
_few_shot_pickle_file = 'few_shot_train_data.pkl'
data_few_shot = _load(_few_shot_pickle_file)
index = 0
for k, _list in data_few_shot.items():
    for _video in _list:
        _video['video_id'] = "query_{:0>5d}".format(index)
        _rewrite(_video, _OLD_TRAIN_PREFIX, _NEW_PREFIX)
        index = index + 1
pickle.dump(data_few_shot, open(_few_shot_pickle_file, "wb"))
print("done")
| 37.857143
| 98
| 0.65283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 771
| 0.32327
|
0c93992159c77c279e8541bafd3b789955b4b418
| 473
|
py
|
Python
|
3/node.py
|
Pavel3P/Machine-Learning
|
441da7de69ebf6cef9ebe54a0b3992918faf1d40
|
[
"MIT"
] | null | null | null |
3/node.py
|
Pavel3P/Machine-Learning
|
441da7de69ebf6cef9ebe54a0b3992918faf1d40
|
[
"MIT"
] | null | null | null |
3/node.py
|
Pavel3P/Machine-Learning
|
441da7de69ebf6cef9ebe54a0b3992918faf1d40
|
[
"MIT"
] | null | null | null |
import numpy as np
class Node:
    """A single node of a decision tree.

    A freshly constructed node is a leaf predicting its majority class;
    split information (``feature_index``/``threshold``) and the children
    are filled in later by the tree-building code.
    """

    def __init__(self,
                 gini: float,
                 num_samples_per_class: np.ndarray,
                 ) -> None:
        self.gini: float = gini
        self.num_samples_per_class: np.ndarray = num_samples_per_class
        # np.argmax returns a numpy integer; cast so the annotation holds.
        self.predicted_class: int = int(np.argmax(num_samples_per_class))
        self.feature_index: int = 0
        # Was the int literal 0 — use a float to match the annotation.
        self.threshold: float = 0.0
        # Children are absent until the node is split (original annotated
        # these as plain ``Node`` even though they start as None).
        self.left: "Node | None" = None
        self.right: "Node | None" = None
| 26.277778
| 70
| 0.587738
| 451
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c960c32123fe98899d1aea36a071118d99135d2
| 5,654
|
py
|
Python
|
nemo/collections/nlp/utils/evaluation_utils.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 1
|
2020-08-04T08:29:41.000Z
|
2020-08-04T08:29:41.000Z
|
nemo/collections/nlp/utils/evaluation_utils.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 1
|
2020-06-11T00:54:42.000Z
|
2020-06-11T00:54:42.000Z
|
nemo/collections/nlp/utils/evaluation_utils.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 3
|
2020-03-10T05:10:07.000Z
|
2020-12-08T01:33:35.000Z
|
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from nemo import logging
def analyze_confusion_matrix(cm, dict, max_pairs=10):
    """
    Sort all confusions in the confusion matrix by value and display results.
    Print results in a format: (name -> name, value)
    Args:
        cm: Confusion matrix (square array)
        dict: Dictionary with key as a name and index as a value (Intents or Slots).
              Builtin-shadowing parameter name kept for backward compatibility.
        max_pairs: Max number of confusions to print
    """
    threshold = 5  # arbitrary: only report confusions seen at least this often
    confused_pairs = {}
    size = cm.shape[0]
    for i in range(size):
        # Walk row i from the most- to the least-confused column.
        for pos in cm[i].argsort()[::-1]:
            if pos == i:
                # Diagonal entry = correct prediction, not a confusion.
                continue
            if cm[i][pos] < threshold:
                # Columns are sorted descending; the rest are smaller still.
                break
            # Original bound this to a local named `str`, shadowing the builtin.
            confused_pairs[f'{dict[i]} -> {dict[pos]}'] = cm[i][pos]
    # Sort by confusion count and log the first max_pairs entries.
    sorted_confused_pairs = sorted(confused_pairs.items(), key=lambda x: x[1], reverse=True)
    for pair_str in sorted_confused_pairs[:max_pairs]:
        logging.info(pair_str)
def errors_per_class(cm, dict):
    """
    Summarize confusions per each class in the confusion matrix.
    It can be useful both for Intents and Slots.
    It counts each confusion twice in both directions.
    Args:
        cm: Confusion matrix (square array)
        dict: Dictionary with key as a name and index as a value (Intents or Slots).
              Builtin-shadowing parameter name kept for backward compatibility.
    """
    size = cm.shape[0]
    confused_per_class = {}
    total_errors = 0
    for class_num in range(size):
        # Off-diagonal mass in this class's row plus its column — each
        # confusion therefore contributes to two classes' totals.
        # (Original bound this to a local named `sum`, shadowing the builtin.)
        class_errors = 0
        for i in range(size):
            if i != class_num:
                class_errors += cm[class_num][i]
                class_errors += cm[i][class_num]
        confused_per_class[dict[class_num]] = class_errors
        total_errors += class_errors
    logging.info(f'Total errors (multiplied by 2): {total_errors}')
    sorted_confused_per_class = sorted(confused_per_class.items(), key=lambda x: x[1], reverse=True)
    for conf_str in sorted_confused_per_class:
        logging.info(conf_str)
def log_misclassified_queries(intent_labels, intent_preds, queries, intent_dict, limit=50):
    """
    Display examples of Intent mistakes.
    In a format: Query, predicted and labeled intent names.
    Stops after *limit* examples.
    """
    logging.info(f'*** Misclassified intent queries (limit {limit}) ***')
    shown = 0
    for idx, pred in enumerate(intent_preds):
        if intent_labels[idx] == pred:
            continue
        # Query text is the first tab-separated field of the raw line.
        query_text = queries[idx].split('\t')[0]
        logging.info(
            f'{query_text} (predicted: {intent_dict[pred]} - labeled: {intent_dict[intent_labels[idx]]})'
        )
        shown += 1
        if shown >= limit:
            break
def log_misclassified_slots(
    intent_labels, intent_preds, slot_labels, slot_preds, subtokens_mask, queries, intent_dict, slot_dict, limit=50
):
    """
    Display examples of Slot mistakes.
    In a format: Query, predicted and labeled intent names and list of predicted and labeled slot numbers.
    also prints dictionary of the slots at the start for easier reading.
    """
    logging.info('')
    logging.info(f'*** Misclassified slots queries (limit {limit}) ***')
    # Print the slot dictionary, five entries per line, for easier reading.
    # (Original used a placeholder-less f-string and a local named `str`.)
    logging.info('Slot dictionary:')
    line = ''
    for i, slot in enumerate(slot_dict):
        line += f'{i} - {slot}, '
        if i % 5 == 4 or i == len(slot_dict) - 1:
            logging.info(line)
            line = ''
    logging.info('----------------')
    cnt = 0
    for i in range(len(intent_preds)):
        # Only compare slots on real subtokens (mask filters special tokens).
        cur_slot_pred = slot_preds[i][subtokens_mask[i]]
        cur_slot_label = slot_labels[i][subtokens_mask[i]]
        if not np.all(cur_slot_pred == cur_slot_label):
            query = queries[i].split('\t')[0]
            logging.info(
                f'{query} (predicted: {intent_dict[intent_preds[i]]} - labeled: {intent_dict[intent_labels[i]]})'
            )
            logging.info(f'p: {cur_slot_pred}')
            logging.info(f'l: {cur_slot_label}')
            cnt += 1
            if cnt >= limit:
                break
def check_problematic_slots(slot_preds_list, slot_dict):
    """ Check non compliance of B- and I- slots for datasets that use such slot encoding.

    An "I-X" tag is only valid when the previous token carries a tag of the
    same type X ("B-X" or "I-X"). Prints each violation and a final count.

    Args:
        slot_preds_list: sequence of predicted slot ids for one sentence
        slot_dict: mapping from slot id to slot name
    """
    cnt = 0
    sentence = slot_preds_list
    for i in range(len(sentence)):
        slot_name = slot_dict[int(sentence[i])]
        if slot_name.startswith("I-"):
            if i == 0:
                # Bug fix: the original indexed sentence[i - 1] here, which
                # for i == 0 wrapped around to the *last* token. A sentence
                # can never legally start with an "I-" tag.
                print("Problem: " + slot_name + " - <start of sentence>")
                cnt += 1
            else:
                prev_slot_name = slot_dict[int(sentence[i - 1])]
                if slot_name[2:] != prev_slot_name[2:]:
                    print("Problem: " + slot_name + " - " + prev_slot_name)
                    cnt += 1
    print("Total problematic slots: " + str(cnt))
| 37.197368
| 115
| 0.598161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,603
| 0.460382
|
0c9646550e91efca615eedc91a6895d4f88c0e06
| 276
|
py
|
Python
|
fdk_client/common/date_helper.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/common/date_helper.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/common/date_helper.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pytz
from .constants import TIMEZONE
timezone = pytz.timezone(TIMEZONE)
def get_ist_now():
    """Return the current time as a timezone-aware datetime.

    Named for Indian Standard Time, but the actual zone is whatever the
    module-level ``timezone`` (built from ``TIMEZONE``) is configured to.

    Returns:
        object -- Datetime object
    """
    return datetime.now(tz=timezone)
| 16.235294
| 52
| 0.710145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.376812
|
0c966317072167bda3878121dfa458b395fd1b61
| 1,357
|
py
|
Python
|
recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 3,913
|
2021-03-14T13:54:52.000Z
|
2022-03-30T05:09:55.000Z
|
recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 667
|
2021-03-14T20:11:17.000Z
|
2022-03-31T04:07:17.000Z
|
recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py
|
JasonSWFu/speechbrain
|
cb78ba2b33fceba273b055dc471535344c3053f0
|
[
"Apache-2.0"
] | 785
|
2021-03-14T13:20:57.000Z
|
2022-03-31T03:26:03.000Z
|
"""
LibriParty Dataset creation by using official metadata.
Author
------
Samuele Cornell, 2020
Mirco Ravanelli, 2020
"""
import os
import sys
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.data_utils import download_file
from local.create_mixtures_from_metadata import create_mixture
import json
from tqdm import tqdm

URL_METADATA = (
    "https://www.dropbox.com/s/0u6x6ndyedb4rl7/LibriParty_metadata.zip?dl=1"
)

# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
    params = load_hyperpyyaml(fin, overrides)

# exist_ok avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` sequence.
metadata_folder = params["metadata_folder"]
os.makedirs(metadata_folder, exist_ok=True)

# Download meta data from the web (os.path.join instead of "+ '/meta.zip'")
download_file(
    URL_METADATA,
    os.path.join(metadata_folder, "meta.zip"),
    unpack=True,
    dest_unpack=metadata_folder,
)

# Synthesize each split from its JSON session metadata.
for data_split in ["train", "dev", "eval"]:
    with open(os.path.join(metadata_folder, data_split + ".json"), "r") as f:
        metadata = json.load(f)
    print("Creating data for {} set".format(data_split))
    c_folder = os.path.join(params["out_folder"], data_split)
    os.makedirs(c_folder, exist_ok=True)
    for sess in tqdm(metadata.keys()):
        create_mixture(sess, c_folder, params, metadata[sess])
| 28.270833
| 77
| 0.745763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 375
| 0.276345
|
0c9682abb64f3ba26f9ba369881899db7f3b759b
| 3,440
|
py
|
Python
|
tests/test_py4gh.py
|
iCAN-PCM/py4gh
|
192e62d531b5fd8c4c9a04a83c98bd63795578b8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_py4gh.py
|
iCAN-PCM/py4gh
|
192e62d531b5fd8c4c9a04a83c98bd63795578b8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_py4gh.py
|
iCAN-PCM/py4gh
|
192e62d531b5fd8c4c9a04a83c98bd63795578b8
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
from pathlib import Path
import pytest
# from py4gh import __version__
from py4gh.utility import decrypt_files, encrypt_files, get_files
# def test_version():
# assert __version__ == "0.1.0"
@pytest.fixture(scope="session")
def keys(tmpdir_factory):
    """Generate two passphrase-less crypt4gh keypairs.

    Returns:
        [(pub1, sec1), (pub2, sec2)] as tmpdir path objects.
    """
    test_pub1 = tmpdir_factory.mktemp("data").join("test1.pub")
    test_sec1 = tmpdir_factory.mktemp("data").join("test1.sec")
    test_pub2 = tmpdir_factory.mktemp("data").join("test2.pub")
    test_sec2 = tmpdir_factory.mktemp("data").join("test2.sec")
    # Both keypairs are generated the same way. The original mixed a manual
    # Popen/stdin dance for the first key with subprocess.run for the second;
    # subprocess.run with input="" is equivalent and far less fragile.
    for sec, pub in ((test_sec1, test_pub1), (test_sec2, test_pub2)):
        subprocess.run(
            ["crypt4gh-keygen", "--sk", sec, "--pk", pub, "--nocrypt"],
            text=True,
            input="",
        )
    return [(test_pub1, test_sec1), (test_pub2, test_sec2)]
@pytest.fixture(scope="session")
def files(tmp_path):
    """Create sub/hello.txt with a known plaintext and return its path.

    NOTE(review): this fixture is session-scoped but depends on the
    function-scoped ``tmp_path`` fixture — pytest raises ScopeMismatch if it
    is ever requested. It is currently unused by the tests below; confirm
    intent before relying on it.
    """
    d = tmp_path / "sub"
    d.mkdir()  # bug fix: the directory must exist before write_text
    p = d / "hello.txt"
    p.write_text("This is a secret message")
    return p
# def test_file(files):
# with open(files, "r") as f:
# print(f.read())
# assert 1 == 3
def test_encryption(keys, tmpdir):
    """Single-recipient encryption produces a non-empty hello.txt.c4gh."""
    work_dir = tmpdir.mkdir("sub")
    plain = work_dir / "hello.txt"
    plain.write("This is a secret message")
    targets = get_files(work_dir)
    err, res = encrypt_files(keys[0][1], [keys[0][0]], targets)
    listing = subprocess.run(["ls", work_dir], capture_output=True, text=True)
    entries = listing.stdout.split("\n")
    assert entries[1] == "hello.txt.c4gh"
    encrypted_file = Path(work_dir / entries[1])
    assert encrypted_file.stat().st_size != 0
    print(err)
    print(res)
def test_multiple_encryption(keys, tmpdir):
    """Encrypting for two recipients still yields one non-empty .c4gh file."""
    work_dir = tmpdir.mkdir("sub")
    plain = work_dir / "hello.txt"
    message = "This is a secret message"
    plain.write(message)
    targets = get_files(work_dir)
    encrypt_files(keys[0][1], [keys[0][0], keys[1][0]], targets)
    listing = subprocess.run(["ls", work_dir], capture_output=True, text=True)
    print(listing.stdout)
    entries = listing.stdout.split("\n")
    assert entries[1] == "hello.txt.c4gh"
    encrypted_file = Path(work_dir / entries[1])
    assert encrypted_file.stat().st_size != 0
def test_muliple_encryption_decryption(keys, tmpdir):
    """Round-trip: encrypt for two keys, delete the plaintext, then decrypt
    with the *second* key and recover the original message."""
    work_dir = tmpdir.mkdir("sub")
    plain = work_dir / "hello.txt"
    message = "This is a secret message"
    plain.write(message)
    encrypt_files(keys[0][1], [keys[0][0], keys[1][0]], get_files(work_dir))
    subprocess.run(["ls", work_dir], capture_output=True, text=True)
    subprocess.run(["rm", plain])
    # Only the ciphertext should remain after removing the plaintext.
    after_rm = subprocess.run(["ls", work_dir], capture_output=True, text=True)
    remaining = after_rm.stdout.split("\n")
    assert len(remaining) == 2
    assert remaining[0] == "hello.txt.c4gh"
    assert remaining[1] == ""
    decrypt_files(keys[1][1], get_files(work_dir))
    final = subprocess.run(["ls", work_dir], capture_output=True, text=True)
    entries = final.stdout.split("\n")
    print(entries)
    assert entries[0] == "hello.txt"
    decrypted_file = Path(work_dir / entries[0])
    assert decrypted_file.read_text() == message
    assert decrypted_file.stat().st_size != 0
| 31.559633
| 79
| 0.636047
| 0
| 0
| 0
| 0
| 1,051
| 0.305523
| 0
| 0
| 848
| 0.246512
|
0c96e86ca1a15c8434d2cbc7e56c0f749d433cc7
| 2,885
|
py
|
Python
|
test/sca/test_rpa.py
|
scrambler-crypto/pyecsca
|
491abfb548455669abd470382a48dcd07b2eda87
|
[
"MIT"
] | null | null | null |
test/sca/test_rpa.py
|
scrambler-crypto/pyecsca
|
491abfb548455669abd470382a48dcd07b2eda87
|
[
"MIT"
] | null | null | null |
test/sca/test_rpa.py
|
scrambler-crypto/pyecsca
|
491abfb548455669abd470382a48dcd07b2eda87
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from parameterized import parameterized
from pyecsca.ec.context import local
from pyecsca.ec.mult import LTRMultiplier, BinaryNAFMultiplier, WindowNAFMultiplier, LadderMultiplier, \
DifferentialLadderMultiplier
from pyecsca.ec.params import get_params
from pyecsca.sca.re.rpa import MultipleContext
class MultipleContextTests(TestCase):
    """Verify that MultipleContext records the scalar multiples computed by
    several scalar-multiplication algorithms."""

    def setUp(self):
        # secp128r1 in projective coordinates, plus the formulas the
        # multipliers under test require.
        self.secp128r1 = get_params("secg", "secp128r1", "projective")
        self.base = self.secp128r1.generator
        self.coords = self.secp128r1.curve.coordinate_model
        formulas = self.coords.formulas
        self.add = formulas["add-1998-cmo"]
        self.dbl = formulas["dbl-1998-cmo"]
        self.neg = formulas["neg"]
        self.scale = formulas["z"]

    @parameterized.expand([
        ("10", 10),
        ("2355498743", 2355498743),
        ("325385790209017329644351321912443757746", 325385790209017329644351321912443757746),
        ("13613624287328732", 13613624287328732)
    ])
    def test_basic(self, name, scalar):
        # After a left-to-right multiply, the last multiple the context
        # recorded must be the scalar itself.
        mult = LTRMultiplier(self.add, self.dbl, self.scale, always=False, complete=False, short_circuit=True)
        with local(MultipleContext()) as ctx:
            mult.init(self.secp128r1, self.base)
            mult.multiply(scalar)
        recorded = list(ctx.points.values())
        self.assertEqual(recorded[-1], scalar)

    def test_precomp(self):
        # Binary NAF initialization records the base point and its negation.
        bnaf = BinaryNAFMultiplier(self.add, self.dbl, self.neg, self.scale)
        with local(MultipleContext()) as ctx:
            bnaf.init(self.secp128r1, self.base)
        self.assertListEqual(list(ctx.points.values()), [1, -1])
        # Window NAF (width 3) records the small odd multiples as well.
        wnaf = WindowNAFMultiplier(self.add, self.dbl, self.neg, 3, self.scale)
        with local(MultipleContext()) as ctx:
            wnaf.init(self.secp128r1, self.base)
        self.assertListEqual(list(ctx.points.values()), [1, 2, 3, 5])

    def test_ladder(self):
        # For both ladder variants the scalar shows up as the
        # second-to-last multiple the context recorded.
        curve25519 = get_params("other", "Curve25519", "xz")
        base = curve25519.generator
        formulas = curve25519.curve.coordinate_model.formulas
        ladd = formulas["ladd-1987-m"]
        dadd = formulas["dadd-1987-m"]
        dbl = formulas["dbl-1987-m"]
        scale = formulas["scale"]
        scalar = 1339278426732672313
        for mult in (LadderMultiplier(ladd, dbl, scale),
                     DifferentialLadderMultiplier(dadd, dbl, scale)):
            with local(MultipleContext()) as ctx:
                mult.init(curve25519, base)
                mult.multiply(scalar)
            self.assertEqual(list(ctx.points.values())[-2], scalar)
| 41.214286
| 110
| 0.664471
| 2,547
| 0.882842
| 0
| 0
| 593
| 0.205546
| 0
| 0
| 209
| 0.072444
|
0c97356ee6bbe49ca37564ac2a4ced12f750d008
| 624
|
py
|
Python
|
sorting-and-searching/selection-sort.py
|
rayruicai/coding-interview
|
4de5de63fe09eae488bdbde372aa1c0cb4defa85
|
[
"MIT"
] | null | null | null |
sorting-and-searching/selection-sort.py
|
rayruicai/coding-interview
|
4de5de63fe09eae488bdbde372aa1c0cb4defa85
|
[
"MIT"
] | null | null | null |
sorting-and-searching/selection-sort.py
|
rayruicai/coding-interview
|
4de5de63fe09eae488bdbde372aa1c0cb4defa85
|
[
"MIT"
] | null | null | null |
import unittest
# time complexity O(n**2)
# space complexity O(1)
def selection_sort(arr):
    """Sort *arr* in place (ascending) by selection sort and return it.

    Each pass finds the largest remaining element and swaps it into the
    last unsorted position.
    """
    for end in range(len(arr) - 1, 0, -1):
        # Index of the maximum within arr[0..end]; max() keeps the first
        # occurrence on ties, matching the original strict-'>' scan.
        largest = max(range(end + 1), key=arr.__getitem__)
        arr[end], arr[largest] = arr[largest], arr[end]
    return arr
class Test(unittest.TestCase):
    """Unit test for selection_sort (input includes duplicates on purpose)."""

    def test_selection_sort(self):
        data = [3, 6, 9, 7, 8, 4, 2, 5, 1, 9, 6]
        expected = [1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9]
        self.assertEqual(selection_sort(data), expected)
# Run the unit tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 22.285714
| 71
| 0.543269
| 175
| 0.280449
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.092949
|
0c97d3a32db9b335bffe637b1d619f3774455b40
| 2,930
|
py
|
Python
|
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: Daniel Garces, Joseph Tarango
# *****************************************************************************/
import os, datetime, traceback, optparse, shutil
import PyInstaller.__main__
def _fresh_output_dir(rel_path):
    """Delete a previous build at *rel_path* (if any) so PyInstaller starts clean."""
    dir_path = os.path.join(os.getcwd(), rel_path)
    if os.path.exists(dir_path) and os.path.isdir(dir_path):
        print("Previous executable exists. Removing it before generating the new one")
        shutil.rmtree(dir_path)
    return dir_path


def main():
    """Build a one-file PyInstaller executable.

    With --installer, builds src/installer.py into data/installer;
    otherwise builds the GUI from src/software/gui.py into data/binary and
    copies the logo next to it.
    """
    ##############################################
    # Main function, Options
    ##############################################
    parser = optparse.OptionParser()
    parser.add_option("--installer", dest='installer', action='store_true',
                      default=False, help='Boolean to create installer executable. If false, GUI executable is created '
                                          'instead')
    (options, args) = parser.parse_args()

    # Flags shared by both build variants (de-duplicated from the original).
    # NOTE(review): --key embeds an obfuscation key in source control; it is
    # not a secret once the binary ships — confirm this is intentional.
    common_flags = [
        '--onefile',
        '--clean',
        '--debug=all',
        # '--windowed',
        '--key=RAADEngineTesting123456',
    ]

    if options.installer is True:
        print("Generating Installer...")
        _fresh_output_dir('data/installer')
        PyInstaller.__main__.run([
            'src/installer.py',
            *common_flags,
            '--workpath=data/installer/temp',
            '--distpath=data/installer',
            '--specpath=data/installer'
        ])
    else:
        print("Generating main GUI...")
        _fresh_output_dir('data/binary')
        logoLocation = '{0}/src/software/{1}'.format(os.getcwd(), 'Intel_IntelligentSystems.png')
        newLocation = '{0}/data/binary/software'.format(os.getcwd())
        PyInstaller.__main__.run([
            'src/software/gui.py',
            *common_flags,
            '--add-data=' + logoLocation + os.pathsep + ".",
            '--workpath=data/binary/temp',
            '--distpath=data/binary',
            '--specpath=data/binary',
        ])
        os.mkdir(newLocation)
        shutil.copyfile(logoLocation, newLocation + '/Intel_IntelligentSystems.png')
if __name__ == '__main__':
    """Performs execution delta of the process."""
    start_time = datetime.datetime.now()
    try:
        main()
    except Exception as errorMain:
        # Top-level boundary: report and continue to the timing summary.
        print("Fail End Process: {0}".format(errorMain))
        traceback.print_exc()
    stop_time = datetime.datetime.now()
    print("Execution time: " + str(stop_time - start_time))
| 39.594595
| 121
| 0.509556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,295
| 0.44198
|
0c983c89d954e199eb26dc9eb1e9dfde6cd61d8c
| 1,395
|
py
|
Python
|
day10/django/app4/dateview/updateTeachPlan.py
|
Vanessa-kriby/Python
|
1fbef67852fb362712fc48fa5c3c29eac68fe202
|
[
"Apache-2.0"
] | null | null | null |
day10/django/app4/dateview/updateTeachPlan.py
|
Vanessa-kriby/Python
|
1fbef67852fb362712fc48fa5c3c29eac68fe202
|
[
"Apache-2.0"
] | null | null | null |
day10/django/app4/dateview/updateTeachPlan.py
|
Vanessa-kriby/Python
|
1fbef67852fb362712fc48fa5c3c29eac68fe202
|
[
"Apache-2.0"
] | null | null | null |
from app1.models import *
from app1.util.utils import *
def updateTeachPlan(request):
    '''
    Update TeachPlan rows from a JSON POST body, then return the full
    teach-plan listing.

    post:
        http://127.0.0.1:8000/app4/updateTeachPlan
        body: {"data": [{"tpno": ..., "credit": ..., "teach_date": ...,
                         "evaluation_method": ...}, ...]}

    Per-item parameters:
        tpno: plan number (row selector)
        credit: credits
        teach_date: course start date
        evaluation_method: assessment method

    NOTE(review): the original docstring also documented a GET form
    (?tpno=...&credit=...) but no GET handling exists — non-POST requests
    just return the current listing. Confirm whether GET updates were meant
    to be supported.
    '''
    try:
        if request.method == 'POST':
            teadata = json.loads(request.body)
            for item in teadata["data"]:
                # The original stored .update()'s row count in `result` and
                # immediately overwrote it below — a dead store, removed.
                TeachPlan.objects.filter(tpno=item["tpno"]).update(
                    credit=item["credit"],
                    teach_date=item["teach_date"],
                    evaluation_method=item["evaluation_method"],
                )
        # Return the refreshed listing for POST and any other method alike.
        result = TeachPlan.objects.all().values(
            "tpno", "credit", "teach_date", "evaluation_method",
            "department__dno", "department__dname",
            "course__cno", "course__cname",
            "teacher__tno", "teacher__tname",
        )
        return showJsonresult(result)
    except Exception as e:
        response = {}
        response['msg'] = str(e)
        response['err_num'] = 1
        return showJsonerror(response)
| 35.769231
| 195
| 0.593548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 717
| 0.499652
|
0c98a8571671f7ec771a67037041f3b8e9ba1d24
| 274
|
py
|
Python
|
tests/test_tradera.py
|
paeronskruven/lw
|
a2e4b6363656812a0857a8b2cf69be3e710afe94
|
[
"MIT"
] | null | null | null |
tests/test_tradera.py
|
paeronskruven/lw
|
a2e4b6363656812a0857a8b2cf69be3e710afe94
|
[
"MIT"
] | null | null | null |
tests/test_tradera.py
|
paeronskruven/lw
|
a2e4b6363656812a0857a8b2cf69be3e710afe94
|
[
"MIT"
] | null | null | null |
import lw.sources.tradera
def test_valid_query():
    """A broad single-letter query should return at least one result."""
    source = lw.sources.tradera.TraderaSource()
    hits = list(source.query('a'))
    assert len(hits) > 0
def test_invalid_query():
    """A nonsense query should return no results."""
    source = lw.sources.tradera.TraderaSource()
    hits = list(source.query('abc123'))
    assert len(hits) == 0
| 22.833333
| 64
| 0.70438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.040146
|
0c98bb4fe620a4715169adb783364dc34a8d9e45
| 3,973
|
py
|
Python
|
app/app/process_data.py
|
yongjjang/book-rental-service
|
53133c88fed6e8d5d9b1374e951f5aa83598e547
|
[
"MIT"
] | null | null | null |
app/app/process_data.py
|
yongjjang/book-rental-service
|
53133c88fed6e8d5d9b1374e951f5aa83598e547
|
[
"MIT"
] | null | null | null |
app/app/process_data.py
|
yongjjang/book-rental-service
|
53133c88fed6e8d5d9b1374e951f5aa83598e547
|
[
"MIT"
] | null | null | null |
from .database import db_session, init_db
from .models import User, Book, BookRental
from sqlalchemy import func
import datetime
import logging
init_db()
def get_tables(db_table):
"""
@author : TAEYONG LEE
:param db_table: database table in model.py
:type db_table: database model Object
:return type: list[list]
"""
entries = []
queries = db_session.query(db_table)
for q in queries:
s = str(q).split('|')
entries.append(s)
return entries
def add_entry(entry):
"""
@author : TAEYONG LEE
:param entry: database table in model.py
:type entry: database model Object ex) User(param..), Book(params..) etc..
:return Returns True if the operation succeeds, False if it fails
:usage
user = User(id, name...)
add_entry(user)
"""
try:
db_session.add(entry)
db_session.commit()
except Exception as ex:
print(ex)
return False
logging.info("Database : Add Entry Success")
logging.info(str(entry).split('|'))
return True
def delete_entry(db_table, id):
"""
@author : TAEYONG LEE
:param db_table: database table in model.py
:type db_table: database model Object
:param id: database model entry's id
:type id: int
:return Returns True if the operation succeeds, False if it fails
:usage
delete_entry(User, 100)
"""
try:
db_session.query(db_table).filter(db_table.id == id).delete()
db_session.commit()
except Exception as ex:
print(ex)
return False
logging.info("Database : Delete Entry Success")
return True
def search_entry(db_table, condition, keyword):
"""
@author : TAEYONG LEE
:param db_table: database table in model.py
:type db_table: database model Object
:param condition: search condition. ex)User.name
:type condition: db_table.column
:param keyword: search keyword. ex)"Lee"
:type keyword: str or int
:return filtered table.
:usage
**if keyword is str**
entry = search_entry(User, User.name, "yongjjang")
**if keyword is int**
entry = search_entry(User, User.id, 200)
"""
if type(keyword) is str:
result = db_session.query(db_table).filter(condition.ilike('%' + keyword + "%")).first()
entry = str(result).split('|')
return entry
elif type(keyword) is int:
id = keyword
result = db_session.query(db_table).filter(condition == id).first()
entry = str(result).split('|')
return entry
def get_max_id(db_table):
    """Return the largest primary-key value in ``db_table``, or 1 on failure.

    ``scalar()`` returns None for an empty table, so ``int(None)`` raises and
    falls into the except branch, yielding the fallback value 1.
    """
    try:
        return int(db_session.query(func.max(db_table.id)).scalar())
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return 1
def get_rent_date():
    """Return (rental date, return date) as ISO-format date strings.

    The return date is a fixed 14-day loan period after today.
    """
    start = datetime.date.today()
    due = start + datetime.timedelta(days=14)
    return str(start), str(due)
def parse_row(row):
    """Split a model's '|'-delimited string representation into fields."""
    text = str(row)
    return text.split('|')
if __name__ == "__main__":
    # Ad-hoc smoke test of the helpers against the live session.
    birthday_matches = search_entry(User, User.birthday, "2020%")
    print(birthday_matches)

    all_users = User.query.all()
    missing_books = Book.query.filter(Book.name.like("asdasdasd")).all()
    if not missing_books:
        print("HI")
# print(r.column_list)
| 24.524691
| 127
| 0.597533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,089
| 0.52369
|
0c998b3ac75eae9f76dce560875ced69e8123b01
| 6,511
|
py
|
Python
|
cmdb_v0.1/apps/detail/models.py
|
codemaker-man/projects
|
334aac28b72a7b466fba23df4db11e95df13a3ec
|
[
"MIT"
] | 1
|
2018-12-05T05:29:46.000Z
|
2018-12-05T05:29:46.000Z
|
cmdb_v0.1/apps/detail/models.py
|
codemaker-man/projects
|
334aac28b72a7b466fba23df4db11e95df13a3ec
|
[
"MIT"
] | null | null | null |
cmdb_v0.1/apps/detail/models.py
|
codemaker-man/projects
|
334aac28b72a7b466fba23df4db11e95df13a3ec
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from django.db import models
import django.utils.timezone as timezone
# Login-credential table for hosts reached over SSH (servers, virtual machines).
class ConnectionInfo(models.Model):
    """SSH login information for a physical server or virtual machine."""

    # SSH connection credentials
    ssh_username = models.CharField(max_length=10, default='', verbose_name=u'ssh用户名', null=True)
    ssh_userpasswd = models.CharField(max_length=40, default='', verbose_name=u'ssh用户密码', null=True)
    ssh_hostip = models.CharField(max_length=40, default='', verbose_name=u'ssh登录的ip', null=True)
    ssh_host_port = models.CharField(max_length=10, default='', verbose_name=u'ssh登录的端口', null=True)
    ssh_rsa = models.CharField(max_length=64, default='', verbose_name=u'ssh私钥')
    rsa_pass = models.CharField(max_length=64, default='', verbose_name=u'私钥的密钥')
    # Connection status: 0 = login failed, 1 = login succeeded
    ssh_status = models.IntegerField(default=0, verbose_name=u'用户连接状态,0-登录失败,1-登录成功')
    # Connection type: 1 = rsa login, 2 = dsa login, 3 = plain-user rsa login,
    # 4 = docker reachable, 5 = docker unreachable
    ssh_type = models.IntegerField(default=0, verbose_name=u'用户连接类型, 1-rsa登录,2-dsa登录,'
                                                          u'3-ssh_rsa登录,4-docker成功,5-docker无法登录')
    # Unique device identifier shared with the inventory tables
    sn_key = models.CharField(max_length=256, verbose_name=u"唯一设备ID", default="")

    class Meta:
        verbose_name = u'用户登录信息表'
        verbose_name_plural = verbose_name
        db_table = "connectioninfo"
# Login-credential table for network devices (switches, other network gear).
class NetConnectionInfo(models.Model):
    """Login information for a network device.

    NOTE(review): the ``tel_`` field prefix suggests telnet access — confirm
    against the collector code.
    """

    tel_username = models.CharField(max_length=10, default='', verbose_name=u'用户名', null=True)
    tel_userpasswd = models.CharField(max_length=40, default='', verbose_name=u'设备用户密码', null=True)
    tel_enpasswd = models.CharField(max_length=40, default='', verbose_name=u'设备超级用户密码', null=True)
    tel_host_port = models.CharField(max_length=10, default='', verbose_name=u'设备登录的端口', null=True)
    tel_hostip = models.CharField(max_length=40, default='', verbose_name=u'设备登录的ip', null=True)
    # Connection status: 0 = login failed, 1 = login succeeded
    tel_status = models.IntegerField(default=0, verbose_name=u'用户连接状态,0-登录失败,1-登录成功')
    # Connection type: 1 = normal user can log in, 2 = superuser can log in
    tel_type = models.IntegerField(default=0, verbose_name=u'用户连接类型, 1-普通用户可登录,2-超级用户可登录')
    # Unique device identifier
    sn_key = models.CharField(max_length=256, verbose_name=u"唯一设备ID", default="")
    dev_info = models.ForeignKey('NetWorkInfo')

    class Meta:
        verbose_name = u'网络设备用户登录信息'
        verbose_name_plural = verbose_name
        db_table = "netconnectioninfo"
# Rack (cabinet) information.
class CabinetInfo(models.Model):
    """A server rack; referenced by every device inventory table."""

    cab_name = models.CharField(max_length=10, verbose_name=u'机柜编号')
    # Rack level: values 1-10 represent levels 1 through 10
    cab_lever = models.CharField(max_length=2, verbose_name=u'机器U数,1-10分别代表1~10层')

    class Meta:
        verbose_name = u'机柜信息表'
        verbose_name_plural = verbose_name
        db_table = "cabinetinfo"
# Physical server inventory.
class PhysicalServerInfo(models.Model):
    """A physical server, its rack location and its login credentials."""

    # server_name = models.CharField(max_length=15, verbose_name=u'服务器名')
    server_ip = models.CharField(max_length=40, verbose_name=u'服务器IP')
    # Hardware brand (e.g. Dell or other vendors)
    machine_brand = models.CharField(max_length=60, default='--', verbose_name=u'服务器品牌')
    # machine_type = models.IntegerField(default=0, verbose_name=u'服务器,0-物理服务器,1-虚拟服务器,2-')
    system_ver = models.CharField(max_length=30, default='', verbose_name=u'操作系统版本')
    sys_hostname = models.CharField(max_length=15, verbose_name=u'操作系统主机名')
    mac = models.CharField(max_length=512, default='', verbose_name=u'MAC地址')
    sn = models.CharField(max_length=256, verbose_name=u'SN-主机的唯一标识', default='')
    vir_type = models.CharField(max_length=2, verbose_name=u'宿主机类型', default='')
    # Rack this server is mounted in
    ser_cabin = models.ForeignKey('CabinetInfo')
    # OS login credentials
    conn_phy = models.ForeignKey('ConnectionInfo')

    class Meta:
        verbose_name = u'物理服务器信息表'
        verbose_name_plural = verbose_name
        db_table = "physicalserverinfo"
# Virtual device inventory.
class VirtualServerInfo(models.Model):
    """A virtual machine or container, its physical host and credentials."""

    # server_name = models.CharField(max_length=15, verbose_name=u'服务器名')
    server_ip = models.CharField(max_length=40, verbose_name=u'服务器IP')
    # Virtualization flavour: kvm / Vmware / Docker / others
    server_type = models.CharField(max_length=80, default='', verbose_name=u'服务器类型:kvm,Vmware,Docker,others')
    system_ver = models.CharField(max_length=30, default='', verbose_name=u'操作系统版本')
    sys_hostname = models.CharField(max_length=15, verbose_name=u'操作系统主机名')
    mac = models.CharField(max_length=512, default='', verbose_name=u'MAC地址')
    sn = models.CharField(max_length=256, verbose_name=u'SN-主机的唯一标识', default='')
    # Physical host this virtual device runs on
    vir_phy = models.ForeignKey('PhysicalServerInfo')
    # OS login credentials
    conn_vir = models.ForeignKey('ConnectionInfo')

    class Meta:
        verbose_name = u'虚拟设备表'
        verbose_name_plural = verbose_name
        db_table = "virtualserverinfo"
# Network device inventory.
class NetWorkInfo(models.Model):
    """A network device and the rack it is mounted in."""

    host_ip = models.CharField(max_length=40, verbose_name=u'网络设备ip')
    host_name = models.CharField(max_length=10, verbose_name=u'网络设备名')
    # Unique device identifier
    sn = models.CharField(max_length=256, verbose_name=u"SN-设备的唯一标识", default="")
    # Rack this device is mounted in
    net_cab = models.ForeignKey('CabinetInfo')

    class Meta:
        verbose_name = u'网络设备表'
        verbose_name_plural = verbose_name
        db_table = "networkinfo"
class OtherMachineInfo(models.Model):
    """A device that does not fit the server / VM / network categories."""

    ip = models.CharField(max_length=40, verbose_name=u'设备ip')
    # Unique device identifier
    sn_key = models.CharField(max_length=256, verbose_name=u'设备的唯一标识')
    machine_name = models.CharField(max_length=20, verbose_name=u'设备名称')
    remark = models.TextField(default='', verbose_name=u'备注')
    # Reason this device was classified as "other"
    reson_str = models.CharField(max_length=128,verbose_name=u"归纳原因",default='')
    # Rack this device is mounted in
    oth_cab = models.ForeignKey('CabinetInfo')

    class Meta:
        verbose_name = u'其它设备表'
        verbose_name_plural = verbose_name
        db_table = 'othermachineinfo'
class StatisticsRecord(models.Model):
    """Aggregated hardware counts recorded after each inventory scan."""

    # BUG FIX: the original used default=timezone.now().strftime('%Y-%m-%d'),
    # which is evaluated ONCE at import/migration time, freezing that date for
    # every row created afterwards. Passing the callable (no parentheses)
    # defers evaluation to row-creation time, per Django's documentation on
    # dynamic field defaults.
    datatime = models.DateTimeField(verbose_name=u"更新时间", default=timezone.now)
    all_count = models.IntegerField(verbose_name=u"所有设备数量", default=0)
    pyh_count = models.IntegerField(verbose_name=u"物理设备数量", default=0)
    net_count = models.IntegerField(verbose_name=u"网络设备数量", default=0)
    other_count = models.IntegerField(verbose_name=u"其他设备数量", default=0)
    kvm_count = models.IntegerField(verbose_name=u"KVM设备数量", default=0)
    docker_count = models.IntegerField(verbose_name=u"Docker设备数量", default=0)
    vmx_count = models.IntegerField(verbose_name=u"VMX设备数量", default=0)

    class Meta:
        verbose_name = u'扫描后的汇总硬件统计信息'
        verbose_name_plural = verbose_name
        db_table = 'statisticsrecord'
| 42.835526
| 109
| 0.714176
| 7,314
| 0.961483
| 0
| 0
| 0
| 0
| 0
| 0
| 2,626
| 0.345208
|
0c9a4fc23573fcb066eaa1cdddfd05b22ff7fab8
| 10,370
|
py
|
Python
|
tests/__init__.py
|
pywikibot-catfiles/file-metadata
|
79c585dcb67b966f02485136c4d875d5b5365230
|
[
"MIT"
] | 10
|
2016-07-15T07:07:53.000Z
|
2022-02-17T07:41:03.000Z
|
tests/__init__.py
|
AbdealiJK/file-metadata
|
79c585dcb67b966f02485136c4d875d5b5365230
|
[
"MIT"
] | 48
|
2016-03-14T06:44:36.000Z
|
2016-07-13T00:35:54.000Z
|
tests/__init__.py
|
pywikibot-catfiles/file-metadata
|
79c585dcb67b966f02485136c4d875d5b5365230
|
[
"MIT"
] | 5
|
2017-04-24T07:02:22.000Z
|
2020-12-14T06:23:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, unicode_literals,
print_function)
# flake8: noqa (unused import and line too long due to links)
import os
import random
import string
import struct
import wave
try:
import unittest
except ImportError:
import unittest2 as unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from file_metadata._compat import makedirs, which
from file_metadata.utilities import download
CACHE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files')
file_download_links = {
# Audio
'wikiexample.ogg': 'https://upload.wikimedia.org/wikipedia/commons/c/c8/Example.ogg',
'drums.mid': 'https://upload.wikimedia.org/wikipedia/commons/6/61/Drum_sample.mid',
'bell.wav': 'https://upload.wikimedia.org/wikipedia/commons/9/97/156064_marcolo91_bicycle-bell.wav',
'bell.flac': 'https://upload.wikimedia.org/wikipedia/commons/b/b2/Bell-ring.flac',
'bell.oga': 'https://upload.wikimedia.org/wikipedia/commons/6/6c/Announcement_on_a_wharf.oga',
'bell.ogg': 'https://upload.wikimedia.org/wikipedia/commons/3/34/Sound_Effect_-_Door_Bell.ogg',
'multiline_ffprobe.ogg': 'https://upload.wikimedia.org/wikipedia/commons/5/58/17650_thoschi_issyk-kul.ogg',
# Videos
'veins.ogv': 'https://upload.wikimedia.org/wikipedia/commons/f/f2/POROS_3.ogv',
'ogg_video.ogg': 'https://upload.wikimedia.org/wikipedia/commons/e/e3/2010-06-06-V-German-Flag.ogg',
'sample.webm': 'https://upload.wikimedia.org/wikipedia/commons/a/a5/02_Punktion_des_ausgebildeten_Knopflochs%281%29.webm',
# Images
'ball.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/51/1-ball.svg/226px-1-ball.svg.png',
'ball.svg': 'https://upload.wikimedia.org/wikipedia/commons/5/51/1-ball.svg',
'red.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/6/62/Pure_Red.svg/100px-Pure_Red.svg.png',
'green.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Pure_Green.svg/100px-Pure_Green.svg.png',
'blue.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/7/77/Pure_Blue.svg/100px-Pure_Blue.svg.png',
'red.svg': 'https://upload.wikimedia.org/wikipedia/commons/6/62/Pure_Red.svg',
'green.svg': 'https://upload.wikimedia.org/wikipedia/commons/c/c5/Pure_Green.svg',
'blue.svg': 'https://upload.wikimedia.org/wikipedia/commons/7/77/Pure_Blue.svg',
'animated.svg': 'https://upload.wikimedia.org/wikipedia/commons/f/fd/Animated_pendulum.svg',
'animated.gif': 'https://upload.wikimedia.org/wikipedia/commons/d/d7/123_Numbers.gif',
'animated.png': 'https://upload.wikimedia.org/wikipedia/commons/b/b5/Load.png',
'static.gif': 'https://upload.wikimedia.org/wikipedia/commons/e/ed/Pix.gif',
'cmyk.jpg': 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/11/17-barcodes-1-e_ces_2012_01.jpg/524px-17-barcodes-1-e_ces_2012_01.jpg',
'unknown_cmyk.jpg': 'https://upload.wikimedia.org/wikipedia/commons/f/f3/TeXML_dtd.jpg',
# SVG files with different mimetypes
'image_svg_xml.svg': 'https://upload.wikimedia.org/wikipedia/commons/6/62/Pure_Red.svg',
'text_plain.svg': 'https://upload.wikimedia.org/wikipedia/commons/5/57/Color_icon_white.svg',
'text_html.svg': 'https://upload.wikimedia.org/wikipedia/commons/f/fd/Animated_pendulum.svg',
'application_xml.svg': 'https://upload.wikimedia.org/wikipedia/commons/0/0b/Sieve_of_Eratosthenes_animation.svg',
# Images with special exifdata:
'canon_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/7/7b/Annagrah-2_041.JPG',
'nonascii_exifdata.jpg': 'https://upload.wikimedia.org/wikipedia/commons/d/d5/2013-04-25_21-09-18-ecl-lune-mosaic.jpg',
# Images of faces
'mona_lisa.jpg': 'https://upload.wikimedia.org/wikipedia/commons/7/7d/Mona_Lisa_color_restoration.jpg',
'michael_jackson.jpg': 'https://upload.wikimedia.org/wikipedia/commons/7/7e/Michaeljackson_%28cropped%29.jpg',
'charlie_chaplin.jpg': 'https://upload.wikimedia.org/wikipedia/commons/0/00/Charlie_Chaplin.jpg',
'baby_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/1/10/Portrait_of_a_male_baby_%285866018681%29.jpg',
'baby_partial_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/1/1f/Sweet_Baby_Kisses_Family_Love.jpg',
'old_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/1/11/Brazil_%283042571516%29_%282%29.jpg',
'beard_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/6/61/Oskar_Almgren%2C_Stockholm%2C_Sweden_%285859501260%29_%282%29.jpg',
'cat_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/thumb/c/c4/Savannah_Cat_portrait.jpg/400px-Savannah_Cat_portrait.jpg',
'monkey_face.jpg': 'https://upload.wikimedia.org/wikipedia/commons/2/27/Baby_ginger_monkey.jpg',
'woman.xcf': 'https://upload.wikimedia.org/wikipedia/commons/a/af/Beatrix_Podolska_pedagog_muzykolog_Krakow_2008.xcf',
# Barcodes / QR Codes / Data matrices
'qrcode.jpg': 'https://upload.wikimedia.org/wikipedia/commons/5/5b/Qrcode_wikipedia.jpg',
'barcode.png': 'https://upload.wikimedia.org/wikipedia/commons/1/1f/Rationalized-codabar.png',
'datamatrix.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Datamatrix.svg/200px-Datamatrix.svg.png',
'multibarcodes.png': 'https://upload.wikimedia.org/wikipedia/commons/9/98/DHL_Online-Frankierung_-_Paket_bis_5_kg_-_D-USA.png',
'vertical_barcode.jpg': 'https://upload.wikimedia.org/wikipedia/commons/9/9c/Final_Ida_Pasto_vs._Santa_Fe.jpg',
'huge.png': 'https://upload.wikimedia.org/wikipedia/commons/3/31/Grand_paris_express.png',
'blank.xcf': 'https://upload.wikimedia.org/wikipedia/commons/e/e2/Blank_file.xcf',
'example.tiff': 'https://upload.wikimedia.org/wikipedia/commons/b/b0/Dabigatran_binding_pockets.tiff',
# Line drawings
'simple_line_drawing.jpg': 'https://upload.wikimedia.org/wikipedia/commons/c/c6/Destilacija_rakije.jpg',
'detailed_line_drawing.jpg': 'https://upload.wikimedia.org/wikipedia/commons/d/db/Compound_Microscope_1876.JPG',
'very_detailed_line_drawing.jpg': 'https://upload.wikimedia.org/wikipedia/commons/c/cb/Hospital_ward_on_Red_Rover.jpg',
'dark_line_drawing.jpg': 'https://upload.wikimedia.org/wikipedia/commons/3/3e/Bird_in_flight_line_drawing_art.jpg',
# Logos
'wikimedia_logo.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Wikimedia-logo.svg/768px-Wikimedia-logo.svg.png',
'wikidata_logo.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/f/ff/Wikidata-logo.svg/1024px-Wikidata-logo.svg.png',
'wikipedia_logo.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/de/Wikipedia_Logo_1.0.png/768px-Wikipedia_Logo_1.0.png',
'commons_logo.png': 'https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Commons-logo.svg/571px-Commons-logo.svg.png',
# Geocoded images
'geotag_osaka.jpg': 'https://upload.wikimedia.org/wikipedia/commons/5/50/Honda_STEPWGN_SPADA%E3%83%BBCool_Spirit_%28RP3%29_rear.JPG',
# Monochrome colors
'blackwhite_monochrome.jpg': 'https://upload.wikimedia.org/wikipedia/commons/2/27/0218_-_Taormina_-_Badia_Vecchia_-_Foto_Giovanni_Dall%27Orto%2C_20-May-2008.jpg',
'blue_monochrome.jpg': 'https://upload.wikimedia.org/wikipedia/commons/9/9f/Paolo_Monti_-_Serie_fotografica_-_BEIC_6358396.jpg',
'green_monochrome.jpg': 'https://upload.wikimedia.org/wikipedia/commons/e/ea/Edvard-dawkins.jpg',
'sepia_monochrome.jpg': 'https://upload.wikimedia.org/wikipedia/commons/c/c7/1926_Hupmobile.jpg',
# Color calibrations
'it8_top_bar.jpg': 'https://upload.wikimedia.org/wikipedia/commons/thumb/9/97/Waterfall_at_Schooner_Head_house_%28NYPL_b11707223-G89F198_003B%29.tiff/lossy-page1-1280px-Waterfall_at_Schooner_Head_house_%28NYPL_b11707223-G89F198_003B%29.tiff.jpg',
'it8_bottom_bar.jpg': 'https://upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Two_boys_sitting_in_a_garden_%28NYPL_b11528957-G90F452_008B%29.tiff/lossy-page1-996px-Two_boys_sitting_in_a_garden_%28NYPL_b11528957-G90F452_008B%29.tiff.jpg',
# Application files
'text.pdf': 'https://upload.wikimedia.org/wikipedia/commons/a/a7/Life_of_Future.pdf',
'image.pdf': 'https://upload.wikimedia.org/wikipedia/commons/4/40/AugerTransition1.pdf',
'empty.djvu': 'https://upload.wikimedia.org/wikipedia/commons/4/42/Vuota.djvu',
}
def fetch_file(name, overwrite=False):
    """
    Fetch a file based on the given key. If the file is not found, it is
    created appropriately by either generating it or downloading it from
    elsewhere.

    :param name: The name (key) of the file that is needed.
    :param overwrite: Force overwrite if file exists.
    :return: The absolute path of the requested file.
    """
    filepath = os.path.join(CACHE_DIR, name)
    makedirs(CACHE_DIR, exist_ok=True)

    if not overwrite and os.path.exists(filepath):
        # A cached copy exists and we were not asked to regenerate it.
        return filepath

    if name == 'ascii.txt':
        # Text sample covering the printable ASCII character classes.
        sample_lines = [string.ascii_lowercase, '\n', string.ascii_uppercase, '\n',
                        string.digits, '\n', string.punctuation, '\n']
        with open(filepath, 'w') as handle:
            handle.writelines(sample_lines)
    elif name == 'file.bin':
        # Binary sample containing every 7-bit code point.
        with open(filepath, 'wb') as handle:
            handle.write(''.join(chr(i) for i in range(128)).encode('ascii'))
    elif name == "noise.wav":
        # One second of random 16-bit mono noise at 44.1 kHz.
        wav_file = wave.open(filepath, 'w')
        wav_file.setparams((1, 2, 44100, 0, 'NONE', 'not compressed'))
        for _ in range(44100):
            wav_file.writeframes(struct.pack('h', random.randint(-32767, 32767)))
        wav_file.close()
    elif name in file_download_links:
        download(file_download_links[name], filepath)
    else:
        raise ValueError('Asked to fetch unknown file {0}.'.format(name))
    return filepath
def which_sideeffect(unavailable_executables):
    """Build a ``which`` replacement that pretends the given executables
    are not installed, delegating to the real ``which`` for everything else.
    """
    def fake_which(command, *args, **kwargs):
        if command in unavailable_executables:
            return None
        return which(command, *args, **kwargs)
    return fake_which
def is_toolserver():
    """Return True when the INSTANCEPROJECT env var marks a 'tools' instance."""
    return os.environ.get('INSTANCEPROJECT') == 'tools'
def is_travis():
    """Return True when running under Travis CI (TRAVIS env var is 'true')."""
    return os.environ.get('TRAVIS') == 'true'
| 56.666667
| 250
| 0.73433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,837
| 0.755738
|
0c9ae2b51288ca98ffcd2520e0ff6c3e32a621f6
| 5,707
|
py
|
Python
|
src/api/dataflow/batch/periodic/backend/validator/processings_validator.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/dataflow/batch/periodic/backend/validator/processings_validator.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/dataflow/batch/periodic/backend/validator/processings_validator.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext_lazy as _
from dataflow.batch.exceptions.comp_execptions import BatchTimeCompareError, BatchUnsupportedOperationError
from dataflow.batch.periodic.param_info.builder.periodic_batch_job_builder import PeriodicBatchJobBuilder
from dataflow.batch.utils.time_util import BatchTimeTuple
class ProcessingsValidator(object):
    """Validates window/offset settings of periodic batch processing params."""

    def validate(self, periodic_batch_info_params_obj):
        """Run every validation on the given parameter object.

        :param periodic_batch_info_params_obj: parsed periodic batch params
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        """
        self.validate_input(periodic_batch_info_params_obj)
        self.validate_output_data_offset(periodic_batch_info_params_obj)

    def validate_input(self, periodic_batch_info_params_obj):
        """Check per-input-table window settings.

        :param periodic_batch_info_params_obj: parsed periodic batch params
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        """
        for input_table in periodic_batch_info_params_obj.input_result_tables:
            window_type = input_table.window_type.lower()
            if window_type in ("scroll", "slide", "accumulate"):
                self.__check_greater_than_value(input_table.window_offset, "window_offset", "0H")
            if window_type in ("slide", "accumulate"):
                self.__check_greater_than_value(input_table.window_size, "window_size", "0H")
            if window_type == "accumulate":
                self.__check_greater_than_value(input_table.window_start_offset, "window_start_offset", "0H")
                self.__check_greater_than_value(input_table.window_end_offset, "window_end_offset", "0H")
                self.__check_less_than_value(
                    input_table.window_start_offset,
                    "window_start_offset",
                    input_table.window_size,
                )
                self.__check_less_than_value(
                    input_table.window_end_offset,
                    "window_end_offset",
                    input_table.window_size,
                )
                self.__check_if_null(input_table.accumulate_start_time, "accumulate_start_time")

    @staticmethod
    def __parse_jobnavi_time(value):
        # Turn a jobnavi-format duration string into a comparable BatchTimeTuple.
        parsed = BatchTimeTuple()
        parsed.from_jobnavi_format(value)
        return parsed

    def __check_greater_than_value(self, check_value, check_name, limit_value):
        # Raise when check_value is strictly smaller than limit_value
        # (equal values pass, matching the original comparison).
        self.__check_if_null(check_value, check_name)
        limit_parsed = self.__parse_jobnavi_time(limit_value)
        if self.__parse_jobnavi_time(check_value) < limit_parsed:
            raise BatchUnsupportedOperationError(_("{}数值必须大于{}".format(check_name, limit_value)))

    def __check_less_than_value(self, check_value, check_name, limit_value):
        # Raise when check_value is strictly larger than limit_value.
        self.__check_if_null(check_value, check_name)
        limit_parsed = self.__parse_jobnavi_time(limit_value)
        if self.__parse_jobnavi_time(check_value) > limit_parsed:
            raise BatchUnsupportedOperationError(_("{}数值必须小于{}".format(check_name, limit_value)))

    def __check_if_null(self, check_value, check_name):
        if check_value is None:
            raise BatchUnsupportedOperationError(_("{}数值不能是null".format(check_name)))

    def validate_output_data_offset(self, periodic_batch_info_params_obj):
        """Verify a default output storage offset can be derived from the config.

        :param periodic_batch_info_params_obj: parsed periodic batch params
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        """
        try:
            PeriodicBatchJobBuilder.calculate_output_offset(
                periodic_batch_info_params_obj.input_result_tables,
                periodic_batch_info_params_obj.output_result_tables[0],
                periodic_batch_info_params_obj.count_freq,
                periodic_batch_info_params_obj.schedule_period,
            )
        except BatchTimeCompareError:
            raise BatchUnsupportedOperationError(_("当前配置无法算出默认存储分区,请激活自定义出库配置"))
| 51.414414
| 111
| 0.715437
| 4,085
| 0.701047
| 0
| 0
| 0
| 0
| 0
| 0
| 2,366
| 0.406041
|
0c9ae725d3c7ffae05b2711dd0cf627833bcd823
| 5,855
|
py
|
Python
|
tests/test_build_endpoint.py
|
lsst-sqre/ltd-dasher
|
176e125839b380f005a092189db760b716e8e23d
|
[
"MIT"
] | null | null | null |
tests/test_build_endpoint.py
|
lsst-sqre/ltd-dasher
|
176e125839b380f005a092189db760b716e8e23d
|
[
"MIT"
] | 9
|
2017-01-24T20:28:49.000Z
|
2021-10-04T15:36:17.000Z
|
tests/test_build_endpoint.py
|
lsst-sqre/ltd-dasher
|
176e125839b380f005a092189db760b716e8e23d
|
[
"MIT"
] | null | null | null |
"""Test app.routes.build."""
import responses
mock_product_data = {
"bucket_name": "lsst-the-docs",
"doc_repo": "https://github.com/lsst-sqre/test-059.git",
"domain": "test-059.lsst.io",
"fastly_domain": "n.global-ssl.fastly.net",
"published_url": "https://test-059.lsst.io",
"root_domain": "lsst.io",
"root_fastly_domain": "n.global-ssl.fastly.net",
"self_url": "https://keeper-staging.lsst.codes/products/test-059",
"slug": "test-059",
"surrogate_key": "235becbe0b8349aa88b7f6e086529d77",
"title": "Test Technote Via Bot"
}
mock_editions_data = {
"editions": [
"https://keeper-staging.lsst.codes/editions/388",
"https://keeper-staging.lsst.codes/editions/390"
]
}
mock_edition_388_data = {
"build_url": "https://keeper-staging.lsst.codes/builds/1322",
"date_created": "2017-02-03T23:49:23Z",
"date_ended": None,
"date_rebuilt": "2017-02-03T23:51:21Z",
"product_url": "https://keeper-staging.lsst.codes/products/test-059",
"published_url": "https://test-059.lsst.io",
"self_url": "https://keeper-staging.lsst.codes/editions/388",
"slug": "main",
"surrogate_key": "c1e29b6b1c97450c9d6d854ee3395ec9",
"title": "Latest",
"tracked_refs": [
"master"
]
}
mock_edition_390_data = {
"build_url": "https://keeper-staging.lsst.codes/builds/1324",
"date_created": "2017-02-09T23:40:57Z",
"date_ended": None,
"date_rebuilt": "2017-02-09T23:41:17Z",
"product_url": "https://keeper-staging.lsst.codes/products/test-059",
"published_url": "https://test-059.lsst.io/v/test-branch",
"self_url": "https://keeper-staging.lsst.codes/editions/390",
"slug": "test-branch",
"surrogate_key": "99ab3d93b1b54a4ea49dbe1764b7ea6a",
"title": "test-branch",
"tracked_refs": [
"test-branch"
]
}
mock_builds_data = {
"builds": [
"https://keeper-staging.lsst.codes/builds/1322",
"https://keeper-staging.lsst.codes/builds/1324"
]
}
mock_build_1322_data = {
"bucket_name": "lsst-the-docs",
"bucket_root_dir": "test-059/builds/1",
"date_created": "2017-02-03T23:51:08Z",
"date_ended": None,
"git_refs": [
"master"
],
"github_requester": None,
"product_url": "https://keeper-staging.lsst.codes/products/test-059",
"published_url": "https://test-059.lsst.io/builds/1",
"self_url": "https://keeper-staging.lsst.codes/builds/1322",
"slug": "1",
"surrogate_key": "006e34ec8f714aed956292645bb7e432",
"uploaded": True
}
mock_build_1324_data = {
"bucket_name": "lsst-the-docs",
"bucket_root_dir": "test-059/builds/2",
"date_created": "2017-02-09T23:40:57Z",
"date_ended": None,
"git_refs": [
"test-branch"
],
"github_requester": None,
"product_url": "https://keeper-staging.lsst.codes/products/test-059",
"published_url": "https://test-059.lsst.io/builds/2",
"self_url": "https://keeper-staging.lsst.codes/builds/1324",
"slug": "2",
"surrogate_key": "a7dc0f6b0f4b40cdab851ff68be0ee51",
"uploaded": True
}
mock_bulk_data = {
"product": mock_product_data,
"editions": [
mock_edition_388_data,
mock_edition_390_data
],
"builds": [
mock_build_1322_data,
mock_build_1324_data
]
}
@responses.activate
def test_rebuild_dashboards(anon_client):
    """Test dashboard rebuilds with full client using new bulk metadata
    endpoint.
    """
    # The single bulk endpoint returns product + editions + builds in one
    # payload, so only one mocked response is needed.
    responses.add(
        responses.GET,
        'https://keeper-staging.lsst.codes/products/test-059/dashboard',
        json=mock_bulk_data,
        status=200,
        content_type='application/json')

    r = anon_client.post(
        '/build',
        {
            'product_urls': ['https://keeper-staging.lsst.codes/'
                             'products/test-059']
        }
    )
    # 202 Accepted: the rebuild is queued asynchronously.
    assert r.status == 202
@responses.activate
def test_rebuild_dashboards_oldstyle(anon_client):
    """Test dashboard rebuilds with full client using original endpoints."""
    base = 'https://keeper-staging.lsst.codes'
    # The bulk dashboard endpoint 404s, which forces the client down the
    # old one-resource-per-request fallback path; every individual resource
    # it will fetch is mocked below. (path suffix, JSON payload, status)
    mocked_endpoints = [
        ('/products/test-059/dashboard', {}, 404),
        ('/products/test-059', mock_product_data, 200),
        ('/products/test-059/editions/', mock_editions_data, 200),
        ('/editions/388', mock_edition_388_data, 200),
        ('/editions/390', mock_edition_390_data, 200),
        ('/products/test-059/builds/', mock_builds_data, 200),
        ('/builds/1322', mock_build_1322_data, 200),
        ('/builds/1324', mock_build_1324_data, 200),
    ]
    for path, payload, status in mocked_endpoints:
        responses.add(
            responses.GET,
            base + path,
            json=payload,
            status=status,
            content_type='application/json')

    r = anon_client.post(
        '/build',
        {
            'product_urls': ['https://keeper-staging.lsst.codes/'
                             'products/test-059']
        }
    )
    # 202 Accepted: the rebuild is queued asynchronously.
    assert r.status == 202
| 28.985149
| 76
| 0.625107
| 0
| 0
| 0
| 0
| 2,531
| 0.43228
| 0
| 0
| 3,270
| 0.558497
|
0c9b0609ebab7f0a04accd501a146384f501a809
| 2,650
|
py
|
Python
|
HW2/heart.py
|
MohammadJRanjbar/Data-Mining
|
66492166df12924a754273cdaad169d84968f2e1
|
[
"MIT"
] | null | null | null |
HW2/heart.py
|
MohammadJRanjbar/Data-Mining
|
66492166df12924a754273cdaad169d84968f2e1
|
[
"MIT"
] | null | null | null |
HW2/heart.py
|
MohammadJRanjbar/Data-Mining
|
66492166df12924a754273cdaad169d84968f2e1
|
[
"MIT"
] | null | null | null |
from sklearn import tree
from matplotlib import pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn import metrics
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from pandas import DataFrame
# --- Data loading -----------------------------------------------------------
# Assumes the heart-disease CSV (heart.csv) sits in the working directory.
data = pd.read_csv("heart.csv")
# Exploratory plots kept for reference (disabled):
# sns.set(style="ticks", color_codes=True)
# plot=sns.pairplot(data)
# plot.savefig("heart.png")
# pd.crosstab(data.sex,data.target).plot(kind="bar",figsize=(15,6),color=['#1CA53B','#AA1111' ])
# plt.title('Heart Disease Frequency for Sex')
# plt.xlabel('Sex (0 = Female, 1 = Male)')
# plt.xticks(rotation=0)
# plt.legend(["Haven't Disease", "Have Disease"])
# plt.ylabel('Frequency')
# plt.savefig("heart1.png")
# pd.crosstab(data.age,data.target).plot(kind="bar",figsize=(20,6))
# plt.title('Heart Disease Frequency for Ages')
# plt.xlabel('Age')
# plt.ylabel('Frequency')
# plt.savefig('heartDiseaseAndAges.png')

# --- Feature matrix / target vector ----------------------------------------
feature_names =["age","sex","cp","trestbps","chol" ,"fbs","restecg","thalach","exang","oldpeak","slope","ca","thal"]
x = data[feature_names].values
y = data["target"].values
# Fixed random_state makes the 80/20 split reproducible.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5,shuffle=True)
# Standardize features for the distance-based KNN classifier; the scaler is
# fit on the training split only to avoid leaking test statistics.
feature_scaler = StandardScaler()
X_train = feature_scaler.fit_transform(X_train)
X_test = feature_scaler.transform(X_test)
# K-sweep used to pick n_neighbors=7 (disabled):
# Krange = range(1,30)
# scores = {}
# scores_list = []
# for k in Krange:
#     knn = KNeighborsClassifier(n_neighbors = k)
#     knn.fit(X_train,y_train)
#     y_pred = knn.predict(X_test)
#     scores[k] = metrics.accuracy_score(y_test,y_pred)
#     scores_list.append(metrics.accuracy_score(y_test,y_pred))
# plt.plot(Krange,scores_list)
# plt.xlabel("Value of K")
# plt.ylabel("Accuracy")
# plt.savefig("k.png")
# plt.show()

# --- KNN on the scaled split ------------------------------------------------
model = KNeighborsClassifier(n_neighbors=7)
model.fit(X_train,y_train)
y_pred= model.predict(X_test)
print("Accuracy KNN:",metrics.accuracy_score(y_test, y_pred))

# --- Gaussian Naive Bayes ---------------------------------------------------
# NOTE(review): the data is re-split here (same random_state, so the same
# rows) but WITHOUT standardization — presumably intentional since GaussianNB
# is scale-insensitive per feature, but confirm this was deliberate.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5,shuffle=True)
#Create a Gaussian Classifier
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
print("Accuracy NB:",metrics.accuracy_score(y_test, y_pred))
| 29.444444
| 116
| 0.755849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,180
| 0.445283
|
0c9b51976e219b5f5ddeb4bf2182d69d5aa73bdd
| 1,202
|
py
|
Python
|
backend/api/migrations/0002_auto_20210517_0943.py
|
luxu/django-vue-luxu
|
a4da215697df578074e354d43dd1d9995490d0db
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0002_auto_20210517_0943.py
|
luxu/django-vue-luxu
|
a4da215697df578074e354d43dd1d9995490d0db
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0002_auto_20210517_0943.py
|
luxu/django-vue-luxu
|
a4da215697df578074e354d43dd1d9995490d0db
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-05-17 12:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Pavilhao and
    # Sentenciado models and switches Message's primary key to BigAutoField.

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        # Pavilhao is created first: Sentenciado (below) holds a foreign key
        # to it, so this ordering is load-bearing.
        migrations.CreateModel(
            name='Pavilhao',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero', models.IntegerField()),
            ],
        ),
        # Align Message's pk with the project-wide BigAutoField default.
        migrations.AlterField(
            model_name='message',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.CreateModel(
            name='Sentenciado',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('matricula', models.CharField(max_length=50)),
                # Deleting a Pavilhao cascades to its Sentenciado rows.
                ('pavilhao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.pavilhao')),
            ],
        ),
    ]
| 33.388889
| 117
| 0.578203
| 1,078
| 0.896839
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.140599
|
0c9b6c9c5f4b186068f0fcfe5f2c329ce7772e4d
| 740
|
py
|
Python
|
Module 2/Chapter08/template_simple.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | 73
|
2016-09-15T23:07:04.000Z
|
2022-03-05T15:09:48.000Z
|
Module 2/Chapter08/template_simple.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | null | null | null |
Module 2/Chapter08/template_simple.py
|
real-slim-chadi/Python_Master-the-Art-of-Design-Patterns
|
95ec92272374e330b04d931208abbb184c7c7908
|
[
"MIT"
] | 51
|
2016-10-07T20:47:51.000Z
|
2021-12-22T21:00:24.000Z
|
__author__ = 'Chetan'
from abc import ABCMeta, abstractmethod
class AbstractClass(metaclass=ABCMeta):
    """Template-method base class.

    Fixes the algorithm skeleton in ``template_method`` and defers the
    individual steps to subclasses.
    """

    def __init__(self):
        pass

    def template_method(self):
        """Run the algorithm: operation2 first, then operation1."""
        print("Defining the Algorithm. Operation1 follows Operation2")
        self.operation2()
        self.operation1()

    @abstractmethod
    def operation1(self):
        pass

    @abstractmethod
    def operation2(self):
        pass
class ConcreteClass(AbstractClass):
    """Concrete steps plugged into the inherited template method."""

    def operation2(self):
        print("Operation 2 remains same")

    def operation1(self):
        print("My Concrete Operation1")
class Client:
    # Demo driver: builds the concrete implementation and runs the template.
    def main(self):
        # NOTE(review): 'concreate' looks like a typo for 'concrete'; kept
        # unchanged so external readers of the attribute are not broken.
        self.concreate = ConcreteClass()
        self.concreate.template_method()
# Script entry: exercise the template-method demo.
client = Client()
client.main()
| 19.473684
| 70
| 0.660811
| 637
| 0.860811
| 0
| 0
| 108
| 0.145946
| 0
| 0
| 113
| 0.152703
|
0ca19eadb115712fb3c48ed0a589480fef063fda
| 27,687
|
py
|
Python
|
tests/test_home.py
|
jeroenterheerdt/nexia
|
93ff554913e1dad6389b54179eca7c4ec1f29371
|
[
"Apache-2.0"
] | null | null | null |
tests/test_home.py
|
jeroenterheerdt/nexia
|
93ff554913e1dad6389b54179eca7c4ec1f29371
|
[
"Apache-2.0"
] | null | null | null |
tests/test_home.py
|
jeroenterheerdt/nexia
|
93ff554913e1dad6389b54179eca7c4ec1f29371
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for Nexia Home."""
import json
import os
from os.path import dirname
import unittest
import pytest
from nexia.home import NexiaHome
def load_fixture(filename):
    """Return the text of ``tests/fixtures/<filename>``.

    The path is resolved relative to this module so the suite works from
    any working directory.

    :param filename: fixture file name, e.g. ``"mobile_houses_123456.json"``
    :return: file contents as a string
    """
    test_dir = dirname(__file__)
    path = os.path.join(test_dir, "fixtures", filename)
    # Fixtures are JSON; read them as UTF-8 explicitly instead of relying
    # on the platform default encoding (which differs e.g. on Windows).
    with open(path, encoding="utf-8") as fptr:
        return fptr.read()
class TestNexiaThermostat(unittest.TestCase):
    """Tests for nexia thermostat."""
    # Each test loads a JSON fixture, feeds it to NexiaHome, and asserts the
    # thermostat-level getters against the values recorded in that fixture.

    def test_update(self):
        """Re-applying the same JSON must be idempotent (stable zone ids)."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)
        thermostat = nexia.get_thermostat_by_id(2059661)
        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])
        nexia.update_from_json(devices_json)
        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])
        nexia.update_from_json(devices_json)

    def test_idle_thermo(self):
        """Get methods for an idle thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(2059661)

        self.assertEqual(thermostat.get_model(), "XL1050")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
        self.assertEqual(thermostat.get_device_id(), "000000")
        self.assertEqual(thermostat.get_type(), "XL1050")
        self.assertEqual(thermostat.get_name(), "Downstairs East Wing")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Auto")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_outdoor_temperature(), 88.0)
        self.assertEqual(thermostat.get_relative_humidity(), 0.36)
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.35)
        self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.50)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_emergency_heat(), False)
        self.assertEqual(thermostat.get_system_status(), "System Idle")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
        self.assertEqual(thermostat.is_blower_active(), False)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [83261002, 83261005, 83261008, 83261011])

    def test_idle_thermo_issue_33758(self):
        """Get methods for an idle thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(12345678)

        self.assertEqual(thermostat.get_model(), "XL1050")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
        self.assertEqual(thermostat.get_device_id(), "xxxxxx")
        self.assertEqual(thermostat.get_type(), "XL1050")
        self.assertEqual(thermostat.get_name(), "Thermostat")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Auto")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_outdoor_temperature(), 55.0)
        self.assertEqual(thermostat.get_relative_humidity(), 0.43)
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_fan_speed_setpoint(), 1)
        self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.55)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_humidify_support(), True)
        self.assertEqual(thermostat.has_emergency_heat(), True)
        self.assertEqual(thermostat.is_emergency_heat_active(), False)
        self.assertEqual(thermostat.get_system_status(), "System Idle")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
        self.assertEqual(thermostat.is_blower_active(), False)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [12345678])

    def test_idle_thermo_issue_33968_thermostat_1690380(self):
        """Get methods for an cooling thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
        nexia.update_from_json(devices_json)

        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [1690380])

        thermostat = nexia.get_thermostat_by_id(1690380)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [83037337, 83037340, 83037343])

        self.assertEqual(thermostat.get_model(), "XL1050")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
        self.assertEqual(thermostat.get_device_id(), "removed")
        self.assertEqual(thermostat.get_type(), "XL1050")
        self.assertEqual(thermostat.get_name(), "Thermostat")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Auto")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_outdoor_temperature(), 80.0)
        self.assertEqual(thermostat.get_relative_humidity(), 0.55)
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.41)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.41)
        self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.5)
        self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.55)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_humidify_support(), False)
        self.assertEqual(thermostat.has_emergency_heat(), True)
        self.assertEqual(thermostat.is_emergency_heat_active(), False)
        self.assertEqual(thermostat.get_system_status(), "Cooling")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
        self.assertEqual(thermostat.is_blower_active(), True)

    def test_active_thermo(self):
        """Get methods for an active thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(2293892)

        self.assertEqual(thermostat.get_model(), "XL1050")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581321824")
        self.assertEqual(thermostat.get_device_id(), "0281B02C")
        self.assertEqual(thermostat.get_type(), "XL1050")
        self.assertEqual(thermostat.get_name(), "Master Suite")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.get_variable_fan_speed_limits(), (0.35, 1.0))
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Auto")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_outdoor_temperature(), 87.0)
        self.assertEqual(thermostat.get_relative_humidity(), 0.52)
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.69)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.69)
        self.assertEqual(thermostat.get_fan_speed_setpoint(), 0.35)
        self.assertEqual(thermostat.get_dehumidify_setpoint(), 0.45)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_humidify_support(), False)
        self.assertEqual(thermostat.has_emergency_heat(), False)
        self.assertEqual(thermostat.get_system_status(), "Cooling")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.get_air_cleaner_mode(), "auto")
        self.assertEqual(thermostat.is_blower_active(), True)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [83394133, 83394130, 83394136, 83394127, 83394139])

    @pytest.mark.skip(reason="not yet supported")
    def test_xl624(self):
        """Get methods for an xl624 thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
        nexia.update_from_json(devices_json)

        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [2222222, 3333333])
        thermostat = nexia.get_thermostat_by_id(1111111)

        self.assertEqual(thermostat.get_model(), None)
        self.assertEqual(thermostat.get_firmware(), "2.8")
        self.assertEqual(thermostat.get_dev_build_number(), "0603340208")
        self.assertEqual(thermostat.get_device_id(), None)
        self.assertEqual(thermostat.get_type(), None)
        self.assertEqual(thermostat.get_name(), "Downstairs Hall")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.has_variable_fan_speed(), False)
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Auto")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Cycler"])
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
        self.assertEqual(thermostat.has_dehumidify_support(), False)
        self.assertEqual(thermostat.has_humidify_support(), False)
        self.assertEqual(thermostat.has_emergency_heat(), False)
        self.assertEqual(thermostat.get_system_status(), "System Idle")
        self.assertEqual(thermostat.has_air_cleaner(), False)
        self.assertEqual(thermostat.is_blower_active(), False)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [12345678])

    def test_xl824_1(self):
        """Get methods for an xl824 thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
        nexia.update_from_json(devices_json)

        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [2222222, 3333333])
        thermostat = nexia.get_thermostat_by_id(2222222)

        self.assertEqual(thermostat.get_model(), "XL824")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581314625")
        self.assertEqual(thermostat.get_device_id(), "0167CA48")
        self.assertEqual(thermostat.get_type(), "XL824")
        self.assertEqual(thermostat.get_name(), "Family Room")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.has_variable_fan_speed(), True)
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Circulate")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_humidify_support(), False)
        self.assertEqual(thermostat.has_emergency_heat(), False)
        self.assertEqual(thermostat.get_system_status(), "System Idle")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.is_blower_active(), False)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [88888888])

    def test_xl824_2(self):
        """Get methods for an xl824 thermostat."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
        nexia.update_from_json(devices_json)

        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [2222222, 3333333])
        thermostat = nexia.get_thermostat_by_id(3333333)

        self.assertEqual(thermostat.get_model(), "XL824")
        self.assertEqual(thermostat.get_firmware(), "5.9.1")
        self.assertEqual(thermostat.get_dev_build_number(), "1581314625")
        self.assertEqual(thermostat.get_device_id(), "01573380")
        self.assertEqual(thermostat.get_type(), "XL824")
        self.assertEqual(thermostat.get_name(), "Upstairs")
        self.assertEqual(thermostat.get_deadband(), 3)
        self.assertEqual(thermostat.get_setpoint_limits(), (55, 99))
        self.assertEqual(thermostat.has_variable_fan_speed(), True)
        self.assertEqual(thermostat.get_unit(), "F")
        self.assertEqual(thermostat.get_humidity_setpoint_limits(), (0.35, 0.65))
        self.assertEqual(thermostat.get_fan_mode(), "Circulate")
        self.assertEqual(thermostat.get_fan_modes(), ["Auto", "On", "Circulate"])
        self.assertEqual(thermostat.get_current_compressor_speed(), 0.0)
        self.assertEqual(thermostat.get_requested_compressor_speed(), 0.0)
        self.assertEqual(thermostat.has_dehumidify_support(), True)
        self.assertEqual(thermostat.has_humidify_support(), False)
        self.assertEqual(thermostat.has_emergency_heat(), False)
        self.assertEqual(thermostat.get_system_status(), "System Idle")
        self.assertEqual(thermostat.has_air_cleaner(), True)
        self.assertEqual(thermostat.is_blower_active(), False)

        zone_ids = thermostat.get_zone_ids()
        self.assertEqual(zone_ids, [99999999])
class TestNexiaHome(unittest.TestCase):
    """Tests for nexia home."""
    # Home-level getters: the house name and the set of thermostat ids.

    def test_basic(self):
        """Basic tests for NexiaHome."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        self.assertEqual(nexia.get_name(), "Hidden")
        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [2059661, 2059676, 2293892, 2059652])

    def test_basic_issue_33758(self):
        """Basic tests for NexiaHome."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
        nexia.update_from_json(devices_json)

        self.assertEqual(nexia.get_name(), "Hidden")
        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [12345678])
class TestNexiaThermostatZone(unittest.TestCase):
    """Tests for nexia thermostat zone."""
    # Zone-level getters (setpoints, mode, preset, status) asserted against
    # fixture data for idle, cooling, and relieving-air states.

    def test_zone_issue_33968_zone_83037337(self):
        """Tests for nexia thermostat zone that is cooling."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(1690380)
        zone = thermostat.get_zone_by_id(83037337)
        self.assertEqual(zone.thermostat, thermostat)

        self.assertEqual(zone.get_name(), "Family Room")
        self.assertEqual(zone.get_cooling_setpoint(), 77)
        self.assertEqual(zone.get_heating_setpoint(), 74)
        self.assertEqual(zone.get_current_mode(), "COOL")
        self.assertEqual(
            zone.get_requested_mode(), "COOL",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Damper Closed",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), False)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_zone_issue_33968_zone_83037340(self):
        """Tests for nexia thermostat zone that is cooling."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(1690380)
        zone = thermostat.get_zone_by_id(83037340)
        self.assertEqual(zone.thermostat, thermostat)

        self.assertEqual(zone.get_name(), "Office")
        self.assertEqual(zone.get_cooling_setpoint(), 77)
        self.assertEqual(zone.get_heating_setpoint(), 74)
        self.assertEqual(zone.get_current_mode(), "COOL")
        self.assertEqual(
            zone.get_requested_mode(), "COOL",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Damper Open",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), True)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_zone_issue_33968_zone_83037343(self):
        """Tests for nexia thermostat zone that is cooling."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33968.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(1690380)
        zone = thermostat.get_zone_by_id(83037343)
        self.assertEqual(zone.thermostat, thermostat)

        self.assertEqual(zone.get_name(), "Master")
        self.assertEqual(zone.get_cooling_setpoint(), 77)
        self.assertEqual(zone.get_heating_setpoint(), 68)
        self.assertEqual(zone.get_current_mode(), "COOL")
        self.assertEqual(
            zone.get_requested_mode(), "COOL",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Damper Open",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), True)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_zone_issue_33758(self):
        """Tests for nexia thermostat zone relieving air."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_issue_33758.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(12345678)
        zone = thermostat.get_zone_by_id(12345678)
        self.assertEqual(zone.thermostat, thermostat)

        self.assertEqual(zone.get_name(), "Thermostat NativeZone")
        self.assertEqual(zone.get_cooling_setpoint(), 73)
        self.assertEqual(zone.get_heating_setpoint(), 68)
        self.assertEqual(zone.get_current_mode(), "AUTO")
        self.assertEqual(
            zone.get_requested_mode(), "AUTO",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Idle",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Run Schedule - None",
        )
        self.assertEqual(zone.is_calling(), False)
        self.assertEqual(zone.is_in_permanent_hold(), False)

    def test_zone_relieving_air(self):
        """Tests for nexia thermostat zone relieving air."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(2293892)
        zone = thermostat.get_zone_by_id(83394133)
        self.assertEqual(zone.thermostat, thermostat)

        self.assertEqual(zone.get_name(), "Bath Closet")
        self.assertEqual(zone.get_cooling_setpoint(), 79)
        self.assertEqual(zone.get_heating_setpoint(), 63)
        self.assertEqual(zone.get_current_mode(), "AUTO")
        self.assertEqual(
            zone.get_requested_mode(), "AUTO",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Relieving Air",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), True)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_zone_cooling_air(self):
        """Tests for nexia thermostat zone cooling."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(2293892)
        zone = thermostat.get_zone_by_id(83394130)

        self.assertEqual(zone.get_name(), "Master")
        self.assertEqual(zone.get_cooling_setpoint(), 71)
        self.assertEqual(zone.get_heating_setpoint(), 63)
        self.assertEqual(zone.get_current_mode(), "AUTO")
        self.assertEqual(
            zone.get_requested_mode(), "AUTO",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Damper Open",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), True)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_zone_idle(self):
        """Tests for nexia thermostat zone idle."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        thermostat = nexia.get_thermostat_by_id(2059661)
        zone = thermostat.get_zone_by_id(83261002)

        self.assertEqual(zone.get_name(), "Living East")
        self.assertEqual(zone.get_cooling_setpoint(), 79)
        self.assertEqual(zone.get_heating_setpoint(), 63)
        self.assertEqual(zone.get_current_mode(), "AUTO")
        self.assertEqual(
            zone.get_requested_mode(), "AUTO",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Idle",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), False)
        self.assertEqual(zone.is_in_permanent_hold(), True)

    def test_xl824_idle(self):
        """Tests for nexia xl824 zone idle."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_house_xl624.json"))
        nexia.update_from_json(devices_json)

        thermostat_ids = nexia.get_thermostat_ids()
        self.assertEqual(thermostat_ids, [2222222, 3333333])
        thermostat = nexia.get_thermostat_by_id(3333333)
        zone = thermostat.get_zone_by_id(99999999)

        self.assertEqual(zone.get_name(), "Upstairs NativeZone")
        self.assertEqual(zone.get_cooling_setpoint(), 74)
        self.assertEqual(zone.get_heating_setpoint(), 62)
        self.assertEqual(zone.get_current_mode(), "COOL")
        self.assertEqual(
            zone.get_requested_mode(), "COOL",
        )
        self.assertEqual(
            zone.get_presets(), ["None", "Home", "Away", "Sleep"],
        )
        self.assertEqual(
            zone.get_preset(), "None",
        )
        self.assertEqual(
            zone.get_status(), "Idle",
        )
        self.assertEqual(
            zone.get_setpoint_status(), "Permanent Hold",
        )
        self.assertEqual(zone.is_calling(), False)
        self.assertEqual(zone.is_in_permanent_hold(), True)
class TestNexiaAutomation(unittest.TestCase):
    """Tests for nexia automations."""

    def test_automations(self):
        """Automation ids and details parse correctly from the fixture."""
        nexia = NexiaHome(auto_login=False)
        devices_json = json.loads(load_fixture("mobile_houses_123456.json"))
        nexia.update_from_json(devices_json)

        automation_ids = nexia.get_automation_ids()
        self.assertEqual(
            automation_ids,
            [3467876, 3467870, 3452469, 3452472, 3454776, 3454774, 3486078, 3486091],
        )

        automation_one = nexia.get_automation_by_id(3467876)
        self.assertEqual(automation_one.name, "Away for 12 Hours")
        # The description is a single long string assembled from the fixture.
        self.assertEqual(
            automation_one.description,
            "When IFTTT activates the automation Upstairs West Wing will "
            "permanently hold the heat to 62.0 and cool to 83.0 AND "
            "Downstairs East Wing will permanently hold the heat to 62.0 "
            "and cool to 83.0 AND Downstairs West Wing will permanently "
            "hold the heat to 62.0 and cool to 83.0 AND Activate the mode "
            "named 'Away 12' AND Master Suite will permanently hold the "
            "heat to 62.0 and cool to 83.0",
        )
        self.assertEqual(automation_one.enabled, True)
        self.assertEqual(automation_one.automation_id, 3467876)
| 44.946429
| 86
| 0.674757
| 27,330
| 0.987106
| 0
| 0
| 1,932
| 0.06978
| 0
| 0
| 3,308
| 0.119478
|
0ca1f24778ecd88cae66d775c3768ee93dee6382
| 6,217
|
py
|
Python
|
FarmSwapG.py
|
resake/DuelsFarm
|
b6a0da11af4866d4ea6caa30be1436c256a55af4
|
[
"MIT"
] | null | null | null |
FarmSwapG.py
|
resake/DuelsFarm
|
b6a0da11af4866d4ea6caa30be1436c256a55af4
|
[
"MIT"
] | null | null | null |
FarmSwapG.py
|
resake/DuelsFarm
|
b6a0da11af4866d4ea6caa30be1436c256a55af4
|
[
"MIT"
] | null | null | null |
import re
import time
import aiohttp
# When True, the farmer prompts the player to pick good ("swap") gear, then
# equips the weakest recorded gear before rolling an opponent and swaps the
# good gear back before fighting (see main()).
CHANGE_CLOTHES = True

# Duels account id to log in with (replace with your own).
ACCOUNT_ID = 'b8dd6d09-0bf1-4455-99c5-4cec41b3a789'
class DuelsAPI:
    """Minimal async client for the Duels game HTTP API.

    Wraps an :class:`aiohttp.ClientSession`; :meth:`login` must be awaited
    before other calls so the auth token is attached to every request.
    """

    def __init__(self, account_id, **kwargs):
        self.account_id = account_id
        self.API_ENTRY = kwargs.get('api_entry_url',
                                    'https://api-duels.galapagosgames.com')
        self._session = aiohttp.ClientSession()
        self._auth_data = dict()
        self._all_data = dict()

    @property
    def profile(self):
        # Player profile from the most recent login (None before login).
        return self._all_data.get('profile')

    async def login(self):
        """Authenticate and cache the token/app-version for later requests."""
        app_version = await self.get_app_version()
        data = {
            'ids': [self.account_id],
            'appBundle': 'com.deemedyainc.duels',
            'appVersion': app_version,
            'platform': 'Android',
            'language': 'English'
        }
        all_data = await self._request('/general/login', data)
        self._auth_data['id'] = all_data['profile']['_id']
        self._auth_data['appVersion'] = app_version
        self._auth_data['token'] = all_data['profile']['token']
        self._all_data = all_data
        return self._all_data

    async def get_app_version(self):
        """Return the current app version, scraping Google Play if unknown."""
        app_version = self._auth_data.get('appVersion')
        if app_version is not None:
            return app_version
        google_play_url = ('https://play.google.com/store/apps/details?id'
                           '=com.deemedyainc.duels&hl=en')
        async with self._session.get(google_play_url) as resp:
            data = await resp.text()
        # NOTE(review): fragile — depends on Google Play's HTML markup.
        pattern = (r'<div class="hAyfc"><div class="BgcNfc">Current '
                   r'Version</div><span class="htlgb"><div '
                   r'class="IQ1z0d"><span class="htlgb">(?P<version>.*?)'
                   r'</span></div></span></div>')
        version = re.search(pattern, data)
        return version['version']

    async def skip_queue(self, container_id):
        return await self._request('/queue/claim',
                                   {'containerId': container_id})

    async def equip_part(self, part_id):
        return await self._request('/inventory/equip', {'partId': part_id})

    async def get_clan(self, clan_id):
        return await self._request('/clan/info', {'clanId': clan_id})

    async def get_player(self, player_id):
        return await self._request('/profiles/details',
                                   {'playerId': player_id})

    async def play_lootfight(self):
        return await self._request('/battle/loot/v2')

    async def get_opponent(self, repeat_roll=False):
        return await self._request('/battle/loot/opponent/v2',
                                   {'reroll': repeat_roll})

    async def get_dungeons_leaderboard(self):
        return await self._request('/dungeons/leaderboards/top')

    async def search_clan(self, clan_name, only_joinable=False, min_level=1):
        payload = {'search': clan_name, 'onlyJoinable': only_joinable}
        if min_level > 1:
            payload.update({'lvl': min_level})
        return await self._request('/clans/search', payload)

    async def close(self):
        """Close the underlying HTTP session (idempotent)."""
        if not self._session.closed:
            await self._session.close()

    async def _request(self, endpoint, additional_data=None, method='POST'):
        """Send *additional_data* merged with auth data to *endpoint*.

        Bug fix: the original signature used a mutable default dict
        (``additional_data={}``) and mutated it in place with
        ``additional_data.update(self._auth_data)``, so auth fields leaked
        into the shared default between calls and caller-supplied payloads
        were clobbered.  Copy into a fresh dict instead.
        """
        payload = dict(additional_data) if additional_data else {}
        payload.update(self._auth_data)
        func_to_call = getattr(self._session, method.lower())
        url = self.API_ENTRY + endpoint
        async with func_to_call(url, json=payload) as resp:
            return await resp.json()
async def main():
    """Farm loot fights forever, optionally swapping to weaker gear first.

    Loop: equip the weakest recorded gear, roll an opponent, swap the good
    gear back, fight, then claim reward queues and tally keys.
    """
    # Bug fix: asyncio was only imported inside the ``__main__`` guard, so
    # awaiting main() from an importing module raised NameError at
    # ``asyncio.sleep``.  Import it locally so main() is self-contained.
    import asyncio

    api = DuelsAPI(ACCOUNT_ID)
    await api.login()

    bad_equipment_ids = {}
    now_equipment_ids = {}
    if CHANGE_CLOTHES:
        input(
            'Pick better equipment in the game, and press `Enter` to continue'
        )
        # Record the currently equipped (good) part per slot type.
        for part in api.profile['character']['parts']:
            now_equipment_ids.update({part['__type']: part['__id']})
        # Record the lowest-stat inventory item per slot type ("bad" gear).
        for item in api.profile['inventory']['items']:
            bad_item = bad_equipment_ids.get(item['__type'])
            if bad_item is not None:
                if bad_item['stat_value'] < item['stat']['value']:
                    continue
            payload = {'id': item['__id'], 'stat_value': item['stat']['value']}
            bad_equipment_ids[item['__type']] = payload

    total_keys = api.profile['Key@Value']
    start_time = time.time()
    while True:
        try:
            # Equip weak gear so the opponent roll matches against it...
            for item_value in bad_equipment_ids.values():
                await api.equip_part(item_value['id'])
            await api.get_opponent()
            # ...then swap the good gear back before fighting.
            for item_type, item_value in now_equipment_ids.items():
                if bad_equipment_ids.get(item_type) is not None:
                    await api.equip_part(item_value)
            loot_fight = await api.play_lootfight()
            if loot_fight['battle']['result']:
                print('[+] Ez win, win streak: {}'.format(
                    loot_fight['_u']['WinStreak@Value']
                ))
                for queue in loot_fight['_q']:
                    await api.skip_queue(queue['_id'])
                    await api.skip_queue(queue['pid'])
                    if queue.get('steps') is None:
                        continue
                    for step in queue['steps']:
                        if step['type'] == 'RewardQueue':
                            if step['items'][0]['type'] != 'Key':
                                continue
                            keys_reward = step['items'][0]['reward']
                            total_keys += keys_reward
                            print('[+] We have got +{} keys!'.format(
                                keys_reward))
                            print('[+] Total keys: {}'.format(total_keys))
                            print('[+] Time elapsed: {}'.format(
                                time.time() - start_time
                            ))
            else:
                print('[-] Ez lose!')
            await asyncio.sleep(1.0)
        except KeyboardInterrupt:
            print('[+] Exit...')
            break
if __name__ == '__main__':
    # Script entry point: drive the async farmer on a dedicated event loop.
    import asyncio

    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| 37.678788
| 79
| 0.551391
| 3,407
| 0.548014
| 0
| 0
| 77
| 0.012385
| 5,444
| 0.875664
| 1,254
| 0.201705
|
0ca263d5ceb8c0df9da68a027a9e2c49d50656ac
| 268
|
py
|
Python
|
data/landice-5g/tiff_to_shp.py
|
scottsfarley93/IceSheetsViz
|
f4af84f16af875c5753dca6b8c173c253d9218d4
|
[
"MIT"
] | null | null | null |
data/landice-5g/tiff_to_shp.py
|
scottsfarley93/IceSheetsViz
|
f4af84f16af875c5753dca6b8c173c253d9218d4
|
[
"MIT"
] | 1
|
2017-02-28T18:49:04.000Z
|
2017-02-28T18:49:55.000Z
|
data/landice-5g/tiff_to_shp.py
|
scottsfarley93/IceSheetsViz
|
f4af84f16af875c5753dca6b8c173c253d9218d4
|
[
"MIT"
] | null | null | null |
import os

# Polygonize every GeoTIFF in ./rasters into an ESRI Shapefile in
# ./shapefiles using GDAL's gdal_polygonize.py command-line utility.
for filename in os.listdir("rasters"):
    # print(filename) is valid under both Python 2 and Python 3; the
    # original `print filename` statement is a syntax error on Python 3.
    print(filename)
    stem = filename.replace(".tiff", "")
    tiff = "rasters/" + filename
    out = "shapefiles/" + stem + ".shp"
    cmd = "gdal_polygonize.py " + tiff + " -f 'ESRI Shapefile' " + out
    os.system(cmd)
| 24.363636
| 70
| 0.589552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.339552
|
0ca33e888a8c5506799931e71fe1070bf6588145
| 3,758
|
py
|
Python
|
we_sensesim.py
|
y95847frank/GenSense
|
0da122bea9b7bd51444748444700b5f788bd8a48
|
[
"MIT"
] | 3
|
2018-05-31T05:52:18.000Z
|
2019-12-20T07:15:56.000Z
|
we_sensesim.py
|
y95847frank/GenSense
|
0da122bea9b7bd51444748444700b5f788bd8a48
|
[
"MIT"
] | null | null | null |
we_sensesim.py
|
y95847frank/GenSense
|
0da122bea9b7bd51444748444700b5f788bd8a48
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
import utils
import os
from collections import defaultdict
from nltk.corpus import wordnet as wn
from scipy.spatial.distance import cosine
from scipy.spatial.distance import correlation
from numpy.linalg import norm
from scipy.stats import spearmanr, pearsonr
from utils import trim
import pdb
"""
Sense embedding format: see https://github.com/sjauhar/SenseRetrofit
Use ',' to separate Datasets
"""
# Evaluate sense embeddings on word-similarity datasets: for each word pair,
# similarity is the max (and the mean) cosine similarity over all sense-vector
# combinations of the two words; Spearman correlation against gold scores is
# printed per dataset.
# NOTE(review): written for Python 2 (print statements, dict.values() passed
# to np.mean); it will not run unmodified under Python 3.
def run(path, fname):
'''
if len(sys.argv) != 3:
print("Usage: python we_sensesim.py SenseEmbedding Datasets")
exit(0)
'''
#wvs = utils.readWordVecs(os.path.expanduser(full_path))
# NOTE(review): the embedding file is read from sys.argv[1], not from the
# `path`/`fname` parameters — the function silently depends on the command
# line. The commented-out line above suggests a parameter was intended;
# confirm and fix with the author.
wvs = utils.readWordVecs(sys.argv[1])
print("Finish reading vector!")
# wvssen: set-like dict of surface words that have at least one sense vector.
# s_list: surface word -> list of its sense keys (format "word%senseid").
wvssen = {}
s_list = defaultdict(list)
for sense in wvs:
wvssen[sense.split("%")[0]] = ''
s_list[sense.split("%")[0]].append(sense)
# Fallback vector for out-of-vocabulary words: mean of all sense vectors.
mean_vector = np.mean(wvs.values(), axis=0)
spear_score_max = []
spear_score_avg = []
f_name = []
for name in fname:
full_path = os.path.join(path, name)
filenames = os.path.expanduser(full_path).split(',')
pairs, scores = utils.readDataset(filenames[0], no_skip=True)
#f_name.append(filenames[0])
#print("Pair number for %s: %d"%(filenames[0], len(pairs)))
coefs_max = []
coefs_avg = []
missing = 0
for pair in pairs:
# Collect all sense vectors for the first word (or the OOV fallback).
vecs0 = []
trimed_p0 = trim(pair[0], wvssen)
if trimed_p0 not in wvssen:
vecs0.append(mean_vector)
missing += 1
#print trimed_p0,
else:
for sense in s_list[trimed_p0]:
vecs0.append(wvs[sense])
'''
for sense in wvs:
word = sense.split("%")[0]
if trimed_p0 == word:
vecs0.append(wvs[sense])
'''
# Same for the second word of the pair.
vecs1 = []
trimed_p1 = trim(pair[1],wvssen)
if trimed_p1 not in wvssen:
vecs1.append(mean_vector)
missing += 1
#print trimed_p1,
else:
for sense in s_list[trimed_p1]:
vecs1.append(wvs[sense])
'''
for sense in wvs:
word = sense.split("%")[0]
if trimed_p1 == word:
vecs1.append(wvs[sense])
'''
'''
max_value and avg_value: see "Multi-Prototype Vector-Space Models of Word Meaning" section 3.2 Measuring Semantic Similarity
http://www.cs.utexas.edu/~ml/papers/reisinger.naacl-2010.pdf
'''
# 1 - cosine distance == cosine similarity; take max/mean over all
# cross-sense combinations of the two words.
max_value = max([1-cosine(a,b) for a in vecs0 for b in vecs1])
avg_value = np.mean([1-cosine(a,b) for a in vecs0 for b in vecs1])
coefs_max.append(max_value)
coefs_avg.append(avg_value)
# Rank correlations against the gold scores (Pearson computed but unused).
spear_max = spearmanr(scores, coefs_max)
pearson_max = pearsonr(scores, coefs_max)
spear_avg = spearmanr(scores, coefs_avg)
pearson_avg = pearsonr(scores, coefs_avg)
spear_score_max.append(spear_max[0])
spear_score_avg.append(spear_avg[0])
# Tabular summary: one column per dataset (Python 2 print statements).
print 'type \t',
for i in range(len(fname)):
print fname[i].split('.')[0],
print '\nspear max\t',
for i in range(len(fname)):
print '%.04f,' % (spear_score_max[i]),
print '\nspear avg\t',
for i in range(len(fname)):
print '%.04f,' % (spear_score_avg[i]),
if __name__ == "__main__":
    # Standard word-similarity benchmarks shipped under ./eval_data.
    datasets = [
        'EN-MEN-n.txt', 'EN-MEN-l.txt', 'EN-TRUK.txt', 'EN-RW.txt',
        'EN-WS353.txt', 'EN-WS353-s.txt', 'EN-WS353-r.txt',
    ]
    run('./eval_data', datasets)
| 33.256637
| 140
| 0.549228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,256
| 0.33422
|
0ca3cc7e85961f379dcec8f7f5d9db60fd5df51d
| 138,423
|
py
|
Python
|
dlkit/abstract_osid/calendaring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/abstract_osid/calendaring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/abstract_osid/calendaring/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""Implementations of calendaring abstract base class queries."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class EventQuery:
"""This is the query for searching events.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
# NOTE(review): ``__metaclass__`` is the Python 2 idiom; under Python 3 this
# assignment has no effect and the class is not actually abstract — confirm
# the intended Python version.
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_implicit(self, match):
"""Matches an event that is implicitly generated.
:param match: ``true`` to match events implicitly generated, ``false`` to match events explicitly defined
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_implicit_terms(self):
"""Clears the implicit terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
# Each clear_*_terms method is also exposed as a delete-only property.
implicit_terms = property(fdel=clear_implicit_terms)
@abc.abstractmethod
def match_duration(self, low, high, match):
"""Matches the event duration between the given range inclusive.
:param low: low duration range
:type low: ``osid.calendaring.Duration``
:param high: high duration range
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
:raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_duration(self, match):
"""Matches an event that has any duration.
:param match: ``true`` to match events with any duration, ``false`` to match events with no start time
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_duration_terms(self):
"""Clears the duration terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
duration_terms = property(fdel=clear_duration_terms)
@abc.abstractmethod
def match_recurring_event_id(self, recurring_event_id, match):
"""Matches events that related to the recurring event.
:param recurring_event_id: an ``Id`` for a recurring event
:type recurring_event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``recurring_event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_recurring_event_id_terms(self):
"""Clears the recurring event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
recurring_event_id_terms = property(fdel=clear_recurring_event_id_terms)
@abc.abstractmethod
def supports_recurring_event_query(self):
"""Tests if a ``RecurringEventQuery`` is available for querying recurring events.
:return: ``true`` if a recurring event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_recurring_event_query(self):
"""Gets the query for a recurring event.
Multiple retrievals produce a nested ``OR`` term.
:return: the recurring event query
:rtype: ``osid.calendaring.RecurringEventQuery``
:raise: ``Unimplemented`` -- ``supports_recurring_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_recurring_event_query()`` is ``true``.*
"""
return # osid.calendaring.RecurringEventQuery
recurring_event_query = property(fget=get_recurring_event_query)
@abc.abstractmethod
def match_any_recurring_event(self, match):
"""Matches an event that is part of any recurring event.
:param match: ``true`` to match events part of any recurring event, ``false`` to match only standalone events
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_recurring_event_terms(self):
"""Clears the recurring event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
recurring_event_terms = property(fdel=clear_recurring_event_terms)
@abc.abstractmethod
def match_superseding_event_id(self, superseding_event_id, match):
"""Matches events that relate to the superseding event.
:param superseding_event_id: an ``Id`` for a superseding event
:type superseding_event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_superseding_event_id_terms(self):
"""Clears the superseding event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
superseding_event_id_terms = property(fdel=clear_superseding_event_id_terms)
@abc.abstractmethod
def supports_superseding_event_query(self):
"""Tests if a ``SupersedingEventQuery`` is available for querying offset events.
:return: ``true`` if a superseding event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_superseding_event_query(self):
"""Gets the query for a superseding event.
Multiple retrievals produce a nested ``OR`` term.
:return: the superseding event query
:rtype: ``osid.calendaring.SupersedingEventQuery``
:raise: ``Unimplemented`` -- ``supports_superseding_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_superseding_event_query()`` is ``true``.*
"""
return # osid.calendaring.SupersedingEventQuery
superseding_event_query = property(fget=get_superseding_event_query)
@abc.abstractmethod
def match_any_superseding_event(self, match):
"""Matches any superseding event.
:param match: ``true`` to match any superseding events, ``false`` otherwise
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_superseding_event_terms(self):
"""Clears the superseding event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
superseding_event_terms = property(fdel=clear_superseding_event_terms)
@abc.abstractmethod
def match_offset_event_id(self, offset_event_id, match):
"""Matches events that relates to the offset event.
:param offset_event_id: an ``Id`` for an offset event
:type offset_event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_offset_event_id_terms(self):
"""Clears the offset event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
offset_event_id_terms = property(fdel=clear_offset_event_id_terms)
@abc.abstractmethod
def supports_offset_event_query(self):
"""Tests if an ``OffsetEventQuery`` is available for querying offset events.
:return: ``true`` if an offset event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_offset_event_query(self):
"""Gets the query for an offset event.
Multiple retrievals produce a nested ``OR`` term.
:return: the offset event query
:rtype: ``osid.calendaring.OffsetEventQuery``
:raise: ``Unimplemented`` -- ``supports_offset_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_offset_event_query()`` is ``true``.*
"""
return # osid.calendaring.OffsetEventQuery
offset_event_query = property(fget=get_offset_event_query)
@abc.abstractmethod
def match_any_offset_event(self, match):
"""Matches any offset event.
:param match: ``true`` to match any offset events, ``false`` otherwise
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_offset_event_terms(self):
"""Clears the offset event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
offset_event_terms = property(fdel=clear_offset_event_terms)
@abc.abstractmethod
def match_location_description(self, location, string_match_type, match):
"""Matches the location description string.
:param location: location string
:type location: ``string``
:param string_match_type: string match type
:type string_match_type: ``osid.type.Type``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``location`` is not of ``string_match_type``
:raise: ``NullArgument`` -- ``location`` or ``string_match_type`` is ``null``
:raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def match_any_location_description(self, match):
"""Matches an event that has any location description assigned.
:param match: ``true`` to match events with any location description, ``false`` to match events with no location
description
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_description_terms(self):
"""Clears the location description terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_description_terms = property(fdel=clear_location_description_terms)
@abc.abstractmethod
def match_location_id(self, location_id, match):
"""Sets the location ``Id`` for this query.
:param location_id: a location ``Id``
:type location_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``location_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_id_terms(self):
"""Clears the location ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_id_terms = property(fdel=clear_location_id_terms)
@abc.abstractmethod
def supports_location_query(self):
"""Tests if a ``LocationQuery`` is available for querying locations.
:return: ``true`` if a location query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_location_query(self):
"""Gets the query for a location.
Multiple retrievals produce a nested ``OR`` term.
:return: the location query
:rtype: ``osid.mapping.LocationQuery``
:raise: ``Unimplemented`` -- ``supports_location_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_location_query()`` is ``true``.*
"""
return # osid.mapping.LocationQuery
location_query = property(fget=get_location_query)
@abc.abstractmethod
def match_any_location(self, match):
"""Matches an event that has any location assigned.
:param match: ``true`` to match events with any location, ``false`` to match events with no location
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_location_terms(self):
"""Clears the location terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
location_terms = property(fdel=clear_location_terms)
@abc.abstractmethod
def match_sponsor_id(self, sponsor_id, match):
"""Sets the sponsor ``Id`` for this query.
:param sponsor_id: a sponsor ``Id``
:type sponsor_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``sponsor_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_sponsor_id_terms(self):
"""Clears the sponsor ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
sponsor_id_terms = property(fdel=clear_sponsor_id_terms)
@abc.abstractmethod
def supports_sponsor_query(self):
"""Tests if a ``LocationQuery`` is available for querying sponsors.
:return: ``true`` if a sponsor query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_sponsor_query(self):
"""Gets the query for a sponsor.
Multiple retrievals produce a nested ``OR`` term.
:return: the sponsor query
:rtype: ``osid.resource.ResourceQuery``
:raise: ``Unimplemented`` -- ``supports_sponsor_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sponsor_query()`` is ``true``.*
"""
return # osid.resource.ResourceQuery
sponsor_query = property(fget=get_sponsor_query)
@abc.abstractmethod
def clear_sponsor_terms(self):
"""Clears the sponsor terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
sponsor_terms = property(fdel=clear_sponsor_terms)
@abc.abstractmethod
def match_coordinate(self, coordinate, match):
"""Matches events whose locations contain the given coordinate.
:param coordinate: a coordinate
:type coordinate: ``osid.mapping.Coordinate``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``coordinate`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_coordinate_terms(self):
"""Clears the coordinate terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
coordinate_terms = property(fdel=clear_coordinate_terms)
@abc.abstractmethod
def match_spatial_unit(self, spatial_unit, match):
"""Matches events whose locations fall within the given spatial unit.
:param spatial_unit: a spatial unit
:type spatial_unit: ``osid.mapping.SpatialUnit``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``spatial_unit`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_spatial_unit_terms(self):
"""Clears the spatial unit terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
spatial_unit_terms = property(fdel=clear_spatial_unit_terms)
@abc.abstractmethod
def match_commitment_id(self, commitment_id, match):
"""Sets the commitment ``Id`` for this query.
:param commitment_id: a commitment ``Id``
:type commitment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``commitment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commitment_id_terms(self):
"""Clears the commitment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commitment_id_terms = property(fdel=clear_commitment_id_terms)
@abc.abstractmethod
def supports_commitment_query(self):
"""Tests if a ``CommitmentQuery`` is available for querying recurring event terms.
:return: ``true`` if a commitment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_commitment_query(self):
"""Gets the query for a commitment.
Multiple retrievals produce a nested ``OR`` term.
:return: the commitment query
:rtype: ``osid.calendaring.CommitmentQuery``
:raise: ``Unimplemented`` -- ``supports_commitment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_commitment_query()`` is ``true``.*
"""
return # osid.calendaring.CommitmentQuery
commitment_query = property(fget=get_commitment_query)
@abc.abstractmethod
def match_any_commitment(self, match):
"""Matches an event that has any commitment.
:param match: ``true`` to match events with any commitment, ``false`` to match events with no commitments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commitment_terms(self):
"""Clears the commitment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commitment_terms = property(fdel=clear_commitment_terms)
@abc.abstractmethod
def match_containing_event_id(self, event_id, match):
"""Sets the event ``Id`` for this query to match events that have the specified event as an ancestor.
:param event_id: an event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_containing_event_id_terms(self):
"""Clears the containing event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
containing_event_id_terms = property(fdel=clear_containing_event_id_terms)
@abc.abstractmethod
def supports_containing_event_query(self):
"""Tests if a containing event query is available.
:return: ``true`` if a containing event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_containing_event_query(self):
"""Gets the query for a containing event.
:return: the containing event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_containing_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_containing_event_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
containing_event_query = property(fget=get_containing_event_query)
@abc.abstractmethod
def match_any_containing_event(self, match):
"""Matches events with any ancestor event.
:param match: ``true`` to match events with any ancestor event, ``false`` to match events with no ancestor
events
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_containing_event_terms(self):
"""Clears the containing event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
containing_event_terms = property(fdel=clear_containing_event_terms)
@abc.abstractmethod
def match_calendar_id(self, calendar_id, match):
"""Sets the calendar ``Id`` for this query.
:param calendar_id: a calendar ``Id``
:type calendar_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_calendar_id_terms(self):
"""Clears the calendar ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_id_terms = property(fdel=clear_calendar_id_terms)
@abc.abstractmethod
def supports_calendar_query(self):
"""Tests if a ``CalendarQuery`` is available for querying calendars.
:return: ``true`` if a calendar query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_calendar_query(self):
"""Gets the query for a calendar.
Multiple retrievals produce a nested ``OR`` term.
:return: the calendar query
:rtype: ``osid.calendaring.CalendarQuery``
:raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_calendar_query()`` is ``true``.*
"""
return # osid.calendaring.CalendarQuery
calendar_query = property(fget=get_calendar_query)
@abc.abstractmethod
def clear_calendar_terms(self):
"""Clears the calendar terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_terms = property(fdel=clear_calendar_terms)
@abc.abstractmethod
def get_event_query_record(self, event_record_type):
"""Gets the event query record corresponding to the given ``Event`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param event_record_type: an event query record type
:type event_record_type: ``osid.type.Type``
:return: the event query record
:rtype: ``osid.calendaring.records.EventQueryRecord``
:raise: ``NullArgument`` -- ``event_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(event_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.records.EventQueryRecord
class RecurringEventQuery:
"""This is the query for searching recurring events.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_schedule_id(self, schedule_id, match):
"""Sets the schedule ``Id`` for this query for matching schedules.
:param schedule_id: a schedule ``Id``
:type schedule_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``schedule_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_schedule_id_terms(self):
"""Clears the schedule ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
schedule_id_terms = property(fdel=clear_schedule_id_terms)
@abc.abstractmethod
def supports_schedule_query(self):
"""Tests if a ``ScheduleQuery`` is available for querying schedules.
:return: ``true`` if a schedule query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_schedule_query(self):
"""Gets the query for a schedule.
Multiple retrievals produce a nested ``OR`` term.
:return: the schedule query
:rtype: ``osid.calendaring.ScheduleQuery``
:raise: ``Unimplemented`` -- ``supports_schedule_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_schedule_query()`` is ``true``.*
"""
return # osid.calendaring.ScheduleQuery
schedule_query = property(fget=get_schedule_query)
@abc.abstractmethod
def match_any_schedule(self, match):
"""Matches a recurring event that has any schedule assigned.
:param match: ``true`` to match recurring events with any schedules, ``false`` to match recurring events with no
schedules
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_schedule_terms(self):
"""Clears the schedule terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
schedule_terms = property(fdel=clear_schedule_terms)
@abc.abstractmethod
def match_superseding_event_id(self, superseding_event_id, match):
"""Sets the superseding event ``Id`` for this query.
:param superseding_event_id: a superseding event ``Id``
:type superseding_event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``superseding_event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_superseding_event_id_terms(self):
"""Clears the superseding event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
superseding_event_id_terms = property(fdel=clear_superseding_event_id_terms)
@abc.abstractmethod
def supports_superseding_event_query(self):
"""Tests if a ``SupersedingEventQuery`` is available for querying superseding events.
:return: ``true`` if a superseding event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_superseding_event_query(self):
"""Gets the query for a superseding event.
Multiple retrievals produce a nested ``OR`` term.
:return: the superseding event query
:rtype: ``osid.calendaring.SupersedingEventQuery``
:raise: ``Unimplemented`` -- ``supports_superseding_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_superseding_event_query()`` is ``true``.*
"""
return # osid.calendaring.SupersedingEventQuery
superseding_event_query = property(fget=get_superseding_event_query)
@abc.abstractmethod
def match_any_superseding_event(self, match):
"""Matches a recurring event that has any superseding event assigned.
:param match: ``true`` to match recurring events with any superseding events, ``false`` to match events with no
superseding events
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
    @abc.abstractmethod
    def clear_superseding_event_terms(self):
        """Clears the superseding event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseding_event_terms = property(fdel=clear_superseding_event_terms)
    @abc.abstractmethod
    def match_specific_meeting_time(self, start, end, match):
        """Matches recurring events with specific dates between the given range inclusive.

        :param start: start date
        :type start: ``osid.calendaring.DateTime``
        :param end: end date
        :type end: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``end`` is less than ``start``
        :raise: ``NullArgument`` -- ``start`` or ``end`` is ``zero``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_specific_meeting_time(self, match):
        """Matches a recurring event that has any specific date assigned.

        :param match: ``true`` to match recurring events with any specific date,
            ``false`` to match recurring events with no specific date
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_specific_meeting_time_terms(self):
        """Clears the specific meeting time terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    specific_meeting_time_terms = property(fdel=clear_specific_meeting_time_terms)
    @abc.abstractmethod
    def match_event_id(self, event_id, match):
        """Sets the composed event ``Id`` for this query.

        :param event_id: an event ``Id``
        :type event_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``event_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_event_id_terms(self):
        """Clears the composed event ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    event_id_terms = property(fdel=clear_event_id_terms)
    @abc.abstractmethod
    def supports_event_query(self):
        """Tests if an ``EventQuery`` is available for querying composed events.

        :return: ``true`` if an event query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_event_query(self):
        """Gets the query for a composed event.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the event query
        :rtype: ``osid.calendaring.EventQuery``
        :raise: ``Unimplemented`` -- ``supports_event_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_event_query()`` is ``true``.*

        """
        return # osid.calendaring.EventQuery
    event_query = property(fget=get_event_query)
    @abc.abstractmethod
    def match_any_event(self, match):
        """Matches a recurring event that has any composed event assigned.

        :param match: ``true`` to match recurring events with any composed events,
            ``false`` to match events with no composed events
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_event_terms(self):
        """Clears the composed event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    event_terms = property(fdel=clear_event_terms)
    @abc.abstractmethod
    def match_blackout(self, datetime, match):
        """Matches a blackout that contains the given date time.

        :param datetime: a datetime
        :type datetime: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``datetime`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_blackout(self, match):
        """Matches a recurring event that has any blackout assigned.

        :param match: ``true`` to match recurring events with any blackout,
            ``false`` to match recurring events with no blackout
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_blackout_terms(self):
        """Clears the blackout terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    blackout_terms = property(fdel=clear_blackout_terms)
    @abc.abstractmethod
    def match_blackout_inclusive(self, start, end, match):
        """Matches recurring events with blackouts between the given range inclusive.

        :param start: start date
        :type start: ``osid.calendaring.DateTime``
        :param end: end date
        :type end: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``end`` is less than ``start``
        :raise: ``NullArgument`` -- ``start`` or ``end`` is ``zero``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_blackout_inclusive_terms(self):
        """Clears the blackout inclusive terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    blackout_inclusive_terms = property(fdel=clear_blackout_inclusive_terms)
    @abc.abstractmethod
    def match_sponsor_id(self, sponsor_id, match):
        """Sets the sponsor ``Id`` for this query.

        :param sponsor_id: a sponsor ``Id``
        :type sponsor_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``sponsor_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_sponsor_id_terms(self):
        """Clears the sponsor ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    sponsor_id_terms = property(fdel=clear_sponsor_id_terms)
    @abc.abstractmethod
    def supports_sponsor_query(self):
        """Tests if a ``ResourceQuery`` is available for querying sponsors.

        :return: ``true`` if a sponsor query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_sponsor_query(self):
        """Gets the query for a sponsor.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the sponsor query
        :rtype: ``osid.resource.ResourceQuery``
        :raise: ``Unimplemented`` -- ``supports_sponsor_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_sponsor_query()`` is ``true``.*

        """
        return # osid.resource.ResourceQuery
    sponsor_query = property(fget=get_sponsor_query)
    @abc.abstractmethod
    def clear_sponsor_terms(self):
        """Clears the sponsor terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    sponsor_terms = property(fdel=clear_sponsor_terms)
    @abc.abstractmethod
    def match_calendar_id(self, calendar_id, match):
        """Sets the calendar ``Id`` for this query.

        :param calendar_id: a calendar ``Id``
        :type calendar_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``calendar_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_calendar_id_terms(self):
        """Clears the calendar ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_id_terms = property(fdel=clear_calendar_id_terms)
    @abc.abstractmethod
    def supports_calendar_query(self):
        """Tests if a ``CalendarQuery`` is available for querying calendars.

        :return: ``true`` if a calendar query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_calendar_query(self):
        """Gets the query for a calendar.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the calendar query
        :rtype: ``osid.calendaring.CalendarQuery``
        :raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_calendar_query()`` is ``true``.*

        """
        return # osid.calendaring.CalendarQuery
    calendar_query = property(fget=get_calendar_query)
    @abc.abstractmethod
    def clear_calendar_terms(self):
        """Clears the calendar terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_terms = property(fdel=clear_calendar_terms)
    @abc.abstractmethod
    def get_recurring_event_query_record(self, recurring_event_record_type):
        """Gets the recurring event query record corresponding to the given ``RecurringEvent`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.

        :param recurring_event_record_type: a recurring event query record type
        :type recurring_event_record_type: ``osid.type.Type``
        :return: the recurring event query record
        :rtype: ``osid.calendaring.records.RecurringEventQueryRecord``
        :raise: ``NullArgument`` -- ``recurring_event_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(recurring_event_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.calendaring.records.RecurringEventQueryRecord
class SupersedingEventQuery:
    """This is the query for searching superseding events.

    Each method match request produces an ``AND`` term while multiple
    invocations of a method produce a nested ``OR``.

    """
    # NOTE(review): ``__metaclass__`` is the Python 2 metaclass hook and is
    # ignored on Python 3, where abstract methods would not be enforced --
    # confirm the target runtime.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def match_superseded_event_id(self, event_id, match):
        """Sets the event ``Id`` for this query for matching attached events.

        :param event_id: an event ``Id``
        :type event_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``event_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_superseded_event_id_terms(self):
        """Clears the superseded event ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseded_event_id_terms = property(fdel=clear_superseded_event_id_terms)
    @abc.abstractmethod
    def supports_superseded_event_query(self):
        """Tests if an ``EventQuery`` is available for querying attached events.

        :return: ``true`` if an event query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_superseded_event_query(self):
        """Gets the query for an attached event.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the event query
        :rtype: ``osid.calendaring.EventQuery``
        :raise: ``Unimplemented`` -- ``supports_superseded_event_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_superseded_event_query()`` is ``true``.*

        """
        return # osid.calendaring.EventQuery
    superseded_event_query = property(fget=get_superseded_event_query)
    @abc.abstractmethod
    def clear_superseded_event_terms(self):
        """Clears the superseded event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseded_event_terms = property(fdel=clear_superseded_event_terms)
    @abc.abstractmethod
    def match_superseding_event_id(self, superseding_event_id, match):
        """Sets the superseding event ``Id`` for this query.

        :param superseding_event_id: a superseding event ``Id``
        :type superseding_event_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``superseding_event_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_superseding_event_id_terms(self):
        """Clears the superseding event ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseding_event_id_terms = property(fdel=clear_superseding_event_id_terms)
    @abc.abstractmethod
    def supports_superseding_event_query(self):
        """Tests if a ``SupersedingEventQuery`` is available.

        :return: ``true`` if a superseding event query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_superseding_event_query(self):
        """Gets the query for a superseding event.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the superseding event query
        :rtype: ``osid.calendaring.EventQuery``
        :raise: ``Unimplemented`` -- ``supports_superseding_event_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_superseding_event_query()`` is ``true``.*

        """
        return # osid.calendaring.EventQuery
    superseding_event_query = property(fget=get_superseding_event_query)
    @abc.abstractmethod
    def clear_superseding_event_terms(self):
        """Clears the superseding event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseding_event_terms = property(fdel=clear_superseding_event_terms)
    @abc.abstractmethod
    def match_superseded_date(self, from_, to, match):
        """Matches superseding events that supersede within the given dates inclusive.

        :param from_: start date
        :type from_: ``osid.calendaring.DateTime``
        :param to: end date
        :type to: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
        :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_superseded_date(self, match):
        """Matches a superseding event that has any superseded date.

        :param match: ``true`` to match superseding events with any superseded date,
            ``false`` to match superseding events with no superseded date
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_superseded_date_terms(self):
        """Clears the superseded date terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseded_date_terms = property(fdel=clear_superseded_date_terms)
    @abc.abstractmethod
    def match_superseded_event_position(self, from_, to, match):
        """Matches superseding events that supersede within the denormalized event positions inclusive.

        :param from_: start position
        :type from_: ``integer``
        :param to: end position
        :type to: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- the absolute value of ``from_`` is greater than ``to``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_superseded_event_position(self, match):
        """Matches a superseding event that has any superseded position.

        :param match: ``true`` to match superseding events with any superseded event position,
            ``false`` to match superseding events with no superseded event position
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_superseded_event_position_terms(self):
        """Clears the superseded event position terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    superseded_event_position_terms = property(fdel=clear_superseded_event_position_terms)
    @abc.abstractmethod
    def match_calendar_id(self, calendar_id, match):
        """Sets the calendar ``Id`` for this query.

        :param calendar_id: a calendar ``Id``
        :type calendar_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``calendar_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_calendar_id_terms(self):
        """Clears the calendar ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_id_terms = property(fdel=clear_calendar_id_terms)
    @abc.abstractmethod
    def supports_calendar_query(self):
        """Tests if a ``CalendarQuery`` is available for querying calendars.

        :return: ``true`` if a calendar query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_calendar_query(self):
        """Gets the query for a calendar.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the calendar query
        :rtype: ``osid.calendaring.CalendarQuery``
        :raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_calendar_query()`` is ``true``.*

        """
        return # osid.calendaring.CalendarQuery
    calendar_query = property(fget=get_calendar_query)
    @abc.abstractmethod
    def clear_calendar_terms(self):
        """Clears the calendar terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_terms = property(fdel=clear_calendar_terms)
    @abc.abstractmethod
    def get_superseding_event_query_record(self, superseding_event_record_type):
        """Gets the superseding event query record corresponding to the given ``SupersedingEvent`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.

        :param superseding_event_record_type: a superseding event query record type
        :type superseding_event_record_type: ``osid.type.Type``
        :return: the superseding event query record
        :rtype: ``osid.calendaring.records.SupersedingEventQueryRecord``
        :raise: ``NullArgument`` -- ``superseding_event_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(superseding_event_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.calendaring.records.SupersedingEventQueryRecord
class OffsetEventQuery:
"""This is the query for searching events.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def match_fixed_start_time(self, from_, to, match):
        """Matches a fixed start time between the given range inclusive.

        :param from_: the start of the range
        :type from_: ``osid.calendaring.DateTime``
        :param to: the end of the range
        :type to: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from_``
        :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_fixed_start_time(self, match):
        """Matches events with fixed start times.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_fixed_start_time_terms(self):
        """Clears the fixed start time terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    fixed_start_time_terms = property(fdel=clear_fixed_start_time_terms)
    @abc.abstractmethod
    def match_start_reference_event_id(self, event_id, match):
        """Sets the start reference event ``Id`` for this query.

        :param event_id: an event ``Id``
        :type event_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``event_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_start_reference_event_id_terms(self):
        """Clears the start reference event ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    start_reference_event_id_terms = property(fdel=clear_start_reference_event_id_terms)
    @abc.abstractmethod
    def supports_start_reference_event_query(self):
        """Tests if an ``EventQuery`` is available for querying start reference event terms.

        :return: ``true`` if an event query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_start_reference_event_query(self):
        """Gets the query for the start reference event.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the event query
        :rtype: ``osid.calendaring.EventQuery``
        :raise: ``Unimplemented`` -- ``supports_start_reference_event_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_start_reference_event_query()`` is ``true``.*

        """
        return # osid.calendaring.EventQuery
    start_reference_event_query = property(fget=get_start_reference_event_query)
    @abc.abstractmethod
    def match_any_start_reference_event(self, match):
        """Matches offset events with any starting reference event.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_start_reference_event_terms(self):
        """Clears the start reference event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    start_reference_event_terms = property(fdel=clear_start_reference_event_terms)
    @abc.abstractmethod
    def match_fixed_start_offset(self, from_, to, match):
        """Matches a fixed offset amount between the given range inclusive.

        :param from_: the start of the range
        :type from_: ``osid.calendaring.Duration``
        :param to: the end of the range
        :type to: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from_``
        :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_fixed_start_offset(self, match):
        """Matches fixed offset events.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_fixed_start_offset_terms(self):
        """Clears the fixed start offset terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    fixed_start_offset_terms = property(fdel=clear_fixed_start_offset_terms)
    @abc.abstractmethod
    def match_relative_weekday_start_offset(self, low, high, match):
        """Matches a relative weekday offset amount between the given range inclusive.

        :param low: the start of the range
        :type low: ``integer``
        :param high: the end of the range
        :type high: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_relative_weekday_start_offset_terms(self):
        """Clears the relative weekday start offset terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    relative_weekday_start_offset_terms = property(fdel=clear_relative_weekday_start_offset_terms)
    @abc.abstractmethod
    def match_relative_start_weekday(self, weekday, match):
        """Matches a relative starting weekday.

        :param weekday: the weekday
        :type weekday: ``cardinal``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_relative_start_weekday(self, match):
        """Matches relative weekday offset events.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_relative_start_weekday_terms(self):
        """Clears the relative start weekday terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    relative_start_weekday_terms = property(fdel=clear_relative_start_weekday_terms)
    @abc.abstractmethod
    def match_fixed_duration(self, low, high, match):
        """Matches a fixed duration between the given range inclusive.

        :param low: the start of the range
        :type low: ``osid.calendaring.Duration``
        :param high: the end of the range
        :type high: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_fixed_duration_terms(self):
        """Clears the fixed duration terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    fixed_duration_terms = property(fdel=clear_fixed_duration_terms)
    @abc.abstractmethod
    def match_end_reference_event_id(self, event_id, match):
        """Sets the end reference event ``Id`` for this query.

        :param event_id: an event ``Id``
        :type event_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``event_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_end_reference_event_id_terms(self):
        """Clears the end reference event ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    end_reference_event_id_terms = property(fdel=clear_end_reference_event_id_terms)
    @abc.abstractmethod
    def supports_end_reference_event_query(self):
        """Tests if an ``EventQuery`` is available for querying end reference event terms.

        :return: ``true`` if an event query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_end_reference_event_query(self):
        """Gets the query for the end reference event.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the event query
        :rtype: ``osid.calendaring.EventQuery``
        :raise: ``Unimplemented`` -- ``supports_end_reference_event_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_end_reference_event_query()`` is ``true``.*

        """
        return # osid.calendaring.EventQuery
    end_reference_event_query = property(fget=get_end_reference_event_query)
    @abc.abstractmethod
    def match_any_end_reference_event(self, match):
        """Matches any end reference event events.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_end_reference_event_terms(self):
        """Clears the end reference event terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    end_reference_event_terms = property(fdel=clear_end_reference_event_terms)
    @abc.abstractmethod
    def match_fixed_end_offset(self, from_, to, match):
        """Matches a fixed offset amount between the given range inclusive.

        :param from_: the start of the range
        :type from_: ``osid.calendaring.Duration``
        :param to: the end of the range
        :type to: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from_``
        :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_fixed_end_offset(self, match):
        """Matches fixed offset events.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_fixed_end_offset_terms(self):
        """Clears the fixed end offset terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    fixed_end_offset_terms = property(fdel=clear_fixed_end_offset_terms)
    @abc.abstractmethod
    def match_relative_weekday_end_offset(self, low, high, match):
        """Matches a relative weekday offset amount between the given range inclusive.

        :param low: the start of the range
        :type low: ``integer``
        :param high: the end of the range
        :type high: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_relative_weekday_end_offset_terms(self):
        """Clears the relative weekday end offset terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    relative_weekday_end_offset_terms = property(fdel=clear_relative_weekday_end_offset_terms)
    @abc.abstractmethod
    def match_relative_end_weekday(self, weekday, match):
        """Matches a relative ending weekday.

        :param weekday: the weekday
        :type weekday: ``cardinal``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_relative_end_weekday(self, match):
        """Matches relative weekday offset events.

        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_relative_end_weekday_terms(self):
        """Clears the relative end weekday terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    relative_end_weekday_terms = property(fdel=clear_relative_end_weekday_terms)
    @abc.abstractmethod
    def match_location_description(self, location, string_match_type, match):
        """Matches the location description string.

        :param location: location string
        :type location: ``string``
        :param string_match_type: string match type
        :type string_match_type: ``osid.type.Type``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``location`` is not of ``string_match_type``
        :raise: ``NullArgument`` -- ``location`` or ``string_match_type`` is ``null``
        :raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_location_description(self, match):
        """Matches an event that has any location description assigned.

        :param match: ``true`` to match events with any location description,
            ``false`` to match events with no location description
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_location_description_terms(self):
        """Clears the location description terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    location_description_terms = property(fdel=clear_location_description_terms)
    @abc.abstractmethod
    def match_location_id(self, location_id, match):
        """Sets the location ``Id`` for this query.

        :param location_id: a location ``Id``
        :type location_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``location_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_location_id_terms(self):
        """Clears the location ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    location_id_terms = property(fdel=clear_location_id_terms)
    @abc.abstractmethod
    def supports_location_query(self):
        """Tests if a ``LocationQuery`` is available for querying locations.

        :return: ``true`` if a location query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_location_query(self):
        """Gets the query for a location.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the location query
        :rtype: ``osid.mapping.LocationQuery``
        :raise: ``Unimplemented`` -- ``supports_location_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_location_query()`` is ``true``.*

        """
        return # osid.mapping.LocationQuery
    location_query = property(fget=get_location_query)
    @abc.abstractmethod
    def match_any_location(self, match):
        """Matches an event that has any location assigned.

        :param match: ``true`` to match events with any location,
            ``false`` to match events with no location
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_location_terms(self):
        """Clears the location terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    location_terms = property(fdel=clear_location_terms)
    @abc.abstractmethod
    def match_sponsor_id(self, sponsor_id, match):
        """Sets the sponsor ``Id`` for this query.

        :param sponsor_id: a sponsor ``Id``
        :type sponsor_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``sponsor_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_sponsor_id_terms(self):
        """Clears the sponsor ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    sponsor_id_terms = property(fdel=clear_sponsor_id_terms)
    @abc.abstractmethod
    def supports_sponsor_query(self):
        """Tests if a ``ResourceQuery`` is available for querying sponsors.

        :return: ``true`` if a sponsor query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_sponsor_query(self):
        """Gets the query for a sponsor.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the sponsor query
        :rtype: ``osid.resource.ResourceQuery``
        :raise: ``Unimplemented`` -- ``supports_sponsor_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_sponsor_query()`` is ``true``.*

        """
        return # osid.resource.ResourceQuery
    sponsor_query = property(fget=get_sponsor_query)
    @abc.abstractmethod
    def clear_sponsor_terms(self):
        """Clears the sponsor terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    sponsor_terms = property(fdel=clear_sponsor_terms)
    @abc.abstractmethod
    def match_calendar_id(self, calendar_id, match):
        """Sets the calendar ``Id`` for this query.

        :param calendar_id: a calendar ``Id``
        :type calendar_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``calendar_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_calendar_id_terms(self):
        """Clears the calendar ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_id_terms = property(fdel=clear_calendar_id_terms)
    @abc.abstractmethod
    def supports_calendar_query(self):
        """Tests if a ``CalendarQuery`` is available for querying calendars.

        :return: ``true`` if a calendar query is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean
    @abc.abstractmethod
    def get_calendar_query(self):
        """Gets the query for a calendar.

        Multiple retrievals produce a nested ``OR`` term.

        :return: the calendar query
        :rtype: ``osid.calendaring.CalendarQuery``
        :raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``

        *compliance: optional -- This method must be implemented if
        ``supports_calendar_query()`` is ``true``.*

        """
        return # osid.calendaring.CalendarQuery
    calendar_query = property(fget=get_calendar_query)
    @abc.abstractmethod
    def clear_calendar_terms(self):
        """Clears the calendar terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    calendar_terms = property(fdel=clear_calendar_terms)
    @abc.abstractmethod
    def get_offset_event_query_record(self, offset_event_record_type):
        """Gets the offset event query record corresponding to the given ``OffsetEvent`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.

        :param offset_event_record_type: an offset event query record type
        :type offset_event_record_type: ``osid.type.Type``
        :return: the offset event query record
        :rtype: ``osid.calendaring.records.OffsetEventQueryRecord``
        :raise: ``NullArgument`` -- ``offset_event_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(offset_event_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.calendaring.records.OffsetEventQueryRecord
class ScheduleQuery:
    """This is the query for searching schedules.
    Each method match request produces an ``AND`` term while multiple
    invocations of a method produces a nested ``OR``.
    """
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def match_schedule_slot_id(self, schedule_slot_id, match):
        """Sets the schedule ``Id`` for this query for matching nested schedule slots.
        :param schedule_slot_id: a schedule slot ``Id``
        :type schedule_slot_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``schedule_slot_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_slot_id_terms(self):
        """Clears the schedule slot ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_slot_id_terms = property(fdel=clear_schedule_slot_id_terms)
    @abc.abstractmethod
    def supports_schedule_slot_query(self):
        """Tests if a ``ScheduleSlotQuery`` is available for querying schedule slots.
        :return: ``true`` if a schedule slot query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_schedule_slot_query(self):
        """Gets the query for a schedule slot.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the schedule slot query
        :rtype: ``osid.calendaring.ScheduleSlotQuery``
        :raise: ``Unimplemented`` -- ``supports_schedule_slot_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_schedule_slot_query()`` is ``true``.*
        """
        return # osid.calendaring.ScheduleSlotQuery
    schedule_slot_query = property(fget=get_schedule_slot_query)
    @abc.abstractmethod
    def match_any_schedule_slot(self, match):
        """Matches a schedule that has any schedule slot assigned.
        :param match: ``true`` to match schedule with any schedule slots, ``false`` to match schedules with no schedule
        slots
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_slot_terms(self):
        """Clears the schedule slot terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_slot_terms = property(fdel=clear_schedule_slot_terms)
    @abc.abstractmethod
    def match_time_period_id(self, time_period_id, match):
        """Sets the time period ``Id`` for this query.
        :param time_period_id: a time period ``Id``
        :type time_period_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``time_period_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_time_period_id_terms(self):
        """Clears the time period ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    time_period_id_terms = property(fdel=clear_time_period_id_terms)
    @abc.abstractmethod
    def supports_time_period_query(self):
        """Tests if a ``TimePeriodQuery`` is available.
        :return: ``true`` if a time period query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_time_period_query(self):
        """Gets the query for a time period.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the time period query
        :rtype: ``osid.calendaring.TimePeriodQuery``
        :raise: ``Unimplemented`` -- ``supports_time_period_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_time_period_query()`` is ``true``.*
        """
        return # osid.calendaring.TimePeriodQuery
    time_period_query = property(fget=get_time_period_query)
    @abc.abstractmethod
    def match_any_time_period(self, match):
        """Matches a schedule that has any time period assigned.
        :param match: ``true`` to match schedules with any time periods, ``false`` to match schedules with no time
        periods
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_time_period_terms(self):
        """Clears the time period terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    time_period_terms = property(fdel=clear_time_period_terms)
    @abc.abstractmethod
    def match_schedule_start(self, low, high, match):
        """Matches the schedule start time between the given range inclusive.
        :param low: low time range
        :type low: ``osid.calendaring.DateTime``
        :param high: high time range
        :type high: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_schedule_start(self, match):
        """Matches a schedule that has any start time assigned.
        :param match: ``true`` to match schedules with any start time, ``false`` to match schedules with no start time
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_start_terms(self):
        """Clears the schedule start terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_start_terms = property(fdel=clear_schedule_start_terms)
    @abc.abstractmethod
    def match_schedule_end(self, low, high, match):
        """Matches the schedule end time between the given range inclusive.
        :param low: low time range
        :type low: ``osid.calendaring.DateTime``
        :param high: high time range
        :type high: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_schedule_end(self, match):
        """Matches a schedule that has any end time assigned.
        :param match: ``true`` to match schedules with any end time, ``false`` to match schedules with no end time
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_end_terms(self):
        """Clears the schedule end terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_end_terms = property(fdel=clear_schedule_end_terms)
    @abc.abstractmethod
    def match_schedule_time(self, date, match):
        """Matches schedules with start and end times between the given range inclusive.
        :param date: a date
        :type date: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``date`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_schedule_time(self, match):
        """Matches schedules that have any time assigned.
        :param match: ``true`` to match schedules with any time, ``false`` to match schedules with no time
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_time_terms(self):
        """Clears the schedule time terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_time_terms = property(fdel=clear_schedule_time_terms)
    @abc.abstractmethod
    def match_schedule_time_inclusive(self, start, end, match):
        """Matches schedules with start and end times between the given range inclusive.
        :param start: start date
        :type start: ``osid.calendaring.DateTime``
        :param end: end date
        :type end: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``end`` is less than ``start``
        :raise: ``NullArgument`` -- ``end`` or ``start`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_time_inclusive_terms(self):
        """Clears the schedule time inclusive terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_time_inclusive_terms = property(fdel=clear_schedule_time_inclusive_terms)
    @abc.abstractmethod
    def match_limit(self, from_, to, match):
        """Matches schedules that have the given limit in the given range inclusive.
        :param from: start range
        :type from: ``integer``
        :param to: end range
        :type to: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_limit(self, match):
        """Matches schedules with any occurrence limit.
        :param match: ``true`` to match schedules with any limit, ``false`` to match schedules with no limit
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_limit_terms(self):
        """Clears the limit terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    limit_terms = property(fdel=clear_limit_terms)
    @abc.abstractmethod
    def match_location_description(self, location, string_match_type, match):
        """Matches the location description string.
        :param location: location string
        :type location: ``string``
        :param string_match_type: string match type
        :type string_match_type: ``osid.type.Type``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``location`` is not of ``string_match_type``
        :raise: ``NullArgument`` -- ``location`` or ``string_match_type`` is ``null``
        :raise: ``Unsupported`` -- ``supports_string_match_type(string_match_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_location_description(self, match):
        """Matches a schedule that has any location description assigned.
        :param match: ``true`` to match schedules with any location description, ``false`` to match schedules with no
        location description
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_location_description_terms(self):
        """Clears the location description terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    location_description_terms = property(fdel=clear_location_description_terms)
    @abc.abstractmethod
    def match_location_id(self, location_id, match):
        """Sets the location ``Id`` for this query.
        :param location_id: a location ``Id``
        :type location_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``location_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_location_id_terms(self):
        """Clears the location ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    location_id_terms = property(fdel=clear_location_id_terms)
    @abc.abstractmethod
    def supports_location_query(self):
        """Tests if a ``LocationQuery`` is available for querying locations.
        :return: ``true`` if a location query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_location_query(self):
        """Gets the query for a location.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the location query
        :rtype: ``osid.mapping.LocationQuery``
        :raise: ``Unimplemented`` -- ``supports_location_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_location_query()`` is ``true``.*
        """
        return # osid.mapping.LocationQuery
    location_query = property(fget=get_location_query)
    @abc.abstractmethod
    def match_any_location(self, match):
        """Matches a schedule that has any location assigned.
        :param match: ``true`` to match schedules with any location, ``false`` to match schedules with no location
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_location_terms(self):
        """Clears the location terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    location_terms = property(fdel=clear_location_terms)
    @abc.abstractmethod
    def match_total_duration(self, low, high, match):
        """Matches the total duration between the given range inclusive.
        :param low: low duration range
        :type low: ``osid.calendaring.Duration``
        :param high: high duration range
        :type high: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_total_duration_terms(self):
        """Clears the total duration terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    total_duration_terms = property(fdel=clear_total_duration_terms)
    @abc.abstractmethod
    def match_calendar_id(self, calendar_id, match):
        """Sets the calendar ``Id`` for this query.
        :param calendar_id: a calendar ``Id``
        :type calendar_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``calendar_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_calendar_id_terms(self):
        """Clears the calendar ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    calendar_id_terms = property(fdel=clear_calendar_id_terms)
    @abc.abstractmethod
    def supports_calendar_query(self):
        """Tests if a ``CalendarQuery`` is available for querying calendars.
        :return: ``true`` if a calendar query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_calendar_query(self):
        """Gets the query for a calendar.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the calendar query
        :rtype: ``osid.calendaring.CalendarQuery``
        :raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_calendar_query()`` is ``true``.*
        """
        return # osid.calendaring.CalendarQuery
    calendar_query = property(fget=get_calendar_query)
    @abc.abstractmethod
    def clear_calendar_terms(self):
        """Clears the calendar terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    calendar_terms = property(fdel=clear_calendar_terms)
    @abc.abstractmethod
    def get_schedule_query_record(self, schedule_record_type):
        """Gets the schedule query record corresponding to the given ``Schedule`` record ``Type``.
        Multiple retrievals produce a nested ``OR`` term.
        :param schedule_record_type: a schedule query record type
        :type schedule_record_type: ``osid.type.Type``
        :return: the schedule query record
        :rtype: ``osid.calendaring.records.ScheduleQueryRecord``
        :raise: ``NullArgument`` -- ``schedule_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(schedule_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.calendaring.records.ScheduleQueryRecord
class ScheduleSlotQuery:
    """This is the query for searching schedule slots.
    Each method match request produces an ``AND`` term while multiple
    invocations of a method produces a nested ``OR``.
    """
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def match_schedule_slot_id(self, schedule_slot_id, match):
        """Sets the schedule ``Id`` for this query for matching nested schedule slots.
        :param schedule_slot_id: a schedule slot ``Id``
        :type schedule_slot_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``schedule_slot_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_slot_id_terms(self):
        """Clears the schedule slot ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_slot_id_terms = property(fdel=clear_schedule_slot_id_terms)
    @abc.abstractmethod
    def supports_schedule_slot_query(self):
        """Tests if a ``ScheduleSlotQuery`` is available for querying schedule slots.
        :return: ``true`` if a schedule slot query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_schedule_slot_query(self):
        """Gets the query for a schedule slot.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the schedule slot query
        :rtype: ``osid.calendaring.ScheduleSlotQuery``
        :raise: ``Unimplemented`` -- ``supports_schedule_slot_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_schedule_slot_query()`` is ``true``.*
        """
        return # osid.calendaring.ScheduleSlotQuery
    schedule_slot_query = property(fget=get_schedule_slot_query)
    @abc.abstractmethod
    def match_any_schedule_slot(self, match):
        """Matches a schedule that has any schedule slot assigned.
        :param match: ``true`` to match schedule with any schedule slots, ``false`` to match schedules with no schedule
        slots
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_schedule_slot_terms(self):
        """Clears the schedule slot terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    schedule_slot_terms = property(fdel=clear_schedule_slot_terms)
    @abc.abstractmethod
    def match_weekday(self, weekday, match):
        """Matches schedules that have the given weekday.
        :param weekday: a weekday
        :type weekday: ``cardinal``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_weekday(self, match):
        """Matches schedules with any weekday set.
        :param match: ``true`` to match schedules with any weekday, ``false`` to match schedules with no weekday
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_weekday_terms(self):
        """Clears the weekday terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    weekday_terms = property(fdel=clear_weekday_terms)
    @abc.abstractmethod
    def match_weekly_interval(self, from_, to, match):
        """Matches schedules that have the given weekly interval in the given range inclusive.
        :param from: start range
        :type from: ``integer``
        :param to: end range
        :type to: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_weekly_interval(self, match):
        """Matches schedules with any weekly interval set.
        :param match: ``true`` to match schedules with any weekly interval, ``false`` to match schedules with no weekly
        interval
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_weekly_interval_terms(self):
        """Clears the weekly interval terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    weekly_interval_terms = property(fdel=clear_weekly_interval_terms)
    @abc.abstractmethod
    def match_week_of_month(self, from_, to, match):
        """Matches schedules that have a week of month in the given range inclusive.
        :param from: start range
        :type from: ``integer``
        :param to: end range
        :type to: ``integer``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_week_of_month(self, match):
        """Matches schedules with any month week set.
        :param match: ``true`` to match schedules with any week of month, ``false`` to match schedules with no month
        week
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_week_of_month_terms(self):
        """Clears the week of month terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    week_of_month_terms = property(fdel=clear_week_of_month_terms)
    @abc.abstractmethod
    def match_weekday_time(self, from_, to, match):
        """Matches schedules that have a weekday time in the given range inclusive.
        :param from: start range
        :type from: ``osid.calendaring.Time``
        :param to: end range
        :type to: ``osid.calendaring.Time``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from``
        :raise: ``NullArgument`` -- ``from`` or ``to`` ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_weekday_time(self, match):
        """Matches schedules with any weekday time.
        :param match: ``true`` to match schedules with any weekday time, ``false`` to match schedules with no weekday
        time
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_weekday_time_terms(self):
        """Clears the weekday time terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    weekday_time_terms = property(fdel=clear_weekday_time_terms)
    @abc.abstractmethod
    def match_fixed_interval(self, from_, to, match):
        """Matches schedules that have the given fixed interval in the given range inclusive.
        :param from: start range
        :type from: ``osid.calendaring.Duration``
        :param to: end range
        :type to: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``to`` is less than ``from``
        :raise: ``NullArgument`` -- ``from`` or ``to`` ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_fixed_interval(self, match):
        """Matches schedules with any fixed interval.
        :param match: ``true`` to match schedules with any fixed interval, ``false`` to match schedules with no fixed
        interval
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_fixed_interval_terms(self):
        """Clears the fixed interval terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    fixed_interval_terms = property(fdel=clear_fixed_interval_terms)
    @abc.abstractmethod
    def match_duration(self, low, high, match):
        """Matches the duration between the given range inclusive.
        :param low: low duration range
        :type low: ``osid.calendaring.Duration``
        :param high: high duration range
        :type high: ``osid.calendaring.Duration``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def match_any_duration(self, match):
        """Matches a schedule slot that has any duration.
        :param match: ``true`` to match schedules with any duration, ``false`` to match schedules with no duration
        :type match: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_duration_terms(self):
        """Clears the duration terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    duration_terms = property(fdel=clear_duration_terms)
    @abc.abstractmethod
    def match_calendar_id(self, calendar_id, match):
        """Sets the calendar ``Id`` for this query.
        :param calendar_id: a calendar ``Id``
        :type calendar_id: ``osid.id.Id``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``NullArgument`` -- ``calendar_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    @abc.abstractmethod
    def clear_calendar_id_terms(self):
        """Clears the calendar ``Id`` terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    calendar_id_terms = property(fdel=clear_calendar_id_terms)
    @abc.abstractmethod
    def supports_calendar_query(self):
        """Tests if a ``CalendarQuery`` is available for querying calendars.
        :return: ``true`` if a calendar query is available, ``false`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # boolean
    @abc.abstractmethod
    def get_calendar_query(self):
        """Gets the query for a calendar.
        Multiple retrievals produce a nested ``OR`` term.
        :return: the calendar query
        :rtype: ``osid.calendaring.CalendarQuery``
        :raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_calendar_query()`` is ``true``.*
        """
        return # osid.calendaring.CalendarQuery
    calendar_query = property(fget=get_calendar_query)
    @abc.abstractmethod
    def clear_calendar_terms(self):
        """Clears the calendar terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        pass
    calendar_terms = property(fdel=clear_calendar_terms)
    @abc.abstractmethod
    def get_schedule_slot_query_record(self, schedule_slot_record_type):
        """Gets the schedule slot query record corresponding to the given ``ScheduleSlot`` record ``Type``.
        Multiple retrievals produce a nested ``OR`` term.
        :param schedule_slot_record_type: a schedule slot query record type
        :type schedule_slot_record_type: ``osid.type.Type``
        :return: the schedule slot query record
        :rtype: ``osid.calendaring.records.ScheduleSlotQueryRecord``
        :raise: ``NullArgument`` -- ``schedule_slot_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(schedule_slot_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.calendaring.records.ScheduleSlotQueryRecord
class TimePeriodQuery:
"""This is the query for searching time periods.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def match_start(self, low, high, match):
        """Matches the time period start time between the given range inclusive.

        :param low: low time range
        :type low: ``osid.calendaring.DateTime``
        :param high: high time range
        :type high: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_start(self, match):
        """Matches a time period that has any start time assigned.

        :param match: ``true`` to match time periods with any start time, ``false`` to match time periods with no start
        time
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_start_terms(self):
        """Clears the time period start terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    start_terms = property(fdel=clear_start_terms)
    @abc.abstractmethod
    def match_end(self, low, high, match):
        """Matches the time period end time between the given range inclusive.

        :param low: low time range
        :type low: ``osid.calendaring.DateTime``
        :param high: high time range
        :type high: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``
        :raise: ``InvalidArgument`` -- ``high`` is less than ``low``
        :raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_end(self, match):
        """Matches a time period that has any end time assigned.

        :param match: ``true`` to match time periods with any end time, ``false`` to match time periods with no end time
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def clear_end_terms(self):
        """Clears the time period end terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    end_terms = property(fdel=clear_end_terms)
    @abc.abstractmethod
    def match_time(self, time, match):
        """Matches time periods that include the given time.

        :param time: date
        :type time: ``osid.calendaring.DateTime``
        :param match: ``true`` for a positive match, ``false`` for a negative match
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
    @abc.abstractmethod
    def match_any_time(self, match):
        """Matches a time period that has any time assigned.

        :param match: ``true`` to match time periods with any time, ``false`` to match time periods with no time
        :type match: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
@abc.abstractmethod
def clear_time_terms(self):
"""Clears the time terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_terms = property(fdel=clear_time_terms)
@abc.abstractmethod
def match_time_inclusive(self, start, end, match):
"""Matches time periods with start and end times between the given range inclusive.
:param start: start date
:type start: ``osid.calendaring.DateTime``
:param end: end date
:type end: ``osid.calendaring.DateTime``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``end`` is less than ``start``
:raise: ``NullArgument`` -- ``start`` or ``end`` is ``zero``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_time_inclusive_terms(self):
"""Clears the time inclusive terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_inclusive_terms = property(fdel=clear_time_inclusive_terms)
@abc.abstractmethod
def match_duration(self, low, high, match):
"""Matches the time period duration between the given range inclusive.
:param low: low duration range
:type low: ``osid.calendaring.Duration``
:param high: high duration range
:type high: ``osid.calendaring.Duration``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``InvalidArgument`` -- ``high`` is less than ``low``
:raise: ``NullArgument`` -- ``high`` or ``low`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_duration_terms(self):
"""Clears the duration terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
duration_terms = property(fdel=clear_duration_terms)
@abc.abstractmethod
def match_exception_id(self, event_id, match):
"""Sets the event ``Id`` for this query to match exceptions.
:param event_id: an exception event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_exception_id_terms(self):
"""Clears the exception event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
exception_id_terms = property(fdel=clear_exception_id_terms)
@abc.abstractmethod
def supports_exception_query(self):
"""Tests if an ``EventQuery`` is available for querying exception events.
:return: ``true`` if a exception query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_exception_query(self):
"""Gets the query for an exception event.
Multiple retrievals produce a nested ``OR`` term.
:return: the event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_exception_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_exception_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
exception_query = property(fget=get_exception_query)
@abc.abstractmethod
def match_any_exception(self, match):
"""Matches a time period that has any exception event assigned.
:param match: ``true`` to match time periods with any exception, ``false`` to match time periods with no
exception
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_exception_terms(self):
"""Clears the exception event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
exception_terms = property(fdel=clear_exception_terms)
@abc.abstractmethod
def match_event_id(self, event_id, match):
"""Sets the event ``Id`` for this query.
:param event_id: an event or recurring event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_event_id_terms(self):
"""Clears the event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_id_terms = property(fdel=clear_event_id_terms)
@abc.abstractmethod
def supports_event_query(self):
"""Tests if an ``EventQuery`` is available for querying events.
:return: ``true`` if an event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_event_query(self):
"""Gets the query for an event or recurring event.
Multiple retrievals produce a nested ``OR`` term.
:return: the event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_event_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
event_query = property(fget=get_event_query)
@abc.abstractmethod
def match_any_event(self, match):
"""Matches a time period that has any event assigned.
:param match: ``true`` to match time periods with any event, ``false`` to match time periods with no events
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_event_terms(self):
"""Clears the event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_terms = property(fdel=clear_event_terms)
@abc.abstractmethod
def match_calendar_id(self, calendar_id, match):
"""Sets the calendar ``Id`` for this query.
:param calendar_id: a calendar ``Id``
:type calendar_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_calendar_id_terms(self):
"""Clears the calendar ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_id_terms = property(fdel=clear_calendar_id_terms)
@abc.abstractmethod
def supports_calendar_query(self):
"""Tests if a ``CalendarQuery`` is available for querying resources.
:return: ``true`` if a calendar query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_calendar_query(self):
"""Gets the query for a calendar.
Multiple retrievals produce a nested ``OR`` term.
:return: the calendar query
:rtype: ``osid.calendaring.CalendarQuery``
:raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_calendar_query()`` is ``true``.*
"""
return # osid.calendaring.CalendarQuery
calendar_query = property(fget=get_calendar_query)
@abc.abstractmethod
def clear_calendar_terms(self):
"""Clears the calendar terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_terms = property(fdel=clear_calendar_terms)
@abc.abstractmethod
def get_time_period_query_record(self, time_period_record_type):
"""Gets the time period query record corresponding to the given ``TimePeriod`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param time_period_record_type: a time period query record type
:type time_period_record_type: ``osid.type.Type``
:return: the time period query record
:rtype: ``osid.calendaring.records.TimePeriodQueryRecord``
:raise: ``NullArgument`` -- ``time_period_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(time_period_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.records.TimePeriodQueryRecord
class CommitmentQuery:
"""This is the query for searching commitments.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_event_id(self, event_id, match):
"""Sets the event ``Id`` for this query.
:param event_id: an event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_event_id_terms(self):
"""Clears the event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_id_terms = property(fdel=clear_event_id_terms)
@abc.abstractmethod
def supports_event_query(self):
"""Tests if an ``EventQuery`` is available.
:return: ``true`` if an event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_event_query(self):
"""Gets the query for an event.
Multiple retrievals produce a nested ``OR`` term.
:return: the event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_event_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
event_query = property(fget=get_event_query)
@abc.abstractmethod
def clear_event_terms(self):
"""Clears the event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_terms = property(fdel=clear_event_terms)
@abc.abstractmethod
def match_resource_id(self, resource_id, match):
"""Sets the resource ``Id`` for this query.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_resource_id_terms(self):
"""Clears the resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
resource_id_terms = property(fdel=clear_resource_id_terms)
@abc.abstractmethod
def supports_resource_query(self):
"""Tests if a ``ResourceQuery`` is available for querying resources.
:return: ``true`` if a resource query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_resource_query(self):
"""Gets the query for a resource.
Multiple retrievals produce a nested ``OR`` term.
:return: the resource query
:rtype: ``osid.resource.ResourceQuery``
:raise: ``Unimplemented`` -- ``supports_resource_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_query()`` is ``true``.*
"""
return # osid.resource.ResourceQuery
resource_query = property(fget=get_resource_query)
@abc.abstractmethod
def clear_resource_terms(self):
"""Clears the resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
resource_terms = property(fdel=clear_resource_terms)
@abc.abstractmethod
def match_calendar_id(self, calendar_id, match):
"""Sets the calendar ``Id`` for this query.
:param calendar_id: a calendar ``Id``
:type calendar_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_calendar_id_terms(self):
"""Clears the calendar ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_id_terms = property(fdel=clear_calendar_id_terms)
@abc.abstractmethod
def supports_calendar_query(self):
"""Tests if a ``CalendarQuery`` is available for querying resources.
:return: ``true`` if a calendar query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_calendar_query(self):
"""Gets the query for a calendar.
Multiple retrievals produce a nested ``OR`` term.
:return: the calendar query
:rtype: ``osid.calendaring.CalendarQuery``
:raise: ``Unimplemented`` -- ``supports_calendar_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_calendar_query()`` is ``true``.*
"""
return # osid.calendaring.CalendarQuery
calendar_query = property(fget=get_calendar_query)
@abc.abstractmethod
def clear_calendar_terms(self):
"""Clears the calendar terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_terms = property(fdel=clear_calendar_terms)
@abc.abstractmethod
def get_commitment_query_record(self, commitment_record_type):
"""Gets the commitment query record corresponding to the given ``Commitment`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
:param commitment_record_type: a commitment query record type
:type commitment_record_type: ``osid.type.Type``
:return: the commitment query record
:rtype: ``osid.calendaring.records.CommitmentQueryRecord``
:raise: ``NullArgument`` -- ``commitment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(commitment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.records.CommitmentQueryRecord
class CalendarQuery:
"""This is the query for searching calendars.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def match_event_id(self, event_id, match):
"""Sets the event ``Id`` for this query.
:param event_id: an event ``Id``
:type event_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_event_id_terms(self):
"""Clears the event ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_id_terms = property(fdel=clear_event_id_terms)
@abc.abstractmethod
def supports_event_query(self):
"""Tests if an ``EventQuery`` is available.
:return: ``true`` if an event query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_event_query(self):
"""Gets the query for an event.
Multiple retrievals produce a nested ``OR`` term.
:return: the event query
:rtype: ``osid.calendaring.EventQuery``
:raise: ``Unimplemented`` -- ``supports_event_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_event_query()`` is ``true``.*
"""
return # osid.calendaring.EventQuery
event_query = property(fget=get_event_query)
@abc.abstractmethod
def match_any_event(self, match):
"""Matches a calendar that has any event assigned.
:param match: ``true`` to match calendars with any event, ``false`` to match calendars with no events
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_event_terms(self):
"""Clears the event terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
event_terms = property(fdel=clear_event_terms)
@abc.abstractmethod
def match_time_period_id(self, time_period_id, match):
"""Sets the time period ``Id`` for this query.
:param time_period_id: a time period ``Id``
:type time_period_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``time_period_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_time_period_id_terms(self):
"""Clears the time period ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_period_id_terms = property(fdel=clear_time_period_id_terms)
@abc.abstractmethod
def supports_time_period_query(self):
"""Tests if a ``TimePeriodQuery`` is available.
:return: ``true`` if a time period query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_time_period_query(self):
"""Gets the query for a time period.
Multiple retrievals produce a nested ``OR`` term.
:return: the tiem period query
:rtype: ``osid.calendaring.TimePeriodQuery``
:raise: ``Unimplemented`` -- ``supports_time_period_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_time_period_query()`` is ``true``.*
"""
return # osid.calendaring.TimePeriodQuery
time_period_query = property(fget=get_time_period_query)
@abc.abstractmethod
def match_any_time_period(self, match):
"""Matches a calendar that has any time period assigned.
:param match: ``true`` to match calendars with any time period, ``false`` to match calendars with no time
periods
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_time_period_terms(self):
"""Clears the time period terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_period_terms = property(fdel=clear_time_period_terms)
@abc.abstractmethod
def match_commitment_id(self, commitment_id, match):
"""Sets the commitment ``Id`` for this query.
:param commitment_id: a commitment ``Id``
:type commitment_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``commitment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commitment_id_terms(self):
"""Clears the commitment ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commitment_id_terms = property(fdel=clear_commitment_id_terms)
@abc.abstractmethod
def supports_commitment_query(self):
"""Tests if a ``CommitmentQuery`` is available.
:return: ``true`` if a commitment query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_commitment_query(self):
"""Gets the query for a commitment.
Multiple retrievals produce a nested ``OR`` term.
:return: the commitment query
:rtype: ``osid.calendaring.CommitmentQuery``
:raise: ``Unimplemented`` -- ``supports_commitment_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_commitment_query()`` is ``true``.*
"""
return # osid.calendaring.CommitmentQuery
commitment_query = property(fget=get_commitment_query)
@abc.abstractmethod
def match_any_commitment(self, match):
"""Matches a calendar that has any event commitment.
:param match: ``true`` to match calendars with any commitment, ``false`` to match calendars with no commitments
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_commitment_terms(self):
"""Clears the commitment terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
commitment_terms = property(fdel=clear_commitment_terms)
@abc.abstractmethod
def match_ancestor_calendar_id(self, calendar_id, match):
"""Sets the calendar ``Id`` for this query to match calendars that have the specified calendar as an ancestor.
:param calendar_id: a calendar ``Id``
:type calendar_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_calendar_id_terms(self):
"""Clears the ancestor calendar ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_calendar_id_terms = property(fdel=clear_ancestor_calendar_id_terms)
@abc.abstractmethod
def supports_ancestor_calendar_query(self):
"""Tests if a ``CalendarQuery`` is available.
:return: ``true`` if a calendar query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_ancestor_calendar_query(self):
"""Gets the query for a calendar.
Multiple retrievals produce a nested ``OR`` term.
:return: the calendar query
:rtype: ``osid.calendaring.CalendarQuery``
:raise: ``Unimplemented`` -- ``supports_ancestor_calendar_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_ancestor_calndar_query()`` is ``true``.*
"""
return # osid.calendaring.CalendarQuery
ancestor_calendar_query = property(fget=get_ancestor_calendar_query)
@abc.abstractmethod
def match_any_ancestor_calendar(self, match):
"""Matches a calendar that has any ancestor.
:param match: ``true`` to match calendars with any ancestor, ``false`` to match root calendars
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_ancestor_calendar_terms(self):
"""Clears the ancestor calendar terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
ancestor_calendar_terms = property(fdel=clear_ancestor_calendar_terms)
@abc.abstractmethod
def match_descendant_calendar_id(self, calendar_id, match):
"""Sets the calendar ``Id`` for this query to match calendars that have the specified calendar as a descendant.
:param calendar_id: a calendar ``Id``
:type calendar_id: ``osid.id.Id``
:param match: ``true`` for a positive match, ``false`` for a negative match
:type match: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_calendar_id_terms(self):
"""Clears the descendant calendar ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_calendar_id_terms = property(fdel=clear_descendant_calendar_id_terms)
@abc.abstractmethod
def supports_descendant_calendar_query(self):
"""Tests if a ``CalendarQuery``.
:return: ``true`` if a calendar query is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_descendant_calendar_query(self):
"""Gets the query for a calendar.
Multiple retrievals produce a nested ``OR`` term.
:return: the calendar query
:rtype: ``osid.calendaring.CalendarQuery``
:raise: ``Unimplemented`` -- ``supports_descendant_calendar_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_descendant_calndar_query()`` is ``true``.*
"""
return # osid.calendaring.CalendarQuery
descendant_calendar_query = property(fget=get_descendant_calendar_query)
@abc.abstractmethod
def match_any_descendant_calendar(self, match):
"""Matches a calendar that has any descendant.
:param match: ``true`` to match calendars with any descendant, ``false`` to match leaf calendars
:type match: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def clear_descendant_calendar_terms(self):
"""Clears the descendant calendar terms.
*compliance: mandatory -- This method must be implemented.*
"""
pass
descendant_calendar_terms = property(fdel=clear_descendant_calendar_terms)
@abc.abstractmethod
def get_calendar_query_record(self, calendar_record_type):
"""Gets the calendar query record corresponding to the given ``Calendar`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
:param calendar_record_type: a calendar record type
:type calendar_record_type: ``osid.type.Type``
:return: the calendar query record
:rtype: ``osid.calendaring.records.CalendarQueryRecord``
:raise: ``NullArgument`` -- ``calendar_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(calendar_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.records.CalendarQueryRecord
| 28.934574
| 120
| 0.632409
| 137,690
| 0.994705
| 0
| 0
| 123,595
| 0.892879
| 0
| 0
| 97,690
| 0.705735
|
0ca4dce21686a03b945a69ccbec119c4e788576f
| 2,373
|
py
|
Python
|
scripts/republish_s3_products.py
|
hysds/grq2
|
c86704a4e46f106ab00dcdfc9a658a97097e9289
|
[
"Apache-2.0"
] | 1
|
2019-10-18T21:27:56.000Z
|
2019-10-18T21:27:56.000Z
|
scripts/republish_s3_products.py
|
hysds/grq2
|
c86704a4e46f106ab00dcdfc9a658a97097e9289
|
[
"Apache-2.0"
] | 5
|
2019-10-17T15:46:23.000Z
|
2021-06-04T22:18:36.000Z
|
scripts/republish_s3_products.py
|
hysds/grq2
|
c86704a4e46f106ab00dcdfc9a658a97097e9289
|
[
"Apache-2.0"
] | 3
|
2018-04-08T12:53:24.000Z
|
2020-05-05T01:10:32.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import json
import requests
import sys
import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from hysds.orchestrator import submit_job
from grq2 import app
from grq2.lib.utils import parse_config
# get source and destination index
src = "grq_v02_wvcc_merged_data"
# bucket
bucket_name = "wvcc-dataset-bucket"
# region
region = "us-east-1"
# get s3 connection
s3_conn = S3Connection()
bucket = s3_conn.get_bucket(bucket_name)
# get connection and create destination index
es_url = app.config['ES_URL']
# index all docs from source index to destination index
query = {
"query": {
"query_string": {
"query": "\"%s\"" % bucket_name
}
},
"fields": ["_id", "urls"]
}
r = requests.post('%s/%s/_search?search_type=scan&scroll=60m&size=100' %
(es_url, src), data=json.dumps(query))
scan_result = r.json()
count = scan_result['hits']['total']
scroll_id = scan_result['_scroll_id']
results = []
while True:
r = requests.post('%s/_search/scroll?scroll=60m' % es_url, data=scroll_id)
res = r.json()
scroll_id = res['_scroll_id']
if len(res['hits']['hits']) == 0:
break
for hit in res['hits']['hits']:
doc = hit['fields']
prefix = "%s/" % doc['urls'][0].replace(
'http://%s.s3-website-%s.amazonaws.com/' % (bucket_name, region), '')
print((doc['_id'], prefix))
localize_urls = []
for i in bucket.list(prefix):
#localize_urls.append({ 'url': 's3://%s/%s' % (bucket_name, i.name), 'local_path': '%s/' % os.path.basename(prefix[0:-1]) })
localize_urls.append({'url': 'http://%s.s3-website-%s.amazonaws.com/%s' % (
bucket_name, region, i.name), 'local_path': '%s/' % os.path.basename(prefix[0:-1])})
payload = {
"job_type": "job:ingest_dataset",
"payload": {
"dataset": doc['_id'],
"dataset_urls": localize_urls
}
}
# print json.dumps(payload, indent=2)
submit_job.apply_async((payload,), queue="jobs_processed")
# sys.exit()
| 30.423077
| 136
| 0.629583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 840
| 0.353982
|
0ca59997a346eb090f3898738011c007aac380e0
| 5,550
|
py
|
Python
|
tensorflow/emo_tflearn.py
|
lukewegryn/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 4
|
2017-08-15T06:52:22.000Z
|
2020-02-13T18:18:13.000Z
|
tensorflow/emo_tflearn.py
|
luoda888/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 1
|
2018-06-14T08:42:11.000Z
|
2018-06-14T08:42:11.000Z
|
tensorflow/emo_tflearn.py
|
luoda888/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 6
|
2017-08-04T13:40:35.000Z
|
2021-08-07T11:37:44.000Z
|
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n' \
'You should consider updating to Python 3.4.0 or ' \
'higher as the libraries built for this course ' \
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n' \
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import IPython.display as ipyd
import csv
import shlex
except ImportError:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import IPython.display as ipyd
print('Done!')
# Import Tensorflow
try:
import tensorflow as tf
except ImportError:
print("You do not have tensorflow installed!")
print("Follow the instructions on the following link")
print("to install tensorflow before continuing:")
print("")
print("https://github.com/pkmital/CADL#installation-preliminaries")
try:
from libs import utils, gif, datasets, dataset_utils, vae, dft
except ImportError:
print("Make sure you have started notebook in the same directory" +
" as the provided zip file which includes the 'libs' folder" +
" and the file 'utils.py' inside of it. You will NOT be able"
" to complete this assignment unless you restart jupyter"
" notebook inside the directory created by extracting"
" the zip file or cloning the github repo.")
# We'll tell matplotlib to inline any drawn figures like so:
plt.style.use('ggplot')
def import_csv(filename):
labels = []
images = []
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if row[2] == "Training":
labels.append(row[0])
images.append(row[1])
return labels, images
######## Start actual code ##########
data_file = "/Users/luke/ownCloud/deep_learning/course/final_project/fer2013.csv"
labels,images = import_csv(data_file)
assert(len(labels) == len(images))
#read in the images
imgs = []
for image in images:
imgs.append(np.fromstring(str(image), dtype=np.uint8,sep=' '))
Xs = imgs
ys = labels
Xs = np.array(imgs).astype(np.uint8)
ys = np.array(ys).astype(np.uint8)
#print(ys)
assert(len(Xs) == len(ys))
ds = datasets.Dataset(Xs,ys,one_hot=True,split=[0.8, 0.1, 0.1])
for i in range(0, 10):
ds.X[i].shape
from tensorflow.python.framework.ops import reset_default_graph
reset_default_graph()
# We'll have placeholders just like before which we'll fill in later.
n_input = 48*48
n_output = 7
ds_X_reshape = np.reshape(ds.X,(28709, 48, 48, 1))
ds_valid_images_reshape = np.reshape(ds.valid.images,(ds.valid.images.shape[0],48,48,1))
#https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
#pip install tflearn
import tflearn
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# The triple-quoted string below is a disabled, smaller alternative network
# kept for reference (it is a bare expression, never executed).
"""
net = tflearn.input_data(shape=[None, 48, 48,1])
net = tflearn.conv_2d(net, 64, 5, activation = 'relu')
net = tflearn.max_pool_2d(net, 3, strides = 2)
net = tflearn.conv_2d(net, 64, 5, activation = 'relu')
net = tflearn.max_pool_2d(net, 3, strides = 2)
net = tflearn.conv_2d(net, 128, 4, activation = 'relu')
net = tflearn.dropout(net, 0.3)
net = tflearn.fully_connected(net, 3072, activation = 'tanh')
net = tflearn.fully_connected(net, 7, activation='softmax')
net = tflearn.regression(net, optimizer='momentum', loss='categorical_crossentropy')
"""
# AlexNet-style architecture adapted to 48x48x1 inputs / 7 emotion classes.
network = tflearn.input_data(shape=[None, 48, 48,1])
network = tflearn.conv_2d(network, 96, 11, strides=4, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.conv_2d(network, 256, 5, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.conv_2d(network, 384, 3, activation='relu')
network = tflearn.conv_2d(network, 384, 3, activation='relu')
network = tflearn.conv_2d(network, 256, 3, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.fully_connected(network, 4096, activation='tanh')
network = tflearn.dropout(network, 0.5)
network = tflearn.fully_connected(network, 4096, activation='tanh')
network = tflearn.dropout(network, 0.5)
network = tflearn.fully_connected(network, 7, activation='softmax')
network = tflearn.regression(network, optimizer='momentum',
                             loss='categorical_crossentropy')
model = tflearn.DNN(network,checkpoint_path='./emo_net/checkpoint_emo_net',max_checkpoints=3)
# Train; validation_set=0.01 carves 1% off the training data for validation.
model.fit(ds_X_reshape, ds.Y, n_epoch=1000, show_metric=True, shuffle=True, validation_set=0.01, batch_size=64, snapshot_step=200, snapshot_epoch=False, run_id='emo_net')
model.save('./emo_net/emotion_recog.tflearn')
# Predictions over the full training set (used for the tally below).
pred = model.predict(ds_X_reshape)
def onehot_to_dense(array):
    """Return the index of the largest entry of a one-hot / probability vector."""
    return np.argmax(array)
# Tally how many predictions fall into each of the seven emotion classes.
distribution = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for prediction in pred:
    distribution[onehot_to_dense(prediction)] += 1
print(distribution)
| 37.248322
| 170
| 0.718739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,041
| 0.367748
|
0ca67ab44bcdd00d832c2b6369f179cd48b4cfb9
| 1,786
|
py
|
Python
|
strivial/__init__.py
|
watsosc/strivial
|
aa7efe889227650d8f39b247fd0208deb71d246b
|
[
"Apache-2.0"
] | null | null | null |
strivial/__init__.py
|
watsosc/strivial
|
aa7efe889227650d8f39b247fd0208deb71d246b
|
[
"Apache-2.0"
] | null | null | null |
strivial/__init__.py
|
watsosc/strivial
|
aa7efe889227650d8f39b247fd0208deb71d246b
|
[
"Apache-2.0"
] | null | null | null |
import os
import logging
from logging import Formatter, FileHandler
from flask import Flask
def create_app(test_config=False):
    """Application factory: build and fully wire the Flask app.

    test_config: when True, load config.TestingConfig; otherwise load the
    config object named by the APP_SETTINGS environment variable.
    Returns the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=True)
    if test_config:
        app.config.from_object('config.TestingConfig')
    else:
        # APP_SETTINGS must name an importable config object (raises KeyError if unset).
        app.config.from_object(os.environ['APP_SETTINGS'])
    # set up logging if applicable (file logging only outside debug mode)
    if not app.debug:
        file_handler = FileHandler('error.log')
        file_handler.setFormatter(
            Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
        )
        app.logger.setLevel(logging.INFO)
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.info('errors')
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        # directory already exists (or cannot be created); proceed either way
        pass
    # register the database
    from strivial.database import db
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    # required for database migration
    # NOTE(review): `migrate`/`manager` locals are unused after registration --
    # presumably kept so flask-migrate commands are registered; confirm needed.
    from flask_script import Manager
    from flask_migrate import Migrate, MigrateCommand
    migrate = Migrate(app, db, compare_type=True)
    manager = Manager(app)
    manager.add_command('db', MigrateCommand)
    # register the strava integration
    from strivial.strava import strava_integration
    app.strava = strava_integration.StravaIntegration()
    # apply the blueprints
    from strivial.blueprints import auth, about, errors, strava_rides
    app.register_blueprint(auth.bp)
    app.register_blueprint(errors.bp)
    app.register_blueprint(about.bp)
    app.register_blueprint(strava_rides.bp)
    app.add_url_rule('/', endpoint='home')
    # import template filters inside an app context so they register here
    with app.app_context():
        from strivial.util import filters
    return app
| 29.766667
| 92
| 0.703807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.192609
|
0cab184754b8b6e990d3f1607a9b78c8dc5d5f41
| 7,421
|
py
|
Python
|
model_functions.py
|
blowe615/flower_classifier
|
7cdb6ebe292f90ae711f050ff24fb68e3a9570c1
|
[
"MIT"
] | 1
|
2019-08-29T04:24:22.000Z
|
2019-08-29T04:24:22.000Z
|
model_functions.py
|
blowe615/flower_classifier
|
7cdb6ebe292f90ae711f050ff24fb68e3a9570c1
|
[
"MIT"
] | null | null | null |
model_functions.py
|
blowe615/flower_classifier
|
7cdb6ebe292f90ae711f050ff24fb68e3a9570c1
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
#from helper_functions import process_image
class DeepNetworkClassifier(nn.Module):
    """Single-hidden-layer classifier head for a pretrained network.

    Architecture: linear -> ReLU -> dropout -> linear -> log-softmax.
    """
    def __init__(self, input_units, output_units, hidden_units,p_drop=0.2):
        '''
        Build the classifier.

        Inputs
        ------
        input_units: int, size of the feature vector fed into the classifier
        output_units: int, number of output classes (e.g. 102 flower classes)
        hidden_units: int, the number of hidden units in the hidden layer
        p_drop: float, dropout probability applied after the hidden layer
        '''
        super().__init__()
        # Hidden layer sized by the feeding architecture's feature width
        self.input = nn.Linear(input_units,hidden_units)
        # Output layer, one unit per class
        self.output = nn.Linear(hidden_units,output_units)
        # Define level of dropout
        self.dropout = nn.Dropout(p=p_drop)

    def forward(self, x):
        '''
        Forward pass; returns log-probabilities of shape (batch, output_units).
        '''
        # Apply ReLU activation function and dropout to the hidden layer
        x = F.relu(self.input(x))
        x = self.dropout(x)
        # Apply Log Softmax function to output layer
        x = self.output(x)
        x = F.log_softmax(x,dim=1)
        return x
def train(model, trainloader, validloader, criterion, optimizer, epochs, device):
    '''
    Train the model, printing training/validation metrics every 10 steps.

    Inputs
    -------
    model: torchvision model
    trainloader: PyTorch dataloader containing the training dataset
    validloader: PyTorch dataloader containing the validation dataset
    criterion: PyTorch criterion (expects log-probabilities, e.g. NLLLoss)
    optimizer: PyTorch optimizer with learning rate
    epochs: int, number of passes of the training data through the network
    device: 'cuda' if GPU is specified, otherwise 'cpu'
    '''
    # Initialize some counters
    steps = 0
    running_loss = 0
    print_every = 10  # validate/report every 10 training batches
    for epoch in range(epochs):
        for images, labels in trainloader:
            steps += 1
            # Send the images and labels to the device
            images, labels = images.to(device), labels.to(device)
            # Zero gradients for this step
            optimizer.zero_grad()
            # Perform a forward pass on the model
            log_ps = model.forward(images)
            # Calculate loss
            loss = criterion(log_ps, labels)
            # Backpropagate error
            loss.backward()
            # Take next step
            optimizer.step()
            # Aggregate loss
            running_loss += loss.item()
            # Display results
            if steps % print_every == 0:
                # Set model to evaluate mode (disables dropout)
                model.eval()
                # Initialize the validation loss and accuracy
                valid_loss = 0
                valid_acc = 0
                # Run validation dataset through the network (no gradients)
                with torch.no_grad():
                    for images, labels in validloader:
                        # Send the images and labels to the device
                        images_v, labels_v = images.to(device), labels.to(device)
                        # Perform forward pass with validation images
                        log_ps_valid = model.forward(images_v)
                        # Calculate validation loss and aggregate
                        # (accumulates a tensor; safe here since grads are off)
                        loss = criterion(log_ps_valid, labels_v)
                        valid_loss += loss
                        # Calculate validation accuracy:
                        # probabilities from the log-probabilities
                        ps = torch.exp(log_ps_valid)
                        # Determine the top probability per sample
                        top_p, top_class = ps.topk(1, dim=1)
                        # Compare top_class to label
                        valid_equality = top_class == labels_v.view(*top_class.shape)
                        # Aggregate per-batch mean correctness
                        valid_acc += torch.mean(valid_equality.type(torch.FloatTensor)).item()
                # Print results (averages over batches)
                print(f"Epoch {epoch+1}/{epochs}.. "
                      f"Training Loss: {running_loss/print_every:.3f}.. "
                      f"Validation Loss: {valid_loss/len(validloader):.3f}.. "
                      f"Validation Accuracy: {valid_acc/len(validloader):.3f}")
                # Reset counter
                running_loss = 0
                # Return model to training mode to calculate grads
                model.train()
def test(model, testloader, device):
    '''
    Evaluate and print classification accuracy on the test dataset.

    Inputs
    -------
    model: torchvision model
    testloader: PyTorch dataloader containing the testing dataset
    device: 'cuda' if GPU is specified, otherwise 'cpu'
    '''
    # Set model to evaluate mode (disables dropout)
    model.eval()
    # Initialize the testing accuracy
    test_acc = 0
    # Run test dataset through the network without tracking gradients
    with torch.no_grad():
        for images, labels in testloader:
            # Send the images and labels to the device
            images_t, labels_t = images.to(device), labels.to(device)
            # Perform forward pass with test images
            log_ps_test = model.forward(images_t)
            # Calculate test accuracy:
            # probabilities from the log-probabilities
            ps_test = torch.exp(log_ps_test)
            # Determine the top prediction per sample
            top_p, top_class = ps_test.topk(1, dim=1)
            # Compare top_class to label
            test_equality = top_class == labels_t.view(*top_class.shape)
            # Aggregate per-batch mean correctness
            test_acc += torch.mean(test_equality.type(torch.FloatTensor)).item()
    # Print results (average over batches; exact only for equal batch sizes)
    print("Test Accuracy: {:.3f}".format(test_acc/len(testloader)))
    # Return model to training mode to calculate grads
    model.train();
def predict(image, model, topk, device):
    '''
    Predict the top-k classes of an image using a trained deep learning model.

    Inputs
    ------
    image: numpy array, processed for PyTorch (224x224, normalized, color
        dimension in 3rd channel); float64, since the model is cast to double
    model: torchvision model exposing a `class_to_idx` mapping attribute
    topk: int, number of classes to return
    device: 'cuda' if GPU is specified, otherwise 'cpu'

    Returns (probabilities, labels): two lists of length topk, sorted by
    descending probability.
    '''
    # Convert the numpy image to a batched tensor of shape (1, ...)
    image_tensor = torch.from_numpy(image).unsqueeze(0)
    # Send the model to the device and cast it to double to match the
    # float64 numpy input (the old comment here wrongly said "evaluate mode")
    model.to(device)
    model.to(torch.double)
    # Evaluate mode: disable dropout for deterministic predictions
    model.eval()
    # Run the image through the network without tracking gradients
    with torch.no_grad():
        image_tensor = image_tensor.to(device)
        # Forward pass -> log-probabilities -> probabilities
        log_ps = model.forward(image_tensor)
        ps = torch.exp(log_ps)
        # Determine the top k probabilities and their class indices
        top_p, top_class = ps.topk(topk, dim=1)
    # Invert class_to_idx once (O(C)) instead of re-scanning it for each of
    # the top-k indices as before (O(topk * C)).
    # NOTE: assumes class_to_idx is one-to-one, as it is for torchvision
    # ImageFolder-style datasets.
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    labels = [idx_to_class[i] for i in top_class.tolist()[0]]
    # Return model to train mode
    model.train()
    return top_p.tolist()[0], labels
| 38.252577
| 107
| 0.603153
| 1,238
| 0.166824
| 0
| 0
| 0
| 0
| 0
| 0
| 3,541
| 0.477159
|
0cab395492740b9b3d338ab6d9a913dcbe6912e1
| 1,327
|
py
|
Python
|
src/pages/random.py
|
jojo935/Kemono2
|
bdfaf0ab2dd3c2c4a04805feea8e9fb6193cbd9b
|
[
"BSD-3-Clause"
] | null | null | null |
src/pages/random.py
|
jojo935/Kemono2
|
bdfaf0ab2dd3c2c4a04805feea8e9fb6193cbd9b
|
[
"BSD-3-Clause"
] | null | null | null |
src/pages/random.py
|
jojo935/Kemono2
|
bdfaf0ab2dd3c2c4a04805feea8e9fb6193cbd9b
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint, redirect, url_for, g
from ..utils.utils import make_cache_key
from ..internals.cache.redis import get_conn
from ..internals.cache.flask_cache import cache
from ..internals.database.database import get_cursor
from ..lib.artist import get_artist, get_random_artist_keys
from ..lib.post import get_post, get_random_posts_keys
from ..lib.ab_test import get_ab_variant
from ..utils.utils import get_value
import random as rand
# Blueprint providing the /posts/random and /artists/random redirects.
# NOTE(review): the name `random` shadows the stdlib module; the stdlib is
# imported as `rand` above, presumably for this reason.
random = Blueprint('random', __name__)
@random.route('/posts/random')
def random_post():
    """Redirect to a randomly chosen post's page, or back when none exist."""
    chosen = get_random_post()
    if chosen is None:
        return redirect('back')
    return redirect(url_for(
        'post.get',
        service=chosen['service'],
        artist_id=chosen['user'],
        post_id=chosen['id'],
    ))
@random.route('/artists/random')
def random_artist():
    """Redirect to a randomly chosen artist's page, or back when none exist."""
    chosen = get_random_artist()
    if chosen is None:
        return redirect('back')
    return redirect(url_for(
        'artists.get',
        service=chosen['service'],
        artist_id=chosen['id'],
    ))
def get_random_post():
    """Pick one entry from up to 1000 random post keys; None when empty."""
    candidates = get_random_posts_keys(1000)
    return rand.choice(candidates) if candidates else None
def get_random_artist():
    """Pick one entry from up to 1000 random artist keys; None when empty."""
    candidates = get_random_artist_keys(1000)
    return rand.choice(candidates) if candidates else None
| 30.159091
| 116
| 0.699322
| 0
| 0
| 0
| 0
| 501
| 0.377543
| 0
| 0
| 107
| 0.080633
|
0cab46908744c082e44a614483e84981deda1786
| 4,258
|
py
|
Python
|
rljax/algorithm/tqc.py
|
kew96/rljax
|
f80998b7698e87ee9f81b159ba33d619e4cf77c1
|
[
"MIT"
] | 56
|
2020-10-01T02:55:47.000Z
|
2022-03-07T08:00:25.000Z
|
rljax/algorithm/tqc.py
|
kew96/rljax
|
f80998b7698e87ee9f81b159ba33d619e4cf77c1
|
[
"MIT"
] | 4
|
2020-10-02T03:52:29.000Z
|
2021-10-02T03:59:00.000Z
|
rljax/algorithm/tqc.py
|
kew96/rljax
|
f80998b7698e87ee9f81b159ba33d619e4cf77c1
|
[
"MIT"
] | 10
|
2020-12-21T08:21:02.000Z
|
2022-01-11T03:36:20.000Z
|
from functools import partial
from typing import List
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from rljax.algorithm.sac import SAC
from rljax.network import ContinuousQuantileFunction, StateDependentGaussianPolicy
from rljax.util import quantile_loss
class TQC(SAC):
    """Truncated Quantile Critics (TQC), built on top of SAC.

    Each of `num_critics` critics predicts `num_quantiles` return quantiles;
    when forming the TD target, the pooled quantiles are sorted and the
    highest `num_quantiles_to_drop` (per critic, in aggregate) are dropped
    to control overestimation bias.
    """
    name = "TQC"

    def __init__(
        self,
        num_agent_steps,
        state_space,
        action_space,
        seed,
        max_grad_norm=None,
        gamma=0.99,
        nstep=1,
        num_critics=5,
        buffer_size=10 ** 6,
        use_per=False,
        batch_size=256,
        start_steps=10000,
        update_interval=1,
        tau=5e-3,
        fn_actor=None,
        fn_critic=None,
        lr_actor=3e-4,
        lr_critic=3e-4,
        lr_alpha=3e-4,
        units_actor=(256, 256),
        units_critic=(512, 512, 512),
        log_std_min=-20.0,
        log_std_max=2.0,
        d2rl=False,
        num_quantiles=25,
        num_quantiles_to_drop=0,
    ):
        # D2RL variant (deeper nets with state concatenated into each layer).
        if d2rl:
            self.name += "-D2RL"

        # Default critic: ensemble of quantile networks over (state, action).
        if fn_critic is None:

            def fn_critic(s, a):
                return ContinuousQuantileFunction(
                    num_critics=num_critics,
                    hidden_units=units_critic,
                    num_quantiles=num_quantiles,
                    d2rl=d2rl,
                )(s, a)

        # Default actor: state-dependent Gaussian policy, as in SAC.
        if fn_actor is None:

            def fn_actor(s):
                return StateDependentGaussianPolicy(
                    action_space=action_space,
                    hidden_units=units_actor,
                    log_std_min=log_std_min,
                    log_std_max=log_std_max,
                    d2rl=d2rl,
                )(s)

        super(TQC, self).__init__(
            num_agent_steps=num_agent_steps,
            state_space=state_space,
            action_space=action_space,
            seed=seed,
            max_grad_norm=max_grad_norm,
            gamma=gamma,
            nstep=nstep,
            num_critics=num_critics,
            buffer_size=buffer_size,
            use_per=use_per,
            batch_size=batch_size,
            start_steps=start_steps,
            update_interval=update_interval,
            tau=tau,
            fn_actor=fn_actor,
            fn_critic=fn_critic,
            lr_actor=lr_actor,
            lr_critic=lr_critic,
            lr_alpha=lr_alpha,
        )

        # Quantile fraction midpoints: (i + 0.5) / N for i in [0, N), shape (1, N).
        self.cum_p_prime = jnp.expand_dims((jnp.arange(0, num_quantiles, dtype=jnp.float32) + 0.5) / num_quantiles, 0)
        self.num_quantiles = num_quantiles
        # Total quantiles kept across the ensemble after truncation.
        self.num_quantiles_target = (num_quantiles - num_quantiles_to_drop) * num_critics

    @partial(jax.jit, static_argnums=0)
    def _calculate_value(
        self,
        params_critic: hk.Params,
        state: np.ndarray,
        action: np.ndarray,
    ) -> jnp.ndarray:
        # Concatenate every critic's quantiles along axis 1:
        # (batch, num_critics * num_quantiles).
        return jnp.concatenate(self._calculate_value_list(params_critic, state, action), axis=1)

    @partial(jax.jit, static_argnums=0)
    def _calculate_target(
        self,
        params_critic_target: hk.Params,
        log_alpha: jnp.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        next_state: np.ndarray,
        next_action: jnp.ndarray,
        next_log_pi: jnp.ndarray,
    ) -> jnp.ndarray:
        # Pool next-state quantiles, sort, and keep only the lowest
        # num_quantiles_target (truncation of the most optimistic ones).
        next_quantile = self._calculate_value(params_critic_target, next_state, next_action)
        next_quantile = jnp.sort(next_quantile)[:, : self.num_quantiles_target]
        # Entropy-regularized soft value, as in SAC.
        next_quantile -= jnp.exp(log_alpha) * self._calculate_log_pi(next_action, next_log_pi)
        # Bellman target; stop_gradient keeps target fixed during the update.
        return jax.lax.stop_gradient(reward + (1.0 - done) * self.discount * next_quantile)

    @partial(jax.jit, static_argnums=0)
    def _calculate_loss_critic_and_abs_td(
        self,
        quantile_list: List[jnp.ndarray],
        target: jnp.ndarray,
        weight: np.ndarray,
    ) -> jnp.ndarray:
        # Huber quantile-regression loss summed over critics, then averaged.
        loss_critic = 0.0
        for quantile in quantile_list:
            loss_critic += quantile_loss(target[:, None, :] - quantile[:, :, None], self.cum_p_prime, weight, "huber")
        loss_critic /= self.num_critics * self.num_quantiles
        # Per-sample mean absolute TD error of the first critic (for PER).
        abs_td = jnp.abs(target[:, None, :] - quantile_list[0][:, :, None]).mean(axis=1).mean(axis=1, keepdims=True)
        return loss_critic, jax.lax.stop_gradient(abs_td)
| 32.015038
| 118
| 0.59488
| 3,970
| 0.932363
| 0
| 0
| 1,601
| 0.375998
| 0
| 0
| 19
| 0.004462
|
0cac991dc2d4d32121af9b2da9f1960fba266638
| 917
|
py
|
Python
|
benchmark_constructor/file_normalizers/ContactSelectFileNormalizer.py
|
Kortemme-Lab/benchmark_set_construct
|
ee6c9e097ff49d370936b41f102ada006fb4441a
|
[
"MIT"
] | null | null | null |
benchmark_constructor/file_normalizers/ContactSelectFileNormalizer.py
|
Kortemme-Lab/benchmark_set_construct
|
ee6c9e097ff49d370936b41f102ada006fb4441a
|
[
"MIT"
] | null | null | null |
benchmark_constructor/file_normalizers/ContactSelectFileNormalizer.py
|
Kortemme-Lab/benchmark_set_construct
|
ee6c9e097ff49d370936b41f102ada006fb4441a
|
[
"MIT"
] | null | null | null |
import os
from .FileNormalizer import FileNormalizer
class ContactSelectFileNormalizer(FileNormalizer):
    '''Writes a PyMOL script that selects residues which have contacts
    to asymmetric units.
    '''
    def __init__(self):
        pass

    def normalize_one_file(self, path, crystal_contact_res_set):
        # Build the selection command, one " res N and chain C" clause per
        # (chain, residue) pair.
        # NOTE(review): clauses are concatenated with no 'or' between them --
        # confirm this matches the selection syntax PyMOL expects here.
        cmd = 'select crystal_contact_res,'
        cmd += ''.join(' res {0} and chain {1}'.format(res[1], res[0])
                       for res in crystal_contact_res_set)
        with open(path, 'w') as script:
            script.write(cmd)

    def apply(self, info_dict):
        # Emit one <name>_show_crystal_contact.pml script next to each
        # candidate structure that carries a contact-residue set.
        for structure_dict in info_dict['candidate_list']:
            directory = os.path.dirname(structure_dict['path'])
            script_name = '.'.join([structure_dict['name']+'_show_crystal_contact', 'pml'])
            if 'crystal_contact_res_set' in structure_dict.keys():
                self.normalize_one_file(os.path.join(directory, script_name),
                                        structure_dict['crystal_contact_res_set'])
| 31.62069
| 94
| 0.691385
| 847
| 0.923664
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.31843
|
0cac9d083e4dfd2daccd29d3da4102e79f646255
| 1,919
|
py
|
Python
|
neurovault/apps/statmaps/tests/test_qa.py
|
abitrolly/NeuroVault
|
e62bc65c8e0e58bff55bb9fa7cf11193dc54d734
|
[
"MIT"
] | 68
|
2015-02-07T06:09:49.000Z
|
2022-03-03T22:58:33.000Z
|
neurovault/apps/statmaps/tests/test_qa.py
|
abitrolly/NeuroVault
|
e62bc65c8e0e58bff55bb9fa7cf11193dc54d734
|
[
"MIT"
] | 436
|
2015-01-01T01:01:13.000Z
|
2021-11-07T18:24:00.000Z
|
neurovault/apps/statmaps/tests/test_qa.py
|
abitrolly/NeuroVault
|
e62bc65c8e0e58bff55bb9fa7cf11193dc54d734
|
[
"MIT"
] | 60
|
2015-01-10T23:31:26.000Z
|
2021-08-10T06:39:57.000Z
|
import os
import nibabel as nb
import numpy as np
from django.test import TestCase
from neurovault.apps.statmaps.models import BaseStatisticMap
from neurovault.apps.statmaps.utils import is_thresholded, infer_map_type
class QATest(TestCase):
    """QA checks: thresholding detection and statistical-map-type inference."""

    def setUp(self):
        # Load reference images bundled with the app and the test data.
        this_path = os.path.abspath(os.path.dirname(__file__))
        self.brain = nb.load(os.path.join(this_path, "../static", "anatomical", "MNI152.nii.gz"))
        self.roi_map = nb.load(os.path.join(this_path, "test_data", "statmaps", "WA3.nii.gz"))
        self.parcellation = nb.load(os.path.join(this_path, "test_data", "TTatlas.nii.gz"))
        # We will fill in brain mask with this percentage of randomly placed values
        self.ratios = [0.0, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.96, 0.98]
        self.thresholded = [False, False, False, False, False, False, False, False, False, True, True]

    def testThresholded(self):
        """is_thresholded should report ~p zeroed voxels, flagging only high p."""
        for p, t in zip(self.ratios, self.thresholded):
            empty_data = np.ones(self.brain.shape)
            if p != 0.0:
                # Zero a random subset covering fraction p of all voxels.
                number_voxels = int(np.floor(p * empty_data.size))
                random_idx = np.random.choice(range(empty_data.size), number_voxels, replace=False)
                empty_data[np.unravel_index(random_idx, empty_data.shape)] = 0
            # NOTE(review): get_affine()/get_header() are deprecated in newer
            # nibabel (use .affine/.header) -- kept for the pinned version.
            empty_nii = nb.Nifti1Image(empty_data, affine=self.brain.get_affine(), header=self.brain.get_header())
            is_thr, ratio_bad = is_thresholded(nii_obj=empty_nii)
            # BUG FIX: was a Python-2-only `print` statement; the call form
            # below behaves identically on both Python 2 and 3.
            print("Zeroed %s of values, is_thresholded returns [%s:%s]" % (p, is_thr, ratio_bad))
            self.assertAlmostEqual(p, ratio_bad, delta=0.001)
            # assertEquals is a deprecated alias of assertEqual
            self.assertEqual(t, is_thr)

    def testInferMapType(self):
        """Known inputs should map to the expected BaseStatisticMap types."""
        self.assertEqual(infer_map_type(self.roi_map), BaseStatisticMap.R)
        self.assertEqual(infer_map_type(self.parcellation), BaseStatisticMap.Pa)
        self.assertEqual(infer_map_type(self.brain), BaseStatisticMap.OTHER)
| 50.5
| 112
| 0.684211
| 1,697
| 0.884315
| 0
| 0
| 0
| 0
| 0
| 0
| 226
| 0.11777
|
0cad53d938be9fc089dc7d7cacb7515f952a2770
| 1,758
|
py
|
Python
|
src/crawler/input_data/spiders/bitcointalk.py
|
HofmannZ/global-ai-hackathon--truth-coin
|
9f544cdb05de0811796d2465fba64875ee77cdab
|
[
"MIT"
] | 5
|
2017-06-24T22:54:13.000Z
|
2020-02-13T17:23:12.000Z
|
src/crawler/input_data/spiders/bitcointalk.py
|
HofmannZ/global-ai-hackathon--truth-coin
|
9f544cdb05de0811796d2465fba64875ee77cdab
|
[
"MIT"
] | 2
|
2017-06-24T12:07:22.000Z
|
2017-06-25T18:12:24.000Z
|
src/crawler/input_data/spiders/bitcointalk.py
|
HofmannZ/global-ai-hackathon--truth-coin
|
9f544cdb05de0811796d2465fba64875ee77cdab
|
[
"MIT"
] | 1
|
2017-08-02T12:37:52.000Z
|
2017-08-02T12:37:52.000Z
|
# -*- coding: utf-8 -*-
import scrapy
class BitcointalkSpider(scrapy.Spider):
    """Crawls bitcointalk.org board 1.0: scrapes the first post of every
    topic and follows both board and topic pagination."""
    name = 'bitcointalk'
    allowed_domains = ['bitcointalk.org']
    start_urls = [
        'https://bitcointalk.org/index.php?board=1.0',
    ]

    def parse(self, response):
        """Parse one board page: queue every topic, then the next board page."""
        # The last bordercolor table on the page holds the topic list.
        topics = response.css('div.tborder table.bordercolor')[-1]
        # BUG FIX: .css() returns a SelectorList, never None, so the previous
        # `if ... is not None` guard was always true. Iterating the (possibly
        # empty) list directly has the same effect without the dead check.
        for link in topics.css('span a::attr(href)'):
            url = link.extract()
            yield scrapy.Request(url, callback=self.parseTopic)
        prevnext = response.css('td#toppages span.prevnext')[-1]
        linkContent = prevnext.css('a::text').extract_first()
        link = prevnext.css('a::attr(href)')
        print(linkContent)
        # The "next page" arrow is rendered as a raquo character.
        if linkContent == '»':
            url = link.extract_first()
            yield scrapy.Request(url, callback=self.parse)

    def parseTopic(self, response):
        """Scrape the first post of a topic page and follow topic pagination."""
        for post in response.css('form#quickModForm tr:first-of-type'):
            yield {
                'author': post.css('td.poster_info b a::text').extract_first(),
                'messageNumber': post.css('a.message_number::text').extract_first(),
                'title': post.css('div.subject a::text').extract_first(),
                'date': post.css('td.td_headerandpost div.smalltext::text').extract_first(),
                'text': post.css('div.post::text').extract(),
            }
        prevnext = response.css('td.middletext span.prevnext')
        linkContent = prevnext.css('a::text').extract_first()
        link = prevnext.css('a::attr(href)')
        print(linkContent)
        if linkContent == '»':
            url = link.extract_first()
            yield scrapy.Request(url, callback=self.parseTopic)
| 33.807692
| 92
| 0.585324
| 1,719
| 0.976705
| 1,521
| 0.864205
| 0
| 0
| 0
| 0
| 493
| 0.280114
|
0cae04c95140cd33bca1362795247caf69458f47
| 9,770
|
py
|
Python
|
fugue/column/functions.py
|
kvnkho/fugue
|
5f3fe8f1fb72632e5b5987d720c1d1ef546e4682
|
[
"Apache-2.0"
] | 547
|
2020-09-22T08:30:14.000Z
|
2022-03-30T23:11:05.000Z
|
fugue/column/functions.py
|
kvnkho/fugue
|
5f3fe8f1fb72632e5b5987d720c1d1ef546e4682
|
[
"Apache-2.0"
] | 196
|
2020-09-22T23:08:26.000Z
|
2022-03-26T21:22:48.000Z
|
fugue/column/functions.py
|
kvnkho/fugue
|
5f3fe8f1fb72632e5b5987d720c1d1ef546e4682
|
[
"Apache-2.0"
] | 37
|
2020-09-23T17:05:00.000Z
|
2022-03-29T18:26:52.000Z
|
from typing import Any, Optional
import pyarrow as pa
from fugue.column.expressions import (
ColumnExpr,
_FuncExpr,
_to_col,
function,
)
from triad import Schema
def coalesce(*args: Any) -> ColumnExpr:
"""SQL ``COALESCE`` function
:param args: If a value is not :class:`~fugue.column.expressions.ColumnExpr`
then it's converted to a literal column by
:func:`~fugue.column.expressions.col`
.. note::
this function can infer neither type nor alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.coalesce(col("a"), col("b")+col("c"), 1)
"""
return function("COALESCE", *[_to_col(x) for x in args])
def min(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MIN`` function (aggregation)
:param col: the column to find min
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.min(col("a")) # CAST(MIN(a) AS double) AS a
f.min(-col("a")) # CAST(MIN(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.min(col("a")+1)
f.min(col("a")+col("b"))
# you can specify explicitly
# CAST(MIN(a+b) AS int) AS x
f.min(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MIN", col)
def max(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MAX`` function (aggregation)
:param col: the column to find max
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.max(col("a")) # CAST(MAX(a) AS double) AS a
f.max(-col("a")) # CAST(MAX(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.max(col("a")+1)
f.max(col("a")+col("b"))
# you can specify explicitly
# CAST(MAX(a+b) AS int) AS x
f.max(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MAX", col)
def count(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT`` function (aggregation)
:param col: the column to find count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count(col("*")) # COUNT(*)
f.count(col("a")) # COUNT(a) AS a
# you can specify explicitly
# CAST(COUNT(a) AS double) AS a
f.count(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col)
def count_distinct(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT DISTINCT`` function (aggregation)
:param col: the column to find distinct element count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count_distinct(col("*")) # COUNT(DISTINCT *)
f.count_distinct(col("a")) # COUNT(DISTINCT a) AS a
# you can specify explicitly
# CAST(COUNT(DISTINCT a) AS double) AS a
f.count_distinct(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col, arg_distinct=True)
def avg(col: ColumnExpr) -> ColumnExpr:
"""SQL ``AVG`` function (aggregation)
:param col: the column to find average
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.avg(col("a")) # AVG(a) AS a
# you can specify explicitly
# CAST(AVG(a) AS double) AS a
f.avg(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("AVG", col)
def sum(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``SUM`` function (aggregation)
:param col: the column to find sum
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.sum(col("a")) # SUM(a) AS a
# you can specify explicitly
# CAST(SUM(a) AS double) AS a
f.sum(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("SUM", col)
def first(col: ColumnExpr) -> ColumnExpr:
"""SQL ``FIRST`` function (aggregation)
:param col: the column to find first
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.first(col("a")) # CAST(FIRST(a) AS double) AS a
f.first(-col("a")) # CAST(FIRST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.first(col("a")+1)
f.first(col("a")+col("b"))
# you can specify explicitly
# CAST(FIRST(a+b) AS int) AS x
f.first(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("FIRST", col)
def last(col: ColumnExpr) -> ColumnExpr:
"""SQL ``LAST`` function (aggregation)
:param col: the column to find last
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.last(col("a")) # CAST(LAST(a) AS double) AS a
f.last(-col("a")) # CAST(LAST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.last(col("a")+1)
f.last(col("a")+col("b"))
# you can specify explicitly
# CAST(LAST(a+b) AS int) AS x
f.last(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("LAST", col)
def is_agg(column: Any) -> bool:
"""Check if a column contains aggregation operation
:param col: the column to check
:return: whether the column is :class:`~fugue.column.expressions.ColumnExpr`
and contains aggregation operations
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
assert not f.is_agg(1)
assert not f.is_agg(col("a"))
assert not f.is_agg(col("a")+lit(1))
assert f.is_agg(f.max(col("a")))
assert f.is_agg(-f.max(col("a")))
assert f.is_agg(f.max(col("a")+1))
assert f.is_agg(f.max(col("a"))+f.min(col("a"))))
"""
if isinstance(column, _UnaryAggFuncExpr):
return True
if isinstance(column, _FuncExpr):
return any(is_agg(x) for x in column.args) or any(
is_agg(x) for x in column.kwargs.values()
)
return False
class _UnaryAggFuncExpr(_FuncExpr):
def __init__(self, func: str, col: ColumnExpr, arg_distinct: bool = False):
super().__init__(func, col, arg_distinct=arg_distinct)
def infer_alias(self) -> ColumnExpr:
return (
self
if self.output_name != ""
else self.alias(self.args[0].infer_alias().output_name)
)
def _copy(self) -> _FuncExpr:
return _UnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
class _SameTypeUnaryAggFuncExpr(_UnaryAggFuncExpr):
def _copy(self) -> _FuncExpr:
return _SameTypeUnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
def infer_type(self, schema: Schema) -> Optional[pa.DataType]:
return self.as_type or self.args[0].infer_type(schema)
| 26.334232
| 80
| 0.572467
| 771
| 0.078915
| 0
| 0
| 0
| 0
| 0
| 0
| 7,385
| 0.755885
|
0cae7bc6d95d0a5148d10292b4933dd1fd93753f
| 1,968
|
py
|
Python
|
chapters/10/src/biglittle/entity/user.py
|
PacktPublishing/-Learn-MongoDB-4.0
|
011f14fc66c42498dcbf07e64e760b5e9f420243
|
[
"MIT"
] | 13
|
2020-08-06T17:05:50.000Z
|
2021-11-08T13:12:11.000Z
|
chapters/10/src/biglittle/entity/user.py
|
PacktPublishing/-Learn-MongoDB-4.0
|
011f14fc66c42498dcbf07e64e760b5e9f420243
|
[
"MIT"
] | 4
|
2020-09-20T05:30:39.000Z
|
2021-04-01T08:35:40.000Z
|
chapters/10/src/biglittle/entity/user.py
|
PacktPublishing/-Learn-MongoDB-4.0
|
011f14fc66c42498dcbf07e64e760b5e9f420243
|
[
"MIT"
] | 12
|
2020-08-07T06:45:43.000Z
|
2021-12-08T06:58:23.000Z
|
# biglittle.entity.user
# tell python where to find module source code
import os,sys
sys.path.append(os.path.realpath('../../../src'))
from biglittle.entity.base import Base
class Name(Base) :
    """Embedded document: the components of a person's name."""
    # prefix prepended to HTML form field names for this sub-document
    formFieldPrefix = 'name_'
    # default (empty) value for each stored name component
    fields = {
        'title' : '',
        'first' : '',
        'middle' : '',
        'last' : '',
        'suffix' : ''
    }
class Location(Base) :
    """Embedded document: a postal address plus geographic coordinates."""
    # prefix prepended to HTML form field names for this sub-document
    formFieldPrefix = 'location_'
    fields = {
        'streetAddress' : '',
        'buildingName' : '',
        'floor' : '',
        'roomAptCondoFlat' : '',
        'city' : '',
        'stateProvince' : '',
        'locality' : '',
        'country' : '',
        'postalCode' : '',
        'latitude' : '',
        'longitude' : ''
    }
class Contact(Base) :
    """Primary contact info sub-document (single email/phone, socMedia map)."""
    formFieldPrefix = 'contact_'
    fields = {
        'email' : '',
        'phone' : '',
        'socMedia' : {}
    }
class OtherContact(Base) :
    """Secondary contact info: lists of additional emails/phones/social handles."""
    fields = {
        'emails' : [],
        'phoneNumbers' : [],
        'socMedias' : []
    }
class OtherInfo(Base) :
    """Miscellaneous demographic fields."""
    fields = {
        'gender' : '',
        'dateOfBirth' : ''
    }
class LoginInfo(Base) :
    """Credentials sub-document (username, OAuth2 token, password)."""
    fields = {
        'username' : '',
        'oauth2' : '',
        'password' : ''
    }
class User(Base) :
    """Top-level user document aggregating the sub-document entities.

    NOTE(review): the entity instances in `fields` are created once at class
    definition time and are therefore shared class-level objects — confirm
    that biglittle.entity.base.Base copies them per instance.
    """
    fields = {
        '_id' : '',
        'userKey' : '',
        'userType' : '',
        'businessName' : '',
        'name' : Name(),
        'address' : Location(),
        'contact' : Contact(),
        'otherContact' : OtherContact(),
        'otherInfo' : OtherInfo(),
        'login' : LoginInfo()
    }
    # NOTE(review): dict-style access (self[...]) and .get() below assume
    # Base provides __getitem__/get — verify against the base class.
    def getId(self) :
        return self['_id']
    def getKey(self) :
        return self['userKey']
    def getName(self) :
        # Wrap the raw 'name' sub-document in a Name entity.
        return Name(self['name'])
    def getFullName(self) :
        name = self.getName()
        return name.get('first') + ' ' + name.get('last')
| 22.363636
| 57
| 0.427846
| 1,779
| 0.903963
| 0
| 0
| 0
| 0
| 0
| 0
| 564
| 0.286585
|
0caedcb03495a9332700a86dd6b9b7674d0e59ac
| 32
|
py
|
Python
|
gaia-sdk-python/conftest.py
|
leftshiftone/gaia-sdk
|
7e0d1ce054fada8ae154da70b71e8a90347c9f97
|
[
"MIT"
] | null | null | null |
gaia-sdk-python/conftest.py
|
leftshiftone/gaia-sdk
|
7e0d1ce054fada8ae154da70b71e8a90347c9f97
|
[
"MIT"
] | 10
|
2019-11-14T07:55:47.000Z
|
2022-02-26T19:36:45.000Z
|
gaia-sdk-python/conftest.py
|
leftshiftone/gaia-sdk
|
7e0d1ce054fada8ae154da70b71e8a90347c9f97
|
[
"MIT"
] | 2
|
2020-05-12T11:09:53.000Z
|
2020-12-25T14:03:04.000Z
|
# enabled testing relative paths
| 32
| 32
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 1
|
0caeebf4e3ed3af12c71f32665fdf047f4676dd8
| 497
|
py
|
Python
|
backend/app/utils.py
|
dashdashforce/int20h-test-photo-viewer
|
1720ec2c30685eac9d1e5ef9ecf3d389239ee566
|
[
"MIT"
] | null | null | null |
backend/app/utils.py
|
dashdashforce/int20h-test-photo-viewer
|
1720ec2c30685eac9d1e5ef9ecf3d389239ee566
|
[
"MIT"
] | 20
|
2019-02-04T21:57:59.000Z
|
2019-02-10T21:50:17.000Z
|
backend/app/utils.py
|
dashdashforce/int20h-test-photo-viewer
|
1720ec2c30685eac9d1e5ef9ecf3d389239ee566
|
[
"MIT"
] | null | null | null |
from functools import reduce
from itertools import groupby
from operator import add, itemgetter
def merge_records_by(key, combine):
    """Build a two-record merger: keeps `key` as-is, combines other fields."""
    def merge(first, second):
        merged = {}
        for field in first:
            if field == key:
                merged[field] = first[field]
            else:
                merged[field] = combine(first[field], second[field])
        return merged
    return merge
def merge_list_of_records_by(key, combine):
    """Build a function that collapses records sharing the same `key` value.

    The resulting list is ordered by `key`; within each key group the
    records are folded pairwise with `combine` on every non-key field.
    """
    keyprop = itemgetter(key)
    def merge_all(records):
        ordered = sorted(records, key=keyprop)
        return [
            reduce(merge_records_by(key, combine), group)
            for _, group in groupby(ordered, keyprop)
        ]
    return merge_all
| 24.85
| 68
| 0.682093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0cb1135cd1e8235884dc831dd92989b829868853
| 704
|
py
|
Python
|
main/validadorCPF/cpf.py
|
LuizMoreira-py/cadastro
|
606c024f126f99ba943cb68115aef472ea61e57e
|
[
"MIT"
] | null | null | null |
main/validadorCPF/cpf.py
|
LuizMoreira-py/cadastro
|
606c024f126f99ba943cb68115aef472ea61e57e
|
[
"MIT"
] | null | null | null |
main/validadorCPF/cpf.py
|
LuizMoreira-py/cadastro
|
606c024f126f99ba943cb68115aef472ea61e57e
|
[
"MIT"
] | null | null | null |
class Cpf:
    """Brazilian CPF document-number wrapper with basic validation."""

    def __init__(self, documento):
        """Coerce `documento` to str and store it if it passes validation.

        Raises:
            ValueError: if the value does not have exactly 11 characters.
        """
        documento = str(documento)
        if self.cpf_eh_valido(documento):
            self.cpf = documento
        else:
            raise ValueError("CPF inválido!")

    def cpf_eh_valido(self, documento):
        """Return True when `documento` has exactly 11 characters.

        NOTE(review): only the length is checked — neither the CPF check
        digits nor that all characters are numeric. Confirm whether full
        validation is intended before tightening this.
        """
        # Idiomatic form of: if len(...) == 11: return True / else: return False
        return len(documento) == 11

    def cpf_formato(self):
        """Return the CPF formatted as XXX.XXX.XXX-XX."""
        return "{}.{}.{}-{}".format(
            self.cpf[:3],
            self.cpf[3:6],
            self.cpf[6:9],
            self.cpf[9:],
        )
| 25.142857
| 45
| 0.484375
| 704
| 0.998582
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.041135
|
0cb1a795ec8e001999c9a8e30123e5f34107264b
| 1,484
|
py
|
Python
|
backend/venue_scraper.py
|
illicitonion/edfringeplanner
|
ab6d4a3218ee211de5078b3205fd39da1fbdfb50
|
[
"BSD-3-Clause"
] | null | null | null |
backend/venue_scraper.py
|
illicitonion/edfringeplanner
|
ab6d4a3218ee211de5078b3205fd39da1fbdfb50
|
[
"BSD-3-Clause"
] | null | null | null |
backend/venue_scraper.py
|
illicitonion/edfringeplanner
|
ab6d4a3218ee211de5078b3205fd39da1fbdfb50
|
[
"BSD-3-Clause"
] | null | null | null |
from selenium import webdriver
from config import Config
from db import cursor
def get_venues():
    """Yield (number, name, address, (lat, long)) for every Fringe venue.

    Drives a Chrome session through the paginated venue listing and scrapes
    each venue card; the browser is always closed, even on error.
    """
    browser = webdriver.Chrome()
    try:
        browser.get("https://tickets.edfringe.com/venues")
        while True:
            container = browser.find_element_by_class_name("venues")
            for card in container.find_elements_by_class_name("venue-details"):
                title = card.find_element_by_tag_name("h3").text
                details = card.find_elements_by_tag_name("li")
                address = details[0].text
                number_text = details[1].text.split()[-1]
                lat = details[3].get_attribute("data-lat")
                lng = details[3].get_attribute("data-lng")
                yield (int(number_text), title, address, (float(lat), float(lng)))
            # Follow pagination until there is no "Next »" link left.
            next_links = browser.find_elements_by_link_text("Next »")
            if not next_links:
                break
            next_links[0].click()
    finally:
        browser.quit()
# Scrape all venues once, then emit an INSERT statement (printed, not
# executed) for every venue not already present in the database.
venues = tuple(get_venues())
with cursor(Config.from_env()) as cur:
    cur.execute("SELECT edfringe_number FROM venues")
    existing = {row[0] for row in cur.fetchall()}
    for venue in sorted(venues):
        if venue[0] in existing:
            continue
        print(
            cur.mogrify(
                "INSERT INTO venues (edfringe_number, name, address, latlong) VALUES (%s, %s, %s, POINT%s)",
                venue,
            ).decode("utf-8"),
            end=";\n",
        )
| 32.26087
| 108
| 0.574798
| 0
| 0
| 903
| 0.608081
| 0
| 0
| 0
| 0
| 236
| 0.158923
|
0cb24ca44f49e7024594f31e5eea8a2d6ed7620b
| 1,437
|
py
|
Python
|
Source Files/auth.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | 1
|
2021-05-18T21:32:43.000Z
|
2021-05-18T21:32:43.000Z
|
Source Files/auth.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | null | null | null |
Source Files/auth.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | 2
|
2015-05-18T14:52:01.000Z
|
2015-05-19T18:21:51.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This program prompts for a password."""
import authentication
import getpass
def login(username, maxattempts=3):
    """Prompt for a password and authenticate, allowing limited retries.

    Args:
        username (str): Account name to authenticate.
        maxattempts (int): Maximum number of password attempts.

    Returns:
        bool: True if the user authenticated before exhausting the
        allowed attempts, otherwise False.

    Examples:
        >>> login('mike', 4)
        Incorrect username or password. You have 3 attempts.
        Incorrect username or password. You have 2 attempts.
        Incorrect username or password. You have 1 attempts.
        Incorrect username or password. You have 0 attempts.
        False
    """
    auth = False
    user_login = 'Please enter your password: '
    # Same text the original built via implicit string concatenation.
    auth_fail = "Incorrect username or password. You have {} attempts."
    attempt = 1
    while attempt <= maxattempts:
        passwd = getpass.getpass(user_login)
        if authentication.authenticate(username, passwd):
            auth = True
            break
        # Fix: the original used a Python 2 print statement, which is a
        # SyntaxError on Python 3; print() works on both.
        print(auth_fail.format(maxattempts - attempt))
        attempt += 1
    return auth
| 31.933333
| 78
| 0.592206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 962
| 0.66945
|
0cb26210fdbce5c2de9ff66cfbeec89817eff49b
| 267
|
py
|
Python
|
tests/test_utils.py
|
yehzhang/dscraper
|
6fd1a4238795e9eb01b9dd8329a84495a70979d1
|
[
"Apache-2.0"
] | 1
|
2017-08-13T09:50:06.000Z
|
2017-08-13T09:50:06.000Z
|
tests/test_utils.py
|
yehzhang/dscraper
|
6fd1a4238795e9eb01b9dd8329a84495a70979d1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
yehzhang/dscraper
|
6fd1a4238795e9eb01b9dd8329a84495a70979d1
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import logging
import xml.etree.ElementTree as et
import dscraper.utils as utils
logger = logging.getLogger(__name__)
class TestUtils(unittest.TestCase):
    """Unit tests for dscraper.utils (scaffolding only so far)."""
    # XML fixture files available to the tests.
    XML_FILES = (
        'tests/resources/1.xml',
    )
    def setUp(self):
        # No per-test setup needed yet.
        pass
| 14.833333
| 36
| 0.692884
| 128
| 0.479401
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.086142
|
0cb5558fd712cd9664d2840e0dfa1433d69b0ae5
| 7,491
|
py
|
Python
|
CameraCalibration.py
|
lsmanoel/StereoVision
|
22e9a422a217290e6fb2b71afc663db87e530842
|
[
"MIT"
] | null | null | null |
CameraCalibration.py
|
lsmanoel/StereoVision
|
22e9a422a217290e6fb2b71afc663db87e530842
|
[
"MIT"
] | null | null | null |
CameraCalibration.py
|
lsmanoel/StereoVision
|
22e9a422a217290e6fb2b71afc663db87e530842
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import glob
from matplotlib import pyplot as plt
class CameraCalibration():
    """OpenCV chessboard-based camera calibration helpers.

    All methods are static; the class groups the calibration workflow:
    detect a chessboard (find_chess), estimate the camera matrix and
    distortion coefficients (calibrateCoefficients), plus interactive
    testbench/getPhoto utilities for a live camera.
    """

    def __init__(self):
        pass

    @staticmethod
    def find_chess(frame_input, chess_size=(6, 6)):
        """Detect a chessboard pattern in a BGR frame.

        Args:
            frame_input: BGR image (as returned by cv2.VideoCapture.read).
            chess_size: inner-corner grid size of the chessboard.

        Returns:
            (frame_output, objpoints, imgpoints, status): the frame with
            corners drawn (or the untouched input if detection failed),
            the 3D/2D point lists for cv2.calibrateCamera, and status
            "checkmate!" when the full pattern was found, else None.
        """
        status = None
        print("chess...")
        # Termination criteria for the sub-pixel corner refinement.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...,(6,5,0).
        objp = np.zeros((chess_size[0]*chess_size[1], 3), np.float32)
        objp[:, :2] = np.mgrid[0:chess_size[0], 0:chess_size[1]].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        frame_gray = cv2.cvtColor(frame_input, cv2.COLOR_BGR2GRAY)
        # Find the chess board corners.
        ret, corners = cv2.findChessboardCorners(frame_gray, (chess_size[0], chess_size[1]), None)
        frame_output = None
        if ret == True:
            status = "checkmate!"
            print(status)
            objpoints.append(objp)
            # Refine corner locations to sub-pixel accuracy.
            corners2 = cv2.cornerSubPix(frame_gray,
                                        corners,
                                        (11, 11),
                                        (-1, -1),
                                        criteria)
            imgpoints.append(corners2)
            # Draw and display the detected corners.
            frame_output = cv2.drawChessboardCorners(frame_input, (chess_size[0], chess_size[1]), corners2, ret)
            plt.imshow(frame_output)
            plt.show()
        if frame_output is None:
            frame_output = frame_input
        return frame_output, objpoints, imgpoints, status

    @staticmethod
    def calibrateCoefficients(frame_input, objpoints, imgpoints):
        """Run cv2.calibrateCamera and report the mean reprojection error.

        Args:
            frame_input: grayscale image; its shape gives the sensor size.
            objpoints/imgpoints: point lists produced by find_chess.

        Returns:
            (ret, mtx, dist, rvecs, tvecs) straight from cv2.calibrateCamera.
        """
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,
                                                           imgpoints,
                                                           frame_input.shape[::-1],
                                                           None,
                                                           None)
        tot_error = 0
        for i in range(len(objpoints)):
            imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
            error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
            tot_error += error
        # BUG FIX: the original printed mean_error/len(objpoints), but
        # mean_error was never updated and so always printed 0; the
        # accumulated value is tot_error.
        print("total error: ", tot_error/len(objpoints))
        return ret, mtx, dist, rvecs, tvecs

    @staticmethod
    def testbench(video_source=2):
        """Interactive loop: calibrate from a live chessboard, then show
        undistorted vs raw frames side by side (matplotlib windows)."""
        capture = cv2.VideoCapture(video_source)
        count_frame = 0
        while 1:
            # Calibration phase: grab frames until a full chessboard is seen.
            print('calibrate state...')
            status = None
            while status is None:
                ret, frame_input = capture.read()
                print(count_frame)
                count_frame += 1
                frame_chess, objpoints, imgpoints, status = CameraCalibration.find_chess(frame_input)
                plt.imshow(frame_chess)
                plt.show()
            frame_gray = cv2.cvtColor(frame_input, cv2.COLOR_BGR2GRAY)
            plt.imshow(frame_gray)
            plt.show()
            ret, mtx, dist, rvecs, tvecs = CameraCalibration.calibrateCoefficients(frame_gray, objpoints, imgpoints)
            h, w = frame_gray.shape[:2]
            newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
            # Test phase: undistort live frames with the new camera matrix.
            print('test state...')
            while 1:
                ret, frame_input = capture.read()
                frame_gray = cv2.cvtColor(frame_input, cv2.COLOR_BGR2GRAY)
                h, w = frame_gray.shape[:2]
                newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
                frame_undist = cv2.undistort(frame_input, mtx, dist, None, newcameramtx)
                x, y, w, h = roi
                print(x, y, w, h)
                # frame_undist = frame_undist[y:y+h, x:x+w]
                frame_concat = np.concatenate((frame_undist, frame_input), axis=1)
                plt.imshow(frame_concat)
                plt.show()
        # NOTE(review): unreachable as in the original — the outer loop has
        # no break (the Esc handling is commented out upstream).
        capture.release()
        cv2.destroyAllWindows()

    @staticmethod
    def getPhoto(video_source=0):
        """Preview the camera with crosshair guide lines; Esc captures one
        final frame and displays it via matplotlib."""
        capture = cv2.VideoCapture(video_source)
        while 1:
            ret, frame_input = capture.read()
            frame_line = frame_input
            # Draw horizontal and vertical center lines as a framing aid.
            frame_output = cv2.line(frame_line,
                                    (0, frame_line.shape[0]//2),
                                    (frame_line.shape[1], frame_line.shape[0]//2),
                                    (255,0,0),
                                    1)
            frame_output = cv2.line(frame_line,
                                    (frame_line.shape[1]//2, 0),
                                    (frame_line.shape[1]//2, frame_line.shape[0]),
                                    (255,0,0),
                                    1)
            cv2.imshow("Video", frame_line)
            # Esc -> exit preview loop.
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
        # Capture one last frame and display it with matplotlib.
        ret, frame_input = capture.read()
        frame_input = cv2.cvtColor(frame_input, cv2.COLOR_BGR2RGB)
        plt.imshow(frame_input)
        plt.xticks([])
        plt.yticks([])
        plt.show()
        capture.release()
        cv2.destroyAllWindows()
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# CameraCalibration.testbench(video_source=2)
| 39.015625
| 128
| 0.394206
| 7,162
| 0.956081
| 0
| 0
| 6,547
| 0.873982
| 0
| 0
| 2,071
| 0.276465
|
0cb626407dc59dff1be601a5e0499c7a012ea0ad
| 75
|
py
|
Python
|
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
from app.database.base_class import Base
from app.models.place import Place
| 37.5
| 40
| 0.853333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0cb66d2801b2daaa2e8e7ffbed52fec520091038
| 3,499
|
py
|
Python
|
yolox/models/simo_fpn.py
|
RawFisher/YOLOX
|
bec9423bdd25a9e85b976c32d774e31a33fcefed
|
[
"Apache-2.0"
] | null | null | null |
yolox/models/simo_fpn.py
|
RawFisher/YOLOX
|
bec9423bdd25a9e85b976c32d774e31a33fcefed
|
[
"Apache-2.0"
] | null | null | null |
yolox/models/simo_fpn.py
|
RawFisher/YOLOX
|
bec9423bdd25a9e85b976c32d774e31a33fcefed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import torch
import torch.nn as nn
from .sdc_darknet import SDCCSPDarknet
# from .simo_darknet import SIMOCSPDarknet
from .network_blocks import BaseConv, CSPLayer, DWConv
from .network_blocks import get_activation
class SIMOFPN(nn.Module):
    """Single-In-Multi-Out FPN head on an SDC CSPDarknet backbone.

    Consumes one backbone feature ("dark5" by default) and derives the
    P3-P6 pyramid from it: P4/P3 by nearest upsampling, P6 by two strided
    extra convolutions (see forward).
    """
    def __init__(
        self,
        depth=1.0,
        width=1.0,
        in_features=("dark5",),
        in_channels=[1024,],
        encode_channels=[256, 256, 256],
        out_channels=[256, 256, 256],
        depthwise=False,
        act="silu",
    ):
        # NOTE(review): the list defaults are mutable; they are never
        # mutated here, but tuples would be safer.
        super().__init__()
        self.backbone = SDCCSPDarknet(depth, width, depthwise=depthwise, act=act)
        # self.backbone = SIMOCSPDarknet(depth, width, depthwise=depthwise, act=act)
        self.in_features = in_features
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.encode_channels = encode_channels
        Conv = DWConv if depthwise else BaseConv
        self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
        # 1x1 convs aligning each backbone feature to the encode width.
        self.align_layers = nn.ModuleList()
        for idx in range(len(self.in_channels)):
            self.align_layers.append(
                Conv(int(self.in_channels[idx] * width), int(self.encode_channels[idx] * width), 1, 1, act=act)
            )
        # bottom-up conv: one 3x3 conv per output level (P5, P4, P3).
        self.level_conv2_layers = nn.ModuleList()
        for idx in range(len(self.out_channels)):
            self.level_conv2_layers.append(
                Conv(int(self.encode_channels[idx] * width), int(self.encode_channels[idx] * width), 3, 1, act=act)
            )
        # extra layers: stride-2 convs producing the P6 level.
        self.extra_lvl_in_conv = ExtraConv(
            int(self.encode_channels[0] * width), int(self.encode_channels[0] * width), 3, 2, act=act
        )
        self.top_down_blocks = ExtraConv(
            int(self.encode_channels[0] * width), int(self.encode_channels[0] * width), 3, 2, act=act
        )
    def forward(self, input):
        """
        Args:
            input: input images.
        Returns:
            Tuple[Tensor]: FPN features (P3, P4, P5, P6).
        """
        # backbone + channel alignment of the single selected feature
        out_features = self.backbone(input)
        features = [align(out_features[f]) for f, align in zip(self.in_features, self.align_layers)]
        [C5] = features
        P5 = C5
        # derive coarser-to-finer levels by 2x nearest upsampling
        P4 = self.upsample(P5)
        P3 = self.upsample(P4)
        P5 = self.level_conv2_layers[0](P5)
        P4 = self.level_conv2_layers[1](P4)
        P3 = self.level_conv2_layers[2](P3)
        # P6: sum of strided convs from the aligned input and from P5
        P6 = self.extra_lvl_in_conv(C5) + self.top_down_blocks(P5)
        outputs = (P3, P4, P5, P6)
        return outputs
class ExtraConv(nn.Module):
    """Conv2d -> BatchNorm2d -> activation block with "same" padding."""
    def __init__(
        self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"
    ):
        super().__init__()
        # "Same" padding for an odd kernel size.
        same_pad = ksize // 2
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=ksize,
            stride=stride,
            padding=same_pad,
            groups=groups,
            bias=bias,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = get_activation(act, inplace=True)
    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)
    def fuseforward(self, x):
        # Forward variant for after conv+bn fusion (bn folded into conv).
        return self.act(self.conv(x))
| 30.964602
| 115
| 0.595027
| 3,172
| 0.906545
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.158617
|
0cb6a5d9b64c81ee9b97838a133419cdba2cb50d
| 326
|
py
|
Python
|
benchmark/mysql_benchmark.py
|
AlonFischer/SpatialDatabaseBench
|
1fe933bd4196ba17c687f04c37cb5a34acc6d824
|
[
"Apache-2.0"
] | 1
|
2020-11-17T22:56:56.000Z
|
2020-11-17T22:56:56.000Z
|
benchmark/mysql_benchmark.py
|
AlonFischer/SpatialDatabaseBench
|
1fe933bd4196ba17c687f04c37cb5a34acc6d824
|
[
"Apache-2.0"
] | null | null | null |
benchmark/mysql_benchmark.py
|
AlonFischer/SpatialDatabaseBench
|
1fe933bd4196ba17c687f04c37cb5a34acc6d824
|
[
"Apache-2.0"
] | null | null | null |
from benchmark.benchmark import Benchmark
from mysqlutils.mysqladapter import MySQLAdapter
class MysqlBenchmark(Benchmark):
    """Abstract parent class for mysql benchmarks"""
    # adapter: MySQLAdapter used to run queries against the database.
    # title: human-readable benchmark name; repeat_count: repetitions.
    def __init__(self, adapter, title, repeat_count=7):
        super().__init__(title, repeat_count=repeat_count)
        self.adapter = adapter
| 29.636364
| 58
| 0.754601
| 232
| 0.711656
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.147239
|
0cb86fe9bbc7f2cf6d3e6c50ebc4e8bef2550fd2
| 316
|
py
|
Python
|
packages/core/minos-microservice-networks/tests/test_networks/test_exceptions.py
|
sorasful/minos-python
|
1189330eebf6444627a2af6b29f347670f95a4dd
|
[
"MIT"
] | 247
|
2022-01-24T14:55:30.000Z
|
2022-03-25T12:06:17.000Z
|
packages/core/minos-microservice-networks/tests/test_networks/test_exceptions.py
|
sorasful/minos-python
|
1189330eebf6444627a2af6b29f347670f95a4dd
|
[
"MIT"
] | 275
|
2021-04-03T09:23:40.000Z
|
2022-01-28T11:56:25.000Z
|
tests/test_networks/test_exceptions.py
|
Clariteia/minos_microservice_networks
|
77f239429653272c5cb3447311513143f8521ed9
|
[
"MIT"
] | 21
|
2022-02-06T17:25:58.000Z
|
2022-03-27T04:50:29.000Z
|
import unittest
from minos.common import (
MinosException,
)
from minos.networks import (
MinosNetworkException,
)
class TestExceptions(unittest.TestCase):
    """Checks the networks exception hierarchy."""
    def test_type(self):
        # MinosNetworkException must stay a MinosException subclass so
        # callers can catch the common base type.
        self.assertTrue(issubclass(MinosNetworkException, MinosException))
if __name__ == "__main__":
    unittest.main()
| 17.555556
| 74
| 0.737342
| 140
| 0.443038
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.031646
|
0cb88d9738f070179ad3791e8725e49dddde3cbd
| 45
|
py
|
Python
|
Weltantschauung/__init__.py
|
area42/Weltanschauung-
|
85694740f149aa741f69a67bf234b447ba11fb22
|
[
"MIT"
] | null | null | null |
Weltantschauung/__init__.py
|
area42/Weltanschauung-
|
85694740f149aa741f69a67bf234b447ba11fb22
|
[
"MIT"
] | null | null | null |
Weltantschauung/__init__.py
|
area42/Weltanschauung-
|
85694740f149aa741f69a67bf234b447ba11fb22
|
[
"MIT"
] | null | null | null |
from .Weltantschauung import Weltantschauung
| 22.5
| 44
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0cb8ec1c4a754eeb4820931d43bc795cf047e17e
| 187
|
py
|
Python
|
api/messages/csv_file.py
|
pikanezi/Roadkill
|
b2c69294afa4cce810fa898f3aa1cb467bffa413
|
[
"MIT"
] | null | null | null |
api/messages/csv_file.py
|
pikanezi/Roadkill
|
b2c69294afa4cce810fa898f3aa1cb467bffa413
|
[
"MIT"
] | null | null | null |
api/messages/csv_file.py
|
pikanezi/Roadkill
|
b2c69294afa4cce810fa898f3aa1cb467bffa413
|
[
"MIT"
] | null | null | null |
__author__ = 'Vincent'
from protorpc import messages
class CsvFile(messages.Message):
    """ProtoRPC message carrying an uploaded CSV file and an optional name."""
    file = messages.BytesField(1, required=True)    # raw CSV bytes
    name = messages.StringField(2, required=False)  # original filename, if any
| 26.714286
| 50
| 0.759358
| 132
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.048128
|
0cb8edc3aaa4ea60cf2e8cdba8dd3f71fa79f1ce
| 1,510
|
py
|
Python
|
run.py
|
whyjay/memoryGAN
|
cfc5e8cf37f9537a3136595a6afa734335622202
|
[
"MIT"
] | 44
|
2018-03-05T06:11:31.000Z
|
2022-03-30T06:40:24.000Z
|
run.py
|
whyjay/memoryGAN
|
cfc5e8cf37f9537a3136595a6afa734335622202
|
[
"MIT"
] | 3
|
2018-03-20T03:17:23.000Z
|
2018-07-29T11:46:34.000Z
|
run.py
|
whyjay/memoryGAN
|
cfc5e8cf37f9537a3136595a6afa734335622202
|
[
"MIT"
] | 11
|
2018-04-01T18:24:53.000Z
|
2020-10-15T08:55:21.000Z
|
import os
import numpy as np
import tensorflow as tf
from models.config import Config
from models.memory_gan import MemoryGAN
from models.test_generation import test_generation
from models.train import train
from utils import pp, visualize, to_json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ log noise
flags = tf.app.flags
# Command-line flags: training schedule, dataset/loss selection and model
# component names (the model name is resolved via globals() in main).
flags.DEFINE_integer("epoch", 1500, "Max epoch to train")
flags.DEFINE_string("exp", 0, "Experiment number")
flags.DEFINE_string("load_cp_dir", '', "cp path")
flags.DEFINE_string("dataset", "fashion", "[fashion, affmnist, cifar10]")
flags.DEFINE_string("loss", "jsd", "[jsd, alternative, reverse_kl, updown]")
flags.DEFINE_boolean("lr_decay", False, "")
flags.DEFINE_boolean("use_augmentation", False, "")
flags.DEFINE_boolean("is_train", True, "True for training, False for testing [False]")
flags.DEFINE_string("model", 'MemoryGAN', '')
flags.DEFINE_string("generator", 'base_g', '')
flags.DEFINE_string("discriminator", 'memory_d', '')
FLAGS = flags.FLAGS
def main(_):
    """Entry point: build config and model, then train or sample."""
    pp.pprint(flags.FLAGS.__flags)
    cfg = Config(FLAGS)
    cfg.print_config()
    cfg.make_dirs()
    sess_config = tf.ConfigProto(allow_soft_placement=FLAGS.is_train, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        # Model class is selected by name from the flag (e.g. MemoryGAN).
        model = globals()[FLAGS.model](cfg)
        if FLAGS.is_train:
            train(model, sess)
        else:
            test_generation(model, sess)
if __name__ == '__main__':
    tf.app.run()
| 31.458333
| 98
| 0.724503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 365
| 0.241722
|
0cb901c85f60e28cdd422152266e1ce6e9afaf21
| 2,164
|
py
|
Python
|
web/impact/impact/views/calendar_reminder_view.py
|
masschallenge/impact-api
|
81075ced8fcc95de9390dd83c15e523e67fc48c0
|
[
"MIT"
] | 5
|
2017-10-19T15:11:52.000Z
|
2020-03-08T07:16:21.000Z
|
web/impact/impact/views/calendar_reminder_view.py
|
masschallenge/impact-api
|
81075ced8fcc95de9390dd83c15e523e67fc48c0
|
[
"MIT"
] | 182
|
2017-06-21T19:32:13.000Z
|
2021-03-22T13:38:16.000Z
|
web/impact/impact/views/calendar_reminder_view.py
|
masschallenge/impact-api
|
81075ced8fcc95de9390dd83c15e523e67fc48c0
|
[
"MIT"
] | 1
|
2018-06-23T11:53:18.000Z
|
2018-06-23T11:53:18.000Z
|
from django.views import View
from django.http import HttpResponseRedirect
from add2cal import Add2Cal
from pytz import timezone
import datetime
from django.http import (
JsonResponse,
HttpResponse
)
ADD2CAL_DATE_FORMAT = "%Y%m%dT%H%M%S"  # compact timestamp format fed to Add2Cal
CALENDAR_CONTENT_TYPE = 'text/calendar'
# Supported values for the `link_type` query parameter.
OUTLOOK_LINK_TYPE = 'outlook'
GOOGLE_LINK_TYPE = 'google'
YAHOO_LINK_TYPE = 'yahoo'
ICAL_LINK_TYPE = 'ical'
class CalendarReminderView(View):
    # Name used for URL routing / reverse lookups.
    view_name = 'calendar_reminder_view'
    def get(self, request, *args, **kwargs):
        """Build an add-to-calendar response from query parameters.

        Depending on `link_type`: returns an .ics attachment (ical), a
        redirect to the provider's add-event URL (outlook/google/yahoo),
        or a JSON payload with all link variants (default).
        """
        params = self.request.GET
        # Default start/end to "now" in Add2Cal's compact format.
        start = params.get('start', datetime.datetime.now().strftime(
            ADD2CAL_DATE_FORMAT))
        end = params.get(
            'end', datetime.datetime.now().strftime(ADD2CAL_DATE_FORMAT))
        title = params.get('title', 'new reminder')
        description = params.get('description', '')
        location = params.get('location', 'MassChallenge')
        # NOTE(review): the default is a pytz tzinfo object while a supplied
        # query param would be a string — confirm Add2Cal accepts both.
        tz = params.get('timezone', timezone('UTC'))
        link_type = params.get('link_type', 'data')
        add2cal = Add2Cal(
            start=start,
            end=end,
            title=title,
            description=description,
            location=location,
            timezone=tz)
        calendar_data = add2cal.as_dict()
        if link_type == ICAL_LINK_TYPE:
            response = HttpResponse(
                calendar_data['ical_content'],
                content_type=CALENDAR_CONTENT_TYPE)
            attachment = 'attachment; filename={title}.ics'.format(title=title)
            # NOTE(review): Content-Type is already set via the constructor;
            # this assignment is redundant but harmless.
            response['Content-Type'] = CALENDAR_CONTENT_TYPE
            response['Content-Disposition'] = attachment
        elif link_type == OUTLOOK_LINK_TYPE:
            response = HttpResponseRedirect(
                redirect_to=calendar_data['outlook_link'])
        elif link_type == GOOGLE_LINK_TYPE:
            response = HttpResponseRedirect(
                redirect_to=calendar_data['gcal_link'])
        elif link_type == YAHOO_LINK_TYPE:
            response = HttpResponseRedirect(
                redirect_to=calendar_data['yahoo_link'])
        else:
            # Fallback: return every link variant as JSON.
            response = JsonResponse(add2cal.as_dict())
        return response
| 35.47541
| 79
| 0.636322
| 1,766
| 0.816081
| 0
| 0
| 0
| 0
| 0
| 0
| 309
| 0.142791
|
0cb93959fe2a17c6bba6b5049a41d091d98ecf1d
| 1,174
|
py
|
Python
|
slixmpp/plugins/xep_0421/stanza.py
|
cnngimenez/slixmpp
|
bb61f0f39dfba205282dab50c0f3a47b26145c74
|
[
"BSD-3-Clause"
] | null | null | null |
slixmpp/plugins/xep_0421/stanza.py
|
cnngimenez/slixmpp
|
bb61f0f39dfba205282dab50c0f3a47b26145c74
|
[
"BSD-3-Clause"
] | null | null | null |
slixmpp/plugins/xep_0421/stanza.py
|
cnngimenez/slixmpp
|
bb61f0f39dfba205282dab50c0f3a47b26145c74
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2020 "Maxime “pep” Buquet <pep@bouah.net>"
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
from slixmpp.xmlstream import ElementBase
NS = 'urn:xmpp:occupant-id:0'
class OccupantId(ElementBase):
    '''
    An Occupant-id tag.
    An <occupant-id/> tag is set by the MUC.
    This is useful in semi-anon MUCs (and MUC-PMs) as a stable identifier to
    prevent the usual races with nicknames.
    Without occupant-id, getting the following messages from MUC history would
    prevent a client from asserting senders are the same entity:
      <message type='groupchat' from='foo@muc/nick1' id='message1'>
        <body>Some message</body>
        <occupant-id xmlns='urn:xmpp:occupant-id:0' id='unique-opaque-id1'/>
      </message>
      <message type='groupchat' from='foo@muc/nick2' id='message2'>
        <body>Some correction</body>
        <occupant-id xmlns='urn:xmpp:occupant-id:0' id='unique-opaque-id1'/>
        <replace xmlns='urn:xmpp:message-correct:0' id='message1'/>
      </message>
    '''
    name = 'occupant-id'  # XML tag name
    namespace = NS        # urn:xmpp:occupant-id:0
    interface = {'id'}    # exposed interface: the opaque occupant id
| 28.634146
| 78
| 0.663543
| 907
| 0.769949
| 0
| 0
| 0
| 0
| 0
| 0
| 1,037
| 0.880306
|
0cba78e638ec2faf5f7126a5c233d72920bc6dd8
| 3,458
|
py
|
Python
|
poseidon/dags/traffic_counts/traffic_counts_jobs.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
poseidon/dags/traffic_counts/traffic_counts_jobs.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
poseidon/dags/traffic_counts/traffic_counts_jobs.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
"""Traffic counts _jobs file."""
import pandas as pd
import logging
from subprocess import Popen, PIPE
from trident.util import general
conf = general.config
fy = general.get_FY_year()
def get_traffic_counts(out_fname='traffic_counts_file'):
    """Fetch the Machine_Count_Index workbook from the TSW shared drive.

    Copies the current fiscal year's Machine_Count_Index.xlsx into the temp
    data dir via smbclient. Raises Exception (carrying smbclient output)
    when the subprocess exits non-zero; returns a success message otherwise.
    """
    logging.info(f'Retrieving data for FY {fy}.')
    # Assemble the smbclient invocation; credentials come from trident config.
    command = (
        "smbclient //ad.sannet.gov/dfs "
        f"--user={conf['svc_acct_user']}%{conf['svc_acct_pass']} -W ad -c "
        "'cd \"TSW-TEO-Shared/TEO/"
        "TEO-Transportation-Systems-and-Safety-Programs/"
        f"Traffic Data/{fy}/RECORD FINDER\";"
        f" ls; get Machine_Count_Index.xlsx {conf['temp_data_dir']}/{out_fname}.xlsx;'"
    )
    proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    output, error = proc.communicate()
    if proc.returncode != 0:
        raise Exception(output)
    return f'Successfully retrieved {fy} data.'
def clean_traffic_counts(src_fname='traffic_counts_file',
                         out_fname='traffic_counts_raw_clean'):
    """Convert the raw Machine_Count_Index workbook into a temp CSV.

    Reads the 'TRAFFIC' sheet (skipping the 4 header rows, keeping columns
    8-16) and writes it out with standardized column names.
    """
    xlsx_file = f"{conf['temp_data_dir']}/{src_fname}.xlsx"
    out_csv_file = f"{conf['temp_data_dir']}/{out_fname}.csv"
    # Standardized names for the nine columns of interest.
    col_names = ['street_name',
                 'limits',
                 'northbound_count',
                 'southbound_count',
                 'eastbound_count',
                 'westbound_count',
                 'total_count',
                 'file_no',
                 'date_count']
    traffic = pd.read_excel(xlsx_file,
                            sheet_name='TRAFFIC',
                            header=None,
                            skiprows=[0, 1, 2, 3],
                            usecols=[8, 9, 10, 11, 12, 13, 14, 15, 16],
                            names=col_names)
    # Write temp csv
    general.pos_write_csv(
        traffic,
        out_csv_file,
        date_format=conf['date_format_ymd_hms'])
    return "Successfully cleaned traffic counts data."
def build_traffic_counts(src_fname='traffic_counts_raw_clean',
                         out_fname='traffic_counts_datasd_v1'):
    """Produce the production traffic-counts CSV from the cleaned temp file."""
    src_file = f"{conf['temp_data_dir']}/{src_fname}.csv"
    out_file = f"{conf['prod_data_dir']}/{out_fname}.csv"
    counts = pd.read_csv(src_file)
    # Drop placeholder rows carrying a single-space street name.
    counts = counts[counts['street_name'] != ' ']
    # Normalize count dates; unparseable values become NaT.
    counts['date_count'] = pd.to_datetime(counts['date_count'], errors='coerce')
    # Derive a row id from street name + file number, stripped of spaces/dashes.
    counts['id'] = counts.street_name.str.cat(counts.file_no, sep="")\
        .str.replace(" ", "")\
        .str.replace("-", "")
    # Move the freshly-added id column (currently last) to the front.
    reordered = counts.columns.tolist()
    counts = counts[reordered[-1:] + reordered[:-1]]
    # write to production file
    general.pos_write_csv(
        counts,
        out_file,
        date_format=conf['date_format_ymd_hms'])
    return "Successfully built traffic counts production file."
| 32.317757
| 79
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,286
| 0.371891
|
0cba867a6c14d6b3104167202a30906c2120dfc6
| 2,624
|
py
|
Python
|
metaquantome/modules/run_viz.py
|
jj-umn/metaquantome
|
46461dea0914b9c153985e02c594eeb781bf3a27
|
[
"Apache-2.0"
] | 4
|
2019-03-19T10:40:34.000Z
|
2021-08-16T14:10:53.000Z
|
metaquantome/modules/run_viz.py
|
jj-umn/metaquantome
|
46461dea0914b9c153985e02c594eeb781bf3a27
|
[
"Apache-2.0"
] | 35
|
2018-11-15T18:33:39.000Z
|
2021-02-20T20:37:55.000Z
|
metaquantome/modules/run_viz.py
|
jj-umn/metaquantome
|
46461dea0914b9c153985e02c594eeb781bf3a27
|
[
"Apache-2.0"
] | 6
|
2018-11-16T03:10:45.000Z
|
2021-02-24T20:56:45.000Z
|
import os
import subprocess
import json
from metaquantome.util.utils import BASE_DIR
from metaquantome.classes.SampleGroups import SampleGroups
def run_viz(plottype, img, infile, strip=None,
            mode=None, meancol=None, nterms='5', target_rank=None, barcol=6,  # barplot, stacked_bar
            textannot=None, fc_name=None, fc_corr_p=None, flip_fc=False, gosplit=False,  # volcano
            sinfo=None, filter_to_sig=False, alpha='0.05',  # heatmap
            calculate_sep=False,  # pca
            whichway=None, name=None, id=None, target_onto=None,  # ft_dist
            width='5', height='5', tabfile=None, feature_cluster_size=2, sample_cluster_size=2):
    """
    Wrapper script for the command-line R visualizations.

    Builds the argument list expected by viz.R for the requested plot type
    and runs it via Rscript, suppressing stdout.
    The documentation for each of the arguments is in cli.py.

    :raises ValueError: if plottype is not a recognized plot type.
    :raises subprocess.CalledProcessError: if the R script exits non-zero.
    :return: None
    """
    r_script_path = os.path.join(BASE_DIR, 'modules', 'viz.R')
    cmd = ['Rscript', '--vanilla', r_script_path, plottype, img, infile]
    if plottype == "bar":
        cmd += [mode, meancol, nterms, width, height, target_rank, target_onto, barcol, tabfile]
    elif plottype == "volcano":
        cmd += [str(textannot), fc_name, fc_corr_p, flip_fc, gosplit, width, height, tabfile]
    elif plottype == "heatmap":
        all_intcols_str, json_dump = _sample_group_args(sinfo)
        cmd += [all_intcols_str, json_dump, filter_to_sig, alpha, width, height, strip,
                feature_cluster_size, sample_cluster_size, fc_corr_p]
    elif plottype == "pca":
        all_intcols_str, json_dump = _sample_group_args(sinfo)
        cmd += [all_intcols_str, json_dump, calculate_sep, width, height, strip]
    elif plottype == "ft_dist":
        cmd += [whichway, name, id, meancol, nterms, width, height,
                target_rank, target_onto, barcol, tabfile]
    elif plottype == "stacked_bar":
        all_intcols_str, json_dump = _sample_group_args(sinfo)
        cmd += [all_intcols_str, json_dump, nterms, target_rank, width, height, tabfile]
    else:
        # Bug fix: the original constructed this ValueError but never raised
        # it, so an unknown plot type fell through to the R script. Also, the
        # stacked_bar test was a separate `if`, so its `else` clause ran for
        # every other (valid) plot type; using one elif chain fixes both.
        raise ValueError("Wrong plot type. Must be bar, volcano, heatmap, ft_dist, stacked_bar, or pca.")
    # ensure that all elements are strings (even booleans, etc)
    cmd_string = [str(elem) for elem in cmd]
    # run the visualizations, suppressing any output to stdout
    with open(os.devnull, 'w') as fnull:
        subprocess.run(cmd_string, stdout=fnull, check=True)


def _sample_group_args(sinfo):
    """Return (comma-joined intensity columns, JSON-encoded sample names) for sinfo."""
    samp_grps = SampleGroups(sinfo)
    return ','.join(samp_grps.all_intcols), json.dumps(samp_grps.sample_names)
| 47.709091
| 141
| 0.676829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.194741
|
0cbd16de6a3b89e4146e58d8e4a4fcddb5bba48b
| 7,173
|
py
|
Python
|
src/mtweepy/__init__.py
|
Souvic/mtweepy
|
26c5480aee1032a38335018efc66610b6960f7d4
|
[
"MIT"
] | 1
|
2021-07-04T09:30:10.000Z
|
2021-07-04T09:30:10.000Z
|
src/mtweepy/__init__.py
|
Souvic/mtweepy
|
26c5480aee1032a38335018efc66610b6960f7d4
|
[
"MIT"
] | null | null | null |
src/mtweepy/__init__.py
|
Souvic/mtweepy
|
26c5480aee1032a38335018efc66610b6960f7d4
|
[
"MIT"
] | null | null | null |
import json
import multiprocessing
import os
import requests
from requests_oauthlib import OAuth1
from time import sleep
import tweepy
def get_users_single(x, auth, output_folder):
    """
    Hydrate a batch of Twitter user ids via the users/lookup endpoint.

    Blocks (polling the rate-limit endpoint every 30 s) whenever the request
    is not answered with HTTP 200, until the users/lookup quota has calls
    remaining, then retries the lookup.

    :param x: iterable of user ids (joined into one comma-separated request;
        the API accepts at most 100 per call — callers batch accordingly).
    :param auth: either a bearer-token string or a requests-compatible auth
        object (e.g. OAuth1).
    :param output_folder: unused; kept for backward compatibility with callers.
    :return: parsed JSON list of user objects.
    """
    while True:
        url = f"https://api.twitter.com/1.1/users/lookup.json?user_id={','.join([str(i) for i in x])}"
        if type(auth) == str:
            headers = {"Authorization": "Bearer " + auth}
            r = requests.get(url=url, headers=headers)
        else:
            r = requests.get(url=url, auth=auth)
        if r.status_code == 200:
            # Bug fix: the original had an unreachable `break` after this
            # return; returning directly removes the dead code.
            return r.json()
        # Rate limited or transient failure: poll until quota is restored.
        print("sleeping")
        url = "https://api.twitter.com/1.1/application/rate_limit_status.json?resources=help,users,search,statuses"
        while True:
            sleep(30)
            try:
                if type(auth) == str:
                    headers = {"Authorization": "Bearer " + auth}
                    l = requests.get(url=url, headers=headers).json()
                else:
                    l = requests.get(url=url, auth=auth).json()
                if l["resources"]["users"]["/users/lookup"]["remaining"] != 0:
                    break
            except Exception:
                # Best-effort polling: ignore network/JSON errors and retry,
                # but (unlike the original bare except) let KeyboardInterrupt
                # and SystemExit propagate.
                pass
def get_users_single_mp_aux(x, index, auths, output_folder):
    """Worker: hydrate ids in batches of 100, one JSON array per output line."""
    batch_size = 100
    auth = auths[index]
    with open(f'{output_folder}/{index}.jsonl', 'w') as outfile:
        for start in range(0, len(x), batch_size):
            batch = x[start:start + batch_size]
            payload = get_users_single(batch, auth, output_folder)
            json.dump(payload, outfile)
            outfile.write('\n')
def get_users(auths, user_ids, output_folder):
    """
    Hydrate user_ids in parallel, one process per credential.

    Each worker writes its share of results to <output_folder>/<index>.jsonl.
    Returns None early if output_folder is not a directory or auths is empty.
    """
    if not os.path.isdir(output_folder):
        print(f"Not a directory: {output_folder}")
        return None
    if len(auths) == 0:
        return None
    # 4-tuples of keys are converted to OAuth1 objects; strings are treated
    # as bearer tokens downstream.
    if type(auths[0]) != str:
        auths = [OAuth1(a[0], a[1], a[2], a[3]) for a in auths]
    chunk = 1 + len(user_ids) // len(auths)
    workers = []
    for index, start in enumerate(range(0, len(user_ids), chunk)):
        proc = multiprocessing.Process(
            target=get_users_single_mp_aux,
            args=(user_ids[start:start + chunk], index, auths, output_folder))
        workers.append(proc)
        proc.start()
    for proc in workers:
        proc.join()
def get_timeline_single(auth,user_id=None,screen_name=None,count=200,trim_user=True,exclude_replies=False,include_rts=True,max_id=None):
    """
    Download the available timeline of one account, paging backwards with
    max_id until no new tweets are returned.

    :param auth: bearer-token string or requests-compatible auth object.
    :param user_id: numeric id of the account (used if given).
    :param screen_name: handle of the account (used when user_id is None).
    :param count: tweets requested per page.
    :param max_id: resume paging from this tweet id (inclusive).
    :return: list of tweet dicts accumulated across all pages.

    NOTE(review): the boolean parameters are interpolated into the URL as
    'True'/'False'; Twitter's docs specify lowercase 'true'/'false' — this
    appears to be accepted, but confirm against the API.
    """
    l=[1]
    ans=[]
    # Loop until a page comes back empty (or the cursor stops advancing).
    while(len(l)!=0):
        if(user_id is not None):
            url=f"https://api.twitter.com/1.1/statuses/user_timeline.json?user_id={user_id}&count={count}&trim_user={trim_user}&exclude_replies={exclude_replies}&include_rts={include_rts}"
        else:
            url=f"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name={screen_name}&count={count}&trim_user={trim_user}&exclude_replies={exclude_replies}&include_rts={include_rts}"
        # Request the untruncated (extended) tweet text.
        url+="&tweet_mode=extended"
        if(max_id is not None):
            #print(max_id,"here")
            url+=f"&max_id={max_id}"
        #r = requests.get(url = url, auth=auth)
        if(type(auth)==str):
            headers = {"Authorization": "Bearer "+auth}
            r = requests.get(url = url,headers=headers)
        else:
            r = requests.get(url = url, auth=auth)
        #print(url)
        # 401 (unauthorized, e.g. protected account): give up with what we have.
        if(r.status_code == 401):
            break;
        if(r.status_code != 200):
            # Rate limited (or other failure): poll every 30 s until the
            # user_timeline quota shows remaining calls, then retry the page.
            print("sleeping")
            url="https://api.twitter.com/1.1/application/rate_limit_status.json?resources=help,users,search,statuses"
            while(True):
                sleep(30)
                try:
                    if(type(auth)==str):
                        l=requests.get(url = url,headers=headers).json()
                    else:
                        l=requests.get(url = url, auth=auth).json()
                    if(l["resources"]["statuses"]["/statuses/user_timeline"]["remaining"]!=0):
                        break;
                except Exception as e:
                    print(e)
                    pass;
            continue;
        else:
            l = r.json()
            ans.extend(l)
            # Stop when the page is empty or the cursor did not advance;
            # otherwise page backwards from the oldest tweet seen.
            if(len(l)==0 or max_id==l[-1]["id_str"]):
                break;
            else:
                max_id=l[-1]["id_str"]
    return(ans)
def get_timeline_single_mp_aux(index, auths, users, output_folder):
    """Worker: write one timeline (a JSON array) per line for each user id."""
    auth = auths[index]
    with open(f'{output_folder}/{index}.jsonl', 'w') as outfile:
        for user_id in users:
            try:
                timeline = get_timeline_single(auth=auth, user_id=user_id)
            except:
                # On any failure, back off briefly and skip this user.
                sleep(30)
                continue
            json.dump(timeline, outfile)
            outfile.write('\n')
def get_timelines(auths, users, output_folder):
    """
    Download timelines for ``users`` in parallel, one process per credential.

    Each worker writes its results to <output_folder>/<index>.jsonl.

    :param auths: list of bearer-token strings, or 4-tuples of OAuth1 keys.
    :param users: list of user ids whose timelines should be fetched.
    :param output_folder: existing directory for the per-worker output files.
    :return: None (returns None early if inputs are unusable).
    """
    if not os.path.isdir(output_folder):
        print(f"Not a directory: {output_folder}")
        return None
    if len(auths) == 0:
        return None
    if type(auths[0]) != str:
        auths = [OAuth1(a[0], a[1], a[2], a[3]) for a in auths]
    Process_jobs = []
    k = len(auths)
    n = 1 + len(users) // k
    # Bug fix: index previously started at -1, so the first worker used
    # auths[-1] (sharing — and rate-limit-colliding with — the last worker's
    # credential) and wrote to '-1.jsonl'. Start at 0, matching get_users().
    index = 0
    for i in range(0, len(users), n):
        p = multiprocessing.Process(target=get_timeline_single_mp_aux,
                                    args=(index, auths, users[i:i + n], output_folder))
        index += 1
        Process_jobs.append(p)
        p.start()
    for p in Process_jobs:
        p.join()
def get_followers_aux(auth, screen_name_or_userid, cursor=-1, use_userid=False):
    """
    Fetch one page (up to 5000) of follower ids.

    Returns (ids, next_cursor); next_cursor is None when the page is empty,
    signalling the caller to stop paging.
    """
    url = "https://api.twitter.com/1.1/followers/ids.json"
    lookup_key = "user_id" if use_userid else "screen_name"
    params = {lookup_key: screen_name_or_userid, "count": "5000", "cursor": cursor}
    if type(auth) == str:
        headers = {"Authorization": "Bearer " + auth}
        page = requests.get(url=url, headers=headers, params=params).json()
    else:
        page = requests.get(url=url, auth=auth, params=params).json()
    ids = page["ids"]
    if len(ids) == 0:
        return (ids, None)
    return (ids, page["next_cursor"])
def get_followers(auths, screen_name_or_userid, max_num=-1, use_userid=False):
    """
    Collect follower ids, rotating through ``auths`` whenever a request fails
    (typically due to rate limiting).

    The first failure on a fresh credential only rotates to the next one;
    consecutive failures additionally sleep 30 s before rotating.

    :param max_num: stop once at least this many ids are collected (-1 = all).
    :return: list of follower ids, or None if ``auths`` is empty.
    """
    if len(auths) == 0:
        return None
    if type(auths[0]) != str:
        auths = [OAuth1(a[0], a[1], a[2], a[3]) for a in auths]
    collected = []
    cursor = -1
    auth_idx = 0
    auth = auths[auth_idx]
    failed_last = False
    while cursor is not None and (max_num == -1 or max_num > len(collected)):
        try:
            page, cursor = get_followers_aux(auth, screen_name_or_userid, cursor, use_userid)
            failed_last = False
            collected.extend(page)
            print(len(collected))
        except Exception as e:
            print(e)
            print("done", len(collected))
            if failed_last:
                sleep(30)
            else:
                failed_last = True
            auth_idx = (auth_idx + 1) % len(auths)
            auth = auths[auth_idx]
    return collected
| 33.518692
| 196
| 0.539384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,281
| 0.178586
|
0cbd7b1b3648a635297e6eb2447f59f26aa10163
| 49,924
|
py
|
Python
|
pyUSID/io/hdf_utils/model.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 25
|
2018-07-11T21:43:56.000Z
|
2021-11-17T11:40:00.000Z
|
pyUSID/io/hdf_utils/model.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 62
|
2018-07-05T20:28:52.000Z
|
2021-12-14T09:49:35.000Z
|
pyUSID/io/hdf_utils/model.py
|
rajgiriUW/pyUSID
|
064dcd81d9c42f4eb4782f0a41fd437b3f56f50c
|
[
"MIT"
] | 15
|
2019-03-27T22:28:47.000Z
|
2021-01-03T20:23:42.000Z
|
# -*- coding: utf-8 -*-
"""
Utilities for reading and writing USID datasets that are highly model-dependent (with or without N-dimensional form)
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from warnings import warn
import sys
import h5py
import numpy as np
from dask import array as da
from sidpy.hdf.hdf_utils import get_attr, write_simple_attrs, is_editable_h5, \
copy_dataset, lazy_load_array
from sidpy.base.num_utils import contains_integers
from sidpy.base.dict_utils import flatten_dict
from sidpy.base.string_utils import validate_single_string_arg, \
validate_list_of_strings, validate_string_args
from sidpy.hdf.dtype_utils import validate_dtype
from sidpy import sid
from .base import write_book_keeping_attrs
from .simple import link_as_main, check_if_main, write_ind_val_dsets, validate_dims_against_main, validate_anc_h5_dsets
from ..dimension import Dimension, validate_dimensions
from ..anc_build_utils import INDICES_DTYPE, make_indices_matrix
# Py2/3 compatibility shim: alias `unicode` to `str` on Python 3 so the rest
# of this module can reference `unicode` uniformly.
if sys.version_info.major == 3:
    unicode = str
def reshape_to_n_dims(h5_main, h5_pos=None, h5_spec=None, get_labels=False, verbose=False, sort_dims=False,
                      lazy=False):
    """
    Reshape the input 2D matrix to be N-dimensions based on the
    position and spectroscopic datasets.

    Parameters
    ----------
    h5_main : HDF5 Dataset
        2D data to be reshaped
    h5_pos : HDF5 Dataset, optional
        Position indices corresponding to rows in `h5_main`
    h5_spec : HDF5 Dataset, optional
        Spectroscopic indices corresponding to columns in `h5_main`
    get_labels : bool, optional
        Whether or not to return the dimension labels. Default False
    verbose : bool, optional
        Whether or not to print debugging statements
    sort_dims : bool
        If True, the data is sorted so that the dimensions are in order from slowest to fastest
        If False, the data is kept in the original order
        If `get_labels` is also True, the labels are sorted as well.
    lazy : bool, optional. Default = False
        If False, ds_Nd will be a numpy.ndarray object - this is suitable if the HDF5 dataset fits into memory
        If True, ds_Nd will be a dask.array object - This is suitable if the HDF5 dataset is too large to fit into
        memory. Note that this will be a lazy computation meaning that the returned object just contains the
        instructions. In order to get the actual value or content in numpy arrays, call ds_Nd.compute()

    Returns
    -------
    ds_Nd : N-D numpy array or dask.array object
        N dimensional array arranged as [positions slowest to fastest, spectroscopic slowest to fastest]
    success : boolean or string
        True if full reshape was successful
        "Positions" if it was only possible to reshape by
        the position dimensions
        False if no reshape was possible
    ds_labels : list of str
        List of the labels of each dimension of `ds_Nd`

    Notes
    -----
    If either `h5_pos` or `h5_spec` are not provided, the function will first
    attempt to find them as attributes of `h5_main`. If that fails, it will
    generate dummy values for them.
    """
    # TODO: automatically switch on lazy if the data is larger than memory
    # TODO: sort_dims does not appear to do much. Functions as though it was always True
    # Validate inputs: without explicit ancillary datasets, h5_main must be a
    # proper USID Main dataset; otherwise the provided pieces must agree in shape.
    if h5_pos is None and h5_spec is None:
        if not check_if_main(h5_main):
            raise ValueError('if h5_main is a h5py.Dataset it should be a Main dataset')
    else:
        if not isinstance(h5_main, (h5py.Dataset, np.ndarray, da.core.Array)):
            raise TypeError('h5_main should either be a h5py.Dataset or numpy array')
        if h5_pos is not None:
            if not isinstance(h5_pos, (h5py.Dataset, np.ndarray, da.core.Array)):
                raise TypeError('h5_pos should either be a h5py.Dataset or numpy array')
            if h5_pos.shape[0] != h5_main.shape[0]:
                raise ValueError('The size of h5_pos: {} does not match with h5_main: {}'.format(h5_pos.shape,
                                                                                                 h5_main.shape))
        if h5_spec is not None:
            if not isinstance(h5_spec, (h5py.Dataset, np.ndarray, da.core.Array)):
                raise TypeError('h5_spec should either be a h5py.Dataset or numpy array')
            if h5_spec.shape[1] != h5_main.shape[1]:
                raise ValueError('The size of h5_spec: {} does not match with h5_main: {}'.format(h5_spec.shape,
                                                                                                  h5_main.shape))

    # Fallback labels used when dummy ancillary matrices are generated below.
    pos_labs = np.array(['Positions'])
    spec_labs = np.array(['Spectral_Step'])
    if h5_pos is None:
        """
        Get the Position datasets from the references if possible
        """
        if isinstance(h5_main, h5py.Dataset):
            try:
                h5_pos = h5_main.file[h5_main.attrs['Position_Indices']]
                ds_pos = h5_pos[()]
                pos_labs = get_attr(h5_pos, 'labels')
            except KeyError:
                print('No position datasets found as attributes of {}'.format(h5_main.name))
                # Fabricate a trivial single-column position index matrix.
                if len(h5_main.shape) > 1:
                    ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
                    pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
                else:
                    ds_pos = np.array(0, dtype=INDICES_DTYPE).reshape(-1, 1)
        else:
            ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
            pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
    elif isinstance(h5_pos, h5py.Dataset):
        """
        Position Indices dataset was provided
        """
        ds_pos = h5_pos[()]
        pos_labs = get_attr(h5_pos, 'labels')
    elif isinstance(h5_pos, (np.ndarray, da.core.Array)):
        ds_pos = np.atleast_2d(h5_pos)
        pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
    else:
        raise TypeError('Position Indices must be either h5py.Dataset or None')

    if h5_spec is None:
        """
        Get the Spectroscopic datasets from the references if possible
        """
        if isinstance(h5_main, h5py.Dataset):
            try:
                h5_spec = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
                ds_spec = h5_spec[()]
                spec_labs = get_attr(h5_spec, 'labels')
            except KeyError:
                print('No spectroscopic datasets found as attributes of {}'.format(h5_main.name))
                # Fabricate a trivial single-row spectroscopic index matrix.
                if len(h5_main.shape) > 1:
                    ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
                    spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
                else:
                    ds_spec = np.array(0, dtype=INDICES_DTYPE).reshape([1, 1])
        else:
            ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
            spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
    elif isinstance(h5_spec, h5py.Dataset):
        """
        Spectroscopic Indices dataset was provided
        """
        ds_spec = h5_spec[()]
        spec_labs = get_attr(h5_spec, 'labels')
    elif isinstance(h5_spec, (np.ndarray, da.core.Array)):
        ds_spec = h5_spec
        spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
    else:
        raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')

    '''
    Sort the indices from fastest to slowest
    '''
    pos_sort = get_sort_order(np.transpose(ds_pos))
    spec_sort = get_sort_order(ds_spec)

    if verbose:
        print('Position dimensions:', pos_labs)
        print('Position sort order:', pos_sort)
        print('Spectroscopic Dimensions:', spec_labs)
        print('Spectroscopic sort order:', spec_sort)

    '''
    Get the size of each dimension in the sorted order
    '''
    pos_dims = get_dimensionality(np.transpose(ds_pos), pos_sort)
    spec_dims = get_dimensionality(ds_spec, spec_sort)

    # Products of dimension sizes must match the 2D matrix shape; a mismatch
    # indicates unmarked dependent dimensions.
    if np.prod(pos_dims) != h5_main.shape[0]:
        mesg = 'Product of position dimension sizes: {} = {} not matching ' \
               'with size of first axis of main dataset: {}. One or more ' \
               'dimensions are dependent dimensions and not marked as such' \
               '.'.format(pos_dims, np.prod(pos_dims), h5_main.shape[0])
        raise ValueError(mesg)
    if np.prod(spec_dims) != h5_main.shape[1]:
        mesg = 'Product of spectroscopic dimension sizes: {} = {} not matching ' \
               'with size of second axis of main dataset: {}. One or more ' \
               'dimensions are dependent dimensions and not marked as such' \
               '.'.format(spec_dims, np.prod(spec_dims), h5_main.shape[1])
        raise ValueError(mesg)

    if verbose:
        print('\nPosition dimensions (sort applied):', pos_labs[pos_sort])
        print('Position dimensionality (sort applied):', pos_dims)
        print('Spectroscopic dimensions (sort applied):', spec_labs[spec_sort])
        print('Spectroscopic dimensionality (sort applied):', spec_dims)

    if lazy:
        ds_main = lazy_load_array(h5_main)
    else:
        ds_main = h5_main[()]

    """
    Now we reshape the dataset based on those dimensions
    numpy reshapes correctly when the dimensions are arranged from slowest to fastest.
    Since the sort orders we have are from fastest to slowest, we need to reverse the orders
    for both the position and spectroscopic dimensions
    """
    if verbose:
        print('Will attempt to reshape main dataset from:\n{} to {}'.format(ds_main.shape, pos_dims[::-1] + spec_dims[::-1]))
    try:
        ds_Nd = ds_main.reshape(pos_dims[::-1] + spec_dims[::-1])
    except ValueError:
        warn('Could not reshape dataset to full N-dimensional form. Attempting reshape based on position only.')
        try:
            # Fall back: expand positions only; keep all spectral steps flat.
            ds_Nd = ds_main.reshape(pos_dims[::-1] + [-1])
        except ValueError:
            warn('Reshape by position only also failed. Will keep dataset in 2d form.')
            if get_labels:
                return ds_main, False, ['Position', 'Spectral Step']
            else:
                return ds_main, False
        # No exception
        else:
            if get_labels:
                return ds_Nd, 'Positions', ['Position'] + spec_labs
            else:
                return ds_Nd, 'Positions'

    all_labels = np.hstack((pos_labs[pos_sort][::-1],
                            spec_labs[spec_sort][::-1]))

    if verbose:
        print('\nAfter reshaping, labels are', all_labels)
        print('Data shape is', ds_Nd.shape)

    """
    At this point, the data is arranged from slowest to fastest dimension in both pos and spec
    """
    if sort_dims:
        results = [ds_Nd, True]
        if get_labels:
            results.append(all_labels)
        return results

    if verbose:
        print('\nGoing to put dimensions back in the same order as in the file:')

    swap_axes = list()
    # Compare the original order of the pos / spec labels with where these dimensions occur in the sorted labels
    for lab in pos_labs:
        swap_axes.append(np.argwhere(all_labels == lab).squeeze())
    for lab in spec_labs:
        swap_axes.append(np.argwhere(all_labels == lab).squeeze())
    swap_axes = np.array(swap_axes)

    if verbose:
        print('Axes will permuted in this order:', swap_axes)
        print('New labels ordering:', all_labels[swap_axes])

    ds_Nd = ds_Nd.transpose(tuple(swap_axes))
    results = [ds_Nd, True]

    if verbose:
        print('Dataset now of shape:', ds_Nd.shape)

    if get_labels:
        '''
        Get the labels in the proper order
        '''
        results.append(all_labels[swap_axes])

    return results
def reshape_from_n_dims(data_n_dim, h5_pos=None, h5_spec=None, verbose=False):
    """
    Reshape the input 2D matrix to be N-dimensions based on the
    position and spectroscopic datasets.

    Parameters
    ----------
    data_n_dim : numpy.array or dask.array.core.Array
        N dimensional array arranged as [positions dimensions..., spectroscopic dimensions]
        If h5_pos and h5_spec are not provided, this function will have to assume that the dimensions
        are arranged as [positions slowest to fastest, spectroscopic slowest to fastest].
        This restriction is removed if h5_pos and h5_spec are provided
    h5_pos : HDF5 Dataset, numpy.array or dask.array.core.Array
        Position indices corresponding to rows in the final 2d array
        The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
        In other words if data_n_dim had two position dimensions arranged as [pos_fast, pos_slow, spec_dim_1....],
        h5_pos should be arranged as [pos_fast, pos_slow]
    h5_spec : HDF5 Dataset, numpy. array or dask.array.core.Array
        Spectroscopic indices corresponding to columns in the final 2d array
        The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
        In other words if data_n_dim had two spectral dimensions arranged as [pos_dim_1,..., spec_fast, spec_slow],
        h5_spec should be arranged as [pos_slow, pos_fast]
    verbose : bool, optional. Default = False
        Whether or not to print log statements

    Returns
    -------
    ds_2d : numpy.array
        2 dimensional numpy array arranged as [positions, spectroscopic]
    success : boolean or string
        True if full reshape was successful
        "Positions" if it was only possible to reshape by
        the position dimensions
        False if no reshape was possible

    Notes
    -----
    If either `h5_pos` or `h5_spec` are not provided, the function will
    assume the first dimension is position and the remaining are spectroscopic already
    in order from fastest to slowest.
    """
    if not isinstance(data_n_dim, (np.ndarray, da.core.Array)):
        raise TypeError('data_n_dim is not a numpy or dask array')

    if h5_spec is None and h5_pos is None:
        raise ValueError('at least one of h5_pos or h5_spec must be specified for an attempt to reshape to 2D')

    # Already 1D / scalar: nothing to flatten.
    if data_n_dim.ndim < 2:
        return data_n_dim, True

    # Normalize the position ancillary input to an in-memory numpy array.
    if h5_pos is None:
        pass
    elif isinstance(h5_pos, h5py.Dataset):
        '''
        Position Indices dataset was provided
        '''
        ds_pos = h5_pos[()]
    elif isinstance(h5_pos, da.core.Array):
        ds_pos = h5_pos.compute()
    elif isinstance(h5_pos, np.ndarray):
        ds_pos = h5_pos
    else:
        raise TypeError('Position Indices must be either h5py.Dataset or None')

    # Normalize the spectroscopic ancillary input to an in-memory numpy array.
    if h5_spec is None:
        pass
    elif isinstance(h5_spec, h5py.Dataset):
        '''
        Spectroscopic Indices dataset was provided
        '''
        ds_spec = h5_spec[()]
    elif isinstance(h5_spec, da.core.Array):
        ds_spec = h5_spec.compute()
    elif isinstance(h5_spec, np.ndarray):
        ds_spec = h5_spec
    else:
        raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')

    if h5_spec is None and h5_pos is not None:
        # Build the missing spectroscopic indices from the trailing axes of data_n_dim.
        if verbose:
            print('Spectral indices not provided but position indices provided.\n'
                  'Building spectral indices assuming that dimensions are arranged as slow -> fast')
        pos_dims = get_dimensionality(ds_pos, index_sort=get_sort_order(ds_pos))
        if not np.all([x in data_n_dim.shape for x in pos_dims]):
            raise ValueError('Dimension sizes in pos_dims: {} do not exist in data_n_dim shape: '
                             '{}'.format(pos_dims, data_n_dim.shape))
        spec_dims = [col for col in list(data_n_dim.shape[len(pos_dims):])]
        if verbose:
            print('data has dimensions: {}. Provided position indices had dimensions of size: {}. Spectral dimensions '
                  'will built with dimensions: {}'.format(data_n_dim.shape, pos_dims, spec_dims))
        ds_spec = make_indices_matrix(spec_dims, is_position=False)

    elif h5_pos is None and h5_spec is not None:
        # Build the missing position indices from the leading axes of data_n_dim.
        if verbose:
            print('Position indices not provided but spectral indices provided.\n'
                  'Building position indices assuming that dimensions are arranged as slow -> fast')
        spec_dims = get_dimensionality(ds_spec, index_sort=get_sort_order(ds_spec))
        if not np.all([x in data_n_dim.shape for x in spec_dims]):
            raise ValueError('Dimension sizes in spec_dims: {} do not exist in data_n_dim shape: '
                             '{}'.format(spec_dims, data_n_dim.shape))
        pos_dims = [col for col in list(data_n_dim.shape[:data_n_dim.ndim-len(spec_dims)])]
        if verbose:
            print('data has dimensions: {}. Spectroscopic position indices had dimensions of size: {}. Position '
                  'dimensions will built with dimensions: {}'.format(data_n_dim.shape, spec_dims, pos_dims))
        ds_pos = make_indices_matrix(pos_dims, is_position=True)

    elif h5_spec is not None and h5_pos is not None:
        # Both provided: verify they jointly account for every element of data_n_dim.
        if ds_pos.shape[0] * ds_spec.shape[1] != np.product(data_n_dim.shape):
            raise ValueError('The product ({}) of the number of positions ({}) and spectroscopic ({}) observations is '
                             'not equal to the product ({}) of the data shape ({})'
                             '.'.format(ds_pos.shape[0] * ds_spec.shape[1], ds_pos.shape[0], ds_spec.shape[1],
                                        np.product(data_n_dim.shape), data_n_dim.shape))
        if ds_pos.shape[1] + ds_spec.shape[0] != data_n_dim.ndim:
            # This may mean that the dummy position or spectroscopic axes has been squeezed out!
            # Dask does NOT allow singular dimensions apparently. So cannot do expand_dims. Handle later
            if ds_pos.size == 1 or ds_spec.size == 1:
                if verbose:
                    print('ALL Position dimensions squeezed: {}. ALL Spectroscopic dimensions squeezed: {}'
                          '.'.format(ds_pos.size == 1, ds_spec.size == 1))
            else:
                raise ValueError('The number of position ({}) and spectroscopic ({}) dimensions do not match with the '
                                 'dimensionality of the N-dimensional dataset: {}'
                                 '.'.format(ds_pos.shape[1], ds_spec.shape[0], data_n_dim.ndim))

    '''
    Sort the indices from fastest to slowest
    '''
    if ds_pos.size == 1:
        # Position dimension squeezed out:
        pos_sort = []
    else:
        pos_sort = get_sort_order(np.transpose(ds_pos))
    if ds_spec.size == 1:
        # Spectroscopic axis squeezed out:
        spec_sort = []
    else:
        spec_sort = get_sort_order(ds_spec)

    # Fabricated ancillary matrices are already slow->fast, so flip their sort.
    if h5_spec is None:
        spec_sort = spec_sort[::-1]
    if h5_pos is None:
        pos_sort = pos_sort[::-1]

    if verbose:
        print('Position sort order: {}'.format(pos_sort))
        print('Spectroscopic sort order: {}'.format(spec_sort))

    '''
    Now we transpose the axes associated with the spectroscopic dimensions
    so that they are in the same order as in the index array
    '''
    swap_axes = np.uint16(np.append(pos_sort[::-1], spec_sort[::-1] + len(pos_sort)))

    if verbose:
        print('swap axes: {} to be applied to N dimensional data of shape {}'.format(swap_axes, data_n_dim.shape))

    data_n_dim_2 = data_n_dim.transpose(tuple(swap_axes))

    if verbose:
        print('N dimensional data shape after axes swap: {}'.format(data_n_dim_2.shape))

    '''
    Now we reshape the dataset based on those dimensions
    We must use the spectroscopic dimensions in reverse order
    '''
    try:
        ds_2d = data_n_dim_2.reshape([ds_pos.shape[0], ds_spec.shape[1]])
    except ValueError:
        raise ValueError('Could not reshape dataset to full N-dimensional form')

    return ds_2d, True
def get_dimensionality(ds_index, index_sort=None):
    """
    Get the size of each index dimension in a specified sort order

    Parameters
    ----------
    ds_index : 2D HDF5 Dataset or numpy array
        Row matrix of indices
    index_sort : Iterable of unsigned integers (Optional)
        Sort that can be applied to dimensionality.
        For example - Order of rows sorted from fastest to slowest

    Returns
    -------
    sorted_dims : list of unsigned integers
        Dimensionality of each row in ds_index. If index_sort is supplied, it will be in the sorted order

    Raises
    ------
    TypeError : if ds_index is not a numpy array or h5py.Dataset
    ValueError : if index_sort is not a valid permutation of the row indices
    """
    if isinstance(ds_index, da.core.Array):
        ds_index = ds_index.compute()
    if not isinstance(ds_index, (np.ndarray, h5py.Dataset)):
        raise TypeError('ds_index should either be a numpy array or h5py.Dataset')

    if ds_index.shape[0] > ds_index.shape[1]:
        # must be spectroscopic like in shape (few rows, more cols)
        ds_index = np.transpose(ds_index)

    if index_sort is None:
        index_sort = np.arange(ds_index.shape[0])
    else:
        if not contains_integers(index_sort, min_val=0):
            raise ValueError('index_sort should contain integers > 0')
        index_sort = np.array(index_sort)
        if index_sort.ndim != 1:
            raise ValueError('index_sort should be a 1D array')
        if len(np.unique(index_sort)) > ds_index.shape[0]:
            # Bug fix: the original message ended with a bare ' ({' which made
            # str.format itself raise "Single '{' encountered in format string"
            # instead of reporting this error.
            raise ValueError('length of index_sort ({}) should be smaller than number of dimensions in provided dataset'
                             ' ({})'.format(len(np.unique(index_sort)), ds_index.shape[0]))
        if set(np.arange(ds_index.shape[0])) != set(index_sort):
            raise ValueError('Sort order of dimensions ({}) not matching with number of dimensions ({})'
                             ''.format(index_sort, ds_index.shape[0]))

    # Number of unique index values per row = size of that dimension.
    sorted_dims = [len(np.unique(row)) for row in np.array(ds_index, ndmin=2)[index_sort]]
    return sorted_dims
def get_sort_order(ds_spec):
    """
    Find how quickly the spectroscopic values are changing in each row
    and the order of rows from fastest changing to slowest.

    Parameters
    ----------
    ds_spec : 2D HDF5 dataset or numpy array
        Rows of indices to be sorted from fastest changing to slowest

    Returns
    -------
    change_sort : List of unsigned integers
        Order of rows sorted from fastest changing to slowest
    """
    if isinstance(ds_spec, da.core.Array):
        ds_spec = ds_spec.compute()
    if not isinstance(ds_spec, (np.ndarray, h5py.Dataset)):
        raise TypeError('ds_spec should either be a numpy array or h5py.Dataset')

    if ds_spec.shape[0] > ds_spec.shape[1]:
        # must be spectroscopic like in shape (few rows, more cols)
        ds_spec = np.transpose(ds_spec)

    def _num_changes(row):
        # Count positions where the value differs from its predecessor
        # (index 0 compares against the last element, as in the original).
        flips = [row[i] != row[i - 1] for i in range(len(row))]
        return len(np.where(flips)[0])

    change_count = [_num_changes(row) for row in ds_spec]
    # Most changes first = fastest varying dimension first.
    return np.argsort(change_count)[::-1]
def get_unit_values(ds_inds, ds_vals, dim_names=None, all_dim_names=None, is_spec=None, verbose=False):
    """
    Gets the unit arrays of values that describe the spectroscopic dimensions

    Parameters
    ----------
    ds_inds : h5py.Dataset or numpy.ndarray
        Spectroscopic or Position Indices dataset
    ds_vals : h5py.Dataset or numpy.ndarray
        Spectroscopic or Position Values dataset
    dim_names : str, or list of str, Optional
        Names of the dimensions of interest. Default = all
    all_dim_names : list of str, Optional
        Names of all the dimensions in these datasets. Use this if supplying numpy arrays instead of h5py.Dataset
        objects for h5_inds, h5_vals since there is no other way of getting the dimension names.
    is_spec : bool, optional
        Whether or not the provided ancillary datasets are position or spectroscopic
        The user is recommended to supply this parameter whenever it is known
        By default, this function will attempt to recognize the answer based on the shape of the datasets.
    verbose : bool, optional
        Whether or not to print debugging statements. Default - off

    Note - this function can be extended / modified for ancillary position dimensions as well

    Returns
    -------
    unit_values : dict
        Dictionary containing the unit array for each dimension. The name of the dimensions are the keys.
    """
    if all_dim_names is None:
        # Dimension names must then come from the 'labels' attribute, so the
        # inputs must be h5py Datasets.
        allowed_types = h5py.Dataset
    else:
        all_dim_names = validate_list_of_strings(all_dim_names, 'all_dim_names')
        all_dim_names = np.array(all_dim_names)
        allowed_types = (h5py.Dataset, np.ndarray)

    for dset, dset_name in zip([ds_inds, ds_vals], ['ds_inds', 'ds_vals']):
        if not isinstance(dset, allowed_types):
            raise TypeError(dset_name + ' should be of type: {}'.format(allowed_types))

    # For now, we will throw an error if even a single dimension is listed as an incomplete dimension:
    if isinstance(ds_inds, h5py.Dataset):
        if np.any(['incomplete_dimensions' in dset.attrs.keys() for dset in [ds_inds, ds_vals]]):
            try:
                incomp_dims_inds = get_attr(ds_inds, 'incomplete_dimensions')
            except KeyError:
                incomp_dims_inds = None
            try:
                incomp_dims_vals = get_attr(ds_vals, 'incomplete_dimensions')
            except KeyError:
                incomp_dims_vals = None
            if incomp_dims_inds is None and incomp_dims_vals is not None:
                incomp_dims = incomp_dims_vals
            elif incomp_dims_inds is not None and incomp_dims_vals is None:
                incomp_dims = incomp_dims_inds
            else:
                # ensure that both attributes are the same
                if incomp_dims_vals != incomp_dims_inds:
                    raise ValueError('Provided indices ({}) and values ({}) datasets were marked with different values '
                                     'for incomplete_datasets.'.format(incomp_dims_inds, incomp_dims_vals))
                incomp_dims = incomp_dims_vals
            all_dim_names = get_attr(ds_inds, 'labels')
            raise ValueError('Among all dimensions: {}, These dimensions were marked as incomplete dimensions: {}'
                             '. You are recommended to find unit values manually'.format(all_dim_names, incomp_dims))

    # Do we need to check that the provided inds and vals correspond to the same main dataset?
    if ds_inds.shape != ds_vals.shape:
        raise ValueError('h5_inds: {} and h5_vals: {} should have the same shapes'.format(ds_inds.shape, ds_vals.shape))

    if all_dim_names is None:
        all_dim_names = get_attr(ds_inds, 'labels')
    if verbose:
        print('All dimensions: {}'.format(all_dim_names))

    # First load to memory
    inds_mat = ds_inds[()]
    vals_mat = ds_vals[()]

    if is_spec is None:
        # Attempt to recognize the type automatically
        # (spectroscopic matrices have more columns than rows).
        is_spec = False
        if inds_mat.shape[0] < inds_mat.shape[1]:
            is_spec = True
    else:
        if not isinstance(is_spec, bool):
            raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))

    if verbose:
        print(
            'Ancillary matrices of shape: {}, hence determined to be Spectroscopic:{}'.format(inds_mat.shape, is_spec))

    if not is_spec:
        # Convert to spectral shape
        inds_mat = np.transpose(inds_mat)
        vals_mat = np.transpose(vals_mat)

    if len(all_dim_names) != inds_mat.shape[0]:
        raise ValueError('Length of dimension names list: {} not matching with shape of dataset: {}'
                         '.'.format(len(all_dim_names), inds_mat.shape[0]))

    if dim_names is None:
        dim_names = all_dim_names
        if verbose:
            print('Going to return unit values for all dimensions: {}'.format(all_dim_names))
    else:
        dim_names = validate_list_of_strings(dim_names, 'dim_names')

        if verbose:
            print('Checking to make sure that the target dimension names: {} exist in the datasets attributes: {}'
                  '.'.format(dim_names, all_dim_names))

        # check to make sure that the dimension names exist in the datasets:
        for dim_name in dim_names:
            if dim_name not in all_dim_names:
                raise KeyError('Dimension {} does not exist in the provided ancillary datasets'.format(dim_name))

    unit_values = dict()
    for dim_name in all_dim_names:
        # Find the row in the spectroscopic indices that corresponds to the dimensions we want to slice:
        if verbose:
            print('Looking for dimension: {} in {}'.format(dim_name, dim_names))
        desired_row_ind = np.where(all_dim_names == dim_name)[0][0]
        inds_for_dim = inds_mat[desired_row_ind]

        # Wherever this dimension goes to 0 - start of a new tile
        starts = np.where(inds_for_dim == np.min(inds_for_dim))[0]
        if starts[0] != 0:
            raise ValueError('Spectroscopic Indices for dimension: "{}" not '
                             'starting with 0. Please fix this and try again'
                             '.'.format(dim_name))

        # There may be repetitions in addition to tiling. Find how the the positions increase.
        # 1 = repetition, > 1 = new tile
        step_sizes = np.hstack(([1], np.diff(starts)))
        # This array is of the same length as the full indices array

        # We should expect only two values of step sizes for a regular dimension (tiles of the same size):
        # 1 for same value repeating and a big jump in indices when the next tile starts
        # If the repeats / tiles are of different lengths, then this is not a regular dimension.
        # What does a Unit Values vector even mean in this case? Just raise an error for now
        if np.where(np.unique(step_sizes) - 1)[0].size > 1:
            raise ValueError('Non constant step sizes')

        # Finding Start of a new tile
        tile_starts = np.where(step_sizes > 1)[0]

        # converting these indices to correct indices that can be mapped straight to
        if len(tile_starts) < 1:
            # Dimension(s) with no tiling at all
            # Make it look as though the next tile starts at the end of the whole indices vector
            tile_starts = np.array([0, len(inds_for_dim)])
        else:
            # Dimension with some form of repetition
            tile_starts = np.hstack(([0], starts[tile_starts]))

            # Verify that each tile is identical here
            # Last tile will not be checked unless we add the length of the indices vector as the start of next tile
            tile_starts = np.hstack((tile_starts, [len(inds_for_dim)]))
            subsections = [inds_for_dim[tile_starts[ind]: tile_starts[ind + 1]] for ind in range(len(tile_starts) - 1)]
            if np.max(np.diff(subsections, axis=0)) != 0:
                # Should get unit values for ALL dimensions regardless of expectations to catch such scenarios.
                raise ValueError('Values in each tile of dimension: {} are different'.format(dim_name))

        # Now looking within the first tile:
        subsection = inds_for_dim[tile_starts[0]:tile_starts[1]]
        # remove all repetitions. ie - take indices only where jump == 1
        step_inds = np.hstack(([0], np.where(np.hstack(([0], np.diff(subsection))))[0]))

        # Finally, use these indices to get the values
        if dim_name in dim_names:
            # Only add this dimension to dictionary if requested.
            unit_values[dim_name] = vals_mat[desired_row_ind, step_inds]

    return unit_values
def write_main_dataset(h5_parent_group, main_data, main_data_name, quantity, units, pos_dims, spec_dims,
                       main_dset_attrs=None, h5_pos_inds=None, h5_pos_vals=None, h5_spec_inds=None, h5_spec_vals=None,
                       aux_spec_prefix='Spectroscopic_', aux_pos_prefix='Position_', verbose=False,
                       slow_to_fast=False, **kwargs):
    """
    Writes the provided data as a 'Main' dataset with all appropriate linking.
    By default, the instructions for generating the ancillary datasets should be specified using the pos_dims and
    spec_dims arguments as dictionary objects. Alternatively, if both the indices and values datasets are already
    available for either/or the positions / spectroscopic, they can be specified using the keyword arguments. In this
    case, fresh datasets will not be generated.
    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group`
        Parent group under which the datasets will be created
    main_data : numpy.ndarray, dask.array.core.Array, list or tuple
        2D matrix formatted as [position, spectral] or a list / tuple with the shape for an empty dataset.
        If creating an empty dataset - the dtype must be specified via a kwarg.
    main_data_name : String / Unicode
        Name to give to the main dataset. This cannot contain the '-' character.
    quantity : String / Unicode
        Name of the physical quantity stored in the dataset. Example - 'Current'
    units : String / Unicode
        Name of units for the quantity stored in the dataset. Example - 'A' for amperes
    pos_dims : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
        Object specifying the instructions necessary for building the Position indices and values datasets
    spec_dims : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
        Object specifying the instructions necessary for building the Spectroscopic indices and values datasets
    main_dset_attrs : dictionary, Optional
        Dictionary of parameters that will be written to the main dataset. Do NOT include region references here.
    h5_pos_inds : h5py.Dataset, Optional
        Dataset that will be linked with the name "Position_Indices"
    h5_pos_vals : h5py.Dataset, Optional
        Dataset that will be linked with the name "Position_Values"
    h5_spec_inds : h5py.Dataset, Optional
        Dataset that will be linked with the name "Spectroscopic_Indices"
    h5_spec_vals : h5py.Dataset, Optional
        Dataset that will be linked with the name "Spectroscopic_Values"
    aux_spec_prefix : str or unicode, Optional
        Default prefix for Spectroscopic datasets. Default = "Spectroscopic"
    aux_pos_prefix : str or unicode, Optional
        Default prefix for Position datasets. Default = "Position"
    verbose : bool, Optional, default=False
        If set to true - prints debugging logs
    slow_to_fast : bool, Optional. Default=False
        Set to True if the dimensions are arranged from slowest varying to fastest varying.
        Set to False otherwise.
    kwargs will be passed onto the creation of the dataset. Please pass chunking, compression, dtype, and other
    arguments this way
    Returns
    -------
    h5_main : USIDataset
        Reference to the main dataset
    """
    # Helper: sanitize an ancillary-dataset prefix and make sure that no
    # clashing "<prefix>Indices" / "<prefix>Values" datasets already exist
    # in the destination group.
    def __check_anc_before_creation(aux_prefix, dim_type='pos'):
        aux_prefix = validate_single_string_arg(aux_prefix, 'aux_' + dim_type + '_prefix')
        if not aux_prefix.endswith('_'):
            aux_prefix += '_'
        if '-' in aux_prefix:
            warn('aux_' + dim_type + ' should not contain the "-" character. Reformatted name from:{} to '
                 '{}'.format(aux_prefix, aux_prefix.replace('-', '_')))
            aux_prefix = aux_prefix.replace('-', '_')
        for dset_name in [aux_prefix + 'Indices', aux_prefix + 'Values']:
            if dset_name in h5_parent_group.keys():
                # TODO: What if the contained data was correct?
                raise KeyError('Dataset named: ' + dset_name + ' already exists in group: '
                               '{}. Consider passing these datasets using kwargs (if they are correct) instead of providing the pos_dims and spec_dims arguments'.format(h5_parent_group.name))
        return aux_prefix
    # Helper: verify that a provided indices/values pair lives in the same
    # HDF5 file; copy both into the destination file when they do not.
    def __ensure_anc_in_correct_file(h5_inds, h5_vals, prefix):
        if h5_inds.file != h5_vals.file:
            raise ValueError('Provided ' + prefix + ' datasets are present in different HDF5 files!')
        if h5_inds.file != h5_parent_group.file:
            # Need to copy over the anc datasets to the new group
            if verbose:
                print('Need to copy over ancillary datasets: {} and {} to '
                      'destination group: {} which is in a different HDF5 '
                      'file'.format(h5_inds, h5_vals, h5_parent_group))
            ret_vals = [copy_dataset(x, h5_parent_group, verbose=verbose) for x in [h5_inds, h5_vals]]
        else:
            ret_vals = [h5_inds, h5_vals]
        return tuple(ret_vals)
    # ---------- validation of simple arguments ----------
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or h5py.Group object')
    if not is_editable_h5(h5_parent_group):
        raise ValueError('The provided file is not editable')
    if verbose:
        print('h5 group and file OK')
    quantity, units, main_data_name = validate_string_args([quantity, units, main_data_name],
                                                           ['quantity', 'units', 'main_data_name'])
    if verbose:
        print('quantity, units, main_data_name all OK')
    quantity = quantity.strip()
    units = units.strip()
    main_data_name = main_data_name.strip()
    # '-' is reserved (used in region references), so it is replaced rather
    # than rejected outright.
    if '-' in main_data_name:
        warn('main_data_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(main_data_name, main_data_name.replace('-', '_')))
        main_data_name = main_data_name.replace('-', '_')
    # main_data may either be a shape (for an empty dataset) or actual data
    if isinstance(main_data, (list, tuple)):
        if not contains_integers(main_data, min_val=1):
            raise ValueError('main_data if specified as a shape should be a list / tuple of integers >= 1')
        if len(main_data) != 2:
            raise ValueError('main_data if specified as a shape should contain 2 numbers')
        if 'dtype' not in kwargs:
            raise ValueError('dtype must be included as a kwarg when creating an empty dataset')
        _ = validate_dtype(kwargs.get('dtype'))
        main_shape = main_data
        if verbose:
            print('Selected empty dataset creation. OK so far')
    elif isinstance(main_data, (np.ndarray, da.core.Array)):
        if main_data.ndim != 2:
            raise ValueError('main_data should be a 2D array')
        main_shape = main_data.shape
        if verbose:
            print('Provided numpy or Dask array for main_data OK so far')
    else:
        raise TypeError('main_data should either be a numpy array or a tuple / list with the shape of the data')
    # ---------- Position ancillary datasets: reuse provided ones or build fresh ----------
    if h5_pos_inds is not None and h5_pos_vals is not None:
        # The provided datasets override fresh building instructions.
        validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, main_shape, is_spectroscopic=False)
        if verbose:
            print('The shapes of the provided h5 position indices and values are OK')
        h5_pos_inds, h5_pos_vals = __ensure_anc_in_correct_file(h5_pos_inds, h5_pos_vals, 'Position')
    else:
        aux_pos_prefix = __check_anc_before_creation(aux_pos_prefix, dim_type='pos')
        pos_dims = validate_dimensions(pos_dims, dim_type='Position')
        validate_dims_against_main(main_shape, pos_dims, is_spectroscopic=False)
        if verbose:
            print('Passed all pre-tests for creating position datasets')
        h5_pos_inds, h5_pos_vals = write_ind_val_dsets(h5_parent_group, pos_dims, is_spectral=False, verbose=verbose,
                                                       slow_to_fast=slow_to_fast, base_name=aux_pos_prefix)
        if verbose:
            print('Created position datasets!')
    # ---------- Spectroscopic ancillary datasets: same logic as Position ----------
    if h5_spec_inds is not None and h5_spec_vals is not None:
        # The provided datasets override fresh building instructions.
        validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, main_shape, is_spectroscopic=True)
        if verbose:
            print('The shapes of the provided h5 position indices and values '
                  'are OK')
        h5_spec_inds, h5_spec_vals = __ensure_anc_in_correct_file(h5_spec_inds, h5_spec_vals,
                                                                  'Spectroscopic')
    else:
        aux_spec_prefix = __check_anc_before_creation(aux_spec_prefix, dim_type='spec')
        spec_dims = validate_dimensions(spec_dims, dim_type='Spectroscopic')
        validate_dims_against_main(main_shape, spec_dims, is_spectroscopic=True)
        if verbose:
            print('Passed all pre-tests for creating spectroscopic datasets')
        h5_spec_inds, h5_spec_vals = write_ind_val_dsets(h5_parent_group, spec_dims, is_spectral=True, verbose=verbose,
                                                         slow_to_fast=slow_to_fast, base_name=aux_spec_prefix)
        if verbose:
            print('Created Spectroscopic datasets')
    # mpi4py cannot create compressed datasets - strip the kwarg if present
    if h5_parent_group.file.driver == 'mpio':
        if kwargs.pop('compression', None) is not None:
            warn('This HDF5 file has been opened wth the "mpio" communicator. '
                 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
    # ---------- create the main dataset (three cases) ----------
    if isinstance(main_data, np.ndarray):
        # Case 1 - simple small dataset
        h5_main = h5_parent_group.create_dataset(main_data_name, data=main_data, **kwargs)
        if verbose:
            print('Created main dataset with provided data')
    elif isinstance(main_data, da.core.Array):
        # Case 2 - Dask dataset
        # step 0 - get rid of any automated dtype specification:
        _ = kwargs.pop('dtype', None)
        # step 1 - create the empty dataset:
        h5_main = h5_parent_group.create_dataset(main_data_name, shape=main_data.shape, dtype=main_data.dtype,
                                                 **kwargs)
        if verbose:
            print('Created empty dataset: {} for writing Dask dataset: {}'.format(h5_main, main_data))
            print('Dask array will be written to HDF5 dataset: "{}" in file: "{}"'.format(h5_main.name,
                                                                                          h5_main.file.filename))
        # Step 2 - now ask Dask to dump data to disk
        da.to_hdf5(h5_main.file.filename, {h5_main.name: main_data})
        # main_data.to_hdf5(h5_main.file.filename, h5_main.name)  # Does not work with python 2 for some reason
    else:
        # Case 3 - large empty dataset
        h5_main = h5_parent_group.create_dataset(main_data_name, main_data, **kwargs)
        if verbose:
            print('Created empty dataset for Main')
    # ---------- attach metadata and link ancillary datasets ----------
    write_simple_attrs(h5_main, {'quantity': quantity, 'units': units})
    if verbose:
        print('Wrote quantity and units attributes to main dataset')
    if isinstance(main_dset_attrs, dict):
        write_simple_attrs(h5_main, main_dset_attrs)
        if verbose:
            print('Wrote provided attributes to main dataset')
    write_book_keeping_attrs(h5_main)
    # make it main
    link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
    if verbose:
        print('Successfully linked datasets - dataset should be main now')
    from ..usi_data import USIDataset
    return USIDataset(h5_main)
def map_grid_to_cartesian(h5_main, grid_shape, mode='histogram', **kwargs):
    """
    Map an incomplete measurement, such as a spiral scan, to a cartesian grid.
    Parameters
    ----------
    h5_main : :class:`pyUSID.USIDataset`
        Dataset containing the sparse measurement
    grid_shape : int or [int, int]
        Shape of the output :class:`numpy.ndarray`.
    mode : str, optional. Default = 'histogram'
        Method used for building a cartesian grid.
        Available methods = 'histogram', 'linear', 'nearest', 'cubic'
        Use kwargs to pass onto each of the techniques
    Note
    ----
    UNDER DEVELOPMENT!
    Currently only valid for 2 position dimensions
    @author: Patrik Marschalik
    Returns
    -------
    :class:`numpy.ndarray` but could be a h5py.Dataset or dask.array.core.Array object
    """
    # scipy is an optional dependency of this function only
    try:
        from scipy.interpolate import griddata
    except ImportError as expn:
        griddata = None
        warn('map_grid_to_cartesian() requires scipy')
        raise expn
    from ..usi_data import USIDataset
    if not isinstance(h5_main, USIDataset):
        raise TypeError('Provided object is not a pyUSID.USIDataset object')
    valid_modes = ['histogram', 'linear', 'nearest', 'cubic']
    if mode not in valid_modes:
        # Bug fix: the old error message listed only two of the four valid modes
        raise ValueError('mode must be a string among {}'.format(valid_modes))
    ds_main = h5_main[()].squeeze()
    ds_pos_vals = h5_main.h5_pos_vals[()]
    if ds_pos_vals.shape[1] != 2:
        raise TypeError("Only working for 2 position dimensions.")
    # Transform to row, col image format (90 degree rotation of coordinates)
    rotation = np.array([[0, 1], [-1, 0]])
    ds_pos_vals = np.dot(ds_pos_vals, rotation)
    # Normalize grid_shape to a [rows, cols] pair
    try:
        grid_n = len(grid_shape)
    except TypeError:
        grid_n = 1
    if grid_n != 1 and grid_n != 2:
        raise ValueError("grid_shape must be of type int or [int, int].")
    if grid_n == 1:
        grid_shape = 2 * [grid_shape]

    def interpolate(points, values, grid_shape, method):
        # mgrid interprets an imaginary step as "number of points" - hence 1j
        grid_shape = list(map((1j).__mul__, grid_shape))
        grid_x, grid_y = np.mgrid[
            np.amin(points[:, 0]):np.amax(points[:, 0]):grid_shape[0],
            np.amin(points[:, 1]):np.amax(points[:, 1]):grid_shape[1]
        ]
        ndim_data = griddata(points, values, (grid_x, grid_y), method=method)
        return ndim_data
    if mode == "histogram":
        # Weighted histogram divided by the occupancy histogram gives the mean
        # value per bin; empty bins yield NaN (0/0) by design.
        histogram_weighted, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape, weights=ds_main)
        histogram, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape)
        cart_data = np.divide(histogram_weighted, histogram)
    else:
        cart_data = interpolate(ds_pos_vals, ds_main, grid_shape, method=mode)
    return cart_data
def write_sidpy_dataset(si_dset, h5_parent_group, verbose=False,
                        **kwargs):
    """
    Writes a sidpy.Dataset as a USID dataset in the provided HDF5 Group.
    Please see notes about dimension types
    Parameters
    ----------
    si_dset: sidpy.Dataset
        Dataset to be written to HDF5 in NSID format
    h5_parent_group : class:`h5py.Group`
        Parent group under which the datasets will be created
    verbose : bool, Optional. Default = False
        Whether or not to write logs to standard out
    kwargs: dict
        additional keyword arguments passed on to h5py when writing data
    Returns
    ------
    h5_main : USIDataset
        Reference to the main dataset
    Notes
    -----
    USID only has two dimension types - Position and Spectroscopic.
    Consider changing the types of dimensions of all other dimensions to either
    "SPATIAL" or "SPECTRAL".
    """
    if not isinstance(si_dset, sid.Dataset):
        raise TypeError('Data to write is not a sidpy dataset')
    if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError('h5_parent_group is not a h5py.File or '
                        'h5py.Group object')
    # Partition the sidpy dimensions into Position (SPATIAL) and Spectroscopic
    # buckets; any non-SPATIAL, non-SPECTRAL dimension is treated as
    # spectroscopic with a warning.
    spatial_dims, spectral_dims, spatial_size, spectral_size = [], [], 1, 1
    for dim_ind, dime in si_dset._axes.items():
        if dime._dimension_type == sid.DimensionType.SPATIAL:
            spatial_dims.append(Dimension(dime._name,
                                          dime._units,
                                          dime.values,
                                          dime._quantity,
                                          dime._dimension_type))
            spatial_size *= np.size(dime.values)
        else:
            if not dime._dimension_type == sid.DimensionType.SPECTRAL:
                warn('Will consider dimension: {} of type: {} as a '
                     'spectroscopic dimension'.format(dime._name,
                                                      dime._dimension_type))
            spectral_dims.append(Dimension(dime._name,
                                           dime._units,
                                           dime.values,
                                           dime._quantity,
                                           dime._dimension_type))
            spectral_size *= np.size(dime.values)
    # Flatten the N-dimensional dataset into the 2D USID [position, spectral] form
    main_dataset = da.reshape(si_dset, [spatial_size, spectral_size])
    # TODO : Consider writing this out as a separate group
    # NOTE(review): this harvests every dict-valued attribute found via dir()
    # on the sidpy dataset and merges them into one flat attribute dict -
    # key collisions between such dicts are silently resolved by update order.
    main_dset_attr = {}
    for attr_name in dir(si_dset):
        attr_val = getattr(si_dset, attr_name)
        if isinstance(attr_val, dict):
            main_dset_attr.update(attr_val)
    # Delegate the actual HDF5 writing and ancillary-dataset linking
    h5_main = write_main_dataset(h5_parent_group=h5_parent_group,
                                 main_data=main_dataset,
                                 main_data_name=si_dset.name,
                                 quantity=si_dset.quantity,
                                 units=si_dset.units,
                                 pos_dims=spatial_dims,
                                 spec_dims=spectral_dims,
                                 main_dset_attrs=flatten_dict(main_dset_attr),
                                 slow_to_fast=True,
                                 verbose=verbose,
                                 **kwargs)
    return h5_main
| 45.303085
| 223
| 0.640834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23,168
| 0.464065
|
0cbd80d538ed5aeecd342647472ca2c49593352a
| 3,110
|
py
|
Python
|
TaxPy/data_processing/export_reads.py
|
stenglein-lab/TaxAssessor
|
144599d1395627c4e86ab68a4d6d3e0785e606f0
|
[
"MIT"
] | null | null | null |
TaxPy/data_processing/export_reads.py
|
stenglein-lab/TaxAssessor
|
144599d1395627c4e86ab68a4d6d3e0785e606f0
|
[
"MIT"
] | 2
|
2016-11-29T19:48:27.000Z
|
2016-12-09T17:18:56.000Z
|
TaxPy/data_processing/export_reads.py
|
stenglein-lab/TaxAssessor
|
144599d1395627c4e86ab68a4d6d3e0785e606f0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import json
import timeit
import re
import TaxPy.db_management.db_wrap as TaxDb
from itertools import izip
def retrieveReads(userName,fileName,fileId,parentTaxId,query):
    # Top-level pipeline: load the user's cached taxonomy tree, locate the
    # subtree rooted at parentTaxId, collect all descendant taxIds, and fetch
    # the matching alignment rows from the database.
    # Per-stage wall-clock timings are printed to stdout (Python 2 syntax).
    time1 = timeit.default_timer()
    taxTree = loadTaxTree(userName,fileName)
    time2 = timeit.default_timer()
    print str(time2-time1)+" seconds loading tree"
    status,subTree = findSubTree(taxTree,parentTaxId)
    time3 = timeit.default_timer()
    print str(time3-time2)+" finding subtree"
    children = findChildren(subTree,[])
    time4 = timeit.default_timer()
    print str(time4-time3)+" finding children"
    # status here is the row count (as a string) reported by getReadLines
    readLines,status = getReadLines(children,fileId,query)
    time5 = timeit.default_timer()
    print str(time5-time4)+" getting read lines"
    return readLines,status
def findSubTree(tree,parentTaxId,found=False):
    """Depth-first search for the node whose taxId equals parentTaxId.

    Returns a (found, subtree) pair; subtree is None when nothing matched.
    The `found` flag short-circuits the search once a match is known.
    """
    if found or int(tree["taxId"]) == int(parentTaxId):
        return True, tree
    hit, match = found, None
    try:
        for branch in tree["children"]:
            hit, match = findSubTree(branch, parentTaxId)
            if hit:
                return True, match
    except KeyError:
        # Leaf node: no "children" key
        pass
    return hit, match
def findChildren(tree,children):
    """Accumulate the taxIds of `tree` and every descendant into `children`.

    The list is mutated in place (pre-order traversal) and also returned.
    """
    children.append(tree["taxId"])
    try:
        for subtree in tree["children"]:
            children = findChildren(subtree, children)
    except KeyError:
        # Leaf node: no "children" key
        pass
    return children
def loadTaxTree(userName,fileName):
    """Load the cached taxonomy tree JSON for a user's uploaded file."""
    treePath = "uploads/" + userName + "/" + fileName + "_tree.json"
    with open(treePath, "r") as handle:
        return json.load(handle)
def getReadLines(children,fileId,query):
    """Fetch the `query` column for every alignment row whose taxId is in
    `children`, plus the total matching row count.

    Parameters: children - list of taxIds; fileId - internal alignment table
    name; query - column expression to select.
    Returns (readLines, str(nRows)).
    """
    readLines = []
    with TaxDb.openDbSS("TaxAssessor_Alignments") as db, \
         TaxDb.cursor(db) as cur:
        # Bind the taxIds as DB-API parameters (consistent with
        # getReadsForTaxIds) instead of splicing str(list) into the SQL.
        # fileId is an internal table name and cannot be parameterized.
        placeholders = ", ".join(["%s"] * len(children))
        cmd = "SELECT COUNT(*) FROM "+fileId+" WHERE taxId IN ("+placeholders+")"
        cur.execute(cmd, children)
        nRows = cur.fetchall()[0][0]
        cmd = "SELECT "+query+" FROM "+fileId+" WHERE taxId IN ("+placeholders+");"
        cur.execute(cmd, children)
        for line in cur:
            readLines.append(line[0])
    return readLines,str(nRows)
def getReadsForTaxIds(userName,fileName,fileId,taxIds,query):
    """Return the `query` column for all alignment rows whose taxId is in
    `taxIds`.

    userName and fileName are unused but kept for interface compatibility
    with the other retrieval helpers in this module.
    """
    readLines = []
    with TaxDb.openDbSS("TaxAssessor_Alignments") as db, \
         TaxDb.cursor(db) as cur:
        # Build one %s placeholder per taxId; values are bound by the driver.
        # fileId is an internal table name and cannot be parameterized.
        placeholders = ", ".join(["%s"] * len(taxIds))
        cmd = "SELECT "+query+" FROM "+fileId+" WHERE taxId IN ("+placeholders+")"
        cur.execute(cmd, taxIds)
        for line in cur:
            readLines.append(line[0])
    return readLines
def getReadsForGiInTaxId(userName,fileName,fileId,taxId,seqId,query):
    """Return the `query` column for alignment rows matching both taxId and
    seqId.

    userName and fileName are unused but kept for interface compatibility
    with the other retrieval helpers in this module.
    """
    readLines = []
    with TaxDb.openDbSS("TaxAssessor_Alignments") as db, \
         TaxDb.cursor(db) as cur:
        # taxId/seqId are bound as parameters; fileId is an internal table
        # name and cannot be parameterized.
        cmd = "SELECT "+query+" FROM "+fileId+" WHERE taxId=%s AND seqId=%s"
        cur.execute(cmd,(taxId,seqId))
        for line in cur:
            readLines.append(line[0])
    return readLines
| 27.280702
| 76
| 0.618971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 417
| 0.134084
|
0cbdc5e7cc5bd19da3d1e30a35d3c1cd8334e753
| 1,209
|
py
|
Python
|
python/caliper-reader/setup.py
|
slabasan/Caliper
|
85601f48e7f883fb87dec85e92c849eec2bb61f7
|
[
"BSD-3-Clause"
] | 220
|
2016-01-19T19:00:10.000Z
|
2022-03-29T02:09:39.000Z
|
python/caliper-reader/setup.py
|
slabasan/Caliper
|
85601f48e7f883fb87dec85e92c849eec2bb61f7
|
[
"BSD-3-Clause"
] | 328
|
2016-05-12T15:47:30.000Z
|
2022-03-30T19:42:02.000Z
|
python/caliper-reader/setup.py
|
slabasan/Caliper
|
85601f48e7f883fb87dec85e92c849eec2bb61f7
|
[
"BSD-3-Clause"
] | 48
|
2016-03-04T22:04:39.000Z
|
2021-12-18T12:11:43.000Z
|
# Copyright (c) 2020-2021, Lawrence Livermore National Security, LLC.
# See top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# Packaging script for the caliper-reader library.
import setuptools
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# Get the version in a safe way which does not refrence the `__init__` file
# per python docs: https://packaging.python.org/guides/single-sourcing-package-version/
version = {}
with open("./caliperreader/version.py") as fp:
    exec(fp.read(), version)
setuptools.setup(
    name="caliper-reader",
    version=version["__version__"],
    description="A Python library for reading Caliper .cali files",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/LLNL/Caliper",
    author="David Boehme",
    author_email="boehme3@llnl.gov",
    license="BSD-3-Clause",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: BSD License",
    ],
    packages=setuptools.find_packages()
)
| 31.815789
| 87
| 0.715467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 664
| 0.549214
|
0cbded3b957b5b9247296e61a096662c54742d11
| 774
|
py
|
Python
|
programming_fundamentals/python_part_2/common_vars.py
|
tobaidullah/2
|
3fa67855ef461ccaee283dcbbdd9bf00e7a52378
|
[
"MIT"
] | 629
|
2017-12-15T20:26:13.000Z
|
2022-03-30T04:02:02.000Z
|
programming_fundamentals/python_part_2/common_vars.py
|
tobaidullah/2
|
3fa67855ef461ccaee283dcbbdd9bf00e7a52378
|
[
"MIT"
] | 40
|
2018-01-18T09:07:50.000Z
|
2021-09-23T23:21:47.000Z
|
programming_fundamentals/python_part_2/common_vars.py
|
tobaidullah/2
|
3fa67855ef461ccaee283dcbbdd9bf00e7a52378
|
[
"MIT"
] | 394
|
2017-12-18T22:35:36.000Z
|
2022-03-29T19:41:25.000Z
|
#! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Programming Fundamentals
Lesson: Python Part 2
Author: Hank Preston <hapresto@cisco.com>
common_vars.py
Illustrate the following concepts:
 - Code reuse
   imported into other examples
"""
# Sample list of strings
shapes = ["square", "triangle", "circle"]
# Sample list of dictionaries: each record models a library book
books = [
    {
        "title": "War and Peace",
        "shelf": 3,
        "available": True
    },
    {
        "title": "Hamlet",
        "shelf": 1,
        "available": False
    },
    {
        "title": "Harold and the Purple Crayon",
        "shelf": 2,
        "available": True
    }
]
# Sample list of strings
colors = ["blue", "green", "red"]
| 22.764706
| 56
| 0.485788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 440
| 0.568475
|
0cc0715c89b9cf37ccc8268295889e035e429cd7
| 4,118
|
py
|
Python
|
forms.py
|
godsgift/gdohs
|
fc7fa4e010b7c508c3c1154255fa2ded0534fb1d
|
[
"MIT"
] | null | null | null |
forms.py
|
godsgift/gdohs
|
fc7fa4e010b7c508c3c1154255fa2ded0534fb1d
|
[
"MIT"
] | null | null | null |
forms.py
|
godsgift/gdohs
|
fc7fa4e010b7c508c3c1154255fa2ded0534fb1d
|
[
"MIT"
] | null | null | null |
from flask_wtf import Form
from wtforms import TextField, PasswordField, validators, IntegerField, BooleanField, SelectField, SubmitField
from wtforms.validators import Required, Length, Email, ValidationError, Regexp, EqualTo, NumberRange
from wtforms.widgets import SubmitInput
class SignUp(Form):
    """Registration form: username, password, email and real name.

    All fields reject whitespace via the same `^[\\w.@+-]+$` pattern.
    """
    username = TextField("Username", validators=[Required("Please provide a username without any spaces"),
        Length(min=4, max=20), Regexp(r'^[\w.@+-]+$', message="Please provide a username without any spaces")])
    password = PasswordField("Password", validators=[Required("Please pick a secure password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces")])
    email = TextField("Email", validators=[Required("Please provide a valid email address"),
        Length(min=6, max=35), Email(message="That is not a valid email address"),
        Regexp(r'^[\w.@+-]+$', message="Please provide an email without any spaces")])
    firstname = TextField("First Name", validators=[Required("Please provide your first name"),
        Regexp(r'^[\w.@+-]+$', message="Please enter your first name without any spaces")])
    lastname = TextField("Last Name", validators=[Required("Please provide your last name"),
        Regexp(r'^[\w.@+-]+$', message="Please enter your last name without any spaces")])
class Login(Form):
    """Login form: username and password (no whitespace allowed)."""
    username = TextField("Username", validators=[Required("Please provide a username without any spaces"),
        Length(min=4, max=20), Regexp(r'^[\w.@+-]+$', message="Please provide a username without any spaces")])
    password = PasswordField("Password", validators=[Required("Please pick a secure password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces")])
class ForgotPassword(Form):
    """Password-reset request form: just the account email address."""
    email = TextField("Email", validators=[Required("Please provide a valid email address"),
        Length(min=6, max=35), Email(message="That is not a valid email address"),
        Regexp(r'^[\w.@+-]+$', message="Please provide an email without any spaces")])
class NewPassword(Form):
    """New-password form used after a reset: password plus confirmation."""
    password = PasswordField("Password", validators=[Required("Please pick a secure password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces")])
    confirm_password = PasswordField("Confirm Password", validators=[Required("Required"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces"),
        EqualTo("password", message="Passwords must match")])
class ChangePassword(Form):
    """Password-change form: current password, new password, confirmation."""
    current_password = PasswordField("Current Password", validators=[Required("Please type in your current password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces")])
    password = PasswordField("New Password", validators=[Required("Please pick a secure password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces")])
    confirm_password = PasswordField("Confirm Password", validators=[Required("Password must match with new password"),
        Regexp(r'^[\w.@+-]+$', message="Please provide a password without any spaces"),
        EqualTo("password", message="Password must match with new password")])
class CamSettings(Form):
    """Camera configuration form: brightness, resolution and flip toggles."""
    brightness = IntegerField("Brightness", default=50, validators=[Required("Please choose a number between 0 and 100"),
        NumberRange(min=0, max=100, message="Please choose a number between 0 and 100")])
    resolution = SelectField("Video/Image Resolution: ", choices=[("320x240", "320 x 240"), ("640x480", "640 x 480"),
        ("800x600", "800 x 600")], default="640x480", validators=[(Required("Required"))])
    hflip = BooleanField("Horizontal Flip: ", default=False)
    vflip = BooleanField("Vertical Flip: ", default=False)
class Recording(Form):
    """Recording-control form with start/stop submit buttons."""
    start = SubmitField("Start Recording")
    stop = SubmitField("Stop Recording")
class LicensePlate(Form):
    """Form for entering a license plate number (4-10 chars, no spaces)."""
    license = TextField("License Plate", validators=[Required("Please provide a license plate without any spaces"),
        Length(min=4, max=10), Regexp(r'^[\w.@+-]+$', message="Please provide a license plate without any spaces")])
class ForceLock(Form):
    """Single-button form to force the lock closed."""
    forcelock = SubmitField("Force Lock")
class GarageDoor(Form):
    """Single-button form to open the garage door."""
    opengarage = SubmitField("Open Garage")
| 52.126582
| 118
| 0.723409
| 3,818
| 0.927149
| 0
| 0
| 0
| 0
| 0
| 0
| 1,955
| 0.474745
|
0cc0bf99ee01e613b032f7efe713db47ddaef6b6
| 1,137
|
py
|
Python
|
ossdbtoolsservice/metadata/contracts/object_metadata.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
ossdbtoolsservice/metadata/contracts/object_metadata.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
ossdbtoolsservice/metadata/contracts/object_metadata.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import enum
from typing import Optional
from ossdbtoolsservice.serialization import Serializable
class MetadataType(enum.Enum):
    """Contract enum for representing metadata types"""
    TABLE = 0      # database table
    VIEW = 1       # database view
    SPROC = 2      # stored procedure
    FUNCTION = 3   # database function
class ObjectMetadata(Serializable):
    """Database object metadata"""
    @classmethod
    def get_child_serializable_types(cls):
        # Tells the Serializable base how to deserialize the enum-typed field
        return {'metadata_type': MetadataType}

    def __init__(self, urn: str = None, metadata_type: MetadataType = None, metadata_type_name: str = None, name: str = None, schema: Optional[str] = None):
        # Kind of database object (table, view, sproc, function)
        self.metadata_type: MetadataType = metadata_type
        # Human-readable name of the metadata type
        self.metadata_type_name: str = metadata_type_name
        # Object name
        self.name: str = name
        # Schema containing the object (may be None)
        self.schema: str = schema
        # Uniform resource name identifying the object
        self.urn: str = urn
| 34.454545
| 156
| 0.591029
| 687
| 0.604222
| 0
| 0
| 102
| 0.08971
| 0
| 0
| 437
| 0.384345
|
0cc14f945ff11b1ec78d14d582d03623e82355fd
| 4,657
|
py
|
Python
|
tools/multiscale_shape.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | 5
|
2021-01-05T23:16:55.000Z
|
2021-07-23T12:26:06.000Z
|
tools/multiscale_shape.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | null | null | null |
tools/multiscale_shape.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | 1
|
2021-02-22T08:31:05.000Z
|
2021-02-22T08:31:05.000Z
|
import torch
from shape_utils import Shape, load_shape_pair, scatter_shape_pair
from torch_geometric.nn import knn
from param import *
from arap_potential import arap_vert
def load_multiscale_shapes(folder_path, file_name, scales, offset=0.5*torch.ones([3], device=device, dtype=torch.float32)):
    """Like 'load_shape_pair' but for shapes with different resolutions.

    Loads one shape pair per entry in `scales` (from the matching `sub_<scale>`
    folder) and bundles them into a pair of MultiscaleShape objects.
    """
    verts_x, trivs_x = [], []
    verts_y, trivs_y = [], []
    for scale in scales:
        file_load = folder_path + "sub_" + str(scale) + "/" + file_name
        shape_x, shape_y = load_shape_pair(file_load, offset)
        verts_x.append(shape_x.vert)
        trivs_x.append(shape_x.triv)
        verts_y.append(shape_y.vert)
        trivs_y.append(shape_y.triv)
    return MultiscaleShape(verts_x, trivs_x), MultiscaleShape(verts_y, trivs_y)
class MultiscaleShape(Shape):
    """Class for shapes with multiple resolutions.
    Attributes beyond the base class 'Shape' are:
    vert_array: List of vertices with different resolutions
    triv_array: List of triangles with different resolutions
    scale_idx: The index describing the current resolution --
    The current vertices are vert_array[scale_idx]
    ass_[array/vecs/weights]: attributes needed to apply an interpolation
    on scale 'scale_idx' to the next resolution '(scale_idx+1)'
    """
    def __init__(self, vert_array, triv_array):
        # Start at the coarsest resolution (index 0)
        super().__init__(vert_array[0], triv_array[0])
        self.vert_array = vert_array
        self.triv_array = triv_array
        self.scale_idx = 0
        self.scale_idx_len = len(vert_array)
        self.ass_array = None
        self.ass_vecs = None
        self.ass_weights = None
        self.init_upscale()
    def set_scale_idx(self, scale_idx):
        """Switch the active resolution, saving the current vertices first."""
        assert scale_idx >= 0 and scale_idx < self.scale_idx_len, "new index out of bounds"
        self.vert_array[self.scale_idx] = self.vert
        self.scale_idx = scale_idx
        self.vert = self.vert_array[scale_idx]
        self.triv = self.triv_array[scale_idx]
        self.samples = list(range(self.vert.shape[0]))
        # Neighborhood cache is resolution-specific; invalidate it
        self.neigh = None
    def increase_scale_idx(self):
        """Advance to the next (finer) resolution."""
        self.set_scale_idx(self.scale_idx+1)
    def next_resolution(self):
        """Return the vertex-array shape of the next finer resolution."""
        return self.vert_array[self.scale_idx+1].shape
    def init_upscale(self, num_knn=3):
        """Precompute kNN correspondences between consecutive resolutions.

        For each scale transition, stores for every fine vertex its num_knn
        nearest coarse vertices, the offset vectors to them, and
        inverse-distance weights (knn runs on CPU, results stored on device).
        """
        self.ass_array = []
        self.ass_vecs = []
        self.ass_weights = []
        for idx in range(self.scale_idx_len-1):
            vert_i = self.vert_array[idx].to(device_cpu)
            vert_ip1 = self.vert_array[idx+1].to(device_cpu)
            ass_curr = knn(vert_i, vert_ip1, num_knn)
            ass_curr = ass_curr[1, :].view(-1, num_knn)
            self.ass_array.append(ass_curr.to(device))  # [n_vert_tp1, num_knn]
            vec_curr = vert_ip1.unsqueeze(1) - vert_i[ass_curr, :]
            self.ass_vecs.append(vec_curr.to(device))  # [n_vert_tp1, num_knn, 3]
            # Inverse-distance weights, normalized over the num_knn neighbors
            weights_curr = 1/(torch.norm(vec_curr, dim=2, keepdim=True)+1e-5)
            weights_curr = weights_curr / torch.sum(weights_curr, dim=1, keepdim=True)
            self.ass_weights.append(weights_curr.to(device))  # [n_vert_tp1, num_knn, 1]
    def apply_upsampling(self, vert_t):
        """Transfer deformed coarse vertices vert_t to the next finer scale.

        Uses local rotations (ARAP) of the coarse deformation to displace the
        precomputed offset vectors, then blends with inverse-distance weights.
        """
        R = arap_vert(vert_t, self.vert, self.get_neigh())  # [n_vert_tp1, 3, 3]
        ass_curr = self.ass_array[self.scale_idx]
        vec_curr = self.ass_vecs[self.scale_idx]
        weights_curr = self.ass_weights[self.scale_idx]
        vert_tp1 = vert_t[ass_curr, :] + torch.matmul(R[ass_curr], vec_curr.unsqueeze(3)).squeeze()  # [n_vert_tp1, num_knn, 3]
        vert_tp1 = torch.sum(weights_curr * vert_tp1, dim=1)
        return vert_tp1
    def rotate(self, R):
        """Apply rotation matrix R to all resolutions and rebuild kNN data."""
        for i in range(self.scale_idx_len):
            self.vert_array[i] = torch.mm(self.vert_array[i], R.transpose(0, 1))
        self.vert = self.vert_array[self.scale_idx]
        self.init_upscale()
    def to_box(self, shape_y):
        """Normalize this shape and shape_y to a common box at every scale."""
        scale_idx = self.scale_idx
        for i in range(self.scale_idx_len):
            self.set_scale_idx(i)
            shape_y.set_scale_idx(i)
            super().to_box(shape_y)
        # Restore the previously active resolution
        self.set_scale_idx(scale_idx)
        shape_y.set_scale_idx(scale_idx)
        self.init_upscale()
    def scale(self, factor, shift=True):
        """Scale (and optionally shift) all resolutions by a common factor."""
        scale_idx = self.scale_idx
        for i in range(self.scale_idx_len):
            self.set_scale_idx(i)
            super().scale(factor, shift)
        self.set_scale_idx(scale_idx)
        self.init_upscale()
if __name__ == "__main__":
    # Smoke-test entry point; this module is normally imported, not run.
    print("main of multiscale_shape.py")
| 33.503597
| 126
| 0.665235
| 3,622
| 0.777754
| 0
| 0
| 0
| 0
| 0
| 0
| 711
| 0.152673
|
0cc186344e52a624e94b0910847681d7c50bf522
| 7,919
|
py
|
Python
|
src/RBF.py
|
KastnerRG/sherlock
|
ba3e8a81e08315df169bb5dd76d9fdd8f2660583
|
[
"CC-BY-4.0"
] | null | null | null |
src/RBF.py
|
KastnerRG/sherlock
|
ba3e8a81e08315df169bb5dd76d9fdd8f2660583
|
[
"CC-BY-4.0"
] | null | null | null |
src/RBF.py
|
KastnerRG/sherlock
|
ba3e8a81e08315df169bb5dd76d9fdd8f2660583
|
[
"CC-BY-4.0"
] | null | null | null |
import numpy as np
import scipy
import scipy.linalg as linalg
import scipy.spatial
import scipy.special
import scipy.optimize
import sklearn
def bases(name):
    """Look up a radial basis function by name.

    One-argument bases take the distance `x`; the parameterized bases
    ('multiquadric', 'gaussian', 'inverse_multiquadric') additionally take a
    shape parameter `s`.

    Args:
        name: one of 'linear', 'cubic', 'multiquadric', 'thin_plate',
            'gaussian', 'inverse_multiquadric'.

    Returns:
        The corresponding callable.

    Raises:
        ValueError: if `name` is not a known basis.
    """
    table = {
        'linear': lambda x: x,
        'cubic': lambda x: x**3,
        'multiquadric': lambda x, s: np.sqrt((1.0/s*x)**2 + 1),
        'thin_plate': lambda x: scipy.special.xlogy(x**2, x),
        'gaussian': lambda x, s: np.exp(-(1.0/s*x)**2),
        'inverse_multiquadric': lambda x, s: 1.0/np.sqrt((1.0/s*x)**2 + 1),
    }
    try:
        return table[name]
    except KeyError:
        raise ValueError('Basis not recognised.')
class RbfInterpolator:
    """
    Standard RBF interpolation / kernel smoothing.

    Written to replace Scipy's Rbf class, which has a silly interface and is
    difficult to modify. Also includes optional optimization of the "smooth"
    parameter.

    Parameters
    ----------
    norm : str
        Distance metric name understood by scipy.spatial.distance.
    rbf : callable
        Radial basis function applied elementwise to the distance matrix.
    smooth : float
        Value subtracted from the kernel diagonal; 0 gives exact interpolation.
    optimize_smoothing : bool
        If True, `fit` first tunes `smooth` by cross-validation (requires
        scikit-learn).

    Author: Alric Althoff -- 2018
    """

    def __init__(self, norm='euclidean', rbf=lambda r: r, smooth=0.0, optimize_smoothing=False):
        self.norm = norm
        self.rbf = rbf
        self.smooth = smooth
        self.optimize_smoothing = optimize_smoothing

    @staticmethod
    def _solve_system(K, y):
        """Solve K @ nodes = y.

        Tries a symmetric positive-definite solve first and falls back to
        least squares when K is (numerically) singular or not PD.
        """
        rcond = 1 / np.linalg.cond(K)
        if rcond > 1e-10:  # matrix is not singular (i.e. most of the time)
            try:
                # BUGFIX: the original tested a stale local `nodes` after
                # assigning self.nodes, so lstsq always clobbered the solve.
                # `assume_a='pos'` replaces the removed `sym_pos=True` flag.
                return linalg.solve(K, y, assume_a='pos')
            except linalg.LinAlgError:
                pass
        return linalg.lstsq(K, y)[0]

    def _opt_smooth(self):
        """Tune `self.smooth` by minimizing cross-validated squared error.

        We're just using cross-validation and retraining the whole model.
        Likely a lot of improvements possible.
        """
        # Local import: `import sklearn` alone does not bind the submodule,
        # and this keeps scikit-learn optional unless optimization is used.
        from sklearn.model_selection import ShuffleSplit

        def obj(x):
            ss = ShuffleSplit(n_splits=5, test_size=.3)
            total = 0.0
            # BUGFIX: the original returned inside the loop, so only the
            # first split ever contributed; accumulate over all splits.
            for tri, tei in ss.split(self._X_train):
                K = scipy.spatial.distance.squareform(
                    scipy.spatial.distance.pdist(self._X_train[tri, :], self.norm))
                K = self.rbf(K)
                K -= np.eye(K.shape[0]) * x
                nodes = self._solve_system(K, self._y_train[tri])
                K = scipy.spatial.distance.cdist(
                    self._X_train[tei, :], self._X_train[tri, :], self.norm)
                K = self.rbf(K)
                total += np.sum((self._y_train[tei] - np.dot(K, nodes))**2)
            return total

        opt_param = scipy.optimize.minimize_scalar(obj, bounds=[.0001, 100], bracket=[0.0001, 100])
        self.smooth = opt_param.x

    def _make_kernel(self, new_X=None):
        """Build the RBF kernel matrix.

        Train-vs-train (with the smoothing term on the diagonal) when
        `new_X` is None; otherwise new_X-vs-train.
        """
        if new_X is None:
            K = scipy.spatial.distance.squareform(
                scipy.spatial.distance.pdist(self._X_train, self.norm))
        else:
            K = scipy.spatial.distance.cdist(new_X, self._X_train, self.norm)
        K = self.rbf(K)
        if new_X is None and self.smooth != 0:
            K -= np.eye(K.shape[0]) * self.smooth
        return K

    def fit(self, X, y):
        """Fit interpolation weights to training data (X, y).

        1-d X is promoted to a column vector. If `optimize_smoothing` was
        requested, `self.smooth` is tuned first.
        """
        self._X_train = X
        self._y_train = y
        if len(self._X_train.shape) == 1:
            self._X_train = self._X_train[:, np.newaxis]
        if self.optimize_smoothing:
            self._opt_smooth()
        self.K = self._make_kernel()
        self.nodes = self._solve_system(self.K, self._y_train)

    def predict(self, X):
        """Evaluate the fitted interpolant at the rows of X."""
        if len(X.shape) == 1:
            X = X[:, np.newaxis]
        K = self._make_kernel(X)
        return np.dot(K, self.nodes)
class RBFConsensus:
    """Bagged RBF interpolation.

    Repeatedly fits `RbfInterpolator` models on random subsamples of the
    training data and averages their predictions; the spread across rounds
    serves as an uncertainty estimate.
    """

    def __init__(self,
                 sample_frac=.6,
                 subsample_rounds=32,
                 radial_basis_function=lambda x: x,
                 norm='euclidean',
                 copy_data=True,
                 categorical_features=None):
        self.sample_frac = sample_frac  # fraction of data sampled per round
        self.subsample_rounds = subsample_rounds  # number of subsampling rounds
        self.radial_basis_function = radial_basis_function  # RBF ("linear" + euclidean norm = linear interpolation)
        self.norm = norm  # which distance function is appropriate?
        self.copy_data = copy_data  # copy the inputs, or keep references?
        self.N = None  # set by fit(); predict() refuses to run before that
        self.trained_smooth_param = None  # per-dimension smoothing, once optimized
        self.categorical_features = categorical_features  # NOTE(review): currently unused

    def _fit_one(self, X, y, optimize_smoothing=False):
        """Fit one RbfInterpolator per output dimension on (X, y)."""
        self.rbfis_by_dim = []
        for dim in range(y.shape[1]):
            # Reuse previously optimized smoothing unless optimize_smoothing=True.
            if self.trained_smooth_param is None:
                rbfi = RbfInterpolator(rbf=self.radial_basis_function, norm=self.norm,
                                       optimize_smoothing=optimize_smoothing)
            else:
                rbfi = RbfInterpolator(smooth=self.trained_smooth_param[dim],
                                       rbf=self.radial_basis_function, norm=self.norm,
                                       optimize_smoothing=optimize_smoothing)
            rbfi.fit(X, y[:, dim])
            self.rbfis_by_dim.append(rbfi)
        if optimize_smoothing:  # optimized params are now available; remember them
            self.trained_smooth_param = [self.rbfis_by_dim[dim].smooth for dim in range(y.shape[1])]
        return self

    def _predict_one(self, X):
        """Predict with the per-dimension interpolators fitted by `_fit_one`."""
        if len(X.shape) == 1:
            Xp = X[:, np.newaxis]
        else:
            Xp = X
        pred = np.empty([Xp.shape[0], self._y_train.shape[1]])
        for dim in range(len(self.rbfis_by_dim)):
            # BUGFIX: use the 2-d view Xp (the original passed the raw X,
            # silently relying on predict() to repeat the reshape).
            pred[:, dim] = self.rbfis_by_dim[dim].predict(Xp).squeeze()
        return pred

    def fit(self, X, y):
        """Store (and optionally copy) the training data.

        Actual model fitting happens per subsample inside `predict`.
        """
        self._y_train = y.copy() if self.copy_data else y
        self._X_train = X.copy() if self.copy_data else X
        if len(self._y_train.shape) == 1:
            self._y_train = self._y_train[:, np.newaxis]
        if len(self._X_train.shape) == 1:
            self._X_train = self._X_train[:, np.newaxis]
        self.N = X.shape[0]

    def predict(self, X, return_std=False):
        """Average predictions over `subsample_rounds` random subsamples.

        When `return_std` is True, also return the across-round standard
        deviation (summed over output dimensions) as an uncertainty proxy.
        """
        if self.N is None:
            raise RuntimeError('`.fit` must be called before `.predict`')
        N_samp = int(np.ceil(self.N * self.sample_frac))
        y_pred = np.empty([X.shape[0], self._y_train.shape[1], self.subsample_rounds])
        for i in range(self.subsample_rounds):
            r = np.random.permutation(self.N)[:N_samp]
            self._fit_one(self._X_train[r, :], self._y_train[r, :])
            y_pred[:, :, i] = self._predict_one(X)
        y_out = y_pred.mean(axis=2)
        if return_std:
            y_std = np.sqrt(y_pred.var(axis=2).sum(axis=1))
            return y_out, y_std
        return y_out
def RBF_unit_test():
    """Visual smoke test: fit RBFConsensus to noisy synthetic 1-d data,
    print the wall-clock fit+predict time, and show a plot with a
    +/- 5-sigma uncertainty band. Not an automated test (requires a display).
    """
    import matplotlib.pyplot as plt
    import time
    # Generate synthetic 1-d data
    N = 300
    lo = -10.0
    hi = 10.0
    t = np.linspace(lo,hi,N)
    # Sine + quadratic trend with noise that grows away from `lo`.
    y = np.sin(t*.5) - .08*t**2 + np.random.randn(t.shape[0])*.05*(t-lo)
    # Messy fitting
    model = RBFConsensus(radial_basis_function=lambda x:bases('inverse_multiquadric')(x,.2))
    t0 = time.time()
    model.fit(t,y)
    y_pred, y_std = model.predict(t, return_std=True)
    print(time.time()-t0)  # crude timing of fit + predict
    y_pred = y_pred.squeeze()
    y_std = y_std.squeeze()
    # Shaded 5-sigma band around the consensus prediction.
    plt.fill_between(t, y_pred - 5*y_std, y_pred + 5*y_std, alpha=0.15, color='k')
    plt.scatter(t,y)
    plt.plot(t, y_pred, color='red')
    plt.show()
| 32.858921
| 164
| 0.56194
| 6,490
| 0.819548
| 0
| 0
| 0
| 0
| 0
| 0
| 1,033
| 0.130446
|
0cc31eec76a99a4705096b18e21f9ea4dd88bce8
| 523
|
py
|
Python
|
web/pyshop/admin.py
|
Andrew7891-kip/Ecommerce-pyshop
|
2eaa7b553789b65992cbd80f80a68fcf25ef0efd
|
[
"Apache-2.0"
] | null | null | null |
web/pyshop/admin.py
|
Andrew7891-kip/Ecommerce-pyshop
|
2eaa7b553789b65992cbd80f80a68fcf25ef0efd
|
[
"Apache-2.0"
] | null | null | null |
web/pyshop/admin.py
|
Andrew7891-kip/Ecommerce-pyshop
|
2eaa7b553789b65992cbd80f80a68fcf25ef0efd
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product: list columns and slug auto-fill."""
    # Columns shown in the product change list.
    list_display=['name','category','price_is']
    # Auto-populate the slug from the product name while typing.
    prepopulated_fields = {"slug": ("name",)}
class CartAdmin(admin.ModelAdmin):
    """Admin options for Cart: show item, owner and creation time."""
    list_display=['item','user','created']
class OrderAdmin(admin.ModelAdmin):
    """Admin options for Order: show owner and ordered flag."""
    list_display=['user','ordered']
# Expose the shop models in the Django admin with their custom options;
# Checkout uses the default ModelAdmin.
admin.site.register(Product,ProductAdmin)
admin.site.register(Cart,CartAdmin)
admin.site.register(Order,OrderAdmin)
admin.site.register(Checkout)
# Register your models here.
| 20.115385
| 47
| 0.743786
| 279
| 0.533461
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.195029
|
0cc3636c2c8cdfc0167b425c8f83724d3610d2e3
| 1,063
|
py
|
Python
|
extract/tef/incident_reflected_power_test.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
extract/tef/incident_reflected_power_test.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
extract/tef/incident_reflected_power_test.py
|
PuffyPuffin/LO_user
|
c7cafc2045b027aad0098d034cbe2b70126c8379
|
[
"MIT"
] | null | null | null |
"""
Test of the cancellation of terms in the calculation of
tidal energy flux.
This will follow Mofjeld's notation.
F is proportional to the energy flux of the original signal, and
FF is proportional to the sum of the energy fluxes of the incident and
reflected waves.
RESULT: The two net fluxes are only equal for zero friction. I think this
may be because pressure work is a nonlinear term and some part of the
two waves pressure work can leak into the other.
"""
import numpy as np
A0 = 1 + 0j
U0 = 1 + 0.2j
F = A0.real*U0.real + A0.imag*U0.imag
alpha = 1 / np.sqrt(1 + 0j)
Ap = (A0 + U0/alpha)/2
Am = (A0 - U0/alpha)/2
Up = alpha * Ap
Um = alpha * Am
FF = (Ap.real*Up.real + Ap.imag*Up.imag) - (Am.real*Um.real + Am.imag*Um.imag)
print('No friction:')
print('F = %0.1f, FF = %0.1f' % (F, FF))
alpha = 1 / np.sqrt(1 + 1j)
Ap = (A0 + U0/alpha)/2
Am = (A0 - U0/alpha)/2
Up = alpha * Ap
Um = alpha * Am
FF = (Ap.real*Up.real + Ap.imag*Up.imag) - (Am.real*Um.real + Am.imag*Um.imag)
print('\nOrder-1 friction:')
print('F = %0.1f, FF = %0.1f' % (F, FF))
| 25.309524
| 78
| 0.659454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 551
| 0.518344
|
0cc371f590d58414f3d55a84eb2346850fb66bd9
| 552
|
py
|
Python
|
tests/test_recipe.py
|
iruoma/DevCookbook
|
e13b955bc2dbfaacab1852d857af058aab0029e5
|
[
"MIT"
] | 20
|
2020-10-28T03:06:41.000Z
|
2021-11-15T02:52:43.000Z
|
tests/test_recipe.py
|
iruoma/DevCookbook
|
e13b955bc2dbfaacab1852d857af058aab0029e5
|
[
"MIT"
] | 15
|
2020-12-04T00:47:59.000Z
|
2021-03-23T11:42:48.000Z
|
tests/test_recipe.py
|
iruoma/DevCookbook
|
e13b955bc2dbfaacab1852d857af058aab0029e5
|
[
"MIT"
] | 22
|
2020-11-24T14:02:07.000Z
|
2022-02-01T18:52:26.000Z
|
from recipe_compiler.recipe import Recipe
from recipe_compiler.recipe_category import RecipeCategory
def test_recipe_slug():
    """Recipe.slug should be a URL-safe slug derived from the recipe name
    (lowercased, punctuation stripped, spaces turned into hyphens)."""
    # Given
    name = "Thomas Eckert"
    residence = "Seattle, WA"
    category = RecipeCategory("dessert")
    recipe_name = '"Pie" Shell Script'  # quotes should be dropped from the slug
    quote = "Hello, World"
    ingredients = [""]
    instructions = [""]
    expected = "pie-shell-script"
    # When
    recipe = Recipe(
        name, residence, category, recipe_name, quote, ingredients, instructions
    )
    # Then
    assert expected == recipe.slug
| 23
| 80
| 0.664855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.202899
|
0cc46073749631b895fb07e8351d82807fbd6e14
| 2,140
|
py
|
Python
|
lms_app/v1/serializers/user_serializers.py
|
Etomovich/lms-backend
|
e586abc44a0e74ed28da7a77f6ef31230995c84b
|
[
"MIT"
] | null | null | null |
lms_app/v1/serializers/user_serializers.py
|
Etomovich/lms-backend
|
e586abc44a0e74ed28da7a77f6ef31230995c84b
|
[
"MIT"
] | 1
|
2021-06-02T00:45:56.000Z
|
2021-06-02T00:45:56.000Z
|
lms_app/v1/serializers/user_serializers.py
|
Etomovich/lms-backend
|
e586abc44a0e74ed28da7a77f6ef31230995c84b
|
[
"MIT"
] | null | null | null |
from flask_restplus import Namespace, fields
class UserDataModel(object):
    """Represents the user data transfer object.

    Holds the flask-restplus API namespace plus the request models used to
    validate/document the registration and login payloads.
    """
    # Namespace under which the user auth/signup resources are documented.
    api = Namespace(
        'user', description='user authentication and signup resources'
    )
    # Payload schema for user registration; optional fields cover the
    # role-specific extras (officer/farmer details, location).
    this_user = api.model('Register input data', {
        'username': fields.String(
            required=True, description="username"
        ),
        'first_name': fields.String(
            required=True, description="user's first name"
        ),
        'last_name': fields.String(
            required=True, description="user's last name"
        ),
        'national_id': fields.Integer(
            required=True, description="user's national ID"
        ),
        'role': fields.String(
            required=True, description="user's role"
        ),
        'date_joined': fields.String(
            required=True, description="Date joined timestamp"
        ),
        'email': fields.String(
            required=True, description="user's email"
        ),
        'phone_number': fields.String(
            required=True, description="user's last name"
        ),
        'password': fields.String(
            required=True, description="user's password"
        ),
        'retype_password': fields.String(
            required=True, description="Retype password"
        ),
        'officer_username': fields.String(
            required=False, description="Enter officer name"
        ),
        'location': fields.String(
            required=False, description="Enter location"
        ),
        'officer_info': fields.String(
            required=False, description="Add officer information"
        ),
        'farmer_info': fields.String(
            required=False, description="Add farmer information"
        ),
    })
    # Payload schema for login: password plus either username or email.
    login_user = api.model('Login input data', {
        'password': fields.String(
            required=True, description="Add your password"
        ),
        'username': fields.String(
            required=False, description="Add your username"
        ),
        'email': fields.String(
            required=False, description="Add you email"
        )
    })
| 33.4375
| 70
| 0.574299
| 2,092
| 0.97757
| 0
| 0
| 0
| 0
| 0
| 0
| 636
| 0.297196
|
0cc52afa5bda9e011a3f67aa407ce29b267af421
| 1,409
|
py
|
Python
|
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:48:24 2020
@author: Christopher Cheng
"""
class Stack(object):
    """A simple LIFO stack backed by a list (top of stack = end of list)."""

    def __init__(self):
        # Underlying storage; index -1 is the top of the stack.
        self.stack = []

    def get_stack_elements(self):
        """Return a shallow copy of the contents, bottom first."""
        return list(self.stack)

    def add_one(self, item):
        """Push a single item onto the stack."""
        self.stack.append(item)

    def add_many(self, item, n):
        """Push the same item n times."""
        for _ in range(n):
            self.stack.append(item)

    def remove_one(self):
        """Discard the top item."""
        self.stack.pop()

    def remove_many(self, n):
        """Discard the top n items, one at a time."""
        for _ in range(n):
            self.stack.pop()

    def size(self):
        """Return the number of stacked items."""
        return len(self.stack)

    def prettyprint(self):
        """Print the stack top-down, one boxed item per line."""
        for item in reversed(self.stack):
            print("|_", item, "_|")

    def add_list(self, L):
        """Push every element of L in order (last element ends up on top)."""
        self.stack.extend(L)

    def __str__(self):
        """Render the stack top-down as '|_item_|' lines."""
        boxed = ["|_" + str(item) + "_|\n" for item in reversed(self.stack)]
        return "".join(boxed)
class Circle(object):
    """A circle described only by its radius (defaults to 0)."""

    def __init__(self):
        self.radius = 0

    def change_radius(self, radius):
        """Set the radius to a new value."""
        self.radius = radius

    def get_radius(self):
        """Return the current radius."""
        return self.radius

    def __str__(self):
        return f"circle: {self.radius}"
# Demo: push two circles of radius 1 and 2 onto a stack and print it
# top-down (radius-2 circle prints first).
circles = Stack()
one_circle = Circle()
one_circle.change_radius(1)
circles.add_one(one_circle)
two_circle = Circle()
two_circle.change_radius(2)
circles.add_one(two_circle)
print(circles)
| 26.092593
| 71
| 0.581973
| 1,108
| 0.786373
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.116395
|
0cc6417c3e829823797e9f3e6ad674ead279d5e9
| 2,657
|
py
|
Python
|
src/data_analysis_util.py
|
vikramnayyar/Customer-Identification-for-Bank-Marketing
|
4727f6d8997d26836ad167616a8edb4898623c39
|
[
"Apache-2.0"
] | null | null | null |
src/data_analysis_util.py
|
vikramnayyar/Customer-Identification-for-Bank-Marketing
|
4727f6d8997d26836ad167616a8edb4898623c39
|
[
"Apache-2.0"
] | null | null | null |
src/data_analysis_util.py
|
vikramnayyar/Customer-Identification-for-Bank-Marketing
|
4727f6d8997d26836ad167616a8edb4898623c39
|
[
"Apache-2.0"
] | null | null | null |
"""
The script declares functions used in 'data_analysis.py'
"""
import os
import yaml
from logzero import logger
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Patch
import plotly.graph_objects as go
from utility import parse_config
# Shared plot titles/labels for the helpers below live in the YAML config.
config_path = "config/config.yaml"
config = parse_config(config_path) # read config file
def dataset_balance(df_clean, col):
    """Plot the class balance of `col` as a count plot and save it to
    'dataset_balance.png' in the working directory.

    Args:
        df_clean: cleaned customers DataFrame.
        col: column holding the target class (e.g. 'deposit').
    """
    fig, ax = plt.subplots()
    sns.countplot(x = col, data = df_clean, palette = 'viridis')
    plt.title('Deposit Distribution of Bank Customers', fontsize = 16)
    plt.xlabel('Deposit', fontsize = 14)
    plt.ylabel('Total Customers', fontsize = 14)
    plt.xticks(fontsize = 12)
    plt.savefig("dataset_balance.png")
def box_plot(df_clean, col, plot_type):
    """Plot `col` split by deposit outcome: a histogram (left) and a
    horizontal box plot (right), then save the figure as `plot_type`.

    Titles/labels come from config["data_analysis"][plot_type].
    """
    fig, ax = plt.subplots(1, 2, figsize=(15, 5))
    fig.suptitle(config["data_analysis"][plot_type]["title"], size = 18, y=1.08)
    # Subplot 1: overlaid histograms of depositors vs non-depositors.
    ax[0].hist(df_clean[df_clean["deposit"]=='no'][col], bins=30, alpha=0.5, color="green", label="Non-Depositors")
    ax[0].hist(df_clean[df_clean["deposit"]=='yes'][col], bins=30, alpha=0.5, color="blue", label="Depositors")
    ax[0].set_xlabel(config["data_analysis"][plot_type]["xlabel"], size = 14)
    ax[0].set_ylabel(config["data_analysis"][plot_type]["ylabel"], size = 14)
    ax[0].legend(fontsize = 11);
    # Subplot 2: box plot of the same split, with a manual color legend.
    sns.boxplot(x=col, y="deposit", data=df_clean, orient="h", palette={ 'no':"#80e880", 'yes':"#2626ff"}, ax = ax[1])
    ax[1].get_yaxis().set_visible(False)
    ax[1].set_xlabel(config["data_analysis"][plot_type]["xlabel"], size = 14)
    color_patches = [
        Patch(facecolor="#80e880", label="Non-Depositors"),
        Patch(facecolor="#2626ff", label="Depositors")
    ]
    ax[1].legend(handles=color_patches, fontsize=11);
    plt.savefig(plot_type) # saving figure
def grouped_bar_plot(df_clean, col, plot_type):
    """Plot counts of `col` grouped by deposit outcome and save as `plot_type`.

    Titles/labels come from config["data_analysis"][plot_type].
    """
    # NOTE(review): sns.catplot creates its own figure, so the fig/ax from
    # plt.subplots() here is a leaked empty figure; plt.close(1) below
    # appears intended to close it -- confirm figure numbering.
    fig, ax = plt.subplots()
    sns.catplot(col, hue = 'deposit', data=df_clean, kind="count", palette={'no':"#80e880", 'yes':"#2626ff"}, legend = False)
    color_patches = [
        Patch(facecolor="#80e880", label="Non-Depositors"),
        Patch(facecolor="#2626ff", label="Depositors")
    ]
    plt.title(config["data_analysis"][plot_type]["title"], size = 18, y=1.08)
    plt.xlabel(config["data_analysis"][plot_type]["xlabel"], size = 14)
    plt.ylabel(config["data_analysis"][plot_type]["ylabel"], size = 14)
    plt.xticks(size = 12, rotation = 'vertical')
    plt.legend(handles = color_patches, fontsize = 12, bbox_to_anchor=(1.4,1.05))
    plt.savefig(plot_type) # saving figure
    plt.close(1)
| 34.064103
| 125
| 0.657132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 663
| 0.24953
|
0cc6f68c50e68c364cd5514c50d107da2d606391
| 122
|
py
|
Python
|
api/crawller/admin.py
|
MahsaSeifikar/tweetphus
|
01b687f38365023cfaaa34739c50b0da79f0b510
|
[
"MIT"
] | null | null | null |
api/crawller/admin.py
|
MahsaSeifikar/tweetphus
|
01b687f38365023cfaaa34739c50b0da79f0b510
|
[
"MIT"
] | 1
|
2021-12-26T16:35:36.000Z
|
2021-12-29T15:07:01.000Z
|
api/crawller/admin.py
|
MahsaSeifikar/tweetphus
|
01b687f38365023cfaaa34739c50b0da79f0b510
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from crawller.models import User
# Register your models here.
# Expose the crawler's User model in the admin with default options.
admin.site.register(User)
| 20.333333
| 32
| 0.811475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.229508
|
0cc75fc2057f1d904d4d63b853c8dc9ff11fc8ab
| 987
|
py
|
Python
|
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
"""Configuration is a base class that has default values that you can change
during the instance of the client class"""
from typing import Callable
# Module-level defaults used by Config.
BASE_URL = "https://config.feature-flags.uat.harness.io/api/1.0"
MINUTE = 60
PULL_INTERVAL = 1 * MINUTE


class Config(object):
    """Client configuration; every field falls back to a module default."""

    def __init__(self, base_url: str = BASE_URL,
                 pull_interval: int = PULL_INTERVAL,
                 cache: object = None,
                 store: object = None,
                 enable_stream: bool = False):
        # Copy each keyword straight onto the instance.
        for attr, value in (("base_url", base_url),
                            ("pull_interval", pull_interval),
                            ("cache", cache),
                            ("store", store),
                            ("enable_stream", enable_stream)):
            setattr(self, attr, value)


# Shared default configuration instance.
default_config = Config()
def with_base_url(base_url: str) -> Callable:
    """Return a configurator that sets ``config.base_url`` to *base_url*."""
    def _apply(config: Config) -> None:
        config.base_url = base_url
    return _apply
def with_stream_enabled(value: bool) -> Callable:
    """Return a configurator that sets ``config.enable_stream`` to *value*."""
    def _apply(config: Config) -> None:
        config.enable_stream = value
    return _apply
| 25.973684
| 76
| 0.64843
| 422
| 0.427558
| 0
| 0
| 0
| 0
| 0
| 0
| 172
| 0.174265
|
0cc7dbac1b53714dc8579ed543f77deb34610c57
| 1,705
|
py
|
Python
|
src/users/management/commands/populate_tables.py
|
pimpale/BQuest-Backend
|
b32833ee5053db1c47fa28f57273632eae43a5cc
|
[
"MIT"
] | null | null | null |
src/users/management/commands/populate_tables.py
|
pimpale/BQuest-Backend
|
b32833ee5053db1c47fa28f57273632eae43a5cc
|
[
"MIT"
] | 51
|
2018-01-24T05:53:15.000Z
|
2022-01-13T00:44:24.000Z
|
src/users/management/commands/populate_tables.py
|
pimpale/BQuest-Backend
|
b32833ee5053db1c47fa28f57273632eae43a5cc
|
[
"MIT"
] | 3
|
2020-04-22T03:21:37.000Z
|
2020-12-15T22:45:52.000Z
|
from django.core.management.base import BaseCommand
from users.models import Major, Minor, Course
from django.db import IntegrityError
from os import path
import json
class Command(BaseCommand):
    """Populate the Major, Minor and Course tables from bundled JSON lists."""

    def _populate(self, filename, model):
        """Load a JSON list of names from ../../<filename> (relative to this
        module) and insert one `model` row per name, skipping names that
        already exist (unique-constraint IntegrityError is ignored)."""
        base_path = path.dirname(__file__)
        data_path = path.abspath(path.join(base_path, "..", "..", filename))
        with open(data_path) as data_file:
            names = json.load(data_file)
        for name in names:
            try:
                model(name=name).save()
            except IntegrityError:
                # Row already present -- leave it untouched.
                pass

    def _create_majors(self):
        """Insert every major from majors.json."""
        self._populate("majors.json", Major)

    def _create_minors(self):
        """Insert every minor from minors.json."""
        self._populate("minors.json", Minor)

    def _create_courses(self):
        """Insert every course from courses.json."""
        self._populate("courses.json", Course)

    def handle(self, *args, **kwargs):
        """Entry point for ``manage.py populate_tables``."""
        self._create_majors()
        self._create_minors()
        self._create_courses()
| 32.788462
| 85
| 0.567742
| 1,536
| 0.90088
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.037537
|
0cc8db72c131873f18e22e999afa4a7e2c43c233
| 2,041
|
py
|
Python
|
contrib/stack/stripmapStack/unpackFrame_risat_raw.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133
|
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
contrib/stack/stripmapStack/unpackFrame_risat_raw.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 276
|
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
contrib/stack/stripmapStack/unpackFrame_risat_raw.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 235
|
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import os
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
from mroipac.dopiq.DopIQ import DopIQ
import copy
def cmdLineParse(iargs=None):
    '''
    Command line parser.

    Parameters
    ----------
    iargs : list of str, optional
        Argument list to parse instead of sys.argv[1:]. Defaults to None
        (use the process command line), so existing callers are unaffected;
        passing an explicit list makes the parser usable programmatically
        and testable.
    '''
    parser = argparse.ArgumentParser(description='Unpack RISAT raw data and store metadata in pickle file.')
    parser.add_argument('-i','--input', dest='indir', type=str,
                        required=True, help='Input CSK frame')
    parser.add_argument('-o', '--output', dest='slc', type=str,
                        required=True, help='Output SLC file')
    parser.add_argument('-p', '--polar', dest='polar', type=str,
                        default='RH', help='Polarization to extract')
    return parser.parse_args(args=iargs)
def unpack(hdf5, slcname, polar='RH'):
    '''
    Extract RISAT-1 raw data for one polarization and pickle the frame
    metadata.

    Parameters
    ----------
    hdf5 : str
        Input product directory containing scene_<polar>/dat_01.001 and
        lea_01.001. (NOTE(review): despite the name, this reads CEOS-style
        files, not an HDF5 container -- confirm naming with callers.)
    slcname : str
        Output directory; created if missing. The raw file is written as
        <slcname>/<basename>.raw and metadata shelved as <slcname>/raw.
    polar : str
        Polarization subdirectory suffix, e.g. 'RH' or 'RV'.
    '''
    obj = createSensor('RISAT1')
    obj._imageFile = os.path.join(hdf5, 'scene_'+polar, 'dat_01.001')
    obj._leaderFile = os.path.join(hdf5, 'scene_'+polar,'lea_01.001')
    if not os.path.isdir(slcname):
        os.mkdir(slcname)
    date = os.path.basename(slcname)
    obj.output = os.path.join(slcname, date + '.raw')
    obj.extractImage()
    obj.frame.getImage().renderHdr()
    #####Estimate doppler
    dop = DopIQ()
    dop.configure()
    # Work on a copy of the image descriptor opened read-only.
    img = copy.deepcopy(obj.frame.getImage())
    img.setAccessMode('READ')
    dop.wireInputPort('frame', object=obj.frame)
    dop.wireInputPort('instrument', object=obj.frame.instrument)
    dop.wireInputPort('image', object=img)
    dop.calculateDoppler()
    dop.fitDoppler()
    # Quadratic doppler fit coefficients (cycles/sample), scaled by PRF to Hz.
    fit = dop.quadratic
    coef = [fit['a'], fit['b'], fit['c']]
    print(coef)
    obj.frame._dopplerVsPixel = [x*obj.frame.PRF for x in coef]
    # Persist the populated frame for downstream stack processing.
    pickName = os.path.join(slcname, 'raw')
    with shelve.open(pickName) as db:
        db['frame'] = obj.frame
if __name__ == '__main__':
    '''
    Main driver: parse the command line and unpack the requested frame.
    '''
    inps = cmdLineParse()
    unpack(inps.indir, inps.slc, polar=inps.polar)
| 26.506494
| 108
| 0.652621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 455
| 0.22293
|
0cc92881b3783140afbb04ec688ee09d279aa156
| 2,794
|
py
|
Python
|
distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py
|
google/distla_core
|
7f0d8ab7b847a75e0fc713627488643a8984712a
|
[
"Apache-2.0"
] | 2
|
2021-12-19T21:17:06.000Z
|
2021-12-25T09:19:47.000Z
|
distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py
|
google/distla_core
|
7f0d8ab7b847a75e0fc713627488643a8984712a
|
[
"Apache-2.0"
] | null | null | null |
distla/distla_core/distla_core/linalg/qr/test_qr_ooc.py
|
google/distla_core
|
7f0d8ab7b847a75e0fc713627488643a8984712a
|
[
"Apache-2.0"
] | 1
|
2021-12-25T09:19:56.000Z
|
2021-12-25T09:19:56.000Z
|
"""Tests for qr.py."""
from jax import lax
import jax.numpy as jnp
import numpy as np
import pytest
import tempfile
from distla_core.linalg.utils import testutils
from distla_core.linalg.qr import qr_ooc
from distla_core.utils import pops
# Default dtype plus seed/flag grids shared by the parametrized tests below.
DTYPE = jnp.float32
seeds = [0, 1]
flags = [True, False]
def _dephase_qr(R, Q=None):
""" Maps the Q and R factor from an arbitrary QR decomposition to the unique
with non-negative diagonal entries.
"""
phases_data = np.sign(np.diagonal(R))
m, n = R.shape
if m > n:
phases = np.ones(m)
phases[:n] = phases_data
else:
phases = phases_data
R = phases.conj()[:, None] * R
if Q is not None:
Q = Q * phases
return Q, R
@pytest.mark.parametrize("N", [8, 32, 128])
@pytest.mark.parametrize("aspect_ratio", [1, 2, 10])
@pytest.mark.parametrize("panel_size", [1, 2])
@pytest.mark.parametrize("seed", [0, 1])
def test_qr_ooc(N, aspect_ratio, panel_size, seed):
dtype = np.float32
M = N * aspect_ratio
np.random.seed(seed)
A = np.random.randn(M, N).astype(dtype)
_, expected = np.linalg.qr(A)
_, expected = _dephase_qr(expected)
with tempfile.NamedTemporaryFile(delete=False) as f:
np.save(f, A)
f.close() # Explicit close needed to open again as a memmap.
# The file is still deleted when the context goes out of scope.
result = qr_ooc.qr_ooc(f.name, caqr_panel_size=panel_size)
result = pops.undistribute(result)
_, result = _dephase_qr(result)
atol = testutils.eps(lax.Precision.HIGHEST, dtype=dtype)
atol *= np.linalg.norm(A) ** 2
testutils.assert_allclose(result, expected, atol=atol)
@pytest.mark.parametrize("N", [8, 32, 128])
@pytest.mark.parametrize("aspect_ratio", [1, 2, 10])
@pytest.mark.parametrize("panel_size", [1, 2])
@pytest.mark.parametrize("seed", [0, 1])
def test_fake_cholesky(N, aspect_ratio, panel_size, seed):
fname = "fake_cholesky_test_matrix"
dtype = np.float32
M = N * aspect_ratio
np.random.seed(seed)
A = np.random.randn(M, N).astype(dtype)
cond = np.linalg.cond(A)
expected_gram = np.dot(A.T, A)
expected_chol = np.linalg.cholesky(expected_gram).T
_, expected_chol = _dephase_qr(expected_chol)
np.save(fname, A)
fread = fname + ".npy"
chol_fname = "cholesky_transpose"
gram_fname = "gram_matrix"
qr_ooc.fake_cholesky(fread, caqr_panel_size=panel_size,
chol_fname=chol_fname, gram_fname=gram_fname)
result_gram = np.load(gram_fname + ".npy")
result_chol = np.load(chol_fname + ".npy")
_, result_chol = _dephase_qr(result_chol)
atol = testutils.eps(lax.Precision.HIGHEST, dtype=dtype)
atol *= cond * np.linalg.norm(expected_gram) ** 2
testutils.assert_allclose(result_chol, expected_chol, atol=10 * atol)
testutils.assert_allclose(result_gram, expected_gram, atol=atol)
| 30.703297
| 78
| 0.700787
| 0
| 0
| 0
| 0
| 2,094
| 0.749463
| 0
| 0
| 403
| 0.144238
|
0cca1b15bf096080117912090cc7cfaa4cb29eca
| 7,940
|
py
|
Python
|
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/taggers/cardinal.py
|
serkhanekarim/AI
|
0a13880ae8e608cd00fa819dc590097abdb7ae6e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.ar.taggers.date import get_hundreds_graph
from nemo_text_processing.text_normalization.ar.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class CardinalFst(GraphFst):
"""
Finite state transducer for classifying cardinals, e.g.
-23 -> cardinal { negative: "true" integer: "twenty three" } }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
    def __init__(self, deterministic: bool = True):
        """Compile the cardinal tagger transducer and store it in ``self.fst``."""
        super().__init__(name="cardinal", kind="classify", deterministic=deterministic)

        # TODO repalce to have "oh" as a default for "0"
        graph = pynini.Far(get_abs_path("data/numbers/cardinal_number_name.far")).get_fst()
        # 2-3 digit strings, or a single non-zero digit, verbalized via `graph`.
        self.graph_hundred_component_at_least_one_none_zero_digit = (
            pynini.closure(NEMO_DIGIT, 2, 3) | pynini.difference(NEMO_DIGIT, pynini.accep("0"))
        ) @ graph

        # Digit strings with optional comma thousands separators ("1,234,567").
        self.graph = (
            pynini.closure(NEMO_DIGIT, 1, 3)
            + pynini.closure(pynini.closure(pynutil.delete(","), 0, 1) + NEMO_DIGIT + NEMO_DIGIT + NEMO_DIGIT)
        ) @ graph

        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))

        # Digit-by-digit verbalization, e.g. "123" -> "one two three".
        single_digits_graph = pynini.invert(graph_digit | graph_zero)
        self.single_digits_graph = single_digits_graph + pynini.closure(insert_space + single_digits_graph)

        if not deterministic:
            # for a single token allow only the same normalization
            # "007" -> {"oh oh seven", "zero zero seven"} not {"oh zero seven"}
            single_digits_graph_zero = pynini.invert(graph_digit | graph_zero)
            single_digits_graph_oh = pynini.invert(graph_digit) | pynini.cross("0", "oh")

            self.single_digits_graph = single_digits_graph_zero + pynini.closure(
                insert_space + single_digits_graph_zero
            )
            self.single_digits_graph |= single_digits_graph_oh + pynini.closure(insert_space + single_digits_graph_oh)

            single_digits_graph_with_commas = pynini.closure(
                self.single_digits_graph + insert_space, 1, 3
            ) + pynini.closure(
                pynutil.delete(",")
                + single_digits_graph
                + insert_space
                + single_digits_graph
                + insert_space
                + single_digits_graph,
                1,
            )

            # Ranges: "2-3" -> "from two to three"; "2x3" -> "two by three".
            self.range_graph = pynutil.insert("from ") + self.graph + pynini.cross("-", " to ") + self.graph
            self.range_graph |= self.graph + (pynini.cross("x", " by ") | pynini.cross(" x ", " by ")) + self.graph
            self.range_graph |= (
                pynutil.insert("from ") + get_hundreds_graph() + pynini.cross("-", " to ") + get_hundreds_graph()
            )

            self.range_graph = self.range_graph.optimize()

        serial_graph = self.get_serial_graph()

        # Optional leading minus sign tagged as negative: "true".
        optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)

        if deterministic:
            # 5+ digit numbers are read digit-by-digit (favored by the weight).
            long_numbers = pynini.compose(NEMO_DIGIT ** (5, ...), self.single_digits_graph).optimize()
            final_graph = self.graph | serial_graph | pynutil.add_weight(long_numbers, -0.001)
            cardinal_with_leading_zeros = pynini.compose(
                pynini.accep("0") + pynini.closure(NEMO_DIGIT), self.single_digits_graph
            )
            final_graph |= cardinal_with_leading_zeros
        else:
            leading_zeros = pynini.compose(pynini.closure(pynini.accep("0"), 1), self.single_digits_graph)
            cardinal_with_leading_zeros = (
                leading_zeros + pynutil.insert(" ") + pynini.compose(pynini.closure(NEMO_DIGIT), self.graph)
            )
            # Non-deterministic mode: union of all alternative readings.
            final_graph = (
                self.graph
                | serial_graph
                | self.range_graph
                | self.single_digits_graph
                | get_hundreds_graph()
                | pynutil.add_weight(single_digits_graph_with_commas, 0.001)
                | cardinal_with_leading_zeros
            )

        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + final_graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
def get_serial_graph(self):
    """
    Build a transducer that verbalizes serial numbers (handles only cases
    without delimiters; values with delimiters are handled by default).
    A serial is a combination of digits, letters and dashes, e.g.:
        c325b -> tokens { cardinal { integer: "c three two five b" } }
    """
    digit_fst = self.single_digits_graph
    if not self.deterministic:
        digit_fst |= self.graph
    # insert a space at every letter<->digit boundary
    spaced = pynini.compose(
        pynini.cdrewrite(pynutil.insert(" "), NEMO_ALPHA, NEMO_DIGIT, NEMO_SIGMA),
        pynini.cdrewrite(pynutil.insert(" "), NEMO_DIGIT, NEMO_ALPHA, NEMO_SIGMA),
    )
    # accept only tokens containing at least one digit and one letter
    nonspace = pynini.closure(NEMO_NOT_SPACE)
    spaced = pynini.compose(
        (nonspace + NEMO_ALPHA + nonspace + NEMO_DIGIT + nonspace)
        | (nonspace + NEMO_DIGIT + nonspace + NEMO_ALPHA + nonspace),
        spaced,
    )
    space = pynini.accep(" ")
    # letters first, then digits (possibly alternating)
    serial = pynini.compose(
        spaced,
        pynini.closure(pynini.closure(NEMO_ALPHA, 1) + space, 1)
        + digit_fst
        + pynini.closure(space + pynini.closure(NEMO_ALPHA) + pynini.closure(space + digit_fst, 0, 1)),
    )
    # digits first, then letters (possibly alternating)
    serial |= pynini.compose(
        spaced,
        digit_fst
        + space
        + pynini.closure(NEMO_ALPHA, 1)
        + pynini.closure(space + digit_fst + pynini.closure(space + pynini.closure(NEMO_ALPHA), 0, 1)),
    )
    # serials that contain an explicit delimiter
    sep = pynini.accep("-") | pynini.accep("/")
    letters = pynini.closure(NEMO_ALPHA, 1)
    alpha_then_num = letters + sep + digit_fst
    num_then_alpha = pynini.closure(digit_fst + sep, 1) + letters
    tail = pynini.closure(sep + (letters | digit_fst))
    tail |= pynini.closure(sep + digit_fst + pynutil.insert(" ") + letters)
    serial |= alpha_then_num + tail
    serial |= num_then_alpha + tail
    # numbers-only serials require 2+ delimiters
    serial |= (
        digit_fst + sep + digit_fst + sep + digit_fst + pynini.closure(sep + digit_fst)
    )
    return pynutil.add_weight(serial, 2)
| 43.387978
| 118
| 0.639924
| 6,791
| 0.85529
| 0
| 0
| 0
| 0
| 0
| 0
| 1,843
| 0.232116
|
0cca7a33169b15c0dca26a3d1d4121500e7fe51e
| 7,735
|
py
|
Python
|
robot.py
|
dragonrobotics/2018-PowerUp
|
0fb6be22420b1488ca3d6abb04588e8564d768b9
|
[
"MIT"
] | 2
|
2018-02-08T23:29:21.000Z
|
2018-12-27T22:45:12.000Z
|
robot.py
|
dragonrobotics/2018-PowerUp
|
0fb6be22420b1488ca3d6abb04588e8564d768b9
|
[
"MIT"
] | 2
|
2018-02-10T20:25:16.000Z
|
2018-02-20T12:47:33.000Z
|
robot.py
|
dragonrobotics/2018-PowerUp
|
0fb6be22420b1488ca3d6abb04588e8564d768b9
|
[
"MIT"
] | 8
|
2018-01-15T14:53:52.000Z
|
2018-02-14T22:34:30.000Z
|
import wpilib
import constants
import swerve
import lift
import winch
import sys
from teleop import Teleop
from autonomous.baseline_simple import Autonomous
from sensors.imu import IMU
def log(src, msg):
    """Write a match-time-stamped message to stderr.

    Logging must never crash the robot loop, so any failure while
    formatting or printing is itself reported (best effort) instead of
    propagating.
    """
    try:
        full_msg = "[{:.3f}] [{}] {}".format(
            wpilib.Timer.getMatchTime(), str(src), str(msg)
        )
        print(full_msg, file=sys.stderr)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # can still propagate and stop the robot program cleanly.
        full_msg = "[{:.3f}] [log] Caught exception when logging: {} {}".format(  # noqa: E501
            wpilib.Timer.getMatchTime(),
            str(sys.exc_info()[0]),
            str(sys.exc_info()[1])
        )
        print(full_msg, file=sys.stderr)
def log_exception(src, locstr):
    """Log the currently-handled exception with a location hint.

    Produces e.g.: caught {ValueError} {in my_method}: {could not cast X to Y}
    """
    exc_type = sys.exc_info()[0]
    exc_value = sys.exc_info()[1]
    log(src, "Caught {} {}: {}".format(str(exc_type), locstr, str(exc_value)))
class Robot(wpilib.IterativeRobot):
    """2018 Power-Up robot: swerve drivetrain, lift, claw, and winch.

    Every periodic handler guards each subsystem interaction with its own
    try/except so that a failure in one subsystem cannot prevent the
    others from running; failures are reported via log_exception().
    All handlers previously used bare ``except:`` clauses; these have been
    narrowed to ``except Exception:`` so SystemExit/KeyboardInterrupt can
    still propagate.
    """

    def robotInit(self):
        """One-time setup: config, camera, auto chooser, all subsystems."""
        constants.load_control_config()
        wpilib.CameraServer.launch('driver_vision.py:main')

        # Let the drive team pick the autonomous starting position.
        self.autoPositionSelect = wpilib.SendableChooser()
        self.autoPositionSelect.addDefault('Middle-Baseline', 'Middle-Baseline')
        self.autoPositionSelect.addObject('Middle-Placement', 'Middle-Placement')  # noqa: E501
        self.autoPositionSelect.addObject('Left', 'Left')
        self.autoPositionSelect.addObject('Right', 'Right')
        wpilib.SmartDashboard.putData(
            'Robot Starting Position',
            self.autoPositionSelect)

        self.drivetrain = swerve.SwerveDrive(
            constants.chassis_length,
            constants.chassis_width,
            constants.swerve_config
        )
        self.drivetrain.load_config_values()

        self.lift = lift.ManualControlLift(
            constants.lift_ids['left'],
            constants.lift_ids['right'],
            constants.lift_limit_channel,
            constants.start_limit_channel
        )

        self.winch = winch.Winch(
            constants.winch_id
        )

        self.throttle = wpilib.Joystick(1)

        self.claw = lift.Claw(
            constants.claw_id,
            constants.claw_follower_id
        )

        self.imu = IMU(wpilib.SPI.Port.kMXP)

        # Rate-limits SmartDashboard updates in the periodic handlers.
        self.sd_update_timer = wpilib.Timer()
        self.sd_update_timer.reset()
        self.sd_update_timer.start()

    def disabledInit(self):
        pass

    def disabledPeriodic(self):
        """While disabled: keep config fresh and the dashboard updated."""
        try:
            self.lift.load_config_values()
            self.drivetrain.load_config_values()
        except Exception:
            log_exception('disabled', 'when loading config')

        try:
            self.drivetrain.update_smart_dashboard()
            self.imu.update_smart_dashboard()
            self.lift.update_smart_dashboard()
            self.winch.update_smart_dashboard()
            wpilib.SmartDashboard.putNumber(
                "Throttle Pos", self.throttle.getRawAxis(constants.liftAxis)
            )
        except Exception:
            log_exception('disabled', 'when updating SmartDashboard')

        try:
            self.lift.checkLimitSwitch()
        except Exception:
            log_exception('disabled', 'when checking lift limit switch')
        # NOTE(review): a second, unguarded
        # self.drivetrain.update_smart_dashboard() call used to sit here;
        # it duplicated the guarded call above and could crash the handler,
        # so it was removed.

    def autonomousInit(self):
        """Reload config, read the chooser, and build the auto routine."""
        try:
            self.drivetrain.load_config_values()
            self.lift.load_config_values()
        except Exception:
            log_exception('auto-init', 'when loading config')

        self.autoPos = None
        try:
            self.autoPos = self.autoPositionSelect.getSelected()
        except Exception:
            self.autoPos = None
            log_exception('auto-init', 'when getting robot start position')

        try:
            if self.autoPos is not None and self.autoPos != 'None':
                self.auto = Autonomous(self, self.autoPos)
            else:
                log('auto-init', 'Disabling autonomous...')
        except Exception:
            log_exception('auto-init', 'in Autonomous constructor')

        try:
            self.lift.checkLimitSwitch()
        except Exception:
            log_exception('auto-init', 'when checking lift limit switch')

    def autonomousPeriodic(self):
        """Run the auto routine; stop every actuator if it throws."""
        try:
            if self.sd_update_timer.hasPeriodPassed(0.5):
                self.auto.update_smart_dashboard()
                self.imu.update_smart_dashboard()
                self.drivetrain.update_smart_dashboard()
                self.lift.update_smart_dashboard()
                self.winch.update_smart_dashboard()
        except Exception:
            log_exception('auto', 'when updating SmartDashboard')

        try:
            if self.autoPos is not None and self.autoPos != 'None':
                self.auto.periodic()
        except Exception:
            # Stop everything.
            self.drivetrain.immediate_stop()
            self.lift.setLiftPower(0)
            self.claw.set_power(0)
            self.winch.stop()
            log_exception('auto', 'in auto :periodic()')

        try:
            self.lift.checkLimitSwitch()
        except Exception:
            log_exception('auto', 'when checking lift limit switch')

    def teleopInit(self):
        """Build the teleop controller and refresh configuration."""
        try:
            self.teleop = Teleop(self)
        except Exception:
            log_exception('teleop-init', 'in Teleop constructor')

        try:
            self.drivetrain.load_config_values()
            self.lift.load_config_values()
            constants.load_control_config()
        except Exception:
            log_exception('teleop-init', 'when loading config')

        try:
            self.lift.checkLimitSwitch()
        except Exception:
            log_exception('teleop-init', 'when checking lift limit switch')

    def teleopPeriodic(self):
        """Drive each subsystem from operator input, isolating failures.

        On a failure in a control routine the corresponding actuator is
        commanded to a safe stop before continuing with the next one.
        """
        try:
            self.teleop.drive()
        except Exception:
            log_exception('teleop', 'in drive control')
            self.drivetrain.immediate_stop()

        try:
            self.teleop.buttons()
        except Exception:
            log_exception('teleop', 'in button handler')

        try:
            self.teleop.lift_control()
        except Exception:
            log_exception('teleop', 'in lift_control')
            self.lift.setLiftPower(0)

        try:
            self.teleop.claw_control()
        except Exception:
            log_exception('teleop', 'in claw_control')
            self.claw.set_power(0)

        try:
            self.teleop.winch_control()
        except Exception:
            log_exception('teleop', 'in winch_control')
            self.winch.stop()

        try:
            self.lift.checkLimitSwitch()
        except Exception:
            log_exception('teleop', 'in lift.checkLimitSwitch')

        # Only touch the dashboard/config twice a second to keep the
        # 20 ms control loop fast.
        if self.sd_update_timer.hasPeriodPassed(0.5):
            try:
                constants.load_control_config()
                self.drivetrain.load_config_values()
                self.lift.load_config_values()
            except Exception:
                log_exception('teleop', 'when loading config')

            try:
                self.drivetrain.update_smart_dashboard()
                self.teleop.update_smart_dashboard()
                self.imu.update_smart_dashboard()
                self.lift.update_smart_dashboard()
                self.winch.update_smart_dashboard()
            except Exception:
                log_exception('teleop', 'when updating SmartDashboard')

    # for module in self.drivetrain.modules:
    #     module.set_steer_angle(0)
if __name__ == "__main__":
    # Hand control to WPILib, which instantiates Robot and dispatches the
    # *Init/*Periodic lifecycle callbacks defined above.
    wpilib.run(Robot)
| 31.315789
| 95
| 0.576083
| 6,807
| 0.880026
| 0
| 0
| 0
| 0
| 0
| 0
| 1,483
| 0.191726
|
0ccb7361200b302e98746fb913273e875a9c713b
| 593
|
py
|
Python
|
2019/06-hsctf/web-networked/solve.py
|
wani-hackase/wani-writeup
|
dd4ad0607d2f2193ad94c1ce65359294aa591681
|
[
"MIT"
] | 25
|
2019-03-06T11:55:56.000Z
|
2021-05-21T22:07:14.000Z
|
2019/06-hsctf/web-networked/solve.py
|
wani-hackase/wani-writeup
|
dd4ad0607d2f2193ad94c1ce65359294aa591681
|
[
"MIT"
] | 1
|
2020-06-25T07:27:15.000Z
|
2020-06-25T07:27:15.000Z
|
2019/06-hsctf/web-networked/solve.py
|
wani-hackase/wani-writeup
|
dd4ad0607d2f2193ad94c1ce65359294aa591681
|
[
"MIT"
] | 1
|
2019-02-14T00:42:28.000Z
|
2019-02-14T00:42:28.000Z
|
import requests

# Candidate characters for the next flag position.
CHARSET = "0123456789abcdefghijklmnopqrstuvwxyz_}"

flag = "hsctf{"
for _ in range(30):
    # Accumulated response time per candidate character.  The server
    # compares the password byte-by-byte, so a correct prefix takes
    # measurably longer to reject (timing side channel).  Average over
    # 5 rounds to smooth out network jitter.
    timings = [0.1 for _ in range(len(CHARSET))]
    for _ in range(5):
        for i, ch in enumerate(CHARSET):
            payload = {"password": flag + ch}
            r = requests.post(
                "https://networked-password.web.chal.hsctf.com", data=payload
            )
            response_time = r.elapsed.total_seconds()
            timings[i] += response_time
            print(payload, " response time : ", response_time)
    # Keep the candidate that was slowest to reject.
    flag += CHARSET[timings.index(max(timings))]
    print("flag is ", flag)
    if flag.endswith("}"):
        # Closing brace reached: the full flag has been recovered, so
        # stop instead of burning the remaining iterations.
        break
| 21.962963
| 77
| 0.563238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.22597
|
0ccc1f35f3830db92996f5a342365046d1d2adc7
| 47,367
|
py
|
Python
|
gda-public/multidim/covertree.py
|
drkostas/tda_examples
|
3fdef4f890ced14b8e3207bd9393eaf262dd0c24
|
[
"MIT"
] | 1
|
2021-12-22T14:29:40.000Z
|
2021-12-22T14:29:40.000Z
|
gda-public/multidim/covertree.py
|
drkostas/tda_examples
|
3fdef4f890ced14b8e3207bd9393eaf262dd0c24
|
[
"MIT"
] | null | null | null |
gda-public/multidim/covertree.py
|
drkostas/tda_examples
|
3fdef4f890ced14b8e3207bd9393eaf262dd0c24
|
[
"MIT"
] | null | null | null |
r"""This module contains the essential classes for the "Cover-tree with
friends" algorithm, namely:
- :class:`CoverTree`
- :class:`CoverLevel`
This module also defines the constants
- :code:`ratio_Ag` :math:`=\sqrt{2} - 1=0.414\ldots`, the inverse of the silver ratio
- :code:`ratio_Au` :math:`=\frac{\sqrt{5} - 1}{2}=0.618\ldots`, the inverse of the golden ratio
Copyright
---------
- This file is part of https://github.com/geomdata/gda-public/
- 2015, 2016, 2017 by Geometric Data Analytics, Inc. (http://geomdata.com)
- AGPL license. See `LICENSE` or https://github.com/geomdata/gda-public/blob/master/LICENSE
"""
from __future__ import print_function
from copy import deepcopy
import numpy as np
import pandas as pd
from . import PointCloud
from . import fast_algorithms
from scipy.spatial.distance import cdist, pdist, squareform
from collections import OrderedDict
import collections
import logging
# Inverse of the silver ratio: sqrt(2) - 1, the positive root of x**2 + 2*x == 1.
ratio_Ag = np.float64(0.41421356237309504880168872420969807857)
# Inverse of the golden ratio: (sqrt(5) - 1)/2, the positive root of x**2 + x == 1.
ratio_Au = np.float64(0.61803398874989484820458683436563811772)

# Sanity-check the hard-coded decimal expansions against their defining
# polynomials; both identities hold exactly in float64.
assert ratio_Ag**2 + 2*ratio_Ag == np.float64(1.0),\
    """pre-defined ratio_Ag does not match arithmetic.
Try using some form of sqrt(2) - 1, which is the positive root of x**2 + 2*x == 1."""
assert ratio_Au**2 + 1*ratio_Au == np.float64(1.0),\
    """pre-defined ratio_Au does not match arithmetic.
Try using some form of (sqrt(5) - 1)/2, which is the positive root of x**2 + x == 1."""
class CoverTree(object):
r"""An efficient and convenient implementation of
the "Cover Tree with Friends" algorithm.
This implementation follows the notation and terminology of the paper
[CDER1]_ as carefully as possible; they were written in concert.
A CoverTree is an Python iterator object [iter1]_ [iter2]_.
The :func:`__next__` and :func:`__getitem__` methods yield the
:class:`CoverLevel` with that index. The entire "friends" algorithm happens
in :func:`multidim.covertree.CoverTree.__next__`
Parameters
----------
pointcloud : :class:`multidim.PointCloud`
The original data from which to construct a cover tree. Note that the
labeling/weighting/indexing system requires the use of
:class:`multidim.PointCloud` input, not merely a
:class:`numpy.ndarray`. However, `CoverTree` ignores all of the higher
strata (edges, faces, and so on) of the :class:`multidim.PointCloud`.
Only the points in stratum[0] are used.
ratio : float
Ratio :math:`\theta` to shrink radii by at each step. Must satisfy
:math:`0<\theta<1`. Good values are :code:`0.5` or
:code:`ratio_Ag` or :code:`ratio_Au`. Default: :code:`ratio_Ag`
exchange_teens : bool
Should teens be exchanged at each step, using Type-2 friends?
Default: :code:`True`
sort_orphans_by_mean : bool
Should orphans be re-ordered by their proximity to weighted mean of the
labels? This is particularly useful for improving the cross-validation
score of the :class:`multidim.models.CDER` classifier. Disable for
speed when the ordering of adults is irrelevant for your needs.
Default: :code:`True`
Yields
------
:class:`multidim.covertree.CoverLevel`
From level 0 (one ball) until all points are separated. Each `CoverLevel`
is cached once computed.
Attributes
----------
pointcloud : :class:`multidim.PointCloud`
The original dataset.
ratio : :class:`numpy.float64`
Ratio :math:`\theta` by which to shrink the ball radius between levels.
_r0 : :class:`numpy.float64`
The initial radius at level 0.
_adult0 : :class:`numpy.int64`
The index of the original adult. Typically, this is the index of the
point nearest the weighted mean of the :class:`PointCloud`
_levels : :class:`collections.OrderedDict`
An ordered dictionary to cache the levels computed so far, keyed by the
index. Typically, a user would never access this directly. Instead, use
:code:`covertree[i]`
cohort : :class:`numpy.ndarray`
An array of :class:`numpy.int64`, which keeps track of the cohort
(that is, the level in the filtration) of each point. If a point has
not been born as an adult yet, the value is -1
level_pointer : int
Index of the currently referenced `CoverLevel`, for iteration purposes.
Setting this is like using :func:`file.seek` on file objects. Usually,
you don't want to mess with it, but it is used internally in
:class:`mutlidim.models.CDER` for comparing entropy between levels.
N : int
The number of points in :code:`self.pointcloud`
allpoints : :class:`numpy.ndarray`
The raw NumPY array underlying :code:`self.pointcloud`.
Notes
-----
This section is excerpted and condensed from [CDER1]_
**Definition**
Let :math:`X` be a finite subset of :math:`\mathbb{R}^d`.
The purpose of a cover tree is to build a filtration
:math:`\emptyset \subset CL_0 \subset CL_1 \subset \cdots \subset CL_{\text{max}} = X`
by covering it with balls of smaller and smaller radius centered at points in
the set.
The points in :math:`CL_\ell` are called the **adults** at level :math:`\ell`.
Specifically, a **cover tree** is a filtration of :math:`X` with the
following additional properties:
- :math:`CL_0` contains a single point, :math:`a_0`. (see :code:`_adult0`)
- There is a radius :math:`r_0` (see :code:`_r0`) such that :math:`X` is contained in the
ball :math:`B(a_0, r_0)` of radius :math:`r_0` around :math:`a_0`
- There is a real number :math:`0< \theta < 1` (see :code:`ratio`) such that, for every
:math:`\ell`, the set :math:`X` is a subset of
:math:`\cup_{a_i \in CL_\ell} B(a_i, r_\ell)`
where :math:`r_\ell = r_0 \theta^\ell`
- For each :math:`\ell`, if :math:`a_i, a_j \in CL_\ell`, then
`\| a_i - a_j\| > r_\ell`. No two adults lie in the same ball.
- For each :math:`\ell`, each point :math:`x \in X` is assigned to a
**guardian** :math:`a_i \in CL_\ell` such that :math:`x` lies in the ball
:math:`B(a_i, r_\ell)`. We say :math:`x` is a **child** of :math:`a_i`
at level :math:`\ell`. Each :math:`a_i \in CL_\ell` is its own guardian
and its own child.
- There is a tree structure on the (level, adult) pairs of the filtration
:math:`(\ell, a_i)`, where the tree relation
:math:`(\ell, a_i) \to (\ell+1, a_k)` holds if :math:`a_k` was a child of
:math:`a_i` at level :math:`\ell`. We say :math:`a_k` is a
**successor** of :math:`a_i`, and :math:`a_i` is a **predecessor** of
:math:`a_k`. Note that :math:`(\ell, a_i) \to (\ell+1, a_i)` for all
:math:`a_i \in CL_\ell`
Extending the maturation/reproduction metaphor of **adults**, **children**, and
**guardians** above, a child :math:`x` with guardian :math:`a_i` at level
:math:`\ell` is called a **teen** if :math:`\frac12 r_\ell < \|a_i - x\|`, and
it is called a **youngin** if :math:`\|a_i - x\| \leq \frac12 r_\ell`.
The point of this is that we may require the additional condition:
- (Optional) On the previous condition, we can additionally require that
each :math:`x` is the child of the *nearest* adult, if it lies in the
intersection of two or more balls of :math:`B(a_i, r_\ell)`. If two
adults are equally distant, choose the one of the lowest index. This
option is enforced by the :code:`exchange_teens` flag.
When changing from level math:`\ell` to level :math:`\ell+1`, the radius of
each ball shrinks to :math:`r_{\ell+1} = \theta r_\ell`. Children farther than
:math:`r_{\ell+1}` from their guardians become **orphans**. We must decide
whether these orphans should be **adopted** by other adults at level
:math:`\ell+1`, or if the orphans should be **emancipated** as new adults at level
:math:`\ell+1`. That is, the newly emancipated adults at level
:math:`\ell+1` comprise the **cohort** (see :code:`cohort`) at level :math:`\ell+1`.
We say :math:`a_j \in CL_{\ell}` is an **elder** of
:math:`a_k \in CL_{\ell+1}` if the distance :math:`\|a_j - a_k\|` is
sufficiently small that :math:`a_j` *could have been* emancipated from
:math:`a_k` between levels :math:`\ell` and :math:`\ell+1`. That is,
if the tree structure were unknown, then elders of :math:`a_j` are the
possible predecessors. If :math:`a_k` is its own predecessor (because it
was already an adult in :math:`CL_{\ell}`), then the only elder of
:math:`a_k` is itself.
**Example**
Consider this point cloud in :math:`\mathbb{R}^2`
.. math::
X = \{(0,0.1),(1,2),(0,1),(0,0),(2,2),(2,2.2),(3,3),(1,1)\}
We index these points from 0 to 7 in the given order.
We have the following filtration
.. math::
&CL_0 = \{7\}\\
&CL_1 = \{3, 4, 6, 7\}\\
&CL_2 = \{1, 2, 3, 4, 6, 7\}\\
&CL_3 = \{1, 2, 3, 4, 6, 7\}\\
&CL_4 = \{1, 2, 3, 4, 5, 6, 7\}\\
&CL_5 = \{0, 1, 2, 3, 4, 5, 6, 7\}\\
We have the following cover ball radii
.. math::
&r_0 = 2\sqrt{2}\\
&r_1 = \sqrt{2}\\
&r_2 = \frac{\sqrt{2}}{2}\\
&r_3 = \frac{\sqrt{2}}{4}\\
&r_4= \frac{\sqrt{2}}{8}\\
&r_5 = \frac{\sqrt{2}}{16}
Here we have :math:`a_0 = (1,1)`, :math:`r_0 = 2\sqrt{2}`, and
:math:`\theta = 1/2`.
**The Friends Algorithm**
Our algorithm is based upon the concept of **friends**. To each adult there
will be associated *three* types of friends. Types 1, 2,
and 3 are used to build the `CoverTree` in typically linear time.
Let :math:`a_i \in CL_\ell`, that is, :math:`a_i` is an adult at level
:math:`\ell`. Define the following thresholds
.. math::
T_1(\ell) &= (2 + \theta)r_l \\
T_2(\ell) &= (2 + 2\theta)r_l \\
T_3(\ell) &= \frac{2}{1 - \theta}r_l.
It is elementary to show that :math:`T_1(l) < T_2(l) < T_3(l)`.
Moreover, we have the recursion relation
:math:`T_3(l) < T_3(l-1)`.
Each level of the filtreation and all of this associated data is stored in
a `CoverLevel` object.
The algorithm works like this, using a "reproduction" metaphor:
- Level 0 (see :code:`covertree[0]` of type `CoverLevel`) has a single adult. All
points are its children. Its only friends are itself.
- ...
- Level :math:`\ell` (see :code:`covertree[l]` of type `CoverLevel`)
has known adults, friends1, friends3, friends3, and children. We now
compute level :math:`\ell+1.` in :func:`__next__`
1. Shrink the radius by a factor of :math:`\theta`. Some children
become orphans.
2. Orphans are adopted or become newly emanicpated adults. This uses
:math:`T_1(\ell)`.
3. If :code:`exhange_teens is True`, then children who are teens are
re-assigned to the closest possible adult. This uses
:math:`T_2(\ell)`.
4. Compute new friends3.
5. Use new friends3 to compute new friends1, friends2, friends3.
- Level :math:`\ell+1` (see :code:`covertree[l+1]` of type `CoverLevel`)
has known adults, friends1, friends3, friends3, and
children. We now compute level :math:`l+2`
- ...
- Stop when all points are adults.
Levels are evaluated lazily and cached. For example,
if no levels have been computed, then
:code:`covertree[3]` will compute levels 0, 1, 2, and 3.
Then :code:`covertree[5]` will use those values for 0, 1, 2, 3 to compute 4
and 5.
Examples
--------
>>> pc = PointCloud.from_multisample_multilabel(
... [np.array([[0,0.1],[1,2],[0,1],[0,0],[2,2],[2,2.2],[3,3],[1,1]])], [None])
>>> ct = CoverTree(pc, ratio=0.5, sort_orphans_by_mean=False)
>>> cl=ct.next()
>>> list(cl.adults)
[7]
>>> pc.coords.values[7,:]
array([ 1., 1.])
>>> cl
Level 0 using 1 adults at radius 2.8284271247...
>>> ct.next()
Level 1 using 2 adults at radius 1.4142135623...
>>> for cl in ct:
... print(cl.exponent, list(cl.adults))
0 [7]
1 [7, 5]
2 [7, 5, 0, 1, 2, 6]
3 [7, 5, 0, 1, 2, 6]
4 [7, 5, 0, 1, 2, 6, 4]
5 [7, 5, 0, 1, 2, 6, 4, 3]
>>> ct.cohort
array([2, 2, 2, 5, 4, 1, 2, 0])
References
----------
.. [CDER1] Supervised Learning of Labeled Pointcloud Differences via Cover-Tree Entropy Reduction https://arxiv.org/abs/1702.07959
.. [CDER2] CDER, Learning with Friends https://www.ima.umn.edu/2016-2017/DSS9.6.16-5.30.17/26150
.. [iter1] https://docs.python.org/3/library/stdtypes.html?highlight=iterator#iterator-types
.. [iter2] https://wiki.python.org/moin/Iterator
"""
def __init__(self, pointcloud, ratio=ratio_Ag, exchange_teens=True,
             sort_orphans_by_mean=True):
    r"""Initialize the cover tree and build level 0 (one ball around the
    central adult).  All later levels are computed lazily by
    :func:`__next__`.  Parameters are documented on the class docstring.
    """
    self.pointcloud = pointcloud
    # back-reference so the PointCloud can reach its cover tree
    self.pointcloud.covertree = self
    if np.any(self.pointcloud.stratum[0]['mass'].values <= 0):
        logging.warning("""
Some of your points have non-positive mass! This is probably wrong.
Consider setting masses with PointCloud.stratum[0]['mass']=1.0.""")
    self.label_set = self.pointcloud.label_info['int_index'].values
    # raw coordinate array underlying the PointCloud
    self.coords = self.pointcloud.coords.values
    # ensure a per-point multiplicity array exists (all 1s by default);
    # it is refined in __next__ when duplicate points are detected
    try:
        self.pointcloud.multiplicity
    except AttributeError:
        self.pointcloud.multiplicity = np.ones(
            shape=(self.coords.shape[0],),
            dtype=np.int64)
    self.ratio = ratio
    # cache of computed CoverLevels, keyed by exponent
    self._levels = dict()
    self.radius = np.inf
    self.exchange_teens = exchange_teens
    self.sort_orphans_by_mean = sort_orphans_by_mean
    # more initialization happens in __next__()
    # the smallest enclosing ball gives the root adult and radius r0
    ball = self.pointcloud.cover_ball()
    self._r0 = ball['radius']
    self._adult0 = ball['index']
    self.N = self.pointcloud.coords.index.shape[0]
    self.allpoints = self.pointcloud.coords.index.values
    # cohort[i] == -1 means point i has not yet become an adult
    self.cohort = -1*np.ones(shape=(self.N,), dtype=np.int64)
    assert np.all(self.pointcloud.coords.index.values == np.arange(self.N)),\
        "So far, out methods require the pointcloud index to be range(N)."
    # level 0: the root adult guards every point and is its own friend
    level0 = CoverLevel(self, 0)
    level0.adults.append(self._adult0)
    # TODO! Use index method somehow!
    level0.children[self._adult0] = self.pointcloud.coords.index.values.copy()
    level0.friends1[self._adult0] = [self._adult0]
    level0.friends2[self._adult0] = [self._adult0]
    level0.friends3[self._adult0] = [self._adult0]
    level0.weights[self._adult0] = level0.find_label_weights(self._adult0)
    level0.predecessor = OrderedDict({self._adult0: None})
    level0.successors = OrderedDict()
    level0.guardians = self._adult0*np.ones(shape=(self.N,), dtype=np.int64)
    self.cohort[self._adult0] = 0
    self._levels[0] = level0
    # level_pointer == -1 means "before level 0"; see reset()
    self.level_pointer = -1
    self.reset()
def __sizeof__(self):
    """Report size as the sum of the sizes of all cached CoverLevels."""
    total = 0
    for _, level in self._levels.items():
        total += level.__sizeof__()
    return total
def __repr__(self):
    """Summarize point count, dimension, and adults per computed level."""
    n_points, dimension = self.pointcloud.coords.shape
    text = """A CoverTree of {} points in dimension {}, computed to
level\tadults\n""".format(n_points, dimension)
    for exponent in sorted(self._levels.keys()):
        text += "{}\t{}\n".format(exponent, len(self._levels[exponent].adults))
    return text
def next(self):
    r"""Python-2-style alias for :func:`__next__`."""
    return self.__next__()
def __next__(self):
    r"""
    Increment exponent and compute/retrieve next level of cover tree as
    a CoverLevel object.
    This is where the Friends algorithm happens: promote the previous
    adults, orphan children that fall outside the shrunken balls, adopt
    or emancipate the orphans, optionally exchange teens, and refresh
    the type-1/2/3 friend lists.  Computed levels are cached.
    """
    assert 0.0 < self.ratio < 1.0
    # negative exponent means we are about to begin, so the next will be 0
    if self.level_pointer < 0:
        self.level_pointer = -1
    self.level_pointer += 1
    # simple cache
    if self.level_pointer in self._levels:
        return self._levels[self.level_pointer]
    assert self.level_pointer > 0
    # If we got here, we are really initialized.
    level = CoverLevel(self, self.level_pointer)
    # get data from previous level
    prev_level = self._levels[level.exponent - 1]
    # STEP 1: Promote
    # every previous adult stays an adult; it is its own predecessor,
    # and its friend lists start as just itself (refined in STEP N)
    level.guardians = deepcopy(prev_level.guardians)
    level.children = deepcopy(prev_level.children)
    level.adults = []
    level.adults.extend(prev_level.adults)
    for ca in level.adults:
        ci = ca  # for human sanity
        level.predecessor[ca] = ci
        prev_level.successors[ci] = np.array([ca], dtype=np.int64)
        # initialize friends -- updated cleverly later.
        level.friends1[ca] = [ca]
        level.friends2[ca] = [ca]
        level.friends3[ca] = [ca]
    # STEP 2: Orphan
    # children farther than the new (smaller) radius from their guardian
    # become orphans, to be re-homed in STEP 3
    orphans = []
    for ci in level.adults:
        center_a = np.array([ci], dtype=np.int64)
        #children_ids = np.where(level.children[ci])[0]
        children_dists = fast_algorithms.distance_cache_None(center_a, level.children[ci], self.coords).flatten()
        # since we have computed children_dists, let's take a moment to count
        # duplicate points of new adults.
        if self.cohort[ci] == prev_level.exponent:
            mult = np.count_nonzero(children_dists == 0.0)
            self.pointcloud.multiplicity[ci] = mult
            if mult > 1:
                logging.warning("point {} has multiplicity {}.".format(ci, mult))
        my_orphans = level.children[ci][children_dists > level.radius]
        assert np.all(np.in1d(my_orphans, level.children[ci]))
        if len(my_orphans) > 0 and self.sort_orphans_by_mean:
            # re-order orphans by proximity to the weighted label means,
            # so emancipation order favors label-dense regions (helps CDER)
            child_coords = self.coords[level.children[ci], :]
            child_labels = self.pointcloud.labels[level.children[ci]]
            child_weight = self.pointcloud.stratum[0]['mass'].values[level.children[ci]]
            label_means, label_weights = fast_algorithms.label_means(
                child_coords,
                child_labels,
                child_weight,
                self.label_set)
            label_ordering = label_weights.argsort()[::-1]  # big-to-small
            dist_to_labelmean_by_orphan = cdist(label_means[label_ordering, :],
                                                self.coords[my_orphans, :])
            # get closest-to-each-label until all orphans are used
            orphan_order = np.concatenate([
                my_orphans[dist_to_labelmean_by_orphan.argsort(axis=1).T.flatten()],
                my_orphans])  # include everyone.
            # remove duplicates
            sort_orphan, sort_index = np.unique(orphan_order, return_index=True)
            assert len(my_orphans) == len(sort_index), "Orphans lost from sorted list?"
            # re-sort orphans by proximity to biggest weight.
            # Because label_means was pre-sorted by weight, we can re-sort
            # by that index!
            sort_index.sort()
            my_orphans = orphan_order[sort_index]
        orphans.extend(my_orphans)
    # check that each orphan was ejected once only.
    assert len(orphans) == len(set(orphans)), orphans
    # orphans = sorted(orphans)
    # STEP 3: Adopt or Liberate
    # Use type-1 friends to re-assign or promote orphans.
    # This is where most distances are computed, so it is the slowest.
    for orphan_index in orphans:
        assert orphan_index not in level.adults
        assert orphan_index in level.children[level.guardians[orphan_index]], "{} not in {}".format(orphan_index, level.children[level.guardians[orphan_index]])
        old_parent, new_parent = fast_algorithms.covertree_adopt_or_liberate(
            level, prev_level, orphan_index)
        if new_parent == orphan_index:
            # the orphan was emancipated: it joins this level's cohort
            # as a new adult, initially its own only child and friend
            prev_level.successors[old_parent] = np.append(prev_level.successors[old_parent], orphan_index)
            level.predecessor[orphan_index] = old_parent
            level.adults.append(orphan_index)
            level.guardians[orphan_index] = orphan_index
            level.children[orphan_index] = np.array([orphan_index], dtype=np.int64)
            level.friends1[orphan_index] = [orphan_index]
            level.friends2[orphan_index] = [orphan_index]
            level.friends3[orphan_index] = [orphan_index]
            self.cohort[orphan_index] = level.exponent
        assert orphan_index not in level.children[old_parent]
        assert orphan_index in level.children[new_parent]
    assert np.all(level.guardians >= 0)
    # STEP 4: Exchange teens
    # re-assign "teen" children to nearest adult using type-2 friends
    if self.exchange_teens:
        for ci in level.adults:
            fast_algorithms.covertree_exchange_teens(level, prev_level, ci)
    # STEP N: Update friends from old friends
    prev_level = self._levels[level.exponent - 1]
    for pre_i in prev_level.adults:
        fast_algorithms.covertree_befriend321(level, prev_level, pre_i,
                                              np.array(prev_level.friends3[pre_i], dtype=np.int64))
    level.cleanup()
    # assert level.check()
    self._levels[level.exponent] = level
    return level
def reset(self):
    """
    Rewind the iteration pointer to level -1 (before level 0).
    Used internally when levels must be re-computed or re-walked.
    """
    self.level_pointer = -1
def __getitem__(self, exponent):
"""
Get CoverLevel (exponent index, or slice of them)
"""
if isinstance(exponent, slice):
# Since self[i] is already recursive, this probably makes
# a lot of excessive function calls, but oh well...
return (self[i] for i in range(exponent.start, exponent.stop, exponent.step))
else:
if exponent < 0:
exponent += len(self)
self.reset()
# ensure that previous levels have been computed
while self.level_pointer < exponent:
self.__next__()
assert exponent == self.level_pointer
return self._levels[exponent]
def __iter__(self):
    r"""Yield successive CoverLevels until every point is an adult."""
    self.reset()
    current = self.__next__()
    yield current
    total_points = self.pointcloud.coords.values.shape[0]
    # stop once the adults (counted with multiplicity) cover all points
    while np.sum(self.pointcloud.multiplicity[current.adults]) < total_points:
        current = self.__next__()
        yield current
def __len__(self):
    r"""Current depth of the CoverTree: the number of levels computed
    *so far*.  If levels 0..3 exist, len(self) is 4.

    Returns
    -------
    int
    """
    return 1 + max(self._levels.keys())
def sparse_complex(self, level=-1):
    r"""Build a sparse complex from this CoverTree via type-4 friends.

    Notes
    -----
    This is a *placeholder*; sparse complexes are not currently part of
    the stable codebase.

    Parameters
    ----------
    level: int
        Level to use. (Default: -1, meaning len(self))

    Returns
    -------
    PointCloud object, with edge values coming from sparse complex algorithm.
    """
    raise NotImplementedError
def make_edges(self, min_distance=0.0, max_distance=-1.0):
    r"""Iterate over the edges between the points of the underlying
    `PointCloud`, where min_distance < length <= max_distance.
    Uses the CoverTree type-1 friends for efficiency: only pairs of
    points whose guardians are friends at a suitable level are compared,
    instead of all N^2 pairs.
    This is called by :func:`PointCloud.build_edges`

    Parameters
    ----------
    min_distance: float
        Minimum length. (Default: 0.0) Inequality means no self-edges!
    max_distance: float
        Maximum length. (Default: -1.0, meaning 2*self._r0, for all edges)

    Yields
    ------
    triples (a,b,r), where a,b are the indices of points, and r is the
    distance.
    """
    if max_distance == -1.0:
        max_distance = 2*self._r0
    if max_distance <= 0.0:
        raise ValueError("Meaningless maximum distance {}.".format(max_distance))
    # choose the level whose ball radius brackets max_distance:
    # r0 * ratio**ell is the largest radius <= max_distance
    ell = np.int64(np.floor(np.log(max_distance/self._r0)/np.log(self.ratio)))
    ball_radius = self._r0 * (self.ratio ** ell)
    if ell <= 0:
        ell = 1
    else:
        assert ball_radius * self.ratio < max_distance <= ball_radius,\
            "Incorrect exponent?"
    # we need only check friends at level ell-1.
    level = self[ell - 1]
    total = 0  # count of candidate pairs examined, for the log line below
    for ci in level.adults:
        for cj in level.friends1[ci]:
            if ci == cj:
                # within one adult's children: all-pairs via pdist
                kids_i = level.children[ci]
                total += int(len(kids_i)*(len(kids_i)-1)/2)
                if len(kids_i) > 1:
                    dists = squareform(pdist(self.coords[kids_i,:], self.pointcloud.dist))
                    good_pairs = (min_distance < dists) & (dists <= max_distance)
                    good_edges = np.where(good_pairs)
                    for index_i, index_j in np.array(good_edges).T:
                        # don't double_count on symmetric square matrix!
                        if index_i < index_j:
                            yield (kids_i[index_i], kids_i[index_j], dists[index_i,index_j])
            # friends is reflexive, so don't double-count by parent
            elif ci < cj:
                # between two friendly adults: cross-distances only
                kids_i = level.children[ci]
                kids_j = level.children[cj]
                total += len(kids_i)*len(kids_j)
                dists = fast_algorithms.distance_cache_None(kids_i,
                                                            kids_j,
                                                            self.coords)
                good_pairs = (min_distance < dists) & (dists <= max_distance)
                good_edges = np.where(good_pairs)
                for index_i, index_j in np.array(good_edges).T:
                    yield (kids_i[index_i], kids_j[index_j], dists[index_i,index_j])
    if total > 0:
        logging.info("Examined {} possible edge distances using level {}.".format(total, ell-1))
    def plot(self, canvas, **kwargs):
        r""" Interactive plot of a CoverTree, with dynamic computation of levels.

        Draws level 0 immediately, then returns an ``update(level)`` closure
        that re-renders the same canvas for any other level (intended for use
        with Jupyter widgets such as ``ipywidgets.interact``).

        Parameters
        ----------
        canvas : :class:`bokeh.plotting.figure.Figure`
            as obtained from :code:`canvas = bokeh.plotting.figure()`

        Other parameters are fed to :func:`CoverLevel.plot`

        Returns
        -------
        update : callable
            ``update(level)`` recomputes the data/title for ``self[level]``
            and pushes the change to the live notebook output.
        """
        # Only Bokeh canvases are supported here; the matplotlib branch is
        # intentionally disabled (see commented-out code below).
        if type(canvas).__module__ == 'bokeh.plotting.figure':
            canvas_type = "bokeh"
            import bokeh.plotting
            from bokeh.io import push_notebook
        # elif type(canvas).__module__ == 'matplotlib.axes._subplots':
        #    canvas_type = "pyplot"
        #    import matplotlib.pyplot as plt
        else:
            raise NotImplementedError(
                """canvas must be a bokeh.plotting.figure() or a matplotlib.pyplot.subplots()[1].
                You gave me {}""".format(type(canvas))
            )
        # `source` is the ColumnDataSource backing the level-0 glyphs; the
        # closure below mutates it in place to redraw other levels.
        source = self[0].plot(canvas, **kwargs)
        def update(level):
            print("level {}".format(level))
            data, title = self[level].plot_data_title(**kwargs)
            canvas.title.text = title
            source.data = data
            push_notebook()
            pass
        return update
        # from ipywidgets import interact
        # return interact(update, level=(0,max(self._levels.keys())))
    def plot_tree(self, canvas, show_balls=True, show_tribes=False,
                  show_villages=False, show_adults=True):
        r""" Plot the tree of a CoverTree.

        Builds a directed graph from ``self.tree`` (parent -> child edges),
        positions each adult by its cohort level (one row per level), and
        draws it with networkx.

        Parameters
        ----------
        canvas : :class:`bokeh.plotting.figure.Figure`
            as obtained from :code:`canvas = bokeh.plotting.figure()`
        show_balls : boolean
            default True
        show_adults : boolean
            default True
        show_villages : boolean
            default False
        show_tribes : boolean
            default False

        Notes
        -----
        NOTE(review): the show_* parameters are accepted but never read in
        this body -- confirm whether they are reserved for future use.
        """
        if type(canvas).__module__ == 'bokeh.plotting.figure':
            canvas_type = "bokeh"
            import bokeh.plotting
            from bokeh.io import push_notebook
        elif type(canvas).__module__ == 'matplotlib.axes._subplots':
            canvas_type = "pyplot"
            import matplotlib.pyplot as plt
        else:
            raise NotImplementedError(
                "canvas must be a bokeh.plotting.figure(). You gave me {}".format(
                    type(canvas))
            )
        # NOTE(review): `plt` is imported only on the matplotlib branch, but
        # plt.get_cmap is called unconditionally below, so a bokeh canvas
        # would hit a NameError -- confirm intended backend support.
        import networkx as nx
        g = nx.DiGraph()
        edges = []
        # self.tree maps each parent adult to its child adults.
        for root in self.tree:
            if root is not None:
                for branch in self.tree[root]:
                    edges.append((root, branch))
        g.add_edges_from(edges)
        # Color each node by its (normalized) cohort level.
        val_map = dict((i, 1.0*self.cohort[i]/self.cohort.max()) for i in range(self.cohort.shape[0]))
        values = [val_map.get(node) for node in g.nodes()]
        # Lay out one horizontal row of adults per level, centered at x = 0.
        pos = dict()
        prev_num = 0
        for ht in range(len(self)):
            adults = np.where(self.cohort == ht)[0]
            this_num = adults.shape[0]
            diff = this_num - prev_num
            prev_num = this_num
            for i,ci in enumerate(adults):
                pos[ci] = np.array([this_num/2.0 - i, 1.0*ht])
        # Sanity check: every graph node must have been assigned a position.
        for node in g.nodes():
            assert node in pos, "{} not found {}". format(node, self.cohort[node])
        nx.draw_networkx_edges(g, pos, arrows=True, alpha=0.1)
        nx.draw_networkx_nodes(g, pos, node_size=50, cmap=plt.get_cmap('jet'), node_color = values)
        pass
class CoverLevel(object):
    r"""
    A thin class to represent one level of the filtration in a :class:`CoverTree`.

    A CoverLevel is essentially a collection of dictionaries of adults,
    friends, children, and other attributes of a particular level.
    The various attributes have different orderings, optimized for typical
    usage and minimal algorithmic complexity.

    Notes
    -----
    The user should never create a CoverLevel directly.  Instead, create a
    CoverTree and access its :math:`i^{\text{th}}` level with
    :code:`covertree[i]`.

    Attributes
    ----------
    covertree : :class:`CoverTree`
        The CoverTree to which this CoverLevel belongs.
    pointcloud : :class:`multidim.PointCloud`
        The PointCloud used to make the CoverTree
    exponent : int
        The exponent (that is, index or depth or level) of this CoverLevel in
        the CoverTree.
    radius : :class:`numpy.float64`
        The ball radius
    T1 : :class:`numpy.float64`
        The type-1 friends radius
    T2 : :class:`numpy.float64`
        The type-2 friends radius
    T3 : :class:`numpy.float64`
        The type-3 friends radius
    adults : `list`
        List of adult indices, in order they were born
    friends1 : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of type-1 friends. Keyed by the
        adults, in birth order. The values are lists, in index order.
    friends2 : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of type-2 friends. Keyed by the
        adults, in birth order. The values are lists, in index order.
    friends3 : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of type-3 friends. Keyed by the
        adults, in birth order. The values are lists, in index order.
    guardians : :class:`numpy.ndarray`
        An array of :class:`numpy.int64`, which keeps track of the guardians
        of each point in the underlying `PointCloud`. Adults are their own
        guardians.
    predecessor : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of predecessors of the adults.
        Keyed by the adults, in birth order. The values are
        the indices of adults at the previous `CoverLevel`.
    successors : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of successors of the adults.
        Keyed by the adults, in birth order. The values are
        NumPy arrays of indices of adults in the next `CoverLevel`. This is
        computed only at the next level!
    children : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of children, keyed by the
        adults, in birth order. The values are NumPy int64 index arrays,
        which allow for easy extraction of subsets of children.
    weights : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of total weight of children, keyed
        by the adults, in birth order. The values are NumPy arrays, with one
        entry per label. This is computed as part of :func:`cleanup`
    entropy : :class:`collections.OrderedDict`
        An ordered dictionary to keep track of overall entropy of children, keyed
        by the adults, in birth order. The values are :class:`numpy.float64`
        numbers, of overall entropy of weights across labels. This is computed
        and stored via :class:`multidim.models.CDER`, but is otherwise
        unused.
    """

    def __init__(self, covertree, exponent):
        # Back-references to the owning tree and its point cloud.
        self.covertree = covertree
        self.pointcloud = self.covertree.pointcloud
        self.exponent = exponent
        # Ball radius shrinks geometrically with the level exponent.
        self.radius = self.covertree._r0 * (self.covertree.ratio ** self.exponent)
        # Thresholds for the three flavors of "friend" relations.
        self.T1 = self.radius*(2.0 + self.covertree.ratio)
        self.T2 = self.radius*(2.0 + 2.0*self.covertree.ratio)
        self.T3 = self.radius*2.0/(1.0 - self.covertree.ratio)
        self.adults = []
        self.friends1 = OrderedDict()  # each entry should be a LIST
        self.friends2 = OrderedDict()  # each entry should be a LIST
        self.friends3 = OrderedDict()  # each entry should be a LIST
        self.guardians = None  # filled in later as an int64 ndarray over all points
        self.predecessor = OrderedDict()  # each entry an ARRAY
        self.successors = OrderedDict()  # each entry an ARRAY
        self.children = OrderedDict()  # each entry an int64 index ARRAY (see check())
        self.weights = OrderedDict()  # each entry a np array by label
        self.entropy = OrderedDict()  # each entry a np.float64
    def check(self):
        r""" Perform basic sanity checks on children, friends, etc.

        Throws `AssertionError` if anything fails.
        """
        # Container types must be exactly as built by __init__.
        assert type(self.adults) == list
        assert type(self.friends1) == OrderedDict
        assert type(self.friends2) == OrderedDict
        assert type(self.friends3) == OrderedDict
        assert type(self.predecessor) == OrderedDict
        assert type(self.successors) == OrderedDict
        assert type(self.weights) == OrderedDict
        assert type(self.entropy) == OrderedDict
        assert type(self.guardians) == np.ndarray\
            and self.guardians.shape == (self.covertree.N, )
        # Adults must be unique, and every per-adult dict must be keyed by
        # exactly the set of adults.  successors/weights/entropy are filled
        # lazily, so they may also be empty.
        adult_set = set(self.adults)
        assert len(adult_set) == len(self.adults)
        assert set(self.children.keys()) == adult_set, "Mismatched adults and children keys"
        assert set(self.friends1.keys()) == adult_set, "Mismatched adults and friends1 keys"
        assert set(self.friends2.keys()) == adult_set, "Mismatched adults and friends2 keys"
        assert set(self.friends3.keys()) == adult_set, "Mismatched adults and friends3 keys"
        assert set(self.predecessor.keys()) == adult_set, "Mismatched adults and predecessor keys"
        assert set(self.successors.keys()) == set() or set(self.successors.keys()) == adult_set,\
            "Mismatched adults and successors keys"
        assert set(self.weights.keys()) == set() or set(self.weights.keys()) == adult_set,\
            "Mismatched adults and weights keys"
        assert set(self.entropy.keys()) == set() or set(self.entropy.keys()) == adult_set,\
            "Mismatched adults and entropy keys"
        assert set(list(self.guardians)) == adult_set, "Mismatched guardians and adults."
        # cannot check successors without violating something..
        # The children arrays must partition all N points: pairwise disjoint
        # and jointly exhaustive.  `union` accumulates indices seen so far.
        union = np.array([], dtype=np.int64)
        for ci in self.adults:
            assert type(self.friends1[ci]) == list
            assert type(self.friends2[ci]) == list
            assert type(self.friends3[ci]) == list
            assert type(self.children[ci]) == np.ndarray\
                and self.children[ci].dtype == 'int64'\
                and self.children[ci].shape[0] <= self.covertree.N
            # Friends lists are duplicate-free, and each adult is reflexively
            # its own friend, guardian, and child.
            assert len(set(self.friends1[ci])) == len(self.friends1[ci])
            assert len(set(self.friends2[ci])) == len(self.friends2[ci])
            assert len(set(self.friends3[ci])) == len(self.friends3[ci])
            assert ci in self.friends1[ci]
            assert ci in self.friends2[ci]
            assert ci in self.friends3[ci]
            assert self.guardians[ci] == ci
            assert ci in self.children[ci]
            assert np.intersect1d(union, self.children[ci]).shape[0] == 0,\
                "Children overlap. Not Partition."
            union = np.union1d(union, self.children[ci])
        assert len(union) == self.covertree.N, "Children missing. Not Partition."
        #assert len(union) == len(np.unique(union)), "Duplicates?"
        try:
            # `villages` exists only after village-building; when present, its
            # blocks must line up with the adults.
            v = self.villages
            # TODO -- also test matching indices
            assert len(v._blocks) == len(self.adults),\
                "blocks should match adults"
        except AttributeError:
            pass
        return True
def __sizeof__(self):
import sys
return sum([sys.getsizeof(x) for x in [
self.adults,
self.children,
self.friends1,
self.friends2,
self.friends3,
self.guardians,
self.predecessor,
self.weights,
self.entropy]])
def __repr__(self):
return "Level {} using {} adults at radius {}".format(
self.exponent, len(self.adults), self.radius)
def find_label_weights(self, adult):
r""" Compute the weights of labelled children of an adult.
Store it in self.weights[adult].
Parameters
----------
adult : `int`
index of adult to compute.
Returns
-------
self.weights[adult]
"""
if adult in self.weights.keys():
pass
else:
pc = self.covertree.pointcloud
children_set = np.zeros(shape=(pc.coords.shape[0],), dtype='bool')
children_set[self.children[adult]] = True
self.weights[adult] = fast_algorithms.label_weights(
children_set,
pc.labels,
pc.stratum[0]['mass'].values,
pc.label_info['int_index'].values)
return self.weights[adult]
def find_entropy(self, adult):
r""" Compute the entropy of the labelled children on an adult.
Store it in self.entropy[adults].
This is only used by :class:`multidim.models.CDER`
Parameters
----------
adult : `int`
index of adult to compute.
Returns
-------
self.entropy[adult]
"""
if adult in self.entropy.keys():
pass
else:
totweight = self.weights[adult].sum()
assert totweight > 0
self.entropy[adult] = fast_algorithms.entropy(self.weights[adult]/totweight)
return self.entropy[adult]
def plot_data_title(self, show_balls=True, show_adults=True):
r""" Internal method -- Make source data for plot.
See Also
--------
:func:`plot`
"""
title = "CoverTree Level {}, radius {}".format(self.exponent, self.radius)
pc = self.covertree.pointcloud
xts = []
cts = []
import bokeh.palettes
adult_ids = sorted(list(self.adults))
xs = pc.coords.loc[adult_ids, 0].values
ys = pc.coords.loc[adult_ids, 1].values
rs = [self.radius]*len(self.adults)
cs = [str(c) for c in self.adults]
data = {'xs': xs, 'ys': ys, 'rs': rs, 'cs': cs, 'cts': xts, 'cts': cts}
return data, title
def cleanup(self):
r""" Internal method -- remove duplicate friends, and compute weights
and entropy.
"""
for ca in self.adults:
self.friends1[ca] = sorted(list(set(self.friends1[ca])))
self.friends2[ca] = sorted(list(set(self.friends2[ca])))
self.friends3[ca] = sorted(list(set(self.friends3[ca])))
self.find_label_weights(ca)
    def plot(self, canvas, show_balls=True, show_adults=True, show_hulls=False, color='purple'):
        """
        Plot a single level of a `CoverTree`

        See the example at example-covertree_

        Parameters
        ----------
        canvas : :class:`bokeh.plotting.figure.Figure`
            as obtained from :code:`canvas = bokeh.plotting.figure()`,
            or a matplotlib Axes from :code:`matplotlib.pyplot.subplots()[1]`
        show_balls : bool
            Draw the covering balls at this level.
            Default: True
        show_adults : bool
            Draw the adults at this level.
            Default: True
        show_hulls : bool
            Draw the convex hulls of the children of each adult.
            Note -- this works only with matplotlib for now, not bokeh.
            Default: False
        color : str
            Name of color to use for cover-tree balls and hulls.
            Default: 'purple'

        References
        ----------
        .. _example-covertree: http://nbviewer.jupyter.org/github/geomdata/gda-public/blob/master/examples/example-covertree.ipynb
        """
        # fix the aspect ratio!
        all_xs = self.pointcloud.coords.values[:, 0]
        all_ys = self.pointcloud.coords.values[:, 1]
        xmid = (all_xs.max() + all_xs.min())/2.0
        ymid = (all_ys.max() + all_ys.min())/2.0
        span = max([all_xs.max() - xmid,
                    xmid - all_xs.min(),
                    all_ys.max() - ymid,
                    ymid - all_ys.min()])
        # Dispatch on the plotting backend of the supplied canvas.
        if type(canvas).__module__ == 'bokeh.plotting.figure':
            canvas_type = "bokeh"
            from bokeh.models import ColumnDataSource, Range1d
            import bokeh.plotting
        elif type(canvas).__module__ == 'matplotlib.axes._subplots':
            canvas_type = "pyplot"
            import matplotlib.pyplot as plt
            from matplotlib.collections import PolyCollection, PatchCollection
            from matplotlib.patches import Circle, Ellipse, Polygon
            import matplotlib.colors as colors
            # fix the aspect ratio!
            canvas.set_aspect('equal')
            canvas.set_xlim([xmid-span, xmid+span])
            canvas.set_ylim([ymid-span, ymid+span])
        else:
            raise NotImplementedError(
                "canvas must be a bokeh.plotting.figure(). You gave me {}".format(
                    type(canvas))
            )
        pc = self.covertree.pointcloud
        all_xs = pc.coords.values[:, 0]
        all_ys = pc.coords.values[:, 1]
        data, title = self.plot_data_title(show_balls=show_balls,
                                           show_adults=show_adults)
        # fix the aspect ratio!  (recomputed about the mean, for bokeh ranges)
        xmean = all_xs.mean()
        ymean = all_ys.mean()
        span = max([all_xs.max() - xmean,
                    xmean - all_xs.min(),
                    all_ys.max() - ymean,
                    ymean - all_ys.min()])
        if canvas_type == "pyplot":
            xs = data['xs']
            ys = data['ys']
            rs = data['rs']
            if show_balls:
                # One translucent circle per adult, at this level's radius.
                patches = []
                rgbas = []
                cc = colors.ColorConverter()
                for i in range(len(xs)):
                    patches.append(Circle(xy=(xs[i], ys[i]), radius=rs[i]))
                    # have to set the alpha value manually.
                    rgba = list(cc.to_rgba(color))
                    rgba[3] = 0.2
                    rgbas.append(tuple(rgba))
                p = PatchCollection(patches, edgecolor='none')
                p.set_facecolors(rgbas)
                canvas.add_collection(p)
            if show_adults:
                canvas.scatter(x=xs, y=ys, color='blue', alpha=1.)
            if show_hulls:
                from scipy.spatial import ConvexHull
                patches = []
                rgbas = []
                cc = colors.ColorConverter()
                for ai in self.adults:
                    children = pc.coords.values[self.children[ai], :]
                    if children.shape[0] >= 3:
                        # Enough points for a genuine hull polygon.
                        hull = ConvexHull(children).vertices
                        poly_data = children[hull, :]
                        patches.append(Polygon(poly_data))
                    elif children.shape[0] == 2:
                        # Two points: draw a circle spanning their distance.
                        d = cdist( children[[0],:], children[[1],:] )[0,0]
                        patches.append(Circle(xy=pc.coords.values[ai,:], radius=d))
                    else: # singleton
                        patches.append(Circle(xy=pc.coords.values[ai,:], radius=0.5*self.radius))
                    rgba = list(cc.to_rgba(color))
                    rgba[3] = 0.2
                    rgbas.append(tuple(rgba))
                p = PatchCollection(patches, edgecolor=color)
                p.set_facecolors(rgbas)
                canvas.add_collection(p)
            pass
            # NOTE(review): this branch never binds `source`, so the
            # `return source` below raises UnboundLocalError for matplotlib
            # canvases -- confirm whether pyplot callers use the return value.
        elif canvas_type == "bokeh":
            source = ColumnDataSource(data=data)
            canvas.title.text = title
            canvas.x_range = Range1d(xmean-span, xmean+span)
            canvas.y_range = Range1d(ymean-span, ymean+span)
            if show_balls:
                canvas.circle('xs', 'ys', source=source, radius='rs', color=color, alpha=0.2)
            if show_adults:
                canvas.circle('xs', 'ys', source=source, size=4, color='blue', alpha=1.)
            if show_hulls:
                raise NotImplementedError("No hulls in Bokeh yet. Use pyplot.")
        #canvas.circle(all_xs, all_ys, color='black', alpha=0.2, size=0.5)
        return source
| 40.623499
| 164
| 0.583508
| 45,898
| 0.968987
| 3,357
| 0.070872
| 0
| 0
| 0
| 0
| 24,243
| 0.511812
|
0ccc2e5ca0664e29a1337110f68367598882b29e
| 3,936
|
py
|
Python
|
azure-iot-device/azure/iot/device/iothub/models/message.py
|
elhorton/azure-iot-sdk-python
|
484b804a64c245bd92930c13b970ff86f868b5fe
|
[
"MIT"
] | 1
|
2019-02-06T06:52:44.000Z
|
2019-02-06T06:52:44.000Z
|
azure-iot-device/azure/iot/device/iothub/models/message.py
|
elhorton/azure-iot-sdk-python
|
484b804a64c245bd92930c13b970ff86f868b5fe
|
[
"MIT"
] | null | null | null |
azure-iot-device/azure/iot/device/iothub/models/message.py
|
elhorton/azure-iot-sdk-python
|
484b804a64c245bd92930c13b970ff86f868b5fe
|
[
"MIT"
] | 1
|
2019-12-17T17:50:43.000Z
|
2019-12-17T17:50:43.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains a class representing messages that are sent or received.
"""
from azure.iot.device import constant
# TODO: Revise this class. Does all of this REALLY need to be here?
class Message(object):
    """Represents a message to or from IoTHub

    :ivar data: The data that constitutes the payload
    :ivar custom_properties: Dictionary of custom message properties
    :ivar lock_token: Used by receiver to abandon, reject or complete the message
    :ivar message_id: A user-settable identifier for the message used for request-reply patterns. Format: A case-sensitive string (up to 128 characters long) of ASCII 7-bit alphanumeric characters + {'-', ':', '.', '+', '%', '_', '#', '*', '?', '!', '(', ')', ',', '=', '@', ';', '$', '''}
    :ivar sequence_number: A number (unique per device-queue) assigned by IoT Hub to each message
    :ivar to: A destination specified for Cloud-to-Device (C2D) messages
    :ivar expiry_time_utc: Date and time of message expiration in UTC format
    :ivar enqueued_time: Date and time a C2D message was received by IoT Hub
    :ivar correlation_id: A property in a response message that typically contains the message_id of the request, in request-reply patterns
    :ivar user_id: An ID to specify the origin of messages
    :ivar ack: A feedback message generator. This property is used in C2D messages to request IoT Hub to generate feedback messages as a result of the consumption of the message by the device
    :ivar content_encoding: Content encoding of the message data. Can be 'utf-8', 'utf-16' or 'utf-32'
    :ivar content_type: Content type property used to route messages with the message-body. Can be 'application/json'
    :ivar output_name: Name of the output that the message is being sent to.
    """

    def __init__(
        self,
        data,
        message_id=None,
        content_encoding="utf-8",
        content_type="application/json",
        output_name=None,
    ):
        """
        Initializer for Message

        :param data: The data that constitutes the payload
        :param str message_id: A user-settable identifier for the message used for request-reply patterns. Format: A case-sensitive string (up to 128 characters long) of ASCII 7-bit alphanumeric characters + {'-', ':', '.', '+', '%', '_', '#', '*', '?', '!', '(', ')', ',', '=', '@', ';', '$', '''}
        :param str content_encoding: Content encoding of the message data. Default is 'utf-8'. Other values can be 'utf-16' or 'utf-32'
        :param str content_type: Content type property used to route messages with the message body. Default value is 'application/json'
        :param str output_name: Name of the output that the message is being sent to.
        """
        self.data = data
        # Fields below are populated by the service/receiver, not the caller.
        self.custom_properties = {}
        self.lock_token = None
        self.message_id = message_id
        self.sequence_number = None
        self.to = None
        self.expiry_time_utc = None
        self.enqueued_time = None
        self.correlation_id = None
        self.user_id = None
        self.ack = None
        self.content_encoding = content_encoding
        self.content_type = content_type
        self.output_name = output_name
        # Set only via set_as_security_message(); None otherwise.
        self._iothub_interface_id = None

    @property
    def iothub_interface_id(self):
        # Read-only view; non-None only after set_as_security_message().
        return self._iothub_interface_id

    def set_as_security_message(self):
        """
        Set the message as a security message.

        This is a provisional API. Functionality not yet guaranteed.
        """
        self._iothub_interface_id = constant.SECURITY_MESSAGE_INTERFACE_ID

    def __str__(self):
        return str(self.data)
| 50.461538
| 298
| 0.649644
| 3,432
| 0.871951
| 0
| 0
| 85
| 0.021596
| 0
| 0
| 2,913
| 0.740091
|
0ccd4f9fbf2b5d4dda1cc40e475be33aa9ef28bc
| 320
|
py
|
Python
|
scraping/test001.py
|
flaviogf/Exemplos
|
fc666429f6e90c388e201fb7b7d5801e3c25bd25
|
[
"MIT"
] | null | null | null |
scraping/test001.py
|
flaviogf/Exemplos
|
fc666429f6e90c388e201fb7b7d5801e3c25bd25
|
[
"MIT"
] | 5
|
2019-12-29T04:58:10.000Z
|
2021-03-11T04:35:15.000Z
|
scraping/test001.py
|
flaviogf/Exemplos
|
fc666429f6e90c388e201fb7b7d5801e3c25bd25
|
[
"MIT"
] | null | null | null |
import pandas
import requests
# Download the raw Avengers dataset and cache it locally as avengers.csv.
with open('avengers.csv', 'w') as file:
    file_url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/avengers/avengers.csv'
    response = requests.get(file_url)
    file.write(response.text)
# Re-open the cached copy and parse it into a DataFrame.
with open('avengers.csv', 'r') as file:
    data_frame = pandas.read_csv(file)
| 29.090909
| 100
| 0.73125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.371875
|
0ccde3d4f64a774d9d8fa84b6c6fe3d0ad69c35d
| 3,997
|
py
|
Python
|
backup/guitemplates/custominvocationcutdurationdialog.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
backup/guitemplates/custominvocationcutdurationdialog.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
backup/guitemplates/custominvocationcutdurationdialog.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'custominvocationcutdurationdialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: PyQt4 API 1 (Python 2) exposes QString.fromUtf8;
# under API 2 / Python 3 plain str is used, so fall back to the identity.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Compatibility shim: older PyQt4 takes an explicit UnicodeUTF8 encoding
# flag in QApplication.translate(); newer versions drop that argument.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_custominvocationcutdurationdialog(object):
    # Auto-generated by pyuic4 from custominvocationcutdurationdialog.ui --
    # regenerate from the .ui file rather than editing this class by hand.
    def setupUi(self, custominvocationcutdurationdialog):
        """Create, size, and position all widgets on the dialog."""
        custominvocationcutdurationdialog.setObjectName(_fromUtf8("custominvocationcutdurationdialog"))
        custominvocationcutdurationdialog.resize(400, 213)
        self.custominvocationtopLabel = QtGui.QLabel(custominvocationcutdurationdialog)
        self.custominvocationtopLabel.setGeometry(QtCore.QRect(90, 10, 221, 16))
        self.custominvocationtopLabel.setObjectName(_fromUtf8("custominvocationtopLabel"))
        self.custominvocationcutsLabel = QtGui.QLabel(custominvocationcutdurationdialog)
        self.custominvocationcutsLabel.setGeometry(QtCore.QRect(11, 30, 381, 20))
        self.custominvocationcutsLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.custominvocationcutsLabel.setObjectName(_fromUtf8("custominvocationcutsLabel"))
        self.horizontalLayoutWidget = QtGui.QWidget(custominvocationcutdurationdialog)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(130, 60, 160, 80))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.custominvocationLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.custominvocationLayout.setMargin(0)
        self.custominvocationLayout.setObjectName(_fromUtf8("custominvocationLayout"))
        self.custominvocationValue = QtGui.QSpinBox(self.horizontalLayoutWidget)
        self.custominvocationValue.setObjectName(_fromUtf8("custominvocationValue"))
        self.custominvocationLayout.addWidget(self.custominvocationValue)
        self.custominvocationminLabel = QtGui.QLabel(self.horizontalLayoutWidget)
        self.custominvocationminLabel.setObjectName(_fromUtf8("custominvocationminLabel"))
        self.custominvocationLayout.addWidget(self.custominvocationminLabel)
        self.custominvocationaddButton = QtGui.QPushButton(custominvocationcutdurationdialog)
        self.custominvocationaddButton.setGeometry(QtCore.QRect(160, 170, 131, 30))
        self.custominvocationaddButton.setObjectName(_fromUtf8("custominvocationaddButton"))
        self.custominvocationcancelButton = QtGui.QPushButton(custominvocationcutdurationdialog)
        self.custominvocationcancelButton.setGeometry(QtCore.QRect(300, 170, 84, 30))
        self.custominvocationcancelButton.setObjectName(_fromUtf8("custominvocationcancelButton"))
        self.retranslateUi(custominvocationcutdurationdialog)
        QtCore.QMetaObject.connectSlotsByName(custominvocationcutdurationdialog)

    def retranslateUi(self, custominvocationcutdurationdialog):
        """Set all user-visible strings (hook for Qt translation)."""
        custominvocationcutdurationdialog.setWindowTitle(_translate("custominvocationcutdurationdialog", "Dialog", None))
        self.custominvocationtopLabel.setText(_translate("custominvocationcutdurationdialog", "How Long Would You Like To Invoke:", None))
        self.custominvocationcutsLabel.setText(_translate("custominvocationcutdurationdialog", "Cuts Here", None))
        self.custominvocationminLabel.setText(_translate("custominvocationcutdurationdialog", "min", None))
        self.custominvocationaddButton.setText(_translate("custominvocationcutdurationdialog", "ADD TO SESSION", None))
        self.custominvocationcancelButton.setText(_translate("custominvocationcutdurationdialog", "CANCEL", None))
| 60.560606
| 138
| 0.788341
| 3,317
| 0.829872
| 0
| 0
| 0
| 0
| 0
| 0
| 750
| 0.187641
|
0ccf64808d3042c572ef4543702896d84041599e
| 1,393
|
py
|
Python
|
benchmarks/pytorch_alexnet_inference.py
|
d3dave/python-macrobenchmarks
|
ee52cce1af120f543ce3e2f6bc99225784b59506
|
[
"MIT"
] | 20
|
2020-10-20T20:55:51.000Z
|
2021-11-18T16:26:49.000Z
|
benchmarks/pytorch_alexnet_inference.py
|
d3dave/python-macrobenchmarks
|
ee52cce1af120f543ce3e2f6bc99225784b59506
|
[
"MIT"
] | 2
|
2021-11-17T18:37:27.000Z
|
2022-03-22T20:26:24.000Z
|
benchmarks/pytorch_alexnet_inference.py
|
d3dave/python-macrobenchmarks
|
ee52cce1af120f543ce3e2f6bc99225784b59506
|
[
"MIT"
] | 4
|
2020-10-30T15:09:37.000Z
|
2022-02-12T00:12:12.000Z
|
import json
import time
import torch
import urllib
import sys
if __name__ == "__main__":
    start = time.time()
    # Load pretrained AlexNet from torch hub (cached locally after the
    # first download).
    model = torch.hub.load('pytorch/vision:v0.6.0', 'alexnet', pretrained=True)
    # assert time.time() - start < 3, "looks like we just did the first-time download, run this benchmark again to get a clean run"
    model.eval()
    url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
    urllib.request.urlretrieve(url, filename)
    from PIL import Image
    from torchvision import transforms
    input_image = Image.open(filename)
    # Standard ImageNet preprocessing: resize, center-crop, normalize.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
    # Number of inference iterations; overridable via argv[1].
    n = 1000
    if len(sys.argv) > 1:
        n = int(sys.argv[1])
    with torch.no_grad():
        # One timestamp before each iteration plus one after the loop, so
        # len(times) == n + 1 and consecutive diffs are per-iteration costs.
        times = []
        for i in range(n):
            times.append(time.time())
            if i % 10 == 0:
                print(i)
            output = model(input_batch)
        times.append(time.time())
    # Report average throughput (inferences per second).
    print((len(times) - 1) / (times[-1] - times[0]) , "/s")
    # Optionally dump the raw timestamps as JSON to the file named by argv[2].
    if len(sys.argv) > 2:
        json.dump(times, open(sys.argv[2], 'w'))
| 31.659091
| 131
| 0.613065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.207466
|
0cd1d7ce809f4555127103b9f2ebc53cd22fdca6
| 2,885
|
py
|
Python
|
Curso Python Completo - Udemy/Teste/core/poo1.py
|
Cauenumo/Python
|
6414ee2013c651e9d45cd328a381a476c6c9073b
|
[
"Apache-2.0"
] | null | null | null |
Curso Python Completo - Udemy/Teste/core/poo1.py
|
Cauenumo/Python
|
6414ee2013c651e9d45cd328a381a476c6c9073b
|
[
"Apache-2.0"
] | null | null | null |
Curso Python Completo - Udemy/Teste/core/poo1.py
|
Cauenumo/Python
|
6414ee2013c651e9d45cd328a381a476c6c9073b
|
[
"Apache-2.0"
] | null | null | null |
# class Circle(object):
# pi = 3.14
# # O círculo é instanciado com um raio (o padrão é 1)
# def __init__(self, radius=1):
# self.radius = radius
# # Método de cálculo da área. Observe o uso de si mesmo.
# def area(self):
# return self.radius * self.radius * Circle.pi
# # Método que redefine a área
# def setRadius(self, radius):
# self.radius = radius
# # Método para obter raio (Mesmo que apenas chamar .radius)
# def getRadius(self):
# return self.radius
# c = Circle()
# c.setRadius(3)
# print('O raio é: ',c.getRadius())
# print('A área é: ', c.area())
# l = [1,2,3]
# t = (1,2,3)
# print(type(t))
# def funcao(a,b):
# somei = a + b
# return somei
# print(funcao(1,2))
# print(type(funcao))
# class Dog(object):
# def __init__(self,raça):
# self.raça = raça
# sam = Dog(raça='Labrador')
# frank = Dog(raça = 'Pitbull')
# print(frank.raça)
# class Dog(object):
# species = 'mamifero'
# def __init__(self,raça):
# self.raça = raça
# print(len(self.species))
# def latir(self):
# print("au au")
# sam = Dog(raça = 'Labrador')
# print(sam.latir())
# class Circulo(object):
# pi = 3.14
# def __init__(self, raio = 1):
# self.raio = raio
# def area(self):
# return self.raio ** 2 * self.pi
# def att(self, raio):
# self.raio = raio
# def obtemraio(self):
# return self.raio
# c = Circulo()
# print(c.att(52))
# class Animal(object):
# def __init__(self):
# print('Animal criado.')
# def quemsou(self):
# print('Eu sou um animal')
# def comer(self):
# print('Comendo...')
# class Cachorro(Animal):
# def __init__(self):
# Animal.__init__(self)
# print('Cachorro criado.')
# def quemsou(self):
# print('Sou um cachorro.')
# def latir(self):
# print('Au AU')
# sam = Cachorro()
# print(sam.quemsou())
# print(sam.latir())
# class book():
# def __init__(self,titulo,autor,paginas):
# print('um livro foi criado.')
# self.titulo = titulo
# self.autor = autor
# self.paginas = paginas
# def __str__(self):
# return "Titulo {}".format(self.titulo)
# def __len__(self):
# return self.paginas
# def __del__(self):
# print('livro destruido')
# l = [1,2,3]
# livro1 = book ('Python', 'Cauê', 100)
# class Line(object):
# def __init__(Self,coor1,coor2):
# self.coor1 = coor1
# self.coor2 = coor2
# def distance(self):
# x1,y1 = self.coor1
# x2,y2 = self.coor2
# return ( (x2-x1) ** 2 + (y2-y1) ** 2) ** 0.5
# def slope(self):
# x1,y1 = self.coor1
# x2,y2 = self.coo2
# return float((y2-y1))/(x2-x1)
# coor
| 20.316901
| 64
| 0.533449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,732
| 0.939154
|
0cd346f1de289a9e93d3b25b5635b78a4192c096
| 1,126
|
py
|
Python
|
gen-raw-logs.py
|
lightoyou/grapl
|
77488059891091e5656254ee15efef038a1b46a7
|
[
"Apache-2.0"
] | null | null | null |
gen-raw-logs.py
|
lightoyou/grapl
|
77488059891091e5656254ee15efef038a1b46a7
|
[
"Apache-2.0"
] | null | null | null |
gen-raw-logs.py
|
lightoyou/grapl
|
77488059891091e5656254ee15efef038a1b46a7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
try:
from typing import Any, Dict, Union, Optional
except:
pass
import time
import string
import boto3
import random
import zstd
import sys
def rand_str(l):
    # type: (int) -> str
    """Return a random string of length *l* drawn from A-Z and 0-9."""
    alphabet = string.ascii_uppercase + string.digits
    chars = []
    for _ in range(l):
        chars.append(random.choice(alphabet))
    return ''.join(chars)
def main(prefix):
    """Upload ./eventlog.xml to S3 in zstd-compressed 50-line chunks.

    ``prefix`` names the bucket: "<prefix>-sysmon-log-bucket".
    NOTE(review): assumes AWS credentials are available in the environment.
    """
    s3 = boto3.client('s3')
    with open('./eventlog.xml', 'rb') as b:
        body = b.readlines()
        # NOTE(review): this copy is redundant -- readlines() already
        # returns a list of bytes lines.
        body = [line for line in body]

    def chunker(seq, size):
        # Split `seq` into consecutive chunks of at most `size` items.
        return [seq[pos:pos + size] for pos in range(0, len(seq), size)]

    for chunks in chunker(body, 50):
        # Compress 50 log lines per S3 object (zstd level 4).
        c_body = zstd.compress(b"\n".join(chunks), 4)
        epoch = int(time.time())
        # Key layout: <midnight-epoch>/sysmon/<epoch><3 random chars>.
        s3.put_object(
            Body=c_body,
            Bucket="{}-sysmon-log-bucket".format(prefix),
            Key=str(epoch - (epoch % (24 * 60 * 60))) + "/sysmon/" +
                str(epoch) + rand_str(3)
        )
        print(time.ctime())
if __name__ == '__main__':
    # Require exactly one CLI argument: the S3 bucket prefix.
    if len(sys.argv) != 2:
        raise Exception("Provide bucket prefix as first argument")
    else:
        main(sys.argv[1])
| 22.078431
| 72
| 0.571936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.137655
|
0cd35d400b8ba8d38cccab4e5289309cd18ed0ce
| 2,773
|
py
|
Python
|
src/bot/lib/economy/economy.py
|
rdunc/rybot
|
ec3bf6159e095b53e69f6f81af9f10739c180b42
|
[
"MIT"
] | 1
|
2016-01-11T02:10:05.000Z
|
2016-01-11T02:10:05.000Z
|
src/bot/lib/economy/economy.py
|
rdunc/RyBot
|
ec3bf6159e095b53e69f6f81af9f10739c180b42
|
[
"MIT"
] | null | null | null |
src/bot/lib/economy/economy.py
|
rdunc/RyBot
|
ec3bf6159e095b53e69f6f81af9f10739c180b42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import requests, json, threading, sys
import collections, os, time
from bot.lib.economy import EconomyInit
from bot.lib.core.benchmark import Benchmark
from bot.lib.core.log import Log
from bot.helpers.color_helper import ColorHelper
from bot.helpers.rybot_helper import RyBotHelper
from collections import Counter
class Economy(EconomyInit):
    """Give all offline and online chatters points."""

    def give_points(self):
        """Fetch current chatters from the Twitch API and add 1 point each,
        merging with the per-channel economy JSON file on disk.

        NOTE(review): indentation reconstructed from a whitespace-mangled dump.
        """
        config = self.config
        debug = config["debug"]
        # NOTE(review): point_timer is read but never used in this method.
        point_timer = config["give_points_timer"]
        api_chatters_url = config["twitch_chatters_url"]
        economy_path = "db/" + self.channel + "/economy.json"
        try:
            twitch_request = requests.get(api_chatters_url + self.channel + "/chatters")
            chatters_json = twitch_request.json()
            if debug:
                time_1 = Benchmark.start()
            with open(economy_path, "r") as of:
                file_chatters = of.read()
                of.close()  # redundant: the with-block already closes the file
            # Empty file -> keep file_chatters as "" (falsy length) and skip merge.
            if len(file_chatters) > 0:
                file_chatters = json.loads(file_chatters)
                if debug:
                    Log.economy("Current file chatters count: {0}".format(len(file_chatters)))
            api_chatters = chatters_json["chatters"]["viewers"]
            # Each currently-present viewer earns exactly 1 point this tick.
            chatters_dictionary = {}
            for i in api_chatters:
                chatters_dictionary[i] = 1
                if debug:
                    Log.economy("1 point was added to: {0}".format(i))
            if len(file_chatters) > 0:
                # Counter addition merges new points into existing balances.
                merged_chatters = [chatters_dictionary, file_chatters]
                merged_chatters = sum((Counter(dict(i)) for i in merged_chatters), Counter())
            else:
                merged_chatters = chatters_dictionary
            with open(economy_path, "w") as of:
                json.dump(merged_chatters, of)
                of.close()  # redundant: the with-block already closes the file
            Log.economy("1 point was added to {0} {1}".format(len(merged_chatters), RyBotHelper.pluralize(len(merged_chatters), "chatter")))
            if debug:
                Log.economy("Current chatters from API: {0}".format(len(chatters_dictionary)))
                Benchmark.stop(time_1)
        except json.decoder.JSONDecodeError:
            Log.error("Problem decoding the JSON. Unable to distribute points.")
        except requests.exceptions.ConnectionError:
            Log.error("Unable to connect to the Twitch API.")
        except TypeError:
            Log.error("Error finding the viewers.")
        except FileNotFoundError:
            Log.error("Economy file not found. Unable to distribute points.")
| 39.056338
| 141
| 0.582402
| 2,418
| 0.87198
| 0
| 0
| 0
| 0
| 0
| 0
| 485
| 0.174901
|
0cd6ed4cd564901c9d6e6419361c7b61b1d56dfb
| 120
|
py
|
Python
|
CF_Functions/Arcade/WordPower.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | null | null | null |
CF_Functions/Arcade/WordPower.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | null | null | null |
CF_Functions/Arcade/WordPower.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | 1
|
2020-07-30T17:37:12.000Z
|
2020-07-30T17:37:12.000Z
|
def wordPower(word):
    """Return the "power" of *word*: the sum of the 1-based alphabet
    positions of its (lowercase) letters, e.g. "abc" -> 1 + 2 + 3 = 6.

    Fix: the original referenced the ``string`` module without importing it,
    so every call raised NameError; the import is now done locally.
    """
    import string  # local import: snippet had no import section
    num = dict(zip(string.ascii_lowercase, range(1, 27)))
    return sum(num[ch] for ch in word)
| 24
| 56
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0cd7b71bf7de36ad8722f58dc56d94db5fb81535
| 827
|
py
|
Python
|
python/mapper.py
|
qoofyk/zipper
|
c1d77448f8d479f9ef4bf785d49cf2b41da09130
|
[
"BSD-3-Clause"
] | null | null | null |
python/mapper.py
|
qoofyk/zipper
|
c1d77448f8d479f9ef4bf785d49cf2b41da09130
|
[
"BSD-3-Clause"
] | null | null | null |
python/mapper.py
|
qoofyk/zipper
|
c1d77448f8d479f9ef4bf785d49cf2b41da09130
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import math
def contiguous_mapper(mpi_rank, mpi_size, num_endpoints):
    """Map an MPI rank onto (group_id, local_id).

    Ranks are split into contiguous groups of ceil(mpi_size / num_endpoints)
    consecutive ranks; each group is served by one endpoint.
    """
    ranks_per_group = math.ceil(mpi_size / num_endpoints)  # round up
    group_id = mpi_rank // ranks_per_group
    local_id = mpi_rank % ranks_per_group
    return (group_id, local_id)
def generate_endpoint_file(endpoint_list, mpi_size, mapper_func):
    """Print one "<endpoint> <local_id>" line per MPI rank.

    Fix: the original accepted *mapper_func* but ignored it and always
    called ``contiguous_mapper``; the supplied mapper is now used (callers
    that pass ``contiguous_mapper`` see identical output).

    :param endpoint_list: endpoint addresses, one per group
    :param mpi_size: total number of MPI ranks
    :param mapper_func: callable(rank, mpi_size, num_endpoints) -> (group_id, local_id)
    :return: list of (endpoint, local_id) pairs in rank order (previously
             None; the return value is additive, so callers are unaffected).
    """
    mapping = []
    for rank in range(mpi_size):
        group_id, local_id = mapper_func(rank, mpi_size, len(endpoint_list))
        print(endpoint_list[group_id], local_id)
        mapping.append((endpoint_list[group_id], local_id))
    return mapping
# run like python 3 mapper.py 3 ip1 ip2 ip3 ip4...
if __name__ == "__main__":
    # CLI entry point: argv[1] is the MPI world size; remaining arguments
    # are endpoint addresses (one per group).
    #print("Running: ", sys.argv)
    #print("Number of arguments: ", len(sys.argv))
    mpi_size = int(sys.argv[1])
    endpoint_list = sys.argv[2:]
    #print("The endpoints are: " , endpoint_list)
    generate_endpoint_file(endpoint_list, mpi_size, contiguous_mapper)
| 35.956522
| 79
| 0.71705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.229746
|
0cd87ef313939da59162ef6b202deb04d9ca957b
| 7,079
|
py
|
Python
|
src/deepcover.py
|
nce11/deepcover
|
129488e3593f8d69e352be1e613f44480e4033e6
|
[
"BSD-3-Clause"
] | 25
|
2018-03-14T21:23:00.000Z
|
2021-11-22T14:06:20.000Z
|
src/deepcover.py
|
nce11/deepcover
|
129488e3593f8d69e352be1e613f44480e4033e6
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T07:15:15.000Z
|
2022-03-14T10:29:50.000Z
|
src/deepcover.py
|
nce11/deepcover
|
129488e3593f8d69e352be1e613f44480e4033e6
|
[
"BSD-3-Clause"
] | 18
|
2018-03-14T19:20:45.000Z
|
2022-02-16T18:33:10.000Z
|
from keras.preprocessing import image
from keras.applications import vgg16
from keras.applications.vgg16 import VGG16
from keras.applications import inception_v3, mobilenet, xception
from keras.models import load_model
import matplotlib.pyplot as plt
import csv
import argparse
import os
import numpy as np
from utils import *
from to_explain import *
from comp_explain import *
def main():
    """CLI entry point for DeepCover: parse options, load a DNN model and
    the input images, then run either the causal explanation pipeline
    (``comp_explain``) or the SFL-based one (``to_explain``).

    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    """
    parser=argparse.ArgumentParser(description='To explain neural network decisions' )
    parser.add_argument(
        '--model', dest='model', default='-1', help='the input neural network model (.h5)')
    parser.add_argument("--inputs", dest="inputs", default="-1",
                        help="the input test data directory", metavar="DIR")
    parser.add_argument("--outputs", dest="outputs", default="outs",
                        help="the outputput test data directory", metavar="DIR")
    parser.add_argument("--measures", dest="measures", default=['tarantula', 'zoltar', 'ochiai', 'wong-ii'],
                        help="the SFL measures (tarantula, zoltar, ochiai, wong-ii)", metavar="" , nargs='+')
    parser.add_argument("--measure", dest="measure", default="None",
                        help="the SFL measure", metavar="MEASURE")
    parser.add_argument("--mnist-dataset", dest="mnist", help="MNIST dataset", action="store_true")
    parser.add_argument("--normalized-input", dest="normalized", help="To normalize the input", action="store_true")
    parser.add_argument("--cifar10-dataset", dest="cifar10", help="CIFAR-10 dataset", action="store_true")
    parser.add_argument("--grayscale", dest="grayscale", help="MNIST dataset", action="store_true")
    parser.add_argument("--vgg16-model", dest='vgg16', help="vgg16 model", action="store_true")
    parser.add_argument("--inception-v3-model", dest='inception_v3', help="inception v3 model", action="store_true")
    parser.add_argument("--xception-model", dest='xception', help="Xception model", action="store_true")
    parser.add_argument("--mobilenet-model", dest='mobilenet', help="mobilenet model", action="store_true")
    parser.add_argument("--attack", dest='attack', help="to atatck", action="store_true")
    parser.add_argument("--text-only", dest='text_only', help="for efficiency", action="store_true")
    parser.add_argument("--input-rows", dest="img_rows", default="224",
                        help="input rows", metavar="INT")
    parser.add_argument("--input-cols", dest="img_cols", default="224",
                        help="input cols", metavar="INT")
    parser.add_argument("--input-channels", dest="img_channels", default="3",
                        help="input channels", metavar="INT")
    parser.add_argument("--x-verbosity", dest="x_verbosity", default="0",
                        help="the verbosity level of explanation output", metavar="INT")
    parser.add_argument("--top-classes", dest="top_classes", default="1",
                        help="check the top-xx classifications", metavar="INT")
    parser.add_argument("--adversarial-ub", dest="adv_ub", default="1.",
                        help="upper bound on the adversarial percentage (0, 1]", metavar="FLOAT")
    parser.add_argument("--adversarial-lb", dest="adv_lb", default="0.",
                        help="lower bound on the adversarial percentage (0, 1]", metavar="FLOAT")
    parser.add_argument("--masking-value", dest="adv_value", default="234",
                        help="masking value for input mutation", metavar="INT")
    parser.add_argument("--testgen-factor", dest="testgen_factor", default="0.2",
                        help="test generation factor (0, 1]", metavar="FLOAT")
    parser.add_argument("--testgen-size", dest="testgen_size", default="2000",
                        help="testgen size ", metavar="INT")
    parser.add_argument("--testgen-iterations", dest="testgen_iter", default="1",
                        help="to control the testgen iteration", metavar="INT")
    parser.add_argument("--causal", dest='causal', help="causal explanation", action="store_true")
    parser.add_argument("--wsol", dest='wsol_file', help="weakly supervised object localization", metavar="FILE")
    parser.add_argument("--occlusion", dest='occlusion_file', help="to load the occluded images", metavar="FILE")

    args=parser.parse_args()

    img_rows, img_cols, img_channels = int(args.img_rows), int(args.img_cols), int(args.img_channels)

    ## some common used datasets
    if args.mnist:
        img_rows, img_cols, img_channels = 28, 28, 1
    elif args.cifar10:
        img_rows, img_cols, img_channels = 32, 32, 3
    elif args.inception_v3 or args.xception:
        img_rows, img_cols, img_channels = 299, 299, 3

    ## to load the input DNN model
    if args.model!='-1':
        dnn=load_model(args.model)
    elif args.vgg16:
        print ('to load VGG16')
        dnn=VGG16()
        print ('done')
    elif args.mobilenet:
        dnn=mobilenet.MobileNet()
    elif args.inception_v3:
        dnn=inception_v3.InceptionV3()
    elif args.xception:
        dnn=xception.Xception()
    else:
        raise Exception ('A DNN model needs to be provided...')

    ## to load the input data
    fnames=[]
    xs=[]
    if args.inputs!='-1':
        # Recursively collect .jpg/.png/.JPEG files under the inputs directory.
        for path, subdirs, files in os.walk(args.inputs):
            for name in files:
                fname=(os.path.join(path, name))
                if fname.endswith('.jpg') or fname.endswith('.png') or fname.endswith('.JPEG'):
                    if args.grayscale is True or args.mnist:
                        x=image.load_img(fname, target_size=(img_rows, img_cols), color_mode = "grayscale")
                        x=np.expand_dims(x,axis=2)
                    else:
                        x=image.load_img(fname, target_size=(img_rows, img_cols))
                    x=np.expand_dims(x,axis=0)
                    xs.append(x)
                    fnames.append(fname)
    else:
        raise Exception ('What do you want me to do?')
    xs=np.vstack(xs)
    xs = xs.reshape(xs.shape[0], img_rows, img_cols, img_channels)
    print ('\n[Total data loaded: {0}]'.format(len(xs)))

    # Bundle everything the explanation backends need into one object.
    eobj=explain_objectt(dnn, xs)
    eobj.outputs=args.outputs
    eobj.top_classes=int(args.top_classes)
    eobj.adv_ub=float(args.adv_ub)
    eobj.adv_lb=float(args.adv_lb)
    eobj.adv_value=float(args.adv_value)
    eobj.testgen_factor=float(args.testgen_factor)
    eobj.testgen_size=int(args.testgen_size)
    eobj.testgen_iter=int(args.testgen_iter)
    eobj.vgg16=args.vgg16
    eobj.mnist=args.mnist
    eobj.cifar10=args.cifar10
    eobj.inception_v3=args.inception_v3
    eobj.xception=args.xception
    eobj.mobilenet=args.mobilenet
    eobj.attack=args.attack
    eobj.text_only=args.text_only
    eobj.normalized=args.normalized
    eobj.x_verbosity=int(args.x_verbosity)
    eobj.fnames=fnames
    eobj.occlusion_file=args.occlusion_file

    # A single --measure overrides the --measures list.
    measures = []
    if not args.measure=='None':
        measures.append(args.measure)
    else: measures = args.measures
    eobj.measures=measures

    if not args.wsol_file is None:
        # Ground-truth bounding boxes: "name x1 y1 x2 y2" per line.
        print (args.wsol_file)
        boxes={}
        with open(args.wsol_file, 'r') as csvfile:
            res=csv.reader(csvfile, delimiter=' ')
            for row in res:
                boxes[row[0]]=[int(row[1]), int(row[2]), int(row[3]), int(row[4])]
        eobj.boxes=boxes

    if args.causal:
        comp_explain(eobj)
    else: to_explain(eobj)

if __name__=="__main__":
    main()
| 44.24375
| 114
| 0.676508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,129
| 0.300749
|
0cdb931bc3d4d0011e0c24642dc040bbe2b51af1
| 8,924
|
py
|
Python
|
phigaro/cli/batch.py
|
bobeobibo/phigaro
|
342a3454bb5324426b25feb4a4d1f640b58bf8f8
|
[
"MIT"
] | 31
|
2019-03-06T14:33:37.000Z
|
2022-03-08T07:16:07.000Z
|
phigaro/cli/batch.py
|
bobeobibo/phigaro
|
342a3454bb5324426b25feb4a4d1f640b58bf8f8
|
[
"MIT"
] | 27
|
2019-05-17T05:06:58.000Z
|
2022-03-27T00:38:56.000Z
|
phigaro/cli/batch.py
|
bobeobibo/phigaro
|
342a3454bb5324426b25feb4a4d1f640b58bf8f8
|
[
"MIT"
] | 12
|
2017-08-23T12:48:38.000Z
|
2021-06-24T00:57:22.000Z
|
from __future__ import absolute_import
import argparse
import logging
import multiprocessing
import os
import sys
import uuid
from os.path import join, exists
import yaml
from phigaro.context import Context
from phigaro.batch.runner import run_tasks_chain
from phigaro.batch.task.path import sample_name
from phigaro.batch.task.prodigal import ProdigalTask
from phigaro.batch.task.hmmer import HmmerTask
from phigaro.batch.task.dummy import DummyTask
from phigaro.batch.task.preprocess import PreprocessTask
from phigaro.batch.task.run_phigaro import RunPhigaroTask
from phigaro._version import __version__
def parse_substitute_output(subs):
    """Turn "task_name:output_path" CLI specs into a {task_name: DummyTask}
    map; *subs* may be None (no -S flags given), yielding an empty map."""
    pairs = (spec.split(":") for spec in (subs or []))
    return {
        task_name: DummyTask(output, task_name)
        for task_name, output in pairs
    }
def create_task(substitutions, task_class, *args, **kwargs):
    """Instantiate *task_class*; if a precomputed substitute exists for its
    task_name, announce and return the substitute instead.

    TODO: refactor to class Application
    """
    task = task_class(*args, **kwargs)
    if task.task_name not in substitutions:
        return task
    substitute = substitutions[task.task_name]
    print(
        'Substituting output for {}: {}'.format(
            task.task_name, substitute.output()
        )
    )
    return substitute
def clean_fold():
    """Remove the local 'proc' scratch tree, but only when it holds no files.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    confirm nesting upstream. As written, ``is_empty`` flips to False on the
    first file seen anywhere, and the inner ``break`` only exits the files
    loop for that directory; directories are removed bottom-up
    (``topdown=False``) while the tree still looks empty.
    """
    is_empty = True
    for root, dirs, files in os.walk('proc', topdown=False):
        for name in files:
            is_empty = False
            break
        if is_empty:
            for name in dirs:
                os.rmdir(os.path.join(root, name))
    # Finally remove 'proc' itself when nothing was found in it.
    if is_empty:
        os.rmdir('proc')
def main():
    """CLI entry point for Phigaro: parse arguments, validate output options,
    mutate the YAML config, then run the task chain
    preprocess -> prodigal -> hmmer -> phigaro and emit the chosen outputs.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    confirm the exact nesting of the config-mutation and output sections.
    """
    default_config_path = join(os.getenv('HOME'), '.phigaro', 'config.yml')
    parser = argparse.ArgumentParser(
        prog='phigaro',
        description='Phigaro is a scalable command-line tool for predictions phages and prophages '
        'from nucleid acid sequences',
    )
    parser.add_argument(
        '-V',
        '--version',
        action='version',
        version='%(prog)s {version}'.format(version=__version__),
    )
    parser.add_argument(
        '-f',
        '--fasta-file',
        help='Assembly scaffolds/contigs or full genomes, required',
        required=True,
    )
    parser.add_argument(
        '-c',
        '--config',
        default=default_config_path,
        help='Path to the config file, not required. The deafult is %s'%default_config_path,
    )
    parser.add_argument(
        '-v', '--verbose', action='store_true', help=argparse.SUPPRESS
    )
    parser.add_argument(
        '-p',
        '--print-vogs',
        help='Print phage vogs for each region',
        action='store_true',
    )
    parser.add_argument(
        '-e',
        '--extension',
        default=['html'],
        nargs='+',
        help='Type of the output: html, tsv, gff, bed or stdout. Default is html. You can specify several file formats with a space as a separator. Example: -e tsv html stdout.',
    )
    parser.add_argument(
        '-o',
        '--output',
        default='',
        help='Output filename for html and txt outputs. Required by default, but not required for stdout only output.',
    )
    parser.add_argument(
        '--not-open',
        help='Do not open html file automatically, if html output type is specified.',
        action='store_true',
    )
    parser.add_argument(
        '-t',
        '--threads',
        type=int,
        default=multiprocessing.cpu_count(),
        help='Num of threads ('
        'default is num of CPUs={})'.format(multiprocessing.cpu_count()),
    )
    parser.add_argument(
        '--no-cleanup', action='store_true', help="Do not delete any temporary files that was generated by Phigaro (HMMER & Prodigal outputs and some others)."
    )
    parser.add_argument(
        '-S',
        '--substitute-output',
        action='append',
        help='If you have precomputed prodigal and/or hmmer data you can provide paths to the files in the following format: program:address/to/the/file. In place of program you should write hmmer or prodigal. If you need to provide both files you should pass them separetely as two parametres.',
    )
    parser.add_argument(
        '--save-fasta',
        action='store_true',
        help='Save all phage fasta sequences in a fasta file.',
    )
    parser.add_argument(
        '-d',
        '--delete-shorts',
        action='store_true',
        help='Exclude sequences with length < 20000 automatically.',
    )
    parser.add_argument(
        '-m',
        '--mode',
        default='basic',
        help='You can launch Phigaro at one of 3 modes: basic, abs, without_gc. Default is basic. Read more about modes at https://github.com/bobeobibo/phigaro/',
    )
    parser.add_argument(
        '--wtp',
        action='store_true',
        help=argparse.SUPPRESS
    )
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO if args.verbose else logging.WARN)
    logging.getLogger('sh.command').setLevel(logging.WARN)
    logger = logging.getLogger(__name__)
    if not exists(args.config):
        # TODO: pretty message
        print('Please, create config file using phigaro-setup script')
        exit(1)
    # Output-format validation: normalize to lowercase, reject unknown formats.
    args.extension = [atype.lower() for atype in args.extension]
    for ext in args.extension:
        if ext not in ['html', 'gff', 'bed', 'tsv', 'stdout']:
            print(
                'Error! The unknown output format in -e/--extensionn parameter: %s. Please, choose one or several from the list: html, gff, bed, tsv, stdout'%ext
            )
            exit(1)
    if (args.output == '') and (args.extension != ['stdout']):
        print(
            'Error! Argument -o/--output is required or change the type of the output to stdout.'
        )
        exit(1)
    with open(args.config) as f:
        logger.info('Using config file: {}'.format(args.config))
        config = yaml.load(f, Loader=yaml.FullLoader)
    # Fold CLI options into the loaded config under the 'phigaro' key.
    config['phigaro']['wtp'] = args.wtp
    config['phigaro']['print_vogs'] = args.print_vogs
    config['phigaro']['filename'] = args.fasta_file
    config['phigaro']['no_html'] = (
        True if 'html' not in args.extension else False
    )
    config['phigaro']['not_open'] = args.not_open
    config['phigaro']['output'] = (args.output+'/'+os.path.splitext(os.path.basename(args.fasta_file))[0]+'.phigaro').replace('//', '/')
    config['phigaro']['uuid'] = uuid.uuid4().hex
    config['phigaro']['delete_shorts'] = args.delete_shorts
    config['phigaro']['gff'] = True if ('gff' in args.extension) else False
    config['phigaro']['bed'] = True if ('bed' in args.extension) else False
    config['phigaro']['mode'] = args.mode
    config['phigaro']['save_fasta'] = args.save_fasta
    filename = args.fasta_file
    sample = '{}-{}'.format(sample_name(filename), config['phigaro']['uuid'])
    if args.wtp:
        # "What the Phage" mode: force a fixed output bundle.
        config['phigaro']['not_open'] = True
        config['phigaro']['gff'] = True
        config['phigaro']['bed'] = True
        args.extension.append('tsv')
        config['phigaro']['delete_shorts'] = True
        config['phigaro']['print_vogs'] = True
        config['phigaro']['output_wtp'] = args.output + '/phigaro.txt'
        config['phigaro']['output'] = args.output +'/phigaro/phigaro'
        config['phigaro']['save_fasta'] = True
    # Create output directories as needed.
    if config['phigaro']['output'] != '':
        fold = os.path.dirname(config['phigaro']['output'])
        if fold and not os.path.isdir(fold):
            os.makedirs(fold)
    if args.wtp:
        fold = os.path.dirname(config['phigaro']['output_wtp'])
        if fold and not os.path.isdir(fold):
            os.makedirs(fold)
    Context.initialize(
        sample=sample, config=config, threads=args.threads,
    )
    # Build the task chain, honouring any -S substitutions.
    substitutions = parse_substitute_output(args.substitute_output)
    preprocess_task = create_task(substitutions, PreprocessTask, filename)
    prodigal_task = create_task(
        substitutions, ProdigalTask, preprocess_task=preprocess_task
    )
    hmmer_task = create_task(
        substitutions, HmmerTask, prodigal_task=prodigal_task
    )
    run_phigaro_task = create_task(
        substitutions,
        RunPhigaroTask,
        prodigal_task=prodigal_task,
        hmmer_task=hmmer_task,
    )
    tasks = [preprocess_task, prodigal_task, hmmer_task, run_phigaro_task]
    task_output_file = run_tasks_chain(tasks)
    if ('tsv' in args.extension) or ('stdout' in args.extension):
        with open(task_output_file) as f:
            f = list(f)
        if 'tsv' in args.extension:
            out_f = open(config['phigaro']['output'] + '.tsv', 'w')
            for line in f:
                out_f.write(line)
        if 'stdout' in args.extension:
            out_f = sys.stdout
            for line in f:
                out_f.write(line)
        # NOTE(review): when only 'stdout' is selected this closes sys.stdout —
        # confirm placement of out_f.close() against upstream.
        out_f.close()
    if not args.no_cleanup:
        for t in tasks:
            t.clean()
        clean_fold()


if __name__ == '__main__':
    main()
| 33.174721
| 296
| 0.61766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,727
| 0.30558
|
0cdb9744480da6f8e1b4899b7fcf04b7238e340b
| 1,551
|
py
|
Python
|
MachineLearning.BayesianNetwork/python-imp/bayes_core.py
|
JillyMan/decision-tree
|
8e2efc914aaade9cc97a2c94052bc909e50fdb48
|
[
"MIT"
] | null | null | null |
MachineLearning.BayesianNetwork/python-imp/bayes_core.py
|
JillyMan/decision-tree
|
8e2efc914aaade9cc97a2c94052bc909e50fdb48
|
[
"MIT"
] | 1
|
2019-12-29T13:49:52.000Z
|
2019-12-29T13:49:52.000Z
|
MachineLearning.BayesianNetwork/python-imp/bayes_core.py
|
JillyMan/MachineLerningFramework
|
8e2efc914aaade9cc97a2c94052bc909e50fdb48
|
[
"MIT"
] | null | null | null |
import math
# Discriminator tags stored in InputType.type by the subclasses below.
RangeType = 'Range'
BinaryType = 'Binary'
class Hipothesis:
    """A hypothesis: identifier, display name, and prior probability p."""

    def __init__(self, id, name, p):
        self.id, self.name, self.p = id, name, p
class Attribute:
    """An observable attribute: identifier, name, prompt question and the
    input type tag (RangeType or BinaryType)."""

    def __init__(self, id, name, question, _type):
        self.id, self.name = id, name
        self.question = question
        self.type = _type
class Tag:
    """Links a hypothesis to an attribute with conditional probabilities
    pp = P(E|H) and pm = P(E|not H)."""

    def __init__(self, hipothesis, attribute, pp, pm):
        self.hipothesis = hipothesis
        self.attribute = attribute
        self.pp, self.pm = pp, pm
class InputType:
    """Base class for user answers: a type tag plus an integer value."""

    def __init__(self, _type, value):
        self.type = _type
        self.value = int(value)  # coerce: answers may arrive as strings
class Binary(InputType):
    """A yes/no answer; the value is coerced to int by the base class."""

    def __init__(self, value):
        super().__init__(BinaryType, value)
class Range(InputType):
    """An answer constrained to the inclusive interval [start, end]."""

    def __init__(self, start, end, value):
        super().__init__(RangeType, value)
        self.start = int(start)
        self.end = int(end)

    def normalize(self):
        """Map the stored value into [0, 1] relative to [start, end]."""
        span = self.end - self.start
        offset = self.value - self.start
        return offset / span
def phe_func(p, pp, pm):
    """Bayes update: P(H|E) for prior p, with pp = P(E|H), pm = P(E|not H)."""
    joint_true = p * pp
    joint_false = (1 - p) * pm
    return joint_true / (joint_true + joint_false)
def calc_probs(pp, pm, p):
    """Return the pair (P(H|E), P(H|not E)) for prior *p*, where
    pp = P(E|H) and pm = P(E|not H); the negative-evidence posterior uses
    the complementary likelihoods."""
    posterior_pos = phe_func(p, pp, pm)
    posterior_neg = phe_func(p, 1 - pp, 1 - pm)
    return (posterior_pos, posterior_neg)
def lerp(start, end, t):
    """Linear interpolation from *start* to *end* at fraction *t*."""
    delta = end - start
    return start + delta * t
def interpolate_result_clamp01(phne, ph, phe, r):
    """Pick the interpolated posterior for a confidence *r*:
    above 0.5 blend ph -> phe, below 0.5 blend phne -> ph, at 0.5 return ph."""
    if r > 0.5:
        return lerp(ph, phe, r)
    if r < 0.5:
        return lerp(phne, ph, r)
    return ph
def interpolate_result_binary(phne, phe, r):
    """Select the posterior for a binary answer: phne when r == 0, else phe."""
    if r == 0:
        return phne
    return phe
| 23.149254
| 54
| 0.588008
| 983
| 0.633785
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.009671
|
0cdc773a241a8d2d5331293406b95caeb6731f44
| 926
|
py
|
Python
|
tests/test_load_bin_log.py
|
bols-blue-org/pid_evaluation
|
af210f2ef7ca49681ff41f4531cfcbd83d70aca0
|
[
"MIT"
] | 1
|
2020-08-27T06:30:53.000Z
|
2020-08-27T06:30:53.000Z
|
tests/test_load_bin_log.py
|
bols-blue-org/ape
|
af210f2ef7ca49681ff41f4531cfcbd83d70aca0
|
[
"MIT"
] | null | null | null |
tests/test_load_bin_log.py
|
bols-blue-org/ape
|
af210f2ef7ca49681ff41f4531cfcbd83d70aca0
|
[
"MIT"
] | null | null | null |
import unittest
from ape.load_bin_log import LoadBinLog
class LoadBinTestCase(unittest.TestCase):
    """Integration tests for LoadBinLog.

    They require the .bin fixture logs referenced below to exist on disk
    relative to the working directory.
    """

    def test_LoadBinLogAll(self):
        # Loading without a type filter should yield at least one record.
        data = LoadBinLog("../tests/log_0_2020-5-1-14-53-42.bin")
        self.assertGreater(len(data), 0, "no data")

    def test_LoadBinLogString(self):
        # Filtering by a single record type ("RCOU").
        data = LoadBinLog("../tests/log_0_2020-5-1-14-53-42.bin", "RCOU")
        self.assertEqual(len(data), 822, "no data")

    def test_LoadBinLogStringArray(self):
        # Filtering by a list of record types.
        data = LoadBinLog("../tests/log_0_2020-5-1-14-53-42.bin", ["RCOU", "ATT"])
        self.assertEqual(len(data), 1644, "no data")

    def test_SepalteRCIN6Para(self):
        # NOTE(review): uses a different fixture than the other tests yet
        # expects the same length 1644 — looks copy-pasted; confirm.
        data = LoadBinLog("../tests/log_13_2020-5-13-15-45-02.bin", ["RCOU", "ATT", "RCIN"])
        dict = data.seplateRCIN6Param()
        for item in dict:
            print("data"+item)
        self.assertEqual(len(data), 1644, "no data")


if __name__ == '__main__':
    unittest.main()
| 31.931034
| 92
| 0.637149
| 818
| 0.883369
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.259179
|
0cdcd31b1d541c0b2fc7fa87f9fe6a1fb877291b
| 4,997
|
py
|
Python
|
rdsslib/kinesis/client.py
|
JiscSD/rdss-shared-libraries
|
cf07cad3f176ef8be1410fc29b240fb4791e607a
|
[
"Apache-2.0"
] | null | null | null |
rdsslib/kinesis/client.py
|
JiscSD/rdss-shared-libraries
|
cf07cad3f176ef8be1410fc29b240fb4791e607a
|
[
"Apache-2.0"
] | 4
|
2018-02-15T12:32:26.000Z
|
2018-03-06T16:33:34.000Z
|
rdsslib/kinesis/client.py
|
JiscSD/rdss-shared-libraries
|
cf07cad3f176ef8be1410fc29b240fb4791e607a
|
[
"Apache-2.0"
] | 1
|
2018-03-13T19:38:54.000Z
|
2018-03-13T19:38:54.000Z
|
import json
import logging
from .errors import MaxRetriesExceededException, DecoratorApplyException
MAX_ATTEMPTS = 6  # default cap on per-stream write attempts


class KinesisClient(object):
    """Thin facade pairing a Kinesis stream writer with a stream reader."""

    def __init__(self, writer, reader):
        """
        Writes and reads messages to and from Kinesis streams.

        :param writer: handles writing of payloads to Kinesis stream
        :param reader: handles reading of payloads from Kinesis stream
        :type writer: writer.StreamWriter
        :type reader: reader.StreamReader
        """
        self.writer = writer
        self.reader = reader
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)

    def write_message(self, stream_names, payload, max_attempts=MAX_ATTEMPTS):
        """Write *payload* once into every stream in *stream_names*.

        :param stream_names: Kinesis streams to write to
        :param payload: JSON payload
        :param max_attempts: maximum number of times to attempt writing
        :type stream_names: list of str
        :type payload: str
        """
        for name in stream_names:
            self.writer.put_stream(name, payload, max_attempts)

    def read_messages(self, stream_name, seq_number=None):
        """Return a generator yielding messages read from *stream_name*.

        :param stream_name: Name of Kinesis stream to read from
        :param seq_number: Optional sequence number to start reading from
        :type stream_name: str
        :rtype: generator
        """
        return self.reader.read_stream(stream_name, seq_number=seq_number)
class EnhancedKinesisClient(KinesisClient):
    def __init__(self, writer, reader, error_handler, decorators=None):
        """
        Writes and reads messages to and from Kinesis streams with
        error handling and message decoration
        :param writer: Writes messages to Kinesis stream
        :param reader: Reads messages from Kinesis stream
        :param error_handler: Handles messages with errors
        :param decorators: Enhance messages with extra fields
        :type writer: writer.StreamWriter
        :type reader: reader.StreamReader
        :type error_handler: handlers.MessageErrorHandler
        :type decorators: list
        """
        super().__init__(writer, reader)
        # Normalize to an empty list so _apply_decorators can always iterate.
        if decorators:
            self.decorators = decorators
        else:
            self.decorators = []
        self.error_handler = error_handler

    def _apply_decorators(self, payload):
        """
        Applies a sequence of decorators that
        enhance and modify the contents of a payload
        :param payload: Undecorated JSON payload
        :type payload: str
        :return payload: Decorated JSON payload
        :rtype payload: str
        :raises DecoratorApplyException: when any decorator fails
        """
        # NOTE(review): each decorator receives the ORIGINAL payload, not the
        # previous decorator's output — confirm chaining is intentional.
        decorated_payload = payload
        for decorator in self.decorators:
            try:
                decorated_payload = decorator.process(payload)
            except Exception:
                self.logger.warning(
                    'Failed to apply decorator {}'.format(decorator.name))
                raise DecoratorApplyException()
        return decorated_payload

    def write_message(self, stream_names, payload, max_attempts=MAX_ATTEMPTS):
        """Write a payload into each stream in stream_names
        :param stream_names: Kinesis streams to write to
        :param payload: JSON payload
        :param max_attempts: Max number of times to attempt writing
        :type stream_names: list of str
        :type payload: str
        :type max_attempts: int
        """
        # Invalid JSON is routed to the error handler instead of being sent.
        try:
            json.loads(payload)
        except json.decoder.JSONDecodeError:
            self.error_handler.handle_invalid_json(payload)
            return
        decorated_payload = self._apply_decorators(payload)
        for stream_name in stream_names:
            try:
                super().write_message([stream_name],
                                      decorated_payload,
                                      max_attempts)
            except MaxRetriesExceededException as e:
                # Per-stream failure: report GENERR005 and continue with the
                # remaining streams.
                stream_name = e.args[0]
                error_code = 'GENERR005'
                error_description = 'Maximum retry attempts {0} exceed'\
                    'for stream {1}'.format(max_attempts,
                                            stream_name)
                self.error_handler.handle_error(decorated_payload,
                                                error_code,
                                                error_description)

    def handle_error(self, payload, error_code, error_description):
        """ Allows errors to be posted to the stream occurring from
        activities like payload validation
        :param payload: JSON payload
        :param error_code: Error Code
        :param error_description: Description Of Error
        """
        self.error_handler.handle_error(payload, error_code, error_description)
| 39.346457
| 79
| 0.626976
| 4,871
| 0.974785
| 0
| 0
| 0
| 0
| 0
| 0
| 2,378
| 0.475886
|
0cdd0af2f9cdd4f1682dfeb1a35ec8ea6569dc39
| 516
|
py
|
Python
|
offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | 2
|
2021-03-14T11:38:26.000Z
|
2021-03-14T11:38:30.000Z
|
offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | null | null | null |
offer/10-qing-wa-tiao-tai-jie-wen-ti-lcof.py
|
wanglongjiang/leetcode
|
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
|
[
"MIT"
] | 1
|
2022-01-17T19:33:23.000Z
|
2022-01-17T19:33:23.000Z
|
'''
剑指 Offer 10- II. 青蛙跳台阶问题
一只青蛙一次可以跳上1级台阶,也可以跳上2级台阶。求该青蛙跳上一个 n 级的台阶总共有多少种跳法。
答案需要取模 1e9+7(1000000007),如计算初始结果为:1000000008,请返回 1。
提示:
0 <= n <= 100
'''
'''
思路:递归
'''
class Solution:
    """Frog-stairs problem: count the ways to climb n steps taking 1 or 2
    steps at a time, modulo 1e9+7."""

    _MOD = 1000000007  # modulus required by the problem statement

    def numWays(self, n: int) -> int:
        """Return the number of distinct ways to climb *n* steps, mod 1e9+7.

        Fix: the original naive recursion was O(2**n) and effectively hangs
        for the stated bound n <= 100; this iterative DP is O(n) and returns
        identical values (Fibonacci-style with base cases 1, 1, 2).
        """
        prev, curr = 1, 1  # numWays(0), numWays(1)
        for _ in range(2, n + 1):
            prev, curr = curr, (prev + curr) % self._MOD
        return curr
# Smoke checks of the solution against small inputs
# (expected: 2, 8, 1, 21).
s = Solution()
print(s.numWays(2))
print(s.numWays(5))
print(s.numWays(0))
print(s.numWays(7))
| 15.636364
| 71
| 0.560078
| 245
| 0.358712
| 0
| 0
| 0
| 0
| 0
| 0
| 336
| 0.491947
|
0cddc6fcdac1a04a9f2296ecc74335e532a712c0
| 2,624
|
py
|
Python
|
recipes/libmount/all/conanfile.py
|
KristianJerpetjon/conan-center-index
|
f368200c30fb3be44862e2e709be990d0db4d30e
|
[
"MIT"
] | null | null | null |
recipes/libmount/all/conanfile.py
|
KristianJerpetjon/conan-center-index
|
f368200c30fb3be44862e2e709be990d0db4d30e
|
[
"MIT"
] | 1
|
2019-11-26T10:55:31.000Z
|
2019-11-26T10:55:31.000Z
|
recipes/libmount/all/conanfile.py
|
KristianJerpetjon/conan-center-index
|
f368200c30fb3be44862e2e709be990d0db4d30e
|
[
"MIT"
] | 1
|
2019-10-31T19:29:14.000Z
|
2019-10-31T19:29:14.000Z
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import os
class LibmountConan(ConanFile):
    """Conan recipe building libmount (and its libblkid dependency) from the
    util-linux sources via autotools. Linux-only."""
    name = "libmount"
    description = "The libmount library is used to parse /etc/fstab, /etc/mtab and /proc/self/mountinfo files, manage the mtab file, evaluate mount options, etc"
    topics = ("conan", "mount", "libmount", "linux", "util-linux")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git"
    license = "GPL-2.0-or-later"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    _source_subfolder = "source_subfolder"
    _autotools = None  # cached AutoToolsBuildEnvironment (built lazily)

    def configure(self):
        # Pure C library: C++ settings are irrelevant, drop them from the id.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd
        if self.settings.os != "Linux":
            raise ConanInvalidConfiguration("only Linux is supported")

    def source(self):
        # Fetch the util-linux tarball and normalize the extracted dir name.
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "util-linux-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    def _configure_autotools(self):
        # Lazily create and cache the configured autotools helper so build()
        # and package() share one configuration.
        if not self._autotools:
            args = ["--disable-all-programs", "--enable-libmount", "--enable-libblkid"]
            if self.options.shared:
                args.extend(["--disable-static", "--enable-shared"])
            else:
                args.extend(["--disable-shared", "--enable-static"])
            self._autotools = AutoToolsBuildEnvironment(self)
            self._autotools.configure(args=args)
        return self._autotools

    def build(self):
        with tools.chdir(self._source_subfolder):
            env_build = self._configure_autotools()
            env_build.make()

    def package(self):
        with tools.chdir(self._source_subfolder):
            env_build = self._configure_autotools()
            env_build.install()
        self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
        # Strip non-library artifacts from the package.
        tools.rmdir(os.path.join(self.package_folder, "sbin"))
        tools.rmdir(os.path.join(self.package_folder, "share"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        os.remove(os.path.join(self.package_folder, "lib", "libblkid.la"))
        os.remove(os.path.join(self.package_folder, "lib", "libmount.la"))

    def package_info(self):
        self.cpp_info.libs = ["mount", "blkid"]
        self.cpp_info.includedirs.append(os.path.join("include", "libmount"))
| 43.733333
| 161
| 0.651296
| 2,496
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 707
| 0.269436
|
0cde288694905dadb83458256a681e9a26cd9df7
| 36,246
|
py
|
Python
|
code/tmp_rtrip/nntplib.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/nntplib.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/nntplib.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ['NNTP', 'NNTPError', 'NNTPReplyError', 'NNTPTemporaryError',
'NNTPPermanentError', 'NNTPProtocolError', 'NNTPDataError', 'decode_header'
]
_MAXLINE = 2048
class NNTPError(Exception):
    """Base class for all nntplib exceptions"""

    def __init__(self, *args):
        super().__init__(*args)
        # Keep the server's response line handy for callers; fall back to a
        # stock message when the exception was raised without one.
        self.response = args[0] if args else 'No response given'
# Concrete exception types, distinguished by which class of server status
# code (or protocol violation) triggered them.
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    pass


class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    pass


class NNTPPermanentError(NNTPError):
    """5xx errors"""
    pass


class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass


class NNTPDataError(NNTPError):
    """Error in response data"""
    pass
NNTP_PORT = 119
NNTP_SSL_PORT = 563
_LONGRESP = {'100', '101', '211', '215', '220', '221', '222', '224', '225',
'230', '231', '282'}
_DEFAULT_OVERVIEW_FMT = ['subject', 'from', 'date', 'message-id',
'references', ':bytes', ':lines']
_OVERVIEW_FMT_ALTERNATIVES = {'bytes': ':bytes', 'lines': ':lines'}
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo', ['group', 'last', 'first',
'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo', ['number', 'message_id',
'lines'])
def decode_header(header_str):
    """Takes a unicode string representing a munged header value
    and decodes it as a (possibly non-ASCII) readable value."""
    # Each fragment comes back either already decoded (str) or as raw bytes
    # with its declared charset; bytes with no charset default to ASCII.
    return ''.join(
        part.decode(charset or 'ascii') if isinstance(part, bytes) else part
        for part, charset in _email_decode_header(header_str)
    )
def _parse_overview_fmt(lines):
    """Parse a list of string representing the response to LIST OVERVIEW.FMT
    and return a list of header/metadata names.
    Raises NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    fmt = []
    for line in lines:
        if line[0] == ':':
            # Metadata item such as ":bytes" -- keep the leading colon.
            name = ':' + line[1:].partition(':')[0]
        else:
            # Header item such as "Subject:" or "Xref:full".
            name = line.partition(':')[0]
        name = name.lower()
        # Normalize legacy spellings ("bytes"/"lines") to metadata names.
        fmt.append(_OVERVIEW_FMT_ALTERNATIVES.get(name, name))
    defaults = _DEFAULT_OVERVIEW_FMT
    if len(fmt) < len(defaults):
        raise NNTPDataError('LIST OVERVIEW.FMT response too short')
    if fmt[:len(defaults)] != defaults:
        raise NNTPDataError('LIST OVERVIEW.FMT redefines default fields')
    return fmt
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to an OVER or XOVER command according to the
    overview format `fmt`."""
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        # zip() silently drops excess tokens, matching the original skip of
        # tokens beyond the known format.
        for i, (field_name, token) in enumerate(zip(fmt, tokens)):
            if i >= n_defaults and not field_name.startswith(':'):
                # Additional (non-default) headers are sent as "Name: value";
                # strip the name prefix, verifying it is actually present.
                h = field_name + ': '
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError(
                        "OVER/XOVER response doesn't include names of additional headers"
                        )
                token = token[len(h):] if token else None
            fields[field_name] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = '000000'
else:
time_str = '{0.hour:02d}{0.minute:02d}{0.second:02d}'.format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = '{0:02d}{1.month:02d}{1.day:02d}'.format(y, dt)
else:
date_str = '{0:04d}{1.month:02d}{1.day:02d}'.format(y, dt)
return date_str, time_str
if _have_ssl:
def _encrypt_on(sock, context, hostname):
"""Wrap a socket in SSL/TLS. Arguments:
- sock: Socket to wrap
- context: SSL context to use for the encrypted connection
Returns:
- sock: New, encrypted socket.
"""
if context is None:
context = ssl._create_stdlib_context()
return context.wrap_socket(sock, server_hostname=hostname)
class _NNTPBase:
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host, readermode=None, timeout=
_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
self._caps = None
self.getcapabilities()
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
self._caps = None
self.getcapabilities()
self.tls_on = False
self.authenticated = False
def __enter__(self):
return self
def __exit__(self, *args):
is_connected = lambda : hasattr(self, 'file')
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging:
print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1:
print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging:
print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print('*get*', repr(line))
if not line:
raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns a unicode string."""
resp = self._getline()
if self.debugging:
print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is a unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
if isinstance(file, (str, bytes)):
openedFile = file = open(file, 'wb')
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
terminators = b'.' + _CRLF, b'.\n'
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
if openedFile:
openedFile.close()
return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors) for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring('LIST OVERVIEW.FMT')
except NNTPPermanentError:
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring('CAPABILITIES')
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, not '{:40}'"
.format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, not '{:40}'"
.format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file
)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end
), file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn(
'The XGTITLE extension is not actively used, use descriptions() instead'
, DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn('The XPATH extension is not actively used',
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd('DATE')
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b'\r\n') + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b'.\r\n')
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError('Already logged in.')
if not user and not usenetrc:
raise ValueError(
'At least one of `user` and `usenetrc` must be specified')
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except OSError:
pass
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
self._caps = None
self.getcapabilities()
if self.readermode_afterauth and 'READER' not in self._caps:
self._setreadermode()
self._caps = None
self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
if self.tls_on:
raise ValueError('TLS is already enabled.')
if self.authenticated:
raise ValueError('TLS cannot be started after authentication.')
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context, self.host)
self.file = self.sock.makefile('rwb')
self.tls_on = True
self._caps = None
self.getcapabilities()
else:
raise NNTPError('TLS failed to start.')
class NNTP(_NNTPBase):

    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False, timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance. Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
        connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
        if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        # Open the TCP connection; the server's welcome line is then read by
        # _NNTPBase.__init__ via _getresp().
        self.sock = socket.create_connection((host, port), timeout)
        file = None
        try:
            file = self.sock.makefile('rwb')
            _NNTPBase.__init__(self, file, host, readermode, timeout)
            if user or usenetrc:
                self.login(user, password, usenetrc)
        except:
            # Bare except on purpose: undo the partial setup (buffered file
            # and socket), then re-raise whatever went wrong.
            if file:
                file.close()
            self.sock.close()
            raise

    def _close(self):
        # Close the buffered file first, then always release the socket.
        try:
            _NNTPBase._close(self)
        finally:
            self.sock.close()
if _have_ssl:

    class NNTP_SSL(_NNTPBase):

        def __init__(self, host, port=NNTP_SSL_PORT, user=None, password=
                None, ssl_context=None, readermode=None, usenetrc=False,
                timeout=_GLOBAL_DEFAULT_TIMEOUT):
            """This works identically to NNTP.__init__, except for the change
            in default port and the `ssl_context` argument for SSL connections.
            """
            self.sock = socket.create_connection((host, port), timeout)
            file = None
            try:
                # Perform the TLS handshake before any NNTP traffic is
                # exchanged (implicit TLS, as opposed to STARTTLS).
                self.sock = _encrypt_on(self.sock, ssl_context, host)
                file = self.sock.makefile('rwb')
                _NNTPBase.__init__(self, file, host, readermode=readermode,
                    timeout=timeout)
                if user or usenetrc:
                    self.login(user, password, usenetrc)
            except:
                # Clean up the partially-initialized connection, then
                # re-raise.
                if file:
                    file.close()
                self.sock.close()
                raise

        def _close(self):
            # Close the buffered file first, then always release the socket.
            try:
                _NNTPBase._close(self)
            finally:
                self.sock.close()

    __all__.append('NNTP_SSL')
if __name__ == '__main__':
    # Built-in demo: connect to a server, pick a group and print an overview
    # of the most recent articles.
    import argparse
    parser = argparse.ArgumentParser(description=
        ' nntplib built-in demo - display the latest articles in a newsgroup'
        )
    parser.add_argument('-g', '--group', default=
        'gmane.comp.python.general', help=
        'group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org', help=
        'NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int, help=
        'NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int, help=
        'number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
        help='use NNTP over SSL')
    args = parser.parse_args()
    port = args.port
    # A port of -1 means "use the protocol's default port".
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)
    caps = s.getcapabilities()
    # Opportunistically upgrade a plain connection when the server offers it.
    if 'STARTTLS' in caps:
        s.starttls()
    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)

    def cut(s, lim):
        # Truncate s to lim characters, marking the cut with an ellipsis.
        if len(s) > lim:
            s = s[:lim - 4] + '...'
        return s
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print('{:7} {:20} {:42} ({})'.format(artnum, cut(author, 20), cut(
            subject, 42), lines))
    s.quit()
| 36.20979
| 89
| 0.580202
| 28,026
| 0.773216
| 0
| 0
| 0
| 0
| 0
| 0
| 15,012
| 0.41417
|
0cde5c372756830b141e6816281e99f572d9eff3
| 3,463
|
py
|
Python
|
tests/required_with_test.py
|
roypeters/spotlight
|
f23818cf7b49aa7a31200c1945ebc2d91656156e
|
[
"MIT"
] | 9
|
2019-03-26T13:21:16.000Z
|
2021-03-21T08:55:49.000Z
|
tests/required_with_test.py
|
roypeters/spotlight
|
f23818cf7b49aa7a31200c1945ebc2d91656156e
|
[
"MIT"
] | 7
|
2019-03-28T17:32:03.000Z
|
2021-09-24T13:17:32.000Z
|
tests/required_with_test.py
|
roypeters/spotlight
|
f23818cf7b49aa7a31200c1945ebc2d91656156e
|
[
"MIT"
] | 4
|
2019-03-30T13:28:22.000Z
|
2020-06-15T13:15:44.000Z
|
from src.spotlight.errors import REQUIRED_WITH_ERROR
from .validator_test import ValidatorTest
class RequiredWithTest(ValidatorTest):
    """Tests for the `required_with` validation rule."""

    def setUp(self):
        self.field = "test2"
        self.other_field = "test1"
        self.rules = {"test2": "required_with:test1"}
        self.required_with_error = REQUIRED_WITH_ERROR.format(
            field=self.field, other=self.other_field
        )

    def test_required_with_rule_with_missing_field_expect_error(self):
        payload = {"test1": "hello"}
        result = self.validator.validate(payload, self.rules)
        field_errors = result.get(self.field)
        self.assertEqual(field_errors[0], self.required_with_error)

    def test_required_with_rule_with_field_present_expect_no_error(self):
        payload = {"test1": "hello", "test2": "world"}
        result = self.validator.validate(payload, self.rules)
        self.assertEqual(result.get(self.field), None)

    def test_required_with_rule_with_boolean_true_expect_no_error(self):
        payload = {"test1": True, "test2": "world"}
        result = self.validator.validate(payload, self.rules)
        self.assertEqual(result.get(self.field), None)

    def test_required_with_rule_with_boolean_false_expect_no_error(self):
        payload = {"test1": False, "test2": "world"}
        result = self.validator.validate(payload, self.rules)
        self.assertEqual(result.get(self.field), None)

    def test_required_with_rule_with_multi_requirement_and_missing_field_expect_error(
        self
    ):
        target = "test5"
        rules = {target: "required_with:test1,test2,test3,test4"}
        payload = {"test2": "not.missing", "test4": "not.missing"}
        result = self.validator.validate(payload, rules)
        self.assertEqual(
            result.get(target)[0],
            REQUIRED_WITH_ERROR.format(
                field=target, other="test1, test2, test3, test4"
            ),
        )

    def test_required_with_rule_with_all_present_expect_no_error(self):
        rules = {"test5": "required_with:test1,test2,test3,test4"}
        payload = {
            "test1": "test",
            "test2": "test",
            "test3": "test",
            "test4": "test",
            "test5": "test",
        }
        result = self.validator.validate(payload, rules)
        self.assertEqual(result.get("test5"), None)

    def test_required_with_rule_with_other_field_present_but_none_expect_error(self):
        target = "test2"
        rules = {
            "test1": "required_with:test2|string",
            "test2": "required_with:test1|string",
        }
        payload = {"test1": "test", "test2": None}
        result = self.validator.validate(payload, rules)
        self.assertEqual(
            result.get(target)[0],
            REQUIRED_WITH_ERROR.format(field=target, other="test1"),
        )

    def test_required_with_rule_with_both_none_expect_no_error(self):
        target = "test2"
        rules = {
            "test1": "required_with:test2|string",
            "test2": "required_with:test1|string",
        }
        payload = {"test1": None, "test2": None}
        result = self.validator.validate(payload, rules)
        self.assertEqual(result.get(target), None)
| 32.064815
| 86
| 0.626047
| 3,365
| 0.971701
| 0
| 0
| 0
| 0
| 0
| 0
| 560
| 0.16171
|
0cde6e9d59bff904867397a498cf0cce96687bf3
| 3,194
|
py
|
Python
|
default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py
|
the-browser/recommending-interesting-writing
|
9ff4771d3f437d33c26d2f306e393b5a90a04878
|
[
"MIT"
] | 5
|
2020-09-17T17:56:21.000Z
|
2021-11-03T02:40:27.000Z
|
default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py
|
the-browser/recommending-interesting-writing
|
9ff4771d3f437d33c26d2f306e393b5a90a04878
|
[
"MIT"
] | null | null | null |
default-approach/data-collection/harpers-data/scraper_scripts/get-harpers-links.py
|
the-browser/recommending-interesting-writing
|
9ff4771d3f437d33c26d2f306e393b5a90a04878
|
[
"MIT"
] | 1
|
2020-11-01T11:37:38.000Z
|
2020-11-01T11:37:38.000Z
|
BASE_URL="https://harpers.org/sections/readings/page/"
N_ARTICLE_LINK_PAGES = 50
OUTPUT_FILE = 'harpers-later-urls.json'
WORKER_THREADS = 32
import json
import datetime
import dateutil.parser
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from datetime import datetime
from newspaper import Article
from bs4 import BeautifulSoup
from typing import List
from queue import Queue
from threading import Thread
from requests import get
from pathlib import Path
import pandas as pd
from urllib.request import Request, urlopen
@dataclass_json
@dataclass
class HarperReadingArticleUrl:
    """One scraped article link; dataclass_json supplies to_dict() for the writer thread."""

    url: str  # absolute URL of the article
    title: str  # article headline text (may be empty string)
class WriteThread(Thread):
    """Consumer thread: drains article records from the queue and writes them out.

    Receives HarperReadingArticleUrl items from the scraper threads. A None
    item is the end-of-input sentinel: at that point the accumulated records
    are de-duplicated by URL and written to OUTPUT_FILE as a JSON array.
    """

    def __init__(self, queue: Queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = queue

    def run(self):
        collected = []
        while True:
            article = self.queue.get()
            if article is None:
                # Fix: the original did insert(0, item) per record — O(n) per
                # insert, O(n^2) overall. Appending and reversing once at flush
                # time produces the same final order (last received first).
                collected.reverse()
                if collected:
                    check_df = pd.DataFrame(collected)
                    check_df.drop_duplicates(subset="url", keep="first", inplace=True)
                else:
                    # Fix: drop_duplicates(subset="url") raises KeyError on an
                    # empty frame with no columns; emit an empty [] instead.
                    check_df = pd.DataFrame(columns=["url", "title"])
                check_df.to_json(Path(OUTPUT_FILE), orient="records")
                break
            collected.append(article.to_dict())
class ScrapeThread(Thread):
    """Worker thread: scrapes article links from a range of listing pages.

    For each page index in `chunk`, fetches BASE_URL + index, parses every
    'card' div, and puts a HarperReadingArticleUrl on the shared queue.
    """

    def __init__(self, chunk, queue: Queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.chunk = chunk  # iterable of page indices assigned to this worker
        self.queue = queue

    def run(self):
        for page in self.chunk:
            try:
                print(f'Getting articles from list page {page}')
                url = f"{BASE_URL}{page}"
                # Spoof a browser UA; the site rejects the default urllib agent.
                req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
                webpage = urlopen(req).read()
                soup = BeautifulSoup(webpage, "html5lib")
                for article in soup.find_all('div', {'class': 'card'}):
                    anchors = article.find_all('a')
                    # The second anchor carries the article link and title;
                    # skip malformed cards instead of aborting the whole page
                    # (the original let IndexError/KeyError kill the page).
                    if len(anchors) < 2:
                        continue
                    link = anchors[1].get('href')
                    title = anchors[1].find('h2', {'class': 'ac-title'})
                    # Fix: the original condition tested `link is None` twice
                    # and never guarded the href lookup itself.
                    if title is None or title.string is None or link is None:
                        continue
                    article_url = HarperReadingArticleUrl(
                        url=link.strip(),
                        title=str(title.string.strip()) or '',
                    )
                    self.queue.put(article_url)
            except Exception as e:
                # Best-effort scraping: log and move on to the next page.
                print(f'Something went wrong when scraping: {e}')
                print("------------------------------------------")
if __name__ == '__main__':
    queue = Queue()

    # Start the single writer first so scraped items are consumed immediately.
    write_thread = WriteThread(queue)
    write_thread.start()

    # Partition pages 0..N_ARTICLE_LINK_PAGES among the workers.
    # Fix: max(1, ...) guards against a zero step — the original
    # N_ARTICLE_LINK_PAGES // WORKER_THREADS is 0 whenever there are fewer
    # pages than workers, and range(..., step=0) raises ValueError.
    chunk_size = max(1, N_ARTICLE_LINK_PAGES // WORKER_THREADS)
    worker_threads = []
    for start in range(0, N_ARTICLE_LINK_PAGES + 1, chunk_size):
        worker_threads.append(ScrapeThread(range(start, start + chunk_size), queue))

    for thread in worker_threads:
        thread.start()
    for thread in worker_threads:
        thread.join()

    # All scrapers are done: signal end of jobs to the write thread and
    # wait for it to flush the output file.
    queue.put(None)
    print('Done.')
    write_thread.join()
| 31.313725
| 114
| 0.60551
| 2,034
| 0.636819
| 0
| 0
| 85
| 0.026612
| 0
| 0
| 369
| 0.115529
|
0cdee741020f9cadb35d114ce192b7140ac463d7
| 8,711
|
py
|
Python
|
rulm/models/neural_net/encoder_only.py
|
IlyaGusev/rulm
|
4e78a495eba6cd6ea1fea839463c8145ed7051f2
|
[
"Apache-2.0"
] | null | null | null |
rulm/models/neural_net/encoder_only.py
|
IlyaGusev/rulm
|
4e78a495eba6cd6ea1fea839463c8145ed7051f2
|
[
"Apache-2.0"
] | null | null | null |
rulm/models/neural_net/encoder_only.py
|
IlyaGusev/rulm
|
4e78a495eba6cd6ea1fea839463c8145ed7051f2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
import numpy as np
import torch
from torch.nn.functional import linear, log_softmax, embedding
from torch.nn import Dropout, LogSoftmax, NLLLoss
from allennlp.common import Params
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary, DEFAULT_PADDING_TOKEN
from allennlp.modules import TextFieldEmbedder, TimeDistributed, Seq2SeqEncoder
from allennlp.modules.sampled_softmax_loss import SampledSoftmaxLoss
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.token_embedders import Embedding, TokenEmbedder
from allennlp.modules.token_embedders.embedding import _read_pretrained_embeddings_file
from allennlp.nn.util import combine_initial_dims, uncombine_initial_dims
class SoftmaxLoss(torch.nn.Module):
    """Full (non-sampled) softmax negative log-likelihood over a vocabulary.

    Projects context embeddings onto the vocabulary with a learned weight
    matrix and bias, applies log-softmax, and returns the *summed* NLL of
    the target token ids (padding_index targets are ignored).
    """

    def __init__(self,
                 num_words: int,
                 embedding_dim: int,
                 padding_index: int = 0) -> None:
        super().__init__()
        # (num_words, embedding_dim) projection — may later be tied to the
        # input embedding matrix by the enclosing model.
        self.softmax_w = torch.nn.Parameter(torch.Tensor(num_words, embedding_dim))
        self.softmax_b = torch.nn.Parameter(torch.Tensor(num_words))
        self._softmax_func = LogSoftmax(dim=-1)
        self._padding_index = padding_index
        # Fix: the criterion is stateless across calls, so build it once here
        # instead of re-instantiating NLLLoss on every forward pass.
        self._criterion = NLLLoss(ignore_index=padding_index, reduction="sum")
        self._reset_parameters()

    def _reset_parameters(self):
        # Fan-in based uniform init, same scheme as torch.nn.Linear's
        # historical default.
        stdv = 1. / np.sqrt(self.softmax_w.size(1))
        self.softmax_w.data.uniform_(-stdv, stdv)
        self.softmax_b.data.uniform_(-stdv, stdv)

    def forward(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Return the summed NLL of `targets` given `embeddings`.

        embeddings: (num_targets, embedding_dim) context vectors.
        targets: (num_targets,) gold token ids (cast to long internally).
        """
        logits = self._softmax_func(linear(embeddings, self.softmax_w, self.softmax_b))
        return self._criterion(logits, targets.long())
@TokenEmbedder.register("embedding_with_dropout")
class EmbeddingWithDropout(Embedding):
    """AllenNLP token embedder with word-level (embedding-row) dropout.

    Unlike element-wise dropout, with probability `dropout` an *entire
    vocabulary row* of the embedding matrix is zeroed for the whole batch,
    and the surviving rows are rescaled by 1/(1 - dropout) (inverted
    dropout, so expected magnitudes are unchanged at train time).
    """

    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 dropout: float = None,
                 projection_dim: int = None,
                 weight: torch.FloatTensor = None,
                 padding_index: int = None,
                 trainable: bool = True,
                 max_norm: float = None,
                 norm_type: float = 2.,
                 scale_grad_by_freq: bool = False,
                 sparse: bool = False) -> None:
        # Delegate everything except `dropout` to the stock AllenNLP Embedding.
        Embedding.__init__(self,
                           num_embeddings=num_embeddings,
                           embedding_dim=embedding_dim,
                           projection_dim=projection_dim,
                           weight=weight,
                           padding_index=padding_index,
                           trainable=trainable,
                           max_norm=max_norm,
                           norm_type=norm_type,
                           scale_grad_by_freq=scale_grad_by_freq,
                           sparse=sparse)
        # Probability of dropping a whole embedding row; None/0 disables it.
        self.dropout = dropout

    def forward(self, inputs):
        # Flatten any extra leading dimensions so the lookup sees a 2-D id
        # tensor; restored to the original shape after embedding.
        original_size = inputs.size()
        inputs = combine_initial_dims(inputs)
        if self.dropout and self.training:
            # Bernoulli keep-mask of shape (num_embeddings, 1), broadcast
            # across the embedding dimension and rescaled by 1/(1 - p).
            mask = self.weight.data.new().resize_((self.weight.size(0), 1)).bernoulli_(1 - self.dropout)\
                .expand_as(self.weight) / (1 - self.dropout)
            masked_embed_weight = mask * self.weight
        else:
            # Eval mode (or no dropout configured): use the raw weights.
            masked_embed_weight = self.weight
        embedded = embedding(inputs, masked_embed_weight,
                             max_norm=self.max_norm,
                             norm_type=self.norm_type,
                             scale_grad_by_freq=self.scale_grad_by_freq,
                             sparse=self.sparse)
        embedded = uncombine_initial_dims(embedded, original_size)
        if self._projection:
            # Apply the optional projection over every non-batch dimension.
            projection = self._projection
            for _ in range(embedded.dim() - 2):
                projection = TimeDistributed(projection)
            embedded = projection(embedded)
        return embedded

    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':
        """Build from config; mirrors Embedding.from_params plus a `dropout` key.

        num_embeddings defaults to the size of `vocab_namespace` when omitted.
        """
        num_embeddings = params.pop_int('num_embeddings', None)
        vocab_namespace = params.pop("vocab_namespace", "tokens")
        if num_embeddings is None:
            num_embeddings = vocab.get_vocab_size(vocab_namespace)
        embedding_dim = params.pop_int('embedding_dim')
        pretrained_file = params.pop("pretrained_file", None)
        projection_dim = params.pop_int("projection_dim", None)
        trainable = params.pop_bool("trainable", True)
        padding_index = params.pop_int('padding_index', None)
        max_norm = params.pop_float('max_norm', None)
        norm_type = params.pop_float('norm_type', 2.)
        scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
        sparse = params.pop_bool('sparse', False)
        dropout = params.pop_float('dropout', None)
        params.assert_empty(cls.__name__)
        # Optionally seed the weight matrix from a pretrained embedding file.
        weight = _read_pretrained_embeddings_file(pretrained_file, embedding_dim,
                                                  vocab, vocab_namespace) if pretrained_file else None
        return cls(num_embeddings=num_embeddings,
                   embedding_dim=embedding_dim,
                   projection_dim=projection_dim,
                   weight=weight,
                   padding_index=padding_index,
                   trainable=trainable,
                   max_norm=max_norm,
                   norm_type=norm_type,
                   scale_grad_by_freq=scale_grad_by_freq,
                   sparse=sparse,
                   dropout=dropout)
@Model.register("encoder_only")
class EncoderOnlyLanguageModel(Model):
    """Language model: token embedder -> seq2seq contextualizer -> softmax loss.

    Loss is computed only over non-padding target positions. With
    `tie_embeddings=True` the softmax projection shares its weight matrix
    with the input token embedding.
    """

    def __init__(self,
                 vocab: Vocabulary,
                 embedder: TextFieldEmbedder,
                 contextualizer: Seq2SeqEncoder,
                 dropout: float = None,
                 tie_embeddings: bool = True,
                 num_samples: int = None,
                 use_variational_dropout: bool = False):
        super().__init__(vocab)
        self._embedder = embedder
        self._contextualizer = contextualizer
        self._context_dim = contextualizer.get_output_dim()
        # When dropout is unset, fall back to an identity callable so forward()
        # can apply self._dropout unconditionally.
        if use_variational_dropout:
            self._dropout = InputVariationalDropout(dropout) if dropout else lambda x: x
        else:
            self._dropout = Dropout(dropout) if dropout else lambda x: x
        vocab_size = self.vocab.get_vocab_size()
        padding_index = self.vocab.get_token_index(DEFAULT_PADDING_TOKEN)
        # num_samples switches to sampled softmax for large vocabularies.
        if num_samples:
            self._softmax_loss = SampledSoftmaxLoss(vocab_size, self._context_dim, num_samples)
        else:
            self._softmax_loss = SoftmaxLoss(vocab_size, self._context_dim, padding_index)
        self._tie_embeddings = tie_embeddings
        if self._tie_embeddings:
            # Share the input embedding matrix with the softmax projection.
            # NOTE(review): assumes the embedder has a "tokens" sub-embedder
            # whose weight shape matches (vocab_size, context_dim) — confirm
            # against the configuration.
            embedder_children = dict(self._embedder.named_children())
            word_embedder = embedder_children["token_embedder_tokens"]
            assert self._softmax_loss.softmax_w.size() == word_embedder.weight.size()
            self._softmax_loss.softmax_w = word_embedder.weight

    def forward(self,
                source_tokens: Dict[str, torch.Tensor],
                target_tokens: Dict[str, torch.Tensor]=None,
                **kwargs) -> Dict[str, torch.Tensor]:
        """Return {"loss": ...} when targets are given; adds "logits" at eval.

        Loss is the masked, length-normalized NLL of target_tokens.
        """
        # Shape: (batch_size, max_length)
        source = source_tokens["tokens"]
        # NOTE(review): id > 0 is treated as "real token" — assumes padding
        # index is 0; confirm DEFAULT_PADDING_TOKEN maps to 0 in this vocab.
        mask = source > 0
        # Shape: (batch_size, max_length, embedding_size)
        embeddings = self._embedder(source_tokens)
        embeddings = self._dropout(embeddings)
        # Shape: (batch_size, max_length, context_dim)
        contextual_embeddings = self._contextualizer(embeddings, mask)
        contextual_embeddings = self._dropout(contextual_embeddings)
        result = dict()
        if target_tokens:
            targets = target_tokens["tokens"]
            targets = targets.view(-1)
            # Recompute the mask over the flattened *targets* so loss covers
            # only non-padding target positions.
            mask = targets > 0
            masked_targets = targets.masked_select(mask)
            # Select the context vectors at the same non-padding positions.
            lined_embeddings = contextual_embeddings.view(-1, self._context_dim)
            masked_embeddings = lined_embeddings.masked_select(mask.unsqueeze(-1))
            masked_embeddings = masked_embeddings.view(-1, self._context_dim)
            loss = self._softmax_loss(masked_embeddings, masked_targets)
            # Normalize the summed loss by the number of real targets.
            num_targets = torch.sum(mask.long())
            result["loss"] = loss / num_targets.float()
        if not self.training:
            # Full-vocabulary log-probabilities, for evaluation/decoding only.
            result["logits"] = self._get_logits(contextual_embeddings)
        return result

    def _get_logits(self, embeddings):
        # Project onto the (possibly tied) softmax weights and log-normalize.
        linears = linear(embeddings, self._softmax_loss.softmax_w, self._softmax_loss.softmax_b)
        return log_softmax(linears, dim=-1)
| 44.218274
| 105
| 0.630926
| 7,844
| 0.900471
| 0
| 0
| 6,951
| 0.797957
| 0
| 0
| 408
| 0.046837
|
0cdf83ec2ee6735ac3ecbd989380ce0f87917a5d
| 102
|
py
|
Python
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 3
|
2019-05-15T09:30:39.000Z
|
2020-04-22T16:14:23.000Z
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 85
|
2019-04-24T10:39:35.000Z
|
2022-03-21T14:52:12.000Z
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 1
|
2021-01-17T11:12:19.000Z
|
2021-01-17T11:12:19.000Z
|
from api.cases.models import Case
class Query(Case):
    """
    Base query class.

    Marker subclass of Case: adds no fields or behavior of its own.
    NOTE(review): as a Django model subclass this presumably creates
    multi-table inheritance (a Query row linked to a Case row) — confirm
    against the Case model definition.
    """

    pass
| 10.2
| 33
| 0.588235
| 65
| 0.637255
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.313725
|