| hexsha (stringlengths 40–40) | size (int64 4–1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–209) | max_stars_repo_name (stringlengths 5–121) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 4–209) | max_issues_repo_name (stringlengths 5–121) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 4–209) | max_forks_repo_name (stringlengths 5–121) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 4–1.02M) | avg_line_length (float64 1.07–66.1k) | max_line_length (int64 4–266k) | alphanum_fraction (float64 0.01–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
384af3d12db6a0b77b5903c57fea8fb3389b584d
| 6,502
|
py
|
Python
|
examples/classification.py
|
jonpas/myo-raw
|
efd54c47d413c38808457697dc1ca8aaa23ac09e
|
[
"MIT"
] | 8
|
2017-11-24T10:33:59.000Z
|
2022-03-17T01:04:52.000Z
|
examples/classification.py
|
jonpas/myo-raw
|
efd54c47d413c38808457697dc1ca8aaa23ac09e
|
[
"MIT"
] | 4
|
2018-05-31T22:39:57.000Z
|
2018-06-28T16:06:09.000Z
|
examples/classification.py
|
jonpas/myo-raw
|
efd54c47d413c38808457697dc1ca8aaa23ac09e
|
[
"MIT"
] | 6
|
2017-10-30T01:00:47.000Z
|
2020-04-22T02:22:55.000Z
|
#
# Original work Copyright (c) 2014 Danny Zhu
# Modified work Copyright (c) 2017 Matthias Gazzari
#
# Licensed under the MIT license. See the LICENSE file for details.
#
from collections import Counter, deque
import sys
import struct
import numpy as np
from myo_raw import MyoRaw, DataCategory, EMGMode
try:
from sklearn import neighbors, svm
HAVE_SK = True
except ImportError:
HAVE_SK = False
try:
import pygame
from pygame.locals import *
HAVE_PYGAME = True
except ImportError:
HAVE_PYGAME = False
SUBSAMPLE = 3
K = 15
class NNClassifier(object):
'''A wrapper for sklearn's nearest-neighbor classifier that stores
training data in vals0, ..., vals9.dat.'''
def __init__(self):
for i in range(10):
with open('vals%d.dat' % i, 'ab') as f: pass
self.read_data()
def store_data(self, cls, vals):
with open('vals%d.dat' % cls, 'ab') as f:
f.write(struct.pack('<8H', *vals))
self.train(np.vstack([self.X, vals]), np.hstack([self.Y, [cls]]))
def read_data(self):
X = []
Y = []
for i in range(10):
X.append(np.fromfile('vals%d.dat' % i, dtype=np.uint16).reshape((-1, 8)))
Y.append(i + np.zeros(X[-1].shape[0]))
self.train(np.vstack(X), np.hstack(Y))
def train(self, X, Y):
self.X = X
self.Y = Y
if HAVE_SK and self.X.shape[0] >= K * SUBSAMPLE:
self.nn = neighbors.KNeighborsClassifier(n_neighbors=K, algorithm='kd_tree')
self.nn.fit(self.X[::SUBSAMPLE], self.Y[::SUBSAMPLE])
else:
self.nn = None
def nearest(self, d):
dists = ((self.X - d)**2).sum(1)
ind = dists.argmin()
return self.Y[ind]
def classify(self, d):
if self.X.shape[0] < K * SUBSAMPLE: return 0
if not HAVE_SK: return self.nearest(d)
return int(self.nn.predict(d)[0])
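# Illustrative usage of NNClassifier (a hypothetical sketch, not part of the original
# example): store a few labelled 8-channel EMG samples and query the classifier.
# Assumes writable vals0.dat..vals9.dat in the working directory; sample values are made up.
#
#     cls = NNClassifier()
#     cls.store_data(3, (10, 20, 30, 40, 50, 60, 70, 80))   # label this EMG sample as pose 3
#     pose = cls.classify((10, 20, 30, 40, 50, 60, 70, 80))  # returns 0 until K * SUBSAMPLE samples exist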
class Myo(MyoRaw):
'''Adds higher-level pose classification and handling onto MyoRaw.'''
HIST_LEN = 25
def __init__(self, cls, tty=None):
MyoRaw.__init__(self, tty)
self.cls = cls
self.history = deque([0] * Myo.HIST_LEN, Myo.HIST_LEN)
self.history_cnt = Counter(self.history)
self.add_handler(DataCategory.EMG, self.emg_handler)
self.last_pose = None
self.pose_handlers = []
def emg_handler(self, timestamp, emg, moving, characteristic_num):
y = self.cls.classify(emg)
self.history_cnt[self.history[0]] -= 1
self.history_cnt[y] += 1
self.history.append(y)
r, n = self.history_cnt.most_common(1)[0]
if self.last_pose is None or (n > self.history_cnt[self.last_pose] + 5 and n > Myo.HIST_LEN / 2):
self.on_raw_pose(r)
self.last_pose = r
def add_raw_pose_handler(self, h):
self.pose_handlers.append(h)
def on_raw_pose(self, pose):
for h in self.pose_handlers:
h(pose)
class EMGHandler(object):
def __init__(self, m):
self.recording = -1
self.m = m
self.emg = (0,) * 8
def __call__(self, timestamp, emg, moving, characteristic_num):
self.emg = emg
if self.recording >= 0:
self.m.cls.store_data(self.recording, emg)
def classify(m):
if HAVE_PYGAME:
pygame.init()
w, h = 800, 320
scr = pygame.display.set_mode((w, h))
font = pygame.font.Font(None, 30)
hnd = EMGHandler(m)
m.add_handler(DataCategory.EMG, hnd)
m.subscribe(emg_mode=EMGMode.SMOOTHED)
while True:
m.run()
r = m.history_cnt.most_common(1)[0][0]
if HAVE_PYGAME:
for ev in pygame.event.get():
if ev.type == QUIT or (ev.type == KEYDOWN and ev.unicode == 'q'):
raise KeyboardInterrupt()
elif ev.type == KEYDOWN:
if K_0 <= ev.key <= K_9:
hnd.recording = ev.key - K_0
elif K_KP0 <= ev.key <= K_KP9:
hnd.recording = ev.key - K_KP0
elif ev.unicode == 'r':
hnd.m.cls.read_data()
elif ev.type == KEYUP:
if K_0 <= ev.key <= K_9 or K_KP0 <= ev.key <= K_KP9:
hnd.recording = -1
scr.fill((0, 0, 0), (0, 0, w, h))
for i in range(10):
x = 0
y = 0 + 30 * i
clr = (0,200,0) if i == r else (255,255,255)
txt = font.render('%5d' % (m.cls.Y == i).sum(), True, (255,255,255))
scr.blit(txt, (x + 20, y))
txt = font.render('%d' % i, True, clr)
scr.blit(txt, (x + 110, y))
scr.fill((0,0,0), (x+130, y + txt.get_height() / 2 - 10, len(m.history) * 20, 20))
scr.fill(clr, (x+130, y + txt.get_height() / 2 - 10, m.history_cnt[i] * 20, 20))
if HAVE_SK and m.cls.nn is not None:
dists, inds = m.cls.nn.kneighbors(hnd.emg)
for i, (d, ind) in enumerate(zip(dists[0], inds[0])):
y = m.cls.Y[SUBSAMPLE*ind]
pos = (650, 20 * i)
txt = '%d %6d' % (y, d)
clr = (255, 255, 255)
scr.blit(font.render(txt, True, clr), pos)
pygame.display.flip()
else:
for i in range(10):
if i == r: sys.stdout.write('\x1b[32m')
print(i, '-' * m.history_cnt[i], '\x1b[K')
if i == r: sys.stdout.write('\x1b[m')
sys.stdout.write('\x1b[11A')
print()
def detect(m):
import subprocess
m.add_raw_pose_handler(print)
def page(pose):
if pose == 5:
subprocess.call(['xte', 'key Page_Down'])
elif pose == 6:
subprocess.call(['xte', 'key Page_Up'])
m.add_raw_pose_handler(page)
m.subscribe(emg_mode=EMGMode.SMOOTHED)
while True:
m.run()
if __name__ == '__main__':
m = Myo(NNClassifier(), sys.argv[1] if len(sys.argv) >= 2 else None)
try:
while True:
choice = input('Do you want to (c)lassify or (d)etect poses?\n')
if choice == 'c':
classify(m)
break
elif choice == 'd':
detect(m)
break
except KeyboardInterrupt:
pass
finally:
m.disconnect()
print("Disconnected")
| 31.717073
| 105
| 0.528299
|
c325ded23b824548e1f8c2c49b69356354ce4d7e
| 2,011
|
py
|
Python
|
args.py
|
torlenor/kalah
|
12a5520445c60855ed42c5bd30e512c168d531ca
|
[
"MIT"
] | 1
|
2020-11-30T21:20:33.000Z
|
2020-11-30T21:20:33.000Z
|
args.py
|
torlenor/kalah
|
12a5520445c60855ed42c5bd30e512c168d531ca
|
[
"MIT"
] | 6
|
2020-11-13T11:07:53.000Z
|
2020-11-13T14:33:32.000Z
|
args.py
|
torlenor/kalah
|
12a5520445c60855ed42c5bd30e512c168d531ca
|
[
"MIT"
] | 1
|
2020-12-10T17:53:06.000Z
|
2020-12-10T17:53:06.000Z
|
def add_common_train_args(parser):
parser.add_argument('--episodes', type=int, default=1000, metavar='E',
help='number of episodes to train (default: 1000)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--evaluation-interval', type=int, default=100, metavar='E',
help='interval between evaluation runs (default: 100)')
parser.add_argument('--evaluation-games', type=int, default=100, metavar='EG',
help='how many games to play to check win rate during training (default: 100)')
parser.add_argument('--bins', type=int, default=6, metavar='B',
help='bins of the Kalah board (default: 6)')
parser.add_argument('--seeds', type=int, default=4, metavar='S',
help='seeds of the Kalah board (default: 4)')
parser.add_argument('--learning-rate', type=float, default=0.01, metavar='L',
help='learning rate (default: 0.01)')
parser.add_argument('--solved', type=float, default=95, metavar='SL',
help='consider problem solved when agent wins x percent of the games (default: 95)')
parser.add_argument('--neurons', type=int, default=512, metavar='NE',
help='how many neurons in each layer (default: 512)')
parser.add_argument('--run-id', type=str, required=True, metavar='RUN_ID',
help='the identifier for the training run.')
parser.add_argument('--force', dest='force', action='store_const',
const=True, default=False,
help='force overwrite already existing results')
| 69.344828
| 108
| 0.595724
|
0d225f92795f232073396004802762e356a9ac35
| 782
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_SQSQueueEncryption.py
|
mgmt1pyro/Test-Theme
|
d3e20b62111636ecbe4267c5fff7c2820a9a892d
|
[
"Apache-2.0"
] | null | null | null |
tests/terraform/checks/resource/aws/test_SQSQueueEncryption.py
|
mgmt1pyro/Test-Theme
|
d3e20b62111636ecbe4267c5fff7c2820a9a892d
|
[
"Apache-2.0"
] | null | null | null |
tests/terraform/checks/resource/aws/test_SQSQueueEncryption.py
|
mgmt1pyro/Test-Theme
|
d3e20b62111636ecbe4267c5fff7c2820a9a892d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from checkov.terraform.checks.resource.aws.SQSQueueEncryption import check
from checkov.terraform.models.enums import CheckResult
class TestSQSQueueEncryption(unittest.TestCase):
def test_failure(self):
resource_conf = {'name': ['terraform-example-queue']}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {'name': ['terraform-example-queue'], 'kms_master_key_id': ['alias/aws/sqs'],
'kms_data_key_reuse_period_seconds': [300]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 34
| 101
| 0.717391
|
560f85cc5b86defccf054e7dc048064e7ec0f1a0
| 591
|
py
|
Python
|
questions/serializers/section.py
|
Ivin0022/django-questions
|
ac241b23108a5a0083e206458d586969a9ce6ef0
|
[
"MIT"
] | null | null | null |
questions/serializers/section.py
|
Ivin0022/django-questions
|
ac241b23108a5a0083e206458d586969a9ce6ef0
|
[
"MIT"
] | 9
|
2020-06-06T02:19:16.000Z
|
2022-03-12T00:39:33.000Z
|
questions/serializers/section.py
|
Ivin0022/django-questions
|
ac241b23108a5a0083e206458d586969a9ce6ef0
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
# djangorestframework-recursive
from rest_framework_recursive.fields import RecursiveField
# local
from .question import QuestionSerializer
from ..models import Section
class SectionSerializer(serializers.ModelSerializer):
children = RecursiveField(required=False, allow_null=True, many=True)
question_set = QuestionSerializer(many=True)
class Meta:
model = Section
fields = (
'id',
'url',
'title',
'parent',
'question_set',
'children',
)
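# Hypothetical shape of the serialized output (field values are placeholders, not from the
# original module); 'children' nests further SectionSerializer payloads via RecursiveField:
#   {"id": 1, "url": "...", "title": "Intro", "parent": null,
#    "question_set": [...], "children": [{"id": 2, "title": "Sub-section", "children": [], ...}]}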
| 23.64
| 73
| 0.654822
|
b43bfa36d634429b494d5dc2926219951ef02372
| 4,044
|
py
|
Python
|
lda_classification/utils/model_selection/xgboost_features.py
|
FeryET/lda_classification
|
530f972b8955c9f51668475ef640cb644f9b3ab7
|
[
"MIT"
] | 8
|
2020-10-12T07:35:13.000Z
|
2022-02-24T21:30:31.000Z
|
lda_classification/utils/model_selection/xgboost_features.py
|
FeryET/LDAThis
|
530f972b8955c9f51668475ef640cb644f9b3ab7
|
[
"MIT"
] | null | null | null |
lda_classification/utils/model_selection/xgboost_features.py
|
FeryET/LDAThis
|
530f972b8955c9f51668475ef640cb644f9b3ab7
|
[
"MIT"
] | 3
|
2021-01-12T22:45:15.000Z
|
2022-01-15T02:25:04.000Z
|
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from tqdm import tqdm
from xgboost.sklearn import XGBClassifier, XGBRegressor
from xgboost.plotting import plot_importance
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
def _evaluate_thresholds(model, thresholds, x_train, y_train, x_test=None,
y_test=None):
results = []
for thresh in thresholds:
selection = SelectFromModel(model, threshold=thresh, prefit=True)
select_x_train = selection.transform(x_train)
selection_model = XGBClassifier()
selection_model.fit(select_x_train, y_train)
select_x_test = selection.transform(x_test)
predictions = selection_model.predict(select_x_test)
acc = accuracy_score(y_test, predictions)
results.append(
{"n_features": select_x_train.shape[1], "threshold": thresh,
"accuracy": acc * 100.0})
return results
def _optimal_values(results):
df = pd.DataFrame(results)
df["count"] = 1
df = df.groupby("n_features").sum()
df = df[df["count"] > 5]
df[["threshold", "accuracy"]] = df[["threshold", "accuracy"]].div(
df["count"], axis=0)
df = df.reset_index()
df.sort_values("accuracy", ascending=False, ignore_index=True, inplace=True)
n_features = df["n_features"][0]
threshold = df["threshold"][0]
return n_features, threshold
class XGBoostFeatureSelector(TransformerMixin, BaseEstimator):
def __init__(self, n_repeats=5, n_splits=10, **kwargs):
"""
:param n_repeats: number of repeats for inner KFold crossvalidation
:param n_splits: number of splits for inner KFold crossvalidation
:param kwargs: parameters for training the inner XGBClassifer model.
"""
self.model = XGBClassifier(**kwargs)
self.n_repeats = n_repeats
self.n_splits = n_splits
self.selected_indexes = None
def fit_transform(self, X, y=None, **fit_params):
return self.fit(X, y).transform(X)
def fit(self, X, y):
if y is None:
raise ValueError(
"y should be provided, since this is a supervised method.")
folds = RepeatedStratifiedKFold(n_repeats=self.n_repeats,
n_splits=self.n_splits)
scores = []
for train_idx, test_idx in tqdm(folds.split(X, y),
desc="Feature Selection",
total=self.n_repeats * self.n_splits):
x_train, y_train = X[train_idx], y[train_idx]
x_test, y_test = X[test_idx], y[test_idx]
self.model.fit(x_train, y_train)
thresholds = sorted(set(self.model.feature_importances_))
scores.extend(_evaluate_thresholds(self.model, thresholds, x_train,
y_train, x_test, y_test))
optimal_n_features, optimal_threshold = _optimal_values(scores)
self.model.fit(X, y)
importances = sorted(list(enumerate(self.model.feature_importances_)),
key=lambda x: x[1], reverse=True)
self.selected_indexes, _ = list(zip(*importances[:optimal_n_features]))
self.selected_indexes = np.array(self.selected_indexes)
return self
def transform(self, X):
if self.selected_indexes is None:
raise RuntimeError("You should train the feature selector first.")
return X[:, self.selected_indexes]
def plot_importance(self, *args, **kwargs):
"""
Checkout xgboost.plotting.plot_importance for a list of arguments
:param args:
:param kwargs:
:return:
"""
return plot_importance(self.model, *args, **kwargs)
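# Hypothetical usage sketch (not part of the original module): select features on a dense
# numpy array and reuse the fitted selector. The names X, y, X_new are placeholders.
#
#     selector = XGBoostFeatureSelector(n_repeats=2, n_splits=5)
#     X_reduced = selector.fit_transform(X, y)   # keeps only the optimal feature subset
#     X_new_reduced = selector.transform(X_new)  # apply the same column selection later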
| 40.848485
| 80
| 0.642681
|
e3a032529f2cc68f738bdeb818386382953d0358
| 942
|
py
|
Python
|
t99/t99_response.py
|
1099policy/ten99policy-python
|
168106808350e2d524aa6f00880c72e111ab6167
|
[
"MIT"
] | null | null | null |
t99/t99_response.py
|
1099policy/ten99policy-python
|
168106808350e2d524aa6f00880c72e111ab6167
|
[
"MIT"
] | null | null | null |
t99/t99_response.py
|
1099policy/ten99policy-python
|
168106808350e2d524aa6f00880c72e111ab6167
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import json
from collections import OrderedDict
class T99ResponseBase(object):
def __init__(self, code, headers):
self.code = code
self.headers = headers
@property
def idempotency_key(self):
try:
return self.headers["idempotency-key"]
except KeyError:
return None
@property
def request_id(self):
try:
return self.headers["request-id"]
except KeyError:
return None
class T99Response(T99ResponseBase):
def __init__(self, body, code, headers):
T99ResponseBase.__init__(self, code, headers)
self.body = body
self.data = json.loads(body, object_pairs_hook=OrderedDict)
class T99StreamResponse(T99ResponseBase):
def __init__(self, io, code, headers):
T99ResponseBase.__init__(self, code, headers)
self.io = io
| 24.789474
| 67
| 0.656051
|
e3afbae4aff54a4e41030ce1320a57370af59923
| 381
|
py
|
Python
|
auctions/admin.py
|
iSythnic/Augere-eCommerece
|
2d60874d80f762c3605f0321676ec8ba65dc4b9e
|
[
"MIT"
] | null | null | null |
auctions/admin.py
|
iSythnic/Augere-eCommerece
|
2d60874d80f762c3605f0321676ec8ba65dc4b9e
|
[
"MIT"
] | null | null | null |
auctions/admin.py
|
iSythnic/Augere-eCommerece
|
2d60874d80f762c3605f0321676ec8ba65dc4b9e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import User, listings, comments, bids, categories
# Register your models here.
class userAdmin(admin.ModelAdmin):
list_display = ("id", "first_name", "last_name", "username")
admin.site.register(User, userAdmin)
admin.site.register(listings)
admin.site.register(comments)
admin.site.register(bids)
admin.site.register(categories)
| 31.75
| 64
| 0.782152
|
2e65c1ad65be2923f4438dc8e2a6c25f0890aa18
| 62
|
py
|
Python
|
HelloWorld.py
|
phango-767/2021_refresher
|
6610649b864afc886a54e14e7c45e24f245718ba
|
[
"MIT"
] | null | null | null |
HelloWorld.py
|
phango-767/2021_refresher
|
6610649b864afc886a54e14e7c45e24f245718ba
|
[
"MIT"
] | null | null | null |
HelloWorld.py
|
phango-767/2021_refresher
|
6610649b864afc886a54e14e7c45e24f245718ba
|
[
"MIT"
] | null | null | null |
#this program prints "Hello, World!"
print("Hello, World!")
| 12.4
| 36
| 0.677419
|
33607c73baa1c2413a92e399ab8d98fdb44e07c8
| 1,580
|
py
|
Python
|
homework5/main.py
|
Bodhert/StandfordAlgorithmCourse
|
025547dcddbf6524357c383dbfbbb02a6b4d0822
|
[
"MIT"
] | null | null | null |
homework5/main.py
|
Bodhert/StandfordAlgorithmCourse
|
025547dcddbf6524357c383dbfbbb02a6b4d0822
|
[
"MIT"
] | null | null | null |
homework5/main.py
|
Bodhert/StandfordAlgorithmCourse
|
025547dcddbf6524357c383dbfbbb02a6b4d0822
|
[
"MIT"
] | 1
|
2020-02-02T21:33:22.000Z
|
2020-02-02T21:33:22.000Z
|
from PriorityQueue import PriorityQueue
MAXN = 205
adjList = {}
dist = []
def dijskstra(source):
global dist
global adjList
pq = PriorityQueue()
dist[source] = 0
pq.insertNode((source, 0))
while pq.getSize() > 1:
currPair = pq.getMin()
currNodeIndex = currPair[0]
currWeight = currPair[1]
if( currWeight > dist[currNodeIndex]):
continue
for neighbor in adjList[currNodeIndex]:
nextNodeIndex = neighbor[0]
nextWeight = neighbor[1]
if dist[currNodeIndex] + nextWeight < dist[nextNodeIndex]:
dist[nextNodeIndex] = dist[currNodeIndex] + nextWeight
pq.insertNode(neighbor)
def setVariables():
global dist
global MAXN
dist = [float('inf')] * MAXN
def buildGraphFromInput():
global adjList
with open('dijkstraData.txt') as inputFile:
for line in inputFile:
source, *dest = map(str, line.split())
source = int(source)
dest = [tuple(element.split(',')) for element in dest]
dest = [tuple(map(int, tup)) for tup in dest]
adjList[source] = dest
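# Expected 'dijkstraData.txt' layout (hypothetical sample line): a vertex id followed by
# "neighbour,weight" pairs, whitespace separated, e.g.
#   1 2,100 3,250
# which the loop above parses into adjList[1] = [(2, 100), (3, 250)].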
def main():
global dist
buildGraphFromInput()
setVariables()
dijskstra(1)
print(dist[7])
print(dist[37])
print(dist[59])
print(dist[82])
print(dist[99])
print(dist[115])
print(dist[133])
print(dist[165])
print(dist[188])
print(dist[197])
# 7,37,59,82,99,115,133,165,188,197
if __name__ == '__main__':
main()
| 21.944444
| 70
| 0.577848
|
d90348f71c21b29aed2e40f20efc44b4c71de8d0
| 1,991
|
py
|
Python
|
tests/test_symbols.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 8
|
2020-10-12T02:59:03.000Z
|
2022-03-20T15:06:50.000Z
|
tests/test_symbols.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 3
|
2020-10-13T16:30:09.000Z
|
2021-01-07T23:57:05.000Z
|
tests/test_symbols.py
|
pkjmesra/nseta
|
28cd8cede465efe9f506a38c5933602c463e5185
|
[
"MIT"
] | 5
|
2020-10-12T14:57:41.000Z
|
2021-12-30T11:52:34.000Z
|
# -*- coding: utf-8 -*-
import unittest
import six
from nseta.common.symbols import get_symbol_list, get_index_constituents_list
from baseUnitTest import baseUnitTest
class TestSymbols(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
def test_symbol_list(self):
df = get_symbol_list()
# Check popular names are in the list
_ril = df['SYMBOL'] == 'RELIANCE'
# Expect 1 row
self.assertEqual(df[_ril].shape[0], 1)
_sbi = df['SYMBOL'] == 'SBIN'
# Check company matches the expected value
self.assertEqual(df[_sbi].iloc[0].get(
'NAME OF COMPANY'), 'State Bank of India')
def test_index_constituents_list(self):
df = get_index_constituents_list('NIFTY50')
# Check for 50 items
self.assertEqual(df.shape[0], 50)
# Check popular names are in the list
_sbi = df['Symbol'] == 'SBIN'
# Check company matches the expected value
self.assertEqual(df[_sbi].iloc[0].get(
'Company Name'), 'State Bank of India')
self.assertEqual(df[_sbi].iloc[0].get(
'Industry'), 'FINANCIAL SERVICES')
df = get_index_constituents_list('NIFTYCPSE')
# Check popular names are in the list
_oil = df['Symbol'] == 'OIL'
# Check company matches the expected value
self.assertEqual(df[_oil].iloc[0].get('ISIN Code'), 'INE274J01014')
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSymbols)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
| 33.745763
| 78
| 0.607735
|
4d1ab92b84eb8661ad4dd4f3242aaaebc043e368
| 188
|
py
|
Python
|
chat_playground/chat.py
|
maxhumber/fikiwiki
|
db196c1e3e2bb27d1f8f3dea774227b8dd5682e3
|
[
"MIT"
] | null | null | null |
chat_playground/chat.py
|
maxhumber/fikiwiki
|
db196c1e3e2bb27d1f8f3dea774227b8dd5682e3
|
[
"MIT"
] | null | null | null |
chat_playground/chat.py
|
maxhumber/fikiwiki
|
db196c1e3e2bb27d1f8f3dea774227b8dd5682e3
|
[
"MIT"
] | 2
|
2020-10-09T09:24:49.000Z
|
2020-10-21T17:31:50.000Z
|
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@app.route('/')
def
| 18.8
| 41
| 0.755319
|
cb86e78770b5b9e9c08f7a3bd48dee0ae299c6fb
| 1,866
|
py
|
Python
|
src/network/receivequeuethread.py
|
iljah/PyBitmessage
|
5dbe832a1a4fde5d67ec9d13631a2d6733d47730
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/network/receivequeuethread.py
|
iljah/PyBitmessage
|
5dbe832a1a4fde5d67ec9d13631a2d6733d47730
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/network/receivequeuethread.py
|
iljah/PyBitmessage
|
5dbe832a1a4fde5d67ec9d13631a2d6733d47730
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
Process data incoming from network
"""
import errno
import queue
import socket
import state
from network.advanceddispatcher import UnknownStateError
from network.connectionpool import BMConnectionPool
from queues import receiveDataQueue
from network.threads import StoppableThread
class ReceiveQueueThread(StoppableThread):
"""This thread processes data received from the network
(which is done by the asyncore thread)"""
def __init__(self, num=0):
super(ReceiveQueueThread, self).__init__(name="ReceiveQueue_%i" % num)
def run(self):
while not self._stopped and state.shutdown == 0:
try:
dest = receiveDataQueue.get(block=True, timeout=1)
except queue.Empty:
continue
if self._stopped or state.shutdown:
break
# cycle as long as there is data
# state_* methods should return False if there isn't
# enough data, or the connection is to be aborted
try:
connection = BMConnectionPool().getConnectionByAddr(dest)
# connection object not found
except KeyError:
receiveDataQueue.task_done()
continue
try:
connection.process()
# state isn't implemented
except UnknownStateError:
pass
except socket.error as err:
if err.errno == errno.EBADF:
connection.set_state("close", 0)
else:
self.logger.error('Socket error: %s', err)
except:
self.logger.error('Error processing', exc_info=True)
receiveDataQueue.task_done()
| 32.736842
| 78
| 0.60075
|
0c8e4ff01cb7ee76e0b33693343d47df1fe9f084
| 1,933
|
py
|
Python
|
pyscfad/pbc/gto/test/test_cell.py
|
yangdatou/pyscfad
|
8b90c928928f8244237e5fe415858e074dd5e5fb
|
[
"MIT"
] | 9
|
2021-05-22T07:39:23.000Z
|
2021-11-13T23:25:50.000Z
|
pyscfad/pbc/gto/test/test_cell.py
|
yangdatou/pyscfad
|
8b90c928928f8244237e5fe415858e074dd5e5fb
|
[
"MIT"
] | 1
|
2021-05-22T08:28:17.000Z
|
2021-05-23T04:29:02.000Z
|
pyscfad/pbc/gto/test/test_cell.py
|
yangdatou/pyscfad
|
8b90c928928f8244237e5fe415858e074dd5e5fb
|
[
"MIT"
] | 1
|
2021-09-13T18:34:58.000Z
|
2021-09-13T18:34:58.000Z
|
import pytest
import numpy
import jax
from pyscfad.lib import numpy as jnp
from pyscfad.pbc import gto
@pytest.fixture
def get_cell():
cell = gto.Cell()
cell.atom = '''Si 0., 0., 0.
Si 1.3467560987, 1.3467560987, 1.3467560987'''
cell.a = '''0. 2.6935121974 2.6935121974
2.6935121974 0. 2.6935121974
2.6935121974 2.6935121974 0. '''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.mesh = [5,5,5]
cell.build(trace_coords=True)
return cell
def test_SI(get_cell):
cell = get_cell
Gv = cell.get_Gv()
SI = cell.get_SI()
natm = cell.natm
ng = Gv.shape[0]
g0 = numpy.zeros((natm,ng,natm,3), dtype=numpy.complex128)
for i in range(natm):
g0[i,:,i] += -1j * numpy.einsum("gx,g->gx", Gv, SI[i])
jac_fwd = jax.jacfwd(cell.__class__.get_SI)(cell)
assert abs(jac_fwd.coords - g0).max() < 1e-10
# NOTE vjp for functions f:R->C will lose the imaginary part,
# and reverse-mode autodiff will fail in such cases. For
# functions f:R->R or f:C->C, both jvp and vjp will work.
_, func_vjp = jax.vjp(cell.__class__.get_SI, cell)
ct = jnp.eye((natm*ng), dtype=jnp.complex128).reshape(natm*ng,natm,ng)
jac_bwd = jax.vmap(func_vjp)(ct)[0].coords.reshape(natm,ng,natm,3)
assert abs(jac_bwd - g0.real).max() < 1e-10
def fun(cell):
out = []
SI = cell.get_SI()
for i in range(natm):
out.append((SI[i] * SI[i].conj()).real.sum())
return out
jac_fwd = jax.jacfwd(fun)(cell)
jac_bwd = jax.jacrev(fun)(cell)
norm = fun(cell)
for i in range(natm):
grad = jnp.einsum("gnx,g->nx", g0[i], SI[i].conj())
grad += grad.conj()
grad = (grad * 0.5 / norm[i]).real
assert abs(grad - jac_fwd[i].coords).max() < 1e-10
assert abs(grad - jac_bwd[i].coords).max() < 1e-10
| 33.912281
| 74
| 0.579928
|
53c5856097bb1ca240a016b20318df10be7968df
| 3,207
|
py
|
Python
|
external-contacts/python/external-contacts.py
|
PrinceMerluza/developercenter-tutorials
|
571512d304d5d6d49b6fc1a208e0e01f5aa89d65
|
[
"MIT"
] | 26
|
2016-04-19T13:35:48.000Z
|
2022-01-12T15:36:46.000Z
|
external-contacts/python/external-contacts.py
|
PrinceMerluza/developercenter-tutorials
|
571512d304d5d6d49b6fc1a208e0e01f5aa89d65
|
[
"MIT"
] | 28
|
2016-04-14T13:55:17.000Z
|
2022-02-18T15:41:28.000Z
|
external-contacts/python/external-contacts.py
|
PrinceMerluza/developercenter-tutorials
|
571512d304d5d6d49b6fc1a208e0e01f5aa89d65
|
[
"MIT"
] | 41
|
2016-02-10T18:41:42.000Z
|
2022-02-17T08:48:54.000Z
|
import base64, csv, sys, requests, os
import PureCloudPlatformClientV2
from pprint import pprint
from PureCloudPlatformClientV2.rest import ApiException
print('-------------------------------------------------------------')
print('- Python3 External Contacts -')
print('-------------------------------------------------------------')
# Credentials
CLIENT_ID = os.environ['GENESYS_CLOUD_CLIENT_ID']
CLIENT_SECRET = os.environ['GENESYS_CLOUD_CLIENT_SECRET']
ORG_REGION = os.environ['GENESYS_CLOUD_REGION'] # eg. us_east_1
# Set environment
region = PureCloudPlatformClientV2.PureCloudRegionHosts[ORG_REGION]
PureCloudPlatformClientV2.configuration.host = region.get_api_host()
# OAuth when using Client Credentials
api_client = PureCloudPlatformClientV2.api_client.ApiClient() \
.get_client_credentials_token(CLIENT_ID, CLIENT_SECRET)
# Create an instance of the External Contacts API
external_contacts_api = PureCloudPlatformClientV2.ExternalContactsApi(api_client)
# Define a new External Organization
new_org = PureCloudPlatformClientV2.ExternalOrganization()
new_org.name = "Developer Tutorial Company"
new_org.industry = "Software"
new_org.address = PureCloudPlatformClientV2.ContactAddress()
new_org.address.address1 = "601 Interactive Way"
new_org.address.city = "Indianapolis"
new_org.address.state = "Indiana"
new_org.address.postal_code = "46278"
new_org.address.country_code = "USA"
new_org.employee_count = 2000
new_org.websites = ["https://developer.mypurecloud.com"]
new_org.twitter_id = PureCloudPlatformClientV2.TwitterId()
new_org.twitter_id.screen_name = 'GenesysCloudDev'
try:
# Create an external organization
new_org_response = external_contacts_api.post_externalcontacts_organizations(new_org)
org_id = new_org_response.id
print(f"Created organization {org_id}")
except ApiException as e:
print("Exception when calling ExternalContactsApi->post_externalcontacts_organizations: %s\n" % e)
sys.exit()
# Loop through the CSV file and add each contact
with open("contacts.csv", mode="r", encoding='utf-8-sig') as csv_file:
csv_reader = csv.DictReader(csv_file)
print("Adding contacts...")
for row in csv_reader:
new_contact = PureCloudPlatformClientV2.ExternalContact()
new_contact.first_name = row["GivenName"]
new_contact.last_name = row["Surname"]
new_contact.title = row["Title"]
new_contact.work_phone = PureCloudPlatformClientV2.PhoneNumber()
new_contact.work_phone.display = row["TelephoneNumber"]
new_contact.address = PureCloudPlatformClientV2.ContactAddress()
new_contact.address.address1 = row["StreetAddress"]
new_contact.address.city = row["City"]
new_contact.address.postal_code = row["ZipCode"]
new_contact.work_email = row["EmailAddress"]
new_contact.external_organization = new_org_response
try:
# Create an external contact
api_response = external_contacts_api.post_externalcontacts_contacts(new_contact)
pprint(api_response)
except ApiException as e:
print(f"Error occurred when adding {new_contact.first_name}")
print("All contacts added.")
| 41.649351
| 102
| 0.732148
|
6aa54f21aa2273be6f62c08535ee3340e840e64b
| 367
|
py
|
Python
|
server/inference_api/v1/status.py
|
TreeinRandomForest/logo-detector
|
c957032b1fcbce32dfd55a3a21e2ace44ee0ee4b
|
[
"MIT"
] | 1
|
2021-03-25T17:09:40.000Z
|
2021-03-25T17:09:40.000Z
|
server/inference_api/v1/status.py
|
TreeinRandomForest/logo-detector
|
c957032b1fcbce32dfd55a3a21e2ace44ee0ee4b
|
[
"MIT"
] | 6
|
2021-03-10T09:57:47.000Z
|
2022-03-12T00:21:00.000Z
|
server/inference_api/v1/status.py
|
TreeinRandomForest/logo-detector
|
c957032b1fcbce32dfd55a3a21e2ace44ee0ee4b
|
[
"MIT"
] | 1
|
2020-04-06T14:20:31.000Z
|
2020-04-06T14:20:31.000Z
|
import sys
import time
from flask import jsonify
from inference_api.v1 import v1
@v1.route('/status', methods=['GET'])
def status():
status_obj = {
'version': '0.1',
'python_version': '.'.join(str(n) for n in sys.version_info),
'status': 'ok',
'time': time.strftime('%A %B, %d %Y %H:%M:%S')
}
return jsonify(status_obj)
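# Hypothetical request/response sketch (the URL prefix depends on how the `v1` blueprint
# is registered, so the path below is an assumption):
#   GET .../v1/status  ->  {"version": "0.1", "python_version": "...", "status": "ok", "time": "..."}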
| 21.588235
| 69
| 0.588556
|
72aed8172338f24ccd45723b981472d76c869d00
| 468
|
py
|
Python
|
icekit/appsettings.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 52
|
2016-09-13T03:50:58.000Z
|
2022-02-23T16:25:08.000Z
|
icekit/appsettings.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 304
|
2016-08-11T14:17:30.000Z
|
2020-07-22T13:35:18.000Z
|
icekit/appsettings.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 12
|
2016-09-21T18:46:35.000Z
|
2021-02-15T19:37:50.000Z
|
from django.conf import settings
ICEKIT = getattr(settings, 'ICEKIT', {})
# Sources for `icekit.plugins.FileSystemLayoutPlugin`.
LAYOUT_TEMPLATES = ICEKIT.get('LAYOUT_TEMPLATES', [])
# File class referenced by `icekit.plugins.file.abstract_models.AbstractFileItem`.
FILE_CLASS = ICEKIT.get('FILE_CLASS', 'icekit_plugins_file.File')
DASHBOARD_FEATURED_APPS = ICEKIT.get('DASHBOARD_FEATURED_APPS', ())
DASHBOARD_SORTED_APPS = ICEKIT.get('DASHBOARD_SORTED_APPS', ())
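# Hypothetical Django settings snippet these lookups expect (all keys are optional and
# fall back to the defaults above; values here are placeholders):
#   ICEKIT = {
#       "LAYOUT_TEMPLATES": [],
#       "FILE_CLASS": "icekit_plugins_file.File",
#       "DASHBOARD_FEATURED_APPS": (),
#       "DASHBOARD_SORTED_APPS": (),
#   }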
| 36
| 82
| 0.786325
|
6abad205f9fcdba16d2898aceb79cee44cc4d870
| 5,578
|
py
|
Python
|
train_grnn.py
|
airalcorn2/baller2vec
|
bfe0cc4d7988bd8104d7ef3ecd22867b275310ec
|
[
"MIT"
] | 54
|
2021-02-08T02:20:58.000Z
|
2021-08-10T05:14:51.000Z
|
train_grnn.py
|
airalcorn2/baller2vec
|
bfe0cc4d7988bd8104d7ef3ecd22867b275310ec
|
[
"MIT"
] | 4
|
2021-03-18T14:56:01.000Z
|
2021-09-28T21:18:48.000Z
|
train_grnn.py
|
airalcorn2/baller2vec
|
bfe0cc4d7988bd8104d7ef3ecd22867b275310ec
|
[
"MIT"
] | 4
|
2021-02-11T23:10:18.000Z
|
2021-08-15T06:30:29.000Z
|
import sys
import time
import torch
import yaml
from grnn import GRNN
from settings import *
from torch import nn, optim
from train_baller2vec import init_datasets
SEED = 2010
torch.manual_seed(SEED)
torch.set_printoptions(linewidth=160)
def init_model(opts, train_dataset):
model_config = opts["model"]
# Add one for the generic player.
model_config["n_player_ids"] = train_dataset.n_player_ids + 1
model_config["seq_len"] = train_dataset.chunk_size // train_dataset.hz - 1
model_config["n_players"] = train_dataset.n_players
model_config["n_player_labels"] = train_dataset.player_traj_n ** 2
model = GRNN(**model_config)
return model
def get_preds_labels(tensors):
player_trajs = tensors["player_trajs"].flatten()
n_player_trajs = len(player_trajs)
labels = player_trajs.to(device)
preds = model(tensors)["player"][:n_player_trajs]
return (preds, labels)
def train_model():
# Initialize optimizer.
train_params = [params for params in model.parameters()]
optimizer = optim.Adam(train_params, lr=opts["train"]["learning_rate"])
criterion = nn.CrossEntropyLoss()
# Continue training on a prematurely terminated model.
try:
model.load_state_dict(torch.load(f"{JOB_DIR}/best_params.pth"))
try:
optimizer.load_state_dict(torch.load(f"{JOB_DIR}/optimizer.pth"))
except ValueError:
print("Old optimizer doesn't match.")
except FileNotFoundError:
pass
best_train_loss = float("inf")
best_valid_loss = float("inf")
test_loss_best_valid = float("inf")
total_train_loss = None
no_improvement = 0
for epoch in range(175):
print(f"\nepoch: {epoch}", flush=True)
model.eval()
total_valid_loss = 0.0
with torch.no_grad():
n_valid = 0
for valid_tensors in valid_loader:
# Skip bad sequences.
if len(valid_tensors["player_idxs"]) < model.seq_len:
continue
(preds, labels) = get_preds_labels(valid_tensors)
loss = criterion(preds, labels)
total_valid_loss += loss.item()
n_valid += 1
probs = torch.softmax(preds, dim=1)
print(probs.view(model.seq_len, model.n_players), flush=True)
print(preds.view(model.seq_len, model.n_players), flush=True)
print(labels.view(model.seq_len, model.n_players), flush=True)
total_valid_loss /= n_valid
if total_valid_loss < best_valid_loss:
best_valid_loss = total_valid_loss
torch.save(optimizer.state_dict(), f"{JOB_DIR}/optimizer.pth")
torch.save(model.state_dict(), f"{JOB_DIR}/best_params.pth")
test_loss_best_valid = 0.0
with torch.no_grad():
n_test = 0
for test_tensors in test_loader:
# Skip bad sequences.
if len(test_tensors["player_idxs"]) < model.seq_len:
continue
(preds, labels) = get_preds_labels(test_tensors)
loss = criterion(preds, labels)
test_loss_best_valid += loss.item()
n_test += 1
test_loss_best_valid /= n_test
elif no_improvement < patience:
no_improvement += 1
if no_improvement == patience:
print("Reducing learning rate.")
optimizer = optim.Adam(
train_params, lr=0.1 * opts["train"]["learning_rate"]
)
print(f"total_train_loss: {total_train_loss}")
print(f"best_train_loss: {best_train_loss}")
print(f"total_valid_loss: {total_valid_loss}")
print(f"best_valid_loss: {best_valid_loss}")
print(f"test_loss_best_valid: {test_loss_best_valid}")
model.train()
total_train_loss = 0.0
n_train = 0
start_time = time.time()
for (train_idx, train_tensors) in enumerate(train_loader):
if train_idx % 1000 == 0:
print(train_idx, flush=True)
# Skip bad sequences.
if len(train_tensors["player_idxs"]) < model.seq_len:
continue
optimizer.zero_grad()
(preds, labels) = get_preds_labels(train_tensors)
loss = criterion(preds, labels)
total_train_loss += loss.item()
loss.backward()
optimizer.step()
n_train += 1
epoch_time = time.time() - start_time
total_train_loss /= n_train
if total_train_loss < best_train_loss:
best_train_loss = total_train_loss
print(f"epoch_time: {epoch_time:.2f}", flush=True)
if __name__ == "__main__":
JOB = sys.argv[1]
JOB_DIR = f"{EXPERIMENTS_DIR}/{JOB}"
try:
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[2]
except IndexError:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
opts = yaml.safe_load(open(f"{JOB_DIR}/{JOB}.yaml"))
patience = opts["train"]["patience"]
# Initialize datasets.
(
train_dataset,
train_loader,
valid_dataset,
valid_loader,
test_dataset,
test_loader,
) = init_datasets(opts)
# Initialize model.
device = torch.device("cuda:0")
model = init_model(opts, train_dataset).to(device)
print(model)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Parameters: {n_params}")
train_model()
| 31.874286
| 78
| 0.609896
|
87c88e9c21030b0920cbf578215b9fa41e06fd7a
| 201
|
py
|
Python
|
framework/cei_python3/setup.py
|
macomfan/cei
|
49efb1baf39e0bb3e390791fafa3508226644975
|
[
"MIT"
] | 2
|
2020-05-09T01:54:04.000Z
|
2020-12-31T02:36:45.000Z
|
framework/cei_python3/setup.py
|
macomfan/cei
|
49efb1baf39e0bb3e390791fafa3508226644975
|
[
"MIT"
] | 27
|
2020-04-18T11:21:07.000Z
|
2022-02-26T22:22:33.000Z
|
framework/cei_python3/setup.py
|
macomfan/cei
|
49efb1baf39e0bb3e390791fafa3508226644975
|
[
"MIT"
] | 1
|
2020-04-26T10:58:02.000Z
|
2020-04-26T10:58:02.000Z
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name="cei-python",
version="0.0.1",
packages=['impl'],
install_requires=['requests', 'urllib3', 'websocket-client']
)
| 22.333333
| 65
| 0.631841
|
814093a03040e2655d2280327f5d1efe4fd40a0f
| 395
|
py
|
Python
|
zoo/services/migrations/0007_service_service_url.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 90
|
2018-11-20T10:58:24.000Z
|
2022-02-19T16:12:46.000Z
|
zoo/services/migrations/0007_service_service_url.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 348
|
2018-11-21T09:22:31.000Z
|
2021-11-03T13:45:08.000Z
|
zoo/services/migrations/0007_service_service_url.py
|
aexvir/the-zoo
|
7816afb9a0a26c6058b030b4a987c73e952d92bd
|
[
"MIT"
] | 11
|
2018-12-08T18:42:07.000Z
|
2021-02-21T06:27:58.000Z
|
# Generated by Django 2.1 on 2018-08-28 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("services", "0006_service_name_slug")]
operations = [
migrations.AddField(
model_name="service",
name="service_url",
field=models.URLField(blank=True, max_length=500, null=True),
)
]
| 23.235294
| 73
| 0.635443
|
9a61dd931ac9b7805c2da603e4fd34bd11d23c6f
| 3,577
|
py
|
Python
|
main.py
|
Omicron02/SNEK
|
9388168697fe09f9f3c5b81800d3f6fd8ef0cf36
|
[
"MIT"
] | null | null | null |
main.py
|
Omicron02/SNEK
|
9388168697fe09f9f3c5b81800d3f6fd8ef0cf36
|
[
"MIT"
] | null | null | null |
main.py
|
Omicron02/SNEK
|
9388168697fe09f9f3c5b81800d3f6fd8ef0cf36
|
[
"MIT"
] | null | null | null |
import pygame
import time
import random
pygame.init()
pygame.mixer.init()
EAT_SOUND = pygame.mixer.Sound('./add.mp3')
END_SOUND = pygame.mixer.Sound('./end.mp3')
width,height=800,600#screen
disp=pygame.display.set_mode((width,height))
pygame.display.set_caption("SNEK")
green,red,black,white,brown=(0,204,153),(255,8,0),(0,0,0),(255,255,255),(165,42,42)
font_style=pygame.font.SysFont(None,30)
cell=20
def get_food_position(width, height, body):
while True:
food_x=round(random.randrange(0,width-cell)/cell)*cell
food_y=round(random.randrange(0,height-cell)/cell)*cell
if [food_x, food_y] not in body:
return food_x, food_y
def gameloop():
end=0
x,y,x1,y1=width/2,height/2,0,0#x,y->head pos;x1,y1->change in pos
snake_speed=10
level = 1
body,blen=[],1
clk=pygame.time.Clock()
food_x, food_y= get_food_position(width,height, body)
while not end:
for event in pygame.event.get():
if event.type==pygame.QUIT:
end=1
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
x1,y1=-cell,0
elif event.key==pygame.K_UP:
x1,y1=-0,-cell
elif event.key==pygame.K_RIGHT:
x1,y1=cell,0
elif event.key==pygame.K_DOWN:
x1,y1=0,cell
x+=x1;y+=y1
if x>width or x<0 or y>height or y<0:#screen boundary condition
break
disp.fill(black)
pygame.draw.rect(disp,red,[food_x,food_y,cell,cell])
head=[]
head.append(x);head.append(y)
body.append(head)#append new head to body
for block in body[:blen-1]:
if block==head:#snake head touches body
end=1
if len(body)>blen:#snake movement display
del body[0]
for block in body:
pygame.draw.rect(disp,green,[block[0],block[1],cell,cell])
score=font_style.render("Score: "+str(blen-1),True,white)
disp.blit(score,[0,0])
pygame.display.update()
level_display = font_style.render("Level: " + str(level), True, white)
disp.blit(level_display,[(width - 80),0])
pygame.display.update()
speed_display = font_style.render("Speed: " + str(snake_speed), True, white)
disp.blit(speed_display,[10,height-20])
pygame.display.update()
if food_x==x and food_y==y:#contact with food
food_x, food_y= get_food_position(width,height, body)
blen+=1#body length increases
EAT_SOUND.play()
if snake_speed<30: snake_speed+=0.5;
if(blen % 10 == 1):
level += 1
clk.tick(snake_speed)#fps
clk.tick(snake_speed)
disp.fill(black)
m=font_style.render("Game Over",True,red)
END_SOUND.play()
disp.blit(m,[(width/2)-40,height/2])
f = open("score.txt","a")
f.write(str(blen-1)+"\n")
f.close()
with open("score.txt", "r") as f:
score = f.read() # Read the whole file in case values are not on a single line
score_ints = [ int(x) for x in score.split() ] # Convert strings to ints
highscore = max(score_ints) # highest score recorded so far
f_score=font_style.render("Score: "+str(blen-1),True,white)
disp.blit(f_score,[(width/2)-30,(height/2)+27])
f_hscore=font_style.render("High Score: "+str(highscore),True,white)
disp.blit(f_hscore,[(width/2)-50,(height/2)+50])
pygame.display.update()
time.sleep(2)
pygame.quit()
quit()
gameloop()
| 36.5
| 84
| 0.598546
|
52a3ccd7a93d4cd8ae9e92a663445d79b3a1797d
| 4,237
|
py
|
Python
|
shadow4tests/test_beamline/old/test_empty_against_shadow3.py
|
srio/shadow4tests
|
7123475d830fa619a866dbde9afe28a9ff405dfd
|
[
"MIT"
] | null | null | null |
shadow4tests/test_beamline/old/test_empty_against_shadow3.py
|
srio/shadow4tests
|
7123475d830fa619a866dbde9afe28a9ff405dfd
|
[
"MIT"
] | null | null | null |
shadow4tests/test_beamline/old/test_empty_against_shadow3.py
|
srio/shadow4tests
|
7123475d830fa619a866dbde9afe28a9ff405dfd
|
[
"MIT"
] | null | null | null |
import numpy
from shadow4.sources.source_geometrical.source_geometrical import SourceGeometrical
from shadow4.beamline.optical_elements.ideal_elements.s4_empty import S4EmptyElement
import Shadow
from Shadow.ShadowTools import plotxy
from shadow4tests.compatibility.beam3 import Beam3
from numpy.testing import assert_almost_equal
from shadow4tests.compatibility.global_definitions import SHADOW3_BINARY
class FakeOE():
pass
def test_empty_element( do_plot=0,
do_assert = True,
do_shadow3_fortran = True,
N = 1000,
alpha_deg = None, # 20, # None=randomize
theta1_deg = None, # 10.0, # None=randomize
theta2_deg = None, # 170.0, # None=randomize
p = None, # 15.0, # None=randomize
q = None, # 100.0 # None=randomize
):
source = SourceGeometrical()
source.set_angular_distribution_gaussian(1e-6,1e-6)
beam0 = source.calculate_beam(N=N, POL_DEG=1)
print(beam0.info())
beam0s3 = Beam3.initialize_from_shadow4_beam(beam0)
beam1s3 = Beam3.initialize_from_shadow4_beam(beam0)
if alpha_deg is None: alpha_deg = numpy.random.random() * 360.0
if theta1_deg is None: theta1_deg = numpy.random.random() * 90.0
if theta2_deg is None: theta2_deg = numpy.random.random() * 180.0
if p is None: p = numpy.random.random() * 100.0
if q is None: q = numpy.random.random() * 100.0
#
# shadow4
#
empty = S4EmptyElement()
empty.get_coordinates().set_positions(angle_radial=theta1_deg*numpy.pi/180,
angle_radial_out=theta2_deg*numpy.pi/180,
angle_azimuthal=alpha_deg*numpy.pi/180, p=p, q=q)
beam1, mirr1 = empty.trace_beam(beam0)
#
# shadow3
#
oe1 = Shadow.OE()
oe1.ALPHA = alpha_deg
oe1.DUMMY = 100.0
oe1.FWRITE = 0 # 1
oe1.F_REFRAC = 2
oe1.T_IMAGE = q
oe1.T_INCIDENCE = theta1_deg
oe1.T_REFLECTION = theta2_deg
oe1.T_SOURCE = p
if do_shadow3_fortran:
import os
os.system("/bin/rm begin.dat start.01 star.01")
beam0s3.write("begin.dat")
oe1.write("start.01")
f = open("systemfile.dat","w")
f.write("start.01\n")
f.close()
f = open("shadow3.inp","w")
f.write("trace\nsystemfile\n0\nexit\n")
f.close()
os.system("%s < shadow3.inp" % SHADOW3_BINARY)
beam1f = Beam3(N=N)
beam1f.load("star.01")
beam1s3.traceOE(oe1,1)
if do_plot:
plotxy(beam1, 4, 6, title="Image shadow4", nbins=201)
plotxy(beam1s3, 4, 6, title="Image shadow3", nbins=201)
print("alpha_deg, theta1_deg, theta2_deg = ",alpha_deg, theta1_deg, theta2_deg)
print("p, q = ", p, q)
print("\ncol# shadow4 shadow3 (shadow3_fortran) (source)")
for i in range(18):
if do_shadow3_fortran:
print("col%d %f %f %f %f " % (i + 1, beam1.rays[0, i], beam1s3.rays[0, i],
beam1f.rays[0, i], beam0s3.rays[0, i]))
else:
print("col%d %f %f " % (i+1, beam1.rays[0,i], beam1s3.rays[0,i]))
if do_assert:
assert_almost_equal (beam1.rays[:,i], beam1s3.rays[:,i], 4)
if __name__ == "__main__":
# a first test with plots
test_empty_element(do_plot=False,
do_assert = True,
do_shadow3_fortran = True,
N = 1000,
alpha_deg=20,
theta1_deg = 10.0,
theta2_deg = 170.0,
p = 15.0,
q = 100.0)
# 10 random tests
for i in range(10):
test_empty_element(do_plot=0,
do_assert = True,
do_shadow3_fortran = True,
N = 1000,
alpha_deg=None,
theta1_deg = None,
theta2_deg = None,
p = None,
q = None)
| 30.927007
| 91
| 0.540005
|
b512bac2af8bfdf24933dd5b360eb83b4d04d5d4
| 754
|
py
|
Python
|
telegram_bot/handlers/errors/error_handler.py
|
Oorzhakau/TeamForce_bot
|
b8037d53b228bc2ab5149fa67dde6bea17f25a65
|
[
"MIT"
] | null | null | null |
telegram_bot/handlers/errors/error_handler.py
|
Oorzhakau/TeamForce_bot
|
b8037d53b228bc2ab5149fa67dde6bea17f25a65
|
[
"MIT"
] | null | null | null |
telegram_bot/handlers/errors/error_handler.py
|
Oorzhakau/TeamForce_bot
|
b8037d53b228bc2ab5149fa67dde6bea17f25a65
|
[
"MIT"
] | null | null | null |
import logging
from aiogram.utils.exceptions import (
CantParseEntities,
MessageNotModified,
TelegramAPIError,
)
from loader import dp
@dp.errors_handler()
async def errors_handler(update, exception):
"""Error handler that intercepts all exceptions."""
if isinstance(exception, MessageNotModified):
logging.exception("Message is not modified")
return True
if isinstance(exception, CantParseEntities):
logging.exception(f"CantParseEntities: {exception} \nUpdate: {update}")
return True
if isinstance(exception, TelegramAPIError):
logging.exception(f"TelegramAPIError: {exception} \nUpdate: {update}")
return True
logging.exception(f"Update: {update} \n{exception}")
| 26
| 79
| 0.712202
|
f9a1cdc65d56e0139bf6c9a0e07609f4a1ee953b
| 986
|
py
|
Python
|
blog/migrations/0001_initial.py
|
vierageorge/bootstrapDeploy
|
dd55a242b8ea11cf949a90a884b678453549eaca
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
vierageorge/bootstrapDeploy
|
dd55a242b8ea11cf949a90a884b678453549eaca
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
vierageorge/bootstrapDeploy
|
dd55a242b8ea11cf949a90a884b678453549eaca
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-04-24 21:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.866667
| 120
| 0.634888
|
b9a45a87ceb6daa50fe912858b5531845cfb78dc
| 28,485
|
py
|
Python
|
paddlenlp/transformers/unified_transformer/tokenizer.py
|
qhpeklh5959/PaddleNLP
|
64a56737d57debfbc7b4c970b254d89dd4a07048
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/unified_transformer/tokenizer.py
|
qhpeklh5959/PaddleNLP
|
64a56737d57debfbc7b4c970b254d89dd4a07048
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/unified_transformer/tokenizer.py
|
qhpeklh5959/PaddleNLP
|
64a56737d57debfbc7b4c970b254d89dd4a07048
|
[
"Apache-2.0"
] | 1
|
2021-04-28T09:01:37.000Z
|
2021-04-28T09:01:37.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import re
import unicodedata
from shutil import copyfile
import numpy as np
import jieba
import paddle
from paddle.utils import try_import
from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control
from ...data.vocab import Vocab
__all__ = ['UnifiedTransformerTokenizer']
class UnifiedTransformerTokenizer(PretrainedTokenizer):
resource_files_names = {
"vocab_file": "vocab.txt",
"sentencepiece_model_file": "spm.model",
} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"unified_transformer-12L-cn":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/unified_transformer-12L-cn-vocab.txt",
"unified_transformer-12L-cn-luge":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/unified_transformer-12L-cn-vocab.txt",
"plato-mini":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/plato-mini-vocab.txt",
},
"sentencepiece_model_file": {
"unified_transformer-12L-cn":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/unified_transformer-12L-cn-spm.model",
"unified_transformer-12L-cn-luge":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/unified_transformer-12L-cn-spm.model",
"plato-mini":
"https://paddlenlp.bj.bcebos.com/models/transformers/unified_transformer/plato-mini-spm.model",
},
}
pretrained_init_configuration = {
"unified_transformer-12L-cn": {
"do_lower_case": False
},
"unified_transformer-12L-cn-luge": {
"do_lower_case": False
},
"plato-mini": {
"do_lower_case": False
},
}
TASK_TO_SPECIAL_TOKEN = {
'chitchat': "[CHAT]",
'knowledge': "[KNOW]",
'recommend': "[RECO]",
}
def __init__(self,
vocab_file,
sentencepiece_model_file,
do_lower_case=False,
unk_token="[UNK]",
pad_token="[PAD]",
cls_token="[CLS]",
sep_token="[SEP]",
mask_token="[MASK]",
special_tokens_file=""):
mod = try_import('sentencepiece')
self.spm_model = mod.SentencePieceProcessor()
self.do_lower_case = do_lower_case
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = UnifiedTransformerTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
.format(vocab_file))
self.vocab = self.load_vocabulary(
vocab_file,
unk_token,
pad_token,
cls_token,
sep_token,
mask_token=mask_token)
# if the sentencepiece_model_file does not exist, use the default sentencepiece model
if os.path.isfile(sentencepiece_model_file):
self.spm_model.Load(sentencepiece_model_file)
pat_str = ""
if os.path.isfile(special_tokens_file):
self.specials = self.read_file(special_tokens_file)
for special in self.specials:
pat_str += "(" + re.escape(special) + ")|"
else:
self.specials = {}
pat_str += r"([a-zA-Z0-9\S]+)"
self.pat = re.compile(pat_str)
self.vocab_file = vocab_file
self.sentencepiece_model_file = sentencepiece_model_file
@property
def vocab_size(self):
"""
Return the size of the vocabulary.
Returns:
int: the size of vocabulary.
"""
return len(self.vocab)
def preprocess_text(self,
inputs,
remove_space=True,
lower=False,
is_split_into_words=True):
"""preprocess data by removing extra space and normalize data."""
if not is_split_into_words:
inputs = " ".join(jieba.lcut(inputs))
outputs = inputs
if remove_space:
outputs = " ".join(inputs.strip().split())
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
def clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
text = text.replace(u"“", u'"')\
.replace(u'”', u'"')\
.replace(u'‘', "'")\
.replace(u'’', u"'")\
.replace(u'—', u'-')
output = []
for char in text:
if _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def encode_pieces(self, spm_model, text, return_unicode=True, sample=False):
"""turn sentences into word pieces."""
# liujiaxiang: added for ernie-albert, mainly because “ / ” / ‘ / ’ / — cause too many unk tokens
text = self.clean_text(text)
if not sample:
pieces = spm_model.EncodeAsPieces(text)
else:
pieces = spm_model.SampleEncodeAsPieces(text, 64, 0.1)
return pieces
def _tokenize(self, text, is_split_into_words=True):
"""
End-to-end tokenization for UnifiedTransformer models.
Args:
text (str): The text to be tokenized.
Returns:
list: A list of string representing converted tokens.
"""
text = self.preprocess_text(
text,
lower=self.do_lower_case,
is_split_into_words=is_split_into_words)
tokens = []
for match in self.pat.finditer(text):
part_text = match.group(0)
if part_text in self.specials:
tokens.append(part_text)
continue
part_tokens = self.encode_pieces(self.spm_model, part_text)
tokens.extend(part_tokens)
return tokens
def tokenize(self, text, is_split_into_words=True):
"""
End-to-end tokenization for UnifiedTransformer models.
Args:
text (str): The text to be tokenized.
is_split_into_words(bool, optional): Whether or not the input `text`
has been pretokenized. Default True.
Returns:
list: A list of string representing converted tokens.
"""
return self._tokenize(text, is_split_into_words=is_split_into_words)
def merge_subword(self, tokens):
"""Merge subword."""
ret = []
for token in tokens:
if token.startswith(u"▁"):
ret.append(token[1:])
else:
if len(ret):
ret[-1] += token
else:
ret.append(token)
ret = [token for token in ret if token]
return ret
def convert_tokens_to_string(self, tokens, keep_space=True):
"""
Converts a sequence of tokens (list of strings) into a single string. Since
WordPiece introduces `__` to concatenate subwords, `__` is also removed
when converting.
Args:
tokens (list): A list of string representing tokens to be converted.
Returns:
str: Converted string from tokens.
"""
tokens = self.merge_subword(tokens)
if keep_space:
out_string = " ".join(tokens).replace("<s>", "")
else:
out_string = "".join(tokens).replace("<s>", "")
out_string = out_string.replace("</s>", "\n").replace("\n ",
"\n").strip()
return out_string
def convert_ids_to_string(self, ids, keep_space=True):
"""Convert ids to string."""
tokens = self.convert_ids_to_tokens(ids)
out_string = self.convert_tokens_to_string(tokens, keep_space)
return out_string
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special
tokens.
Note:
This encodes inputs and checks the number of added tokens, and is
therefore not efficient. Do not put this inside your training loop.
Args:
pair (bool, optional): Returns the number of added tokens in the
case of a sequence pair if set to True, returns the number of
added tokens in the case of a single sequence if set to False.
Default False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence by concatenating
and adding special tokens.
An UnifiedTransformer sequence has the following format:
::
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (list): List of IDs to which the special tokens will be
added.
token_ids_1 (list, optional): Optional second list of IDs for sequence
pairs. Default None.
Returns:
list: List of input_ids with the appropriate special tokens.
"""
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
if token_ids_1 is None:
return _cls + token_ids_0 + _sep
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
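# Illustrative sketch (assumed ids): with cls_token_id=1 and sep_token_id=2,
# build_inputs_with_special_tokens([10, 11], [20]) returns [1, 10, 11, 2, 20, 2],
# i.e. the "[CLS] A [SEP] B [SEP]" layout described above.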
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
"""
Build offset map from a pair of offset map by concatenating and adding
offsets of special tokens.
An UnifiedTransformer offset_mapping has the following format:
::
- single sequence: ``(0,0) X (0,0)``
- pair of sequences: ``(0,0) A (0,0) B (0,0)``
Args:
offset_mapping_0 (list): List of char offsets to which the special
tokens will be added.
offset_mapping_1 (list, optional): Optional second list of char
offsets for offset mapping pairs. Default None.
Returns:
list: List of char offsets with the appropriate offsets of special
tokens.
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)
] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create the token_type_ids from the two sequences passed for the model.
An UnifiedTransformer sequence token_type_ids has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If `token_ids_1` is None, this method only returns the first portion (0s).
Args:
token_ids_0 (list): List of IDs.
token_ids_1 (list, optional): Optional second list of IDs for sequence
pairs. Default None
Returns:
list: List of token_type_id according to the given sequence(s).
"""
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
if token_ids_1 is None:
return [0] * len(_cls + token_ids_0 + _sep)
return [0] * len(_cls + token_ids_0 + _sep) + [1] * len(token_ids_1 +
_sep)
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieve sequence ids from a token list that has no special tokens added.
This method is called when adding special tokens using the tokenizer
``prepare_for_model`` method.
Args:
token_ids_0 (list): List of IDs.
token_ids_1 (list, optional): Optional second list of IDs for sequence
pairs. Default None.
already_has_special_tokens (bool, optional): Whether or not the token
list is already formatted with special tokens for the model. Default
False.
Returns:
list: A list of integers in the range [0, 1]. 1 for a special token,
0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
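# Illustrative sketch: get_special_tokens_mask([10, 11], [20]) returns
# [1, 0, 0, 1, 0, 1], marking the positions that [CLS]/[SEP] occupy in the
# corresponding "[CLS] A [SEP] B [SEP]" sequence.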
def save_resources(self, save_directory):
"""
Save tokenizer related resources to files under `save_directory`.
Args:
save_directory (str): Directory to save files into.
"""
for name, file_name in self.resource_files_names.items():
src_path = getattr(self, name)
save_path = os.path.join(save_directory, file_name)
if os.path.abspath(src_path) != os.path.abspath(save_path):
copyfile(src_path, save_path)
@staticmethod
def read_file(filepath):
token_to_idx = {}
with open(filepath, 'r', encoding='utf-8') as f:
for num, line in enumerate(f):
items = convert_to_unicode(line.rstrip()).split("\t")
if len(items) > 2:
break
token = items[0]
index = int(items[1]) if len(items) == 2 else num
token = token.strip()
token_to_idx[token] = index
return token_to_idx
@staticmethod
def load_vocabulary(filepath,
unk_token=None,
pad_token=None,
bos_token=None,
eos_token=None,
**kwargs):
"""
Instantiate an instance of `Vocab` from a file, preserving all tokens by
using `Vocab.from_dict`. Each line of the file contains a token and the
index of the token, separated by '\t'.
Args:
filepath (str): path of file to construct vocabulary.
unk_token (str): special token for unknown token. If no need, it also
could be None. Default: None.
pad_token (str): special token for padding token. If no need, it also
could be None. Default: None.
bos_token (str): special token for bos token. If no need, it also
could be None. Default: None.
eos_token (str): special token for eos token. If no need, it also
could be None. Default: None.
**kwargs (dict): keyword arguments for `Vocab.from_dict`.
Returns:
Vocab: An instance of `Vocab`.
"""
token_to_idx = UnifiedTransformerTokenizer.read_file(filepath)
vocab = Vocab.from_dict(
token_to_idx,
unk_token=unk_token,
pad_token=pad_token,
bos_token=bos_token,
eos_token=eos_token,
**kwargs)
# Filter out duplicate tokens that are mapped to the same id
idx_to_token = {v: k for k, v in vocab._token_to_idx.items()}
vocab._idx_to_token = [
idx_to_token[idx] for idx in sorted(idx_to_token.keys())
]
return vocab
def dialogue_encode(self,
history,
response=None,
knowledge=None,
task_type=None,
max_seq_len=512,
max_response_len=128,
max_knowledge_len=128,
return_position_ids=True,
return_token_type_ids=True,
return_attention_mask=True,
return_length=False,
add_start_token_as_response=False,
pad_to_max_seq_len=False,
return_tensors=False,
is_split_into_words=True):
"""
Main method to encode the single-turn or multi-turn dialogue conversation.
It will return a dictionary containing the encoded sequence and other
relevant information which meets the input format requirements of the
UnifiedTransformer model.
See detail at
https://github.com/PaddlePaddle/Knover/tree/luge-dialogue/luge-dialogue
Args:
history (str|list|tuple): The history of dialogue conversation. It
is an utterance or list of utterances to be encoded. Each
utterance is a string.
response (str, optional): The response of dialogue conversation.
It should be set when training the model. It should not be set
when running inference. Default None.
knowledge (str, optional): The knowledge information of dialogue
conversation. It should be set if the `task_type` is "knowledge"
or "recommend". Default None.
task_type (str, optional): The type of dialogue conversation. It is
one of "chitchat", "knowledge" and "recommend". They represent
the chitchat dialogue, knowledge grounded dialogue and
conversational recommendation respectively. Default None, which
means there is no `special_token` added in output sequence for
identifying different conversation types.
max_seq_len (int, optional): The maximum encoded sequence length.
Default 512.
max_response_len (int, optional): The maximum encoded sequence
length of the input `response`. Default 128.
max_knowledge_len (int, optional): The maximum encoded sequence
length of the input `knowledge`. Default 128.
return_position_ids (bool, optional): Whether to return the
position_ids. Default True.
return_token_type_ids (bool, optional): Whether to return the
token_type_ids. Default True.
return_attention_mask (bool, optional): Whether to return the
attention_mask. Default True.
return_length (bool, optional): Whether to return the length of the
encoded sequence. Default False.
add_start_token_as_response (bool, optional): Whether to add the
special token [CLS] at the end of the sequence as the beginning of
the response when running inference, to force the model to start
generating the response sequence. Default False.
pad_to_max_seq_len (bool, optional): Whether to pad the returned
sequences to the `max_seq_len`. Note that, in this method,
returned sequences will be padded on the left. Default False.
return_tensors (bool, optional): Whether to convert the returned
sequences to Tensor. Default False.
is_split_into_words (bool, optional): Whether or not the input text
(`history`, `response` and `knowledge`) has been pretokenized.
Default True.
"""
# Input type checking for clearer error
assert isinstance(history, str) or (
isinstance(history, (list, tuple)) and
(len(history) == 0 or len(history) != 0 and
isinstance(history[0], str))), (
"The input `history` must be with type `str` (single context) "
"or `List[str]` (multi-turn context). But received: {}".format(
history))
assert response is None or isinstance(response, str), (
"The input `response` must of be with type `str`. But received: {}".
format(response))
assert knowledge is None or isinstance(knowledge, str), (
"The input `knowledge` must of be with type `str`. But received: {}".
format(knowledge))
assert task_type is None or task_type in self.TASK_TO_SPECIAL_TOKEN, (
"The input `task_type` must be None or one of {}.".format(", ".join(
self.TASK_TO_SPECIAL_TOKEN.keys())))
assert max_seq_len > max_response_len + max_knowledge_len, (
"`max_seq_len` must be greater than the sum of `max_response_len` "
"and `max_knowledge_len`. But received `max_seq_len` is {}, "
"`max_response_len` is {}, `max_knowledge_len` is {}.".format(
max_seq_len, max_response_len, max_knowledge_len))
assert response is None or not add_start_token_as_response, (
"`add_start_token_as_response` only works when `response` is "
"`None`. But received `add_start_token_as_response`: `{}`, "
"`response`: {}.".format(add_start_token_as_response, response))
knowledge_ids = []
if knowledge is not None:
tokens = self._tokenize(knowledge, is_split_into_words)
knowledge_ids = self.convert_tokens_to_ids(tokens)
if len(knowledge_ids) > max_knowledge_len - 1:
knowledge_ids = knowledge_ids[:max_knowledge_len - 1]
knowledge_ids += [self.sep_token_id]
response_ids = []
if response is not None:
tokens = self._tokenize(response, is_split_into_words)
response_ids = [self.cls_token_id] + self.convert_tokens_to_ids(
tokens)
if len(response_ids) > max_response_len - 1:
response_ids = response_ids[:max_response_len - 1]
response_ids += [self.sep_token_id]
elif add_start_token_as_response:
response_ids = [self.cls_token_id]
if task_type is not None:
special_token = self.TASK_TO_SPECIAL_TOKEN[task_type]
assert special_token in self.vocab._token_to_idx, (
"The vocab file should contain the special token corresponding "
"to the task: {}.".format(task_type))
special_token_id = self.vocab._token_to_idx[special_token]
knowledge_ids = [self.cls_token_id, special_token_id
] + knowledge_ids
else:
knowledge_ids = [self.cls_token_id] + knowledge_ids
max_history_len = max_seq_len - len(knowledge_ids) - len(response_ids)
if isinstance(history, str):
history = [history]
history_ids = []
for i in range(len(history) - 1, -1, -1):
tokens = self._tokenize(history[i], is_split_into_words)
if len(history_ids) + len(tokens) + 1 > max_history_len:
if i == len(history) - 1:
tokens = tokens[1 - max_history_len:]
history_ids = (self.convert_tokens_to_ids(tokens) +
[self.sep_token_id])
break
history_ids = (self.convert_tokens_to_ids(tokens) +
[self.sep_token_id]) + history_ids
history_ids = knowledge_ids + history_ids
# Build output dictionary
encoded_inputs = {}
encoded_inputs["input_ids"] = history_ids + response_ids
# Check lengths
sequence_length = len(encoded_inputs["input_ids"])
assert sequence_length <= max_seq_len
# The generation API takes the logits at the last time step to generate the
# next token. To avoid the last time step being a pad token, padding is
# applied on the left.
pad_length = max_seq_len - sequence_length if pad_to_max_seq_len else 0
if pad_length > 0:
encoded_inputs["input_ids"] = [
self.pad_token_id
] * pad_length + encoded_inputs["input_ids"]
if return_tensors:
# Add dimension for batch_size
encoded_inputs["input_ids"] = paddle.to_tensor(encoded_inputs[
"input_ids"]).unsqueeze(0)
if return_token_type_ids:
encoded_inputs["token_type_ids"] = [0] * len(
history_ids) + [1] * len(response_ids)
if pad_length > 0:
encoded_inputs["token_type_ids"] = [
self.pad_token_id
] * pad_length + encoded_inputs["token_type_ids"]
if return_tensors:
# Add dimension for batch_size
encoded_inputs["token_type_ids"] = paddle.to_tensor(
encoded_inputs["token_type_ids"]).unsqueeze(0)
if return_length:
encoded_inputs["seq_len"] = sequence_length
if return_position_ids:
encoded_inputs["position_ids"] = list(range(sequence_length))
if pad_length > 0:
encoded_inputs["position_ids"] = [
self.pad_token_id
] * pad_length + encoded_inputs["position_ids"]
if return_tensors:
# Add dimension for batch_size
encoded_inputs["position_ids"] = paddle.to_tensor(
encoded_inputs["position_ids"]).unsqueeze(0)
if return_attention_mask:
attention_mask = np.ones(
(sequence_length, sequence_length), dtype='float32') * -1e9
start = len(history_ids)
end = sequence_length
attention_mask[:end, :start] = 0.0
# Generate the lower triangular matrix using the slice of matrix
tmp = np.triu(
np.ones(
[end - start, end - start], dtype='float32') * -1e9, 1)
attention_mask[start:end, start:end] = tmp
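# Illustrative sketch: with a 3-token history and a 2-token response, every row can attend
# to the history columns (set to 0.0 above), while the response block is lower-triangular,
# so each response position only attends to itself and earlier response tokens; blocked
# entries stay at -1e9 and vanish after softmax.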
encoded_inputs["attention_mask"] = attention_mask
if pad_length > 0:
new_mask = np.ones(
(max_seq_len, max_seq_len), dtype='float32') * -1e9
new_mask[-sequence_length:, -sequence_length:] = attention_mask
encoded_inputs["attention_mask"] = new_mask
if return_tensors:
# Add dimensions for batch_size and num_heads
encoded_inputs["attention_mask"] = paddle.to_tensor(
encoded_inputs["attention_mask"]).unsqueeze((0, 1))
return encoded_inputs
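# A minimal usage sketch of dialogue_encode (not part of the original file). It assumes a
# tokenizer instance named `tokenizer` has already been constructed with a valid vocab file
# and SentencePiece model; the strings below are made up for illustration.
#
#   history = ["Do you like reading?", "Yes, mostly science fiction."]
#   inputs = tokenizer.dialogue_encode(
#       history,
#       knowledge="The user enjoys novels.",
#       task_type="knowledge",
#       add_start_token_as_response=True,  # inference: force generation of the response
#       return_tensors=True)               # adds a batch dimension and returns Tensors
#   # inputs holds "input_ids", "token_type_ids", "position_ids" and "attention_mask".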
| 43.159091 | 123 | 0.576058
21615b5f06dfc02b7c70dadaff7ae271d33cae0a | 22,549 | py | Python | python/taichi/profiler/kernel_profiler.py | rwilliams251/taichi | 442710331be55baf5af17f9667db650c19cbb0b2 | ["MIT"] | 1 | 2022-02-07T06:34:03.000Z | 2022-02-07T06:34:03.000Z
from contextlib import contextmanager
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.profiler.kernel_metrics import default_cupti_metrics
class StatisticalResult:
"""Statistical result of records.
Profiling records with the same kernel name will be counted in a ``StatisticalResult`` instance via the function ``insert_record(time)``.
Currently, only the kernel elapsed time is counted; other statistics related to the kernel will be added in the future.
"""
def __init__(self, name):
self.name = name
self.counter = 0
self.min_time = 0.0
self.max_time = 0.0
self.total_time = 0.0
def __lt__(self, other):
# For sorted()
return self.total_time < other.total_time
def insert_record(self, time):
"""Insert records with the same kernel name.
Currently, only the kernel elapsed time is counted.
"""
if self.counter == 0:
self.min_time = time
self.max_time = time
self.counter += 1
self.total_time += time
self.min_time = min(self.min_time, time)
self.max_time = max(self.max_time, time)
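# Illustrative sketch: after insert_record(2.0) and insert_record(4.0), a StatisticalResult
# holds counter=2, total_time=6.0, min_time=2.0 and max_time=4.0.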
class KernelProfiler:
"""Kernel profiler of Taichi.
Kernel profiler acquires kernel profiling records from backend, counts records in Python scope,
and prints the results to the console by :func:`~taichi.profiler.kernel_profiler.KernelProfiler.print_info`.
``KernelProfiler`` now supports detailed low-level performance metrics (such as memory bandwidth consumption) in its advanced mode.
This mode is only available for the CUDA backend with CUPTI toolkit, i.e. you need ``ti.init(kernel_profiler=True, arch=ti.cuda)``.
Note:
For details about using CUPTI in Taichi, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
"""
def __init__(self):
self._profiling_mode = False
self._profiling_toolkit = 'default'
self._metric_list = [default_cupti_metrics]
self._total_time_ms = 0.0
self._traced_records = []
self._statistical_results = {}
# public methods
def set_kernel_profiler_mode(self, mode=False):
"""Turn on or off :class:`~taichi.profiler.kernel_profiler.KernelProfiler`."""
if type(mode) is bool:
self._profiling_mode = mode
else:
raise TypeError(
f'Arg `mode` must be of type boolean. Type {type(mode)} is not supported.'
)
def get_kernel_profiler_mode(self):
"""Get status of :class:`~taichi.profiler.kernel_profiler.KernelProfiler`."""
return self._profiling_mode
def set_toolkit(self, toolkit_name='default'):
if self._check_not_turned_on_with_warning_message():
return False
status = impl.get_runtime().prog.set_kernel_profiler_toolkit(
toolkit_name)
if status is True:
self._profiling_toolkit = toolkit_name
else:
_ti_core.warn(
f'Failed to set kernel profiler toolkit ({toolkit_name}) , keep using ({self._profiling_toolkit}).'
)
return status
def get_total_time(self):
"""Get elapsed time of all kernels recorded in KernelProfiler.
Returns:
time (float): total time in second.
"""
if self._check_not_turned_on_with_warning_message():
return 0.0
self._update_records() # kernel records
self._count_statistics() # _total_time_ms is counted here
return self._total_time_ms / 1000 # ms to s
def clear_info(self):
"""Clear all records both in front-end :class:`~taichi.profiler.kernel_profiler.KernelProfiler` and back-end instance ``KernelProfilerBase``.
Note:
The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared.
"""
if self._check_not_turned_on_with_warning_message():
return None
#sync first
impl.get_runtime().prog.sync_kernel_profiler()
#then clear backend & frontend info
impl.get_runtime().prog.clear_kernel_profile_info()
self._clear_frontend()
return None
def query_info(self, name):
"""For docstring of this function, see :func:`~taichi.profiler.query_kernel_profiler_info`."""
if self._check_not_turned_on_with_warning_message():
return None
self._update_records() # kernel records
self._count_statistics() # statistics results
# TODO : query self.StatisticalResult in python scope
return impl.get_runtime().prog.query_kernel_profile_info(name)
def set_metrics(self, metric_list=default_cupti_metrics):
"""For docstring of this function, see :func:`~taichi.profiler.set_kernel_profiler_metrics`."""
if self._check_not_turned_on_with_warning_message():
return None
self._metric_list = metric_list
metric_name_list = [metric.name for metric in metric_list]
self.clear_info()
impl.get_runtime().prog.reinit_kernel_profiler_with_metrics(
metric_name_list)
return None
@contextmanager
def collect_metrics_in_context(self, metric_list=default_cupti_metrics):
"""This function is not exposed to user now.
For usage of this function, see :func:`~taichi.profiler.collect_kernel_profiler_metrics`.
"""
if self._check_not_turned_on_with_warning_message():
return None
self.set_metrics(metric_list)
yield self
self.set_metrics() #back to default metric list
return None
# mode of print_info
COUNT = 'count' # print the statistical results (min,max,avg time) of Taichi kernels.
TRACE = 'trace' # print the records of launched Taichi kernels with specific profiling metrics (time, memory load/store and core utilization etc.)
def print_info(self, mode=COUNT):
"""Print the profiling results of Taichi kernels.
For usage of this function, see :func:`~taichi.profiler.print_kernel_profiler_info`.
Args:
mode (str): the way to print profiling results.
"""
if self._check_not_turned_on_with_warning_message():
return None
self._update_records() # kernel records
self._count_statistics() # statistics results
#COUNT mode (default) : print statistics of all kernel
if mode == self.COUNT:
self._print_statistics_info()
#TRACE mode : print records of launched kernel
elif mode == self.TRACE:
self._print_kernel_info()
else:
raise ValueError(
'Arg `mode` must be of type \'str\' and have the value \'count\' or \'trace\'.'
)
return None
# private methods
def _check_not_turned_on_with_warning_message(self):
if self._profiling_mode is False:
_ti_core.warn(
'use \'ti.init(kernel_profiler = True)\' to turn on KernelProfiler.'
)
return True
return False
def _clear_frontend(self):
"""Clear member variables in :class:`~taichi.profiler.kernel_profiler.KernelProfiler`.
Note:
The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared.
"""
self._total_time_ms = 0.0
self._traced_records.clear()
self._statistical_results.clear()
def _update_records(self):
"""Acquires kernel records from a backend."""
impl.get_runtime().prog.sync_kernel_profiler()
self._clear_frontend()
self._traced_records = impl.get_runtime(
).prog.get_kernel_profiler_records()
def _count_statistics(self):
"""Counts the statistics of launched kernels during the profiling period.
The profiling records with the same kernel name are counted as a profiling result.
"""
for record in self._traced_records:
if self._statistical_results.get(record.name) is None:
self._statistical_results[record.name] = StatisticalResult(
record.name)
self._statistical_results[record.name].insert_record(
record.kernel_time)
self._total_time_ms += record.kernel_time
self._statistical_results = {
k: v
for k, v in sorted(self._statistical_results.items(),
key=lambda item: item[1],
reverse=True)
}
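# The statistical results are ordered by descending total time, so the most expensive
# kernels are printed first by _print_statistics_info().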
def _make_table_header(self, mode):
header_str = f'Kernel Profiler({mode}, {self._profiling_toolkit})'
arch_name = f' @ {_ti_core.arch_name(impl.current_cfg().arch).upper()}'
device_name = impl.get_runtime().prog.get_kernel_profiler_device_name()
if len(device_name) > 1: # default device_name = ' '
device_name = ' on ' + device_name
return header_str + arch_name + device_name
def _print_statistics_info(self):
"""Print statistics of launched kernels during the profiling period."""
# headers
table_header = self._make_table_header('count')
column_header = '[ % total count | min avg max ] Kernel name'
# partition line
line_length = max(len(column_header), len(table_header))
outer_partition_line = '=' * line_length
inner_partition_line = '-' * line_length
#message in one line
string_list = []
values_list = []
for key in self._statistical_results:
result = self._statistical_results[key]
fraction = result.total_time / self._total_time_ms * 100.0
string_list.append(
'[{:6.2f}% {:7.3f} s {:6d}x |{:9.3f} {:9.3f} {:9.3f} ms] {}')
values_list.append([
fraction,
result.total_time / 1000.0,
result.counter,
result.min_time,
result.total_time / result.counter, # avg_time
result.max_time,
result.name
])
# summary
summary_line = '[100.00%] Total execution time: '
summary_line += f'{self._total_time_ms/1000:7.3f} s '
summary_line += f'number of results: {len(self._statistical_results)}'
# print
print(outer_partition_line)
print(table_header)
print(outer_partition_line)
print(column_header)
print(inner_partition_line)
result_num = len(self._statistical_results)
for idx in range(result_num):
print(string_list[idx].format(*values_list[idx]))
print(inner_partition_line)
print(summary_line)
print(outer_partition_line)
def _print_kernel_info(self):
"""Print a list of launched kernels during the profiling period."""
metric_list = self._metric_list
values_num = len(self._traced_records[0].metric_values)
# We currently get kernel attributes through the CUDA Driver API;
# there is no corresponding implementation in other backends yet.
# The profiler does not print invalid kernel attribute info for now.
kernel_attribute_state = self._traced_records[0].register_per_thread > 0
# headers
table_header = self._make_table_header('trace')
column_header = ('[ start.time | kernel.time |') #default
if kernel_attribute_state:
column_header += (
' regs | shared mem | grid size | block size | occupancy |'
) #kernel_attributes
for idx in range(values_num):
column_header += metric_list[idx].header + '|'
column_header = (column_header + '] Kernel name').replace("|]", "]")
# partition line
line_length = max(len(column_header), len(table_header))
outer_partition_line = '=' * line_length
inner_partition_line = '-' * line_length
# message in one line: formatted_str.format(*values)
fake_timestamp = 0.0
string_list = []
values_list = []
for record in self._traced_records:
formatted_str = '[{:9.3f} ms |{:9.3f} ms |' #default
values = [fake_timestamp, record.kernel_time] #default
if kernel_attribute_state:
formatted_str += ' {:4d} | {:6d} bytes | {:6d} | {:6d} | {:2d} blocks |'
values += [
record.register_per_thread, record.shared_mem_per_block,
record.grid_size, record.block_size,
record.active_blocks_per_multiprocessor
]
for idx in range(values_num):
formatted_str += metric_list[idx].format + '|'
values += [record.metric_values[idx] * metric_list[idx].scale]
formatted_str = (formatted_str + '] ' + record.name)
string_list.append(formatted_str.replace("|]", "]"))
values_list.append(values)
fake_timestamp += record.kernel_time
# print
print(outer_partition_line)
print(table_header)
print(outer_partition_line)
print(column_header)
print(inner_partition_line)
record_num = len(self._traced_records)
for idx in range(record_num):
print(string_list[idx].format(*values_list[idx]))
print(inner_partition_line)
print(f"Number of records: {len(self._traced_records)}")
print(outer_partition_line)
_ti_kernel_profiler = KernelProfiler()
def get_default_kernel_profiler():
"""We have only one :class:`~taichi.profiler.kernelprofiler.KernelProfiler` instance(i.e. ``_ti_kernel_profiler``) now.
For ``KernelProfiler`` using ``CuptiToolkit``, GPU devices can only work in a certain configuration.
The profiling mode and metrics are configured by the host (CPU) via CUPTI APIs, and the device (GPU) will use
its counter registers to collect specific metrics.
So if there are multiple instances of ``KernelProfiler``, the device will work with the latest configuration,
and the profiling configuration of the other instances will be changed as a result.
For data retention purposes, multiple instances will be considered in the future.
"""
return _ti_kernel_profiler
def print_kernel_profiler_info(mode='count'):
"""Print the profiling results of Taichi kernels.
To enable this profiler, set ``kernel_profiler=True`` in ``ti.init()``.
``'count'`` mode: print the statistics (min, max, avg time) of launched kernels;
``'trace'`` mode: print the records of launched kernels with specific profiling metrics (time, memory load/store, core utilization, etc.).
The mode defaults to ``'count'``.
Args:
mode (str): the way to print profiling results.
Example::
>>> import taichi as ti
>>> ti.init(ti.cpu, kernel_profiler=True)
>>> var = ti.field(ti.f32, shape=1)
>>> @ti.kernel
>>> def compute():
>>> var[0] = 1.0
>>> compute()
>>> ti.profiler.print_kernel_profiler_info()
>>> # equivalent calls :
>>> # ti.profiler.print_kernel_profiler_info('count')
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
Currently the result of `KernelProfiler` could be incorrect on OpenGL
backend due to its lack of support for `ti.sync()`.
For advanced mode of `KernelProfiler`, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
"""
get_default_kernel_profiler().print_info(mode)
def query_kernel_profiler_info(name):
"""Query kernel elapsed time(min,avg,max) on devices using the kernel name.
To enable this profiler, set `kernel_profiler=True` in `ti.init`.
Args:
name (str): kernel name.
Returns:
KernelProfilerQueryResult (class): with member variables (counter, min, max, avg).
Example::
>>> import taichi as ti
>>> ti.init(ti.cpu, kernel_profiler=True)
>>> n = 1024*1024
>>> var = ti.field(ti.f32, shape=n)
>>> @ti.kernel
>>> def fill():
>>> for i in range(n):
>>> var[i] = 0.1
>>> fill()
>>> ti.profiler.clear_kernel_profiler_info() #[1]
>>> for i in range(100):
>>> fill()
>>> query_result = ti.profiler.query_kernel_profiler_info(fill.__name__) #[2]
>>> print("kernel excuted times =",query_result.counter)
>>> print("kernel elapsed time(min_in_ms) =",query_result.min)
>>> print("kernel elapsed time(max_in_ms) =",query_result.max)
>>> print("kernel elapsed time(avg_in_ms) =",query_result.avg)
Note:
[1] To get the correct result, query_kernel_profiler_info() must be used in conjunction with
clear_kernel_profiler_info().
[2] Currently the result of `KernelProfiler` could be incorrect on OpenGL
backend due to its lack of support for `ti.sync()`.
"""
return get_default_kernel_profiler().query_info(name)
def clear_kernel_profiler_info():
"""Clear all KernelProfiler records."""
get_default_kernel_profiler().clear_info()
def get_kernel_profiler_total_time():
"""Get elapsed time of all kernels recorded in KernelProfiler.
Returns:
time (float): total time in second.
"""
return get_default_kernel_profiler().get_total_time()
def set_kernel_profiler_toolkit(toolkit_name='default'):
"""Set the toolkit used by KernelProfiler.
Currently, we only support toolkits: ``'default'`` and ``'cupti'``.
Args:
toolkit_name (str): string of toolkit name.
Returns:
status (bool): whether the setting is successful or not.
Example::
>>> import taichi as ti
>>> ti.init(arch=ti.cuda, kernel_profiler=True)
>>> x = ti.field(ti.f32, shape=1024*1024)
>>> @ti.kernel
>>> def fill():
>>> for i in x:
>>> x[i] = i
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> for i in range(100):
>>> fill()
>>> ti.profiler.print_kernel_profiler_info()
>>> ti.profiler.set_kernel_profiler_toolkit('default')
>>> for i in range(100):
>>> fill()
>>> ti.profiler.print_kernel_profiler_info()
"""
return get_default_kernel_profiler().set_toolkit(toolkit_name)
def set_kernel_profiler_metrics(metric_list=default_cupti_metrics):
"""Set metrics that will be collected by the CUPTI toolkit.
Args:
metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # If no parameter is given, Taichi will print its pre-defined metrics list
>>> ti.profiler.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('shared_access')
>>> global_op_atom = ti.profiler.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # metrics setting will be retained until the next configuration
>>> ti.profiler.set_kernel_profiler_metrics(profiling_metrics)
>>> for i in range(16):
>>> reduction()
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
Metrics setting will be retained until the next configuration.
"""
get_default_kernel_profiler().set_metrics(metric_list)
@contextmanager
def collect_kernel_profiler_metrics(metric_list=default_cupti_metrics):
"""Set temporary metrics that will be collected by the CUPTI toolkit within this context.
Args:
metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> ti.profiler.set_kernel_profiler_toolkit('cupti')
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # If no parameter is given, Taichi will print its pre-defined metrics list
>>> ti.profiler.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('device_utilization')
>>> global_op_atom = ti.profiler.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # metrics setting is temporary, and will be cleared when exiting this context.
>>> with ti.profiler.collect_kernel_profiler_metrics(profiling_metrics):
>>> for i in range(16):
>>> reduction()
>>> ti.profiler.print_kernel_profiler_info('trace')
Note:
The configuration of the ``metric_list`` will be cleared when exiting this context.
"""
get_default_kernel_profiler().set_metrics(metric_list)
yield get_default_kernel_profiler()
get_default_kernel_profiler().set_metrics()
__all__ = [
'clear_kernel_profiler_info', 'collect_kernel_profiler_metrics',
'get_kernel_profiler_total_time', 'print_kernel_profiler_info',
'query_kernel_profiler_info', 'set_kernel_profiler_metrics',
'set_kernel_profiler_toolkit'
]
| 38.025295 | 166 | 0.633509
5c5cbfe167f1ab3a54e471d8efb74d9b1ec7ac27 | 2,747 | py | Python | 4/4-2.py | softwaretestbook/apitest_book | 29f640363ab6ef301ea685196b43805a4ed5a3d4 | ["Apache-2.0"]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/9/3 17:43
# @Author : CrissChan
# @Site : https://blog.csdn.net/crisschan
# @File : 4-2.py
# @Software: PyCharm
import requests
import json
print('--------post-param-------')
url_login = 'http://127.0.0.1:12356/login'
username='CrissChan'
password='CrissChan'
payload = {'username': username,'password':password}
res_login = requests.post(url_login,data=json.dumps(payload))  # string parameters (JSON-encoded body)
res_login = requests.post(url_login,data=payload)  # form-encoded parameters 'username' and 'password'
payload = (('color', 'red'),('color','green'))
res_login = requests.post(url_login,data=payload)  # form-encoded, sent as 'color': ['red', 'green']
print(res_login.cookies['username'])
print(res_login.text)
print(res_login.status_code)
print(res_login.headers)
## get
print('--------get-------')
url = 'http://127.0.0.1:12356'
res_index = requests.get(url)
print(res_index.encoding)
print(res_index.json())
res_index = requests.get(url,stream=True)
print(res_index.raw)
if res_index.status_code == requests.codes.ok:
print(requests.codes.ok)
print(res_index.text)
print(res_index.status_code)
print(res_index.headers)
print(res_index.headers['Content-Type'])
print(res_index.headers['content-type'])
print(res_index.headers.get('Content-Type'))
print(res_index.headers.get('content-type'))
## get--headers
print('--------get-headers------')
url = 'http://127.0.0.1:12356'
headers = {'Host': '127.0.0.1',
'Connection': 'keep-alive',
'Content-Type': 'text/plain',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'X-usrg': 'criss'}
res_index = requests.get(url,headers = headers)
print(res_index.text)
print(res_index.status_code)
print(res_index.headers)
## get_param
print('--------get_param-------')
url_diff = 'http://127.0.0.1:12356/diff'
payload = {'diff':'easy'}
res_diff = requests.get(url_diff,params=payload)
print(res_diff.text)
print(res_diff.status_code)
print(res_diff.headers)
## timeout (expected to raise requests.exceptions.ConnectTimeout because the timeout is extremely small)
res_github=requests.get('http://github.com',timeout=0.001)
## post
print('--------post-------')
url_login = 'http://127.0.0.1:12356/login'
username='CrissChan'
password='CrissChan'
payload = {'username': username,'password':password}
res_login = requests.post(url_login,data=json.dumps(payload))
print(res_login.cookies['username'])
print(res_login.text)
print(res_login.status_code)
print(res_login.headers)
## ReqeustsCookieJar
cookie_jar = requests.cookies.RequestsCookieJar()
cookie_jar.set('JSESSIONID', '23A15FE6655327749BC822A79CF77198', domain='127.0.0.1', path='/')
url = 'http://127.0.0.1:12356'
r = requests.get(url, cookies=cookie_jar)
| 28.030612 | 131 | 0.709137
c498efcdfbdd4bb51234d719de42842c4fc206bb | 924 | py | Python | test/test_algos/test_opt_algorithm/test_paretoopt/test_paretoopt.py | HowardHu97/ZOOpt | 01568e8e6b0e65ac310d362af2da5245ac375e53 | ["MIT"] | 1 | 2018-11-03T12:05:00.000Z | 2018-11-03T12:05:00.000Z
from zoopt.algos.opt_algorithms.paretoopt.paretoopt import ParetoOpt
from zoopt import Objective, Parameter, Opt
from math import exp
from sparse_mse import SparseMSE
class TestParetoOpt(object):
def test_mutation(self):
a = [0, 1, 0, 1]
n = 4
res = ParetoOpt.mutation(a, n)
assert res != a
def test_performance(self):
mse = SparseMSE('example/sparse_regression/sonar.arff')
mse.set_sparsity(8)
# setup objective
# print(mse.get_dim().get_size())
objective = Objective(func=mse.loss, dim=mse.get_dim(), constraint=mse.constraint)
parameter = Parameter(algorithm='poss',
budget=2 * exp(1) * (mse.get_sparsity() ** 2) * mse.get_dim().get_size())
# perform sparse regression with constraint |w|_0 <= k
solution = Opt.min(objective, parameter)
assert solution.get_value()[0] < 0.6
| 34.222222 | 103 | 0.635281
82a23c7dcca3004944c9907e431c1e15ed1de88e | 1,636 | py | Python | gladier/utils/automate.py | rohithj494/gladier | 00fc1cfd0a05f6f18b94b8afd9fef2503d2d3189 | ["Apache-2.0"] | 2 | 2021-01-19T15:53:16.000Z | 2021-02-26T15:56:27.000Z
gladier/utils/automate.py | globus-labs/gladier_tools | 0dc4a23af81a2355a908b9a9026f0e68a527c6dc | ["Apache-2.0"] | 120 | 2021-01-16T16:50:29.000Z | 2022-03-28T14:49:56.000Z
gladier/utils/automate.py | globus-labs/gladier_tools | 0dc4a23af81a2355a908b9a9026f0e68a527c6dc | ["Apache-2.0"] | 3 | 2021-01-30T00:33:05.000Z | 2021-07-28T15:59:28.000Z
import traceback
import logging
from funcx.serialize import FuncXSerializer
log = logging.getLogger(__name__)
automate_response_keys = {'action_id', 'status', 'state_name'}
funcx_response_keys = {'result', 'status', 'exception', 'task_id'}
def is_automate_response(state_output):
return (
isinstance(state_output, dict) and
set(state_output.keys()).intersection(automate_response_keys)
)
def is_funcx_response(state_output):
return (
is_automate_response(state_output) and
set(state_output['details'].keys()).intersection(funcx_response_keys)
)
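# Illustrative sketch (hypothetical data): the predicates above expect Automate-style state
# output such as
#   {'action_id': '...', 'status': 'SUCCEEDED', 'details': {'task_id': '...', 'result': [42]}}
# is_automate_response() keys off the 'action_id'/'status'/'state_name' keys, and
# is_funcx_response() additionally requires funcX keys ('result', 'status', 'exception',
# 'task_id') inside 'details'.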
def get_details(response, state_name=None):
if state_name and is_automate_response(response['details']['output'].get(state_name)):
return response['details']['output'][state_name]
if is_funcx_response(response['details']['output'].get(state_name)):
resp = response['details']['output'][state_name]
if resp.get('exception'):
resp['exception'] = deserialize_exception(resp['exception'])
return resp
for flow_state, data in response['details']['output'].items():
# Reject any output that isn't structured as a response
if not is_funcx_response(data):
continue
if isinstance(data['details'], dict) and data['details'].get('exception'):
exc = deserialize_exception(data['details']['exception'])
data['details']['exception'] = exc
return response
def deserialize_exception(encoded_exc):
try:
FuncXSerializer().deserialize(encoded_exc).reraise()
except Exception:
return traceback.format_exc()
| 32.72 | 90 | 0.687653
855bb245ba430c445b252d4c638c6e1799b176bf | 176 | py | Python | students/K33422/laboratory_works/Daria Plotskaya/lr_3/users/apps.py | olticher/ITMO_ICT_WebDevelopment_2021-2022 | 3de8728c29638d6733ad0664bf13e0d1eccae899 | ["MIT"]
from django.apps import AppConfig
class UsersConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'users'
verbose_name = "Пользователи"
| 22 | 56 | 0.744318
202c116a07206b47d16a770230d567c5e5223b55 | 295 | py | Python | baekjoon/not-classified/1463/1463.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | ["MIT"] | 2 | 2019-02-08T01:23:07.000Z | 2020-11-19T12:23:52.000Z
n = int(input())
d = {}
d[1] = 0
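# Baekjoon 1463: top-down DP with memoization. d[n] holds the minimum number of
# operations (divide by 3, divide by 2, or subtract 1) needed to reduce n to 1.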
def solution(n):
if n in d:
return d[n]
d[n] = solution(n - 1) + 1
if (n % 2 == 0):
d[n] = min(solution(n // 2) + 1, d[n])
if (n % 3 == 0):
d[n] = min(solution(n // 3) + 1, d[n])
return d[n]
print(solution(n))
| 14.75 | 45 | 0.40678
689b8e64355a2fab756df6b312c45a837cf675be | 62,364 | py | Python | ckan/controllers/package.py | jcballesteros/ckan | 312b7a0d44fb1610deb037da5434820ffb698f96 | ["Apache-2.0"]
import logging
from urllib import urlencode
import datetime
import os
import mimetypes
import cgi
from pylons import config
from genshi.template import MarkupTemplate
from genshi.template.text import NewTextTemplate
from paste.deploy.converters import asbool
import paste.fileapp
import ckan.logic as logic
import ckan.lib.base as base
import ckan.lib.maintain as maintain
import ckan.lib.package_saver as package_saver
import ckan.lib.i18n as i18n
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.accept as accept
import ckan.lib.helpers as h
import ckan.model as model
import ckan.lib.datapreview as datapreview
import ckan.lib.plugins
import ckan.lib.uploader as uploader
import ckan.plugins as p
import ckan.lib.render
from ckan.common import OrderedDict, _, json, request, c, g, response
from home import CACHE_PARAMETERS
log = logging.getLogger(__name__)
render = base.render
abort = base.abort
redirect = base.redirect
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
flatten_to_string_key = logic.flatten_to_string_key
lookup_package_plugin = ckan.lib.plugins.lookup_package_plugin
def _encode_params(params):
return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v))
for k, v in params]
def url_with_params(url, params):
params = _encode_params(params)
return url + u'?' + urlencode(params)
def search_url(params, package_type=None):
if not package_type or package_type == 'dataset':
url = h.url_for(controller='package', action='search')
else:
url = h.url_for('{0}_search'.format(package_type))
return url_with_params(url, params)
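# Illustrative sketch (hypothetical values): search_url([('q', 'water'), ('page', 2)])
# would produce something like '/dataset?q=water&page=2' for the default dataset type,
# while a custom package type is routed through its own '<type>_search' named route.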
class PackageController(base.BaseController):
def _package_form(self, package_type=None):
return lookup_package_plugin(package_type).package_form()
def _setup_template_variables(self, context, data_dict, package_type=None):
return lookup_package_plugin(package_type).\
setup_template_variables(context, data_dict)
def _new_template(self, package_type):
return lookup_package_plugin(package_type).new_template()
def _edit_template(self, package_type):
return lookup_package_plugin(package_type).edit_template()
def _search_template(self, package_type):
return lookup_package_plugin(package_type).search_template()
def _read_template(self, package_type):
return lookup_package_plugin(package_type).read_template()
def _history_template(self, package_type):
return lookup_package_plugin(package_type).history_template()
def _guess_package_type(self, expecting_name=False):
"""
Guess the type of package from the URL handling the case
where there is a prefix on the URL (such as /data/package)
"""
# Special case: if the root URL '/' has been redirected to the package
# controller (e.g. by an IRoutes extension) then there's nothing to do
# here.
if request.path == '/':
return 'dataset'
parts = [x for x in request.path.split('/') if x]
idx = -1
if expecting_name:
idx = -2
pt = parts[idx]
if pt == 'package':
pt = 'dataset'
return pt
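# Illustrative sketch: for a request path of '/data/dataset/new' with expecting_name=True
# this returns 'dataset' (the second-to-last segment), and a legacy 'package' segment is
# normalised to 'dataset'.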
def search(self):
from ckan.lib.search import SearchError
package_type = self._guess_package_type()
try:
context = {'model': model, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
check_access('site_read', context)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', u'')
c.query_error = False
try:
page = int(request.params.get('page', 1))
except ValueError, e:
abort(400, ('"page" parameter must be an integer'))
limit = g.datasets_per_page
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(alternative_url=alternative_url,
controller='package', action='search',
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='package', action='search')
c.remove_field = remove_field
sort_by = request.params.get('sort', None)
params_nosort = [(k, v) for k, v in params_nopage if k != 'sort']
def _sort_by(fields):
"""
Sort by the given list of fields.
Each entry in the list is a 2-tuple: (fieldname, sort_order)
eg - [('metadata_modified', 'desc'), ('name', 'asc')]
If fields is empty, then the default ordering is used.
"""
params = params_nosort[:]
if fields:
sort_string = ', '.join('%s %s' % f for f in fields)
params.append(('sort', sort_string))
return search_url(params, package_type)
c.sort_by = _sort_by
if sort_by is None:
c.sort_by_fields = []
else:
c.sort_by_fields = [field.split()[0]
for field in sort_by.split(',')]
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params, package_type)
c.search_url_params = urlencode(_encode_params(params_nopage))
try:
c.fields = []
# c.fields_grouped will contain a dict of params containing
# a list of values eg {'tags':['tag1', 'tag2']}
c.fields_grouped = {}
search_extras = {}
fq = ''
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
fq += ' %s:"%s"' % (param, value)
if param not in c.fields_grouped:
c.fields_grouped[param] = [value]
else:
c.fields_grouped[param].append(value)
else:
search_extras[param] = value
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
if package_type and package_type != 'dataset':
# Only show datasets of this particular type
fq += ' +dataset_type:{type}'.format(type=package_type)
else:
# Unless changed via config options, don't show non standard
# dataset types on the default search page
if not asbool(config.get('ckan.search.show_all_types', 'False')):
fq += ' +dataset_type:dataset'
facets = OrderedDict()
default_facet_titles = {
'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license_id': _('Licenses'),
}
for facet in g.facets:
if facet in default_facet_titles:
facets[facet] = default_facet_titles[facet]
else:
facets[facet] = facet
# Facet titles
for plugin in p.PluginImplementations(p.IFacets):
facets = plugin.dataset_facets(facets, package_type)
c.facet_titles = facets
data_dict = {
'q': q,
'fq': fq.strip(),
'facet.field': facets.keys(),
'rows': limit,
'start': (page - 1) * limit,
'sort': sort_by,
'extras': search_extras
}
query = get_action('package_search')(context, data_dict)
c.sort_by_selected = query['sort']
c.page = h.Page(
collection=query['results'],
page=page,
url=pager_url,
item_count=query['count'],
items_per_page=limit
)
c.facets = query['facets']
c.search_facets = query['search_facets']
c.page.items = query['results']
except SearchError, se:
log.error('Dataset search error: %r', se.args)
c.query_error = True
c.facets = {}
c.search_facets = {}
c.page = h.Page(collection=[])
c.search_facets_limits = {}
for facet in c.search_facets.keys():
try:
limit = int(request.params.get('_%s_limit' % facet,
g.facets_default_number))
except ValueError:
abort(400, _('Parameter "{parameter_name}" is not '
'an integer').format(
parameter_name='_%s_limit' % facet
))
c.search_facets_limits[facet] = limit
maintain.deprecate_context_item(
'facets',
'Use `c.search_facets` instead.')
self._setup_template_variables(context, {},
package_type=package_type)
return render(self._search_template(package_type))
def _content_type_from_extension(self, ext):
ct, mu, ext = accept.parse_extension(ext)
if not ct:
return None, None, None,
return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
def _content_type_from_accept(self):
"""
Given a requested format, this method determines the content-type
to set and the genshi template loader to use in order to render
it accurately. TextTemplate must be used for non-XML templates,
whilst anything that is some form of XML should use MarkupTemplate.
"""
ct, mu, ext = accept.parse_header(request.headers.get('Accept', ''))
return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
def resources(self, id):
package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
check_access('package_update', context, data_dict)
except NotAuthorized, e:
abort(401, _('User %r not authorized to edit %s') % (c.user, id))
# check if package exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
self._setup_template_variables(context, {'id': id},
package_type=package_type)
return render('package/resources.html')
def read(self, id, format='html'):
if not format == 'html':
ctype, extension, loader = \
self._content_type_from_extension(format)
if not ctype:
# An unknown format, we'll carry on in case it is a
# revision specifier and re-constitute the original id
id = "%s.%s" % (id, format)
ctype, format, loader = "text/html; charset=utf-8", "html", \
MarkupTemplate
else:
ctype, format, loader = self._content_type_from_accept()
response.headers['Content-Type'] = ctype
package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
# interpret @<revision_id> or @<date> suffix
split = id.split('@')
if len(split) == 2:
data_dict['id'], revision_ref = split
if model.is_id(revision_ref):
context['revision_id'] = revision_ref
else:
try:
date = h.date_str_to_datetime(revision_ref)
context['revision_date'] = date
except TypeError, e:
abort(400, _('Invalid revision format: %r') % e.args)
except ValueError, e:
abort(400, _('Invalid revision format: %r') % e.args)
elif len(split) > 2:
abort(400, _('Invalid revision format: %r') %
'Too many "@" symbols')
# check if package exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
# used by disqus plugin
c.current_package_id = c.pkg.id
c.related_count = c.pkg.related_count
# can the resources be previewed?
for resource in c.pkg_dict['resources']:
resource['can_be_previewed'] = self._resource_preview(
{'resource': resource, 'package': c.pkg_dict})
self._setup_template_variables(context, {'id': id},
package_type=package_type)
package_saver.PackageSaver().render_package(c.pkg_dict, context)
template = self._read_template(package_type)
template = template[:template.index('.') + 1] + format
try:
return render(template, loader_class=loader)
except ckan.lib.render.TemplateNotFound:
msg = _("Viewing {package_type} datasets in {format} format is "
"not supported (template file {file} not found).".format(
package_type=package_type, format=format, file=template))
abort(404, msg)
assert False, "We should never get here"
def history(self, id):
package_type = self._get_package_type(id.split('@')[0])
if 'diff' in request.params or 'selected1' in request.params:
try:
params = {'id': request.params.getone('pkg_name'),
'diff': request.params.getone('selected1'),
'oldid': request.params.getone('selected2'),
}
except KeyError, e:
if 'pkg_name' in dict(request.params):
id = request.params.getone('pkg_name')
c.error = \
_('Select two revisions before doing the comparison.')
else:
params['diff_entity'] = 'package'
h.redirect_to(controller='revision', action='diff', **params)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg_revisions = get_action('package_revision_list')(context,
data_dict)
# TODO: remove
# Still necessary for the authz check in group/layout.html
c.pkg = context['package']
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
format = request.params.get('format', '')
if format == 'atom':
# Generate and return Atom 1.0 document.
from webhelpers.feedgenerator import Atom1Feed
feed = Atom1Feed(
title=_(u'CKAN Dataset Revision History'),
link=h.url_for(controller='revision', action='read',
id=c.pkg_dict['name']),
description=_(u'Recent changes to CKAN Dataset: ') +
(c.pkg_dict['title'] or ''),
language=unicode(i18n.get_lang()),
)
for revision_dict in c.pkg_revisions:
revision_date = h.date_str_to_datetime(
revision_dict['timestamp'])
try:
dayHorizon = int(request.params.get('days'))
except:
dayHorizon = 30
dayAge = (datetime.datetime.now() - revision_date).days
if dayAge >= dayHorizon:
break
if revision_dict['message']:
item_title = u'%s' % revision_dict['message'].\
split('\n')[0]
else:
item_title = u'%s' % revision_dict['id']
item_link = h.url_for(controller='revision', action='read',
id=revision_dict['id'])
item_description = _('Log message: ')
item_description += '%s' % (revision_dict['message'] or '')
item_author_name = revision_dict['author']
item_pubdate = revision_date
feed.add_item(
title=item_title,
link=item_link,
description=item_description,
author_name=item_author_name,
pubdate=item_pubdate,
)
feed.content_type = 'application/atom+xml'
return feed.writeString('utf-8')
c.related_count = c.pkg.related_count
return render(self._history_template(c.pkg_dict.get('type',
package_type)))
def new(self, data=None, errors=None, error_summary=None):
package_type = self._guess_package_type(True)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params}
# Package needs to have a organization group in the call to
# check_access and also to save it
try:
check_access('package_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a package'))
if context['save'] and not data:
return self._save_new(context, package_type=package_type)
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.params, ignore_keys=CACHE_PARAMETERS))))
c.resources_json = h.json.dumps(data.get('resources', []))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(
h.dict_list_reduce(data.get('tags', {}), 'name'))
errors = errors or {}
error_summary = error_summary or {}
# in the phased add dataset we need to know that
# we have already completed stage 1
stage = ['active']
if data.get('state') == 'draft':
stage = ['active', 'complete']
elif data.get('state') == 'draft-complete':
stage = ['active', 'complete', 'complete']
# if we are creating from a group then this allows the group to be
# set automatically
data['group_id'] = request.params.get('group') or \
request.params.get('groups__0__id')
vars = {'data': data, 'errors': errors,
'error_summary': error_summary,
'action': 'new', 'stage': stage}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {},
package_type=package_type)
# TODO: This check is to maintain backwards compatibility with the
# old way of creating custom forms. This behaviour is now deprecated.
if hasattr(self, 'package_form'):
c.form = render(self.package_form, extra_vars=vars)
else:
c.form = render(self._package_form(package_type=package_type),
extra_vars=vars)
return render(self._new_template(package_type),
extra_vars={'stage': stage})
def resource_edit(self, id, resource_id, data=None, errors=None,
error_summary=None):
if request.method == 'POST' and not data:
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
context = {'model': model, 'session': model.Session,
'api_version': 3, 'for_edit': True,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data['package_id'] = id
try:
if resource_id:
data['id'] = resource_id
get_action('resource_update')(context, data)
else:
get_action('resource_create')(context, data)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.resource_edit(id, resource_id, data,
errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to edit this resource'))
redirect(h.url_for(controller='package', action='resource_read',
id=id, resource_id=resource_id))
context = {'model': model, 'session': model.Session,
'api_version': 3, 'for_edit': True,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
pkg_dict = get_action('package_show')(context, {'id': id})
if pkg_dict['state'].startswith('draft'):
# dataset has not yet been fully created
resource_dict = get_action('resource_show')(context, {'id': resource_id})
fields = ['url', 'resource_type', 'format', 'name', 'description', 'id']
data = {}
for field in fields:
data[field] = resource_dict[field]
return self.new_resource(id, data=data)
# resource is fully created
try:
resource_dict = get_action('resource_show')(context, {'id': resource_id})
except NotFound:
abort(404, _('Resource not found'))
c.pkg_dict = pkg_dict
c.resource = resource_dict
# set the form action
c.form_action = h.url_for(controller='package',
action='resource_edit',
resource_id=resource_id,
id=id)
if not data:
data = resource_dict
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new'}
return render('package/resource_edit.html', extra_vars=vars)
def new_resource(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
resource_id = data['id']
del data['id']
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
# see if we have any data that we are trying to save
data_provided = False
for key, value in data.iteritems():
if ((value or isinstance(value, cgi.FieldStorage))
and key != 'resource_type'):
data_provided = True
break
if not data_provided and save_action != "go-dataset-complete":
if save_action == 'go-dataset':
                    # go back to the first stage of add dataset
redirect(h.url_for(controller='package',
action='edit', id=id))
# see if we have added any resources
try:
data_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to update dataset'))
except NotFound:
abort(404,
_('The dataset {id} could not be found.').format(id=id))
if not len(data_dict['resources']):
# no data so keep on page
msg = _('You must add at least one data resource')
# On new templates do not use flash message
if g.legacy_templates:
h.flash_error(msg)
redirect(h.url_for(controller='package',
action='new_resource', id=id))
else:
errors = {}
error_summary = {_('Error'): msg}
return self.new_resource(id, data, errors, error_summary)
# we have a resource so let them add metadata
redirect(h.url_for(controller='package',
action='new_metadata', id=id))
data['package_id'] = id
try:
if resource_id:
data['id'] = resource_id
get_action('resource_update')(context, data)
else:
get_action('resource_create')(context, data)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new_resource(id, data, errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to create a resource'))
except NotFound:
abort(404,
_('The dataset {id} could not be found.').format(id=id))
if save_action == 'go-metadata':
# go to final stage of add dataset
redirect(h.url_for(controller='package',
action='new_metadata', id=id))
elif save_action == 'go-dataset':
# go to first stage of add dataset
redirect(h.url_for(controller='package',
action='edit', id=id))
elif save_action == 'go-dataset-complete':
                # dataset is complete; go to the dataset read page
redirect(h.url_for(controller='package',
action='read', id=id))
else:
# add more resources
redirect(h.url_for(controller='package',
action='new_resource', id=id))
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new'}
vars['pkg_name'] = id
# get resources for sidebar
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
pkg_dict = get_action('package_show')(context, {'id': id})
except NotFound:
abort(404, _('The dataset {id} could not be found.').format(id=id))
# required for nav menu
vars['pkg_dict'] = pkg_dict
template = 'package/new_resource_not_draft.html'
if pkg_dict['state'] == 'draft':
vars['stage'] = ['complete', 'active']
template = 'package/new_resource.html'
elif pkg_dict['state'] == 'draft-complete':
vars['stage'] = ['complete', 'active', 'complete']
template = 'package/new_resource.html'
return render(template, extra_vars=vars)
def new_metadata(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
data_dict = get_action('package_show')(context, {'id': id})
data_dict['id'] = id
# update the state
if save_action == 'finish':
# we want this to go live when saved
data_dict['state'] = 'active'
elif save_action in ['go-resources', 'go-dataset']:
data_dict['state'] = 'draft-complete'
# allow the state to be changed
context['allow_state_change'] = True
data_dict.update(data)
try:
get_action('package_update')(context, data_dict)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new_metadata(id, data, errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to update dataset'))
if save_action == 'go-resources':
# we want to go back to the add resources form stage
redirect(h.url_for(controller='package',
action='new_resource', id=id))
elif save_action == 'go-dataset':
# we want to go back to the add dataset stage
redirect(h.url_for(controller='package',
action='edit', id=id))
redirect(h.url_for(controller='package', action='read', id=id))
if not data:
data = get_action('package_show')(context, {'id': id})
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
vars['pkg_name'] = id
package_type = self._get_package_type(id)
self._setup_template_variables(context, {},
package_type=package_type)
return render('package/new_package_metadata.html', extra_vars=vars)
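    # Summary of the phased "add dataset" flow implemented by new_resource()
    # and new_metadata() above, as driven by the value of the 'save' button:
    #   new_resource: 'go-dataset' returns to the dataset form (stage 1),
    #       'go-metadata' moves on to new_metadata (stage 3),
    #       'go-dataset-complete' goes straight to the dataset read page,
    #       anything else stays on the add-resource form.
    #   new_metadata: 'finish' sets the dataset state to 'active' (published),
    #       while 'go-resources' and 'go-dataset' set it to 'draft-complete'
    #       and return to the resources form or the dataset form respectively.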
def edit(self, id, data=None, errors=None, error_summary=None):
package_type = self._get_package_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params,
'moderated': config.get('moderated'),
'pending': True}
if context['save'] and not data:
return self._save_edit(id, context, package_type=package_type)
try:
c.pkg_dict = get_action('package_show')(context, {'id': id})
context['for_edit'] = True
old_data = get_action('package_show')(context, {'id': id})
            # old_data comes from the database; data is what the user
            # submitted if there was a validation error. Prefer the user's
            # data when it is present.
if data:
old_data.update(data)
data = old_data
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
# are we doing a multiphase add?
if data.get('state', '').startswith('draft'):
c.form_action = h.url_for(controller='package', action='new')
c.form_style = 'new'
return self.new(data=data, errors=errors,
error_summary=error_summary)
c.pkg = context.get("package")
c.resources_json = h.json.dumps(data.get('resources', []))
try:
check_access('package_update', context)
except NotAuthorized, e:
abort(401, _('User %r not authorized to edit %s') % (c.user, id))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(h.dict_list_reduce(
c.pkg_dict.get('tags', {}), 'name'))
errors = errors or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'edit'}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {'id': id},
package_type=package_type)
c.related_count = c.pkg.related_count
# we have already completed stage 1
vars['stage'] = ['active']
if data.get('state') == 'draft':
vars['stage'] = ['active', 'complete']
elif data.get('state') == 'draft-complete':
vars['stage'] = ['active', 'complete', 'complete']
# TODO: This check is to maintain backwards compatibility with the
# old way of creating custom forms. This behaviour is now deprecated.
if hasattr(self, 'package_form'):
c.form = render(self.package_form, extra_vars=vars)
else:
c.form = render(self._package_form(package_type=package_type),
extra_vars=vars)
return render(self._edit_template(package_type),
extra_vars={'stage': vars['stage']})
def read_ajax(self, id, revision=None):
package_type = self._get_package_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'revision_id': revision}
try:
data = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
data.pop('tags')
data = flatten_to_string_key(data)
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(data)
def history_ajax(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
pkg_revisions = get_action('package_revision_list')(
context, data_dict)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
data = []
approved = False
for num, revision in enumerate(pkg_revisions):
if not approved and revision['approved_timestamp']:
current_approved, approved = True, True
else:
current_approved = False
data.append({'revision_id': revision['id'],
'message': revision['message'],
'timestamp': revision['timestamp'],
'author': revision['author'],
'approved': bool(revision['approved_timestamp']),
'current_approved': current_approved})
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(data)
def _get_package_type(self, id):
"""
        Given the id of a package this returns the package's type name,
        which is used to determine the plugin to load. Packages without an
        explicit type default to 'dataset'. None is returned if the package
        does not exist.
"""
pkg = model.Package.get(id)
if pkg:
return pkg.type or 'dataset'
return None
def _tag_string_to_list(self, tag_string):
        ''' This is used to change tags from a string to a list of dicts '''
out = []
for tag in tag_string.split(','):
tag = tag.strip()
if tag:
out.append({'name': tag,
'state': 'active'})
return out
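    # For example, _tag_string_to_list('economy, health,') returns
    # [{'name': 'economy', 'state': 'active'}, {'name': 'health', 'state': 'active'}];
    # empty fragments left by stray commas are skipped.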
def _save_new(self, context, package_type=None):
        # The staged "add dataset" flow re-uses this save path while the
        # dataset is only partially created, so we need to know whether we
        # are actually updating an existing draft or creating a new dataset.
is_an_update = False
ckan_phase = request.params.get('_ckan_phase')
from ckan.lib.search import SearchIndexError
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
if ckan_phase:
# prevent clearing of groups etc
context['allow_partial_update'] = True
# sort the tags
data_dict['tags'] = self._tag_string_to_list(
data_dict['tag_string'])
if data_dict.get('pkg_name'):
is_an_update = True
# This is actually an update not a save
data_dict['id'] = data_dict['pkg_name']
del data_dict['pkg_name']
# this is actually an edit not a save
pkg_dict = get_action('package_update')(context, data_dict)
if request.params['save'] == 'go-metadata':
# redirect to add metadata
url = h.url_for(controller='package',
action='new_metadata',
id=pkg_dict['name'])
else:
# redirect to add dataset resources
url = h.url_for(controller='package',
action='new_resource',
id=pkg_dict['name'])
redirect(url)
# Make sure we don't index this dataset
if request.params['save'] not in ['go-resource', 'go-metadata']:
data_dict['state'] = 'draft'
# allow the state to be changed
context['allow_state_change'] = True
data_dict['type'] = package_type
context['message'] = data_dict.get('log_message', '')
pkg_dict = get_action('package_create')(context, data_dict)
if ckan_phase:
# redirect to add dataset resources
url = h.url_for(controller='package',
action='new_resource',
id=pkg_dict['name'])
redirect(url)
self._form_save_redirect(pkg_dict['name'], 'new', package_type=package_type)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to add package to search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
if is_an_update:
# we need to get the state of the dataset to show the stage we
# are on.
pkg_dict = get_action('package_show')(context, data_dict)
data_dict['state'] = pkg_dict['state']
return self.edit(data_dict['id'], data_dict,
errors, error_summary)
data_dict['state'] = 'none'
return self.new(data_dict, errors, error_summary)
def _save_edit(self, name_or_id, context, package_type=None):
from ckan.lib.search import SearchIndexError
log.debug('Package save request name: %s POST: %r',
name_or_id, request.POST)
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
if '_ckan_phase' in data_dict:
# we allow partial updates to not destroy existing resources
context['allow_partial_update'] = True
data_dict['tags'] = self._tag_string_to_list(
data_dict['tag_string'])
del data_dict['_ckan_phase']
del data_dict['save']
context['message'] = data_dict.get('log_message', '')
if not context['moderated']:
context['pending'] = False
data_dict['id'] = name_or_id
pkg = get_action('package_update')(context, data_dict)
if request.params.get('save', '') == 'Approve':
get_action('make_latest_pending_package_active')(
context, data_dict)
c.pkg = context['package']
c.pkg_dict = pkg
self._form_save_redirect(pkg['name'], 'edit', package_type=package_type)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to update search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(name_or_id, data_dict, errors, error_summary)
def _form_save_redirect(self, pkgname, action, package_type=None):
'''This redirects the user to the CKAN package/read page,
        unless there is a request parameter giving an alternate location,
perhaps an external website.
@param pkgname - Name of the package just edited
@param action - What the action of the edit was
'''
assert action in ('new', 'edit')
url = request.params.get('return_to') or \
config.get('package_%s_return_url' % action)
if url:
url = url.replace('<NAME>', pkgname)
else:
if package_type is None or package_type == 'dataset':
url = h.url_for(controller='package', action='read', id=pkgname)
else:
url = h.url_for('{0}_read'.format(package_type), id=pkgname)
redirect(url)
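    # Illustrative example (hypothetical config value): with
    #   package_new_return_url = http://example.com/dataset-added?name=<NAME>
    # in the CKAN config, saving a new dataset named 'my-data' redirects to
    # http://example.com/dataset-added?name=my-data; when no return URL is
    # configured the user is sent to the normal dataset read page.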
def _adjust_license_id_options(self, pkg, fs):
options = fs.license_id.render_opts['options']
is_included = False
for option in options:
license_id = option[1]
if license_id == pkg.license_id:
is_included = True
if not is_included:
options.insert(1, (pkg.license_id, pkg.license_id))
def delete(self, id):
if 'cancel' in request.params:
h.redirect_to(controller='package', action='edit', id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
check_access('package_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
try:
if request.method == 'POST':
get_action('package_delete')(context, {'id': id})
h.flash_notice(_('Dataset has been deleted.'))
h.redirect_to(controller='package', action='search')
c.pkg_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
return render('package/confirm_delete.html')
def resource_delete(self, id, resource_id):
if 'cancel' in request.params:
h.redirect_to(controller='package', action='resource_edit', resource_id=resource_id, id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
check_access('package_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
try:
if request.method == 'POST':
get_action('resource_delete')(context, {'id': resource_id})
h.flash_notice(_('Resource has been deleted.'))
h.redirect_to(controller='package', action='read', id=id)
c.resource_dict = get_action('resource_show')(context, {'id': resource_id})
c.pkg_id = id
except NotAuthorized:
abort(401, _('Unauthorized to delete resource %s') % '')
except NotFound:
abort(404, _('Resource not found'))
return render('package/confirm_delete_resource.html')
def autocomplete(self):
# DEPRECATED in favour of /api/2/util/dataset/autocomplete
q = unicode(request.params.get('q', ''))
if not len(q):
return ''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'q': q}
packages = get_action('package_autocomplete')(context, data_dict)
pkg_list = []
for pkg in packages:
pkg_list.append('%s|%s' % (pkg['match_displayed'].
replace('|', ' '), pkg['name']))
return '\n'.join(pkg_list)
def _render_edit_form(self, fs, params={}, clear_session=False):
# errors arrive in c.error and fs.errors
c.log_message = params.get('log_message', '')
# rgrp: expunge everything from session before dealing with
        # validation errors, so we don't have any problematic saves
# when the fs.render causes a flush.
# seb: If the session is *expunged*, then the form can't be
# rendered; I've settled with a rollback for now, which isn't
# necessarily what's wanted here.
# dread: I think this only happened with tags because until
# this changeset, Tag objects were created in the Renderer
# every time you hit preview. So I don't believe we need to
# clear the session any more. Just in case I'm leaving it in
# with the log comments to find out.
if clear_session:
# log to see if clearing the session is ever required
if model.Session.new or model.Session.dirty or \
model.Session.deleted:
log.warn('Expunging session changes which were not expected: '
                         '%r %r %r', model.Session.new, model.Session.dirty,
                         model.Session.deleted)
try:
model.Session.rollback()
except AttributeError:
# older SQLAlchemy versions
model.Session.clear()
edit_form_html = fs.render()
c.form = h.literal(edit_form_html)
return h.literal(render('package/edit_form.html'))
def _update_authz(self, fs):
validation = fs.validate()
if not validation:
c.form = self._render_edit_form(fs, request.params)
raise package_saver.ValidationException(fs)
try:
fs.sync()
except Exception, inst:
model.Session.rollback()
raise
else:
model.Session.commit()
def resource_read(self, id, resource_id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
# required for nav menu
c.pkg = context['package']
c.pkg_dict = c.package
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
# get package license info
license_id = c.package.get('license_id')
try:
c.package['isopen'] = model.Package.\
get_license_register()[license_id].isopen()
except KeyError:
c.package['isopen'] = False
# TODO: find a nicer way of doing this
c.datastore_api = '%s/api/action' % config.get('ckan.site_url', '').rstrip('/')
c.related_count = c.pkg.related_count
c.resource['can_be_previewed'] = self._resource_preview(
{'resource': c.resource, 'package': c.package})
return render('package/resource_read.html')
def _resource_preview(self, data_dict):
return bool(datapreview.res_format(data_dict['resource'])
in datapreview.direct() + datapreview.loadable()
or datapreview.get_preview_plugin(
data_dict, return_first=True))
def resource_download(self, id, resource_id, filename=None):
"""
Provides a direct download by either redirecting the user to the url stored
or downloading an uploaded file directly.
"""
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
rsc = get_action('resource_show')(context, {'id': resource_id})
pkg = get_action('package_show')(context, {'id': id})
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
if rsc.get('url_type') == 'upload':
upload = uploader.ResourceUpload(rsc)
filepath = upload.get_path(rsc['id'])
fileapp = paste.fileapp.FileApp(filepath)
try:
status, headers, app_iter = request.call_application(fileapp)
except OSError:
abort(404, _('Resource data not found'))
response.headers.update(dict(headers))
content_type, content_enc = mimetypes.guess_type(rsc.get('url',''))
response.headers['Content-Type'] = content_type
response.status = status
return app_iter
elif not 'url' in rsc:
abort(404, _('No download is available'))
redirect(rsc['url'])
def follow(self, id):
'''Start following this dataset.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('follow_dataset')(context, data_dict)
package_dict = get_action('package_show')(context, data_dict)
h.flash_success(_("You are now following {0}").format(
package_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except NotAuthorized as e:
h.flash_error(e.extra_msg)
h.redirect_to(controller='package', action='read', id=id)
def unfollow(self, id):
'''Stop following this dataset.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('unfollow_dataset')(context, data_dict)
package_dict = get_action('package_show')(context, data_dict)
h.flash_success(_("You are no longer following {0}").format(
package_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except (NotFound, NotAuthorized) as e:
error_message = e.extra_msg or e.message
h.flash_error(error_message)
h.redirect_to(controller='package', action='read', id=id)
def followers(self, id=None):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
c.followers = get_action('dataset_follower_list')(context,
{'id': c.pkg_dict['id']})
c.related_count = c.pkg.related_count
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
return render('package/followers.html')
def groups(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj, 'use_cache': False}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read dataset %s') % id)
if request.method == 'POST':
new_group = request.POST.get('group_added')
if new_group:
data_dict = {"id": new_group,
"object": id,
"object_type": 'package',
"capacity": 'public'}
try:
get_action('member_create')(context, data_dict)
except NotFound:
abort(404, _('Group not found'))
removed_group = request.POST.get('group_removed')
if removed_group:
data_dict = {"id": removed_group,
"object": id,
"object_type": 'package'}
try:
get_action('member_delete')(context, data_dict)
except NotFound:
abort(404, _('Group not found'))
redirect(h.url_for(controller='package',
action='groups', id=id))
context['is_member'] = True
users_groups = get_action('group_list_authz')(context, data_dict)
pkg_group_ids = set(group['id'] for group
in c.pkg_dict.get('groups', []))
user_group_ids = set(group['id'] for group
in users_groups)
c.group_dropdown = [[group['id'], group['display_name']]
for group in users_groups if
group['id'] not in pkg_group_ids]
for group in c.pkg_dict.get('groups', []):
group['user_member'] = (group['id'] in user_group_ids)
return render('package/group_list.html')
def activity(self, id):
'''Render this package's public activity stream page.'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
c.package_activity_stream = get_action(
'package_activity_list_html')(context,
{'id': c.pkg_dict['id']})
c.related_count = c.pkg.related_count
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read dataset %s') % id)
return render('package/activity.html')
def resource_embedded_dataviewer(self, id, resource_id,
width=500, height=500):
"""
        Embedded page for a read-only resource dataview. Allows
for width and height to be specified as part of the
querystring (as well as accepting them via routes).
"""
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
c.resource_json = h.json.dumps(c.resource)
# double check that the resource belongs to the specified package
if not c.resource['id'] in [r['id']
for r in c.package['resources']]:
raise NotFound
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
# Construct the recline state
state_version = int(request.params.get('state_version', '1'))
recline_state = self._parse_recline_state(request.params)
if recline_state is None:
abort(400, ('"state" parameter must be a valid recline '
'state (version %d)' % state_version))
c.recline_state = h.json.dumps(recline_state)
c.width = max(int(request.params.get('width', width)), 100)
c.height = max(int(request.params.get('height', height)), 100)
c.embedded = True
return render('package/resource_embedded_dataviewer.html')
def _parse_recline_state(self, params):
state_version = int(request.params.get('state_version', '1'))
if state_version != 1:
return None
recline_state = {}
for k, v in request.params.items():
try:
v = h.json.loads(v)
except ValueError:
pass
recline_state[k] = v
recline_state.pop('width', None)
recline_state.pop('height', None)
recline_state['readOnly'] = True
# previous versions of recline setup used elasticsearch_url attribute
# for data api url - see http://trac.ckan.org/ticket/2639
# fix by relocating this to url attribute which is the default location
if 'dataset' in recline_state and 'elasticsearch_url' in recline_state['dataset']:
recline_state['dataset']['url'] = recline_state['dataset']['elasticsearch_url']
# Ensure only the currentView is available
# default to grid view if none specified
if not recline_state.get('currentView', None):
recline_state['currentView'] = 'grid'
for k in recline_state.keys():
if k.startswith('view-') and \
not k.endswith(recline_state['currentView']):
recline_state.pop(k)
return recline_state
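    # Illustrative example (hypothetical query string): a request like
    #   ?state_version=1&width=700&currentView=graph&view-graph={"group":"year"}
    # produces a recline_state of
    #   {'state_version': 1, 'currentView': 'graph',
    #    'view-graph': {'group': 'year'}, 'readOnly': True}
    # -- width/height are dropped, values are JSON-decoded where possible,
    # readOnly is forced on, and any 'view-*' entries that do not match
    # currentView are removed.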
def resource_datapreview(self, id, resource_id):
'''
        Embedded page for a resource data-preview.
Depending on the type, different previews are loaded. This could be an
img tag where the image is loaded directly or an iframe that embeds a
webpage, recline or a pdf preview.
'''
context = {
'model': model,
'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj
}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
data_dict = {'resource': c.resource, 'package': c.package}
preview_plugin = datapreview.get_preview_plugin(data_dict)
if preview_plugin is None:
abort(409, _('No preview has been defined.'))
preview_plugin.setup_template_variables(context, data_dict)
c.resource_json = json.dumps(c.resource)
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
else:
return render(preview_plugin.preview_template(context, data_dict))
| 41.826962
| 103
| 0.549708
|
2fde84402b9257eccc54cdf46e8ddb603708ca95
| 26,135
|
py
|
Python
|
tuning/cactus_tuning.py
|
benedictpaten/cactusTools
|
374b9cbe352d71f111977751f25e6c70c52ab041
|
[
"MIT-0"
] | 2
|
2019-11-17T06:38:17.000Z
|
2020-04-26T09:12:07.000Z
|
tuning/cactus_tuning.py
|
benedictpaten/cactusTools
|
374b9cbe352d71f111977751f25e6c70c52ab041
|
[
"MIT-0"
] | null | null | null |
tuning/cactus_tuning.py
|
benedictpaten/cactusTools
|
374b9cbe352d71f111977751f25e6c70c52ab041
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python
#Copyright (C) 2009-2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
"""Wrapper to run cactus on different combinations of cactus_workflow_config.xml (cactus
parameters) and simulations, followed by evaluations steps.
"""
#nknguyen@soe.ucsc.edu
#05/26/2010
import os, re, sys, time
from optparse import OptionParser
import xml.etree.ElementTree as ET
from jobTree.src.jobTree import runJobTree
from jobTree.scriptTree.target import Target
from sonLib.bioio import logger
from sonLib.bioio import system
from sonLib.bioio import nameValue
from sonLib.bioio import getTempDirectory
from sonLib.bioio import setLogLevel
from cactus.shared.common import cactusRootPath
from cactus.shared.common import runCactusWorkflow
from cactus.shared.common import runCactusMAFGenerator
from cactus.shared.common import runCactusTreeStats
class CactusTuningWrapper(Target):
"""Wrapper to run cactus on different sets of parameters and different simulation data
"""
def __init__(self, options):
Target.__init__(self)
self.options = options
def run(self):
#--------------------------------------------------------------------------------------
#Get parameter sets. For each set, issue job to run cactus on different simulation data
#--------------------------------------------------------------------------------------
setLogLevel("DEBUG")
system("rm -rf %s*" % self.options.outputDir)
logger.info("Remove output directory if exists\n")
#Convert true.mfa of each simulation to maf format
#simTrueMafDir = os.path.join(self.options.outputDir, "sim")
simTrueMafDir = self.options.simTrueMafDir
check_dir(simTrueMafDir)
for sim in self.options.sim:
#convert mfa file of current simulation into MAF format:
sim = modify_dirname(sim)
simName = getRootDir(sim)
trueMAF = os.path.join(simTrueMafDir, "%s_true.maf" %(simName))
if not os.path.exists(trueMAF):
trueMFA = os.path.join(sim, "true.mfa")
runEvalMFAToMAF(trueMFA, trueMAF)
logger.info("Converted true.mfa of simulation %s to %s\n" % (sim, trueMAF))
else:
logger.info("TrueMAF already exists: %s\n" %(trueMAF))
for parameterFile, parameterName in getParameters(self.options.config):
outDir = os.path.join(self.options.outputDir, parameterName)
#system("rm -rf %s" % outDir)
os.mkdir(outDir)
system("mv %s %s/" % (parameterFile, outDir))
logger.info("Created output directory %s for parameter set %s and moved config file to that directory\n" % (outDir, parameterName))
paraFile = os.path.join(outDir, 'param.xml')
statsDir = os.path.join(outDir, "stats")
os.mkdir(statsDir)
logger.info("Created directory for stats files: %s\n" % (statsDir))
self.addChildTarget(CactusTuningSimulationsWrapper(self.options, paraFile, outDir))
logger.info("Added CactusTuningSimulationsWrapper as child for parameter %s\n" %(parameterName))
#Summarize results
#self.setFollowOnTarget(CactusTuningSummary(self.options))
logger.info("Added CactusTuningSummary\n")
class CactusTuningSimulationsWrapper(Target):
"""Run cactus for a set of different simulation data and report results
"""
def __init__(self, options, paraFile, outDir):
Target.__init__(self)
self.options = options
self.paraFile = paraFile
self.outDir = outDir
def run(self):
#--------------------------------------------
#Run cactus & evaluations for each simulation
#--------------------------------------------
logger.info("CactusTuningSimulationsWrapper: going to issue cactus runs for all simulations for parameter %s\n" %(self.paraFile))
simNum = 0
for sim in self.options.sim:
sim = modify_dirname(sim)
simName = getRootDir(sim)
#Get path to sequence file of each species
sequenceFiles = " ".join([ os.path.join(sim, spc) for spc in self.options.species ])
logger.info("Got sequence files: %s\n" % (sequenceFiles))
#add child
#self.addChildTarget(CactusWorkflowWrapper(sim, simNum, self.paraFile, self.outDir, sequenceFiles, self.options.tree))
self.addChildTarget(CactusWorkflowWrapper(sim, simName, self.options.simTrueMafDir, self.paraFile, self.outDir, sequenceFiles, self.options.tree))
logger.info("Added child CactusWorkflowWrapper for sim %s and confi %s\n" % (sim, self.paraFile))
simNum += 1
#----------------------------------------------------------------
#Done running cactus & evaluations steps for all the simulations.
#Now Merge results & clean up.
#----------------------------------------------------------------
logger.info("Done running cactus & evaluations for parameter %s. Now merge results and clean up.\n" %(self.paraFile))
self.setFollowOnTarget(CactusMergeResultsAndCleanup(simNum, self.outDir, self.options))
logger.info("Added CactusMergeResultsAndCleanup as FollowOnTarget for %s\n" %(self.outDir))
class CactusWorkflowWrapper(Target):
"""runCactusWorkFlow and issue child Target to generate MAF for the cactus results
"""
#def __init__(self, simulation, simNum, paraFile, outDir, sequenceFiles, tree):
def __init__(self, simulation, simName, simTrueMafDir, paraFile, outDir, sequenceFiles, tree):
Target.__init__(self)
self.simulation = simulation
#self.simNum = str(simNum)
self.simName = simName
self.simTrueMafDir = simTrueMafDir
self.paraFile = paraFile
self.outDir = outDir
self.sequenceFiles = sequenceFiles
self.tree = tree
def run(self):
#----------------------------------------
# Run cactus_workflow.py and report time#
#----------------------------------------
logger.info("CactusWorkflowWrapper: going to issue cactus run for simulation %s, parameter %s\n" %(self.simulation, self.paraFile))
tempDir = getTempDirectory(self.outDir)
flowerdisk = os.path.join(tempDir, "cactusDisk")
jobtreeDir = os.path.join(tempDir, "jobTree")
#batchSystem = "single_machine"
batchSystem = "parasol"
retryCount = 0
command = "cactus_workflow.py --speciesTree='%s' %s --configFile %s --buildTrees --setupAndBuildAlignments --cactusDisk %s --logDebug --job=JOB_FILE" %(self.tree, self.sequenceFiles, self.paraFile, flowerdisk)
starttime = time.time()
runJobTree(command, jobtreeDir, "DEBUG", retryCount, batchSystem, None)
#runCactusWorkflow(flowerdisk, self.sequenceFiles, self.tree, jobtreeDir, "DEBUG", 0, batchSystem, None, True, True, False, False, self.config)
runtime = time.time() - starttime
logger.info("Done cactus_workflow for simulation %s, config %s\n" %(self.simulation, self.paraFile))
#-----------------------
# Run cactus_treeStats #
#-----------------------
#statsFile = os.path.join(self.outDir, "stats", "%s.xml" % self.simNum)
statsFile = os.path.join(self.outDir, "stats", "%s.xml" % self.simName)
runCactusTreeStats(outputFile=statsFile, cactusDisk=flowerdisk)
#self.addChildCommand(command)
#------------------- Adding child ------------------------#
#self.addChildTarget(CactusMAFGeneratorWrapper(self.outDir, tempDir, self.simNum, runtime))
self.addChildTarget(CactusMAFGeneratorWrapper(self.outDir, tempDir, self.simTrueMafDir, self.simName, runtime))
logger.info("Added child CactusMAFGeneratorWrapper at %s\n" % self.outDir)
#------------------- Cleaning up -------------------------#
self.setFollowOnTarget(CactusWorkflowWrapperCleanup(tempDir))
class CactusMAFGeneratorWrapper(Target):
"""run cactus_MAFGenerator and issue child EvalMafComparatorWrapper
"""
#def __init__(self, outDir, resultsDir, simNum, cactusRunTime):
def __init__(self, outDir, resultsDir, simTrueMafDir, simName, cactusRunTime):
Target.__init__(self)
self.outDir = outDir
self.resultsDir = resultsDir #Directory contains cactus cactusDisk and jobTree
#self.simNum = simNum
self.simTrueMafDir = simTrueMafDir
self.simName = simName
self.cactusRunTime = cactusRunTime
def run(self):
flowerdisk = os.path.join(self.resultsDir, "cactusDisk")
maffile = os.path.join(self.resultsDir, "cactus.maf")
runCactusMAFGenerator(mAFFile = maffile, cactusDisk = flowerdisk)
#truemaffile = os.path.join(self.outDir,"..","sim", "%s_true.maf" %(self.simNum))
#mafCompareFile = os.path.join(self.outDir, "mafCompare%s.xml" %self.simNum)
truemaffile = os.path.join(self.simTrueMafDir, "%s_true.maf" %(self.simName))
mafCompareFile = os.path.join(self.outDir, "mafCompare%s.xml" %self.simName)
self.addChildTarget(EvalMafComparatorWrapper(truemaffile, maffile, mafCompareFile, self.cactusRunTime))
class EvalMafComparatorWrapper(Target):
def __init__(self, maf1, maf2, outputFile, time):
Target.__init__(self)
self.maf1 = maf1
self.maf2 = maf2
self.outputFile = outputFile
self.time = time
def run(self):
sampleNumber = "1000000"
runEvalMAFComparator(self.maf1, self.maf2, self.outputFile, sampleNumber)
        #Add the run time to the results
resultsNode = ET.parse(self.outputFile).getroot()
resultsNode.attrib["time"] = str(self.time)
fileHandle = open(self.outputFile, 'w')
ET.ElementTree(resultsNode).write(fileHandle)
fileHandle.close()
class CactusWorkflowWrapperCleanup(Target):
def __init__(self, dir):
Target.__init__(self)
self.dir = dir
def run(self):
system("rm -rf %s" % self.dir)
logger.info("Clean up tempDir for next run\n")
class CactusMergeResultsAndCleanup(Target):
"""
"""
def __init__(self, count, outDir, options):
Target.__init__(self)
self.count = count #number of files to merge
self.outDir = outDir
self.options = options
def run(self):
mergedFile = os.path.join(self.outDir, "mafCompare.xml")
count = 0
for sim in self.options.sim:
simName = getRootDir(modify_dirname(sim))
currentFile = os.path.join(self.outDir, "mafCompare%s.xml" %simName)
if count == 0:
system("mv %s %s" % (currentFile, mergedFile))
logger.info("Moved %s to %s\n" %(currentFile, mergedFile))
else:
system("mergeMafComparatorResults.py --logLevel DEBUG --results1 %s --results2 %s --outputFile %s" % (mergedFile, currentFile, mergedFile))
logger.info("Merged %s to %s\n" %(currentFile, mergedFile))
count += 1
#system("rm -f %s" % currentFile)
#logger.info("Removed %s\n" %(currentFile))
class CactusTuningSummary(Target):
"""
"""
def __init__(self, options):
Target.__init__(self)
self.options = options
def run(self):
getCactusTuningSummary(self.options.outputDir, self.options.species, self.options.sim)
#============================ Getting parameters =======================================#
def fn(min, max, loops):
if loops == 0 or loops == 1:
return 0
return float(min - max)/(loops - 1) #The value must be zero
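#For example, with minimumTrim=0, maximumTrim=3 and annealingRounds=5,
#fn(0, 3, 5) = (0 - 3) / 4 = -0.75: the configuration starts at the maximum
#value and this per-round change walks it back towards the minimum over the
#remaining annealing rounds.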
def getParameters(startFile):
for minimumTreeCoverage in (0.0,):
for annealingRounds in (5,): #1+trim+minimumChainLength/2):
for lastzThreshold in (1800,):
for (minimumTrim, maximumTrim) in ((0, 3),):
trimChange = fn(minimumTrim, maximumTrim, annealingRounds)
for minimumChainLength, maximumChainLength in ((5,30),):
minimumChainLengthChange = fn(minimumChainLength, maximumChainLength, annealingRounds)
for minimumBlockLength , maximumBlockLength in ((0, 0),):
minimumBlockLengthChange = fn(minimumBlockLength, maximumBlockLength, annealingRounds)
for alignRepeatsAtRound in set((0,)):
for deannealingRounds in set((10,)):
for baseLevel in (True,):
#for minimumTreeCoverage in (0.0,0.5,1.0):
# for annealingRounds in (5,): #1+trim+minimumChainLength/2):
# for lastzThreshold in (1800,2200,2600,3000):
# for (minimumTrim, maximumTrim) in ((0, 3),(1, 3),(2, 3),(3, 5)):
# trimChange = fn(minimumTrim, maximumTrim, annealingRounds)
# for minimumChainLength, maximumChainLength in ((5,30), (5,100), (10,30),(10, 100),(20,30),(20, 100)):
# minimumChainLengthChange = fn(minimumChainLength, maximumChainLength, annealingRounds)
# for minimumBlockLength , maximumBlockLength in ((0, 0),(2, 2),):
# minimumBlockLengthChange = fn(minimumBlockLength, maximumBlockLength, annealingRounds)
# for alignRepeatsAtRound in set((0,)):
# for deannealingRounds in set((10,)):
# for baseLevel in (True, False):
config = ET.parse(startFile).getroot()
iterationNode = config.find("alignment").find("iterations").findall("iteration")[-2]
#node = config.find("alignment").find("iterations").findall("iteration")[-2].find("core")
blastNode = iterationNode.find("blast")
blastNode.attrib["blastString"] = "lastz --format=cigar --hspthresh=%s SEQ_FILE_1[multiple][nameparse=darkspace] SEQ_FILE_2[nameparse=darkspace] > CIGARS_FILE" % lastzThreshold
blastNode.attrib["selfBlastString"]="lastz --format=cigar --hspthresh=%s SEQ_FILE[nameparse=darkspace] --self > CIGARS_FILE" % lastzThreshold
node = iterationNode.find("core")
node.attrib["minimumTreeCoverage"] = str(minimumTreeCoverage)
node.attrib["annealingRounds"] = str(annealingRounds)
node.attrib["trim"] = str(maximumTrim)
node.attrib["trimChange"] = str(trimChange)
node.attrib["alignRepeatsAtRound"] = str(alignRepeatsAtRound)
node.attrib["minimumBlockLength"] = str(maximumBlockLength)
node.attrib["minimumBlockLengthChange"] = str(minimumBlockLengthChange)
node.attrib["minimumChainLength"] = str(maximumChainLength)
node.attrib["minimumChainLengthChange"] = str(minimumChainLengthChange)
node.attrib["deannealingRounds"] = str(deannealingRounds)
#Remove the base alignment stage:
if not baseLevel:
config.find("alignment").find("iterations").remove(config.find("alignment").find("iterations").findall("iteration")[-1])
paramFile = os.path.join(os.getcwd(), "param.xml")
fileHandle = open(paramFile, 'w')
tree = ET.ElementTree(config)
tree.write(fileHandle)
fileHandle.close()
yield (paramFile, ("results_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s" % (minimumTreeCoverage, annealingRounds, maximumTrim, trimChange, alignRepeatsAtRound, maximumChainLength, minimumChainLengthChange, maximumBlockLength, minimumBlockLengthChange, deannealingRounds, lastzThreshold, baseLevel)))
#yield (paramFile, ("results_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s" % (minimumTreeCoverage, annealingRounds, maximumTrim, trimChange, alignRepeatsAtRound, maximumChainLength, minimumChainLengthChange, maximumBlockLength, minimumBlockLengthChange, deannealingRounds)))
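#With the single combination currently left uncommented above, the generator
#yields one (paramFile, name) pair whose name encodes the swept values and
#works out to roughly:
# results_0.0_5_3_-0.75_0_30_-6.25_0_0.0_10_1800_True
#i.e. minimumTreeCoverage, annealingRounds, trim, trimChange,
#alignRepeatsAtRound, chain length, chain length change, block length,
#block length change, deannealingRounds, lastzThreshold, baseLevel.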
#============================ Results summary ==========================================#
def getCactusTuningSummary(dir, species, sim):
l = []
#Use the stat file of first simulation as an estimate for stats of other simulations:
firstSimName = getRootDir(modify_dirname(sim[0]))
for resultsName in os.listdir(dir):
results = os.path.join(dir, resultsName)
statsDir = os.path.join(results, "stats")
#maxBlockMaxDegree = str(getStats(statsDir))
try:
stats = ET.parse(os.path.join(statsDir, "%s.xml" %(firstSimName))).getroot()
config = ET.parse(os.path.join(results, "param.xml")).getroot()
scores = ET.parse(os.path.join(results, "mafCompare.xml")).getroot()
except IOError:
continue
blocksNode = stats.find("blocks")
sensNode = scores.findall("homology_tests")[0]
specNode = scores.findall("homology_tests")[1]
if len(config.find("alignment").find("iterations").findall("iteration")) == 4:
baseLevel = True
iterationNode = config.find("alignment").find("iterations").findall("iteration")[-2]
else:
baseLevel = False
iterationNode = config.find("alignment").find("iterations").findall("iteration")[-1]
node = iterationNode.find("core")
#node = config.find("alignment").find("iterations").findall("iteration")[-2].find("core")
        #node = config.find("alignment").find("iterations").findall("iteration")[-1].find("core") #Because the base level is omitted
l.append((sensNode.attrib["average"], specNode.attrib["average"], node.attrib["minimumTreeCoverage"], node.attrib["annealingRounds"], \
node.attrib["trim"], node.attrib["trimChange"], \
node.attrib["alignRepeatsAtRound"], node.attrib["minimumChainLength"], \
node.attrib["minimumChainLengthChange"], node.attrib["deannealingRounds"], \
node.attrib["minimumBlockLength"], node.attrib["minimumBlockLengthChange"], \
node.attrib["deannealingRounds"],
str(baseLevel),
blocksNode.find("degrees").attrib["max"], scores.attrib["time"], \
fn("HUMAN", "MOUSE", sensNode), fn("HUMAN", "MOUSE", specNode),\
fn("HUMAN", "DOG", sensNode), fn("HUMAN", "DOG", specNode),\
fn("HUMAN", "CHIMP", sensNode), fn("HUMAN", "CHIMP", specNode),\
fn("HUMAN", "BABOON", sensNode), fn("HUMAN", "BABOON", specNode),\
fn("MOUSE", "RAT", sensNode), fn("MOUSE", "RAT", specNode),\
fn("DOG", "COW", sensNode), fn("DOG", "COW", specNode),\
fn("MOUSE", "DOG", sensNode), fn("MOUSE", "DOG", specNode),
stats.find("terminal_group_sizes").attrib["max"],
stats.find("chains").find("base_block_lengths").attrib["median"],
stats.find("chains").find("base_block_lengths").attrib["avg"],
stats.find("chains").find("base_block_lengths").attrib["max"],
fn2("tangles", stats), fn2("links", stats), fn3(iterationNode.find("blast"))))
#currList = [(sensNode.attrib["average"], specNode.attrib["average"], node.attrib["minimumTreeCoverage"], node.attrib["annealingRounds"], \
#node.attrib["trim"], node.attrib["trimChange"], \
#node.attrib["alignRepeatsAtRound"], node.attrib["minimumChainLength"], \
#node.attrib["minimumChainLengthChange"], node.attrib["deannealingRounds"], \
#node.attrib["minimumBlockLength"], node.attrib["minimumBlockLengthChange"], \
#node.attrib["deannealingRounds"], \
#blocksNode.find("degrees").attrib["max"], scores.attrib["time"], \
#stats.find("terminal_group_sizes").attrib["max"], \
#stats.find("chains").find("base_block_lengths").attrib["median"], \
#stats.find("chains").find("base_block_lengths").attrib["avg"], \
#stats.find("chains").find("base_block_lengths").attrib["max"], \
#fn2("tangles", stats), fn2("links", stats))]
#for i in range(len(species) -1):
# for j in range(i+1, len(species)):
# currList.extend(fn1(species[i], species[j], sensNode))
# currList.extend(fn1(species[i], species[j], specNode))
#l.append(currList)
stats = None
config = None
scores = None
l.sort(cmpFn)
l2 = ("sens\t\tspec\t\tmTCo", "AR", "trim", "trimR", "ARaL", "mCL", "mCLI", "mCLUSS", "mBL", "mBLC", "DAR", "BASE", "deg", "time", \
"HMSE", "HMSP", "HDSE", "HDSP", "HCSE", "HCSP", "HBSE", "HBSP", "MRSE", "MRSP", "DCSE", "DCSP", "MDSE", "MDSP", \
"TGS", "CML", "CAL", "CMXL", "NTTN", "NTLN")
#l2 = ["sens", "spec", "mTCo", "AR", "trim", "trimR", "ARaL", "mCL", "mCLI", "mCLUSS", "mBL", "mBLC", "DAR", "deg", "time", \
# "TGS", "CML", "CAL", "CMXL", "NTTN", "NTLN"]
#for i in range(len(species) -1):
# for j in range(i+1, len(species)):
# sensCol = "%s_%s_SE" %(species[i], species[j])
# specCol = "%s_%s_SP" %(species[i], species[j])
# l2.extend((sensCol, specCol))
#"HMSE", "HMSP", "HDSE", "HDSP", "HCSE", "HCSP", "HBSE", "HBSP", "MRSE", "MRSP", "DCSE", "DCSP", "MDSE", "MDSP", \
outFile = os.path.join(dir, "summary")
f = open(outFile, 'w')
f.write("\t".join(l2) + "\n")
for i in l:
f.write("\t".join(i) + "\n")
#for c in i:
# f.write(c + "\t")
#f.write("\n")
f.write("\t".join(l2) + "\n")
f.close()
def fn1(speciesA, speciesB, node):
for hTest in node.findall("homology_test"):
#print hTest.attrib, speciesA, speciesB
if (hTest.attrib["sequenceA"] == speciesA and hTest.attrib["sequenceB"] == speciesB) or \
(hTest.attrib["sequenceA"] == speciesB and hTest.attrib["sequenceB"] == speciesA):
return hTest.attrib["average"]
assert False
def fn2(type, node):
for ends in node.findall("ends"):
if ends.attrib["include_terminal_groups"] == '1' and ends.attrib["include_non_terminal_groups"] == '0':
if type == "tangles" and ends.attrib["include_tangle_groups"] == '1' and ends.attrib["include_link_groups"] == '0':
return ends.find("counts").attrib["total"]
if type == "links" and ends.attrib["include_tangle_groups"] == '0' and ends.attrib["include_link_groups"] == '1':
return ends.find("counts").attrib["total"]
assert False
def fn3(node):
return node.attrib["blastString"].split()[2]
def getStats(statsDir):
maxBlockMaxDegree = 0
statsList = os.listdir(statsDir)
for s in statsList:
statsFile = os.path.join(statsDir, s)
try:
stats = ET.parse(statsFile).getroot()
except IOError:
continue
blocksNode = stats.find("blocks")
blockMaxDegree = int(blocksNode.find("degrees").attrib["max"])
if maxBlockMaxDegree < blockMaxDegree:
maxBlockMaxDegree = blockMaxDegree
return maxBlockMaxDegree
#============================ Utilities functions ======================================#
def runEvalMAFComparator(mafFile1, mafFile2, outputFile, sampleNumber):
command = "mafComparator -b %s -c %s -d %s -e %s" %(mafFile1, mafFile2, outputFile, sampleNumber)
system(command)
logger.info("Compared MAF %s with MAF %s\n" %(mafFile1, mafFile2))
def runEvalMFAToMAF(mfa, maf):
command = "mfaToMaf -b %s -d %s --logLevel DEBUG" %(mfa, maf)
system(command)
logger.info("Converted MFA %s to MAF %s\n" %(mfa, maf))
def cmpFn(a, b):
i = float(a[0])
j = float(b[0])
return cmp(i, j)
def modify_dirname(dir):
"""Add slash / at the end of the directory name if it doesnt have yet"""
if (not re.search('/$', dir)): #not end with /
dir = dir + '/'
return dir
def check_dir(path):
"""Check if directories on the path, and create them if not."""
if not os.path.exists(path):
os.makedirs(path)
def getList(file):
f = open(file, 'r')
list = []
for line in f.readlines():
list.append(line.rstrip())
f.close()
return list
def getFirstLine(file):
f = open(file, 'r')
line = f.readline().rstrip()
f.close()
return line
def getRoot(path):
pathLi = path.split('/')
if len(pathLi) < 1:
return ''
else:
li = pathLi[len(pathLi) -1].split('.')
return li[0]
def getRootDir(path):
pathLi = path.split('/')
if len(pathLi) < 2:
return ''
else:
li = pathLi[len(pathLi) -2].split('.')
return li[0]
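#For example, with a directory such as '/data/simulations/simA/' this returns
#'simA' (the name of the last directory on the path), which is why simulation
#paths are passed through modify_dirname() first to guarantee the trailing
#slash.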
def main():
usg = "Usage: %prog [options]\n"
parser = OptionParser(usage=usg)
parser.add_option("-d", "--simList", dest="sim", help="List of simulation directories. Default: simulations.lst", default="simulations.lst")
parser.add_option("-c", "--configStartFile", dest="config", help="cactus_workflow_config.xml", default="cactus_workflow_config.xml")
parser.add_option("-o", "--outputDir", dest="outputDir", help="Directory for the outputs of the runs. Default: out", default="out/")
parser.add_option("-m", "--simTrueMafDir", dest="simTrueMafDir", help="Directory for 'true' mafs of the simulations. Default: sim/", default="sim/")
parser.add_option("-t", "--tree", dest="tree", help="Phylogeny tree of the species of interest, in Newick format.Default: tree", default="tree")
parser.add_option("-s", "--species", dest="species", help="List of species in the order as they appear in the Newick tree. Default: species.lst", default="species.lst")
parser.add_option("-j", "--job", dest="jobFile", help="Job file containing command to run.", default=None)
(options, args) = parser.parse_args()
#Process options:
options.outputDir = modify_dirname(options.outputDir)
check_dir(options.outputDir)
options.tree = getFirstLine(options.tree)
#assert options.tree == ''
options.species = getFirstLine(options.species).split()
#assert len(options.species) == 0
options.sim = getList(options.sim)
#assert len(options.sim) == 0
#options.config = getList(options.config)
#assert len(options.config) == 0
logger.info("Processed options\n")
#Tuning
cactusTuningWrapper = CactusTuningWrapper(options)
cactusTuningWrapper.execute(options.jobFile)
if __name__ == "__main__":
main()
| 48.578067
| 322
| 0.623646
|
2a503d6bc20830efc2107181ea1fe9c606c94f0d
| 1,437
|
py
|
Python
|
examples/modular.py
|
RickardSjogren/sacred
|
93a0df32ddb22e7634790bda08b530bf7bc45d61
|
[
"MIT"
] | null | null | null |
examples/modular.py
|
RickardSjogren/sacred
|
93a0df32ddb22e7634790bda08b530bf7bc45d61
|
[
"MIT"
] | null | null | null |
examples/modular.py
|
RickardSjogren/sacred
|
93a0df32ddb22e7634790bda08b530bf7bc45d61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""
This is a very basic example of how to use Sacred.
"""
from sacred import Experiment, Ingredient
# ============== Ingredient 0: settings =================
s = Ingredient("settings")
@s.config
def cfg1():
verbose = True
# ============== Ingredient 1: dataset.paths =================
data_paths = Ingredient("dataset.paths", ingredients=[s])
@data_paths.config
def cfg2(settings):
v = not settings['verbose']
base = '/home/sacred/'
# ============== Ingredient 2: dataset =======================
data = Ingredient("dataset", ingredients=[data_paths, s])
@data.config
def cfg3(paths):
basepath = paths['base'] + 'datasets/'
filename = "foo.hdf5"
@data.capture
def foo(basepath, filename, paths, settings):
print(paths)
print(settings)
return basepath + filename
# ============== Experiment ==============================
ex = Experiment('modular_example', ingredients=[data, data_paths])
@ex.config
def cfg(dataset):
a = 10
b = 17
c = a + b
out_base = dataset['paths']['base'] + 'outputs/'
out_filename = dataset['filename'].replace('.hdf5', '.out')
@ex.automain
def main(a, b, c, out_base, out_filename, dataset):
print('a =', a)
print('b =', b)
print('c =', c)
print('out_base =', out_base, out_filename)
# print("dataset", dataset)
# print("dataset.paths", dataset['paths'])
print("foo()", foo())
| 21.447761
| 66
| 0.578984
|
031e735f9d4a1beaed5d151a0902951668f7cd29
| 102,742
|
py
|
Python
|
pyeccodes/defs/grib1/localConcepts/eswi/shortName_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib1/localConcepts/eswi/shortName_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib1/localConcepts/eswi/shortName_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
table2Version = h.get_l('table2Version')
indicatorOfParameter = h.get_l('indicatorOfParameter')
if table2Version == 253 and indicatorOfParameter == 239:
return 'zt'
if table2Version == 253 and indicatorOfParameter == 6:
return 'z'
if table2Version == 253 and indicatorOfParameter == 161:
return 'xhail'
if table2Version == 253 and indicatorOfParameter == 30:
return 'wvsp'
if table2Version == 253 and indicatorOfParameter == 80:
return 'wtmp'
if table2Version == 253 and indicatorOfParameter == 32:
return 'ws'
if table2Version == 253 and indicatorOfParameter == 126:
return 'wmixe'
if table2Version == 253 and indicatorOfParameter == 245:
return 'wevap'
if table2Version == 253 and indicatorOfParameter == 31:
return 'wdir'
if table2Version == 253 and indicatorOfParameter == 193:
return 'w_so_ice'
if table2Version == 253 and indicatorOfParameter == 192:
return 'w_i'
if table2Version == 253 and indicatorOfParameter == 39:
return 'w'
if table2Version == 253 and indicatorOfParameter == 46:
return 'vvcsh'
if table2Version == 253 and indicatorOfParameter == 45:
return 'vucsh'
if table2Version == 253 and indicatorOfParameter == 12:
return 'vptmp'
if table2Version == 253 and indicatorOfParameter == 55:
return 'vp'
if table2Version == 253 and indicatorOfParameter == 43:
return 'vo'
if table2Version == 253 and indicatorOfParameter == 20:
return 'vis'
if table2Version == 253 and indicatorOfParameter == 96:
return 'vice'
if table2Version == 253 and indicatorOfParameter == 163:
return 'vgst'
if table2Version == 253 and indicatorOfParameter == 125:
return 'vflx'
if table2Version == 253 and indicatorOfParameter == 87:
return 'veg'
if table2Version == 253 and indicatorOfParameter == 213:
return 'vdiv'
if table2Version == 253 and indicatorOfParameter == 50:
return 'vcurr'
if table2Version == 253 and indicatorOfParameter == 34:
return 'v'
if table2Version == 253 and indicatorOfParameter == 214:
return 'upom'
if table2Version == 253 and indicatorOfParameter == 216:
return 'upmf'
if table2Version == 253 and indicatorOfParameter == 95:
return 'uice'
if table2Version == 253 and indicatorOfParameter == 162:
return 'ugst'
if table2Version == 253 and indicatorOfParameter == 124:
return 'uflx'
if table2Version == 253 and indicatorOfParameter == 49:
return 'ucurr'
if table2Version == 253 and indicatorOfParameter == 33:
return 'u'
if table2Version == 253 and indicatorOfParameter == 40:
return 'tw'
if table2Version == 253 and indicatorOfParameter == 68:
return 'tthdp'
if table2Version == 253 and indicatorOfParameter == 60:
return 'tstm'
if table2Version == 253 and indicatorOfParameter == 185:
return 'tpsolid'
if table2Version == 253 and indicatorOfParameter == 61:
return 'tp'
if table2Version == 253 and indicatorOfParameter == 167:
return 'totqv'
if table2Version == 253 and indicatorOfParameter == 16:
return 'tmin'
if table2Version == 253 and indicatorOfParameter == 15:
return 'tmax'
if table2Version == 253 and indicatorOfParameter == 200:
return 'tke'
if table2Version == 253 and indicatorOfParameter == 17:
return 'td'
if table2Version == 253 and indicatorOfParameter == 10:
return 'tco'
if table2Version == 253 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 253 and indicatorOfParameter == 25:
return 'ta'
if table2Version == 253 and indicatorOfParameter == 11:
return 't'
if table2Version == 253 and indicatorOfParameter == 238:
return 'swv'
if table2Version == 253 and indicatorOfParameter == 120:
return 'swrad'
if table2Version == 253 and indicatorOfParameter == 106:
return 'swper'
if table2Version == 253 and indicatorOfParameter == 110:
return 'swp'
if table2Version == 253 and indicatorOfParameter == 100:
return 'swh'
if table2Version == 253 and indicatorOfParameter == 105:
return 'swell'
if table2Version == 253 and indicatorOfParameter == 104:
return 'swdir'
if table2Version == 253 and indicatorOfParameter == 116:
return 'swavr'
if table2Version == 253 and indicatorOfParameter == 35:
return 'strf'
if table2Version == 253 and indicatorOfParameter == 220:
return 'stdo'
if table2Version == 253 and indicatorOfParameter == 122:
return 'sshf'
if table2Version == 253 and indicatorOfParameter == 64:
return 'srweq'
if table2Version == 253 and indicatorOfParameter == 83:
return 'srg'
if table2Version == 253 and indicatorOfParameter == 182:
return 'srain'
if table2Version == 253 and indicatorOfParameter == 48:
return 'spc'
if table2Version == 253 and indicatorOfParameter == 246:
return 'snsub'
if table2Version == 253 and indicatorOfParameter == 184:
return 'snow'
if table2Version == 253 and indicatorOfParameter == 99:
return 'snom'
if table2Version == 253 and indicatorOfParameter == 231:
return 'smnr'
if table2Version == 253 and indicatorOfParameter == 86:
return 'sm'
if table2Version == 253 and indicatorOfParameter == 85:
return 'slt'
if table2Version == 253 and indicatorOfParameter == 121:
return 'slhf'
if table2Version == 253 and indicatorOfParameter == 226:
return 'slfr'
if table2Version == 253 and indicatorOfParameter == 237:
return 'sld'
if table2Version == 253 and indicatorOfParameter == 94:
return 'siced'
if table2Version == 253 and indicatorOfParameter == 102:
return 'shww'
if table2Version == 253 and indicatorOfParameter == 247:
return 'shis'
if table2Version == 253 and indicatorOfParameter == 38:
return 'sgcvv'
if table2Version == 253 and indicatorOfParameter == 65:
return 'sf'
if table2Version == 253 and indicatorOfParameter == 235:
return 'se'
if table2Version == 253 and indicatorOfParameter == 66:
return 'sdp'
if table2Version == 253 and indicatorOfParameter == 56:
return 'satd'
if table2Version == 253 and indicatorOfParameter == 88:
return 's'
if table2Version == 253 and indicatorOfParameter == 191:
return 'rsn'
if table2Version == 253 and indicatorOfParameter == 90:
return 'ro'
if table2Version == 253 and indicatorOfParameter == 242:
return 'rmx'
if table2Version == 253 and indicatorOfParameter == 241:
return 'rmn'
if table2Version == 253 and indicatorOfParameter == 240:
return 'rev'
if table2Version == 253 and indicatorOfParameter == 210:
return 'refl'
if table2Version == 253 and indicatorOfParameter == 23:
return 'rdsp'
if table2Version == 253 and indicatorOfParameter == 181:
return 'rain'
if table2Version == 253 and indicatorOfParameter == 52:
return 'r'
if table2Version == 253 and indicatorOfParameter == 51:
return 'q'
if table2Version == 253 and indicatorOfParameter == 54:
return 'pwat'
if table2Version == 253 and indicatorOfParameter == 4:
return 'pv'
if table2Version == 253 and indicatorOfParameter == 3:
return 'ptend'
if table2Version == 253 and indicatorOfParameter == 13:
return 'pt'
if table2Version == 253 and indicatorOfParameter == 138:
return 'pstbc'
if table2Version == 253 and indicatorOfParameter == 137:
return 'pstb'
if table2Version == 253 and indicatorOfParameter == 139:
return 'pscw'
if table2Version == 253 and indicatorOfParameter == 136:
return 'psct'
if table2Version == 253 and indicatorOfParameter == 144:
return 'prtp'
if table2Version == 253 and indicatorOfParameter == 26:
return 'presa'
if table2Version == 253 and indicatorOfParameter == 1:
return 'pres'
if table2Version == 253 and indicatorOfParameter == 59:
return 'prate'
if table2Version == 253 and indicatorOfParameter == 24:
return 'pli'
if table2Version == 253 and indicatorOfParameter == 212:
return 'pdep'
if table2Version == 253 and indicatorOfParameter == 14:
return 'papt'
if table2Version == 253 and indicatorOfParameter == 113:
return 'nswrt'
if table2Version == 253 and indicatorOfParameter == 111:
return 'nswrs'
if table2Version == 253 and indicatorOfParameter == 114:
return 'nlwrt'
if table2Version == 253 and indicatorOfParameter == 112:
return 'nlwrs'
if table2Version == 253 and indicatorOfParameter == 69:
return 'mthd'
if table2Version == 253 and indicatorOfParameter == 70:
return 'mtha'
if table2Version == 253 and indicatorOfParameter == 2:
return 'msl'
if table2Version == 253 and indicatorOfParameter == 133:
return 'msca'
if table2Version == 253 and indicatorOfParameter == 158:
return 'mrad'
if table2Version == 253 and indicatorOfParameter == 103:
return 'mpww'
if table2Version == 253 and indicatorOfParameter == 108:
return 'mpps'
if table2Version == 253 and indicatorOfParameter == 37:
return 'mntsf'
if table2Version == 253 and indicatorOfParameter == 67:
return 'mld'
if table2Version == 253 and indicatorOfParameter == 53:
return 'mixr'
if table2Version == 253 and indicatorOfParameter == 101:
return 'mdww'
if table2Version == 253 and indicatorOfParameter == 107:
return 'mdps'
if table2Version == 253 and indicatorOfParameter == 166:
return 'mcn'
if table2Version == 253 and indicatorOfParameter == 74:
return 'mcc'
if table2Version == 253 and indicatorOfParameter == 119:
return 'lwrad'
if table2Version == 253 and indicatorOfParameter == 115:
return 'lwavr'
if table2Version == 253 and indicatorOfParameter == 62:
return 'lsp'
if table2Version == 253 and indicatorOfParameter == 81:
return 'lsm'
if table2Version == 253 and indicatorOfParameter == 79:
return 'lsf'
if table2Version == 253 and indicatorOfParameter == 244:
return 'lhsub'
if table2Version == 253 and indicatorOfParameter == 132:
return 'lhe'
if table2Version == 253 and indicatorOfParameter == 209:
return 'lgt'
if table2Version == 253 and indicatorOfParameter == 73:
return 'lcc'
if table2Version == 253 and indicatorOfParameter == 19:
return 'lapr'
if table2Version == 253 and indicatorOfParameter == 232:
return 'lai'
if table2Version == 253 and indicatorOfParameter == 127:
return 'imgd'
if table2Version == 253 and indicatorOfParameter == 92:
return 'icetk'
if table2Version == 253 and indicatorOfParameter == 135:
return 'icei'
if table2Version == 253 and indicatorOfParameter == 97:
return 'iceg'
if table2Version == 253 and indicatorOfParameter == 98:
return 'iced'
if table2Version == 253 and indicatorOfParameter == 91:
return 'icec'
if table2Version == 253 and indicatorOfParameter == 5:
return 'icaht'
if table2Version == 253 and indicatorOfParameter == 9:
return 'hstdv'
if table2Version == 253 and indicatorOfParameter == 75:
return 'hcc'
if table2Version == 253 and indicatorOfParameter == 204:
return 'hail'
if table2Version == 253 and indicatorOfParameter == 8:
return 'h'
if table2Version == 253 and indicatorOfParameter == 196:
return 'gwdv'
if table2Version == 253 and indicatorOfParameter == 195:
return 'gwdu'
if table2Version == 253 and indicatorOfParameter == 201:
return 'grpl'
if table2Version == 253 and indicatorOfParameter == 117:
return 'grad'
if table2Version == 253 and indicatorOfParameter == 27:
return 'gpa'
if table2Version == 253 and indicatorOfParameter == 7:
return 'gh'
if table2Version == 253 and indicatorOfParameter == 188:
return 'ful'
if table2Version == 253 and indicatorOfParameter == 129:
return 'frmsp'
if table2Version == 253 and indicatorOfParameter == 228:
return 'fg'
if table2Version == 253 and indicatorOfParameter == 57:
return 'e'
if table2Version == 253 and indicatorOfParameter == 234:
return 'dvi'
if table2Version == 253 and indicatorOfParameter == 243:
return 'dutp'
if table2Version == 253 and indicatorOfParameter == 222:
return 'dtop'
if table2Version == 253 and indicatorOfParameter == 82:
return 'dslm'
if table2Version == 253 and indicatorOfParameter == 215:
return 'dnom'
if table2Version == 253 and indicatorOfParameter == 217:
return 'dnmf'
if table2Version == 253 and indicatorOfParameter == 109:
return 'dirsw'
if table2Version == 253 and indicatorOfParameter == 47:
return 'dirc'
if table2Version == 253 and indicatorOfParameter == 93:
return 'diced'
if table2Version == 253 and indicatorOfParameter == 18:
return 'depr'
if table2Version == 253 and indicatorOfParameter == 89:
return 'den'
if table2Version == 253 and indicatorOfParameter == 44:
return 'd'
if table2Version == 253 and indicatorOfParameter == 76:
return 'cwat'
if table2Version == 253 and indicatorOfParameter == 187:
return 'ct'
if table2Version == 253 and indicatorOfParameter == 130:
return 'cssw'
if table2Version == 253 and indicatorOfParameter == 131:
return 'cslw'
if table2Version == 253 and indicatorOfParameter == 78:
return 'csf'
if table2Version == 253 and indicatorOfParameter == 183:
return 'cr'
if table2Version == 253 and indicatorOfParameter == 250:
return 'co'
if table2Version == 253 and indicatorOfParameter == 225:
return 'clfr'
if table2Version == 253 and indicatorOfParameter == 58:
return 'ciwc'
if table2Version == 253 and indicatorOfParameter == 72:
return 'ccc'
if table2Version == 253 and indicatorOfParameter == 186:
return 'cb'
if table2Version == 253 and indicatorOfParameter == 160:
return 'cape'
if table2Version == 253 and indicatorOfParameter == 118:
return 'btmp'
if table2Version == 253 and indicatorOfParameter == 249:
return 'bo'
if table2Version == 253 and indicatorOfParameter == 77:
return 'bli'
if table2Version == 253 and indicatorOfParameter == 123:
return 'bld'
if table2Version == 253 and indicatorOfParameter == 221:
return 'atop'
if table2Version == 253 and indicatorOfParameter == 190:
return 'asn'
if table2Version == 253 and indicatorOfParameter == 128:
return 'armsp'
if table2Version == 253 and indicatorOfParameter == 248:
return 'ao'
if table2Version == 253 and indicatorOfParameter == 230:
return 'alv'
if table2Version == 253 and indicatorOfParameter == 229:
return 'alb'
if table2Version == 253 and indicatorOfParameter == 84:
return 'al'
if table2Version == 253 and indicatorOfParameter == 251:
return 'aers'
if table2Version == 253 and indicatorOfParameter == 252:
return 'aerl'
if table2Version == 253 and indicatorOfParameter == 254:
return 'aerd'
if table2Version == 253 and indicatorOfParameter == 253:
return 'aerc'
if table2Version == 253 and indicatorOfParameter == 63:
return 'acpcp'
if table2Version == 253 and indicatorOfParameter == 41:
return 'absv'
if table2Version == 253 and indicatorOfParameter == 42:
return 'absd'
if table2Version == 151 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 151 and indicatorOfParameter == 57:
return 'eP'
if table2Version == 151 and indicatorOfParameter == 3:
return 'tp_>50'
if table2Version == 151 and indicatorOfParameter == 2:
return 'tp_10_50'
if table2Version == 151 and indicatorOfParameter == 1:
return 'tp_1_10'
if table2Version == 151 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 150 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 150 and indicatorOfParameter == 58:
return 'spw'
if table2Version == 150 and indicatorOfParameter == 57:
return 'eP'
if table2Version == 150 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 140 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 140 and indicatorOfParameter == 9:
return 'STDC'
if table2Version == 140 and indicatorOfParameter == 8:
return 'SCTCDC'
if table2Version == 140 and indicatorOfParameter == 7:
return 'SCTGDC'
if table2Version == 140 and indicatorOfParameter == 6:
return 'TAAPC'
if table2Version == 140 and indicatorOfParameter == 5:
return 'CTCAAPC'
if table2Version == 140 and indicatorOfParameter == 4:
return 'CTGAAPC'
if table2Version == 140 and indicatorOfParameter == 3:
return 'TDC'
if table2Version == 140 and indicatorOfParameter == 2:
return 'CTCDC'
if table2Version == 140 and indicatorOfParameter == 1:
return 'CTGDC'
if table2Version == 140 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 137 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 137 and indicatorOfParameter == 137:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 136:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 135:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 134:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 133:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 132:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 131:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 130:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 127:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 126:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 125:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 124:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 123:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 122:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 121:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 120:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 117:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 116:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 115:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 114:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 113:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 112:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 111:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 110:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 107:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 106:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 105:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 104:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 103:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 102:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 101:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 100:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 77:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 76:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 75:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 74:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 73:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 72:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 71:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 70:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 67:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 66:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 65:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 64:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 63:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 62:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 61:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 60:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 57:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 56:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 55:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 54:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 53:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 52:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 51:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 50:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 47:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 46:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 45:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 44:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 43:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 42:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 41:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 40:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 37:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 36:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 35:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 34:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 33:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 32:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 31:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 30:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 27:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 26:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 25:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 24:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 23:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 22:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 21:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 20:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 17:
return 'SOX_HIL'
if table2Version == 137 and indicatorOfParameter == 16:
return 'XSOX_TOT'
if table2Version == 137 and indicatorOfParameter == 15:
return 'XSOX_WET'
if table2Version == 137 and indicatorOfParameter == 14:
return 'XSOX_DRY_WA'
if table2Version == 137 and indicatorOfParameter == 13:
return 'XSOX_DRY_UR'
if table2Version == 137 and indicatorOfParameter == 12:
return 'XSOX_DRY_MH'
if table2Version == 137 and indicatorOfParameter == 11:
return 'XSOX_DRY_WE'
if table2Version == 137 and indicatorOfParameter == 10:
return 'XSOX_DRY_PI'
if table2Version == 137 and indicatorOfParameter == 7:
return 'XSOX_DRY_SP'
if table2Version == 137 and indicatorOfParameter == 6:
return 'XSOX_DRY_DE'
if table2Version == 137 and indicatorOfParameter == 5:
return 'XSOX_DRY_BO'
if table2Version == 137 and indicatorOfParameter == 4:
return 'XSOX_DRY_AR'
if table2Version == 137 and indicatorOfParameter == 3:
return 'XSOX_DRY_PA'
if table2Version == 137 and indicatorOfParameter == 2:
return 'XSOX_DRY_MIX'
if table2Version == 137 and indicatorOfParameter == 1:
return 'XSOX_HIL'
if table2Version == 137 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 136 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 136 and indicatorOfParameter == 206:
return 'totO3'
if table2Version == 136 and indicatorOfParameter == 175:
return 'sn_1h'
if table2Version == 136 and indicatorOfParameter == 165:
return 'pr_1h'
if table2Version == 136 and indicatorOfParameter == 120:
return 'PAR'
if table2Version == 136 and indicatorOfParameter == 119:
return 'sun'
if table2Version == 136 and indicatorOfParameter == 118:
return 'BNirr'
if table2Version == 136 and indicatorOfParameter == 117:
return 'GLirr'
if table2Version == 136 and indicatorOfParameter == 116:
return 'UVirr'
if table2Version == 136 and indicatorOfParameter == 91:
return 'icec'
if table2Version == 136 and indicatorOfParameter == 84:
return 'al'
if table2Version == 136 and indicatorOfParameter == 79:
return 'ct_sig'
if table2Version == 136 and indicatorOfParameter == 78:
return 'cb_sig'
if table2Version == 136 and indicatorOfParameter == 77:
return 'cb_sigpr'
if table2Version == 136 and indicatorOfParameter == 73:
return 'lcc'
if table2Version == 136 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 136 and indicatorOfParameter == 66:
return 'sd'
if table2Version == 136 and indicatorOfParameter == 54:
return 'pwat'
if table2Version == 136 and indicatorOfParameter == 51:
return 'q'
if table2Version == 136 and indicatorOfParameter == 11:
return 't'
if table2Version == 136 and indicatorOfParameter == 1:
return 'pres'
if table2Version == 136 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 135 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 135 and indicatorOfParameter == 254:
return 'nlpres'
if table2Version == 135 and indicatorOfParameter == 253:
return 'isor'
if table2Version == 135 and indicatorOfParameter == 252:
return 'gwd'
if table2Version == 135 and indicatorOfParameter == 251:
return 'slsgor'
if table2Version == 135 and indicatorOfParameter == 250:
return 'angsgor'
if table2Version == 135 and indicatorOfParameter == 249:
return 'stdsgor'
if table2Version == 135 and indicatorOfParameter == 248:
return '5wava'
if table2Version == 135 and indicatorOfParameter == 247:
return 'hbpl'
if table2Version == 135 and indicatorOfParameter == 246:
return 'v-gwd'
if table2Version == 135 and indicatorOfParameter == 245:
return 'u-gwd'
if table2Version == 135 and indicatorOfParameter == 244:
return '5wavh'
if table2Version == 135 and indicatorOfParameter == 243:
return 'denalt'
if table2Version == 135 and indicatorOfParameter == 242:
return 'presalt'
if table2Version == 135 and indicatorOfParameter == 241:
return 'thick'
if table2Version == 135 and indicatorOfParameter == 240:
return 'alts'
if table2Version == 135 and indicatorOfParameter == 239:
return 'eta'
if table2Version == 135 and indicatorOfParameter == 238:
return 'cd'
if table2Version == 135 and indicatorOfParameter == 237:
return 'vstm'
if table2Version == 135 and indicatorOfParameter == 236:
return 'ustm'
if table2Version == 135 and indicatorOfParameter == 235:
return 'mflx'
if table2Version == 135 and indicatorOfParameter == 234:
return 'vwsh'
if table2Version == 135 and indicatorOfParameter == 233:
return 'vgust'
if table2Version == 135 and indicatorOfParameter == 232:
return 'ugust'
if table2Version == 135 and indicatorOfParameter == 231:
return 'cswc'
if table2Version == 135 and indicatorOfParameter == 230:
return 'crwc'
if table2Version == 135 and indicatorOfParameter == 229:
return 'ciwc'
if table2Version == 135 and indicatorOfParameter == 228:
return 'clwc'
if table2Version == 135 and indicatorOfParameter == 227:
return 'iprate'
if table2Version == 135 and indicatorOfParameter == 226:
return 'fprate'
if table2Version == 135 and indicatorOfParameter == 225:
return 'sprate'
if table2Version == 135 and indicatorOfParameter == 224:
return 'rprate'
if table2Version == 135 and indicatorOfParameter == 223:
return 'tciwv'
if table2Version == 135 and indicatorOfParameter == 222:
return 'se'
if table2Version == 135 and indicatorOfParameter == 221:
return 'sdwe'
if table2Version == 135 and indicatorOfParameter == 220:
return 'lssrate'
if table2Version == 135 and indicatorOfParameter == 219:
return 'csrate'
if table2Version == 135 and indicatorOfParameter == 218:
return 'tsrate'
if table2Version == 135 and indicatorOfParameter == 217:
return 'prs_gsp'
if table2Version == 135 and indicatorOfParameter == 216:
return 'csrwe'
if table2Version == 135 and indicatorOfParameter == 215:
return 'lsprate'
if table2Version == 135 and indicatorOfParameter == 214:
return 'tcw'
if table2Version == 135 and indicatorOfParameter == 213:
return 'tsnowp'
if table2Version == 135 and indicatorOfParameter == 212:
return 'twatp'
if table2Version == 135 and indicatorOfParameter == 211:
return 'tqs'
if table2Version == 135 and indicatorOfParameter == 210:
return 'tqr'
if table2Version == 135 and indicatorOfParameter == 209:
return 'facra'
if table2Version == 135 and indicatorOfParameter == 208:
return 'fra'
if table2Version == 135 and indicatorOfParameter == 171:
return 'AOD-10000'
if table2Version == 135 and indicatorOfParameter == 170:
return 'AOD-3500'
if table2Version == 135 and indicatorOfParameter == 169:
return 'AOD-1064'
if table2Version == 135 and indicatorOfParameter == 168:
return 'AOD-1020'
if table2Version == 135 and indicatorOfParameter == 167:
return 'AOD-870'
if table2Version == 135 and indicatorOfParameter == 166:
return 'AOD-675'
if table2Version == 135 and indicatorOfParameter == 165:
return 'AOD-532'
if table2Version == 135 and indicatorOfParameter == 164:
return 'AOD-500'
if table2Version == 135 and indicatorOfParameter == 163:
return 'AOD-440'
if table2Version == 135 and indicatorOfParameter == 162:
return 'AOD-380'
if table2Version == 135 and indicatorOfParameter == 161:
return 'AOD-355'
if table2Version == 135 and indicatorOfParameter == 160:
return 'AOD-340'
if table2Version == 135 and indicatorOfParameter == 151:
return 'EXT-10000'
if table2Version == 135 and indicatorOfParameter == 150:
return 'EXT-3500'
if table2Version == 135 and indicatorOfParameter == 149:
return 'EXT-1064'
if table2Version == 135 and indicatorOfParameter == 148:
return 'EXT-1020'
if table2Version == 135 and indicatorOfParameter == 147:
return 'EXT-870'
if table2Version == 135 and indicatorOfParameter == 146:
return 'EXT-675'
if table2Version == 135 and indicatorOfParameter == 145:
return 'EXT-532'
if table2Version == 135 and indicatorOfParameter == 144:
return 'EXT-500'
if table2Version == 135 and indicatorOfParameter == 143:
return 'EXT-440'
if table2Version == 135 and indicatorOfParameter == 142:
return 'EXT-380'
if table2Version == 135 and indicatorOfParameter == 141:
return 'EXT-355'
if table2Version == 135 and indicatorOfParameter == 140:
return 'EXT-340'
if table2Version == 135 and indicatorOfParameter == 131:
return 'BSCA-10000'
if table2Version == 135 and indicatorOfParameter == 130:
return 'BSCA-3500'
if table2Version == 135 and indicatorOfParameter == 129:
return 'BSCA-1064'
if table2Version == 135 and indicatorOfParameter == 128:
return 'BSCA-1020'
if table2Version == 135 and indicatorOfParameter == 127:
return 'BSCA-870'
if table2Version == 135 and indicatorOfParameter == 126:
return 'BSCA-675'
if table2Version == 135 and indicatorOfParameter == 125:
return 'BSCA-532'
if table2Version == 135 and indicatorOfParameter == 124:
return 'BSCA-500'
if table2Version == 135 and indicatorOfParameter == 123:
return 'BSCA-440'
if table2Version == 135 and indicatorOfParameter == 122:
return 'BSCA-380'
if table2Version == 135 and indicatorOfParameter == 121:
return 'BSCA-355'
if table2Version == 135 and indicatorOfParameter == 120:
return 'BSCA-340'
if table2Version == 135 and indicatorOfParameter == 111:
return 'VIS-10000'
if table2Version == 135 and indicatorOfParameter == 110:
return 'VIS-3500'
if table2Version == 135 and indicatorOfParameter == 109:
return 'VIS-1064'
if table2Version == 135 and indicatorOfParameter == 108:
return 'VIS-1020'
if table2Version == 135 and indicatorOfParameter == 107:
return 'VIS-870'
if table2Version == 135 and indicatorOfParameter == 106:
return 'VIS-675'
if table2Version == 135 and indicatorOfParameter == 105:
return 'VIS-532'
if table2Version == 135 and indicatorOfParameter == 104:
return 'VIS-500'
if table2Version == 135 and indicatorOfParameter == 103:
return 'VIS-440'
if table2Version == 135 and indicatorOfParameter == 102:
return 'VIS-380'
if table2Version == 135 and indicatorOfParameter == 101:
return 'VIS-355'
if table2Version == 135 and indicatorOfParameter == 100:
return 'VIS-340'
if table2Version == 135 and indicatorOfParameter == 5:
return 'GRG5'
if table2Version == 135 and indicatorOfParameter == 4:
return 'GRG4'
if table2Version == 135 and indicatorOfParameter == 3:
return 'GRG3'
if table2Version == 135 and indicatorOfParameter == 2:
return 'GRG2'
if table2Version == 135 and indicatorOfParameter == 1:
return 'GRG1'
if table2Version == 135 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 134 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 134 and indicatorOfParameter == 113:
return 'H2CCHCl'
if table2Version == 134 and indicatorOfParameter == 112:
return 'COCl2'
if table2Version == 134 and indicatorOfParameter == 111:
return 'HCN'
if table2Version == 134 and indicatorOfParameter == 110:
return 'SF6'
if table2Version == 134 and indicatorOfParameter == 108:
return 'CH3NH2'
if table2Version == 134 and indicatorOfParameter == 107:
return 'CS2'
if table2Version == 134 and indicatorOfParameter == 106:
return 'Hcl'
if table2Version == 134 and indicatorOfParameter == 105:
return 'HF'
if table2Version == 134 and indicatorOfParameter == 103:
return 'CH2OC2'
if table2Version == 134 and indicatorOfParameter == 102:
return 'CH2OC2H3Cl'
if table2Version == 134 and indicatorOfParameter == 101:
return '(CH3)2NNH2'
if table2Version == 134 and indicatorOfParameter == 100:
return 'CH2CHCN'
if table2Version == 134 and indicatorOfParameter == 92:
return 'TOLUENE'
if table2Version == 134 and indicatorOfParameter == 91:
return 'BIGALK'
if table2Version == 134 and indicatorOfParameter == 90:
return 'BIGENE'
if table2Version == 134 and indicatorOfParameter == 84:
return 'CH2CO2HCH3'
if table2Version == 134 and indicatorOfParameter == 83:
return 'CH2CCH3'
if table2Version == 134 and indicatorOfParameter == 82:
return 'MACOOH'
if table2Version == 134 and indicatorOfParameter == 81:
return 'MACO3H'
if table2Version == 134 and indicatorOfParameter == 80:
return 'MACRO2'
if table2Version == 134 and indicatorOfParameter == 79:
return 'AOH1H'
if table2Version == 134 and indicatorOfParameter == 78:
return 'AOH1'
if table2Version == 134 and indicatorOfParameter == 77:
return 'MACR'
if table2Version == 134 and indicatorOfParameter == 76:
return 'ISNIRH'
if table2Version == 134 and indicatorOfParameter == 75:
return 'ISNIR'
if table2Version == 134 and indicatorOfParameter == 74:
return 'ISNI'
if table2Version == 134 and indicatorOfParameter == 70:
return 'BENZENE'
if table2Version == 134 and indicatorOfParameter == 68:
return 'MVKO2H'
if table2Version == 134 and indicatorOfParameter == 67:
return 'MVKO2'
if table2Version == 134 and indicatorOfParameter == 66:
return 'MVK'
if table2Version == 134 and indicatorOfParameter == 65:
return 'ISRO2H'
if table2Version == 134 and indicatorOfParameter == 64:
return 'OXYO2'
if table2Version == 134 and indicatorOfParameter == 63:
return 'XO2'
if table2Version == 134 and indicatorOfParameter == 62:
return 'IPRO2'
if table2Version == 134 and indicatorOfParameter == 61:
return 'MALO2H'
if table2Version == 134 and indicatorOfParameter == 60:
return 'CH3COCHO2HCH3'
if table2Version == 134 and indicatorOfParameter == 59:
return 'CH3CHOOHCH2OH'
if table2Version == 134 and indicatorOfParameter == 58:
return 'CH2OOHCH2OH'
if table2Version == 134 and indicatorOfParameter == 57:
return 'SECC4H9O2H'
if table2Version == 134 and indicatorOfParameter == 56:
return 'OXYO2H'
if table2Version == 134 and indicatorOfParameter == 55:
return 'CH3COO2H'
if table2Version == 134 and indicatorOfParameter == 54:
return 'C2H5OOH'
if table2Version == 134 and indicatorOfParameter == 53:
return 'ISOPROD'
if table2Version == 134 and indicatorOfParameter == 52:
return 'ISRO2'
if table2Version == 134 and indicatorOfParameter == 51:
return 'MALO2'
if table2Version == 134 and indicatorOfParameter == 50:
return 'MAL'
if table2Version == 134 and indicatorOfParameter == 49:
return 'CH3CHO2CH2OH'
if table2Version == 134 and indicatorOfParameter == 48:
return 'CH2O2CH2OH'
if table2Version == 134 and indicatorOfParameter == 47:
return 'ACETOL'
if table2Version == 134 and indicatorOfParameter == 46:
return 'CH3COCHO2CH3'
if table2Version == 134 and indicatorOfParameter == 45:
return 'SECC4H9O2'
if table2Version == 134 and indicatorOfParameter == 44:
return 'CH3COO2'
if table2Version == 134 and indicatorOfParameter == 43:
return 'C2H5O2'
if table2Version == 134 and indicatorOfParameter == 42:
return 'CH3O2H'
if table2Version == 134 and indicatorOfParameter == 41:
return 'CH3O2'
if table2Version == 134 and indicatorOfParameter == 40:
return '-'
if table2Version == 134 and indicatorOfParameter == 34:
return 'O1D'
if table2Version == 134 and indicatorOfParameter == 33:
return 'O'
if table2Version == 134 and indicatorOfParameter == 32:
return 'H2'
if table2Version == 134 and indicatorOfParameter == 31:
return 'HO2'
if table2Version == 134 and indicatorOfParameter == 30:
return 0
if table2Version == 134 and indicatorOfParameter == 29:
return 'HONO'
if table2Version == 134 and indicatorOfParameter == 28:
return 'ISONO3H'
if table2Version == 134 and indicatorOfParameter == 27:
return 'MPAN'
if table2Version == 134 and indicatorOfParameter == 26:
return 'HO2NO2'
if table2Version == 134 and indicatorOfParameter == 25:
return 'ISONRO2'
if table2Version == 134 and indicatorOfParameter == 24:
return 'ONIT'
if table2Version == 134 and indicatorOfParameter == 23:
return 'N2O5'
if table2Version == 134 and indicatorOfParameter == 22:
return 'NO3'
if table2Version == 134 and indicatorOfParameter == 21:
return 'PAN'
if table2Version == 134 and indicatorOfParameter == 20:
return 0
if table2Version == 134 and indicatorOfParameter == 19:
return 'NMVOC_C'
if table2Version == 134 and indicatorOfParameter == 15:
return 'CH3COOH'
if table2Version == 134 and indicatorOfParameter == 14:
return 'HCOOH'
if table2Version == 134 and indicatorOfParameter == 13:
return 'CH3OH'
if table2Version == 134 and indicatorOfParameter == 12:
return 'C2H5OH'
if table2Version == 134 and indicatorOfParameter == 11:
return 'C5H8'
if table2Version == 134 and indicatorOfParameter == 10:
return 'GLYOX'
if table2Version == 134 and indicatorOfParameter == 9:
return 'MGLYOX'
if table2Version == 134 and indicatorOfParameter == 8:
return 'CH3COC2H5'
if table2Version == 134 and indicatorOfParameter == 7:
return 'CH3CHO'
if table2Version == 134 and indicatorOfParameter == 6:
return 'HCHO'
if table2Version == 134 and indicatorOfParameter == 5:
return 'OXYLENE'
if table2Version == 134 and indicatorOfParameter == 4:
return 'C3H6'
if table2Version == 134 and indicatorOfParameter == 3:
return 'C2H4'
if table2Version == 134 and indicatorOfParameter == 2:
return 'NC4H10'
if table2Version == 134 and indicatorOfParameter == 1:
return 'C2H6'
if table2Version == 134 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 133 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 133 and indicatorOfParameter == 243:
return 'dpt'
if table2Version == 133 and indicatorOfParameter == 239:
return 'tsn'
if table2Version == 133 and indicatorOfParameter == 233:
return 'wcurmean'
if table2Version == 133 and indicatorOfParameter == 232:
return 'vcurmean'
if table2Version == 133 and indicatorOfParameter == 231:
return 'ucurmean'
if table2Version == 133 and indicatorOfParameter == 223:
return 'Rd'
if table2Version == 133 and indicatorOfParameter == 222:
return 'Rh'
if table2Version == 133 and indicatorOfParameter == 221:
return 'hrdg'
if table2Version == 133 and indicatorOfParameter == 220:
return 'hlev'
if table2Version == 133 and indicatorOfParameter == 203:
return 'Kh'
if table2Version == 133 and indicatorOfParameter == 202:
return 'Km'
if table2Version == 133 and indicatorOfParameter == 201:
return 'DTKE'
if table2Version == 133 and indicatorOfParameter == 200:
return 'TKE'
if table2Version == 133 and indicatorOfParameter == 166:
return 'NO3_agg'
if table2Version == 133 and indicatorOfParameter == 165:
return 'flag'
if table2Version == 133 and indicatorOfParameter == 164:
return 'diat'
if table2Version == 133 and indicatorOfParameter == 163:
return 'inorg_mat'
if table2Version == 133 and indicatorOfParameter == 162:
return 'li_wacol'
if table2Version == 133 and indicatorOfParameter == 161:
return 'SiO2_bi'
if table2Version == 133 and indicatorOfParameter == 160:
return 'SiO4'
if table2Version == 133 and indicatorOfParameter == 159:
return 'benP'
if table2Version == 133 and indicatorOfParameter == 158:
return 'benN'
if table2Version == 133 and indicatorOfParameter == 157:
return 'dtr'
if table2Version == 133 and indicatorOfParameter == 156:
return 'zpl'
if table2Version == 133 and indicatorOfParameter == 155:
return 'phpl'
if table2Version == 133 and indicatorOfParameter == 154:
return 'O2'
if table2Version == 133 and indicatorOfParameter == 153:
return 'PO4'
if table2Version == 133 and indicatorOfParameter == 152:
return 'NH4'
if table2Version == 133 and indicatorOfParameter == 151:
return 'NO3'
if table2Version == 133 and indicatorOfParameter == 131:
return 'vsurf'
if table2Version == 133 and indicatorOfParameter == 130:
return 'usurf'
if table2Version == 133 and indicatorOfParameter == 113:
return 'pp1d'
if table2Version == 133 and indicatorOfParameter == 112:
return 'wadir'
if table2Version == 133 and indicatorOfParameter == 111:
return 'mpw'
if table2Version == 133 and indicatorOfParameter == 110:
return 'persw'
if table2Version == 133 and indicatorOfParameter == 109:
return 'dirsw'
if table2Version == 133 and indicatorOfParameter == 108:
return 'perpw'
if table2Version == 133 and indicatorOfParameter == 107:
return 'dirpw'
if table2Version == 133 and indicatorOfParameter == 106:
return 'swper'
if table2Version == 133 and indicatorOfParameter == 105:
return 'shps'
if table2Version == 133 and indicatorOfParameter == 104:
return 'swdir'
if table2Version == 133 and indicatorOfParameter == 103:
return 'mpww'
if table2Version == 133 and indicatorOfParameter == 102:
return 'shww'
if table2Version == 133 and indicatorOfParameter == 101:
return 'wvdir'
if table2Version == 133 and indicatorOfParameter == 100:
return 'swh'
if table2Version == 133 and indicatorOfParameter == 98:
return 'iced'
if table2Version == 133 and indicatorOfParameter == 97:
return 'iceg'
if table2Version == 133 and indicatorOfParameter == 96:
return 'vice'
if table2Version == 133 and indicatorOfParameter == 95:
return 'uice'
if table2Version == 133 and indicatorOfParameter == 94:
return 'siced'
if table2Version == 133 and indicatorOfParameter == 93:
return 'diced'
if table2Version == 133 and indicatorOfParameter == 92:
return 'icetk'
if table2Version == 133 and indicatorOfParameter == 91:
return 'icec'
if table2Version == 133 and indicatorOfParameter == 89:
return 'den'
if table2Version == 133 and indicatorOfParameter == 88:
return 's'
if table2Version == 133 and indicatorOfParameter == 82:
return 'dslm'
if table2Version == 133 and indicatorOfParameter == 80:
return 'wtmp'
if table2Version == 133 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 133 and indicatorOfParameter == 70:
return 'mtha'
if table2Version == 133 and indicatorOfParameter == 69:
return 'mthd'
if table2Version == 133 and indicatorOfParameter == 68:
return 'tthdp'
if table2Version == 133 and indicatorOfParameter == 67:
return 'mld'
if table2Version == 133 and indicatorOfParameter == 66:
return 'sd'
if table2Version == 133 and indicatorOfParameter == 51:
return 'q'
if table2Version == 133 and indicatorOfParameter == 50:
return 'vcur'
if table2Version == 133 and indicatorOfParameter == 49:
return 'ucur'
if table2Version == 133 and indicatorOfParameter == 48:
return 'spdhcur'
if table2Version == 133 and indicatorOfParameter == 47:
return 'dirhcur'
if table2Version == 133 and indicatorOfParameter == 46:
return 'vshv'
if table2Version == 133 and indicatorOfParameter == 45:
return 'vshu'
if table2Version == 133 and indicatorOfParameter == 44:
return 'd'
if table2Version == 133 and indicatorOfParameter == 43:
return 'vo'
if table2Version == 133 and indicatorOfParameter == 42:
return 'absd'
if table2Version == 133 and indicatorOfParameter == 41:
return 'absv'
if table2Version == 133 and indicatorOfParameter == 40:
return 'wcur_ge'
if table2Version == 133 and indicatorOfParameter == 39:
return 'wcur_pr'
if table2Version == 133 and indicatorOfParameter == 38:
return 'sgcvv'
if table2Version == 133 and indicatorOfParameter == 37:
return 'mntsf'
if table2Version == 133 and indicatorOfParameter == 36:
return 'vp'
if table2Version == 133 and indicatorOfParameter == 35:
return 'strf'
if table2Version == 133 and indicatorOfParameter == 34:
return 'v'
if table2Version == 133 and indicatorOfParameter == 33:
return 'u'
if table2Version == 133 and indicatorOfParameter == 32:
return 'ws'
if table2Version == 133 and indicatorOfParameter == 31:
return 'wdir'
if table2Version == 133 and indicatorOfParameter == 30:
return 'wvsp3'
if table2Version == 133 and indicatorOfParameter == 29:
return 'wvsp2'
if table2Version == 133 and indicatorOfParameter == 28:
return 'wvsp1'
if table2Version == 133 and indicatorOfParameter == 13:
return 'pt'
if table2Version == 133 and indicatorOfParameter == 11:
return 't'
if table2Version == 133 and indicatorOfParameter == 1:
return 'MSL'
if table2Version == 133 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 131 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 131 and indicatorOfParameter == 252:
return 'TKEdiss'
if table2Version == 131 and indicatorOfParameter == 251:
return 'TKE'
if table2Version == 131 and indicatorOfParameter == 250:
return 'heat_pr'
if table2Version == 131 and indicatorOfParameter == 246:
return 'icfr_pr'
if table2Version == 131 and indicatorOfParameter == 245:
return 'intic_pr'
if table2Version == 131 and indicatorOfParameter == 244:
return 'icth_ri'
if table2Version == 131 and indicatorOfParameter == 241:
return 'bit_pr'
if table2Version == 131 and indicatorOfParameter == 196:
return 'fl'
if table2Version == 131 and indicatorOfParameter == 183:
return 'icc'
if table2Version == 131 and indicatorOfParameter == 180:
return 'sst'
if table2Version == 131 and indicatorOfParameter == 173:
return 'icth_E'
if table2Version == 131 and indicatorOfParameter == 172:
return 'icth_D'
if table2Version == 131 and indicatorOfParameter == 171:
return 'icth_C'
if table2Version == 131 and indicatorOfParameter == 170:
return 'icth_ABC'
if table2Version == 131 and indicatorOfParameter == 164:
return 'Elake'
if table2Version == 131 and indicatorOfParameter == 163:
return 'Dlake'
if table2Version == 131 and indicatorOfParameter == 162:
return 'Clake'
if table2Version == 131 and indicatorOfParameter == 161:
return 'dp_ABC'
if table2Version == 131 and indicatorOfParameter == 160:
return 'ar_ABC'
if table2Version == 131 and indicatorOfParameter == 153:
return 't_E'
if table2Version == 131 and indicatorOfParameter == 152:
return 't_D'
if table2Version == 131 and indicatorOfParameter == 151:
return 't_C'
if table2Version == 131 and indicatorOfParameter == 150:
return 't_ABC'
if table2Version == 131 and indicatorOfParameter == 92:
return 'icth_pr'
if table2Version == 131 and indicatorOfParameter == 91:
return 'iccLAKE'
if table2Version == 131 and indicatorOfParameter == 66:
return 'sd_pr'
if table2Version == 131 and indicatorOfParameter == 50:
return 'ncurr'
if table2Version == 131 and indicatorOfParameter == 49:
return 'ecurr'
if table2Version == 131 and indicatorOfParameter == 11:
return 'sstLAKE'
if table2Version == 131 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 130 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 130 and indicatorOfParameter == 149:
return 'parmedian'
if table2Version == 130 and indicatorOfParameter == 148:
return 'parmean'
if table2Version == 130 and indicatorOfParameter == 147:
return 'Wsymb'
if table2Version == 130 and indicatorOfParameter == 146:
return 'pcat'
if table2Version == 130 and indicatorOfParameter == 145:
return 'ptype'
if table2Version == 130 and indicatorOfParameter == 143:
return 'parmax'
if table2Version == 130 and indicatorOfParameter == 142:
return 'parmin'
if table2Version == 130 and indicatorOfParameter == 141:
return 'pis'
if table2Version == 130 and indicatorOfParameter == 140:
return 'pit'
if table2Version == 130 and indicatorOfParameter == 139:
return 'parmean2'
if table2Version == 130 and indicatorOfParameter == 138:
return 'parmax2'
if table2Version == 130 and indicatorOfParameter == 137:
return 'parmin2'
if table2Version == 130 and indicatorOfParameter == 136:
return 'ct_sig'
if table2Version == 130 and indicatorOfParameter == 135:
return 'cb_sig'
if table2Version == 130 and indicatorOfParameter == 131:
return 'gust'
if table2Version == 130 and indicatorOfParameter == 130:
return 'maxws'
if table2Version == 130 and indicatorOfParameter == 111:
return 'epststdv'
if table2Version == 130 and indicatorOfParameter == 110:
return 'epstm'
if table2Version == 130 and indicatorOfParameter == 100:
return '2tmax3dind'
if table2Version == 130 and indicatorOfParameter == 77:
return 'cm'
if table2Version == 130 and indicatorOfParameter == 75:
return 'hcc'
if table2Version == 130 and indicatorOfParameter == 74:
return 'mcc'
if table2Version == 130 and indicatorOfParameter == 73:
return 'lcc'
if table2Version == 130 and indicatorOfParameter == 72:
return 'ccc'
if table2Version == 130 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 130 and indicatorOfParameter == 70:
return 'tccarmean'
if table2Version == 130 and indicatorOfParameter == 69:
return 'tccarmedian'
if table2Version == 130 and indicatorOfParameter == 68:
return 'tccarmax'
if table2Version == 130 and indicatorOfParameter == 67:
return 'tccarmin'
if table2Version == 130 and indicatorOfParameter == 65:
return 'sdwe'
if table2Version == 130 and indicatorOfParameter == 61:
return 'tp'
if table2Version == 130 and indicatorOfParameter == 60:
return 'tstm'
if table2Version == 130 and indicatorOfParameter == 58:
return 'fzrpr'
if table2Version == 130 and indicatorOfParameter == 52:
return 'r'
if table2Version == 130 and indicatorOfParameter == 34:
return 'v'
if table2Version == 130 and indicatorOfParameter == 33:
return 'u'
if table2Version == 130 and indicatorOfParameter == 20:
return 'vis'
if table2Version == 130 and indicatorOfParameter == 11:
return 't'
if table2Version == 130 and indicatorOfParameter == 1:
return 'msl'
if table2Version == 130 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 129 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 129 and indicatorOfParameter == 239:
return 'frsn15h_corsta'
if table2Version == 129 and indicatorOfParameter == 238:
return 'frsn9h_corsta'
if table2Version == 129 and indicatorOfParameter == 237:
return 'frsn3h_corsta'
if table2Version == 129 and indicatorOfParameter == 236:
return 'frsn2h_corsta'
if table2Version == 129 and indicatorOfParameter == 235:
return 'frsn1h_corsta'
if table2Version == 129 and indicatorOfParameter == 234:
return 'frsn24h_corsta'
if table2Version == 129 and indicatorOfParameter == 233:
return 'frsn18h_corsta'
if table2Version == 129 and indicatorOfParameter == 232:
return 'frsn12h_corsta'
if table2Version == 129 and indicatorOfParameter == 231:
return 'frsn6h_corsta'
if table2Version == 129 and indicatorOfParameter == 229:
return 'prec15h_corsta'
if table2Version == 129 and indicatorOfParameter == 228:
return 'prec9h_corsta'
if table2Version == 129 and indicatorOfParameter == 227:
return 'prec3h_corsta'
if table2Version == 129 and indicatorOfParameter == 226:
return 'prec2h_corsta'
if table2Version == 129 and indicatorOfParameter == 225:
return 'prec1h_corsta'
if table2Version == 129 and indicatorOfParameter == 224:
return 'prec24h_corsta'
if table2Version == 129 and indicatorOfParameter == 223:
return 'prec18h_corsta'
if table2Version == 129 and indicatorOfParameter == 222:
return 'prec12h_corsta'
if table2Version == 129 and indicatorOfParameter == 221:
return 'prec6h_corsta'
if table2Version == 129 and indicatorOfParameter == 219:
return 'frsn15h_sta'
if table2Version == 129 and indicatorOfParameter == 218:
return 'frsn9h_sta'
if table2Version == 129 and indicatorOfParameter == 217:
return 'frsn3h_sta'
if table2Version == 129 and indicatorOfParameter == 216:
return 'frsn2h_sta'
if table2Version == 129 and indicatorOfParameter == 215:
return 'frsn1h_sta'
if table2Version == 129 and indicatorOfParameter == 214:
return 'frsn24h_sta'
if table2Version == 129 and indicatorOfParameter == 213:
return 'frsn18h_sta'
if table2Version == 129 and indicatorOfParameter == 212:
return 'frsn12h_sta'
if table2Version == 129 and indicatorOfParameter == 211:
return 'frsn6h_sta'
if table2Version == 129 and indicatorOfParameter == 209:
return 'prec15h_sta'
if table2Version == 129 and indicatorOfParameter == 208:
return 'prec9h_sta'
if table2Version == 129 and indicatorOfParameter == 207:
return 'prec3h_sta'
if table2Version == 129 and indicatorOfParameter == 206:
return 'prec2h_sta'
if table2Version == 129 and indicatorOfParameter == 205:
return 'prec1h_sta'
if table2Version == 129 and indicatorOfParameter == 204:
return 'prec24h_sta'
if table2Version == 129 and indicatorOfParameter == 203:
return 'prec18h_sta'
if table2Version == 129 and indicatorOfParameter == 202:
return 'prec12h_sta'
if table2Version == 129 and indicatorOfParameter == 201:
return 'prec6h_sta'
if table2Version == 129 and indicatorOfParameter == 199:
return 'frsn15h_cor'
if table2Version == 129 and indicatorOfParameter == 198:
return 'frsn9h_cor'
if table2Version == 129 and indicatorOfParameter == 197:
return 'frsn3h_cor'
if table2Version == 129 and indicatorOfParameter == 196:
return 'frsn2h_cor'
if table2Version == 129 and indicatorOfParameter == 195:
return 'frsn1h_cor'
if table2Version == 129 and indicatorOfParameter == 194:
return 'frsn24h_cor'
if table2Version == 129 and indicatorOfParameter == 193:
return 'frsn18h_cor'
if table2Version == 129 and indicatorOfParameter == 192:
return 'frsn12h_cor'
if table2Version == 129 and indicatorOfParameter == 191:
return 'frsn6h_cor'
if table2Version == 129 and indicatorOfParameter == 189:
return 'prec15h_cor'
if table2Version == 129 and indicatorOfParameter == 188:
return 'prec9h_cor'
if table2Version == 129 and indicatorOfParameter == 187:
return 'prec3h_cor'
if table2Version == 129 and indicatorOfParameter == 186:
return 'prec2h_cor'
if table2Version == 129 and indicatorOfParameter == 185:
return 'prec1h_cor'
if table2Version == 129 and indicatorOfParameter == 184:
return 'prec24h_cor'
if table2Version == 129 and indicatorOfParameter == 183:
return 'prec18h_cor'
if table2Version == 129 and indicatorOfParameter == 182:
return 'prec12h_cor'
if table2Version == 129 and indicatorOfParameter == 181:
return 'prec6h_cor'
if table2Version == 129 and indicatorOfParameter == 179:
return 'frsn15h'
if table2Version == 129 and indicatorOfParameter == 178:
return 'frsn9h'
if table2Version == 129 and indicatorOfParameter == 177:
return 'frsn3h'
if table2Version == 129 and indicatorOfParameter == 176:
return 'frsn2h'
if table2Version == 129 and indicatorOfParameter == 175:
return 'frsn1h'
if table2Version == 129 and indicatorOfParameter == 174:
return 'frsn24h'
if table2Version == 129 and indicatorOfParameter == 173:
return 'frsn18h'
if table2Version == 129 and indicatorOfParameter == 172:
return 'frsn12h'
if table2Version == 129 and indicatorOfParameter == 171:
return 'frsn6h'
if table2Version == 129 and indicatorOfParameter == 169:
return 'prec15h'
if table2Version == 129 and indicatorOfParameter == 168:
return 'prec9h'
if table2Version == 129 and indicatorOfParameter == 167:
return 'prec3h'
if table2Version == 129 and indicatorOfParameter == 166:
return 'prec2h'
if table2Version == 129 and indicatorOfParameter == 165:
return 'prec1h'
if table2Version == 129 and indicatorOfParameter == 164:
return 'prec24h'
if table2Version == 129 and indicatorOfParameter == 163:
return 'prec18h'
if table2Version == 129 and indicatorOfParameter == 162:
return 'prec12h'
if table2Version == 129 and indicatorOfParameter == 161:
return 'prec6h'
if table2Version == 129 and indicatorOfParameter == 146:
return 'prsort'
if table2Version == 129 and indicatorOfParameter == 145:
return 'prtype'
if table2Version == 129 and indicatorOfParameter == 79:
return 'ct_sig'
if table2Version == 129 and indicatorOfParameter == 78:
return 'cb_sig'
if table2Version == 129 and indicatorOfParameter == 77:
return 'c_sigfr'
if table2Version == 129 and indicatorOfParameter == 75:
return 'hcc'
if table2Version == 129 and indicatorOfParameter == 74:
return 'mcc'
if table2Version == 129 and indicatorOfParameter == 73:
return 'lcc'
if table2Version == 129 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 129 and indicatorOfParameter == 52:
return 'r'
if table2Version == 129 and indicatorOfParameter == 34:
return 'v'
if table2Version == 129 and indicatorOfParameter == 33:
return 'u'
if table2Version == 129 and indicatorOfParameter == 32:
return 'gust'
if table2Version == 129 and indicatorOfParameter == 20:
return 'vis'
if table2Version == 129 and indicatorOfParameter == 16:
return 'tmin'
if table2Version == 129 and indicatorOfParameter == 15:
return 'tmax'
if table2Version == 129 and indicatorOfParameter == 13:
return 'mean2t24h'
if table2Version == 129 and indicatorOfParameter == 12:
return 'Tiw'
if table2Version == 129 and indicatorOfParameter == 11:
return 't'
if table2Version == 129 and indicatorOfParameter == 1:
return 'MSL'
if table2Version == 129 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 128 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 128 and indicatorOfParameter == 242:
return 'LAT'
if table2Version == 128 and indicatorOfParameter == 241:
return 'LONG'
if table2Version == 128 and indicatorOfParameter == 240:
return 'EMIS'
if table2Version == 128 and indicatorOfParameter == 223:
return 'DXDY'
if table2Version == 128 and indicatorOfParameter == 222:
return 'CONV_TOP'
if table2Version == 128 and indicatorOfParameter == 221:
return 'CONV_BOT'
if table2Version == 128 and indicatorOfParameter == 220:
return 'CONV_TIED'
if table2Version == 128 and indicatorOfParameter == 219:
return 'DAOD'
if table2Version == 128 and indicatorOfParameter == 218:
return 'AOD'
if table2Version == 128 and indicatorOfParameter == 217:
return 'BSCA'
if table2Version == 128 and indicatorOfParameter == 216:
return 'EXT'
if table2Version == 128 and indicatorOfParameter == 215:
return 'VIS'
if table2Version == 128 and indicatorOfParameter == 214:
return 'ASYMPAR'
if table2Version == 128 and indicatorOfParameter == 213:
return 'SSALB'
if table2Version == 128 and indicatorOfParameter == 212:
return 'SOILTYPE'
if table2Version == 128 and indicatorOfParameter == 211:
return 'LAI'
if table2Version == 128 and indicatorOfParameter == 210:
return 'SURFTYPE'
if table2Version == 128 and indicatorOfParameter == 204:
return 'Z-D'
if table2Version == 128 and indicatorOfParameter == 203:
return 'W*'
if table2Version == 128 and indicatorOfParameter == 202:
return 'U*'
if table2Version == 128 and indicatorOfParameter == 201:
return 'L'
if table2Version == 128 and indicatorOfParameter == 200:
return 'KZ'
if table2Version == 128 and indicatorOfParameter == 180:
return 'BIRCH_POLLEN'
if table2Version == 128 and indicatorOfParameter == 175:
return 'PM'
if table2Version == 128 and indicatorOfParameter == 174:
return 'PM2.5'
if table2Version == 128 and indicatorOfParameter == 173:
return 'SOA'
if table2Version == 128 and indicatorOfParameter == 172:
return 'PPM10'
if table2Version == 128 and indicatorOfParameter == 171:
return 'PPMFINE'
if table2Version == 128 and indicatorOfParameter == 170:
return 'PNHX'
if table2Version == 128 and indicatorOfParameter == 169:
return 'PNOX'
if table2Version == 128 and indicatorOfParameter == 168:
return 'PSOX'
if table2Version == 128 and indicatorOfParameter == 167:
return 'PM10'
if table2Version == 128 and indicatorOfParameter == 166:
return 'PMASS'
if table2Version == 128 and indicatorOfParameter == 165:
return 'PSURFACE'
if table2Version == 128 and indicatorOfParameter == 164:
return 'PRADIUS'
if table2Version == 128 and indicatorOfParameter == 163:
return 'PNUMBER'
if table2Version == 128 and indicatorOfParameter == 162:
return 'DUST'
if table2Version == 128 and indicatorOfParameter == 161:
return 'PMCOARSE'
if table2Version == 128 and indicatorOfParameter == 160:
return 'PMFINE'
if table2Version == 128 and indicatorOfParameter == 140:
return 'Cl2'
if table2Version == 128 and indicatorOfParameter == 128:
return 'XCA'
if table2Version == 128 and indicatorOfParameter == 126:
return 'XK'
if table2Version == 128 and indicatorOfParameter == 125:
return 'XMG'
if table2Version == 128 and indicatorOfParameter == 124:
return 'Ca++'
if table2Version == 128 and indicatorOfParameter == 123:
return 'K+'
if table2Version == 128 and indicatorOfParameter == 122:
return 'Mg++'
if table2Version == 128 and indicatorOfParameter == 121:
return 'Na+'
if table2Version == 128 and indicatorOfParameter == 120:
return 'NACL'
if table2Version == 128 and indicatorOfParameter == 119:
return 'ALL'
if table2Version == 128 and indicatorOfParameter == 116:
return 'Pb210'
if table2Version == 128 and indicatorOfParameter == 115:
return 'Pu241'
if table2Version == 128 and indicatorOfParameter == 114:
return 'Np239'
if table2Version == 128 and indicatorOfParameter == 113:
return 'Np238'
if table2Version == 128 and indicatorOfParameter == 112:
return 'Ce144'
if table2Version == 128 and indicatorOfParameter == 111:
return 'Nb95'
if table2Version == 128 and indicatorOfParameter == 110:
return 'Zr95'
if table2Version == 128 and indicatorOfParameter == 108:
return 'Ra228'
if table2Version == 128 and indicatorOfParameter == 106:
return 'Ra223'
if table2Version == 128 and indicatorOfParameter == 105:
return 'Cs137'
if table2Version == 128 and indicatorOfParameter == 104:
return 'Cs134'
if table2Version == 128 and indicatorOfParameter == 103:
return 'Ru106'
if table2Version == 128 and indicatorOfParameter == 102:
return 'Ru103'
if table2Version == 128 and indicatorOfParameter == 101:
return 'Co60'
if table2Version == 128 and indicatorOfParameter == 100:
return 'Sr90'
if table2Version == 128 and indicatorOfParameter == 98:
return 'I135'
if table2Version == 128 and indicatorOfParameter == 97:
return 'I133'
if table2Version == 128 and indicatorOfParameter == 96:
return 'I132'
if table2Version == 128 and indicatorOfParameter == 95:
return 'I131'
if table2Version == 128 and indicatorOfParameter == 93:
return 'Rn222'
if table2Version == 128 and indicatorOfParameter == 92:
return 'Xe133'
if table2Version == 128 and indicatorOfParameter == 91:
return 'Xe131'
if table2Version == 128 and indicatorOfParameter == 88:
return 'Kr88'
if table2Version == 128 and indicatorOfParameter == 87:
return 'Kr85'
if table2Version == 128 and indicatorOfParameter == 86:
return 'Ar41'
if table2Version == 128 and indicatorOfParameter == 85:
return 'H3'
if table2Version == 128 and indicatorOfParameter == 84:
return 'Inert'
if table2Version == 128 and indicatorOfParameter == 83:
return 'TRACER'
if table2Version == 128 and indicatorOfParameter == 82:
return 'PMCP'
if table2Version == 128 and indicatorOfParameter == 81:
return 'PMCH'
if table2Version == 128 and indicatorOfParameter == 80:
return 'CF6'
if table2Version == 128 and indicatorOfParameter == 75:
return 'EC'
if table2Version == 128 and indicatorOfParameter == 74:
return 'OC'
if table2Version == 128 and indicatorOfParameter == 73:
return 'CH4'
if table2Version == 128 and indicatorOfParameter == 72:
return 'CO2'
if table2Version == 128 and indicatorOfParameter == 71:
return 'CO'
if table2Version == 128 and indicatorOfParameter == 70:
return 'C'
if table2Version == 128 and indicatorOfParameter == 65:
return 'OX'
if table2Version == 128 and indicatorOfParameter == 64:
return 'H2O2_AQ'
if table2Version == 128 and indicatorOfParameter == 63:
return 'O3_AQ'
if table2Version == 128 and indicatorOfParameter == 62:
return 'OH'
if table2Version == 128 and indicatorOfParameter == 61:
return 'H2O2'
if table2Version == 128 and indicatorOfParameter == 60:
return 'O3'
if table2Version == 128 and indicatorOfParameter == 59:
return 'NHX_N'
if table2Version == 128 and indicatorOfParameter == 58:
return 'LRT_NHX_N'
if table2Version == 128 and indicatorOfParameter == 57:
return 'LRT_NH4_N'
if table2Version == 128 and indicatorOfParameter == 56:
return 'LRT_NH3_N'
if table2Version == 128 and indicatorOfParameter == 55:
return 'NH4_N'
if table2Version == 128 and indicatorOfParameter == 54:
return 'NH3_N'
if table2Version == 128 and indicatorOfParameter == 52:
return 'AMMONIUM'
if table2Version == 128 and indicatorOfParameter == 51:
return 'NH4(+1)'
if table2Version == 128 and indicatorOfParameter == 50:
return 'NH3'
if table2Version == 128 and indicatorOfParameter == 49:
return 'NOZ_N'
if table2Version == 128 and indicatorOfParameter == 48:
return 'NOY_N'
if table2Version == 128 and indicatorOfParameter == 47:
return 'NOX_N'
if table2Version == 128 and indicatorOfParameter == 46:
return 'NO2_N'
if table2Version == 128 and indicatorOfParameter == 45:
return 'NO_N'
if table2Version == 128 and indicatorOfParameter == 44:
return 'NOX'
if table2Version == 128 and indicatorOfParameter == 43:
return 'LRT_NOZ_N'
if table2Version == 128 and indicatorOfParameter == 42:
return 'LRT_NO2_N'
if table2Version == 128 and indicatorOfParameter == 41:
return 'LRT_HNO3_N'
if table2Version == 128 and indicatorOfParameter == 40:
return 'LRT_NO3_N'
if table2Version == 128 and indicatorOfParameter == 39:
return 'HNO3_N'
if table2Version == 128 and indicatorOfParameter == 38:
return 'NO3_N'
if table2Version == 128 and indicatorOfParameter == 37:
return 'LRT_NOY_N'
if table2Version == 128 and indicatorOfParameter == 36:
return 'PNO3'
if table2Version == 128 and indicatorOfParameter == 35:
return 'NITRATE'
if table2Version == 128 and indicatorOfParameter == 34:
return 'NH4NO3'
if table2Version == 128 and indicatorOfParameter == 33:
return 'NO3(-1)'
if table2Version == 128 and indicatorOfParameter == 32:
return 'HNO3'
if table2Version == 128 and indicatorOfParameter == 31:
return 'NO2'
if table2Version == 128 and indicatorOfParameter == 30:
return 'NO'
if table2Version == 128 and indicatorOfParameter == 29:
return 'SOX_S'
if table2Version == 128 and indicatorOfParameter == 28:
return 'SO4_S'
if table2Version == 128 and indicatorOfParameter == 27:
return 'SO2_S'
if table2Version == 128 and indicatorOfParameter == 26:
return 'XSOX_S'
if table2Version == 128 and indicatorOfParameter == 25:
return 'LRT_SOX_S'
if table2Version == 128 and indicatorOfParameter == 24:
return 'LRT_SO4_S'
if table2Version == 128 and indicatorOfParameter == 23:
return 'LRT_SO2_S'
if table2Version == 128 and indicatorOfParameter == 11:
return 'SO4_AQ'
if table2Version == 128 and indicatorOfParameter == 10:
return 'SO2_AQ'
if table2Version == 128 and indicatorOfParameter == 9:
return 'SFT'
if table2Version == 128 and indicatorOfParameter == 8:
return 'NH42SO4'
if table2Version == 128 and indicatorOfParameter == 7:
return 'NH4HSO4'
if table2Version == 128 and indicatorOfParameter == 6:
return 'NH4SO4'
if table2Version == 128 and indicatorOfParameter == 5:
return 'H2S'
if table2Version == 128 and indicatorOfParameter == 4:
return 'MSA'
if table2Version == 128 and indicatorOfParameter == 3:
return 'DMS'
if table2Version == 128 and indicatorOfParameter == 2:
return 'SO4(2-)'
if table2Version == 128 and indicatorOfParameter == 1:
return 'SO2'
if table2Version == 128 and indicatorOfParameter == 0:
return 'Reserved'
if table2Version == 1 and indicatorOfParameter == 255:
return 'Missing'
if table2Version == 1 and indicatorOfParameter == 251:
return 'anpr12'
if table2Version == 1 and indicatorOfParameter == 250:
return 'anpr3'
if table2Version == 1 and indicatorOfParameter == 228:
return 'gust'
if table2Version == 1 and indicatorOfParameter == 227:
return 'vfr'
if table2Version == 1 and indicatorOfParameter == 226:
return 'ptype'
if table2Version == 1 and indicatorOfParameter == 225:
return 'CAPE'
if table2Version == 1 and indicatorOfParameter == 224:
return 'ci'
if table2Version == 1 and indicatorOfParameter == 223:
return 'lnb'
if table2Version == 1 and indicatorOfParameter == 222:
return 'lcl'
if table2Version == 1 and indicatorOfParameter == 210:
return 'iceex'
if table2Version == 1 and indicatorOfParameter == 209:
return 'sdsso'
if table2Version == 1 and indicatorOfParameter == 208:
return 'mssso'
if table2Version == 1 and indicatorOfParameter == 206:
return 'anmo'
if table2Version == 1 and indicatorOfParameter == 205:
return 'amo'
if table2Version == 1 and indicatorOfParameter == 204:
return 'orostdv'
if table2Version == 1 and indicatorOfParameter == 200:
return 'TKE'
if table2Version == 1 and indicatorOfParameter == 199:
return 'vgtyp'
if table2Version == 1 and indicatorOfParameter == 198:
return 'fool'
if table2Version == 1 and indicatorOfParameter == 197:
return 'fof'
if table2Version == 1 and indicatorOfParameter == 196:
return 'fol'
if table2Version == 1 and indicatorOfParameter == 195:
return 'slt'
if table2Version == 1 and indicatorOfParameter == 194:
return 'frst'
if table2Version == 1 and indicatorOfParameter == 193:
return 'ssi'
if table2Version == 1 and indicatorOfParameter == 192:
return 'watcn'
if table2Version == 1 and indicatorOfParameter == 191:
return 'dsn'
if table2Version == 1 and indicatorOfParameter == 190:
return 'asn'
if table2Version == 1 and indicatorOfParameter == 189:
return 'swi'
if table2Version == 1 and indicatorOfParameter == 169:
return 'al_scorr'
if table2Version == 1 and indicatorOfParameter == 168:
return 'hero'
if table2Version == 1 and indicatorOfParameter == 167:
return 'frasp'
if table2Version == 1 and indicatorOfParameter == 166:
return 'skwf'
if table2Version == 1 and indicatorOfParameter == 165:
return 'susl'
if table2Version == 1 and indicatorOfParameter == 164:
return 'movegro'
if table2Version == 1 and indicatorOfParameter == 163:
return 'RSHB'
if table2Version == 1 and indicatorOfParameter == 162:
return 'RSHA'
if table2Version == 1 and indicatorOfParameter == 161:
return 'shfr'
if table2Version == 1 and indicatorOfParameter == 160:
return 'slfr'
if table2Version == 1 and indicatorOfParameter == 143:
return 'dptland'
if table2Version == 1 and indicatorOfParameter == 142:
return 'rhland'
if table2Version == 1 and indicatorOfParameter == 141:
return 'qland'
if table2Version == 1 and indicatorOfParameter == 140:
return 'tland'
if table2Version == 1 and indicatorOfParameter == 139:
return 'sd_cold_ol'
if table2Version == 1 and indicatorOfParameter == 138:
return 'sd_cold'
if table2Version == 1 and indicatorOfParameter == 137:
return 'icc'
if table2Version == 1 and indicatorOfParameter == 136:
return 'mingust'
if table2Version == 1 and indicatorOfParameter == 135:
return 'maxgust'
if table2Version == 1 and indicatorOfParameter == 134:
return 'cwref'
if table2Version == 1 and indicatorOfParameter == 133:
return 'wvbt_corr'
if table2Version == 1 and indicatorOfParameter == 132:
return 'wvbt'
if table2Version == 1 and indicatorOfParameter == 131:
return 'ctt'
if table2Version == 1 and indicatorOfParameter == 130:
return 'radtop'
if table2Version == 1 and indicatorOfParameter == 129:
return 'qten'
if table2Version == 1 and indicatorOfParameter == 128:
return 'mofl'
if table2Version == 1 and indicatorOfParameter == 127:
return 'imgd'
if table2Version == 1 and indicatorOfParameter == 126:
return 'wmixe'
if table2Version == 1 and indicatorOfParameter == 125:
return 'vflx'
if table2Version == 1 and indicatorOfParameter == 124:
return 'uflx'
if table2Version == 1 and indicatorOfParameter == 123:
return 'bld'
if table2Version == 1 and indicatorOfParameter == 122:
return 'shtfl'
if table2Version == 1 and indicatorOfParameter == 121:
return 'lhtfl'
if table2Version == 1 and indicatorOfParameter == 120:
return 'swrad'
if table2Version == 1 and indicatorOfParameter == 119:
return 'lwrad'
if table2Version == 1 and indicatorOfParameter == 118:
return 'btmp'
if table2Version == 1 and indicatorOfParameter == 117:
return 'grad'
if table2Version == 1 and indicatorOfParameter == 116:
return 'swavr'
if table2Version == 1 and indicatorOfParameter == 115:
return 'lwavr'
if table2Version == 1 and indicatorOfParameter == 114:
return 'nlwrt'
if table2Version == 1 and indicatorOfParameter == 113:
return 'nswrt'
if table2Version == 1 and indicatorOfParameter == 112:
return 'nlwrs'
if table2Version == 1 and indicatorOfParameter == 111:
return 'nswrs'
if table2Version == 1 and indicatorOfParameter == 110:
return 'persw'
if table2Version == 1 and indicatorOfParameter == 109:
return 'dirsw'
if table2Version == 1 and indicatorOfParameter == 108:
return 'perpw'
if table2Version == 1 and indicatorOfParameter == 107:
return 'prwd'
if table2Version == 1 and indicatorOfParameter == 106:
return 'swper'
if table2Version == 1 and indicatorOfParameter == 105:
return 'swell'
if table2Version == 1 and indicatorOfParameter == 104:
return 'swdir'
if table2Version == 1 and indicatorOfParameter == 103:
return 'mpww'
if table2Version == 1 and indicatorOfParameter == 102:
return 'shww'
if table2Version == 1 and indicatorOfParameter == 101:
return 'mdww'
if table2Version == 1 and indicatorOfParameter == 100:
return 'swh'
if table2Version == 1 and indicatorOfParameter == 99:
return 'snom'
if table2Version == 1 and indicatorOfParameter == 98:
return 'iced'
if table2Version == 1 and indicatorOfParameter == 97:
return 'iceg'
if table2Version == 1 and indicatorOfParameter == 96:
return 'vice'
if table2Version == 1 and indicatorOfParameter == 95:
return 'uice'
if table2Version == 1 and indicatorOfParameter == 94:
return 'siced'
if table2Version == 1 and indicatorOfParameter == 93:
return 'diced'
if table2Version == 1 and indicatorOfParameter == 92:
return 'icetk'
if table2Version == 1 and indicatorOfParameter == 91:
return 'icec'
if table2Version == 1 and indicatorOfParameter == 90:
return 'watr'
if table2Version == 1 and indicatorOfParameter == 89:
return 'den'
if table2Version == 1 and indicatorOfParameter == 88:
return 's'
if table2Version == 1 and indicatorOfParameter == 87:
return 'veg'
if table2Version == 1 and indicatorOfParameter == 86:
return 'ssw'
if table2Version == 1 and indicatorOfParameter == 85:
return 'st'
if table2Version == 1 and indicatorOfParameter == 84:
return 'al'
if table2Version == 1 and indicatorOfParameter == 83:
return 'sr'
if table2Version == 1 and indicatorOfParameter == 82:
return 'dslm'
if table2Version == 1 and indicatorOfParameter == 81:
return 'lsm'
if table2Version == 1 and indicatorOfParameter == 80:
return 'wtmp'
if table2Version == 1 and indicatorOfParameter == 79:
return 'lsf'
if table2Version == 1 and indicatorOfParameter == 78:
return 'csf'
if table2Version == 1 and indicatorOfParameter == 77:
return 'bli'
if table2Version == 1 and indicatorOfParameter == 76:
return 'cwat'
if table2Version == 1 and indicatorOfParameter == 75:
return 'hcc'
if table2Version == 1 and indicatorOfParameter == 74:
return 'mcc'
if table2Version == 1 and indicatorOfParameter == 73:
return 'lcc'
if table2Version == 1 and indicatorOfParameter == 72:
return 'ccc'
if table2Version == 1 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 1 and indicatorOfParameter == 70:
return 'mtha'
if table2Version == 1 and indicatorOfParameter == 69:
return 'mthd'
if table2Version == 1 and indicatorOfParameter == 68:
return 'tthdp'
if table2Version == 1 and indicatorOfParameter == 67:
return 'mld'
if table2Version == 1 and indicatorOfParameter == 66:
return 'sd'
if table2Version == 1 and indicatorOfParameter == 65:
return 'sdwe'
if table2Version == 1 and indicatorOfParameter == 64:
return 'srweq'
if table2Version == 1 and indicatorOfParameter == 63:
return 'acpcp'
if table2Version == 1 and indicatorOfParameter == 62:
return 'lsp'
if table2Version == 1 and indicatorOfParameter == 61:
return 'tp'
if table2Version == 1 and indicatorOfParameter == 60:
return 'tstm'
if table2Version == 1 and indicatorOfParameter == 59:
return 'prate'
if table2Version == 1 and indicatorOfParameter == 58:
return 'cice'
if table2Version == 1 and indicatorOfParameter == 57:
return 'e'
if table2Version == 1 and indicatorOfParameter == 56:
return 'satd'
if table2Version == 1 and indicatorOfParameter == 55:
return 'vp'
if table2Version == 1 and indicatorOfParameter == 54:
return 'pwat'
if table2Version == 1 and indicatorOfParameter == 53:
return 'mixr'
if table2Version == 1 and indicatorOfParameter == 52:
return 'r'
if table2Version == 1 and indicatorOfParameter == 51:
return 'q'
if table2Version == 1 and indicatorOfParameter == 50:
return 'vcurr'
if table2Version == 1 and indicatorOfParameter == 49:
return 'ucurr'
if table2Version == 1 and indicatorOfParameter == 48:
return 'spc'
if table2Version == 1 and indicatorOfParameter == 47:
return 'dirc'
if table2Version == 1 and indicatorOfParameter == 46:
return 'vvsch'
if table2Version == 1 and indicatorOfParameter == 45:
return 'vusch'
if table2Version == 1 and indicatorOfParameter == 44:
return 'd'
if table2Version == 1 and indicatorOfParameter == 43:
return 'vo'
if table2Version == 1 and indicatorOfParameter == 42:
return 'absd'
if table2Version == 1 and indicatorOfParameter == 41:
return 'absv'
if table2Version == 1 and indicatorOfParameter == 40:
return 'w'
if table2Version == 1 and indicatorOfParameter == 39:
return 'omega'
if table2Version == 1 and indicatorOfParameter == 38:
return 'sgcvv'
if table2Version == 1 and indicatorOfParameter == 37:
return 'mntsf'
if table2Version == 1 and indicatorOfParameter == 36:
return 'vp'
if table2Version == 1 and indicatorOfParameter == 35:
return 'strf'
if table2Version == 1 and indicatorOfParameter == 34:
return 'v'
if table2Version == 1 and indicatorOfParameter == 33:
return 'u'
if table2Version == 1 and indicatorOfParameter == 32:
return 'ws'
if table2Version == 1 and indicatorOfParameter == 31:
return 'wdir'
if table2Version == 1 and indicatorOfParameter == 30:
return 'wvsp3'
if table2Version == 1 and indicatorOfParameter == 29:
return 'wvsp2'
if table2Version == 1 and indicatorOfParameter == 28:
return 'wvsp1'
if table2Version == 1 and indicatorOfParameter == 27:
return 'gpa'
if table2Version == 1 and indicatorOfParameter == 26:
return 'presa'
if table2Version == 1 and indicatorOfParameter == 25:
return 'ta'
if table2Version == 1 and indicatorOfParameter == 24:
return 'pli'
if table2Version == 1 and indicatorOfParameter == 23:
return 'rdsp3'
if table2Version == 1 and indicatorOfParameter == 22:
return 'rdsp2'
if table2Version == 1 and indicatorOfParameter == 21:
return 'rdsp1'
if table2Version == 1 and indicatorOfParameter == 20:
return 'vis'
if table2Version == 1 and indicatorOfParameter == 19:
return 'lapr'
if table2Version == 1 and indicatorOfParameter == 18:
return 'dptd'
if table2Version == 1 and indicatorOfParameter == 17:
return 'dpt'
if table2Version == 1 and indicatorOfParameter == 16:
return 'tmin'
if table2Version == 1 and indicatorOfParameter == 15:
return 'tmax'
if table2Version == 1 and indicatorOfParameter == 14:
return 'papt'
if table2Version == 1 and indicatorOfParameter == 13:
return 'pt'
if table2Version == 1 and indicatorOfParameter == 12:
return 'vtmp'
if table2Version == 1 and indicatorOfParameter == 11:
return 't'
if table2Version == 1 and indicatorOfParameter == 10:
return 'tozne'
if table2Version == 1 and indicatorOfParameter == 9:
return 'hstdv'
if table2Version == 1 and indicatorOfParameter == 8:
return 'h'
if table2Version == 1 and indicatorOfParameter == 7:
return 'gh'
if table2Version == 1 and indicatorOfParameter == 6:
return 'z'
if table2Version == 1 and indicatorOfParameter == 5:
return 'icaht'
if table2Version == 1 and indicatorOfParameter == 4:
return 'pv'
if table2Version == 1 and indicatorOfParameter == 3:
return 'ptend'
if table2Version == 1 and indicatorOfParameter == 2:
return 'msl'
if table2Version == 1 and indicatorOfParameter == 1:
return 'pres'
if table2Version == 1 and indicatorOfParameter == 0:
return 'Reserved'
return wrapped
| 30.797962
| 64
| 0.588902
|
a0f0f179fa613ba2fa52fccd7affb6deb3f40b3e
| 5,770
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql/implementation/fetch_assets.py
|
kyohei3/dagster
|
60319ba89d765abdd77a0934ca90eeb154f66a03
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/implementation/fetch_assets.py
|
kyohei3/dagster
|
60319ba89d765abdd77a0934ca90eeb154f66a03
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/implementation/fetch_assets.py
|
kyohei3/dagster
|
60319ba89d765abdd77a0934ca90eeb154f66a03
|
[
"Apache-2.0"
] | null | null | null |
from dagster import AssetKey, DagsterEventType, EventRecordsFilter, check, seven
from .utils import capture_error
def _normalize_asset_cursor_str(cursor_string):
    # The asset cursor is derived from a JSON-serialized string of the asset key path. Because JS
    # and Python differ in how their JSON serializers treat whitespace, we take the extra
    # precaution of doing a deserialization/serialization pass here to normalize the cursor.
if not cursor_string:
return cursor_string
try:
return seven.json.dumps(seven.json.loads(cursor_string))
except seven.JSONDecodeError:
return cursor_string
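# A minimal illustrative sketch (not part of the original module; values are hypothetical):
# two cursor strings that differ only in JSON whitespace normalize to the same canonical
# form, while a non-JSON cursor falls through unchanged, e.g.
#   _normalize_asset_cursor_str('[ "my_asset" ]')  -> '["my_asset"]'
#   _normalize_asset_cursor_str('["my_asset"]')    -> '["my_asset"]'
#   _normalize_asset_cursor_str('not json')        -> 'not json'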
@capture_error
def get_assets(graphene_info, prefix=None, cursor=None, limit=None):
from ..schema.pipelines.pipeline import GrapheneAsset
from ..schema.roots.assets import GrapheneAssetConnection
instance = graphene_info.context.instance
normalized_cursor_str = _normalize_asset_cursor_str(cursor)
materialized_keys = instance.get_asset_keys(
prefix=prefix, limit=limit, cursor=normalized_cursor_str
)
asset_nodes_by_asset_key = {
asset_key: asset_node
for asset_key, asset_node in get_asset_nodes_by_asset_key(graphene_info).items()
if (not prefix or asset_key.path[: len(prefix)] == prefix)
and (not cursor or asset_key.to_string() > cursor)
}
asset_keys = sorted(set(materialized_keys).union(asset_nodes_by_asset_key.keys()), key=str)
if limit:
asset_keys = asset_keys[:limit]
return GrapheneAssetConnection(
nodes=[
GrapheneAsset(
key=asset_key,
definition=asset_nodes_by_asset_key.get(asset_key),
)
for asset_key in asset_keys
]
)
def get_asset_nodes_by_asset_key(graphene_info):
from ..schema.asset_graph import GrapheneAssetNode
return {
external_asset_node.asset_key: GrapheneAssetNode(repository, external_asset_node)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
for external_asset_node in repository.get_external_asset_nodes()
}
def get_asset_nodes(graphene_info):
from ..schema.asset_graph import GrapheneAssetNode
return [
GrapheneAssetNode(repository, external_asset_node)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
for external_asset_node in repository.get_external_asset_nodes()
]
def get_asset_node(graphene_info, asset_key):
from ..schema.errors import GrapheneAssetNotFoundError
check.inst_param(asset_key, "asset_key", AssetKey)
node = next((n for n in get_asset_nodes(graphene_info) if n.assetKey == asset_key), None)
if not node:
return GrapheneAssetNotFoundError(asset_key=asset_key)
return node
def get_asset(graphene_info, asset_key):
from ..schema.errors import GrapheneAssetNotFoundError
from ..schema.pipelines.pipeline import GrapheneAsset
check.inst_param(asset_key, "asset_key", AssetKey)
instance = graphene_info.context.instance
asset_nodes_by_asset_key = get_asset_nodes_by_asset_key(graphene_info)
asset_node = asset_nodes_by_asset_key.get(asset_key)
if not asset_node and not instance.has_asset_key(asset_key):
return GrapheneAssetNotFoundError(asset_key=asset_key)
return GrapheneAsset(key=asset_key, definition=asset_node)
def get_asset_materializations(
graphene_info,
asset_key,
partitions=None,
limit=None,
before_timestamp=None,
after_timestamp=None,
):
check.inst_param(asset_key, "asset_key", AssetKey)
check.opt_int_param(limit, "limit")
check.opt_float_param(before_timestamp, "before_timestamp")
instance = graphene_info.context.instance
event_records = instance.get_event_records(
EventRecordsFilter(
event_type=DagsterEventType.ASSET_MATERIALIZATION,
asset_key=asset_key,
asset_partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
),
limit=limit,
)
return [event_record.event_log_entry for event_record in event_records]
def get_asset_observations(
graphene_info,
asset_key,
partitions=None,
limit=None,
before_timestamp=None,
after_timestamp=None,
):
check.inst_param(asset_key, "asset_key", AssetKey)
check.opt_int_param(limit, "limit")
check.opt_float_param(before_timestamp, "before_timestamp")
check.opt_float_param(after_timestamp, "after_timestamp")
instance = graphene_info.context.instance
event_records = instance.get_event_records(
EventRecordsFilter(
event_type=DagsterEventType.ASSET_OBSERVATION,
asset_key=asset_key,
asset_partitions=partitions,
before_timestamp=before_timestamp,
after_timestamp=after_timestamp,
),
limit=limit,
)
return [event_record.event_log_entry for event_record in event_records]
def get_asset_run_ids(graphene_info, asset_key):
check.inst_param(asset_key, "asset_key", AssetKey)
instance = graphene_info.context.instance
return instance.run_ids_for_asset_key(asset_key)
def get_assets_for_run_id(graphene_info, run_id):
from ..schema.pipelines.pipeline import GrapheneAsset
check.str_param(run_id, "run_id")
records = graphene_info.context.instance.all_logs(run_id)
asset_keys = [
record.dagster_event.asset_key
for record in records
if record.is_dagster_event and record.dagster_event.asset_key
]
return [GrapheneAsset(key=asset_key) for asset_key in asset_keys]
| 33.941176
| 100
| 0.734315
|
7451f8e73b23701def1d108e476b04ccf1addc0d
| 2,973
|
py
|
Python
|
local_configs/10.14/logits_sg128_64.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
local_configs/10.14/logits_sg128_64.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
local_configs/10.14/logits_sg128_64.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
_base_ = [
'../_base_/datasets/ade20k_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='SDModule',
cfg_s=dict(
type='EncoderDecoder',
pretrained='pretrained/mit_b0.pth',
backbone=dict(
type='mit_b0',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=256),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
cfg_t=dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b4',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
),
distillation = [
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'KLDLoss',
'loss_config':{
'weight':1,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'mask_config':False,
'transform_config':{'loss_type':'spatial','kernel_size':128,'stride':64},
'ff_config':False
},
},
],
    s_pretrain = './pretrained/mit_b0.pth', # student's pretrained weights
    t_pretrain = './pretrained/segformer.b4.512x512.ade.160k.pth', # teacher's pretrained weights
train_cfg=dict(),
test_cfg=dict(mode='whole'),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9,0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
work_dir = '/apdcephfs/private_inchzhang/shared_info/sg/logits_sg127_stride64'
data = dict(samples_per_gpu=2)
evaluation = dict(interval=16000, metric='mIoU')
# resume_from = ''
| 35.392857
| 95
| 0.541541
|
53112168bf7e0a13d3abd82d1591242adcaa7574
| 20,577
|
py
|
Python
|
src/zope/index/text/tests/mhindex.py
|
minddistrict/zope.index
|
7fd8bbad0584e21c0158e73681bcf99b6bacb699
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/index/text/tests/mhindex.py
|
minddistrict/zope.index
|
7fd8bbad0584e21c0158e73681bcf99b6bacb699
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/index/text/tests/mhindex.py
|
minddistrict/zope.index
|
7fd8bbad0584e21c0158e73681bcf99b6bacb699
|
[
"ZPL-2.1"
] | 1
|
2021-09-29T19:54:14.000Z
|
2021-09-29T19:54:14.000Z
|
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""MH mail indexer.
To index messages from a single folder (messages defaults to 'all'):
mhindex.py [options] -u +folder [messages ...]
To bulk index all messages from several folders:
mhindex.py [options] -b folder ...; the folder name ALL means all folders.
To execute a single query:
mhindex.py [options] query
To enter interactive query mode:
mhindex.py [options]
Common options:
-d FILE -- specify the Data.fs to use (default ~/.mhindex.fs)
-w -- dump the word list in alphabetical order and exit
-W -- dump the word list ordered by word id and exit
Indexing options:
-O -- do a prescan on the data to compute optimal word id assignments;
this is only useful the first time the Data.fs is used
-t N -- commit a transaction after every N messages (default 20000)
-p N -- pack after every N commits (by default no packing is done)
Querying options:
-m N -- show at most N matching lines from the message (default 3)
-n N -- show the N best matching messages (default 3)
"""
from __future__ import print_function
import os
import re
import sys
import time
import mhlib
import getopt
import traceback
from StringIO import StringIO
from stat import ST_MTIME
DATAFS = "~/.mhindex.fs"
ZOPECODE = "~/projects/Zope3/lib/python"
zopecode = os.path.expanduser(ZOPECODE)
sys.path.insert(0, zopecode)
from ZODB.DB import DB
from ZODB.Storage.FileStorage import FileStorage
import transaction
from BTrees.IOBTree import IOBTree
from BTrees.OIBTree import OIBTree
from BTrees.IIBTree import IIBTree
from zope.index.text.okapiindex import OkapiIndex
from zope.index.text.lexicon import Splitter
from zope.index.text.lexicon import CaseNormalizer, StopWordRemover
from zope.index.text.stopdict import get_stopdict
from zope.index.text.textindexwrapper import TextIndexWrapper
NBEST = 3
MAXLINES = 3
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "bd:fhm:n:Op:t:uwW")
except getopt.error as msg:
print(msg)
print("use -h for help")
return 2
update = 0
bulk = 0
optimize = 0
nbest = NBEST
maxlines = MAXLINES
datafs = os.path.expanduser(DATAFS)
pack = 0
trans = 20000
dumpwords = dumpwids = dumpfreqs = 0
for o, a in opts:
if o == "-b":
bulk = 1
if o == "-d":
datafs = a
if o == "-f":
dumpfreqs = 1
if o == "-h":
print(__doc__)
return
if o == "-m":
maxlines = int(a)
if o == "-n":
nbest = int(a)
if o == "-O":
optimize = 1
if o == "-p":
pack = int(a)
if o == "-t":
trans = int(a)
if o == "-u":
update = 1
if o == "-w":
dumpwords = 1
if o == "-W":
dumpwids = 1
ix = Indexer(datafs, writable=update or bulk, trans=trans, pack=pack)
if dumpfreqs:
ix.dumpfreqs()
if dumpwords:
ix.dumpwords()
if dumpwids:
ix.dumpwids()
if dumpwords or dumpwids or dumpfreqs:
return
if bulk:
if optimize:
ix.optimize(args)
ix.bulkupdate(args)
elif update:
ix.update(args)
elif args:
for i in range(len(args)):
a = args[i]
if " " in a:
if a[0] == "-":
args[i] = '-"' + a[1:] + '"'
else:
args[i] = '"' + a + '"'
ix.query(" ".join(args), nbest, maxlines)
else:
ix.interact(nbest)
if pack:
ix.pack()
class Indexer(object):
filestorage = database = connection = root = None
def __init__(self, datafs, writable=0, trans=0, pack=0):
self.trans_limit = trans
self.pack_limit = pack
self.trans_count = 0
self.pack_count = 0
self.stopdict = get_stopdict()
self.mh = mhlib.MH()
self.filestorage = FileStorage(datafs, read_only=(not writable))
self.database = DB(self.filestorage)
self.connection = self.database.open()
self.root = self.connection.root()
try:
self.index = self.root["index"]
except KeyError:
self.index = self.root["index"] = TextIndexWrapper()
try:
self.docpaths = self.root["docpaths"]
except KeyError:
self.docpaths = self.root["docpaths"] = IOBTree()
try:
self.doctimes = self.root["doctimes"]
except KeyError:
self.doctimes = self.root["doctimes"] = IIBTree()
try:
self.watchfolders = self.root["watchfolders"]
except KeyError:
self.watchfolders = self.root["watchfolders"] = {}
self.path2docid = OIBTree()
for docid in self.docpaths.keys():
path = self.docpaths[docid]
self.path2docid[path] = docid
try:
self.maxdocid = max(self.docpaths.keys())
except ValueError:
self.maxdocid = 0
print(len(self.docpaths), "Document ids")
print(len(self.path2docid), "Pathnames")
print(self.index.lexicon.length(), "Words")
def dumpfreqs(self):
lexicon = self.index.lexicon
index = self.index.index
assert isinstance(index, OkapiIndex)
L = []
for wid in lexicon.wids():
freq = 0
for f in index._wordinfo.get(wid, {}).values():
freq += f
L.append((freq, wid, lexicon.get_word(wid)))
L.sort()
L.reverse()
for freq, wid, word in L:
print("%10d %10d %s" % (wid, freq, word))
def dumpwids(self):
lexicon = self.index.lexicon
index = self.index.index
assert isinstance(index, OkapiIndex)
for wid in lexicon.wids():
freq = 0
for f in index._wordinfo.get(wid, {}).values():
freq += f
print("%10d %10d %s" % (wid, freq, lexicon.get_word(wid)))
def dumpwords(self):
lexicon = self.index.lexicon
index = self.index.index
assert isinstance(index, OkapiIndex)
for word in lexicon.words():
wid = lexicon.get_wid(word)
freq = 0
for f in index._wordinfo.get(wid, {}).values():
freq += f
print("%10d %10d %s" % (wid, freq, word))
def close(self):
self.root = None
if self.connection is not None:
self.connection.close()
self.connection = None
if self.database is not None:
self.database.close()
self.database = None
if self.filestorage is not None:
self.filestorage.close()
self.filestorage = None
def interact(self, nbest=NBEST, maxlines=MAXLINES):
try:
import readline
except ImportError:
pass
text = ""
top = 0
results = []
while 1:
try:
line = raw_input("Query: ")
except EOFError:
print("\nBye.")
break
line = line.strip()
if line.startswith("/"):
self.specialcommand(line, results, top - nbest)
continue
if line:
text = line
top = 0
else:
if not text:
continue
try:
results, n = self.timequery(text, top + nbest)
except KeyboardInterrupt:
raise
except:
reportexc()
text = ""
continue
if len(results) <= top:
if not n:
print("No hits for %r." % text)
else:
print("No more hits for %r." % text)
text = ""
continue
print("[Results %d-%d from %d" % (top+1, min(n, top+nbest), n), end=' ')
print("for query %s]" % repr(text))
self.formatresults(text, results, maxlines, top, top+nbest)
top += nbest
def specialcommand(self, line, results, first):
assert line.startswith("/")
line = line[1:]
if not line:
n = first
else:
try:
n = int(line) - 1
except:
print("Huh?")
return
if n < 0 or n >= len(results):
print("Out of range")
return
docid, score = results[n]
path = self.docpaths[docid]
i = path.rfind("/")
assert i > 0
folder = path[:i]
n = path[i+1:]
cmd = "show +%s %s" % (folder, n)
if os.getenv("DISPLAY"):
os.system("xterm -e sh -c '%s | less' &" % cmd)
else:
os.system(cmd)
def query(self, text, nbest=NBEST, maxlines=MAXLINES):
results, n = self.timequery(text, nbest)
if not n:
print("No hits for %r." % text)
return
print("[Results 1-%d from %d]" % (len(results), n))
self.formatresults(text, results, maxlines)
def timequery(self, text, nbest):
t0 = time.time()
c0 = time.clock()
results, n = self.index.query(text, 0, nbest)
t1 = time.time()
c1 = time.clock()
print("[Query time: %.3f real, %.3f user]" % (t1-t0, c1-c0))
return results, n
def formatresults(self, text, results, maxlines=MAXLINES,
lo=0, hi=sys.maxint):
stop = self.stopdict.has_key
words = [w for w in re.findall(r"\w+\*?", text.lower()) if not stop(w)]
pattern = r"\b(" + "|".join(words) + r")\b"
pattern = pattern.replace("*", ".*") # glob -> re syntax
prog = re.compile(pattern, re.IGNORECASE)
print('='*70)
rank = lo
for docid, score in results[lo:hi]:
rank += 1
path = self.docpaths[docid]
score *= 100.0
print("Rank: %d Score: %d%% File: %s" % (rank, score, path))
path = os.path.join(self.mh.getpath(), path)
try:
fp = open(path)
except (IOError, OSError) as msg:
print("Can't open:", msg)
continue
msg = mhlib.Message("<folder>", 0, fp)
for header in "From", "To", "Cc", "Bcc", "Subject", "Date":
h = msg.getheader(header)
if h:
print("%-8s %s" % (header+":", h))
text = self.getmessagetext(msg)
if text:
print()
nleft = maxlines
for part in text:
for line in part.splitlines():
if prog.search(line):
print(line)
nleft -= 1
if nleft <= 0:
break
if nleft <= 0:
break
print('-'*70)
def update(self, args):
folder = None
seqs = []
for arg in args:
if arg.startswith("+"):
if folder is None:
folder = arg[1:]
else:
print("only one folder at a time")
return
else:
seqs.append(arg)
if not folder:
folder = self.mh.getcontext()
if not seqs:
seqs = ['all']
try:
f = self.mh.openfolder(folder)
except mhlib.Error as msg:
print(msg)
return
dict = {}
for seq in seqs:
try:
nums = f.parsesequence(seq)
except mhlib.Error as msg:
print(msg or "unparsable message sequence: %s" % repr(seq))
return
for n in nums:
dict[n] = n
msgs = dict.keys()
msgs.sort()
self.updatefolder(f, msgs)
self.commit()
def optimize(self, args):
uniqwords = {}
for folder in args:
if folder.startswith("+"):
folder = folder[1:]
print("\nOPTIMIZE FOLDER", folder)
try:
f = self.mh.openfolder(folder)
except mhlib.Error as msg:
print(msg)
continue
self.prescan(f, f.listmessages(), uniqwords)
L = [(uniqwords[word], word) for word in uniqwords.keys()]
L.sort()
L.reverse()
for i in range(100):
print("%3d. %6d %s" % ((i+1,) + L[i]))
self.index.lexicon.sourceToWordIds([word for (count, word) in L])
def prescan(self, f, msgs, uniqwords):
pipeline = [Splitter(), CaseNormalizer(), StopWordRemover()]
for n in msgs:
print("prescanning", n)
m = f.openmessage(n)
text = self.getmessagetext(m, f.name)
for p in pipeline:
text = p.process(text)
for word in text:
uniqwords[word] = uniqwords.get(word, 0) + 1
def bulkupdate(self, args):
if not args:
print("No folders specified; use ALL to bulk-index all folders")
return
if "ALL" in args:
i = args.index("ALL")
args[i:i+1] = self.mh.listfolders()
for folder in args:
if folder.startswith("+"):
folder = folder[1:]
print("\nFOLDER", folder)
try:
f = self.mh.openfolder(folder)
except mhlib.Error as msg:
print(msg)
continue
self.updatefolder(f, f.listmessages())
print("Total", len(self.docpaths))
self.commit()
print("Indexed", self.index.lexicon._nbytes, "bytes and", end=' ')
print(self.index.lexicon._nwords, "words;", end=' ')
print(len(self.index.lexicon._words), "unique words.")
def updatefolder(self, f, msgs):
self.watchfolders[f.name] = self.getmtime(f.name)
for n in msgs:
path = "%s/%s" % (f.name, n)
docid = self.path2docid.get(path, 0)
if docid and self.getmtime(path) == self.doctimes.get(docid, 0):
print("unchanged", docid, path)
continue
docid = self.newdocid(path)
try:
m = f.openmessage(n)
except IOError:
print("disappeared", docid, path)
self.unindexpath(path)
continue
text = self.getmessagetext(m, f.name)
if not text:
self.unindexpath(path)
continue
print("indexing", docid, path)
self.index.index_doc(docid, text)
self.maycommit()
# Remove messages from the folder that no longer exist
for path in list(self.path2docid.keys(f.name)):
if not path.startswith(f.name + "/"):
break
if self.getmtime(path) == 0:
self.unindexpath(path)
print("done.")
def unindexpath(self, path):
if path in self.path2docid:
docid = self.path2docid[path]
print("unindexing", docid, path)
del self.docpaths[docid]
del self.doctimes[docid]
del self.path2docid[path]
try:
self.index.unindex_doc(docid)
except KeyError as msg:
print("KeyError", msg)
self.maycommit()
def getmessagetext(self, m, name=None):
L = []
if name:
L.append("_folder " + name) # To restrict search to a folder
self.getheaders(m, L)
try:
self.getmsgparts(m, L, 0)
except KeyboardInterrupt:
raise
except:
print("(getmsgparts failed:)")
reportexc()
return L
def getmsgparts(self, m, L, level):
ctype = m.gettype()
if level or ctype != "text/plain":
print(". "*level + str(ctype))
if ctype == "text/plain":
L.append(m.getbodytext())
elif ctype in ("multipart/alternative", "multipart/mixed"):
for part in m.getbodyparts():
self.getmsgparts(part, L, level+1)
elif ctype == "message/rfc822":
f = StringIO(m.getbodytext())
m = mhlib.Message("<folder>", 0, f)
self.getheaders(m, L)
self.getmsgparts(m, L, level+1)
def getheaders(self, m, L):
H = []
for key in "from", "to", "cc", "bcc", "subject":
value = m.get(key)
if value:
H.append(value)
if H:
L.append("\n".join(H))
def newdocid(self, path):
docid = self.path2docid.get(path)
if docid is not None:
self.doctimes[docid] = self.getmtime(path)
return docid
docid = self.maxdocid + 1
self.maxdocid = docid
self.docpaths[docid] = path
self.doctimes[docid] = self.getmtime(path)
self.path2docid[path] = docid
return docid
def getmtime(self, path):
path = os.path.join(self.mh.getpath(), path)
try:
st = os.stat(path)
except os.error as msg:
return 0
return int(st[ST_MTIME])
def maycommit(self):
self.trans_count += 1
if self.trans_count >= self.trans_limit > 0:
self.commit()
def commit(self):
if self.trans_count > 0:
print("committing...")
transaction.commit()
self.trans_count = 0
self.pack_count += 1
if self.pack_count >= self.pack_limit > 0:
self.pack()
def pack(self):
if self.pack_count > 0:
print("packing...")
self.database.pack()
self.pack_count = 0
def reportexc():
traceback.print_exc()
if __name__ == "__main__":
sys.exit(main())
| 32.507109
| 84
| 0.543471
|
75699a62219908547bb9f766ea1d89682e4352df
| 5,771
|
py
|
Python
|
dojo/db_migrations/0004_cve_field.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,772
|
2018-01-22T23:32:15.000Z
|
2022-03-31T14:49:33.000Z
|
dojo/db_migrations/0004_cve_field.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 3,461
|
2018-01-20T19:12:28.000Z
|
2022-03-31T17:14:39.000Z
|
dojo/db_migrations/0004_cve_field.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,173
|
2018-01-23T07:10:23.000Z
|
2022-03-31T14:40:43.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-06 21:54
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0003_test_title'),
]
operations = [
migrations.AddField(
model_name='finding',
name='cve',
field=models.TextField(max_length=20, null=True, validators=[django.core.validators.RegexValidator(message=b"CVE must be entered in the format: 'CVE-9999-9999'. ", regex=b'^CVE-\\d{4}-\\d{4,7}$')]),
),
migrations.AddField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=20, null=True, validators=[django.core.validators.RegexValidator(message=b"CVE must be entered in the format: 'CVE-9999-9999'. ", regex=b'^CVE-\\d{4}-\\d{4,7}$')]),
),
migrations.AlterField(
model_name='child_rule',
name='match_field',
field=models.CharField(choices=[('id', 'id'), (b'title', b'title'), (b'date', b'date'), (b'cwe', b'cwe'), (b'cve', b'cve'), (b'url', b'url'), (b'severity', b'severity'), (b'description', b'description'), (b'mitigation', b'mitigation'), (b'impact', b'impact'), (b'steps_to_reproduce', b'steps_to_reproduce'), (b'severity_justification', b'severity_justification'), (b'references', b'references'), (b'test', b'test'), (b'is_template', b'is_template'), (b'active', b'active'), (b'verified', b'verified'), (b'false_p', b'false_p'), (b'duplicate', b'duplicate'), (b'duplicate_finding', b'duplicate_finding'), (b'out_of_scope', b'out_of_scope'), (b'under_review', b'under_review'), (b'review_requested_by', b'review_requested_by'), (b'under_defect_review', b'under_defect_review'), (b'defect_review_requested_by', b'defect_review_requested_by'), (b'thread_id', b'thread_id'), (b'mitigated', b'mitigated'), (b'mitigated_by', b'mitigated_by'), (b'reporter', b'reporter'), (b'numerical_severity', b'numerical_severity'), (b'last_reviewed', b'last_reviewed'), (b'last_reviewed_by', b'last_reviewed_by'), (b'line_number', b'line_number'), (b'sourcefilepath', b'sourcefilepath'), (b'sourcefile', b'sourcefile'), (b'param', b'param'), (b'payload', b'payload'), (b'hash_code', b'hash_code'), (b'line', b'line'), (b'file_path', b'file_path'), (b'static_finding', b'static_finding'), (b'dynamic_finding', b'dynamic_finding'), (b'created', b'created'), (b'scanner_confidence', b'scanner_confidence')], max_length=200),
),
migrations.AlterField(
model_name='rule',
name='applied_field',
field=models.CharField(choices=[('id', 'id'), (b'title', b'title'), (b'date', b'date'), (b'cwe', b'cwe'), (b'cve', b'cve'), (b'url', b'url'), (b'severity', b'severity'), (b'description', b'description'), (b'mitigation', b'mitigation'), (b'impact', b'impact'), (b'steps_to_reproduce', b'steps_to_reproduce'), (b'severity_justification', b'severity_justification'), (b'references', b'references'), (b'test', b'test'), (b'is_template', b'is_template'), (b'active', b'active'), (b'verified', b'verified'), (b'false_p', b'false_p'), (b'duplicate', b'duplicate'), (b'duplicate_finding', b'duplicate_finding'), (b'out_of_scope', b'out_of_scope'), (b'under_review', b'under_review'), (b'review_requested_by', b'review_requested_by'), (b'under_defect_review', b'under_defect_review'), (b'defect_review_requested_by', b'defect_review_requested_by'), (b'thread_id', b'thread_id'), (b'mitigated', b'mitigated'), (b'mitigated_by', b'mitigated_by'), (b'reporter', b'reporter'), (b'numerical_severity', b'numerical_severity'), (b'last_reviewed', b'last_reviewed'), (b'last_reviewed_by', b'last_reviewed_by'), (b'line_number', b'line_number'), (b'sourcefilepath', b'sourcefilepath'), (b'sourcefile', b'sourcefile'), (b'param', b'param'), (b'payload', b'payload'), (b'hash_code', b'hash_code'), (b'line', b'line'), (b'file_path', b'file_path'), (b'static_finding', b'static_finding'), (b'dynamic_finding', b'dynamic_finding'), (b'created', b'created'), (b'scanner_confidence', b'scanner_confidence')], max_length=200),
),
migrations.AlterField(
model_name='rule',
name='match_field',
field=models.CharField(choices=[('id', 'id'), (b'title', b'title'), (b'date', b'date'), (b'cwe', b'cwe'), (b'cve', b'cve'), (b'url', b'url'), (b'severity', b'severity'), (b'description', b'description'), (b'mitigation', b'mitigation'), (b'impact', b'impact'), (b'steps_to_reproduce', b'steps_to_reproduce'), (b'severity_justification', b'severity_justification'), (b'references', b'references'), (b'test', b'test'), (b'is_template', b'is_template'), (b'active', b'active'), (b'verified', b'verified'), (b'false_p', b'false_p'), (b'duplicate', b'duplicate'), (b'duplicate_finding', b'duplicate_finding'), (b'out_of_scope', b'out_of_scope'), (b'under_review', b'under_review'), (b'review_requested_by', b'review_requested_by'), (b'under_defect_review', b'under_defect_review'), (b'defect_review_requested_by', b'defect_review_requested_by'), (b'thread_id', b'thread_id'), (b'mitigated', b'mitigated'), (b'mitigated_by', b'mitigated_by'), (b'reporter', b'reporter'), (b'numerical_severity', b'numerical_severity'), (b'last_reviewed', b'last_reviewed'), (b'last_reviewed_by', b'last_reviewed_by'), (b'line_number', b'line_number'), (b'sourcefilepath', b'sourcefilepath'), (b'sourcefile', b'sourcefile'), (b'param', b'param'), (b'payload', b'payload'), (b'hash_code', b'hash_code'), (b'line', b'line'), (b'file_path', b'file_path'), (b'static_finding', b'static_finding'), (b'dynamic_finding', b'dynamic_finding'), (b'created', b'created'), (b'scanner_confidence', b'scanner_confidence')], max_length=200),
),
]
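# Illustrative note (not part of the generated migration): the regex validator
# used above, ^CVE-\d{4}-\d{4,7}$, accepts identifiers such as 'CVE-2019-0001'
# or 'CVE-2021-4428901' and rejects values like 'CVE-19-1' or 'cve-2019-0001'.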
| 137.404762
| 1,513
| 0.670941
|
8c7439d1e8ec4a2278d503c671a02953b95c25b1
| 4,003
|
py
|
Python
|
pyfos/utils/zoning/zoning_alias_remove.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 44
|
2017-11-17T12:03:11.000Z
|
2022-02-03T20:57:56.000Z
|
pyfos/utils/zoning/zoning_alias_remove.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 13
|
2018-10-09T15:34:15.000Z
|
2022-02-24T20:03:17.000Z
|
pyfos/utils/zoning/zoning_alias_remove.py
|
madhavinaiduprathap/pyfosbrocade
|
ec100e77c441761c3e688f1d8e5d18ad38cc83f4
|
[
"Apache-2.0"
] | 23
|
2017-12-14T18:08:33.000Z
|
2022-02-03T15:33:40.000Z
|
#!/usr/bin/env python3
# Copyright © 2018 Broadcom. All Rights Reserved. The term “Broadcom” refers to
# Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`zoning_alias_remove` - PyFOS util for alias remove use case
***********************************************************************************
The :mod:`zoning_alias_remove` module supports the alias remove use case.
This module is both a stand-alone script and an API that can be used to remove
members from one or more existing aliases.
* Inputs:
* -L=<login>: Login ID. If not provided, an interactive
prompt will request one.
* -P=<password>: Password. If not provided, an interactive
prompt will request one.
* -i=<IP address>: IP address.
* --name=<alias name>: string name of an existing alias
* --members=<member list>: list of members separated by ";".
Multiple members need to be enclosed by "".
* -f=<VFID>: VFID or -1 if VF is disabled. If unspecified,
a VFID of 128 is assumed.
* Outputs:
* Python dictionary content with RESTCONF response data.
"""
import sys
from pyfos import pyfos_auth
import pyfos.pyfos_brocade_zone as pyfos_zone
from pyfos.utils import brcd_zone_util
from pyfos.utils import brcd_util
def aliasremove(session, aliases):
"""Remove members from existing alias(es).
Example usage of the method::
aliases = [
{
"alias-name": name,
"member-entry": {"alias-entry-name": members}
}
]
result = aliasremove(session, aliases)
:param session: session returned by login.
    :param aliases: an array of aliases and the members to be removed from them.
:rtype: Dictionary of return status matching rest response.
*Use cases*
Remove members from an existing alias.
"""
new_defined = pyfos_zone.defined_configuration()
new_defined.set_alias(aliases)
result = new_defined.delete(session)
return result
def __aliasremove(session, name, members):
aliases = [
{
"alias-name": name,
"member-entry": {"alias-entry-name": members}}
]
return aliasremove(session, aliases)
def usage():
print(" Script specific options:")
print("")
print(" --name=NAME name of alias")
print(" --members=MEMBERS ; separated list of alias members")
print(" multiple members enclosed by \"\"")
print("")
def main(argv):
valid_options = ["name", "members"]
inputs = brcd_util.generic_input(argv, usage, valid_options)
session = pyfos_auth.login(inputs["login"], inputs["password"],
inputs["ipaddr"], inputs["secured"],
verbose=inputs["verbose"])
if pyfos_auth.is_failed_login(session):
print("login failed because",
session.get(pyfos_auth.CREDENTIAL_KEY)
[pyfos_auth.LOGIN_ERROR_KEY])
brcd_util.full_usage(usage, valid_options)
sys.exit()
brcd_util.exit_register(session)
vfid = None
if 'vfid' in inputs:
vfid = inputs['vfid']
if vfid is not None:
pyfos_auth.vfid_set(session, vfid)
brcd_zone_util.zone_name_members_func(
session, inputs, usage, __aliasremove)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 31.031008 | 83 | 0.625031 |
6e36d6bc84817ab81f9bd98ba3adf73f0c5cc401 | 5,031 | py | Python | lib/streamlit/elements/checkbox.py | sujithapandalaneni/streamlit | 5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac | ["Apache-2.0"] | 1 | 2022-01-19T10:48:49.000Z | 2022-01-19T10:48:49.000Z | lib/streamlit/elements/checkbox.py | sujithapandalaneni/streamlit | 5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac | ["Apache-2.0"] | null | null | null | lib/streamlit/elements/checkbox.py | sujithapandalaneni/streamlit | 5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac | ["Apache-2.0"] | null | null | null |
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from streamlit.script_run_context import ScriptRunContext, get_script_run_ctx
from streamlit.type_util import Key, to_key
from textwrap import dedent
from typing import cast, Optional
import streamlit
from streamlit.proto.Checkbox_pb2 import Checkbox as CheckboxProto
from streamlit.state.widgets import register_widget
from streamlit.state.session_state import (
WidgetArgs,
WidgetCallback,
WidgetKwargs,
)
from .form import current_form_id
from .utils import check_callback_rules, check_session_state_rules
class CheckboxMixin:
def checkbox(
self,
label: str,
value: bool = False,
key: Optional[Key] = None,
help: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
disabled: bool = False,
) -> bool:
"""Display a checkbox widget.
Parameters
----------
label : str
A short label explaining to the user what this checkbox is for.
value : bool
Preselect the checkbox when it first renders. This will be
cast to bool internally.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
help : str
An optional tooltip that gets displayed next to the checkbox.
on_change : callable
An optional callback invoked when this checkbox's value changes.
args : tuple
An optional tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean, which disables the checkbox if set to True.
The default is False. This argument can only be supplied by keyword.
Returns
-------
bool
Whether or not the checkbox is checked.
Example
-------
>>> agree = st.checkbox('I agree')
>>>
>>> if agree:
... st.write('Great!')
"""
ctx = get_script_run_ctx()
return self._checkbox(
label=label,
value=value,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
ctx=ctx,
)
def _checkbox(
self,
label: str,
value: bool = False,
key: Optional[Key] = None,
help: Optional[str] = None,
on_change: Optional[WidgetCallback] = None,
args: Optional[WidgetArgs] = None,
kwargs: Optional[WidgetKwargs] = None,
*, # keyword-only arguments:
disabled: bool = False,
ctx: Optional[ScriptRunContext] = None,
) -> bool:
key = to_key(key)
check_callback_rules(self.dg, on_change)
check_session_state_rules(
default_value=None if value is False else value, key=key
)
checkbox_proto = CheckboxProto()
checkbox_proto.label = label
checkbox_proto.default = bool(value)
checkbox_proto.form_id = current_form_id(self.dg)
checkbox_proto.disabled = disabled
if help is not None:
checkbox_proto.help = dedent(help)
def deserialize_checkbox(ui_value: Optional[bool], widget_id: str = "") -> bool:
return bool(ui_value if ui_value is not None else value)
current_value, set_frontend_value = register_widget(
"checkbox",
checkbox_proto,
user_key=key,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=deserialize_checkbox,
serializer=bool,
ctx=ctx,
)
if set_frontend_value:
checkbox_proto.value = current_value
checkbox_proto.set_value = True
self.dg._enqueue("checkbox", checkbox_proto)
return cast(bool, current_value)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
| 33.54 | 88 | 0.622739 |
ecb808d6103af26da2ee84ad5e81c139b36e4f5c | 401 | py | Python | music_review/wsgi.py | wmalarski/music-reviews | 7190a2fc489965d951b3879ef89bfdf7893b2456 | ["MIT"] | null | null | null | music_review/wsgi.py | wmalarski/music-reviews | 7190a2fc489965d951b3879ef89bfdf7893b2456 | ["MIT"] | 80 | 2020-09-22T19:26:24.000Z | 2021-09-22T19:44:09.000Z | music_review/wsgi.py | wmalarski/music-reviews | 7190a2fc489965d951b3879ef89bfdf7893b2456 | ["MIT"] | null | null | null |
"""
WSGI config for music_review project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "music_review.settings")
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 |
5ad6cfcd17b221dc9d9eedf6123f29b71e364f66 | 867 | py | Python | backend/authentication/urls.py | Gimb0/financeManager | b054567ccbcc66535b30b92af1bf11e270968779 | ["MIT"] | null | null | null | backend/authentication/urls.py | Gimb0/financeManager | b054567ccbcc66535b30b92af1bf11e270968779 | ["MIT"] | null | null | null | backend/authentication/urls.py | Gimb0/financeManager | b054567ccbcc66535b30b92af1bf11e270968779 | ["MIT"] | null | null | null |
from django.urls import path, include
from rest_framework_simplejwt import views as jwt_views
from .views import ObtainTokenPairWithColorView, CustomUserCreate
from rest_framework import routers
from spendings.views import ExpensesView, CategoryView
router = routers.DefaultRouter()
router.register(r'expenses', ExpensesView, 'expenses')
router.register(r'categories', CategoryView, 'categories')
urlpatterns = [
path('user/create/', CustomUserCreate.as_view(), name="create_user"),
path('token/obtain/', ObtainTokenPairWithColorView.as_view(), name='token_create'),
path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
path('spendings/', include(router.urls)),
# path('spendings/expenses/', ExpensesView.as_view, name="expenses"),
# path('spendings/categories/', CategoryView.as_view, name="categories"),
]
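# Illustrative note (added): with the DefaultRouter registrations above and the
# 'spendings/' include, the router is expected to expose list/detail routes such
# as spendings/expenses/ and spendings/categories/ (the exact URLs depend on the
# project-level prefix under which this urlconf is included).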
| 41.285714 | 87 | 0.769319 |
e29ac73ee0f89532fb7185ece9d9d79626d29a76 | 105 | py | Python | lbry/__init__.py | StripedMonkey/lbry-sdk | b7cb2a7aa553cf3eafc239275fa6e4e30b9057e1 | ["MIT"] | null | null | null | lbry/__init__.py | StripedMonkey/lbry-sdk | b7cb2a7aa553cf3eafc239275fa6e4e30b9057e1 | ["MIT"] | null | null | null | lbry/__init__.py | StripedMonkey/lbry-sdk | b7cb2a7aa553cf3eafc239275fa6e4e30b9057e1 | ["MIT"] | null | null | null |
__version__ = "0.70.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
| 35 | 81 | 0.695238 |
1b64be6d7fb2e25f4cbb36303f70d04c23bf8f7c | 687 | py | Python | codegen/scripts/test_validator.py | Citrusboa/firmware_xiv | 4379cefae900fd67bd14d930da6b8acfce625176 | ["MIT"] | 14 | 2019-11-12T00:11:29.000Z | 2021-12-13T05:32:41.000Z | codegen/scripts/test_validator.py | 123Logan321/firmware_xiv | 14468d55753ad62f8a63a9289511e72131443042 | ["MIT"] | 191 | 2019-11-12T05:36:58.000Z | 2022-03-21T19:54:46.000Z | codegen/scripts/test_validator.py | 123Logan321/firmware_xiv | 14468d55753ad62f8a63a9289511e72131443042 | ["MIT"] | 14 | 2020-06-06T14:43:14.000Z | 2022-03-08T00:48:11.000Z |
"""Module for testing validator methods."""
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import validator
from constants import NUM_CAN_MESSAGES
class TestValidatorMethods(unittest.TestCase):
"""Tests the validator module methods."""
def test_valid_can_id_in_range(self):
"""Tests if a valid can message is in range."""
for can_msg_id in range(0, NUM_CAN_MESSAGES):
self.assertTrue(validator.valid_can_id(can_msg_id))
def test_valid_can_id_out_of_range(self):
"""Tests if a valid can message is out of range."""
self.assertFalse(validator.valid_can_id(NUM_CAN_MESSAGES))
| 32.714286 | 82 | 0.743814 |
316bf2b6ee73e303cfd2af021ccebefeb88cf8dc | 1,408 | py | Python | api/applications/views/party_documents.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | ["MIT"] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/applications/views/party_documents.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | ["MIT"] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/applications/views/party_documents.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | ["MIT"] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z |
from django.db import transaction
from rest_framework.views import APIView
from api.applications.libraries.get_applications import get_application
from api.applications.libraries.document_helpers import upload_party_document, delete_party_document, get_party_document
from api.core.authentication import ExporterAuthentication
from api.core.decorators import authorised_to_view_application
from api.users.models import ExporterUser
class PartyDocumentView(APIView):
"""
Retrieve, add or delete an end user document from an application
"""
authentication_classes = (ExporterAuthentication,)
@authorised_to_view_application(ExporterUser)
def get(self, request, pk, party_pk):
application = get_application(pk)
party = application.get_party(party_pk)
return get_party_document(party)
@transaction.atomic
@authorised_to_view_application(ExporterUser)
def post(self, request, pk, party_pk):
application = get_application(pk)
party = application.get_party(party_pk)
return upload_party_document(party, request.data, application, request.user)
@transaction.atomic
@authorised_to_view_application(ExporterUser)
def delete(self, request, pk, party_pk):
application = get_application(pk)
party = application.get_party(party_pk)
return delete_party_document(party, application, request.user)
| 38.054054 | 120 | 0.772727 |
67ca6f6fa9d170915986b76886eec289a455cdbc | 26,382 | py | Python | discord/permissions.py | z03h/discord.py | 7e5831ba9cc3f881e11b3536159a3851fba6ab52 | ["MIT"] | null | null | null | discord/permissions.py | z03h/discord.py | 7e5831ba9cc3f881e11b3536159a3851fba6ab52 | ["MIT"] | 7 | 2021-09-06T04:52:13.000Z | 2022-01-13T04:56:21.000Z | discord/permissions.py | z03h/discord.py | 7e5831ba9cc3f881e11b3536159a3851fba6ab52 | ["MIT"] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Callable, Any, ClassVar, Dict, Iterator, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Optional
from .flags import BaseFlags, flag_value, fill_with_flags, alias_flag_value
__all__ = (
'Permissions',
'PermissionOverwrite',
)
# A permission alias works like a regular flag but is marked
# So the PermissionOverwrite knows to work with it
class permission_alias(alias_flag_value):
alias: str
def make_permission_alias(alias: str) -> Callable[[Callable[[Any], int]], permission_alias]:
def decorator(func: Callable[[Any], int]) -> permission_alias:
ret = permission_alias(func)
ret.alias = alias
return ret
return decorator
P = TypeVar('P', bound='Permissions')
@fill_with_flags()
class Permissions(BaseFlags):
"""Wraps up the Discord permission value.
The properties provided are two way. You can set and retrieve individual
bits using the properties as if they were regular bools. This allows
you to edit permissions.
.. versionchanged:: 1.3
You can now use keyword arguments to initialize :class:`Permissions`
similar to :meth:`update`.
.. container:: operations
.. describe:: x == y
Checks if two permissions are equal.
.. describe:: x != y
Checks if two permissions are not equal.
.. describe:: x <= y
Checks if a permission is a subset of another permission.
.. describe:: x >= y
Checks if a permission is a superset of another permission.
.. describe:: x < y
Checks if a permission is a strict subset of another permission.
.. describe:: x > y
Checks if a permission is a strict superset of another permission.
.. describe:: hash(x)
Return the permission's hash.
.. describe:: iter(x)
Returns an iterator of ``(perm, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Note that aliases are not shown.
Attributes
-----------
value: :class:`int`
The raw value. This value is a bit array field of a 53-bit integer
representing the currently available permissions. You should query
permissions via the properties rather than using this raw value.
"""
__slots__ = ()
def __init__(self, permissions: int = 0, **kwargs: bool):
if not isinstance(permissions, int):
raise TypeError(f'Expected int parameter, received {permissions.__class__.__name__} instead.')
self.value = permissions
for key, value in kwargs.items():
if key not in self.VALID_FLAGS:
raise TypeError(f'{key!r} is not a valid permission name.')
setattr(self, key, value)
def is_subset(self, other: Permissions) -> bool:
"""Returns ``True`` if self has the same or fewer permissions as other."""
if isinstance(other, Permissions):
return (self.value & other.value) == self.value
else:
raise TypeError(f"cannot compare {self.__class__.__name__} with {other.__class__.__name__}")
def is_superset(self, other: Permissions) -> bool:
"""Returns ``True`` if self has the same or more permissions as other."""
if isinstance(other, Permissions):
return (self.value | other.value) == self.value
else:
raise TypeError(f"cannot compare {self.__class__.__name__} with {other.__class__.__name__}")
def is_strict_subset(self, other: Permissions) -> bool:
"""Returns ``True`` if the permissions on other are a strict subset of those on self."""
return self.is_subset(other) and self != other
def is_strict_superset(self, other: Permissions) -> bool:
"""Returns ``True`` if the permissions on other are a strict superset of those on self."""
return self.is_superset(other) and self != other
__le__ = is_subset
__ge__ = is_superset
__lt__ = is_strict_subset
__gt__ = is_strict_superset
@classmethod
def none(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
permissions set to ``False``."""
return cls(0)
@classmethod
def all(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
permissions set to ``True``.
"""
return cls(0b11111111111111111111111111111111111111111)
@classmethod
def all_channel(cls: Type[P]) -> P:
"""A :class:`Permissions` with all channel-specific permissions set to
``True`` and the guild-specific ones set to ``False``. The guild-specific
permissions are currently:
- :attr:`manage_emojis`
- :attr:`view_audit_log`
- :attr:`view_guild_insights`
- :attr:`manage_guild`
- :attr:`change_nickname`
- :attr:`manage_nicknames`
- :attr:`kick_members`
- :attr:`ban_members`
- :attr:`administrator`
.. versionchanged:: 1.7
Added :attr:`stream`, :attr:`priority_speaker` and :attr:`use_slash_commands` permissions.
.. versionchanged:: 2.0
Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,
:attr:`use_external_stickers`, :attr:`send_messages_in_threads` and
:attr:`request_to_speak` permissions.
"""
return cls(0b111110110110011111101111111111101010001)
@classmethod
def general(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"General" permissions from the official Discord UI set to ``True``.
.. versionchanged:: 1.7
Permission :attr:`read_messages` is now included in the general permissions, but
permissions :attr:`administrator`, :attr:`create_instant_invite`, :attr:`kick_members`,
:attr:`ban_members`, :attr:`change_nickname` and :attr:`manage_nicknames` are
no longer part of the general permissions.
"""
return cls(0b01110000000010000000010010110000)
@classmethod
def membership(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Membership" permissions from the official Discord UI set to ``True``.
.. versionadded:: 1.7
"""
return cls(0b10000000000001100000000000000000000000111)
@classmethod
def text(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Text" permissions from the official Discord UI set to ``True``.
.. versionchanged:: 1.7
Permission :attr:`read_messages` is no longer part of the text permissions.
Added :attr:`use_slash_commands` permission.
.. versionchanged:: 2.0
Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,
:attr:`send_messages_in_threads` and :attr:`use_external_stickers` permissions.
"""
return cls(0b111110010000000000001111111100001000000)
@classmethod
def voice(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Voice" permissions from the official Discord UI set to ``True``."""
return cls(0b1000000000000011111100000000001100000000)
@classmethod
def stage(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Stage Channel" permissions from the official Discord UI set to ``True``.
.. versionadded:: 1.7
"""
return cls(1 << 32)
@classmethod
def stage_moderator(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Stage Moderator" permissions from the official Discord UI set to ``True``.
.. versionadded:: 1.7
"""
return cls(0b100000001010000000000000000000000)
@classmethod
def advanced(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
"Advanced" permissions from the official Discord UI set to ``True``.
.. versionadded:: 1.7
"""
return cls(1 << 3)
def update(self, **kwargs: bool) -> None:
r"""Bulk updates this permission object.
Allows you to set multiple attributes by using keyword
arguments. The names must be equivalent to the properties
listed. Extraneous key/value pairs will be silently ignored.
Parameters
------------
\*\*kwargs
A list of key/value pairs to bulk update permissions with.
"""
for key, value in kwargs.items():
if key in self.VALID_FLAGS:
setattr(self, key, value)
def handle_overwrite(self, allow: int, deny: int) -> None:
# Basically this is what's happening here.
# We have an original bit array, e.g. 1010
# Then we have another bit array that is 'denied', e.g. 1111
# And then we have the last one which is 'allowed', e.g. 0101
# We want original OP denied to end up resulting in
# whatever is in denied to be set to 0.
# So 1010 OP 1111 -> 0000
# Then we take this value and look at the allowed values.
# And whatever is allowed is set to 1.
# So 0000 OP2 0101 -> 0101
# The OP is base & ~denied.
# The OP2 is base | allowed.
self.value = (self.value & ~deny) | allow
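        # Worked example (added for illustration): with self.value == 0b1010,
        # deny == 0b1111 and allow == 0b0101, the result is
        # (0b1010 & ~0b1111) | 0b0101 == 0b0101, matching the walkthrough above.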
@flag_value
def create_instant_invite(self) -> int:
""":class:`bool`: Returns ``True`` if the user can create instant invites."""
return 1 << 0
@flag_value
def kick_members(self) -> int:
""":class:`bool`: Returns ``True`` if the user can kick users from the guild."""
return 1 << 1
@flag_value
def ban_members(self) -> int:
""":class:`bool`: Returns ``True`` if a user can ban users from the guild."""
return 1 << 2
@flag_value
def administrator(self) -> int:
""":class:`bool`: Returns ``True`` if a user is an administrator. This role overrides all other permissions.
This also bypasses all channel-specific overrides.
"""
return 1 << 3
@flag_value
def manage_channels(self) -> int:
""":class:`bool`: Returns ``True`` if a user can edit, delete, or create channels in the guild.
This also corresponds to the "Manage Channel" channel-specific override."""
return 1 << 4
@flag_value
def manage_guild(self) -> int:
""":class:`bool`: Returns ``True`` if a user can edit guild properties."""
return 1 << 5
@flag_value
def add_reactions(self) -> int:
""":class:`bool`: Returns ``True`` if a user can add reactions to messages."""
return 1 << 6
@flag_value
def view_audit_log(self) -> int:
""":class:`bool`: Returns ``True`` if a user can view the guild's audit log."""
return 1 << 7
@flag_value
def priority_speaker(self) -> int:
""":class:`bool`: Returns ``True`` if a user can be more easily heard while talking."""
return 1 << 8
@flag_value
def stream(self) -> int:
""":class:`bool`: Returns ``True`` if a user can stream in a voice channel."""
return 1 << 9
@flag_value
def view_channel(self) -> int:
""":class:`bool`: Returns ``True`` if a user can read messages from all or specific text channels.
.. versionchanged:: 2.0
No longer an alias for :attr:`read_messages`.
"""
return 1 << 10
@make_permission_alias('view_channel')
def read_messages(self) -> int:
""":class:`bool`: An alias for :attr:`view_channel`.
.. versionadded:: 1.3
.. versionchanged:: 2.0
Is now an alias for :attr:`view_channel`.
"""
return 1 << 10
@flag_value
def send_messages(self) -> int:
""":class:`bool`: Returns ``True`` if a user can send messages from all or specific text channels."""
return 1 << 11
@flag_value
def send_tts_messages(self) -> int:
""":class:`bool`: Returns ``True`` if a user can send TTS messages from all or specific text channels."""
return 1 << 12
@flag_value
def manage_messages(self) -> int:
""":class:`bool`: Returns ``True`` if a user can delete or pin messages in a text channel.
.. note::
Note that there are currently no ways to edit other people's messages.
"""
return 1 << 13
@flag_value
def embed_links(self) -> int:
""":class:`bool`: Returns ``True`` if a user's messages will automatically be embedded by Discord."""
return 1 << 14
@flag_value
def attach_files(self) -> int:
""":class:`bool`: Returns ``True`` if a user can send files in their messages."""
return 1 << 15
@flag_value
def read_message_history(self) -> int:
""":class:`bool`: Returns ``True`` if a user can read a text channel's previous messages."""
return 1 << 16
@flag_value
def mention_everyone(self) -> int:
""":class:`bool`: Returns ``True`` if a user's @everyone or @here will mention everyone in the text channel."""
return 1 << 17
@flag_value
def external_emojis(self) -> int:
""":class:`bool`: Returns ``True`` if a user can use emojis from other guilds."""
return 1 << 18
@make_permission_alias('external_emojis')
def use_external_emojis(self) -> int:
""":class:`bool`: An alias for :attr:`external_emojis`.
.. versionadded:: 1.3
"""
return 1 << 18
@flag_value
def view_guild_insights(self) -> int:
""":class:`bool`: Returns ``True`` if a user can view the guild's insights.
.. versionadded:: 1.3
"""
return 1 << 19
@flag_value
def connect(self) -> int:
""":class:`bool`: Returns ``True`` if a user can connect to a voice channel."""
return 1 << 20
@flag_value
def speak(self) -> int:
""":class:`bool`: Returns ``True`` if a user can speak in a voice channel."""
return 1 << 21
@flag_value
def mute_members(self) -> int:
""":class:`bool`: Returns ``True`` if a user can mute other users."""
return 1 << 22
@flag_value
def deafen_members(self) -> int:
""":class:`bool`: Returns ``True`` if a user can deafen other users."""
return 1 << 23
@flag_value
def move_members(self) -> int:
""":class:`bool`: Returns ``True`` if a user can move users between other voice channels."""
return 1 << 24
@flag_value
def use_voice_activation(self) -> int:
""":class:`bool`: Returns ``True`` if a user can use voice activation in voice channels."""
return 1 << 25
@flag_value
def change_nickname(self) -> int:
""":class:`bool`: Returns ``True`` if a user can change their nickname in the guild."""
return 1 << 26
@flag_value
def manage_nicknames(self) -> int:
""":class:`bool`: Returns ``True`` if a user can change other user's nickname in the guild."""
return 1 << 27
@flag_value
def manage_roles(self) -> int:
""":class:`bool`: Returns ``True`` if a user can create or edit roles less than their role's position.
This also corresponds to the "Manage Permissions" channel-specific override.
"""
return 1 << 28
@make_permission_alias('manage_roles')
def manage_permissions(self) -> int:
""":class:`bool`: An alias for :attr:`manage_roles`.
.. versionadded:: 1.3
"""
return 1 << 28
@flag_value
def manage_webhooks(self) -> int:
""":class:`bool`: Returns ``True`` if a user can create, edit, or delete webhooks."""
return 1 << 29
@flag_value
def manage_emojis(self) -> int:
""":class:`bool`: Returns ``True`` if a user can create, edit, or delete emojis."""
return 1 << 30
@make_permission_alias('manage_emojis')
def manage_emojis_and_stickers(self) -> int:
""":class:`bool`: An alias for :attr:`manage_emojis`.
.. versionadded:: 2.0
"""
return 1 << 30
@flag_value
def use_slash_commands(self) -> int:
""":class:`bool`: Returns ``True`` if a user can use slash commands.
.. versionadded:: 1.7
"""
return 1 << 31
@flag_value
def request_to_speak(self) -> int:
""":class:`bool`: Returns ``True`` if a user can request to speak in a stage channel.
.. versionadded:: 1.7
"""
return 1 << 32
@flag_value
def manage_events(self) -> int:
""":class:`bool`: Returns ``True`` if a user can manage guild events.
.. versionadded:: 2.0
"""
return 1 << 33
@flag_value
def manage_threads(self) -> int:
""":class:`bool`: Returns ``True`` if a user can manage threads.
.. versionadded:: 2.0
"""
return 1 << 34
@flag_value
def create_public_threads(self) -> int:
""":class:`bool`: Returns ``True`` if a user can create public threads.
.. versionadded:: 2.0
"""
return 1 << 35
@flag_value
def create_private_threads(self) -> int:
""":class:`bool`: Returns ``True`` if a user can create private threads.
.. versionadded:: 2.0
"""
return 1 << 36
@flag_value
def external_stickers(self) -> int:
""":class:`bool`: Returns ``True`` if a user can use stickers from other guilds.
.. versionadded:: 2.0
"""
return 1 << 37
@make_permission_alias('external_stickers')
def use_external_stickers(self) -> int:
""":class:`bool`: An alias for :attr:`external_stickers`.
.. versionadded:: 2.0
"""
return 1 << 37
@flag_value
def send_messages_in_threads(self) -> int:
""":class:`bool`: Returns ``True`` if a user can send messages in threads.
.. versionadded:: 2.0
"""
return 1 << 38
@flag_value
def start_embedded_activities(self) -> int:
""":class:`bool`: Returns ``True`` if a user can start embedded activities.
.. versionadded:: 2.0
"""
return 1 << 39
@flag_value
def moderate_members(self) -> int:
""":class:`bool`: Returns ``True`` if a user can timeout other users.
.. versionadded:: 2.0
"""
return 1 << 40
PO = TypeVar('PO', bound='PermissionOverwrite')
def _augment_from_permissions(cls):
cls.VALID_NAMES = set(Permissions.VALID_FLAGS)
aliases = set()
# make descriptors for all the valid names and aliases
for name, value in Permissions.__dict__.items():
if isinstance(value, permission_alias):
key = value.alias
aliases.add(name)
elif isinstance(value, flag_value):
key = name
else:
continue
# god bless Python
def getter(self, x=key):
return self._values.get(x)
def setter(self, value, x=key):
self._set(x, value)
prop = property(getter, setter)
setattr(cls, name, prop)
cls.PURE_FLAGS = cls.VALID_NAMES - aliases
return cls
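# Illustrative note (added): after decoration, every permission name (and alias)
# on PermissionOverwrite becomes a property backed by the _values dict, so
# reading overwrite.send_messages is roughly _values.get('send_messages') and
# assigning it routes through _set().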
@_augment_from_permissions
class PermissionOverwrite:
r"""A type that is used to represent a channel specific permission.
Unlike a regular :class:`Permissions`\, the default value of a
permission is equivalent to ``None`` and not ``False``. Setting
a value to ``False`` is **explicitly** denying that permission,
while setting a value to ``True`` is **explicitly** allowing
that permission.
The values supported by this are the same as :class:`Permissions`
with the added possibility of it being set to ``None``.
.. container:: operations
.. describe:: x == y
Checks if two overwrites are equal.
.. describe:: x != y
Checks if two overwrites are not equal.
.. describe:: iter(x)
Returns an iterator of ``(perm, value)`` pairs. This allows it
to be, for example, constructed as a dict or a list of pairs.
Note that aliases are not shown.
Parameters
-----------
\*\*kwargs
Set the value of permissions by their name.
"""
__slots__ = ('_values',)
if TYPE_CHECKING:
VALID_NAMES: ClassVar[Set[str]]
PURE_FLAGS: ClassVar[Set[str]]
# I wish I didn't have to do this
create_instant_invite: Optional[bool]
kick_members: Optional[bool]
ban_members: Optional[bool]
administrator: Optional[bool]
manage_channels: Optional[bool]
manage_guild: Optional[bool]
add_reactions: Optional[bool]
view_audit_log: Optional[bool]
priority_speaker: Optional[bool]
stream: Optional[bool]
read_messages: Optional[bool]
view_channel: Optional[bool]
send_messages: Optional[bool]
send_tts_messages: Optional[bool]
manage_messages: Optional[bool]
embed_links: Optional[bool]
attach_files: Optional[bool]
read_message_history: Optional[bool]
mention_everyone: Optional[bool]
external_emojis: Optional[bool]
use_external_emojis: Optional[bool]
view_guild_insights: Optional[bool]
connect: Optional[bool]
speak: Optional[bool]
mute_members: Optional[bool]
deafen_members: Optional[bool]
move_members: Optional[bool]
use_voice_activation: Optional[bool]
change_nickname: Optional[bool]
manage_nicknames: Optional[bool]
manage_roles: Optional[bool]
manage_permissions: Optional[bool]
manage_webhooks: Optional[bool]
manage_emojis: Optional[bool]
manage_emojis_and_stickers: Optional[bool]
use_slash_commands: Optional[bool]
request_to_speak: Optional[bool]
manage_events: Optional[bool]
manage_threads: Optional[bool]
create_public_threads: Optional[bool]
create_private_threads: Optional[bool]
send_messages_in_threads: Optional[bool]
external_stickers: Optional[bool]
use_external_stickers: Optional[bool]
start_embedded_activities: Optional[bool]
def __init__(self, **kwargs: Optional[bool]):
self._values: Dict[str, Optional[bool]] = {}
for key, value in kwargs.items():
if key not in self.VALID_NAMES:
raise ValueError(f'no permission called {key}.')
setattr(self, key, value)
def __eq__(self, other: Any) -> bool:
return isinstance(other, PermissionOverwrite) and self._values == other._values
def _set(self, key: str, value: Optional[bool]) -> None:
if value not in (True, None, False):
raise TypeError(f'Expected bool or NoneType, received {value.__class__.__name__}')
if value is None:
self._values.pop(key, None)
else:
self._values[key] = value
def pair(self) -> Tuple[Permissions, Permissions]:
"""Tuple[:class:`Permissions`, :class:`Permissions`]: Returns the (allow, deny) pair from this overwrite."""
allow = Permissions.none()
deny = Permissions.none()
for key, value in self._values.items():
if value is True:
setattr(allow, key, True)
elif value is False:
setattr(deny, key, True)
return allow, deny
@classmethod
def from_pair(cls: Type[PO], allow: Permissions, deny: Permissions) -> PO:
"""Creates an overwrite from an allow/deny pair of :class:`Permissions`."""
ret = cls()
for key, value in allow:
if value is True:
setattr(ret, key, True)
for key, value in deny:
if value is True:
setattr(ret, key, False)
return ret
def is_empty(self) -> bool:
"""Checks if the permission overwrite is currently empty.
An empty permission overwrite is one that has no overwrites set
to ``True`` or ``False``.
Returns
-------
:class:`bool`
Indicates if the overwrite is empty.
"""
return len(self._values) == 0
def update(self, **kwargs: bool) -> None:
r"""Bulk updates this permission overwrite object.
Allows you to set multiple attributes by using keyword
arguments. The names must be equivalent to the properties
listed. Extraneous key/value pairs will be silently ignored.
Parameters
------------
\*\*kwargs
A list of key/value pairs to bulk update with.
"""
for key, value in kwargs.items():
if key not in self.VALID_NAMES:
continue
setattr(self, key, value)
def __iter__(self) -> Iterator[Tuple[str, Optional[bool]]]:
for key in self.PURE_FLAGS:
yield key, self._values.get(key)
| 33.823077 | 119 | 0.615344 |
b3562d942aeaec8882c21c4abaaa1792d4f7a9d0 | 1,565 | py | Python | tests/test_gnets_legalize.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | ["Apache-2.0"] | 1 | 2021-11-18T10:45:26.000Z | 2021-11-18T10:45:26.000Z | tests/test_gnets_legalize.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | ["Apache-2.0"] | null | null | null | tests/test_gnets_legalize.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 EnICS Labs, Bar-Ilan University.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys, os
sys.path.append(os.path.abspath('..'))
from salamandra import *
def main():
test(is_metatest=False)
def test(is_metatest):
nand = Component('nand')
and_ = Component('and2')
and_.add_pinbus(Bus(Output, 'A_b_o', 8))
and_.add_pinbus(Bus(Output, 'A_b_o2', 8))
and_.add_pinbus(Bus(Input, 'A_b_i', 8))
and_.add_pin(Input('A'))
nand.add_subcomponent(and_, 'i_and')
nand.add_pinbus(Bus(Input, 'A_b', 5))
nand.add_pin(Input('A'))
nand.connect("1'b0", 'i_and.A')
# nand.connect('A_b[1]', 'i_and.A_b_o[4]')
# nand.connect('A_b[2]', 'i_and.A_b_o[5]')
nand.connect('1\'b0', 'i_and.A_b_i[2]')
nand.connect('1\'b0', 'i_and.A_b_i[3]')
nand.connect('A_b[1]', 'i_and.A_b_i[4]')
nand.connect('A_b[2]', 'i_and.A_b_i[5]')
nand.connect('1\'b0', 'i_and.A_b_i[6]')
nand.connect('1\'b1', 'i_and.A_b_i[7]')
nand2 = Component('nand2', nand)
nand.legalize()
nand2.legalize()
if not is_metatest:
# with open('verilog_files/{}.v'.format(re.findall(r'/(\w+)\.py', __file__)[0]), 'w') as f:
# for com in [and_, nand, nand2]:
# for l in com.write_verilog():
# f.write(l)
# f.write('\n')
for com in [nand, nand2]:
for l in com.write_verilog():
print(l)
return True
if __name__ == '__main__':
main()
| 29.528302 | 99 | 0.584026 |
d790434f58f48fbc8f083f7855591bf2875e192b | 832 | py | Python | setup.py | renereimann/FID_Simulation | 40fe7f0892a5f4600d863658f748906bff050b67 | ["MIT"] | null | null | null | setup.py | renereimann/FID_Simulation | 40fe7f0892a5f4600d863658f748906bff050b67 | ["MIT"] | null | null | null | setup.py | renereimann/FID_Simulation | 40fe7f0892a5f4600d863658f748906bff050b67 | ["MIT"] | 1 | 2020-04-11T04:18:31.000Z | 2020-04-11T04:18:31.000Z |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open('LICENSE') as f:
license = f.read()
setuptools.setup(
name="FreeInductionDecay", # Replace with your own username
version="0.0.1",
author="Rene Reimann ",
author_email="rreimann@uni-mainz.de",
description="A package to simulate Free Induction Decay signals in pulsed NMR",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/renereimann/FID_Simulation",
license=license,
packages=setuptools.find_packages(exclude=('tests', 'docs')),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.814815 | 83 | 0.673077 |
a29d9aec87f516112f2333b8e4d257f351013125 | 3,949 | py | Python | mtg_deckbuilder/users/migrations/0001_initial.py | MrGreenTea/deckbuilder | ec6617add05e8567d8b9e4ada90b86ad50055f0e | ["MIT"] | null | null | null | mtg_deckbuilder/users/migrations/0001_initial.py | MrGreenTea/deckbuilder | ec6617add05e8567d8b9e4ada90b86ad50055f0e | ["MIT"] | null | null | null | mtg_deckbuilder/users/migrations/0001_initial.py | MrGreenTea/deckbuilder | ec6617add05e8567d8b9e4ada90b86ad50055f0e | ["MIT"] | null | null | null |
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [("auth", "0008_alter_user_username_max_length")]
operations = [
migrations.CreateModel(
name="User",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("password", models.CharField(max_length=128, verbose_name="password")),
("last_login", models.DateTimeField(blank=True, null=True, verbose_name="last login")),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
("first_name", models.CharField(blank=True, max_length=30, verbose_name="first name")),
("last_name", models.CharField(blank=True, max_length=150, verbose_name="last name")),
("email", models.EmailField(blank=True, max_length=254, verbose_name="email address")),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
("date_joined", models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined")),
("name", models.CharField(blank=True, max_length=255, verbose_name="Name of User")),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={"verbose_name_plural": "users", "verbose_name": "user", "abstract": False},
managers=[("objects", django.contrib.auth.models.UserManager())],
)
]
| 45.390805 | 138 | 0.488731 |
9666b3142e97951c09a65a4297c38d03e636dc7c | 6,520 | py | Python | src/data/make_dataset.py | mlotfic/Communicate-Dtata-Finding | f2b7e283e93e9a78bf5179d5907d42706a33861d | ["MIT"] | null | null | null | src/data/make_dataset.py | mlotfic/Communicate-Dtata-Finding | f2b7e283e93e9a78bf5179d5907d42706a33861d | ["MIT"] | null | null | null | src/data/make_dataset.py | mlotfic/Communicate-Dtata-Finding | f2b7e283e93e9a78bf5179d5907d42706a33861d | ["MIT"] | null | null | null |
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import time
from pathlib import Path
import os
relative_path = Path(__file__).parents[2]
if not os.path.exists('{}/data/interim'.format(relative_path)):
os.makedirs('{}/data/interim'.format(relative_path))
'''
schema = {
'Year' : str,
'Month' : str,
'DayofMonth' : str,
'DayOfWeek' : str,
'DepTime' : str,
'CRSDepTime' : str,
'ArrTime' : str,
'CRSArrTime' : str,
'UniqueCarrier' : str,
'FlightNum' : str,
'TailNum' : str,
'ActualElapsedTime' : int,
'CRSElapsedTime' : int,
'AirTime' : int,
'ArrDelay' : int,
'DepDelay' : int,
'Origin' : str,
'Dest' : str,
'Distance' : int,
'TaxiIn' : int,
'TaxiOut' : int,
'Cancelled' : bool,
'CancellationCode' : str,
'Diverted' : bool,
'CarrierDelay' : int,
'WeatherDelay' : int,
'NASDelay' : int,
'SecurityDelay' : int,
'LateAircraftDelay' : int
}
'''
# user define function
def load_dataset(year='2008'):
'''
    Description: load the dataset for the given year
    parameter: year (string)
    return: dataframe
'''
t1 = time.time()
df = pd.read_csv('{}/data/raw/{}.csv.bz2'.format(relative_path, year), compression='bz2', dtype=str, na_values=['na', '-', '.', ''])
t2 = time.time()
print('Elapsed loading time :', t2-t1)
return df
def validate_int2str(col, l=1, _min=False):
'''
validate data to int and then to str
parameter : float , int string number
col : string text
l : min length of the number
_min: bool
return : string type
'''
try:
if col:
col = int(float(col))
if (_min and (l > len(str(col)))):
return np.NaN
elif (_min and (l <= len(str(col)))):
col = str(col).zfill(4)
col = datetime.strptime(col, '%H%M').time().strftime("%I:%M %p")
return col
else:
return np.NaN
except Exception as e:
return np.NaN
'''
# test function
print(validate_int2str("1.0", 1, 1))
print(validate_int2str("1.0", 0, 1))
print(validate_int2str("12.0x"))
print(validate_int2str("12.0"))
'''
def validate_str(col):
'''
validate data to str
parameter : float , int string number
col : string text
return : string type
'''
try:
if str(col).strip():
return str(col)
else:
return np.NaN
except Exception as e:
return np.NaN
'''
# test function
print(validate_str(""))
print(validate_str(" "))
print(validate_str(" \n"))
print(validate_str("12.0x"))
print(validate_str("12.0"))
print(validate_str(12))
'''
# load dataset 2008
df = load_dataset()
# correcting dates format (zero-padding the HHMM time columns)
df['DepTime'] = df['DepTime'].apply(lambda x: str(int(x)).zfill(4) if pd.notnull(x) else x)
df['CRSDepTime'] = df['CRSDepTime'].apply(lambda x: str(int(x)).zfill(4) if pd.notnull(x) else x)
df['ArrTime'] = df.ArrTime.apply(lambda x: str(int(x)).zfill(4) if pd.notnull(x) else x)
df['CRSArrTime'] = df.CRSArrTime.apply(lambda x: str(int(x)).zfill(4) if pd.notnull(x) else x)
# validate data
df['Year'] = df['Year'].apply(lambda x: validate_str(x))
df['Month'] = df['Month'].apply(lambda x: validate_str(x))
df['DayofMonth'] = df['DayofMonth'].apply(lambda x: validate_str(x))
df['DayOfWeek'] = df['DayOfWeek'].apply(lambda x: validate_str(x))
'''
#Col 1 = where you want the values replaced
#Col 2 = where you want to take the values from
df.["Col 1"].fillna(df.["Col 2"], inplace=True)
# datetime(year, month, day, hour, minute, second, microsecond)
'''
# remove one number value and reformat hh:mm AM
t1 = time.time()
df['DepTime'] = df['DepTime'].apply(lambda x: validate_int2str(x, l=1, _min=True))
df['CRSDepTime'] = df['CRSDepTime'].astype('str').apply(lambda x: validate_int2str(x, l=1, _min=True))
df['ArrTime'] = df['ArrTime'].astype('str').apply(lambda x: validate_int2str(x, l=1, _min=True))
df['CRSArrTime'] = df['CRSArrTime'].astype('str').apply(lambda x: validate_int2str(x, l=1, _min=True))
t2 = time.time()
print('Elapsed loading time :', t2-t1)
# filling nan to zero to modify schema
df['CarrierDelay'].fillna(0, inplace=True)
df['WeatherDelay'].fillna(0, inplace=True)
df['NASDelay'].fillna(0, inplace=True)
df['SecurityDelay'].fillna(0, inplace=True)
df['LateAircraftDelay'].fillna(0, inplace=True)
df['CarrierDelay'] = df['CarrierDelay'].astype('int')
df['WeatherDelay'] = df['WeatherDelay'].astype('int')
df['NASDelay'] = df['NASDelay'].astype('int')
df['SecurityDelay'] = df['SecurityDelay'].astype('int')
df['LateAircraftDelay'] = df['LateAircraftDelay'].astype('int')
# divide the dataset into three subsets:
# - flights
# - cancelled
# - diverted
# df[~df.CancellationCode.notna()]
# all columns were read with dtype=str, so compare against the string '1'
flights = df[~(df.Diverted == '1')]
flights = flights[~(flights.Cancelled == '1')].drop(columns=['Cancelled', 'CancellationCode', 'Diverted'])
t1 = time.time()
flights.to_csv('{}/data/interim/{}.csv'.format(relative_path, 'flights'), index=False)
t2 = time.time()
print('Elapsed saving time :', t2-t1)
del flights
df_cancelled = df[df.Cancelled == '1'].drop(columns=['DepTime', 'ArrTime', 'ActualElapsedTime', 'AirTime', 'ArrDelay',
'DepDelay', 'TaxiIn', 'TaxiOut', 'Cancelled', 'Diverted', 'CarrierDelay',
'WeatherDelay', 'NASDelay', 'SecurityDelay', 'LateAircraftDelay'])
t1 = time.time()
df_cancelled.to_csv('{}/data/interim/{}.csv'.format(relative_path, 'canceled'), index=False)
t2 = time.time()
print('Elapsed saving time :', t2-t1)
del df_cancelled
df_diverted = df[df.Diverted == '1'].drop(columns=['ArrTime', 'CRSArrTime', 'CRSElapsedTime', 'Cancelled', 'CancellationCode', 'Diverted',
'CarrierDelay', 'WeatherDelay', 'NASDelay', 'SecurityDelay',
'LateAircraftDelay', 'TaxiIn', 'TaxiOut'])
t1 = time.time()
df_diverted.to_csv('{}/data/interim/{}.csv'.format(relative_path, 'diverted'), index=False)
t2 = time.time()
print('Elapsed saving time :', t2-t1)
del df_diverted
| 32.929293 | 139 | 0.602301 |
a479214d8dd8a8d0514c7f9047a21ce049d55f4f | 151 | py | Python | scofield/customer/admin.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | ["BSD-3-Clause"] | 4 | 2016-04-10T13:37:58.000Z | 2018-06-11T18:49:29.000Z | scofield/customer/admin.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | ["BSD-3-Clause"] | null | null | null | scofield/customer/admin.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | ["BSD-3-Clause"] | 2 | 2015-04-08T19:52:19.000Z | 2021-02-10T08:08:19.000Z |
from models import *
from django.contrib import admin
admin.site.register(Customer)
admin.site.register(Phonenumber)
admin.site.register(Address)
| 13.727273 | 32 | 0.801325 |
8b404776ad282d3c7a1773c45bdb528ce9a03d9a | 55,569 | py | Python | lib/rucio/core/rse.py | DanilaOleynik/rucio | b6708b41abd6e781f976970e758babbd87a8941e | ["Apache-2.0"] | 2 | 2020-02-18T22:34:24.000Z | 2022-03-09T16:26:18.000Z | lib/rucio/core/rse.py | DanilaOleynik/rucio | b6708b41abd6e781f976970e758babbd87a8941e | ["Apache-2.0"] | null | null | null | lib/rucio/core/rse.py | DanilaOleynik/rucio | b6708b41abd6e781f976970e758babbd87a8941e | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2012-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Ralph Vigne <ralph.vigne@cern.ch>, 2012-2015
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2021
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2020
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2021
# - Thomas Beermann <thomas.beermann@cern.ch>, 2014-2017
# - Wen Guan <wen.guan@cern.ch>, 2015-2016
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Frank Berghaus <frank.berghaus@cern.ch>, 2018
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2018-2021
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Aristeidis Fkiaras <aristeidis.fkiaras@cern.ch>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Tomas Javurek <tomas.javurek@cern.ch>, 2020
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
import json
from io import StringIO
from re import match
from typing import TYPE_CHECKING
import sqlalchemy
import sqlalchemy.orm
from dogpile.cache.api import NO_VALUE
from six import string_types
from sqlalchemy.exc import DatabaseError, IntegrityError, OperationalError
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import FlushError
from sqlalchemy.sql.expression import or_, false
import rucio.core.account_counter
from rucio.common import exception, utils
from rucio.common.cache import make_region_memcached
from rucio.common.config import get_lfn2pfn_algorithm_default
from rucio.common.utils import CHECKSUM_KEY, is_checksum_valid, GLOBALLY_SUPPORTED_CHECKSUMS
from rucio.core.rse_counter import add_counter, get_counter
from rucio.db.sqla import models
from rucio.db.sqla.constants import RSEType
from rucio.db.sqla.session import read_session, transactional_session, stream_session
if TYPE_CHECKING:
from typing import Dict, Optional
from sqlalchemy.orm import Session
REGION = make_region_memcached(expiration_time=900)
@transactional_session
def add_rse(rse, vo='def', deterministic=True, volatile=False, city=None, region_code=None, country_name=None, continent=None, time_zone=None,
ISP=None, staging_area=False, rse_type=RSEType.DISK, longitude=None, latitude=None, ASN=None, availability=7, session=None):
"""
Add a rse with the given location name.
:param rse: the name of the new rse.
:param vo: the vo to add the RSE to.
:param deterministic: Boolean to know if the pfn is generated deterministically.
:param volatile: Boolean for RSE cache.
:param city: City for the RSE.
:param region_code: The region code for the RSE.
:param country_name: The country.
:param continent: The continent.
:param time_zone: Timezone.
:param ISP: Internet service provider.
:param staging_area: Staging area.
:param rse_type: RSE type.
:param latitude: Latitude coordinate of RSE.
:param longitude: Longitude coordinate of RSE.
:param ASN: Access service network.
:param availability: Availability.
:param session: The database session in use.
"""
if isinstance(rse_type, string_types):
rse_type = RSEType(rse_type)
new_rse = models.RSE(rse=rse, vo=vo, deterministic=deterministic, volatile=volatile, city=city,
region_code=region_code, country_name=country_name,
continent=continent, time_zone=time_zone, staging_area=staging_area, ISP=ISP, availability=availability,
rse_type=rse_type, longitude=longitude, latitude=latitude, ASN=ASN)
try:
new_rse.save(session=session)
except IntegrityError:
raise exception.Duplicate('RSE \'%(rse)s\' already exists!' % locals())
except DatabaseError as error:
raise exception.RucioException(error.args)
# Add rse name as a RSE-Tag
add_rse_attribute(rse_id=new_rse.id, key=rse, value=True, session=session)
# Add counter to monitor the space usage
add_counter(rse_id=new_rse.id, session=session)
# Add account counter
rucio.core.account_counter.create_counters_for_new_rse(rse_id=new_rse.id, session=session)
return new_rse.id
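# Hedged usage sketch (added; the RSE name and the open `session` are hypothetical):
#
#   rse_id = add_rse('MOCK_DISK', vo='def', deterministic=True,
#                    rse_type=RSEType.DISK, availability=7, session=session)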
@read_session
def rse_exists(rse, vo='def', include_deleted=False, session=None):
"""
Checks to see if RSE exists.
:param rse: Name of the rse.
:param vo: The VO for the RSE.
:param session: The database session in use.
:returns: True if found, otherwise false.
"""
return True if session.query(models.RSE).filter_by(rse=rse, vo=vo, deleted=include_deleted).first() else False
@read_session
def sort_rses(rses, session=None):
"""
Sort a list of RSES by free space (ascending order).
:param rses: List of RSEs.
:param session: The database session in use.
:returns: Sorted list of RSEs
"""
if not rses:
raise exception.InputValidationError('The list rses should not be empty!')
if len(rses) == 1:
return rses
false_value = False
query = session.query(models.RSE.rse, models.RSE.staging_area, models.RSEUsage.rse_id).\
filter(models.RSEUsage.source == 'storage').\
filter(models.RSEUsage.rse_id == models.RSE.id).\
filter(models.RSE.deleted == false_value)
condition = []
for rse in rses:
condition.append(models.RSE.id == rse['id'])
query = query.filter(or_(*condition)).order_by(models.RSEUsage.free.asc())
return [{'rse': rse, 'staging_area': staging_area, 'id': rse_id} for rse, staging_area, rse_id in query]
@transactional_session
def del_rse(rse_id, session=None):
"""
Disable a rse with the given rse id.
:param rse_id: the rse id.
:param session: The database session in use.
"""
old_rse = None
try:
old_rse = session.query(models.RSE).filter_by(id=rse_id, deleted=False).one()
if not rse_is_empty(rse_id=rse_id, session=session):
raise exception.RSEOperationNotSupported('RSE \'%s\' is not empty' % get_rse_name(rse_id=rse_id, session=session))
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with id \'%s\' cannot be found' % rse_id)
rse = old_rse.rse
old_rse.delete(session=session)
try:
del_rse_attribute(rse_id=rse_id, key=rse, session=session)
except exception.RSEAttributeNotFound:
pass
@transactional_session
def restore_rse(rse_id, session=None):
"""
Restore a rse with the given rse id.
:param rse_id: the rse id.
:param session: The database session in use.
"""
old_rse = None
try:
old_rse = session.query(models.RSE).filter_by(id=rse_id, deleted=True).one()
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with id \'%s\' cannot be found' % rse_id)
old_rse.deleted = False
old_rse.deleted_at = None
old_rse.save(session=session)
rse = old_rse.rse
add_rse_attribute(rse_id=rse_id, key=rse, value=True, session=session)
@read_session
def rse_is_empty(rse_id, session=None):
"""
Check if a RSE is empty.
:param rse_id: the rse id.
:param session: the database session in use.
"""
is_empty = False
try:
is_empty = get_counter(rse_id, session=session)['bytes'] == 0
except exception.CounterNotFound:
is_empty = True
return is_empty
@read_session
def get_rse(rse_id, session=None):
"""
Get a RSE or raise if it does not exist.
:param rse_id: The rse id.
:param session: The database session in use.
:raises RSENotFound: If referred RSE was not found in the database.
"""
false_value = False # To make pep8 checker happy ...
try:
tmp = session.query(models.RSE).\
filter(sqlalchemy.and_(models.RSE.deleted == false_value,
models.RSE.id == rse_id))\
.one()
tmp['type'] = tmp.rse_type
return tmp
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with id \'%s\' cannot be found' % rse_id)
@read_session
def get_rse_id(rse, vo='def', session=None, include_deleted=True):
"""
Get a RSE ID or raise if it does not exist.
:param rse: the rse name.
:param session: The database session in use.
:param include_deleted: Flag to toggle finding rse's marked as deleted.
:returns: The rse id.
:raises RSENotFound: If referred RSE was not found in the database.
"""
if include_deleted:
if vo != 'def':
cache_key = 'rse-id_{}@{}'.format(rse, vo).replace(' ', '.')
else:
cache_key = 'rse-id_{}'.format(rse).replace(' ', '.')
result = REGION.get(cache_key)
if result != NO_VALUE:
return result
try:
query = session.query(models.RSE.id).filter_by(rse=rse, vo=vo)
if not include_deleted:
query = query.filter_by(deleted=False)
result = query.one()[0]
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound("RSE '%s' cannot be found in vo '%s'" % (rse, vo))
if include_deleted:
REGION.set(cache_key, result)
return result
@read_session
def get_rse_name(rse_id, session=None, include_deleted=True):
"""
Get a RSE name or raise if it does not exist.
:param rse_id: the rse uuid from the database.
:param session: The database session in use.
:param include_deleted: Flag to toggle finding rse's marked as deleted.
:returns: The rse name.
:raises RSENotFound: If referred RSE was not found in the database.
"""
if include_deleted:
cache_key = 'rse-name_{}'.format(rse_id)
result = REGION.get(cache_key)
if result != NO_VALUE:
return result
try:
query = session.query(models.RSE.rse).filter_by(id=rse_id)
if not include_deleted:
query = query.filter_by(deleted=False)
result = query.one()[0]
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with ID \'%s\' cannot be found' % rse_id)
if include_deleted:
REGION.set(cache_key, result)
return result
@read_session
def get_rse_vo(rse_id, session=None, include_deleted=True):
"""
Get the VO for a given RSE id.
:param rse_id: the rse uuid from the database.
:param session: the database session in use.
:param include_deleted: Flag to toggle finding rse's marked as deleted.
    :returns: The vo name.
:raises RSENotFound: If referred RSE was not found in database.
"""
if include_deleted:
cache_key = 'rse-vo_{}'.format(rse_id)
result = REGION.get(cache_key)
if result != NO_VALUE:
return result
try:
query = session.query(models.RSE.vo).filter_by(id=rse_id)
if not include_deleted:
query = query.filter_by(deleted=False)
result = query.one()[0]
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with ID \'%s\' cannot be found' % rse_id)
if include_deleted:
REGION.set(cache_key, result)
return result
@read_session
def list_rses(filters={}, session=None):
"""
Returns a list of all RSEs.
:param filters: dictionary of attributes by which the results should be filtered.
:param session: The database session in use.
:returns: a list of dictionaries.
"""
rse_list = []
availability_mask1 = 0
availability_mask2 = 7
availability_mapping = {'availability_read': 4, 'availability_write': 2, 'availability_delete': 1}
false_value = False # To make pep8 checker happy ...
if filters and filters.get('vo'):
filters = filters.copy() # Make a copy so we can pop('vo') without affecting the object `filters` outside this function
vo = filters.pop('vo')
else:
vo = None
if filters:
if 'availability' in filters and ('availability_read' in filters or 'availability_write' in filters or 'availability_delete' in filters):
raise exception.InvalidObject('Cannot use availability and read, write, delete filter at the same time.')
query = session.query(models.RSE).\
join(models.RSEAttrAssociation, models.RSE.id == models.RSEAttrAssociation.rse_id).\
filter(models.RSE.deleted == false_value).group_by(models.RSE)
for (k, v) in filters.items():
if hasattr(models.RSE, k):
if k == 'rse_type':
query = query.filter(getattr(models.RSE, k) == RSEType[v])
else:
query = query.filter(getattr(models.RSE, k) == v)
elif k in ['availability_read', 'availability_write', 'availability_delete']:
if v:
availability_mask1 = availability_mask1 | availability_mapping[k]
else:
availability_mask2 = availability_mask2 & ~availability_mapping[k]
else:
t = aliased(models.RSEAttrAssociation)
query = query.join(t, t.rse_id == models.RSEAttrAssociation.rse_id)
query = query.filter(t.key == k,
t.value == v)
condition1, condition2 = [], []
for i in range(0, 8):
if i | availability_mask1 == i:
condition1.append(models.RSE.availability == i)
if i & availability_mask2 == i:
condition2.append(models.RSE.availability == i)
if 'availability' not in filters:
query = query.filter(sqlalchemy.and_(sqlalchemy.or_(*condition1), sqlalchemy.or_(*condition2)))
else:
query = session.query(models.RSE).filter_by(deleted=False).order_by(models.RSE.rse)
if vo:
query = query.filter(getattr(models.RSE, 'vo') == vo)
for row in query:
dic = {}
for column in row.__table__.columns:
dic[column.name] = getattr(row, column.name)
rse_list.append(dic)
return rse_list
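# --- Added illustration (hedged, not part of the original Rucio module) ---
# The availability filters used in list_rses collapse into a single integer
# bitmask with read=4, write=2 and delete=1. A small, side-effect-free sketch
# of that encoding (the function name is hypothetical, for illustration only):
def _example_availability_bits(read=False, write=False, delete=False):
    # e.g. read+write -> 4 | 2 = 6, all three -> 7, none -> 0
    bits = 0
    if read:
        bits |= 4
    if write:
        bits |= 2
    if delete:
        bits |= 1
    return bits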
@transactional_session
def add_rse_attribute(rse_id, key, value, session=None):
""" Adds a RSE attribute.
:param rse_id: the rse id.
:param key: the key name.
:param value: the value name.
:param issuer: The issuer account.
:param session: The database session in use.
    :returns: True if successful
"""
try:
new_rse_attr = models.RSEAttrAssociation(rse_id=rse_id, key=key, value=value)
new_rse_attr = session.merge(new_rse_attr)
new_rse_attr.save(session=session)
except IntegrityError:
rse = get_rse_name(rse_id=rse_id, session=session)
raise exception.Duplicate("RSE attribute '%(key)s-%(value)s\' for RSE '%(rse)s' already exists!" % locals())
return True
@transactional_session
def del_rse_attribute(rse_id, key, session=None):
"""
Delete a RSE attribute.
:param rse_id: the id of the rse.
:param key: the attribute key.
:param session: The database session in use.
:return: True if RSE attribute was deleted.
"""
rse_attr = None
try:
query = session.query(models.RSEAttrAssociation).filter_by(rse_id=rse_id).filter(models.RSEAttrAssociation.key == key)
rse_attr = query.one()
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSEAttributeNotFound('RSE attribute \'%s\' cannot be found' % key)
rse_attr.delete(session=session)
return True
@read_session
def list_rse_attributes(rse_id, session=None):
"""
List RSE attributes for a RSE.
:param rse_id: The RSE id.
:param session: The database session in use.
:returns: A dictionary with RSE attributes for a RSE.
"""
rse_attrs = {}
query = session.query(models.RSEAttrAssociation).filter_by(rse_id=rse_id)
for attr in query:
rse_attrs[attr.key] = attr.value
return rse_attrs
@read_session
def has_rse_attribute(rse_id, key, session=None):
"""
Indicates whether the named key is present for the RSE.
:param rse_id: The RSE id.
:param key: The key for the attribute.
:param session: The database session in use.
:returns: True or False
"""
if session.query(models.RSEAttrAssociation.value).filter_by(rse_id=rse_id, key=key).first():
return True
return False
@read_session
def get_rses_with_attribute(key, session=None):
"""
Return all RSEs with a certain attribute.
:param key: The key for the attribute.
:param session: The database session in use.
:returns: List of rse dictionaries
"""
rse_list = []
query = session.query(models.RSE).\
join(models.RSEAttrAssociation, models.RSE.id == models.RSEAttrAssociation.rse_id).\
filter(models.RSE.deleted == False, models.RSEAttrAssociation.key == key).group_by(models.RSE) # NOQA
for row in query:
d = {}
for column in row.__table__.columns:
d[column.name] = getattr(row, column.name)
rse_list.append(d)
return rse_list
@read_session
def get_rses_with_attribute_value(key, value, lookup_key, vo='def', session=None):
"""
Return all RSEs with a certain attribute.
:param key: The key for the attribute.
:param value: The value for the attribute.
    :param lookup_key: The value of this key will be returned.
:param session: The database session in use.
:returns: List of rse dictionaries with the rse_id and lookup_key/value pair
"""
if vo != 'def':
cache_key = 'av-%s-%s-%s@%s' % (key, value, lookup_key, vo)
else:
cache_key = 'av-%s-%s-%s' % (key, value, lookup_key)
result = REGION.get(cache_key)
if result is NO_VALUE:
rse_list = []
subquery = session.query(models.RSEAttrAssociation.rse_id)\
.filter(models.RSEAttrAssociation.key == key,
models.RSEAttrAssociation.value == value)\
.subquery()
query = session.query(models.RSEAttrAssociation.rse_id,
models.RSEAttrAssociation.key,
models.RSEAttrAssociation.value)\
.join(models.RSE, models.RSE.id == models.RSEAttrAssociation.rse_id)\
.join(subquery, models.RSEAttrAssociation.rse_id == subquery.c.rse_id)\
.filter(models.RSE.deleted == false(),
models.RSEAttrAssociation.key == lookup_key,
models.RSE.vo == vo)
for row in query:
rse_list.append({'rse_id': row[0],
'key': row[1],
'value': row[2]})
REGION.set(cache_key, rse_list)
return rse_list
return result
@read_session
def get_rse_attribute(key, rse_id=None, value=None, use_cache=True, session=None):
"""
Retrieve RSE attribute value.
:param rse_id: The RSE id.
:param key: The key for the attribute.
:param value: Optionally, the desired value for the attribute.
:param use_cache: Boolean to use memcached.
:param session: The database session in use.
:returns: A list with RSE attribute values for a Key.
"""
result = NO_VALUE
if use_cache:
result = REGION.get('%s-%s-%s' % (key, rse_id, value))
if result is NO_VALUE:
rse_attrs = []
if rse_id:
query = session.query(models.RSEAttrAssociation.value).filter_by(rse_id=rse_id, key=key).distinct()
if value:
query = session.query(models.RSEAttrAssociation.value).filter_by(rse_id=rse_id, key=key, value=value).distinct()
else:
query = session.query(models.RSEAttrAssociation.value).filter_by(key=key).distinct()
if value:
query = session.query(models.RSEAttrAssociation.value).filter_by(key=key, value=value).distinct()
for attr_value in query:
rse_attrs.append(attr_value[0])
REGION.set('%s-%s-%s' % (key, rse_id, value), rse_attrs)
return rse_attrs
return result
@read_session
def get_rse_supported_checksums(rse_id, session=None):
"""
Retrieve from the DB and parse the RSE attribute defining the checksum supported by the RSE
"""
return parse_checksum_support_attribute(get_rse_attribute(key=CHECKSUM_KEY, rse_id=rse_id, session=session))
def get_rse_supported_checksums_from_attributes(rse_attributes):
"""
Parse the RSE attribute defining the checksum supported by the RSE
:param rse_attributes: attributes retrieved using list_rse_attributes
"""
return parse_checksum_support_attribute(rse_attributes.get(CHECKSUM_KEY))
def parse_checksum_support_attribute(checksum_attribute):
"""
Parse the checksum support RSE attribute.
:param checksum_attribute: The value of the RSE attribute storing the checksum value
:returns: The list of checksums supported by the selected RSE.
              If the attribute is not set (i.e. the list is empty), all default checksums are returned.
              Use 'none' to state explicitly that the RSE does not support any checksum algorithm.
"""
if not checksum_attribute:
return GLOBALLY_SUPPORTED_CHECKSUMS
else:
supported_checksum_list = checksum_attribute[0].split(',')
if 'none' in supported_checksum_list:
return []
return supported_checksum_list
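# --- Added worked example (hedged, not part of the original Rucio module) ---
# Given the raw attribute value as returned by get_rse_attribute:
#   parse_checksum_support_attribute(['adler32,md5']) -> ['adler32', 'md5']
#   parse_checksum_support_attribute(['none'])        -> []
#   parse_checksum_support_attribute([]) / (None)     -> GLOBALLY_SUPPORTED_CHECKSUMS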
@read_session
def get_rse_is_checksum_supported(checksum_name, rse_id=None, session=None):
"""
Retrieve RSE attribute value.
:param checksum_name: The desired checksum name for the attribute.
:param rse_id: The RSE id.
:param session: The database session in use.
:returns: True if required checksum is supported, False otherwise.
"""
if is_checksum_valid(checksum_name):
return checksum_name in get_rse_supported_checksums(rse_id=rse_id, session=session)
else:
return False
@transactional_session
def set_rse_usage(rse_id, source, used, free, files=None, session=None):
"""
Set RSE usage information.
:param rse_id: the location id.
:param source: The information source, e.g. srm.
:param used: the used space in bytes.
    :param free: the free space in bytes.
:param files: the number of files
:param session: The database session in use.
:returns: True if successful, otherwise false.
"""
rse_usage = models.RSEUsage(rse_id=rse_id, source=source, used=used, free=free, files=files)
# versioned_session(session)
rse_usage = session.merge(rse_usage)
rse_usage.save(session=session)
# rse_usage_history = models.RSEUsage.__history_mapper__.class_(rse_id=rse.id, source=source, used=used, free=free)
# rse_usage_history.save(session=session)
return True
@read_session
def get_rse_usage(rse_id, source=None, session=None, per_account=False):
"""
get rse usage information.
:param rse_id: The RSE id.
:param source: The information source, e.g. srm.
:param session: The database session in use.
:param per_account: Boolean whether the usage should be also calculated per account or not.
:returns: List of RSE usage data.
"""
query_rse_usage = session.query(models.RSEUsage).filter_by(rse_id=rse_id)
usage = list()
if source:
query_rse_usage = query_rse_usage.filter_by(source=source)
rse = get_rse_name(rse_id=rse_id, session=session)
for row in query_rse_usage:
total = (row.free or 0) + (row.used or 0)
rse_usage = {'rse_id': rse_id,
'rse': rse,
'source': row.source,
'used': row.used, 'free': row.free,
'total': total,
'files': row.files,
'updated_at': row.updated_at}
if per_account and row.source == 'rucio':
query_account_usage = session.query(models.AccountUsage).filter_by(rse_id=rse_id)
account_usages = []
for row in query_account_usage:
if row.bytes != 0:
percentage = round(float(row.bytes) / float(total) * 100, 2) if total else 0
account_usages.append({'used': row.bytes, 'account': row.account, 'percentage': percentage})
account_usages.sort(key=lambda x: x['used'], reverse=True)
rse_usage['account_usages'] = account_usages
usage.append(rse_usage)
return usage
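# --- Added note (hedged, not part of the original Rucio module) ---
# The per-account percentage in get_rse_usage is plain arithmetic: with a
# hypothetical total of 1000 bytes and an account using 250 bytes,
# round(float(250) / float(1000) * 100, 2) == 25.0; a zero total
# short-circuits to 0, so no division by zero can occur.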
@transactional_session
def set_rse_limits(rse_id: str, name: str, value: int,
session: 'Session' = None) -> bool:
"""
Set RSE limits.
:param rse_id: The RSE id.
:param name: The name of the limit.
:param value: The feature value.
:param session: The database session in use.
:returns: True if successful, otherwise false.
"""
rse_limit = models.RSELimit(rse_id=rse_id, name=name, value=value)
rse_limit = session.merge(rse_limit)
rse_limit.save(session=session)
return True
@read_session
def get_rse_limits(rse_id: str, name: 'Optional[str]' = None,
session: 'Session' = None) -> 'Dict[str, int]':
"""
Get RSE limits.
:param rse_id: The RSE id.
:param name: A Limit name.
:returns: A dictionary with the limits {'limit.name': limit.value}.
"""
query = session.query(models.RSELimit).filter_by(rse_id=rse_id)
if name:
query = query.filter_by(name=name)
return {limit.name: limit.value for limit in query}
@transactional_session
def delete_rse_limits(rse_id: str, name: 'Optional[str]' = None,
session: 'Session' = None) -> None:
"""
Delete RSE limit.
:param rse_id: The RSE id.
:param name: The name of the limit.
"""
try:
session.query(models.RSELimit).filter_by(rse_id=rse_id, name=name).delete()
except IntegrityError as error:
raise exception.RucioException(error.args)
@transactional_session
def set_rse_transfer_limits(rse_id, activity, rse_expression=None, max_transfers=0, transfers=0, waitings=0, volume=0, deadline=1, strategy='fifo', session=None):
"""
Set RSE transfer limits.
:param rse_id: The RSE id.
:param activity: The activity.
:param rse_expression: RSE expression string.
:param max_transfers: Maximum transfers.
    :param transfers: Current number of transfers.
    :param waitings: Current number of waitings.
    :param volume: Maximum transfer volume in bytes.
    :param deadline: Maximum waiting time in hours until a dataset gets released.
    :param strategy: Strategy to handle datasets: `fifo` or `grouped_fifo`.
:param session: The database session in use.
:returns: True if successful, otherwise false.
"""
try:
rse_tr_limit = models.RSETransferLimit(rse_id=rse_id, activity=activity, rse_expression=rse_expression,
max_transfers=max_transfers, transfers=transfers,
waitings=waitings, volume=volume, strategy=strategy, deadline=deadline)
rse_tr_limit = session.merge(rse_tr_limit)
rowcount = rse_tr_limit.save(session=session)
return rowcount
except IntegrityError as error:
raise exception.RucioException(error.args)
@read_session
def get_rse_transfer_limits(rse_id=None, activity=None, session=None):
"""
Get RSE transfer limits.
:param rse_id: The RSE id.
:param activity: The activity.
:returns: A dictionary with the limits {'limit.activity': {'limit.rse_id': {'max_transfers': limit.max_transfers, 'transfers': 0, 'waitings': 0, 'volume': 1}}}.
"""
try:
query = session.query(models.RSETransferLimit)
if rse_id:
query = query.filter_by(rse_id=rse_id)
if activity:
query = query.filter_by(activity=activity)
limits = {}
for limit in query:
if limit.activity not in limits:
limits[limit.activity] = {}
limits[limit.activity][limit.rse_id] = {'max_transfers': limit.max_transfers,
'transfers': limit.transfers,
'waitings': limit.waitings,
'volume': limit.volume,
'strategy': limit.strategy,
'deadline': limit.deadline}
return limits
except IntegrityError as error:
raise exception.RucioException(error.args)
@transactional_session
def delete_rse_transfer_limits(rse_id, activity=None, session=None):
"""
Delete RSE transfer limits.
:param rse_id: The RSE id.
:param activity: The activity.
"""
try:
query = session.query(models.RSETransferLimit).filter_by(rse_id=rse_id)
if activity:
query = query.filter_by(activity=activity)
rowcount = query.delete()
return rowcount
except IntegrityError as error:
raise exception.RucioException(error.args)
@stream_session
def list_rse_usage_history(rse_id, source=None, session=None):
"""
List RSE usage history information.
:param rse_id: The RSE id.
:param source: The source of the usage information (srm, rucio).
:param session: The database session in use.
:returns: A list of historic RSE usage.
"""
query = session.query(models.RSEUsageHistory).filter_by(rse_id=rse_id).order_by(models.RSEUsageHistory.updated_at.desc()) # pylint: disable=no-member
if source:
query = query.filter_by(source=source)
rse = get_rse_name(rse_id=rse_id, session=session)
for usage in query.yield_per(5):
yield ({'rse_id': rse_id, 'rse': rse,
'source': usage.source,
'used': usage.used if usage.used else 0,
               'total': (usage.used if usage.used else 0) + (usage.free if usage.free else 0),
'free': usage.free if usage.free else 0,
'updated_at': usage.updated_at})
@transactional_session
def add_protocol(rse_id, parameter, session=None):
"""
    Add a protocol to an existing RSE. If entries with equal or lower priority for
    an operation exist, the existing ones will be reordered (i.e. priority + 1).
:param rse_id: the id of the new rse.
:param parameter: parameters of the new protocol entry.
:param session: The database session in use.
:raises RSENotFound: If RSE is not found.
:raises RSEOperationNotSupported: If no scheme supported the requested operation for the given RSE.
:raises RSEProtocolDomainNotSupported: If an undefined domain was provided.
    :raises RSEProtocolPriorityError: If the provided priority for the scheme is too big or below zero.
:raises Duplicate: If scheme with identifier, hostname and port already exists
for the given RSE.
"""
rse = ""
try:
rse = get_rse_name(rse_id=rse_id, session=session, include_deleted=False)
except exception.RSENotFound:
raise exception.RSENotFound('RSE id \'%s\' not found' % rse_id)
# Insert new protocol entry
parameter['rse_id'] = rse_id
# Default values
parameter['port'] = parameter.get('port', 0)
parameter['hostname'] = parameter.get('hostname', 'localhost')
# Transform nested domains to match DB schema e.g. [domains][lan][read] => [read_lan]
if 'domains' in parameter.keys():
for s in parameter['domains']:
if s not in utils.rse_supported_protocol_domains():
raise exception.RSEProtocolDomainNotSupported('The protocol domain \'%s\' is not defined in the schema.' % s)
for op in parameter['domains'][s]:
if op not in utils.rse_supported_protocol_operations():
raise exception.RSEOperationNotSupported('Operation \'%s\' not defined in schema.' % (op))
op_name = op if op == 'third_party_copy' else ''.join([op, '_', s]).lower()
if parameter['domains'][s][op] < 0:
                    raise exception.RSEProtocolPriorityError('The provided priority (%s) for operation \'%s\' in domain \'%s\' is not supported.' % (parameter['domains'][s][op], op, s))
parameter[op_name] = parameter['domains'][s][op]
del parameter['domains']
if ('extended_attributes' in parameter) and parameter['extended_attributes']:
try:
parameter['extended_attributes'] = json.dumps(parameter['extended_attributes'], separators=(',', ':'))
except ValueError:
pass # String is not JSON
if parameter['scheme'] == 'srm':
if ('extended_attributes' not in parameter) or ('web_service_path' not in parameter['extended_attributes']):
raise exception.InvalidObject('Missing values! For SRM, extended_attributes and web_service_path must be specified')
try:
new_protocol = models.RSEProtocols()
new_protocol.update(parameter)
new_protocol.save(session=session)
except (IntegrityError, FlushError, OperationalError) as error:
if ('UNIQUE constraint failed' in error.args[0]) or ('conflicts with persistent instance' in error.args[0]) \
or match('.*IntegrityError.*ORA-00001: unique constraint.*RSE_PROTOCOLS_PK.*violated.*', error.args[0]) \
or match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', error.args[0]) \
or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]) \
or match('.*UniqueViolation.*duplicate key value violates unique constraint.*', error.args[0]) \
or match('.*IntegrityError.*columns.*are not unique.*', error.args[0]):
raise exception.Duplicate('Protocol \'%s\' on port %s already registered for \'%s\' with hostname \'%s\'.' % (parameter['scheme'], parameter['port'], rse, parameter['hostname']))
elif 'may not be NULL' in error.args[0] \
or match('.*IntegrityError.*ORA-01400: cannot insert NULL into.*RSE_PROTOCOLS.*IMPL.*', error.args[0]) \
or match('.*IntegrityError.*Column.*cannot be null.*', error.args[0]) \
or match('.*IntegrityError.*null value in column.*violates not-null constraint.*', error.args[0]) \
or match('.*IntegrityError.*NOT NULL constraint failed.*', error.args[0]) \
or match('.*NotNullViolation.*null value in column.*violates not-null constraint.*', error.args[0]) \
or match('.*OperationalError.*cannot be null.*', error.args[0]):
raise exception.InvalidObject('Missing values!')
raise exception.RucioException(error.args)
return new_protocol
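# --- Added illustration (hedged, not part of the original Rucio module) ---
# The nested 'domains' parameter accepted by add_protocol is flattened to
# column names before insertion. A hypothetical input such as
#   {'scheme': 'root', 'hostname': 'xrootd.example.org', 'port': 1094,
#    'domains': {'lan': {'read': 1, 'write': 1, 'delete': 1},
#                'wan': {'read': 2, 'write': 2, 'delete': 2, 'third_party_copy': 1}}}
# becomes read_lan=1, write_lan=1, delete_lan=1, read_wan=2, write_wan=2,
# delete_wan=2 and third_party_copy=1; only 'third_party_copy' keeps its name,
# every other operation is suffixed with its domain.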
@read_session
def get_rse_protocols(rse_id, schemes=None, session=None):
"""
Returns protocol information. Parameter combinations are: (operation OR default) XOR scheme.
:param rse_id: The id of the rse.
:param schemes: a list of schemes to filter by.
:param session: The database session.
:returns: A dict with RSE information and supported protocols
:raises RSENotFound: If RSE is not found.
"""
_rse = get_rse(rse_id=rse_id, session=session)
if not _rse:
raise exception.RSENotFound('RSE with id \'%s\' not found' % rse_id)
lfn2pfn_algorithms = get_rse_attribute('lfn2pfn_algorithm', rse_id=_rse.id, session=session)
# Resolve LFN2PFN default algorithm as soon as possible. This way, we can send back the actual
# algorithm name in response to REST queries.
lfn2pfn_algorithm = get_lfn2pfn_algorithm_default()
if lfn2pfn_algorithms:
lfn2pfn_algorithm = lfn2pfn_algorithms[0]
# Copy verify_checksum from the attributes, later: assume True if not specified
verify_checksum = get_rse_attribute('verify_checksum', rse_id=_rse.id, session=session)
# Copy sign_url from the attributes
sign_url = get_rse_attribute('sign_url', rse_id=_rse.id, session=session)
read = True if _rse.availability & 4 else False
write = True if _rse.availability & 2 else False
delete = True if _rse.availability & 1 else False
info = {'availability_delete': delete,
'availability_read': read,
'availability_write': write,
'credentials': None,
'deterministic': _rse.deterministic,
'domain': utils.rse_supported_protocol_domains(),
'id': _rse.id,
'lfn2pfn_algorithm': lfn2pfn_algorithm,
'protocols': list(),
'qos_class': _rse.qos_class,
'rse': _rse.rse,
'rse_type': _rse.rse_type.name,
'sign_url': sign_url[0] if sign_url else None,
'staging_area': _rse.staging_area,
'verify_checksum': verify_checksum[0] if verify_checksum else True,
'volatile': _rse.volatile}
for op in utils.rse_supported_protocol_operations():
info['%s_protocol' % op] = 1 # 1 indicates the default protocol
query = None
terms = [models.RSEProtocols.rse_id == _rse.id]
if schemes:
        if not isinstance(schemes, list):
schemes = [schemes]
terms.extend([models.RSEProtocols.scheme.in_(schemes)])
query = session.query(models.RSEProtocols.hostname,
models.RSEProtocols.scheme,
models.RSEProtocols.port,
models.RSEProtocols.prefix,
models.RSEProtocols.impl,
models.RSEProtocols.read_lan,
models.RSEProtocols.write_lan,
models.RSEProtocols.delete_lan,
models.RSEProtocols.read_wan,
models.RSEProtocols.write_wan,
models.RSEProtocols.delete_wan,
models.RSEProtocols.third_party_copy,
models.RSEProtocols.extended_attributes).filter(*terms)
for row in query:
p = {'hostname': row.hostname,
'scheme': row.scheme,
'port': row.port,
'prefix': row.prefix if row.prefix is not None else '',
'impl': row.impl,
'domains': {
'lan': {'read': row.read_lan,
'write': row.write_lan,
'delete': row.delete_lan},
'wan': {'read': row.read_wan,
'write': row.write_wan,
'delete': row.delete_wan,
'third_party_copy': row.third_party_copy}
},
'extended_attributes': row.extended_attributes}
try:
p['extended_attributes'] = json.load(StringIO(p['extended_attributes']))
except ValueError:
pass # If value is not a JSON string
info['protocols'].append(p)
info['protocols'] = sorted(info['protocols'], key=lambda p: (p['hostname'], p['scheme'], p['port']))
return info
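# --- Added illustration (hedged, not part of the original Rucio module) ---
# The availability bits decoded in get_rse_protocols mirror the encoding used
# elsewhere in this module: an availability value of 5 (binary 101) yields
# availability_read=True, availability_write=False, availability_delete=True,
# while 7 enables all three operations. The resulting protocol list is sorted
# by (hostname, scheme, port).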
@transactional_session
def update_protocols(rse_id, scheme, data, hostname, port, session=None):
"""
Updates an existing protocol entry for an RSE. If necessary, priorities for read,
    write, and delete operations of other protocol entries will be updated too.
:param rse_id: the id of the new rse.
    :param scheme: Protocol identifier.
:param data: Dict with new values (keys must match column names in the database).
:param hostname: Hostname defined for the scheme, used if more than one scheme
is registered with the same identifier.
    :param port: The port registered for the hostname, used if more than one scheme
                 is registered with the same identifier and hostname.
:param session: The database session in use.
:raises RSENotFound: If RSE is not found.
    :raises RSEProtocolNotSupported: If no matching protocol was found for the given RSE.
:raises RSEOperationNotSupported: If no protocol supported the requested operation for the given RSE.
:raises RSEProtocolDomainNotSupported: If an undefined domain was provided.
    :raises RSEProtocolPriorityError: If the provided priority for the protocol is too big or below zero.
:raises KeyNotFound: Invalid data for update provided.
:raises Duplicate: If protocol with identifier, hostname and port already exists
for the given RSE.
"""
# Transform nested domains to match DB schema e.g. [domains][lan][read] => [read_lan]
if 'domains' in data:
for s in data['domains']:
if s not in utils.rse_supported_protocol_domains():
raise exception.RSEProtocolDomainNotSupported('The protocol domain \'%s\' is not defined in the schema.' % s)
for op in data['domains'][s]:
if op not in utils.rse_supported_protocol_operations():
raise exception.RSEOperationNotSupported('Operation \'%s\' not defined in schema.' % (op))
op_name = op
if op != 'third_party_copy':
op_name = ''.join([op, '_', s])
no = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) >= 0)).\
count()
if not 0 <= data['domains'][s][op] <= no:
                    raise exception.RSEProtocolPriorityError('The provided priority (%s) for operation \'%s\' in domain \'%s\' is not supported.' % (data['domains'][s][op], op, s))
data[op_name] = data['domains'][s][op]
del data['domains']
if 'extended_attributes' in data:
try:
data['extended_attributes'] = json.dumps(data['extended_attributes'], separators=(',', ':'))
except ValueError:
pass # String is not JSON
try:
rse = get_rse_name(rse_id=rse_id, session=session, include_deleted=False)
except exception.RSENotFound:
raise exception.RSENotFound('RSE with id \'%s\' not found' % rse_id)
terms = [models.RSEProtocols.rse_id == rse_id,
models.RSEProtocols.scheme == scheme,
models.RSEProtocols.hostname == hostname,
models.RSEProtocols.port == port]
try:
up = session.query(models.RSEProtocols).filter(*terms).first()
if up is None:
msg = 'RSE \'%s\' does not support protocol \'%s\' for hostname \'%s\' on port \'%s\'' % (rse, scheme, hostname, port)
raise exception.RSEProtocolNotSupported(msg)
# Preparing gaps if priority is updated
for domain in utils.rse_supported_protocol_domains():
for op in utils.rse_supported_protocol_operations():
op_name = op
if op != 'third_party_copy':
op_name = ''.join([op, '_', domain])
if op_name in data:
prots = []
if (not getattr(up, op_name)) and data[op_name]: # reactivate protocol e.g. from 0 to 1
prots = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) >= data[op_name])).\
order_by(getattr(models.RSEProtocols, op_name).asc())
val = data[op_name] + 1
elif getattr(up, op_name) and (not data[op_name]): # deactivate protocol e.g. from 1 to 0
prots = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) > getattr(up, op_name))).\
order_by(getattr(models.RSEProtocols, op_name).asc())
val = getattr(up, op_name)
elif getattr(up, op_name) > data[op_name]: # shift forward e.g. from 5 to 2
prots = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) >= data[op_name],
getattr(models.RSEProtocols, op_name) < getattr(up, op_name))).\
order_by(getattr(models.RSEProtocols, op_name).asc())
val = data[op_name] + 1
elif getattr(up, op_name) < data[op_name]: # shift backward e.g. from 1 to 3
prots = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) <= data[op_name],
getattr(models.RSEProtocols, op_name) > getattr(up, op_name))).\
order_by(getattr(models.RSEProtocols, op_name).asc())
val = getattr(up, op_name)
for p in prots:
p.update({op_name: val})
val += 1
up.update(data, flush=True, session=session)
except (IntegrityError, OperationalError) as error:
if 'UNIQUE'.lower() in error.args[0].lower() or 'Duplicate' in error.args[0]: # Covers SQLite, Oracle and MySQL error
raise exception.Duplicate('Protocol \'%s\' on port %s already registered for \'%s\' with hostname \'%s\'.' % (scheme, port, rse, hostname))
elif 'may not be NULL' in error.args[0] or "cannot be null" in error.args[0]:
raise exception.InvalidObject('Missing values: %s' % error.args[0])
raise error
except DatabaseError as error:
if match('.*DatabaseError.*ORA-01407: cannot update .*RSE_PROTOCOLS.*IMPL.*to NULL.*', error.args[0]):
raise exception.InvalidObject('Invalid values !')
raise error
@transactional_session
def del_protocols(rse_id, scheme, hostname=None, port=None, session=None):
"""
Deletes an existing protocol entry for an RSE.
:param rse_id: the id of the new rse.
    :param scheme: Protocol identifier.
:param hostname: Hostname defined for the scheme, used if more than one scheme
is registered with the same identifier.
    :param port: The port registered for the hostname, used if more than one scheme
                 is registered with the same identifier and hostname.
:param session: The database session in use.
:raises RSENotFound: If RSE is not found.
    :raises RSEProtocolNotSupported: If no matching scheme was found for the given RSE.
"""
try:
rse_name = get_rse_name(rse_id=rse_id, session=session, include_deleted=False)
except exception.RSENotFound:
raise exception.RSENotFound('RSE \'%s\' not found' % rse_id)
terms = [models.RSEProtocols.rse_id == rse_id, models.RSEProtocols.scheme == scheme]
if hostname:
terms.append(models.RSEProtocols.hostname == hostname)
if port:
terms.append(models.RSEProtocols.port == port)
p = session.query(models.RSEProtocols).filter(*terms)
if not p.all():
msg = 'RSE \'%s\' does not support protocol \'%s\'' % (rse_name, scheme)
msg += ' for hostname \'%s\'' % hostname if hostname else ''
msg += ' on port \'%s\'' % port if port else ''
raise exception.RSEProtocolNotSupported(msg)
for row in p:
row.delete(session=session)
# Filling gaps in protocol priorities
for domain in utils.rse_supported_protocol_domains():
for op in utils.rse_supported_protocol_operations():
op_name = ''.join([op, '_', domain])
if getattr(models.RSEProtocols, op_name, None):
prots = session.query(models.RSEProtocols).\
filter(sqlalchemy.and_(models.RSEProtocols.rse_id == rse_id,
getattr(models.RSEProtocols, op_name) > 0)).\
order_by(getattr(models.RSEProtocols, op_name).asc())
i = 1
for p in prots:
p.update({op_name: i})
i += 1
@transactional_session
def update_rse(rse_id, parameters, session=None):
"""
Update RSE properties like availability or name.
:param rse_id: the id of the new rse.
    :param parameters: A dictionary with the properties to update (e.g. name, read, write, delete as keys).
:param session: The database session in use.
:raises RSENotFound: If RSE is not found.
"""
try:
query = session.query(models.RSE).filter_by(id=rse_id).one()
except sqlalchemy.orm.exc.NoResultFound:
raise exception.RSENotFound('RSE with ID \'%s\' cannot be found' % rse_id)
availability = 0
rse = query.rse
for column in query:
if column[0] == 'availability':
availability = column[1] or availability
param = {}
availability_mapping = {'availability_read': 4, 'availability_write': 2, 'availability_delete': 1}
for key in parameters:
if key == 'name' and parameters['name'] != rse: # Needed due to wrongly setting name in pre1.22.7 clients
param['rse'] = parameters['name']
elif key in ['availability_read', 'availability_write', 'availability_delete']:
if parameters[key] is True:
availability = availability | availability_mapping[key]
else:
availability = availability & ~availability_mapping[key]
elif key in ['latitude', 'longitude', 'time_zone', 'rse_type', 'volatile', 'deterministic', 'region_code', 'country_name', 'city', 'staging_area', 'qos_class']:
param[key] = parameters[key]
param['availability'] = availability
# handle null-able keys
for key in parameters:
if key in ['qos_class']:
if param[key] and param[key].lower() in ['', 'none', 'null']:
param[key] = None
query.update(param)
if 'rse' in param:
add_rse_attribute(rse_id=rse_id, key=parameters['name'], value=True, session=session)
query = session.query(models.RSEAttrAssociation).filter_by(rse_id=rse_id).filter(models.RSEAttrAssociation.key == rse)
rse_attr = query.one()
rse_attr.delete(session=session)
@read_session
def export_rse(rse_id, session=None):
"""
Get the internal representation of an RSE.
:param rse_id: The RSE id.
:returns: A dictionary with the internal representation of an RSE.
"""
query = session.query(models.RSE).filter_by(id=rse_id)
rse_data = {}
for _rse in query:
for k, v in _rse:
rse_data[k] = v
rse_data.pop('continent')
rse_data.pop('ASN')
rse_data.pop('ISP')
rse_data.pop('deleted')
rse_data.pop('deleted_at')
# get RSE attributes
rse_data['attributes'] = list_rse_attributes(rse_id=rse_id, session=session)
protocols = get_rse_protocols(rse_id=rse_id, session=session)
rse_data['lfn2pfn_algorithm'] = protocols.get('lfn2pfn_algorithm')
rse_data['verify_checksum'] = protocols.get('verify_checksum')
rse_data['credentials'] = protocols.get('credentials')
rse_data['availability_delete'] = protocols.get('availability_delete')
rse_data['availability_write'] = protocols.get('availability_write')
rse_data['availability_read'] = protocols.get('availability_read')
rse_data['protocols'] = protocols.get('protocols')
# get RSE limits
limits = get_rse_limits(rse_id=rse_id, session=session)
rse_data['MinFreeSpace'] = limits.get('MinFreeSpace')
rse_data['MaxBeingDeletedFiles'] = limits.get('MaxBeingDeletedFiles')
return rse_data
@transactional_session
def add_qos_policy(rse_id, qos_policy, session=None):
"""
Add a QoS policy from an RSE.
:param rse_id: The id of the RSE.
:param qos_policy: The QoS policy to add.
:param session: The database session in use.
:raises Duplicate: If the QoS policy already exists.
    :returns: True if successful, raises an exception otherwise.
"""
try:
new_qos_policy = models.RSEQoSAssociation()
new_qos_policy.update({'rse_id': rse_id,
'qos_policy': qos_policy})
new_qos_policy.save(session=session)
except (IntegrityError, FlushError, OperationalError) as error:
if ('UNIQUE constraint failed' in error.args[0]) or ('conflicts with persistent instance' in error.args[0]) \
or match('.*IntegrityError.*ORA-00001: unique constraint.*RSE_PROTOCOLS_PK.*violated.*', error.args[0]) \
or match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', error.args[0]) \
or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0])\
or match('.*UniqueViolation.*duplicate key value violates unique constraint.*', error.args[0])\
or match('.*IntegrityError.*columns.*are not unique.*', error.args[0]):
raise exception.Duplicate('QoS policy %s already exists!' % qos_policy)
except DatabaseError as error:
raise exception.RucioException(error.args)
return True
@transactional_session
def delete_qos_policy(rse_id, qos_policy, session=None):
"""
Delete a QoS policy from an RSE.
:param rse_id: The id of the RSE.
:param qos_policy: The QoS policy to delete.
:param session: The database session in use.
:returns: True if successful, silent failure if QoS policy does not exist.
"""
try:
session.query(models.RSEQoSAssociation).filter_by(rse_id=rse_id, qos_policy=qos_policy).delete()
except DatabaseError as error:
raise exception.RucioException(error.args)
return True
@read_session
def list_qos_policies(rse_id, session=None):
"""
List all QoS policies of an RSE.
:param rse_id: The id of the RSE.
:param session: The database session in use.
:returns: List containing all QoS policies.
"""
qos_policies = []
try:
query = session.query(models.RSEQoSAssociation.qos_policy).filter_by(rse_id=rse_id)
for qos_policy in query:
qos_policies.append(qos_policy[0])
except DatabaseError as error:
raise exception.RucioException(error.args)
return qos_policies
| 39.663812
| 191
| 0.641329
|
8f0ead51b24fb58afbd0cf4b4af38e57648eccc7
| 83
|
py
|
Python
|
backend/feed/apps.py
|
stasfilin/rss_portal
|
e6e9f8d254c80c8a7a40901b3b7dab059f259d55
|
[
"MIT"
] | null | null | null |
backend/feed/apps.py
|
stasfilin/rss_portal
|
e6e9f8d254c80c8a7a40901b3b7dab059f259d55
|
[
"MIT"
] | 3
|
2021-04-08T21:05:07.000Z
|
2022-02-10T10:05:39.000Z
|
sfymca/feed/apps.py
|
streeter/sf-ymca-pools
|
7f3ff7d561d51158ae27b8abba05f61f4966e862
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class FeedConfig(AppConfig):
name = "feed"
| 13.833333
| 33
| 0.73494
|
54721c3f7c48efd199f9fff8b3e4b3d30311994f
| 5,360
|
py
|
Python
|
src/main/python/twitter/thermos/config/loader.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/twitter/thermos/config/loader.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/twitter/thermos/config/loader.py
|
isomer/incubator-aurora
|
5f54d4de25413bb18acec16120eb18f3e08c6bf0
|
[
"Apache-2.0"
] | null | null | null |
import copy
import json
import os
import re
import textwrap
from twitter.common.dirutil import safe_open
from twitter.common.lang import Compatibility
from twitter.thermos.common.planner import TaskPlanner
from twitter.thermos.config.schema import Task
from pystachio import Ref
from pystachio.config import Config
class PortExtractor(object):
class InvalidPorts(Exception): pass
@staticmethod
def extract(obj):
port_scope = Ref.from_address('thermos.ports')
_, uninterp = obj.interpolate()
ports = []
for ref in uninterp:
subscope = port_scope.scoped_to(ref)
if subscope is not None:
if not subscope.is_index():
raise PortExtractor.InvalidPorts(
            'Bad port specification "%s" (should be of form "thermos.ports[name]")' % ref.address())
ports.append(subscope.action().value)
return ports
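# --- Added example (hedged, not part of the original Aurora/Thermos source) ---
# A process whose command line references {{thermos.ports[http]}} and
# {{thermos.ports[health]}} leaves those refs uninterpolated, so extract()
# would typically return ['http', 'health']; a non-indexed reference such as
# {{thermos.ports.http}} raises PortExtractor.InvalidPorts instead.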
class ThermosProcessWrapper(object):
# >=1 characters && anything but NULL and '/'
VALID_PROCESS_NAME_RE = re.compile(r'^[^./][^/]*$')
class InvalidProcess(Exception): pass
def __init__(self, process):
self._process = process
def ports(self):
try:
return PortExtractor.extract(self._process)
except PortExtractor.InvalidPorts:
raise self.InvalidProcess('Process has invalid ports scoping!')
@staticmethod
def assert_valid_process_name(name):
if not ThermosProcessWrapper.VALID_PROCESS_NAME_RE.match(name):
raise ThermosProcessWrapper.InvalidProcess('Invalid process name: %s' % name)
class ThermosTaskWrapper(object):
class InvalidTask(Exception): pass
def __init__(self, task, bindings=None, strict=True):
if bindings:
task = task.bind(*bindings)
if not task.check().ok() and strict:
raise ThermosTaskWrapper.InvalidTask(task.check().message())
self._task = task
@property
def task(self):
return self._task
def ports(self):
ti, _ = self._task.interpolate()
ports = set()
if ti.has_processes():
for process in ti.processes():
try:
ports.update(ThermosProcessWrapper(process).ports())
except ThermosProcessWrapper.InvalidProcess:
raise self.InvalidTask('Task has invalid process: %s' % process)
return ports
def to_json(self):
return json.dumps(self._task.get())
def to_file(self, filename):
ti, _ = self._task.interpolate()
with safe_open(filename, 'w') as fp:
json.dump(ti.get(), fp)
@staticmethod
def from_file(filename, **kw):
try:
with safe_open(filename) as fp:
task = Task.json_load(fp)
return ThermosTaskWrapper(task, **kw)
    except Exception:
return None
# TODO(wickman) These should be validators pushed onto ThermosConfigLoader.plugins
class ThermosTaskValidator(object):
class InvalidTaskError(Exception): pass
@classmethod
def assert_valid_task(cls, task):
cls.assert_valid_names(task)
cls.assert_typecheck(task)
cls.assert_valid_plan(task)
@classmethod
def assert_valid_plan(cls, task):
try:
TaskPlanner(task, process_filter=lambda proc: proc.final().get() == False)
TaskPlanner(task, process_filter=lambda proc: proc.final().get() == True)
except TaskPlanner.InvalidSchedule as e:
raise cls.InvalidTaskError('Task has invalid plan: %s' % e)
@classmethod
def assert_valid_names(cls, task):
for process in task.processes():
name = process.name().get()
try:
ThermosProcessWrapper.assert_valid_process_name(name)
except ThermosProcessWrapper.InvalidProcess as e:
raise cls.InvalidTaskError('Task has invalid process: %s' % e)
@classmethod
def assert_typecheck(cls, task):
typecheck = task.check()
if not typecheck.ok():
raise cls.InvalidTaskError('Failed to fully evaluate task: %s' %
typecheck.message())
@classmethod
def assert_valid_ports(cls, task, portmap):
for port in ThermosTaskWrapper(task).ports():
if port not in portmap:
raise cls.InvalidTaskError('Task requires unbound port %s!' % port)
@classmethod
def assert_same_task(cls, spec, task):
active_task = spec.given(state='active').getpath('task_path')
if os.path.exists(active_task):
task_on_disk = ThermosTaskWrapper.from_file(active_task)
if not task_on_disk or task_on_disk.task != task:
raise cls.InvalidTaskError('Task differs from on disk copy: %r vs %r' % (
task_on_disk.task if task_on_disk else None, task))
class ThermosConfigLoader(object):
SCHEMA = textwrap.dedent("""
from pystachio import *
from twitter.thermos.config.schema import *
__TASKS = []
def export(task):
__TASKS.append(Task(task) if isinstance(task, dict) else task)
""")
@classmethod
def load(cls, loadable, **kw):
config = Config(loadable, schema=cls.SCHEMA)
return cls(ThermosTaskWrapper(task, **kw) for task in config.environment['__TASKS'])
@classmethod
def load_json(cls, filename, **kw):
tc = cls()
task = ThermosTaskWrapper.from_file(filename, **kw)
if task:
ThermosTaskValidator.assert_valid_task(task.task())
tc.add_task(task)
return tc
def __init__(self, exported_tasks=None):
self._exported_tasks = exported_tasks or []
def add_task(self, task):
self._exported_tasks.append(task)
def tasks(self):
return self._exported_tasks
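# --- Added usage sketch (hedged, not part of the original Aurora/Thermos source) ---
# A loadable thermos config evaluated against SCHEMA above calls the injected
# export() helper; a hypothetical config file might contain:
#   hello = Process(name='hello', cmdline='echo hello world')
#   export(Task(name='hello_world', processes=[hello]))
# after which ThermosConfigLoader.load(<that file>) wraps every exported task
# in a ThermosTaskWrapper, available via .tasks().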
| 29.777778
| 99
| 0.698881
|
706ac65a996211faf94af010fe5d635479da724a
| 175
|
py
|
Python
|
examples/sponza/dependencies.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 70
|
2017-03-31T12:01:41.000Z
|
2022-01-05T06:30:57.000Z
|
examples/sponza/dependencies.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 69
|
2017-06-18T22:37:46.000Z
|
2020-01-23T04:02:22.000Z
|
examples/sponza/dependencies.py
|
Contraz/demosys-py
|
0479e0f3b0a3901f601bffd2d11e155f97b47555
|
[
"0BSD"
] | 9
|
2017-05-13T21:13:02.000Z
|
2020-10-01T18:09:49.000Z
|
from demosys.resources.meta import SceneDescription
effect_packages = []
resources = [
SceneDescription(label="sponza", path="sponza/Sponza/glTF/Sponza.gltf"),
]
| 21.875
| 77
| 0.725714
|
ac6f42278ead1b750b57b1738720719e7cef4d47
| 199
|
py
|
Python
|
ex066.py
|
brunocorbetta/exerciciocursoemvideo
|
b6ef52f3426f70f211ad70f233f0222c703a2c41
|
[
"MIT"
] | null | null | null |
ex066.py
|
brunocorbetta/exerciciocursoemvideo
|
b6ef52f3426f70f211ad70f233f0222c703a2c41
|
[
"MIT"
] | null | null | null |
ex066.py
|
brunocorbetta/exerciciocursoemvideo
|
b6ef52f3426f70f211ad70f233f0222c703a2c41
|
[
"MIT"
] | null | null | null |
cont = 0
soma = 0
while True:
n1 = int(input('Digite 999 para parar: '))
if n1 == 999:
break
cont += 1
soma += n1
print(f'Você digitou {cont} numeros e a soma deles da {soma} ')
| 19.9
| 63
| 0.577889
|
13e94eb6232feaec4d46f9963967606be1610e67
| 447
|
py
|
Python
|
polls/forms.py
|
davidefabbrico/Progetto_School
|
e32e345d154764725b96e2d22b441a17fae67ade
|
[
"MIT"
] | 1
|
2021-09-04T08:56:32.000Z
|
2021-09-04T08:56:32.000Z
|
polls/forms.py
|
davidefabbrico/Progetto_School
|
e32e345d154764725b96e2d22b441a17fae67ade
|
[
"MIT"
] | null | null | null |
polls/forms.py
|
davidefabbrico/Progetto_School
|
e32e345d154764725b96e2d22b441a17fae67ade
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm
from .models import *
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
class Form(forms.ModelForm):
class Meta:
model = School
fields = '__all__'
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
| 23.526316
| 68
| 0.666667
|
72f5e5eb668c9a46fd2aa529ed524fec0ca5cbb3
| 3,573
|
py
|
Python
|
server/UserProfile/models.py
|
dimejiconsult/Telemedicine
|
af812bd8703d86e648105dc0c01b02f6af783dee
|
[
"MIT"
] | null | null | null |
server/UserProfile/models.py
|
dimejiconsult/Telemedicine
|
af812bd8703d86e648105dc0c01b02f6af783dee
|
[
"MIT"
] | 8
|
2020-08-04T22:42:45.000Z
|
2022-03-12T00:48:53.000Z
|
server/UserProfile/models.py
|
dimejiconsult/Telemedicine
|
af812bd8703d86e648105dc0c01b02f6af783dee
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import BaseUserManager,AbstractBaseUser, PermissionsMixin
from django.utils.translation import ugettext_lazy as _
Gender = (
('Male', 'Male'),
('Female', 'Female')
)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, active=False, staff=False, admin=False, **extra_fields):
""" usermanager for creating users """
if not email:
            raise ValueError('please provide an email')
email = self.normalize_email(email)
user =self.model(email=email, **extra_fields)
user.active = False
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,password):
""" create super user """
user =self.create_user(email,password)
user.admin = True
user.staff = True
user.active = True
        user.is_superuser = True
user.save(using=self._db)
return user
# def create_DoctorProfile(self,email,password,**extra_fields):
# """ create super user """
# user =self.create_user(email,password,**extra_fields)
# user.is_active = False
# user.save(using=self._db)
# return user
def get_by_natural_key(self, email):
return self.get(email=email)
class Profile(AbstractBaseUser, PermissionsMixin):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(db_index=True, unique=True)
active = models.BooleanField(default=True)
admin = models.BooleanField(default=False)
staff = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def get_full_name(self):
return (self.first_name+' '+self.last_name)
def get_short_name(self):
return self.first_name
def natural_key(self):
return (self.first_name, self.last_name)
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.staff
def is_admin(self):
"Is the user a admin member?"
return self.admin
class DoctorProfile(Profile, PermissionsMixin):
gender = models.CharField(max_length=7, choices=Gender)
date_of_birth = models.DateField()
Year_of_Graduation = models.DateField()
Sch_of_Graduation = models.CharField(max_length=255)
Hospital_of_housemanship = models.CharField(max_length=255)
Folio_Number = models.CharField(max_length=50)
Full_License = models.FileField(upload_to='../media/License_document/%Y/%m/%d/')
Evidence_of_License_Reg = models.FileField(upload_to='../media/Evidence_of_Annual_License_Reg/%Y/%m/%d/')
CV = models.FileField(upload_to='../media/CV/%Y/%m/%d/')
Specialization = models.CharField(max_length=50)
objects = UserManager()
def __str__(self):
return self.first_name+' '+self.last_name
| 31.342105
| 109
| 0.670025
|
073659812594ba8061e59acae25d4774af47a863
| 274
|
py
|
Python
|
gcamp_analysis_files_finished/180313-04-top-acclimate/src/delta_video_config.py
|
eleanorlutz/aedes-aegypti-gcamp6s-larval-behavior
|
f8773525124a4138278b56f6de4fc5a9910a6319
|
[
"MIT"
] | null | null | null |
gcamp_analysis_files_finished/180313-04-top-acclimate/src/delta_video_config.py
|
eleanorlutz/aedes-aegypti-gcamp6s-larval-behavior
|
f8773525124a4138278b56f6de4fc5a9910a6319
|
[
"MIT"
] | null | null | null |
gcamp_analysis_files_finished/180313-04-top-acclimate/src/delta_video_config.py
|
eleanorlutz/aedes-aegypti-gcamp6s-larval-behavior
|
f8773525124a4138278b56f6de4fc5a9910a6319
|
[
"MIT"
] | null | null | null |
class Config:
def __init__(self):
self.basename = 'delta_video'
self.directory = '/home/eleanor/Documents/gcamp_analysis_files_temp/180313-04-top-acclimate/data'
self.topics = ['/multi_tracker/1/delta_video',]
self.record_length_hours = 1
| 45.666667
| 105
| 0.693431
|
c8c75af43565f6e140287644aaaefa97dd6e67c5
| 2,982
|
py
|
Python
|
ldm/modules/ema.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 563
|
2021-12-21T02:26:38.000Z
|
2022-03-31T05:54:51.000Z
|
ldm/modules/ema.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 23
|
2021-12-22T10:00:00.000Z
|
2022-03-24T20:43:49.000Z
|
ldm/modules/ema.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 51
|
2021-12-21T02:27:04.000Z
|
2022-03-23T12:30:31.000Z
|
import torch
from torch import nn
class LitEma(nn.Module):
def __init__(self, model, decay=0.9999, use_num_upates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
else torch.tensor(-1,dtype=torch.int))
for name, p in model.named_parameters():
if p.requires_grad:
#remove as '.'-character is not allowed in buffers
s_name = name.replace('.','')
self.m_name2s_name.update({name:s_name})
self.register_buffer(s_name,p.clone().detach().data)
self.collected_params = []
def forward(self,model):
decay = self.decay
if self.num_updates >= 0:
self.num_updates += 1
decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
sname = self.m_name2s_name[key]
shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
else:
                    assert key not in self.m_name2s_name
def copy_to(self, model):
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
else:
                assert key not in self.m_name2s_name
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
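# --- Added worked example (hedged, not part of the original module) ---
# With num_updates enabled, the effective decay is
# min(decay, (1 + num_updates) / (10 + num_updates)). On the first forward()
# call num_updates becomes 1, so decay = min(0.9999, 2 / 11) ~= 0.18 and each
# shadow buffer moves roughly 82% of the way toward the live parameter via
# shadow -= (1 - decay) * (shadow - param); as num_updates grows the ratio
# approaches 1 and the configured decay (0.9999 by default) takes over.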
| 38.727273
| 102
| 0.60228
|
b42eb69bc2f0185ac2ec8f9d76d2f17e7507678e
| 3,163
|
py
|
Python
|
benchmark/startCirq2698.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq2698.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq2698.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=41
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[3])) # number=36
c.append(cirq.H.on(input_qubit[3])) # number=16
c.append(cirq.CZ.on(input_qubit[1],input_qubit[3])) # number=17
c.append(cirq.H.on(input_qubit[3])) # number=18
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=37
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=38
c.append(cirq.Z.on(input_qubit[1])) # number=39
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=32
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=33
c.append(cirq.H.on(input_qubit[3])) # number=34
c.append(cirq.H.on(input_qubit[3])) # number=26
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=27
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=25
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=29
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=30
c.append(cirq.H.on(input_qubit[2])) # number=31
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=14
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2698.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 36.77907
| 77
| 0.681315
|
b81afcddd4de1126ee8d2c87f25050a73bc30287
| 7,130
|
py
|
Python
|
env/Lib/site-packages/plotly/graph_objs/scattersmith/marker/colorbar/_title.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 7
|
2022-01-16T12:28:16.000Z
|
2022-03-04T15:31:45.000Z
|
env/Lib/site-packages/plotly/graph_objs/scattersmith/marker/colorbar/_title.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 14
|
2021-10-20T23:33:47.000Z
|
2021-12-21T04:50:37.000Z
|
env/Lib/site-packages/plotly/graph_objs/scattersmith/marker/colorbar/_title.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattersmith.marker.colorbar"
_path_str = "scattersmith.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scattersmith.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
        the color bar. Defaults to "top" when `orientation` is "v" and
        defaults to "right" when `orientation` is "h". Note that the
title's location used to be set by the now deprecated
`titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattersmith.m
arker.colorbar.Title`
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattersmith.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
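# Appended usage sketch (illustrative only, not part of the generated plotly
# code): the constructor accepts either keyword arguments or a plain dict via
# `arg`, as documented above; the two constructions below are equivalent.
if __name__ == "__main__":
    t1 = Title(text="Colorbar title", side="top", font={"size": 14})
    t2 = Title({"text": "Colorbar title", "side": "top", "font": {"size": 14}})
    assert t1.to_plotly_json() == t2.to_plotly_json()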
| 33.791469
| 93
| 0.543198
|
458d5ce62a9e9557847f69a30f64f71be1f7f272
| 30,348
|
py
|
Python
|
srg3d/potential.py
|
cheshyre/srg3d-py
|
a62592c0d9bcb62d6a54d13827882cdfe46fa706
|
[
"MIT"
] | null | null | null |
srg3d/potential.py
|
cheshyre/srg3d-py
|
a62592c0d9bcb62d6a54d13827882cdfe46fa706
|
[
"MIT"
] | null | null | null |
srg3d/potential.py
|
cheshyre/srg3d-py
|
a62592c0d9bcb62d6a54d13827882cdfe46fa706
|
[
"MIT"
] | 1
|
2020-02-25T14:47:54.000Z
|
2020-02-25T14:47:54.000Z
|
# pylint: disable=too-many-lines
"""Nuclear potential module.
Module containing representations of 3D potentials for use in nuclear theory.
Also contains logic to read them from and save them to files with a standard
naming convention.
class Channel
-------------
A container for the channel information for a potential. It has the following
method::
channel = Channel(spin, orb_ang_mom_1, orb_ang_mom_2, tot_ang_mom, isospin)
These are also commonly read as S, L, L, J, and T.
class CoupledChannel
--------------------
A container to handle coupled channels. It has the following method::
channel = CoupledChannel(list_of_channels)
All channels in coupled channel should have same S, J, and T.
class PotentialType
-------------------
A container class to hold all the physical information about the potential. It
has the following method::
potential_type = PotentialType(n_body, order, name, channel, particles)
class Potential
---------------
Abstraction for the representation of a potential. Handles the logic of adding
and removing weights. Can generate corresponding kinetic energy. It has the
following methods::
potential = Potential(potential_type, nodes, weights, potential, lam=50.0,
has_weights=False)
kinetic_energy = potential.kinetic_energy()
potential_data_wo_weights = potential.without_weights()
potential_data_w_weights = potential.with_weights()
new_potential = potential.copy(potential_data, lam)
reduced_potential = potential.reduce_dim(dim)
class CoupledPotential
----------------------
Abstraction for representation for potential of coupled channel. Handles logic
of adding and removing weights. Can generate kinetic energy. It has the
following methods::
potential = CoupledPotential([potential1, potential2, potential3,
potential4])
kinetic_energy = potential.kinetic_energy()
potential_data_wo_weights = potential.without_weights()
potential_data_w_weights = potential.with_weights()
new_potential = potential.copy(potential_data, lam)
reduced_potential = potential.reduce_dim(dim)
channel_potential = potential.extract_channel_potential(
potential1.potential_type.channel
)
Methods
-------
potential = load_from_file(file_str)
Method to load a potential from a file. Requires that standard file-naming
conventions have been followed.
potential = load(n_body, order, name, channel, lambda, particles,
num_points='*')
Method to load potential from a standard directory. Requires that potential was
saved there earlier.
save(potential, directory=None)
Method to save potential with correct naming convention either to a standard
folder or to a user-specified directory.
Changelog:
2018.11.14
Added:
CoupledChannel for coupled channels
CoupledPotential for potentials in coupled channels
2018.11.09
Added:
load_from_file method
Changed:
Make load take parameters and use load_from_file for loading from a
specific file
Save now has different parameter ordering with the dir_str param being
optional
2018.11.06
Added:
Initial creation of module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
from math import pi
from math import sqrt
import os
import re
import numpy as np
import matplotlib.pyplot as plt
NBODY_DICT = {
'NN': 2,
'3N': 3,
}
STANDARD_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'potentials')
INV_NBODY_DICT = {v: k for k, v in NBODY_DICT.items()}
ORDER_DICT = {
'LO': 0,
'NLO': 1,
'N2LO': 2,
'N3LO': 3,
}
INV_ORDER_DICT = {v: k for k, v in ORDER_DICT.items()}
class Channel:
"""Container for information on channel for potential."""
# pylint: disable=too-many-arguments
def __init__(self, spin, orb_ang_mom_1, orb_ang_mom_2, tot_ang_mom,
isospin):
"""Create Channel object.
Parameters
----------
spin : int
Spin quantum number.
orb_ang_mom_1 : int
First angular momentum quantum number.
orb_ang_mom_2 : int
Second angular momentum quantum number.
tot_ang_mom : int
Total angular momentum.
isospin : int
2-body isospin quantum number.
"""
self._spin = spin
self._l1 = orb_ang_mom_1
self._l2 = orb_ang_mom_2
self._j = tot_ang_mom
self._isospin = isospin
def as_5tuple(self):
"""Return 5-tuple representation of channel.
Returns
-------
(int, int, int, int, int)
5-tuple with channel quantum numbers.
"""
return (self._spin, self._l1, self._l2, self._j, self._isospin)
def __str__(self):
"""Return string representation of channel.
Returns
-------
str
String of 5 integers with channel information which are SLLJT.
"""
return '{}{}{}{}{}'.format(self._spin, self._l1, self._l2, self._j,
self._isospin)
def __eq__(self, other):
"""Return whether channel is same as another channel object.
Returns
-------
bool
True if self and other are the same, False otherwise.
"""
return self.as_5tuple() == other.as_5tuple()
def __ne__(self, other):
"""Return whether channel is different from another channel object.
Returns
-------
bool
            False if self and other are the same, True otherwise.
"""
return self.as_5tuple() != other.as_5tuple()
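# Illustrative check (not part of the original module): the deuteron-like 3S1
# channel has S=1, L=L'=0, J=1, T=0, so its SLLJT string form is '10010'.
if __name__ == "__main__":
    _ch = Channel(1, 0, 0, 1, 0)
    assert str(_ch) == '10010'
    assert _ch.as_5tuple() == (1, 0, 0, 1, 0)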
class CoupledChannel(Channel):
"""Container for information about coupled channel."""
def __init__(self, list_of_channels):
"""Create coupled channel container.
Parameters
----------
list_of_channels : list of Channel objects
List of channels in coupled channel.
"""
spins = {x.as_5tuple()[0] for x in list_of_channels}
tot_ang_moms = {x.as_5tuple()[3] for x in list_of_channels}
isospins = {x.as_5tuple()[4] for x in list_of_channels}
if len(spins) * len(isospins) * len(tot_ang_moms) != 1:
raise ValueError('Given channels cannot be coupled.')
super(CoupledChannel, self).__init__(spins.pop(), '*', '*',
tot_ang_moms.pop(),
isospins.pop())
self._channels = list_of_channels
@property
def channels(self):
"""Return list of channels in coupled channel.
Returns
-------
list of Channel objects
"""
return self._channels
def __eq__(self, other):
"""Return whether coupled channel object is same as another.
Returns
-------
bool
True if coupled channels are equal, False otherwise.
"""
return False not in {x == y for x, y in zip(self.channels,
other.channels)}
def __ne__(self, other):
"""Return whether coupled channel object is not same as another.
Returns
-------
bool
True if coupled channels are not equal, False otherwise.
"""
return False in {x == y for x, y in zip(self.channels,
other.channels)}
class PotentialType:
"""Container for information related to potential."""
# pylint: disable=too-many-arguments
def __init__(self, n_body, order, name, channel, particles):
"""Construct potential type.
Parameters
----------
n_body : int
Number of particles interacting in potential.
order : int
Order to which potential was calculated.
name : str
Name for potential, may reflect something about origin.
channel: Channel
Object representing the partial wave channel for the potential.
particles: str
String representing constituent particles in the interaction.
"""
self._n_body = n_body
self._order = order
self._name = name
self._channel = channel
self._particles = particles
@property
def n_body(self):
"""Return number of particles in potential.
Returns
-------
int
Number of particles.
"""
return self._n_body
@property
def order(self):
"""Return order to which potential was calculated.
Returns
-------
int
Order of potential.
"""
return self._order
@property
def name(self):
"""Return name of potential.
Returns
-------
str
Name of potential.
"""
return self._name
@property
def channel(self):
"""Return partial wave channel of potential.
Returns
-------
Channel
Channel object representing partial wave channel.
"""
return self._channel
@property
def particles(self):
"""Return particles in interaction to which potential applies.
Returns
-------
str
String with particles in interaction.
"""
return self._particles
def __eq__(self, other):
"""Return whether potential type is same as other potential type.
Returns
-------
bool
True if same, False otherwise.
"""
return ((self.n_body == other.n_body)
and (self.order == other.order)
and (self.name == other.name)
and (self.channel == other.channel)
and (self.particles == other.particles))
def __ne__(self, other):
"""Return whether potential type is not same as other potential type.
Returns
-------
bool
False if same, True otherwise.
"""
return not ((self.n_body == other.n_body)
and (self.order == other.order)
and (self.name == other.name)
and (self.channel == other.channel)
and (self.particles == other.particles))
class Potential:
"""Class encapsulating all relevant information about a potential."""
# pylint: disable=too-many-arguments
def __init__(self, potential_type, nodes, weights, potential, lam=50.0,
has_weights=False):
"""Create potential from parameters.
Parameters
----------
potential_type : PotentialType
PotentialType instance with information about the potential.
nodes : list of floats
List of momenta at which the potential is defined.
weights : list of floats
List of integration weights corresponding to nodes.
potential : matrix of floats
Value of potential at incoming and outgoing momenta in nodes.
lam : float, optional
Value of lambda (SRG flow parameter) for potential. For unevolved
potentials, a value of 50 is the default.
has_weights : bool, optional
Specifies whether potential given has weights factored in already.
"""
self._potential_type = potential_type
self._nodes = nodes
self._weights = weights
self._lam = lam
if has_weights:
potential = _rem_w(potential, self._weights, self._nodes)
self._potential = potential
def copy(self, potential, lam):
"""Create potential from current potential with new data and lam.
Parameters
----------
potential : matrix of floats
Potential data.
lam : float
Value of lambda
Returns
-------
Potential
New potential with new data.
"""
return Potential(self._potential_type, self._nodes, self._weights,
potential, lam)
def with_weights(self):
"""Return potential with weights factored in (for calculations).
Returns
-------
matrix of floats
Potential with integration weights.
"""
return _add_w(self._potential, self._weights, self._nodes)
def without_weights(self):
"""Return potential without weights (for visualization).
Returns
-------
matrix of floats
Potential without integration weights.
"""
return np.array(self._potential)
def reduce_dim(self, dim):
"""Return new potential with only `dim` lowest energy states.
Parameters
----------
dim : int
Dimension to which potential is to be reduced.
Returns
-------
Potential
New reduced dimension potential.
Raises
------
ValueError
When value for new dim is too small or too large.
"""
if dim >= len(self.nodes):
raise ValueError('Value of dim is not smaller than current dim.')
if dim <= 0:
raise ValueError('Zero or negative dim is not allowed.')
new_data = self._potential[np.ix_(list(range(dim)), list(range(dim)))]
new_nodes = self._nodes[:dim]
new_weights = self._weights[:dim]
return Potential(self._potential_type, new_nodes, new_weights,
new_data, self._lam)
def kinetic_energy(self):
"""Return kinetic energy for potential (for calculations).
Returns
-------
matrix of floats
Kinetic energy matrix.
"""
return np.diag(np.array([p**2 for p in self._nodes]))
    def __eq__(self, other):
        """Return whether two potentials are equal within numerical error.
Returns
-------
bool
True when potential type, nodes, weights, potential, and lam are
all equal within epsilon, False otherwise.
"""
# Numerical errors smaller than this are acceptable
# If there is something wrong with the physics, it should produce
# errors larger than this.
eps = 10**(-4)
if self.potential_type != other.potential_type:
return False
if self.dim != other.dim:
return False
if abs(self.lam - other.lam) > eps:
return False
for p_self, p_other, w_self, w_other in zip(self.nodes, other.nodes,
self.weights,
other.weights):
if abs(p_self - p_other) > eps or abs(w_self - w_other) > eps:
return False
for i in range(self.dim):
for j in range(self.dim):
diff = abs(self.without_weights()[i][j] -
other.without_weights()[i][j])
if diff > eps:
return False
return True
    def __ne__(self, other):
        """Return whether two potentials are not equal within numerical error.
Returns
-------
bool
False when potential type, nodes, weights, potential, and lam are
all equal within epsilon, True otherwise.
"""
# Numerical errors smaller than this are acceptable
# If there is something wrong with the physics, it should produce
# errors larger than this.
eps = 10**(-4)
if self.potential_type != other.potential_type:
return True
if self.dim != other.dim:
return True
if abs(self.lam - other.lam) > eps:
return True
for p_self, p_other, w_self, w_other in zip(self.nodes, other.nodes,
self.weights,
other.weights):
if abs(p_self - p_other) > eps or abs(w_self - w_other) > eps:
return True
for i in range(self.dim):
for j in range(self.dim):
diff = abs(self.without_weights()[i][j] -
other.without_weights()[i][j])
if diff > eps:
return True
return False
@property
def dim(self):
"""Return the dimension of the potential matrix.
Returns
-------
int
The dimension of the (square) potential matrix.
"""
return len(self._nodes)
@property
def potential_type(self):
"""Return `PotentialType` object for potential.
Returns
-------
PotentialType
Object with all physics related information for the potential.
"""
return self._potential_type
@property
def nodes(self):
"""Return the nodes for the potential.
Returns
-------
list of floats
List of momenta at which potential is defined.
"""
return self._nodes
@property
def weights(self):
"""Return weights for the potential.
Returns
-------
list of floats
Integration weights corresponding to nodes for potential.
"""
return self._weights
@property
def lam(self):
"""Return lambda for potential.
Returns
-------
float
Value of lambda, the SRG flow parameter, for potential.
"""
return self._lam
class CoupledPotential(Potential):
"""Representation of potential of coupled channel."""
def __init__(self, list_of_potentials): # pylint: disable=too-many-locals
"""Create potential from list of potentials in a coupled channel.
Parameters
----------
list_of_potentials : list of Potential objects
List of potentials to form coupled channel.
Returns
-------
Potential
New potential with full coupled channel.
"""
self._construction = list_of_potentials
channels = [x.potential_type.channel for x in list_of_potentials]
n_body = {x.potential_type.n_body for x in list_of_potentials}
order = {x.potential_type.order for x in list_of_potentials}
name = {x.potential_type.name for x in list_of_potentials}
particles = {x.potential_type.particles for x in list_of_potentials}
if len(n_body) * len(order) * len(name) * len(particles) != 1:
raise ValueError('Given potentials cannot be coupled.')
coupled_channel = CoupledChannel(channels)
potential_type = PotentialType(n_body.pop(), order.pop(), name.pop(),
coupled_channel, particles.pop())
lam = {x.lam for x in list_of_potentials}
if len(lam) != 1:
raise ValueError('Not all given potentials are at the same lam.')
lam = lam.pop()
dim = {x.dim for x in list_of_potentials}
if len(dim) != 1:
raise ValueError('Not all given potentials have same dim.')
dim = dim.pop()
c_dim = int(sqrt(len(list_of_potentials)))
if c_dim**2 != len(list_of_potentials):
raise ValueError('Non-square number of potentials given.')
nodes = []
weights = []
for pot in list_of_potentials[:c_dim]:
nodes += pot.nodes
weights += pot.weights
nodes = np.array(nodes)
weights = np.array(weights)
potential_data = np.zeros((c_dim * dim, c_dim * dim))
self._channel_indexes = []
for i in range(c_dim):
for j in range(c_dim):
r_s = i * dim
r_e = (i + 1) * dim
c_s = j * dim
c_e = (j + 1) * dim
data = list_of_potentials[i * c_dim + j].without_weights()
potential_data[r_s:r_e, c_s:c_e] = data
self._channel_indexes.append((r_s, r_e, c_s, c_e))
super(CoupledPotential, self).__init__(potential_type, nodes, weights,
potential_data, lam)
self._c_dim = c_dim
self._w_dim = dim
self._channels = channels
def copy(self, potential, lam):
"""Create potential from current potential with new data and lam.
Parameters
----------
potential : matrix of floats
Potential data.
lam : float
Value of lambda
Returns
-------
Potential
New potential with new data.
"""
new_potentials = []
for pot, ranges in zip(self._construction, self._channel_indexes):
sub_matrix = _submatrix(potential, ranges)
new_potentials.append(pot.copy(sub_matrix, lam))
return CoupledPotential(new_potentials)
def reduce_dim(self, dim):
"""Return new potential with only `dim` lowest energy states.
Parameters
----------
dim : int
Dimension to which potential is to be reduced.
Returns
-------
Potential
New reduced dimension potential.
Raises
------
ValueError
When value for new dim is too small or too large.
"""
if dim >= self._w_dim:
raise ValueError('Value of dim is not smaller than current dim.')
if dim <= 0:
raise ValueError('Zero or negative dim is not allowed.')
new_potentials = []
for pot, ranges in zip(self._construction, self._channel_indexes):
sub_matrix = _submatrix(self._potential, ranges)
new_potentials.append(pot.copy(sub_matrix,
self._lam).reduce_dim(dim))
return CoupledPotential(new_potentials)
def extract_channel_potential(self, channel):
"""Return potential corresponding to channel.
Parameters
----------
channel : Channel
Channel to extract.
Returns
-------
Potential
Potential corresponding to channel.
"""
for chan, potential, ranges in zip(self._channels, self._construction,
self._channel_indexes):
if channel == chan:
sub_matrix = _submatrix(self._potential, ranges)
return potential.copy(sub_matrix, self._lam)
raise ValueError('Channel not found.')
@property
def dim(self):
"""Return the dimension of single channel in the potential matrix.
Returns
-------
int
The dimension of a single channel in the (square) potential matrix.
"""
return self._w_dim
# pylint: disable=too-many-locals
def load_from_file(file_str):
"""Load potential from file.
Parameters
----------
file_str : str
String path to file with potential data.
Returns
-------
Potential
Potential created from extracted information and data from file.
"""
# Parse info about potential from filename
# Strip directory structure
end = file_str.split('/')[-1]
# Match regular expression
regex_str = r'V(.*)_(.*)_(.*)_SLLJT_(.*)_lambda_(.*)_Np_(.*)_(.*)\.dat'
result = re.search(regex_str, end)
# Extract values from matches
n_body_str = result.group(1)
order_str = result.group(2)
name = result.group(3)
channel_str = result.group(4)
lam = float(result.group(5))
particles = result.group(7)
# Convert string values to integer values
n_body = NBODY_DICT[n_body_str]
order = ORDER_DICT[order_str]
# Convert channel to 5-tuple, then Channel object
channel = Channel(*tuple([int(n) for n in channel_str]))
# Get number of points
num_points = int(result.group(6))
# Read potential
with open(file_str) as file:
nodes = []
weights = []
for _ in range(num_points):
vals = file.readline().split()
weights.append(float(vals[0]))
nodes.append(float(vals[1]))
potential = np.array([[float(file.readline().split()[-1]) for _ in
range(num_points)] for _ in range(num_points)])
# Create potential_type
potential_type = PotentialType(n_body, order, name, channel, particles)
# Return potential
return Potential(potential_type, nodes, weights, potential, lam)
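# Hedged illustration (the file name below is made up): names follow
# V<nbody>_<order>_<name>_SLLJT_<SLLJT>_lambda_<lam>_Np_<N>_<particles>.dat
# and are split by the regular expression above into seven components.
if __name__ == "__main__":
    _name = 'VNN_N3LO_example_SLLJT_10010_lambda_2.00_Np_100_np.dat'
    _match = re.search(r'V(.*)_(.*)_(.*)_SLLJT_(.*)_lambda_(.*)_Np_(.*)_(.*)\.dat',
                       _name)
    assert _match.groups() == ('NN', 'N3LO', 'example', '10010', '2.00', '100',
                               'np')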
# pylint: disable=too-many-arguments
def load(n_body, order, name, channel, lam, particles, num_points='*'):
"""Load potential based on parameters.
Parameters
----------
n_body : int
Number of particles interacting in potential.
order : int
Order to which potential was calculated.
name : str
Name for potential, may reflect something about origin.
channel: Channel or (int, int, int, int, int) or str
Object representing the partial wave channel for the potential.
lam : float
Value of SRG flow parameter for potential.
particles: str
String representing constituent particles in the interaction.
num_points : int, optional
Number of points in potential. Should only be specified if multiple
versions of same potential are saved and you need a specific one.
Otherwise, will match the first one in lexicographical ordering.
Returns
-------
Potential
Potential created from extracted information and data from file.
Raises
------
FileNotFoundError
If globbing doesn't match any files.
"""
# Set up format string
file_format_str = '{}/V{}_{}_{}_SLLJT_{}_lambda_{:.2f}_Np_{}_{}.dat'
# Get values for format string
n_body_str = INV_NBODY_DICT[n_body]
order_str = INV_ORDER_DICT[order]
# Handle non-string formats
if isinstance(channel, Channel):
channel = str(channel)
elif isinstance(channel, tuple):
        channel = ''.join(str(n) for n in channel)
dir_str = os.path.join(STANDARD_PATH, n_body_str,
'SLLJT_{}'.format(channel))
# Create full file path string
file_path = file_format_str.format(dir_str, n_body_str, order_str, name,
channel, lam, num_points, particles)
# Handle globbing
if num_points == '*':
try:
file_path = glob.glob(file_path)[0]
except IndexError:
raise FileNotFoundError('No potential with those params found.')
return load_from_file(file_path)
def save(potential, dir_str=None):
"""Save potential with correct file-naming.
Parameters
----------
potential : Potential
Potential to be saved.
dir_str : str, optional
String corresponding to directory where file should be saved. May have
trailing `/`.
"""
# Set up format strings
file_format_str = '{}/V{}_{}_{}_SLLJT_{}_lambda_{:.2f}_Np_{}_{}.dat'
nodes_format_str = '{:.5e} {:.5e}\n'
potential_format_str = '{:.5e} {:.5e} {:.5e}\n'
# Get values for format string
potential_type = potential.potential_type
n_body = potential_type.n_body
n_body_str = INV_NBODY_DICT[n_body]
order = potential_type.order
order_str = INV_ORDER_DICT[order]
name = potential_type.name
channel_str = str(potential_type.channel)
lam = potential.lam
num_points = len(potential.nodes)
particles = potential_type.particles
# Handle optional argument
if dir_str is None:
dir_str = os.path.join(STANDARD_PATH, n_body_str,
'SLLJT_{}'.format(channel_str))
# Strip potential trailing '/'
if dir_str[-1] == '/':
dir_str = dir_str[:-1]
# Create full file path string
file_path = file_format_str.format(dir_str, n_body_str, order_str, name,
channel_str, lam, num_points, particles)
    # Create directory if it doesn't exist
_ensure_dir_for_file(file_path)
# Output potential
with open(file_path, 'w+') as file:
for weight, node in zip(potential.weights, potential.nodes):
file.write(nodes_format_str.format(weight, node))
for i in range(num_points):
for j in range(num_points):
file.write(potential_format_str.format(
potential.nodes[i], potential.nodes[j],
potential.without_weights()[i][j]))
def plot(potential, v_min=None, v_max=None):
"""Plot potential with colorbar.
Parameters
----------
potential : Potential
Potential to be plotted.
v_min : int, optional
Minimum value to be reflected on the colorbar scale.
v_max : int, optional
Maximum value to be reflected on the colorbar scale.
"""
if v_min is None or v_max is None:
plt.matshow(potential.without_weights())
else:
plt.matshow(potential.without_weights(), vmin=v_min, vmax=v_max)
plt.colorbar()
plt.show()
plt.close()
# ------------------- Internal Methods ------------------------------------- #
def _add_w(matrix, weights, nodes):
factor_vector = [sqrt(w) * p for w, p in zip(weights, nodes)]
weighted_matrix = np.dot(np.dot(np.diag(factor_vector), matrix),
np.diag(factor_vector))
return 2 / pi * weighted_matrix
def _rem_w(matrix, weights, nodes):
factor_vector = [1/(sqrt(w) * p) for w, p in zip(weights, nodes)]
unweighted_matrix = np.dot(np.dot(np.diag(factor_vector), pi / 2 * matrix),
np.diag(factor_vector))
return unweighted_matrix
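# Hedged sanity check (illustrative only): _rem_w inverts _add_w exactly,
# since both just conjugate by diag(sqrt(w_i) * p_i) and apply the 2/pi
# (respectively pi/2) prefactor.
if __name__ == "__main__":
    _p = np.array([0.1, 0.5, 1.0])
    _w = np.array([0.2, 0.3, 0.2])
    _v = np.arange(9.0).reshape(3, 3)
    assert np.allclose(_rem_w(_add_w(_v, _w, _p), _w, _p), _v)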
def _ensure_dir_for_file(file):
directory = os.path.dirname(file)
if not os.path.exists(directory):
os.makedirs(directory)
def _submatrix(potential, ranges):
return potential[np.ix_(list(range(ranges[0], ranges[1])),
list(range(ranges[2], ranges[3])))]
| 30.137041
| 79
| 0.588474
|
3690ff6bf4c8454401253f8bde218fc801093ff8
| 21,694
|
py
|
Python
|
scripts/Crawlers/OSFCrawler.py
|
emmetaobrien/conp-dataset
|
7776edbb9025711eb38e8482c221fbb45715f27d
|
[
"MIT"
] | 18
|
2018-05-15T23:01:38.000Z
|
2021-09-22T17:12:13.000Z
|
scripts/Crawlers/OSFCrawler.py
|
emmetaobrien/conp-dataset
|
7776edbb9025711eb38e8482c221fbb45715f27d
|
[
"MIT"
] | 411
|
2019-01-07T15:05:54.000Z
|
2022-03-21T15:08:36.000Z
|
scripts/Crawlers/OSFCrawler.py
|
emmetaobrien/conp-dataset
|
7776edbb9025711eb38e8482c221fbb45715f27d
|
[
"MIT"
] | 92
|
2018-05-15T21:04:02.000Z
|
2022-01-31T02:48:37.000Z
|
import datetime
import json
import os
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
import humanize
import requests
from datalad.distribution.dataset import Dataset
from git import Repo
from scripts.Crawlers.BaseCrawler import BaseCrawler
def _create_osf_tracker(path, dataset):
with open(path, "w") as f:
data = {
"version": dataset["version"],
"title": dataset["title"],
}
json.dump(data, f, indent=4)
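# Illustrative note (values are made up): the tracker file written above holds
# only the OSF "version" (the dataset's date_modified string) and the title,
# which update_if_necessary() later compares against the live OSF metadata:
#
#     {
#         "version": "2021-03-01T12:00:00.000000",
#         "title": "Example Dataset"
#     }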
class OSFCrawler(BaseCrawler):
def __init__(self, github_token, config_path, verbose, force, no_pr, basedir):
super().__init__(github_token, config_path, verbose, force, no_pr, basedir)
self.osf_token = self._get_token()
def _get_token(self):
if os.path.isfile(self.config_path):
with open(self.config_path) as f:
data = json.load(f)
if "osf_token" in data.keys():
return data["osf_token"]
def _get_request_with_bearer_token(self, link, redirect=True):
header = {"Authorization": f"Bearer {self.osf_token}"}
r = requests.get(link, headers=header, allow_redirects=redirect)
if r.ok:
return r
else:
raise Exception(f"Request to {r.url} failed: {r.content}")
def _query_osf(self):
query = "https://api.osf.io/v2/nodes/?filter[tags]=canadian-open-neuroscience-platform"
r_json = self._get_request_with_bearer_token(query).json()
results = r_json["data"]
# Retrieve results from other pages
if r_json["links"]["meta"]["total"] > r_json["links"]["meta"]["per_page"]:
next_page = r_json["links"]["next"]
while next_page is not None:
next_page_json = self._get_request_with_bearer_token(next_page).json()
results.extend(next_page_json["data"])
next_page = next_page_json["links"]["next"]
if self.verbose:
print("OSF query: {}".format(query))
return results
def _download_files(
self,
link,
current_dir,
inner_path,
d,
annex,
sizes,
is_private=False,
):
r_json = self._get_request_with_bearer_token(link).json()
files = r_json["data"]
# Retrieve the files in the other pages if there are more than 1 page
if (
"links" in r_json.keys()
and r_json["links"]["meta"]["total"] > r_json["links"]["meta"]["per_page"]
):
next_page = r_json["links"]["next"]
while next_page is not None:
next_page_json = self._get_request_with_bearer_token(next_page).json()
files.extend(next_page_json["data"])
next_page = next_page_json["links"]["next"]
for file in files:
# Handle folders
if file["attributes"]["kind"] == "folder":
folder_path = os.path.join(current_dir, file["attributes"]["name"])
os.mkdir(folder_path)
self._download_files(
file["relationships"]["files"]["links"]["related"]["href"],
folder_path,
os.path.join(inner_path, file["attributes"]["name"]),
d,
annex,
sizes,
is_private,
)
# Handle single files
elif file["attributes"]["kind"] == "file":
# Private dataset/files
if is_private:
correct_download_link = self._get_request_with_bearer_token(
file["links"]["download"],
redirect=False,
).headers["location"]
if "https://accounts.osf.io/login" not in correct_download_link:
                        zip_file = file["attributes"]["name"].split(".")[-1] == "zip"
d.download_url(
correct_download_link,
path=os.path.join(inner_path, ""),
archive=zip_file,
)
else: # Token did not work for downloading file, return
print(
f'Unable to download file {file["links"]["download"]} with current token, skipping file',
)
return
# Public file
else:
# Handle zip files
if file["attributes"]["name"].split(".")[-1] == "zip":
d.download_url(
file["links"]["download"],
path=os.path.join(inner_path, ""),
archive=True,
)
else:
d.download_url(
file["links"]["download"],
path=os.path.join(inner_path, ""),
)
# append the size of the downloaded file to the sizes array
file_size = file["attributes"]["size"]
if not file_size:
# if the file size cannot be found in the OSF API response, then get it from git annex info
inner_file_path = os.path.join(
inner_path,
file["attributes"]["name"],
)
annex_info_dict = json.loads(
annex("info", "--bytes", "--json", inner_file_path),
)
file_size = int(annex_info_dict.get("size", 0))
sizes.append(file_size)
def _download_components(
self,
components_list,
current_dir,
inner_path,
d,
annex,
dataset_size,
is_private,
):
# Loop through each available components and download their files
for component in components_list:
component_title = self._clean_dataset_title(
component["attributes"]["title"],
)
component_inner_path = os.path.join(
inner_path,
"components",
component_title,
)
os.makedirs(os.path.join(current_dir, component_inner_path))
self._download_files(
component["relationships"]["files"]["links"]["related"]["href"],
os.path.join(current_dir, component_inner_path),
component_inner_path,
d,
annex,
dataset_size,
is_private,
)
# check if the component contains (sub)components, in which case, download the (sub)components data
subcomponents_list = self._get_components(
component["relationships"]["children"]["links"]["related"]["href"],
)
if subcomponents_list:
self._download_components(
subcomponents_list,
current_dir,
os.path.join(component_inner_path),
d,
annex,
dataset_size,
is_private,
)
        # Once we have downloaded all the component files, check whether there are any empty
        # directories (in case the 'OSF parent' dataset did not have any downloaded files)
list_of_empty_dirs = [
dirpath
for (dirpath, dirnames, filenames) in os.walk(current_dir)
if len(dirnames) == 0 and len(filenames) == 0
]
for empty_dir in list_of_empty_dirs:
os.rmdir(empty_dir)
def _get_contributors(self, link):
r = self._get_request_with_bearer_token(link)
contributors = [
contributor["embeds"]["users"]["data"]["attributes"]["full_name"]
for contributor in r.json()["data"]
]
return contributors
def _get_license(self, link):
r = self._get_request_with_bearer_token(link)
return r.json()["data"]["attributes"]["name"]
def _get_components(self, link):
r = self._get_request_with_bearer_token(link)
return r.json()["data"]
def _get_wiki(self, link) -> Optional[str]:
r = self._get_request_with_bearer_token(link)
data = r.json()["data"]
if len(data) > 0:
return self._get_request_with_bearer_token(
data[0]["links"]["download"]
).content.decode()
def _get_institutions(self, link):
r = self._get_request_with_bearer_token(link)
if r.json()["data"]:
institutions = [
institution["attributes"]["name"] for institution in r.json()["data"]
]
return institutions
def _get_identifier(self, link):
r = self._get_request_with_bearer_token(link)
return r.json()["data"][0]["attributes"]["value"] if r.json()["data"] else False
def get_all_dataset_description(self):
osf_dois = []
datasets = self._query_osf()
for dataset in datasets:
# skip datasets that have a parent since the files' components will
# go into the parent dataset.
if "parent" in dataset["relationships"].keys():
continue
attributes = dataset["attributes"]
# Retrieve keywords/tags
keywords = list(map(lambda x: {"value": x}, attributes["tags"]))
# Retrieve contributors/creators
contributors = self._get_contributors(
dataset["relationships"]["contributors"]["links"]["related"]["href"],
)
# Retrieve license
license_ = "None"
if "license" in dataset["relationships"].keys():
license_ = self._get_license(
dataset["relationships"]["license"]["links"]["related"]["href"],
)
# Retrieve institution information
institutions = self._get_institutions(
dataset["relationships"]["affiliated_institutions"]["links"]["related"][
"href"
],
)
# Retrieve identifier information
identifier = self._get_identifier(
dataset["relationships"]["identifiers"]["links"]["related"]["href"],
)
# Get link for the dataset files
files_link = dataset["relationships"]["files"]["links"]["related"]["href"]
# Get components list
components_list = self._get_components(
dataset["relationships"]["children"]["links"]["related"]["href"],
)
# Get wiki to put in README
wiki: Optional[str] = None
try:
wiki = self._get_wiki(
dataset["relationships"]["wikis"]["links"]["related"]["href"]
)
except Exception as e:
print(f'Error getting wiki for {attributes["title"]} because of {e}')
# Gather extra properties
extra_properties = [
{
"category": "logo",
"values": [
{
"value": "https://osf.io/static/img/institutions/shields/cos-shield.png",
},
],
},
]
if institutions:
extra_properties.append(
{
"category": "origin_institution",
"values": list(
map(lambda x: {"value": x}, institutions),
),
},
)
# Retrieve dates
date_created = datetime.datetime.strptime(
attributes["date_created"],
"%Y-%m-%dT%H:%M:%S.%f",
)
date_modified = datetime.datetime.strptime(
attributes["date_modified"],
"%Y-%m-%dT%H:%M:%S.%f",
)
dataset_dats_content = {
"title": attributes["title"],
"files": files_link,
"components_list": components_list,
"homepage": dataset["links"]["html"],
"creators": list(
map(lambda x: {"name": x}, contributors),
),
"description": attributes["description"],
"wiki": wiki,
"version": attributes["date_modified"],
"licenses": [
{
"name": license_,
},
],
"dates": [
{
"date": date_created.strftime("%Y-%m-%d %H:%M:%S"),
"type": {
"value": "date created",
},
},
{
"date": date_modified.strftime("%Y-%m-%d %H:%M:%S"),
"type": {
"value": "date modified",
},
},
],
"keywords": keywords,
"distributions": [
{
"size": 0,
"unit": {"value": "B"},
"access": {
"landingPage": dataset["links"]["html"],
"authorizations": [
{
"value": "public"
if attributes["public"]
else "private",
},
],
},
},
],
"extraProperties": extra_properties,
}
if identifier:
source = "OSF DOI" if "OSF.IO" in identifier else "DOI"
dataset_dats_content["identifier"] = {
"identifier": identifier,
"identifierSource": source,
}
osf_dois.append(dataset_dats_content)
if self.verbose:
print("Retrieved OSF DOIs: ")
for osf_doi in osf_dois:
print(
"- Title: {}, Last modified: {}".format(
osf_doi["title"],
osf_doi["version"],
),
)
return osf_dois
def add_new_dataset(self, dataset: Dict[str, Any], dataset_dir: str):
d: Dataset = self.datalad.Dataset(dataset_dir)
d.no_annex(".conp-osf-crawler.json")
d.save()
annex: Callable = Repo(dataset_dir).git.annex
dataset_size: List[int] = []
# Setup private OSF dataset if the dataset is private
is_private: bool = self._setup_private_dataset(
dataset["files"],
dataset_dir,
annex,
d,
)
self._download_files(
dataset["files"],
dataset_dir,
"",
d,
annex,
dataset_size,
is_private,
)
if dataset["components_list"]:
self._download_components(
dataset["components_list"],
dataset_dir,
"",
d,
annex,
dataset_size,
is_private,
)
dataset_size_num, dataset_unit = humanize.naturalsize(sum(dataset_size)).split(
" ",
)
dataset["distributions"][0]["size"] = float(dataset_size_num)
dataset["distributions"][0]["unit"]["value"] = dataset_unit
# Add .conp-osf-crawler.json tracker file
_create_osf_tracker(
os.path.join(dataset_dir, ".conp-osf-crawler.json"),
dataset,
)
def update_if_necessary(self, dataset_description, dataset_dir):
tracker_path = os.path.join(dataset_dir, ".conp-osf-crawler.json")
if not os.path.isfile(tracker_path):
print("{} does not exist in dataset, skipping".format(tracker_path))
return False
with open(tracker_path) as f:
tracker = json.load(f)
if tracker["version"] == dataset_description["version"]:
# Same version, no need to update
if self.verbose:
print(
"{}, version {} same as OSF version DOI, no need to update".format(
dataset_description["title"],
dataset_description["version"],
),
)
return False
else:
# Update dataset
if self.verbose:
print(
"{}, version {} different from OSF version DOI {}, updating".format(
dataset_description["title"],
tracker["version"],
dataset_description["version"],
),
)
# Remove all data and DATS.json files
for file_name in os.listdir(dataset_dir):
if file_name[0] == ".":
continue
self.datalad.remove(os.path.join(dataset_dir, file_name), check=False)
d = self.datalad.Dataset(dataset_dir)
annex = Repo(dataset_dir).git.annex
dataset_size = []
is_private: bool = self._is_private_dataset(dataset_description["files"])
self._download_files(
dataset_description["files"],
dataset_dir,
"",
d,
annex,
dataset_size,
is_private,
)
if dataset_description["components_list"]:
self._download_components(
dataset_description["components_list"],
dataset_dir,
"",
d,
annex,
dataset_size,
is_private,
)
dataset_size, dataset_unit = humanize.naturalsize(sum(dataset_size)).split(
" ",
)
dataset_description["distributions"][0]["size"] = float(dataset_size)
dataset_description["distributions"][0]["unit"]["value"] = dataset_unit
# Add .conp-osf-crawler.json tracker file
_create_osf_tracker(
os.path.join(dataset_dir, ".conp-osf-crawler.json"),
dataset_description,
)
return True
def get_readme_content(self, dataset):
readme_content = (
f'# {dataset["title"]}\n\nCrawled from [OSF]({dataset["homepage"]})'
)
if "description" in dataset and dataset["description"]:
readme_content += f'\n\n## Description\n\n{dataset["description"]}'
if "identifier" in dataset and dataset["identifier"]:
readme_content += f'\n\n## DOI: {dataset["identifier"]["identifier"]}'
if "wiki" in dataset and dataset["wiki"]:
readme_content += f'\n\n## WIKI\n\n{dataset["wiki"]}'
return readme_content
def _setup_private_dataset(
self,
files_url: str,
dataset_dir: str,
annex: Callable,
dataset: Dataset,
) -> bool:
# Check if the dataset is indeed private
if self._is_private_dataset(files_url):
if self.verbose:
                print(
                    "Dataset is private, creating OSF provider and making git annex autoenable the datalad remote",
)
# Create OSF provider file and needed directories and don't annex the file
datalad_dir: str = os.path.join(dataset_dir, ".datalad")
if not os.path.exists(datalad_dir):
os.mkdir(datalad_dir)
providers_dir: str = os.path.join(datalad_dir, "providers")
if not os.path.exists(providers_dir):
os.mkdir(providers_dir)
osf_config_path: str = os.path.join(providers_dir, "OSF.cfg")
with open(osf_config_path, "w") as f:
f.write(
"""[provider:OSF]
url_re = .*osf\\.io.*
authentication_type = bearer_token
credential = OSF
[credential:OSF]
# If known, specify URL or email to how/where to request credentials
# url = ???
type = token"""
)
dataset.no_annex(os.path.join("**", "OSF.cfg"))
# Make git annex autoenable datalad remote
annex(
"initremote",
"datalad",
"externaltype=datalad",
"type=external",
"encryption=none",
"autoenable=true",
)
            # Set OSF token as an environment variable for authentication
os.environ["DATALAD_OSF_token"] = self.osf_token
# Save changes
dataset.save()
return True
return False
def _is_private_dataset(self, files_url) -> bool:
        return requests.get(files_url).status_code == 401
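# Hedged usage sketch (argument values are placeholders; the CLI handling and
# the GitHub/DataLad plumbing live in BaseCrawler, which is not shown here):
if __name__ == "__main__":
    crawler = OSFCrawler(
        github_token="<github token>",
        config_path="config.json",  # may contain {"osf_token": "..."}
        verbose=True,
        force=False,
        no_pr=True,
        basedir=".",
    )
    descriptions = crawler.get_all_dataset_description()
    print(f"Found {len(descriptions)} CONP-tagged OSF datasets")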
| 36.277592
| 117
| 0.487692
|
b0b21ca1e91460afa559f9cc2002af13cc207631
| 3,678
|
py
|
Python
|
colour_demosaicing/bayer/demosaicing/bilinear.py
|
jewfro-cuban/colour-demosaicing
|
fcdb5fd769d611a440b804340e735bf0ee222b51
|
[
"BSD-3-Clause"
] | null | null | null |
colour_demosaicing/bayer/demosaicing/bilinear.py
|
jewfro-cuban/colour-demosaicing
|
fcdb5fd769d611a440b804340e735bf0ee222b51
|
[
"BSD-3-Clause"
] | null | null | null |
colour_demosaicing/bayer/demosaicing/bilinear.py
|
jewfro-cuban/colour-demosaicing
|
fcdb5fd769d611a440b804340e735bf0ee222b51
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Bilinear Bayer CFA Demosaicing
==============================
*Bayer* CFA (Colour Filter Array) bilinear demosaicing.
References
----------
- :cite:`Losson2010c` : Losson, O., Macaire, L., & Yang, Y. (2010).
Comparison of Color Demosaicing Methods. In Advances in Imaging and
Electron Physics (Vol. 162, pp. 173-265). doi:10.1016/S1076-5670(10)62005-8
"""
from __future__ import division, unicode_literals
from scipy.ndimage.filters import convolve
from colour.utilities import as_float_array, tstack
from colour_demosaicing.bayer import masks_CFA_Bayer
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2019 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['demosaicing_CFA_Bayer_bilinear']
def demosaicing_CFA_Bayer_bilinear(CFA, pattern='RGGB'):
"""
Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using
bilinear interpolation.
Parameters
----------
CFA : array_like
*Bayer* CFA.
pattern : unicode, optional
**{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**,
Arrangement of the colour filters on the pixel array.
Returns
-------
ndarray
*RGB* colourspace array.
Notes
-----
- The definition output is not clipped in range [0, 1] : this allows for
direct HDRI / radiance image generation on *Bayer* CFA data and post
demosaicing of the high dynamic range data as showcased in this
`Jupyter Notebook <https://github.com/colour-science/colour-hdri/\
blob/develop/colour_hdri/examples/\
examples_merge_from_raw_files_with_post_demosaicing.ipynb>`_.
References
----------
:cite:`Losson2010c`
Examples
--------
>>> import numpy as np
>>> CFA = np.array(
... [[0.30980393, 0.36078432, 0.30588236, 0.3764706],
... [0.35686275, 0.39607844, 0.36078432, 0.40000001]])
>>> demosaicing_CFA_Bayer_bilinear(CFA)
array([[[ 0.69705884, 0.17941177, 0.09901961],
[ 0.46176472, 0.4509804 , 0.19803922],
[ 0.45882354, 0.27450981, 0.19901961],
[ 0.22941177, 0.5647059 , 0.30000001]],
<BLANKLINE>
[[ 0.23235295, 0.53529412, 0.29705883],
[ 0.15392157, 0.26960785, 0.59411766],
[ 0.15294118, 0.4509804 , 0.59705884],
[ 0.07647059, 0.18431373, 0.90000002]]])
>>> CFA = np.array(
... [[0.3764706, 0.360784320, 0.40784314, 0.3764706],
... [0.35686275, 0.30980393, 0.36078432, 0.29803923]])
>>> demosaicing_CFA_Bayer_bilinear(CFA, 'BGGR')
array([[[ 0.07745098, 0.17941177, 0.84705885],
[ 0.15490197, 0.4509804 , 0.5882353 ],
[ 0.15196079, 0.27450981, 0.61176471],
[ 0.22352942, 0.5647059 , 0.30588235]],
<BLANKLINE>
[[ 0.23235295, 0.53529412, 0.28235295],
[ 0.4647059 , 0.26960785, 0.19607843],
[ 0.45588237, 0.4509804 , 0.20392157],
[ 0.67058827, 0.18431373, 0.10196078]]])
"""
CFA = as_float_array(CFA)
R_m, G_m, B_m = masks_CFA_Bayer(CFA.shape, pattern)
H_G = as_float_array(
[[0, 1, 0],
[1, 4, 1],
[0, 1, 0]]) / 4 # yapf: disable
H_RB = as_float_array(
[[1, 2, 1],
[2, 4, 2],
[1, 2, 1]]) / 4 # yapf: disable
R = convolve(CFA * R_m, H_RB)
G = convolve(CFA * G_m, H_G)
B = convolve(CFA * B_m, H_RB)
del R_m, G_m, B_m, H_RB, H_G
return tstack([R, G, B])
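# Hedged round-trip sketch on synthetic data; `mosaicing_CFA_Bayer` is assumed
# to be importable from the same package to build a CFA from an RGB image.
if __name__ == '__main__':
    import numpy as np
    from colour_demosaicing import mosaicing_CFA_Bayer
    RGB = np.random.rand(4, 4, 3)
    CFA = mosaicing_CFA_Bayer(RGB, pattern='RGGB')
    RGB_b = demosaicing_CFA_Bayer_bilinear(CFA, pattern='RGGB')
    print(RGB_b.shape)  # (4, 4, 3)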
| 31.982609
| 79
| 0.600598
|
fd2a9bc2808019d4667810c7810bb50831a960ca
| 274
|
py
|
Python
|
Chapter05_code/Ch05_R03/my_module_ch15r03/models.py
|
PacktPublishing/Odoo-Development-Cookbook
|
5553110c0bc352c4541f11904e236cad3c443b8b
|
[
"MIT"
] | 55
|
2016-05-23T16:05:50.000Z
|
2021-07-19T00:16:46.000Z
|
Chapter05_code/Ch05_R03/my_module_ch15r03/models.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 1
|
2016-12-09T02:14:21.000Z
|
2018-07-02T09:02:20.000Z
|
Chapter05_code/Ch05_R03/my_module_ch15r03/models.py
|
kogkog098/Odoo-Development-Cookbook
|
166c9b98efbc9108b30d719213689afb1f1c294d
|
[
"MIT"
] | 52
|
2016-06-01T20:03:59.000Z
|
2020-10-31T23:58:25.000Z
|
# coding: utf-8
from openerp import models, api
class LibraryBook(models.Model):
_inherit = 'library.book'
@api.model
def get_all_library_members(self):
library_member_model = self.env['library.member']
return library_member_model.search([])
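# Hedged usage sketch (illustrative): with this module installed, the helper
# can be called from any code with ORM access, e.g. a server action:
#
#     members = env['library.book'].get_all_library_members()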
| 21.076923
| 57
| 0.70073
|
17523664b6d4e13caf5234fd0d49d6eaf441c9f7
| 1,558
|
py
|
Python
|
common/framework_excuter/tensorflow_excute.py
|
wavelet2008/rknn-v5
|
16288a88844e887634f74df8f43fff9b82f4ba62
|
[
"Apache-2.0"
] | 11
|
2022-02-24T10:44:54.000Z
|
2022-03-31T03:40:21.000Z
|
common/framework_excuter/tensorflow_excute.py
|
wavelet2008/rknn-v5
|
16288a88844e887634f74df8f43fff9b82f4ba62
|
[
"Apache-2.0"
] | 1
|
2022-03-01T07:21:04.000Z
|
2022-03-31T11:03:47.000Z
|
common/framework_excuter/tensorflow_excute.py
|
wavelet2008/rknn-v5
|
16288a88844e887634f74df8f43fff9b82f4ba62
|
[
"Apache-2.0"
] | 5
|
2022-03-18T09:05:50.000Z
|
2022-03-30T07:35:55.000Z
|
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
class Tensorflow_model_container:
def __init__(self, model_path, inputs, outputs) -> None:
self.input_names = []
        for item in inputs:
self.input_names.append('import/' + item + ':0')
self.output_names = []
for item in outputs:
self.output_names.append('import/' + item + ':0')
self.sess = tf.compat.v1.Session()
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
self.sess.graph.as_default()
tf.import_graph_def(graph_def)
# tensor_name_list = [tensor.name for tensor in tf.compat.v1.get_default_graph().as_graph_def().node]
# print(tensor_name_list)
self.tf_inputs = list()
for _name in self.input_names:
in_tensor = self.sess.graph.get_tensor_by_name(_name)
self.tf_inputs.append(in_tensor)
self.tf_outputs = list()
for _name in self.output_names:
out_tensor = self.sess.graph.get_tensor_by_name(_name)
self.tf_outputs.append(out_tensor)
def run(self, input_datas):
feed_dict = {}
for i in range(len(self.tf_inputs)):
feed_dict[self.tf_inputs[i]] = input_datas[i]
out_res = self.sess.run(self.tf_outputs, feed_dict=feed_dict)
return out_res
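# Hedged usage sketch: the model path, node names and input shape below are
# placeholders, and the frozen graph is assumed to take a single NHWC tensor.
if __name__ == '__main__':
    import numpy as np
    model = Tensorflow_model_container(
        'frozen_graph.pb',
        inputs=['input_tensor'],    # graph node names, without the ':0' suffix
        outputs=['output_tensor'],
    )
    outputs = model.run([np.zeros((1, 224, 224, 3), dtype=np.float32)])
    print([out.shape for out in outputs])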
| 37.095238
| 113
| 0.621951
|
dea5c85f3f89a8e76c1e46b841bc397777b785f6
| 2,606
|
py
|
Python
|
turn_in/TSPAllVisited.py
|
OSU-CS-325/Project_Four_TSP
|
c88e496b755fa5dfc3220f68a3daa3eba2e57e2e
|
[
"MIT"
] | null | null | null |
turn_in/TSPAllVisited.py
|
OSU-CS-325/Project_Four_TSP
|
c88e496b755fa5dfc3220f68a3daa3eba2e57e2e
|
[
"MIT"
] | null | null | null |
turn_in/TSPAllVisited.py
|
OSU-CS-325/Project_Four_TSP
|
c88e496b755fa5dfc3220f68a3daa3eba2e57e2e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import re, sys
# usage: python TSPAllVisited.py input_file output_file
def main(input_file, output_file):
input_point_labels = read_input_vals(input_file)
output_point_labels = read_output_vals(output_file)
problems = check_match(input_point_labels, output_point_labels)
if( len(problems) == 0):
print('Each item appears to exist in both the input file and the output file.')
else:
print('possible problems include:\n')
for each in problems:
print(problems[each])
def read_input_vals(in_file):
	# each line of in_file should have a label as its first int,
# this captures a list of those labels
# (expected from 0 to n - 1, but only uniqueness is necessary)
file = open(in_file,'r')
line = file.readline()
	# points collects the label found at the start of each line
points = []
while len(line) > 1:
line_parse = re.findall(r'[^,;\s]+', line)
points.append(int(line_parse[0]))
line = file.readline()
file.close()
points = sorted(points)
return points
def read_output_vals(out_file):
	# each line of out_file should have a label as its first int,
# this captures a list of those labels
# (expected from 0 to n - 1, but only uniqueness is necessary)
file = open(out_file,'r')
# toss the first line, which should contain a total
file.readline()
line = file.readline()
	# points collects the label found at the start of each line
points = []
while len(line) > 1:
line_parse = re.findall(r'[^,;\s]+', line)
points.append(int(line_parse[0]))
line = file.readline()
file.close()
points = sorted(points)
return points
def check_match(list_a, list_b):
problems = dict()
if(len(list_a) != len(list_b) ):
problems[-1] = ('Different number of points in the files, so they cannot match.')
#smaller = min(len(list_a), len(list_b) )
offset_a = 0
offset_b = 0
problem_count = 0
while (offset_a < len(list_a) ) and (offset_b < len(list_b) ):
item_a = list_a[offset_a]
item_b = list_b[offset_b]
#print(str(item_a) + ', ' + str(item_b) )
		if(item_a < item_b):
			# item_a is in the input but was never reached in the output
			problem = (str(item_a) + ' is in the input but missing from the output.')
			problems[item_a] = problem
			offset_a += 1
			problem_count += 1
		elif(item_a > item_b):
			# item_b appears in the output but does not exist in the input
			problem = (str(item_b) + ' is in the output but not in the input.')
			problems[item_b] = problem
			offset_b += 1
			problem_count += 1
else:
offset_a += 1
offset_b += 1
return problems
if __name__ == '__main__':
	main(sys.argv[1], sys.argv[2])
| 25.300971
| 92
| 0.686109
|
3eee4026a225d87ce1ac42ec074b4f9ccbd2a06c
| 1,460
|
py
|
Python
|
twitchat/settings.py
|
Fittiboy/twitchat
|
c82341675f5eb7ce49c06f41f0f71ecf07bdcdea
|
[
"MIT"
] | 6
|
2021-01-11T05:50:03.000Z
|
2022-03-24T01:55:41.000Z
|
twitchat/settings.py
|
Fittiboy/twitchat
|
c82341675f5eb7ce49c06f41f0f71ecf07bdcdea
|
[
"MIT"
] | 4
|
2020-07-30T19:39:26.000Z
|
2021-06-12T20:08:54.000Z
|
twitchat/settings.py
|
Fittiboy/python-twitch-bot
|
b810688bb0d24bf059a228e771eb2aae91cc43d0
|
[
"MIT"
] | 1
|
2021-08-04T17:35:23.000Z
|
2021-08-04T17:35:23.000Z
|
import json
from twitchat.permissions import permissions
def main():
try:
with open('settings.json') as settings_file:
settings = json.load(settings_file)
except FileNotFoundError:
settings = {}
try:
open('timers.json').close()
except FileNotFoundError:
with open('timers.json', 'w') as timers_file:
json.dump({}, timers_file, indent=4)
try:
open('extra_commands.py').close()
except FileNotFoundError:
open('extra_commands.py', 'w').close()
try:
open('permissions.json').close()
except FileNotFoundError:
with open('permissions.json', 'w') as permissions_file:
json.dump(permissions, permissions_file, indent=4)
set_setting(settings, 'username', 'Username: ')
set_setting(settings, 'client_id', 'Client-ID: ')
set_setting(settings, 'token', 'Token: ')
set_setting(settings, 'channel', 'Channel: ')
settings['keepalive'] = 300
with open('settings.json', 'w') as settings_file:
json.dump(settings, settings_file, indent=4)
def set_setting(settings, setting, prompt):
    choice = input(prompt)
    if not choice:
        print("You have not entered a value. " +
              "If you want to leave this blank, " +
              "just hit enter again")
        choice = input(prompt)
if setting == "channel":
choice = choice.lower()
settings[setting] = choice
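# Illustrative note (values are whatever the user typed; keepalive is fixed):
# after main() completes, the generated settings.json looks roughly like
#
#     {
#         "username": "mybot",
#         "client_id": "abc123",
#         "token": "oauth:xyz",
#         "channel": "mychannel",
#         "keepalive": 300
#     }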
| 31.06383
| 64
| 0.603425
|
f7d5da05cbb9a7018163ed4a345cc1ee09ccb0a7
| 1,794
|
py
|
Python
|
src/sounds.py
|
ScampyOwl/robolab-group138
|
c5151bc3c541d49d8ebc2fdb74eb2703f0cb5685
|
[
"MIT"
] | null | null | null |
src/sounds.py
|
ScampyOwl/robolab-group138
|
c5151bc3c541d49d8ebc2fdb74eb2703f0cb5685
|
[
"MIT"
] | null | null | null |
src/sounds.py
|
ScampyOwl/robolab-group138
|
c5151bc3c541d49d8ebc2fdb74eb2703f0cb5685
|
[
"MIT"
] | 1
|
2020-08-20T14:11:50.000Z
|
2020-08-20T14:11:50.000Z
|
import ev3dev.ev3 as ev3
class Sounds:
def __init__(self):
self.sounds = ev3.Sound
def say_red(self):
self.sounds.speak("red").wait()
def say_blue(self):
self.sounds.speak("blue").wait()
def say_white(self):
self.sounds.speak("white").wait()
def say_black(self):
self.sounds.speak("black").wait()
def say_obstacle(self):
self.sounds.speak("rrrr").wait()
def obstacle_mel(self):
self.sounds.tone([(550, 50, 20), (500, 50, 20), (450, 50, 20), (400, 50, 20)]).wait()
def down(self):
self.sounds.tone([(550, 150, 50), (500, 150, 50), (450, 150, 50), (400, 150, 50),
(350, 150, 50), (300, 150, 50), (250, 150, 50), (200, 150, 50),
(150, 150, 50), (100, 150, 50), (90, 150, 50), (80, 150, 50)]).wait()
def test(self):
self.sounds.play_song((
            ('D4', 'e3'),  # intro anacrusis
('A4', 'h.'),
)).wait()
def victory(self):
self.sounds.play_song((('F3', 'q'), ('A3', 'q'), ('C4', 'q'), ('F4', 'h'), )).wait()
def sound_obstacle(self):
self.sounds.play("/home/robot/src/src/zonk.wav").wait()
def sound_startup(self):
self.sounds.play("/home/robot/src/src/startup.wav").wait()
def sound_shutdown(self):
self.sounds.play("/home/robot/src/src/shutdown.wav").wait()
def say_coordinate(self, position, direction):
position_x = position[0]
position_y = position[1]
self.sounds.speak("new position ").wait()
self.sounds.speak(position_x).wait()
self.sounds.speak("and").wait()
self.sounds.speak(position_y).wait()
self.sounds.speak("direction is").wait()
self.sounds.speak(direction).wait()
| 30.931034
| 95
| 0.549052
|
8721c6f17c18aa6186367933fcc9b2fb9befd4fe
| 6,224
|
py
|
Python
|
lib/neovim/msgpack_rpc/event_loop/base.py
|
nicholas-zww/ActualVim
|
e9a1c74411748a8e68c7436a62cea846f25411d7
|
[
"MIT"
] | 849
|
2017-03-28T14:20:24.000Z
|
2022-03-29T14:10:37.000Z
|
lib/neovim/msgpack_rpc/event_loop/base.py
|
nicholas-zww/ActualVim
|
e9a1c74411748a8e68c7436a62cea846f25411d7
|
[
"MIT"
] | 113
|
2017-03-27T14:13:55.000Z
|
2020-06-21T00:40:21.000Z
|
lib/neovim/msgpack_rpc/event_loop/base.py
|
nicholas-zww/ActualVim
|
e9a1c74411748a8e68c7436a62cea846f25411d7
|
[
"MIT"
] | 40
|
2017-05-29T00:37:03.000Z
|
2022-02-22T09:11:33.000Z
|
"""Common code for event loop implementations."""
import signal
import threading
# When signals are restored, the event loop library may reset SIGINT to SIG_DFL
# which exits the program. To be able to restore the python interpreter to its
# default state, we keep a reference to the default handler
default_int_handler = signal.getsignal(signal.SIGINT)
main_thread = threading.current_thread()
class BaseEventLoop(object):
"""Abstract base class for all event loops.
Event loops act as the bottom layer for Nvim sessions created by this
library. They hide system/transport details behind a simple interface for
reading/writing bytes to the connected Nvim instance.
This class exposes public methods for interacting with the underlying
event loop and delegates implementation-specific work to the following
methods, which subclasses are expected to implement:
- `_init()`: Implementation-specific initialization
- `_connect_tcp(address, port)`: connect to Nvim using tcp/ip
    - `_connect_socket(path)`: Same as tcp, but use a UNIX domain socket or
      named pipe.
- `_connect_stdio()`: Use stdin/stdout as the connection to Nvim
- `_connect_child(argv)`: Use the argument vector `argv` to spawn an
      embedded Nvim that has its stdin/stdout connected to the event loop.
- `_start_reading()`: Called after any of _connect_* methods. Can be used
to perform any post-connection setup or validation.
    - `_send(data)`: Send `data` (byte array) to Nvim. The data is only
      actually sent when the event loop is running.
    - `_run()`: Runs the event loop until stopped or the connection is closed,
      calling the following methods when some event happens:
- `_on_data(data)`: When Nvim sends some data.
- `_on_signal(signum)`: When a signal is received.
      - `_on_error(message)`: When a non-recoverable error occurs (e.g.
        connection lost)
- `_stop()`: Stop the event loop
    - `_interrupt(data)`: Like `stop()`, but may be called from other threads
      than this one.
    - `_setup_signals(signals)`: Add implementation-specific listeners
      for `signals`, which is a list of OS-specific signal numbers.
- `_teardown_signals()`: Removes signal listeners set by `_setup_signals`
"""
def __init__(self, transport_type, *args):
"""Initialize and connect the event loop instance.
The only arguments are the transport type and transport-specific
configuration, like this:
>>> BaseEventLoop('tcp', '127.0.0.1', 7450)
Traceback (most recent call last):
...
AttributeError: 'BaseEventLoop' object has no attribute '_init'
>>> BaseEventLoop('socket', '/tmp/nvim-socket')
Traceback (most recent call last):
...
AttributeError: 'BaseEventLoop' object has no attribute '_init'
>>> BaseEventLoop('stdio')
Traceback (most recent call last):
...
AttributeError: 'BaseEventLoop' object has no attribute '_init'
>>> BaseEventLoop('child', ['nvim', '--embed', '-u', 'NONE'])
Traceback (most recent call last):
...
AttributeError: 'BaseEventLoop' object has no attribute '_init'
This calls the implementation-specific initialization
`_init`, one of the `_connect_*` methods(based on `transport_type`)
and `_start_reading()`
"""
self._transport_type = transport_type
self._signames = dict((k, v) for v, k in signal.__dict__.items()
if v.startswith('SIG'))
self._on_data = None
self._error = None
self._init()
getattr(self, '_connect_{}'.format(transport_type))(*args)
self._start_reading()
def connect_tcp(self, address, port):
"""Connect to tcp/ip `address`:`port`. Delegated to `_connect_tcp`."""
self._connect_tcp(address, port)
def connect_socket(self, path):
"""Connect to socket at `path`. Delegated to `_connect_socket`."""
self._connect_socket(path)
def connect_stdio(self):
"""Connect using stdin/stdout. Delegated to `_connect_stdio`."""
self._connect_stdio()
def connect_child(self, argv):
"""Connect a new Nvim instance. Delegated to `_connect_child`."""
self._connect_child(argv)
def send(self, data):
"""Queue `data` for sending to Nvim."""
self._send(data)
def threadsafe_call(self, fn):
"""Call a function in the event loop thread.
This is the only safe way to interact with a session from other
threads.
"""
self._threadsafe_call(fn)
def run(self, data_cb):
"""Run the event loop."""
if self._error:
err = self._error
if isinstance(self._error, KeyboardInterrupt):
# KeyboardInterrupt is not destructive(it may be used in
# the REPL).
# After throwing KeyboardInterrupt, cleanup the _error field
# so the loop may be started again
self._error = None
raise err
self._on_data = data_cb
if threading.current_thread() == main_thread:
self._setup_signals([signal.SIGINT, signal.SIGTERM])
self._run()
if threading.current_thread() == main_thread:
self._teardown_signals()
signal.signal(signal.SIGINT, default_int_handler)
self._on_data = None
def stop(self):
"""Stop the event loop."""
self._stop()
def _on_signal(self, signum):
msg = 'Received {}'.format(self._signames[signum])
if signum == signal.SIGINT and self._transport_type == 'stdio':
# When the transport is stdio, we are probably running as a Nvim
# child process. In that case, we don't want to be killed by
# ctrl+C
return
cls = Exception
if signum == signal.SIGINT:
cls = KeyboardInterrupt
self._error = cls(msg)
self.stop()
def _on_error(self, error):
self._error = IOError(error)
self.stop()
def _on_interrupt(self):
self.stop()
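A toy subclass sketch (not part of the original file) that fills in the abstract hooks with an in-memory "echo" transport, just to make the contract in the class docstring concrete; every name introduced here is hypothetical:
# Illustrative only: minimal concrete event loop for demonstration purposes.
class DummyEventLoop(BaseEventLoop):
    def _init(self):
        self._queue = []         # buffered outgoing data
        self._running = False

    def _connect_child(self, argv):
        pass                     # nothing to spawn; the queue is the transport

    def _start_reading(self):
        pass

    def _send(self, data):
        self._queue.append(data)

    def _run(self):
        # Deliver everything that was queued back to the data callback.
        self._running = True
        while self._running and self._queue:
            self._on_data(self._queue.pop(0))

    def _stop(self):
        self._running = False

    def _threadsafe_call(self, fn):
        fn()

    def _setup_signals(self, signals):
        pass

    def _teardown_signals(self):
        pass

# loop = DummyEventLoop('child', ['nvim', '--embed'])
# loop.send(b'ping')
# loop.run(lambda data: print('received', data))   # prints: received b'ping'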
| 39.643312
| 79
| 0.640585
|
127cf076f5ddcb96abb8c134a7ecf580e6db5f50
| 698
|
py
|
Python
|
base/forms/order_form.py
|
geek911/hospitalmanagement
|
32ace7a10cfbd919a39e2101ae60bf2633224788
|
[
"MIT"
] | null | null | null |
base/forms/order_form.py
|
geek911/hospitalmanagement
|
32ace7a10cfbd919a39e2101ae60bf2633224788
|
[
"MIT"
] | null | null | null |
base/forms/order_form.py
|
geek911/hospitalmanagement
|
32ace7a10cfbd919a39e2101ae60bf2633224788
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm
from django.forms import TextInput, NumberInput, EmailInput
from base.models.order import Order
class OrderForm(ModelForm):
class Meta:
model = Order
fields = '__all__'
widgets = {
            'full_name': TextInput(attrs={'class': 'form-control', 'id': 'full_name', 'placeholder': 'Enter Full Name'}),
            'email': EmailInput(attrs={'class': 'form-control', 'id': 'email', 'placeholder': 'Enter email address'}),
            'phn_number': NumberInput(attrs={'class': 'form-control', 'id': 'phn_number'}),
            'address': TextInput(attrs={'class': 'form-control', 'id': 'address', 'placeholder': 'Enter address'})
}
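A short usage sketch (not part of the original file) showing how a ModelForm like this is typically wired into a view; the view and template names are hypothetical:
# Illustrative only: create-or-redisplay pattern for OrderForm.
from django.shortcuts import render, redirect

def create_order(request):
    form = OrderForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()                        # persists a new Order instance
        return redirect('order-list')      # hypothetical URL name
    return render(request, 'order_form.html', {'form': form})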
| 36.736842
| 117
| 0.618911
|
21edc83388c8bd732ef7b2dbccdfc37e4c9272b3
| 21,440
|
py
|
Python
|
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
esouthren/tfjs
|
b473e3c30b7910a154158374e93cc703fb3d6ece
|
[
"Apache-2.0"
] | 1
|
2021-10-10T12:44:35.000Z
|
2021-10-10T12:44:35.000Z
|
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
orta/tfjs
|
ee8b2ae9d16328e63cfe5ad287cf19eb1ef2cb2f
|
[
"Apache-2.0"
] | 49
|
2020-09-07T07:37:04.000Z
|
2022-03-02T05:33:40.000Z
|
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
rriveros/Tensorflowjs
|
26de95605ea5b72bf6f46b11adefa0ea1ebdacb7
|
[
"Apache-2.0"
] | 1
|
2021-11-05T04:33:49.000Z
|
2021-11-05T04:33:49.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for artifact conversion to and from Tensorflow SavedModel v2."""
import base64
import glob
import json
import os
import shutil
import sys
import tempfile
import unittest
import tensorflow as tf
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
from tensorflowjs import version
from tensorflowjs.converters import tf_saved_model_conversion_v2
SAVED_MODEL_DIR = 'saved_model'
HUB_MODULE_DIR = 'hub_module'
class ConvertTest(tf.test.TestCase):
def setUp(self):
super(ConvertTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(ConvertTest, self).tearDown()
def _create_saved_model_v1(self):
"""Create a TensorFlow SavedModel for testing."""
graph = tf.Graph()
with graph.as_default():
x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
y = tf.compat.v1.matmul(x, w)
output = tf.compat.v1.nn.softmax(y)
init_op = w.initializer
# Create a builder.
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_dir)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
"serving_default":
tf.compat.v1.saved_model \
.signature_def_utils.predict_signature_def(
inputs={"x": x},
outputs={"output": output})
},
assets_collection=None)
builder.save()
def _create_saved_model_v1_with_hashtable(self):
"""Create a TensorFlow SavedModel V1 with unused hash table for testing."""
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder('float32', [2, 2])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
output = tf.compat.v1.matmul(x, w)
init_op = w.initializer
# Add a hash table that is not used by the output.
keys = tf.constant(['key'])
values = tf.constant([1])
initializer = tf.contrib.lookup.KeyValueTensorInitializer(keys, values)
table = tf.contrib.lookup.HashTable(initializer, -1)
# Create a builder.
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_dir)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
table.init.run()
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
"serving_default":
tf.compat.v1.saved_model \
.signature_def_utils.predict_signature_def(
inputs={"x": x},
outputs={"output": output})
},
assets_collection=None)
builder.save()
def _create_saved_model_with_fusable_conv2d(self):
"""Test a basic model with fusable conv2d."""
layers = [
tf.keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=False),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU()
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_prelu(self):
"""Test a basic model with fusable conv2d."""
layers = [
tf.keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=True),
tf.keras.layers.PReLU()
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_saved_model_with_control_flow(self):
"""Test a basic model with control flow to inlined."""
@tf.function
def find_next_odd(v):
v1 = v + 1
while tf.equal(v1 % 2, 0):
v1 = v1 + 1
return v1
root = tracking.AutoTrackable()
root.f = find_next_odd
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.int32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_unsupported_saved_model(self):
root = tracking.AutoTrackable()
root.w = variables.Variable(tf.random.uniform([2, 2]))
@def_function.function
def exported_function(x):
root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]])
root.y = tf.matmul(root.x, root.w)
# unsupported op: linalg.diag
root.z = tf.linalg.diag(root.y)
return root.z * x
root.f = exported_function
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_saved_model_with_debug_ops(self):
root = tracking.AutoTrackable()
root.w = variables.Variable(tf.random.uniform([2, 2]))
@def_function.function
def exported_function(x):
root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]])
root.y = tf.matmul(root.x, root.w)
tf.compat.v1.Print(root.x, [root.x])
tf.compat.v1.Assert(tf.greater(tf.reduce_max(root.x), 0), [root.x])
tf.compat.v1.check_numerics(root.x, 'NaN found')
return root.y * x
root.f = exported_function
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_hub_module(self):
# Module function that doubles its input.
def double_module_fn():
w = tf.Variable([2.0, 4.0])
x = tf.compat.v1.placeholder(dtype=tf.float32)
hub.add_signature(inputs=x, outputs=x*w)
graph = tf.Graph()
with graph.as_default():
spec = hub.create_module_spec(double_module_fn)
m = hub.Module(spec)
# Export the module.
with tf.compat.v1.Session(graph=graph) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m.export(os.path.join(self._tmp_dir, HUB_MODULE_DIR), sess)
def test_convert_saved_model_v1(self):
self._create_saved_model_v1()
input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
output_dir = os.path.join(input_dir, 'js')
tf_saved_model_conversion_v2.convert_tf_saved_model(
input_dir,
output_dir
)
expected_weights_manifest = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js')
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, expected_weights_manifest)
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'graph-model')
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
self.assertEqual(model_json['generatedBy'],
tf.__version__)
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def test_convert_saved_model_v1_with_hashtable(self):
self._create_saved_model_v1_with_hashtable()
input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
output_dir = os.path.join(input_dir, 'js')
tf_saved_model_conversion_v2.convert_tf_saved_model(
input_dir,
output_dir
)
expected_weights_manifest = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js')
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, expected_weights_manifest)
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'graph-model')
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
self.assertEqual(model_json['generatedBy'],
tf.__version__)
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def test_convert_saved_model(self):
self._create_saved_model()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32',
'name': 'StatefulPartitionedCall/mul',
'shape': []}]}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(len(weights_manifest), len(weights))
if sys.version_info[0] < 3:
self.assertItemsEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertItemsEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
else:
self.assertCountEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertCountEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
def test_convert_saved_model_with_fused_conv2d(self):
self._create_saved_model_with_fusable_conv2d()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
)
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
nodes = model_json['modelTopology']['node']
fusedOp = None
for node in nodes:
self.assertTrue(not 'BatchNorm' in node['op'])
self.assertTrue(not 'Relu' in node['op'])
self.assertTrue(not 'BiasAdd' in node['op'])
if node['op'] == '_FusedConv2D':
fusedOp = node
self.assertTrue(fusedOp is not None)
self.assertEqual(
base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][0]),
b'BiasAdd')
self.assertEqual(
base64.b64decode(fusedOp['attr']['fused_ops']['list']['s'][1]),
b'Relu')
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'graph-model')
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
self.assertEqual(model_json['generatedBy'],
tf.__version__)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_with_prelu(self):
self._create_saved_model_with_prelu()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
)
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
nodes = model_json['modelTopology']['node']
prelu_op = None
fused_op = None
for node in nodes:
if node['op'] == 'Prelu':
prelu_op = node
if node['op'] == '_FusedConv2D':
fused_op = node
self.assertTrue(prelu_op is None)
self.assertTrue(fused_op is not None)
fused_ops = list(map(base64.b64decode,
fused_op['attr']['fused_ops']['list']['s']))
self.assertEqual(fused_ops, [b'BiasAdd', b'Prelu'])
self.assertEqual(fused_op['attr']['num_args']['i'], '2')
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'graph-model')
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
self.assertEqual(model_json['generatedBy'],
tf.__version__)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_with_control_flow(self):
self._create_saved_model_with_control_flow()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'int32', 'shape': [],
'name': 'StatefulPartitionedCall/while/loop_counter'},
{'dtype': 'int32', 'shape': [],
'name': 'StatefulPartitionedCall/while/maximum_iterations'
},
{'dtype': 'int32', 'shape': [],
'name': 'StatefulPartitionedCall/while/cond/_3/mod/y'},
{'dtype': 'int32', 'shape': [],
'name': 'StatefulPartitionedCall/while/cond/_3/Equal/y'},
{'dtype': 'int32', 'shape': [],
'name': 'StatefulPartitionedCall/while/body/_4/add_1/y'},
{'name': 'StatefulPartitionedCall/add/y',
'dtype': 'int32', 'shape': []}]}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(len(weights_manifest), len(weights))
if sys.version_info[0] < 3:
self.assertItemsEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertItemsEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
else:
self.assertCountEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertCountEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
# Check meta-data in the artifact JSON.
self.assertEqual(model_json['format'], 'graph-model')
self.assertEqual(
model_json['convertedBy'],
'TensorFlow.js Converter v%s' % version.version)
self.assertEqual(model_json['generatedBy'],
tf.__version__)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_optimizer_add_unsupported_op(self):
self._create_unsupported_saved_model()
with self.assertRaisesRegexp( # pylint: disable=deprecated-method
ValueError, r'^Unsupported Ops'):
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
)
def test_convert_saved_model_skip_op_check(self):
self._create_unsupported_saved_model()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR), skip_op_check=True
)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32',
'name': 'StatefulPartitionedCall/MatrixDiag',
'shape': [2, 2, 2]}]}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, weights)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
# (TODO: piyu) disable this test, need to change
# convert_variables_to_constants_v2 to set function_optimization=aggressive.
@unittest.skip('not supported')
def test_convert_saved_model_strip_debug_ops(self):
self._create_saved_model_with_debug_ops()
tf_saved_model_conversion_v2.convert_tf_saved_model(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
strip_debug_ops=True)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'dtype': 'float32',
'name': 'add',
'shape': [2, 2]
}]
}]
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, weights)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_hub_module_v1(self):
self._create_hub_module()
module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [2],
'name': 'module/Variable',
'dtype': 'float32'
}]
}]
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, weights)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_hub_module_v2(self):
self._create_saved_model()
module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf_saved_model_conversion_v2.convert_tf_hub_module(
module_path, tfjs_path, "serving_default", "serve")
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [],
'name': 'StatefulPartitionedCall/mul',
'dtype': 'float32'
}]
}]
# Check model.json and weights manifest.
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
model_json = json.load(f)
self.assertTrue(model_json['modelTopology'])
weights_manifest = model_json['weightsManifest']
self.assertEqual(weights_manifest, weights)
self.assertTrue(
glob.glob(
os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
if __name__ == '__main__':
tf.test.main()
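For reference, a minimal conversion sketch (not part of the original file) using the same API exercised by the tests above; the paths are placeholders:
# Illustrative only: convert a SavedModel directory to TensorFlow.js format.
from tensorflowjs.converters import tf_saved_model_conversion_v2

tf_saved_model_conversion_v2.convert_tf_saved_model(
    '/path/to/saved_model',     # directory produced by tf.saved_model.save
    '/path/to/web_model')       # output directory for model.json and *.bin shards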
| 37.157712
| 80
| 0.646315
|
2ccd578390715161e9978e81775557bdc6cd2200
| 1,242
|
py
|
Python
|
docs/conf.py
|
gfairbro/pycounts-gf
|
765295490614374e2d8717745670981fc4445aa0
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
gfairbro/pycounts-gf
|
765295490614374e2d8717745670981fc4445aa0
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
gfairbro/pycounts-gf
|
765295490614374e2d8717745670981fc4445aa0
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = u"pycounts"
copyright = u"2022, Gabriel Fairbrother"
author = u"Gabriel Fairbrother"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"autoapi.extension",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
autoapi_dirs = ["../src"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
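A short build sketch (not part of the original file) showing one way to build the HTML docs against this configuration; the source and output directories are assumptions:
# Illustrative only: programmatic equivalent of `sphinx-build -b html docs docs/_build/html`.
from sphinx.cmd.build import build_main

build_main(['-b', 'html', 'docs', 'docs/_build/html'])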
| 33.567568
| 78
| 0.646538
|
7619923447b8f9579af81381a97c4469ed88ed1f
| 10,063
|
py
|
Python
|
examples/train_pointconv.py
|
Ndersam/learning3d
|
2483054191111420c4cefc9f4e6c9db75bcb4866
|
[
"MIT"
] | null | null | null |
examples/train_pointconv.py
|
Ndersam/learning3d
|
2483054191111420c4cefc9f4e6c9db75bcb4866
|
[
"MIT"
] | null | null | null |
examples/train_pointconv.py
|
Ndersam/learning3d
|
2483054191111420c4cefc9f4e6c9db75bcb4866
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
import logging
import numpy
import numpy as np
import torch
import torch.utils.data
import torchvision
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
# Only if the files are in example folder.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if BASE_DIR[-8:] == 'examples':
sys.path.append(os.path.join(BASE_DIR, os.pardir))
os.chdir(os.path.join(BASE_DIR, os.pardir))
from learning3d.models import create_pointconv
from learning3d.models import Classifier
from learning3d.data_utils import ClassificationData, ModelNet40Data
def _init_(args):
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists('checkpoints/' + args.exp_name):
os.makedirs('checkpoints/' + args.exp_name)
if not os.path.exists('checkpoints/' + args.exp_name + '/' + 'models'):
os.makedirs('checkpoints/' + args.exp_name + '/' + 'models')
os.system('cp main.py checkpoints' + '/' + args.exp_name + '/' + 'main.py.backup')
os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
class IOStream:
def __init__(self, path):
self.f = open(path, 'a')
def cprint(self, text):
print(text)
self.f.write(text + '\n')
self.f.flush()
def close(self):
self.f.close()
def test_one_epoch(device, model, test_loader):
model.eval()
test_loss = 0.0
pred = 0.0
count = 0
for i, data in enumerate(tqdm(test_loader)):
points, target = data
target = target[:, 0]
points = points.to(device)
target = target.to(device)
output = model(points)
loss_val = torch.nn.functional.nll_loss(
torch.nn.functional.log_softmax(output, dim=1), target, size_average=False)
test_loss += loss_val.item()
count += output.size(0)
_, pred1 = output.max(dim=1)
ag = (pred1 == target)
am = ag.sum()
pred += am.item()
test_loss = float(test_loss) / count
accuracy = float(pred) / count
return test_loss, accuracy
def test(args, model, test_loader, textio):
test_loss, test_accuracy = test_one_epoch(args.device, model, test_loader)
textio.cprint('Validation Loss: %f & Validation Accuracy: %f' % (test_loss, test_accuracy))
def train_one_epoch(device, model, train_loader, optimizer):
model.train()
train_loss = 0.0
pred = 0.0
count = 0
for i, data in enumerate(tqdm(train_loader)):
points, target = data
target = target[:, 0]
points = points.to(device)
target = target.to(device)
output = model(points)
loss_val = torch.nn.functional.nll_loss(
torch.nn.functional.log_softmax(output, dim=1), target, size_average=False)
# print(loss_val.item())
# forward + backward + optimize
optimizer.zero_grad()
loss_val.backward()
optimizer.step()
train_loss += loss_val.item()
count += output.size(0)
_, pred1 = output.max(dim=1)
ag = (pred1 == target)
am = ag.sum()
pred += am.item()
train_loss = float(train_loss) / count
accuracy = float(pred) / count
return train_loss, accuracy
def train(args, model, train_loader, test_loader, boardio, textio, checkpoint):
learnable_params = filter(lambda p: p.requires_grad, model.parameters())
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(learnable_params)
else:
optimizer = torch.optim.SGD(learnable_params, lr=0.1)
    if checkpoint is not None:
        # Restore the optimizer state and the best loss seen so far, so resuming does not overwrite a better checkpoint.
        optimizer.load_state_dict(checkpoint['optimizer'])
        best_test_loss = checkpoint['min_loss']
    else:
        best_test_loss = np.inf
for epoch in range(args.start_epoch, args.epochs):
train_loss, train_accuracy = train_one_epoch(args.device, model, train_loader, optimizer)
test_loss, test_accuracy = test_one_epoch(args.device, model, test_loader)
if test_loss < best_test_loss:
best_test_loss = test_loss
snap = {'epoch': epoch + 1,
'model': model.state_dict(),
'min_loss': best_test_loss,
'optimizer': optimizer.state_dict(), }
torch.save(snap, 'checkpoints/%s/models/best_model_snap.t7' % (args.exp_name))
torch.save(model.state_dict(), 'checkpoints/%s/models/best_model.t7' % (args.exp_name))
torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/best_ptnet_model.t7' % (args.exp_name))
torch.save(snap, 'checkpoints/%s/models/model_snap.t7' % (args.exp_name))
torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % (args.exp_name))
torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/ptnet_model.t7' % (args.exp_name))
boardio.add_scalar('Train Loss', train_loss, epoch + 1)
boardio.add_scalar('Test Loss', test_loss, epoch + 1)
boardio.add_scalar('Best Test Loss', best_test_loss, epoch + 1)
boardio.add_scalar('Train Accuracy', train_accuracy, epoch + 1)
boardio.add_scalar('Test Accuracy', test_accuracy, epoch + 1)
        textio.cprint('EPOCH:: %d, Training Loss: %f, Testing Loss: %f, Best Loss: %f' % (
epoch + 1, train_loss, test_loss, best_test_loss))
textio.cprint(
            'EPOCH:: %d, Training Accuracy: %f, Testing Accuracy: %f' % (epoch + 1, train_accuracy, test_accuracy))
def options():
parser = argparse.ArgumentParser(description='Point Cloud Registration')
parser.add_argument('--exp_name', type=str, default='exp_classifier', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset_path', type=str, default='ModelNet40',
metavar='PATH', help='path to the input dataset') # like '/path/to/ModelNet40'
parser.add_argument('--eval', type=bool, default=False, help='Train or Evaluate the network.')
# settings for input data
parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
metavar='DATASET', help='dataset type (default: modelnet)')
parser.add_argument('--num_points', default=1024, type=int,
metavar='N', help='points in point-cloud (default: 1024)')
# settings for PointNet
parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
help='train pointnet (default: tune)')
parser.add_argument('--emb_dims', default=1024, type=int,
metavar='K', help='dim. of the feature vector (default: 1024)')
parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
help='symmetric function (default: max)')
# settings for on training
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('-j', '--workers', default=4, type=int,
metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch_size', default=32, type=int,
metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--epochs', default=200, type=int,
metavar='N', help='number of total epochs to run')
parser.add_argument('--start_epoch', default=0, type=int,
metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'],
metavar='METHOD', help='name of an optimizer (default: Adam)')
parser.add_argument('--resume', default='', type=str,
metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
parser.add_argument('--pretrained', default='', type=str,
metavar='PATH', help='path to pretrained model file (default: null (no-use))')
parser.add_argument('--device', default='cuda:0', type=str,
metavar='DEVICE', help='use CUDA if available')
args = parser.parse_args()
return args
def main():
args = options()
args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
_init_(args)
textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
textio.cprint(str(args))
trainset = ClassificationData(ModelNet40Data(train=True))
testset = ClassificationData(ModelNet40Data(train=False))
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True,
num_workers=args.workers)
test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False,
num_workers=args.workers)
if not torch.cuda.is_available():
args.device = 'cpu'
args.device = torch.device(args.device)
# Create PointConv Model.
PointConv = create_pointconv(classifier=False, pretrained=None)
ptconv = PointConv(emb_dims=args.emb_dims, classifier=False, pretrained=None)
model = Classifier(feature_model=ptconv)
checkpoint = None
if args.resume:
assert os.path.isfile(args.resume)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
if args.pretrained:
assert os.path.isfile(args.pretrained)
model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
model.to(args.device)
if args.eval:
test(args, model, test_loader, textio)
else:
train(args, model, train_loader, test_loader, boardio, textio, checkpoint)
if __name__ == '__main__':
main()
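A brief evaluation sketch (not part of the original file) that reuses only the functions defined above; it assumes the best-model checkpoint written by train() already exists:
# Illustrative only: score the ModelNet40 test split with a saved checkpoint.
def evaluate_best_checkpoint():
    args = options()
    args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    PointConv = create_pointconv(classifier=False, pretrained=None)
    ptconv = PointConv(emb_dims=args.emb_dims, classifier=False, pretrained=None)
    model = Classifier(feature_model=ptconv)
    model.load_state_dict(torch.load(
        'checkpoints/%s/models/best_model.t7' % args.exp_name, map_location='cpu'))
    model.to(args.device)
    test_loader = DataLoader(ClassificationData(ModelNet40Data(train=False)),
                             batch_size=args.batch_size, shuffle=False)
    loss, accuracy = test_one_epoch(args.device, model, test_loader)
    print('Test Loss: %f, Test Accuracy: %f' % (loss, accuracy))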
| 39.155642
| 119
| 0.639968
|
23fb6dd24d6c73465a38f0376c06b35eef330ef2
| 20,545
|
py
|
Python
|
flux_mito/model_4.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_4.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_4.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 0.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 40000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.115741
| 798
| 0.804089
|
964c24210d5ba89b8c2f858c1e866f42bccc8d9f
| 1,844
|
py
|
Python
|
setup.py
|
hgldarby/tablite
|
db083c496e8030595e73f1823e51142814994884
|
[
"MIT"
] | null | null | null |
setup.py
|
hgldarby/tablite
|
db083c496e8030595e73f1823e51142814994884
|
[
"MIT"
] | null | null | null |
setup.py
|
hgldarby/tablite
|
db083c496e8030595e73f1823e51142814994884
|
[
"MIT"
] | null | null | null |
"""
tablite
"""
build_tag = "cf5b524aa45416c38ee51aab61fa3c1e5e5f2740a126f7c7d73250c948d8b"
from setuptools import setup
from pathlib import Path
folder = Path(__file__).parent
file = "README.md"
readme = folder / file
assert isinstance(readme, Path)
assert readme.exists(), readme
with open(str(readme), encoding='utf-8') as f:
long_description = f.read()
keywords = list({
'table', 'tables', 'csv', 'txt', 'excel', 'xlsx', 'ods', 'zip', 'log',
'any', 'all', 'filter', 'column', 'columns', 'rows', 'from', 'json', 'to',
'inner join', 'outer join', 'left join', 'groupby', 'pivot', 'pivot table',
'sort', 'is sorted', 'show', 'use disk', 'out-of-memory', 'list on disk',
'stored list', 'min', 'max', 'sum', 'first', 'last', 'count', 'unique',
'average', 'standard deviation', 'median', 'mode', 'in-memory', 'index'
})
keywords.sort(key=lambda x: x.lower())
setup(
name="tablite",
version="2020.11.3.62707",
url="https://github.com/root-11/tablite",
license="MIT",
author="Bjorn Madsen",
author_email="bjorn.madsen@operationsresearchgroup.com",
description="A table crunching library",
long_description=long_description,
long_description_content_type='text/markdown',
keywords=keywords,
packages=["table"],
include_package_data=True,
data_files=[(".", ["LICENSE", "README.md"])],
platforms="any",
install_requires=[
'xlrd>=1.2.0',
'pyexcel-ods>=0.5.6',
'openpyxl>=3.0.5',
'pyperclip>=1.8.1',
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 30.229508
| 79
| 0.621475
|
007a8d06472c42b76cfe5e99c29fec355596527e
| 286
|
py
|
Python
|
COE/contents/resources/stone.py
|
Python-Project-Cheap-Empire/cheap-of-empire
|
44aaae29e4fadc9df46734f529031ce8c4bb3475
|
[
"MIT"
] | null | null | null |
COE/contents/resources/stone.py
|
Python-Project-Cheap-Empire/cheap-of-empire
|
44aaae29e4fadc9df46734f529031ce8c4bb3475
|
[
"MIT"
] | 2
|
2022-01-31T21:05:15.000Z
|
2022-01-31T21:08:11.000Z
|
COE/contents/resources/stone.py
|
Python-Project-Cheap-Empire/cheap-of-empire
|
44aaae29e4fadc9df46734f529031ce8c4bb3475
|
[
"MIT"
] | 1
|
2022-02-04T12:05:14.000Z
|
2022-02-04T12:05:14.000Z
|
from .resource import Resource
from COE.contents.entity import Entity
from .resource_type import ResourceType
class Stone(Resource, Entity):
def __init__(self, **kwargs):
Resource.__init__(self, r_type=ResourceType.STONE, **kwargs)
Entity.__init__(self, **kwargs)
| 28.6
| 68
| 0.741259
|
40501f16afda3b082412bce1e6e9eb9c1fdc2245
| 12,046
|
py
|
Python
|
src/psfmachine/aperture.py
|
SSDataLab/psfmachine
|
8bb5b6573cb80b0686cc361de38cdc1eec6cec68
|
[
"MIT"
] | 14
|
2020-10-07T17:50:00.000Z
|
2022-03-18T15:23:18.000Z
|
src/psfmachine/aperture.py
|
SSDataLab/psfmachine
|
8bb5b6573cb80b0686cc361de38cdc1eec6cec68
|
[
"MIT"
] | 43
|
2020-11-05T23:00:21.000Z
|
2022-03-29T18:32:46.000Z
|
src/psfmachine/aperture.py
|
SSDataLab/psfmachine
|
8bb5b6573cb80b0686cc361de38cdc1eec6cec68
|
[
"MIT"
] | 2
|
2020-10-26T21:01:29.000Z
|
2020-11-05T02:09:41.000Z
|
"""
Collection of aperture utils lifted from
[Kepler-Apertures](https://github.com/jorgemarpa/kepler-apertures) and adapted to work
with PSFMachine.
Some of these functions take a `Machine` object as input and operate on it, but we move
them out of `machine.py` to keep the latter somewhat clean and short.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from tqdm import tqdm
def optimize_aperture(
psf_model,
target_complete=0.9,
target_crowd=0.9,
max_iter=100,
percentile_bounds=[0, 100],
quiet=False,
):
"""
Function to optimize the aperture mask for a given source.
The optimization is done using scipy Brent's algorithm and it uses a custom
loss function `goodness_metric_obj_fun` that uses a Leaky ReLU term to
    achieve the target value for both metrics.
Parameters
----------
psf_model : scipy.sparce.csr_matrix
Sparse matrix with the PSF models for all targets in the scene. It has shape
[n_sources, n_pixels].
target_complete : float
Value of the target completeness metric.
target_crowd : float
Value of the target crowdeness metric.
max_iter : int
        Maximum number of iterations to be performed by the optimizer.
percentile_bounds : tuple
        Tuple of minimum and maximum allowed percentile values during
the optimization. Default is the widest range of [0, 100].
Returns
-------
optimal_percentile : numpy.ndarray
        An array with the percentile value that defines the "optimal" aperture for
each source.
"""
# optimize percentile cut for every source
optimal_percentile = []
for sdx in tqdm(
range(psf_model.shape[0]),
desc="Optimizing apertures per source",
disable=quiet,
):
optim_params = {
"percentile_bounds": percentile_bounds,
"target_complete": target_complete,
"target_crowd": target_crowd,
"max_iter": max_iter,
"psf_models": psf_model,
"sdx": sdx,
}
minimize_result = optimize.minimize_scalar(
goodness_metric_obj_fun,
method="Bounded",
bounds=percentile_bounds,
options={"maxiter": max_iter, "disp": False},
args=(optim_params),
)
optimal_percentile.append(minimize_result.x)
return np.array(optimal_percentile)
def goodness_metric_obj_fun(percentile, optim_params):
"""
The objective function to minimize with scipy.optimize.minimize_scalar called
during optimization of the photometric aperture.
Parameters
----------
percentile : int
Percentile of the normalized flux distribution that defines the isophote.
optim_params : dictionary
Dictionary with the variables needed to evaluate the metric:
* psf_models
* sdx
* target_complete
* target_crowd
Returns
-------
penalty : float
        Value of the objective function to be used for optimization.
"""
psf_models = optim_params["psf_models"]
sdx = optim_params["sdx"]
# Find the value where to cut
cut = np.nanpercentile(psf_models[sdx].data, percentile)
# create "isophot" mask with current cut
mask = (psf_models[sdx] > cut).toarray()[0]
# Do not compute and ignore if target score < 0
if optim_params["target_complete"] > 0:
# compute_FLFRCSAP returns an array of size 1 when doing only one source
completMetric = compute_FLFRCSAP(psf_models[sdx], mask)[0]
else:
completMetric = 1.0
# Do not compute and ignore if target score < 0
if optim_params["target_crowd"] > 0:
crowdMetric = compute_CROWDSAP(psf_models, mask, idx=sdx)
else:
crowdMetric = 1.0
# Once we hit the target we want to ease-back on increasing the metric
# However, we don't want to ease-back to zero pressure, that will
    # unconstrain the penalty term and cause the optimizer to run wild.
# So, use a "Leaky ReLU"
# metric' = threshold + (metric - threshold) * leakFactor
leakFactor = 0.01
if (
optim_params["target_complete"] > 0
and completMetric >= optim_params["target_complete"]
):
completMetric = optim_params["target_complete"] + leakFactor * (
completMetric - optim_params["target_complete"]
)
if optim_params["target_crowd"] > 0 and crowdMetric >= optim_params["target_crowd"]:
crowdMetric = optim_params["target_crowd"] + leakFactor * (
crowdMetric - optim_params["target_crowd"]
)
penalty = -(completMetric + crowdMetric)
return penalty
def plot_flux_metric_diagnose(psf_model, idx=0, ax=None, optimal_percentile=None):
"""
Function to evaluate the flux metrics for a single source as a function of
the parameter that controls the aperture size.
The flux metrics are computed by taking into account the PSF models of
neighbor sources.
This function is meant to be used only to generate diagnostic figures.
Parameters
----------
psf_model : scipy.sparce.csr_matrix
Sparse matrix with the PSF models for all targets in the scene. It has shape
[n_sources, n_pixels].
idx : int
        Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0].
ax : matplotlib.axes
Axis to be used to plot the figure
Returns
-------
ax : matplotlib.axes
Figure axes
"""
compl, crowd, cut = [], [], []
for p in range(0, 101, 1):
cut.append(p)
mask = (psf_model[idx] >= np.nanpercentile(psf_model[idx].data, p)).toarray()[0]
crowd.append(compute_CROWDSAP(psf_model, mask, idx))
compl.append(compute_FLFRCSAP(psf_model[idx], mask))
if ax is None:
fig, ax = plt.subplots(1)
ax.plot(cut, compl, label=r"FLFRCSAP", c="tab:blue")
ax.plot(cut, crowd, label=r"CROWDSAP", c="tab:green")
if optimal_percentile:
ax.axvline(optimal_percentile, c="tab:red", label="optimal")
ax.set_xlabel("Percentile")
ax.set_ylabel("Metric")
ax.legend()
return ax
def estimate_source_centroids_aperture(aperture_mask, flux, column, row):
"""
    Computes the centroid via 2D moments methods for all sources at all times. It needs
    `aperture_mask` to be computed first by running `compute_aperture_photometry`.
Parameters
----------
aperture_mask : numpy.ndarray
        Aperture mask, shape is [n_sources, n_pixels]
flux: numpy.ndarray
        Flux values at each pixel and time, in units of electrons / sec
column : numpy.ndarray
Data array containing the "columns" of the detector that each pixel is on.
row : numpy.ndarray
Data array containing the "rows" of the detector that each pixel is on.
Returns
-------
centroid_col : numpy.ndarray
Column pixel number of the moments centroid, shape is [nsources, ntimes].
centroid_row : numpy.ndarray
Row pixel number of the moments centroid, shape is [nsources, ntimes].
"""
centroid_col, centroid_row = [], []
for idx in range(aperture_mask.shape[0]):
total_flux = np.nansum(flux[:, aperture_mask[idx]], axis=1)
centroid_col.append(
np.nansum(
np.tile(column[aperture_mask[idx]], (flux.shape[0], 1))
* flux[:, aperture_mask[idx]],
axis=1,
)
/ total_flux
)
centroid_row.append(
np.nansum(
np.tile(row[aperture_mask[idx]], (flux.shape[0], 1))
* flux[:, aperture_mask[idx]],
axis=1,
)
/ total_flux
)
return np.array(centroid_col), np.array(centroid_row)
def compute_FLFRCSAP(psf_models, aperture_mask):
"""
Compute fraction of target flux enclosed in the optimal aperture to total flux
for a given source (flux completeness).
    Follows the definition by Kinemuchi et al. 2012.
Parameters
----------
psf_models : scipy.sparce.csr_matrix
Sparse matrix with the PSF models for all targets in the scene. It has shape
[n_sources, n_pixels].
aperture_mask: numpy.ndarray
        Boolean array indicating the aperture for the target source. It has shape
[n_sources, n_pixels].
Returns
-------
FLFRCSAP: numpy.ndarray
Completeness metric
"""
return np.array(
psf_models.multiply(aperture_mask.astype(float)).sum(axis=1)
/ psf_models.sum(axis=1)
).ravel()
def compute_CROWDSAP(psf_models, aperture_mask, idx=None):
"""
Compute the ratio of target flux relative to flux from all sources within
    the photometric aperture (i.e. 1 - crowdedness).
    Follows the definition by Kinemuchi et al. 2012.
Parameters
----------
psf_models : scipy.sparce.csr_matrix
Sparse matrix with the PSF models for all targets in the scene. It has shape
[n_sources, n_pixels].
aperture_mask : numpy.ndarray
        Boolean array indicating the aperture for the target source. It has shape
[n_sources, n_pixels].
idx : int
        Source index for which the metric is computed. Value has to be between 0 and
        the size of the first dimension of psf_models.
If None, it returns the metric for all sources (first dimension of psf_model).
Returns
-------
CROWDSAP : numpy.ndarray
        Crowdedness metric
"""
ratio = psf_models.multiply(1 / psf_models.sum(axis=0)).tocsr()
if idx is None:
return np.array(
ratio.multiply(aperture_mask.astype(float)).sum(axis=1)
).ravel() / aperture_mask.sum(axis=1)
else:
return ratio[idx].toarray()[0][aperture_mask].sum() / aperture_mask.sum()
def aperture_mask_to_2d(tpfs, sources, aperture_mask, column, row):
"""
    Convert 1D aperture masks into 2D to match the shape of the TPFs. These 2D aperture
    masks are useful for plotting with lightkurve's TPF plot.
    Because a source can be in more than one TPF, having a single 2D array mask per
    object with the shape of one TPF is not possible.
Parameters
----------
tpfs: lightkurve TargetPixelFileCollection
Collection of Target Pixel files
    sources : list
List of source indices for every TPF in `tpfs`.
aperture_mask : numpy.ndarray
        Aperture mask, shape is [n_sources, n_pixels]
column : numpy.ndarray
Data array containing the "columns" of the detector that each pixel is on.
row : numpy.ndarray
Data array containing the "rows" of the detector that each pixel is on.
Returns
-------
aperture_mask_2d : dictionary
        A dictionary with keys of the form 'TPFindex_SOURCEindex', e.g. a source
        (idx=10) with data in multiple TPFs (TPF indices 1 and 2) will have keys '1_10' and '2_10'.
"""
aperture_mask_2d = {}
for k, tpf in enumerate(tpfs):
# find sources in tpf
sources_in = sources[k]
# row_col pix value of TPF
rc = [
"%i_%i" % (y, x)
for y in np.arange(tpf.row, tpf.row + tpf.shape[1])
for x in np.arange(tpf.column, tpf.column + tpf.shape[2])
]
# iter sources in the TPF
for sdx in sources_in:
# row_col value of pixels inside aperture
rc_in = [
"%i_%i"
% (
row[aperture_mask[sdx]][i],
column[aperture_mask[sdx]][i],
)
for i in range(aperture_mask[sdx].sum())
]
# create initial mask
mask = np.zeros(tpf.shape[1:], dtype=bool).ravel()
# populate mask with True when pixel is inside aperture
mask[np.in1d(rc, rc_in)] = True
mask = mask.reshape(tpf.shape[1:])
aperture_mask_2d["%i_%i" % (k, sdx)] = mask
return aperture_mask_2d
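# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A tiny synthetic scene of two sources on four pixels, showing how the completeness
# (FLFRCSAP) and crowding (CROWDSAP) metrics defined above behave. All numbers are
# invented purely for demonstration.
if __name__ == "__main__":
    from scipy import sparse
    # Two normalized PSF models over 4 pixels (each row sums to 1).
    demo_psf_models = sparse.csr_matrix(
        [[0.7, 0.2, 0.1, 0.0],
         [0.0, 0.1, 0.2, 0.7]]
    )
    # Keep the two brightest pixels of each source in its aperture.
    demo_aperture_mask = np.array(
        [[True, True, False, False],
         [False, False, True, True]]
    )
    # Fraction of each source's flux inside its own aperture -> [0.9, 0.9].
    print("FLFRCSAP:", compute_FLFRCSAP(demo_psf_models, demo_aperture_mask))
    # Fraction of the flux inside source 0's aperture that belongs to source 0.
    print("CROWDSAP:", compute_CROWDSAP(demo_psf_models, demo_aperture_mask[0], idx=0))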
| 35.119534
| 88
| 0.63822
|
819b9dc64118b3438434bf68ed102272f36a1d22
| 398
|
py
|
Python
|
doctorUI/scan/views.py
|
award28/Diabetic_Retinopathy_Detection
|
079a7af791f3442853577c0731c9a797433bbcda
|
[
"MIT"
] | 2
|
2018-08-04T21:47:39.000Z
|
2019-03-23T02:56:59.000Z
|
doctorUI/scan/views.py
|
award28/Diabetic_Retinopathy_Detection
|
079a7af791f3442853577c0731c9a797433bbcda
|
[
"MIT"
] | null | null | null |
doctorUI/scan/views.py
|
award28/Diabetic_Retinopathy_Detection
|
079a7af791f3442853577c0731c9a797433bbcda
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
# Create your views here.
class Scan(View):
template_name = 'scan/index.html'
def get(self, request, *args, **kwargs):
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
        return HttpResponse("Nice post. Now finish the view.")
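# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The Scan class-based view is only reachable once it is wired into a URLconf. A
# minimal entry might look like the following; the "scan/" route and the location of
# the urls module are assumptions made purely for illustration.
#
#     # scan/urls.py
#     from django.urls import path
#     from .views import Scan
#
#     urlpatterns = [
#         path("scan/", Scan.as_view(), name="scan"),
#     ]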
| 30.615385
| 68
| 0.71608
|
0fa911d2b58d19906104dfdffc24f4da39a3aa77
| 11,102
|
py
|
Python
|
scri/asymptotic_bondi_data/__init__.py
|
10220/scri
|
87fc7506038a53432b0a0749d6947aaac0d60996
|
[
"MIT"
] | null | null | null |
scri/asymptotic_bondi_data/__init__.py
|
10220/scri
|
87fc7506038a53432b0a0749d6947aaac0d60996
|
[
"MIT"
] | null | null | null |
scri/asymptotic_bondi_data/__init__.py
|
10220/scri
|
87fc7506038a53432b0a0749d6947aaac0d60996
|
[
"MIT"
] | 2
|
2020-11-12T19:41:23.000Z
|
2020-12-23T19:40:57.000Z
|
import numpy as np
from spherical_functions import LM_total_size
from .. import ModesTimeSeries
from .. import Inertial
from .. import sigma, psi4, psi3, psi2, psi1, psi0
class AsymptoticBondiData:
"""Class to store asymptotic Bondi data
This class stores time data, along with the corresponding values of psi0 through psi4 and sigma.
For simplicity, the data are stored as one contiguous array. That is, *all* values are stored
at all times, even if they are zero, and all Modes objects are stored with ell_min=0, even when
their spins are not zero.
The single contiguous array is then viewed as 6 separate ModesTimeSeries objects, which enables
them to track their spin weights, and provides various convenient methods like `eth` and
`ethbar`; `dot` and `ddot` for time-derivatives; `int` and `iint` for time-integrations; `norm`
to take the norm of a function over the sphere; `bar` for conjugation of the functions (which is
different from just conjugating the mode weights); etc. It also handles algebra correctly --
particularly addition (which is disallowed when the spin weights differ) and multiplication
(which can be delicate with regards to the resulting ell values).
This may lead to some headaches when the user tries to do things that are disabled by Modes
objects. The goal is to create headaches if and only if the user is trying to do things that
really should never be done (like conjugating mode weights, rather than the underlying function;
adding modes with different spin weights; etc.). Please open issues for any situations that
don't meet this standard.
This class also provides various convenience methods for computing things like the mass aspect,
the Bondi four-momentum, the Bianchi identities, etc.
"""
def __init__(self, time, ell_max, multiplication_truncator=sum, frameType=Inertial):
"""Create new storage for asymptotic Bondi data
Parameters
==========
time: int or array_like
Times at which the data will be stored. If this is an int, an empty array of that size
will be created. Otherwise, this must be a 1-dimensional array of floats.
ell_max: int
Maximum ell value to be stored
multiplication_truncator: callable [defaults to `sum`, even though `max` is nicer]
Function to be used by default when multiplying Modes objects together. See the
documentation for spherical_functions.Modes.multiply for more details. The default
behavior with `sum` is the most correct one -- keeping all ell values that result -- but
also the most wasteful, and very likely to be overkill. The user should probably always
use `max`. (Unfortunately, this must remain an opt-in choice, to ensure that the user
is aware of the situation.)
"""
import functools
if np.ndim(time) == 0:
# Assume this is just the size of the time array; construct an empty array
time = np.empty((time,), dtype=float)
elif np.ndim(time) > 1:
raise ValueError(f"Input `time` parameter must be an integer or a 1-d array; it has shape {time.shape}")
if time.dtype != float:
raise ValueError(f"Input `time` parameter must have dtype float; it has dtype {time.dtype}")
ModesTS = functools.partial(ModesTimeSeries, ell_max=ell_max, multiplication_truncator=multiplication_truncator)
shape = [6, time.size, LM_total_size(0, ell_max)]
self.frame = np.array([])
self.frameType = frameType
self._time = time.copy()
self._raw_data = np.zeros(shape, dtype=complex)
self._psi0 = ModesTS(self._raw_data[0], self._time, spin_weight=2)
self._psi1 = ModesTS(self._raw_data[1], self._time, spin_weight=1)
self._psi2 = ModesTS(self._raw_data[2], self._time, spin_weight=0)
self._psi3 = ModesTS(self._raw_data[3], self._time, spin_weight=-1)
self._psi4 = ModesTS(self._raw_data[4], self._time, spin_weight=-2)
self._sigma = ModesTS(self._raw_data[5], self._time, spin_weight=2)
@property
def time(self):
return self._time
@time.setter
def time(self, new_time):
self._time[:] = new_time
return self._time
u = time
t = time
@property
def n_times(self):
return self.time.size
@property
def n_modes(self):
return self._raw_data.shape[-1]
@property
def ell_min(self):
return self._psi2.ell_min
@property
def ell_max(self):
return self._psi2.ell_max
@property
def LM(self):
return self.psi2.LM
@property
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, sigmaprm):
self._sigma[:] = sigmaprm
return self.sigma
@property
def psi4(self):
return self._psi4
@psi4.setter
def psi4(self, psi4prm):
self._psi4[:] = psi4prm
return self.psi4
@property
def psi3(self):
return self._psi3
@psi3.setter
def psi3(self, psi3prm):
self._psi3[:] = psi3prm
return self.psi3
@property
def psi2(self):
return self._psi2
@psi2.setter
def psi2(self, psi2prm):
self._psi2[:] = psi2prm
return self.psi2
@property
def psi1(self):
return self._psi1
@psi1.setter
def psi1(self, psi1prm):
self._psi1[:] = psi1prm
return self.psi1
@property
def psi0(self):
return self._psi0
@psi0.setter
def psi0(self, psi0prm):
self._psi0[:] = psi0prm
return self.psi0
def copy(self):
import copy
new_abd = type(self)(self.t, self.ell_max)
state = copy.deepcopy(self.__dict__)
new_abd.__dict__.update(state)
return new_abd
def interpolate(self, new_times):
new_abd = type(self)(new_times, self.ell_max)
new_abd.frameType = self.frameType
# interpolate waveform data
new_abd.sigma = self.sigma.interpolate(new_times)
new_abd.psi4 = self.psi4.interpolate(new_times)
new_abd.psi3 = self.psi3.interpolate(new_times)
new_abd.psi2 = self.psi2.interpolate(new_times)
new_abd.psi1 = self.psi1.interpolate(new_times)
new_abd.psi0 = self.psi0.interpolate(new_times)
# interpolate frame data if necessary
if self.frame.shape[0] == self.n_times:
import quaternion
new_abd.frame = quaternion.squad(self.frame, self.t, new_times)
return new_abd
def select_data(self, dataType):
if dataType == sigma:
return self.sigma
elif dataType == psi4:
return self.psi4
elif dataType == psi3:
return self.psi3
elif dataType == psi2:
return self.psi2
elif dataType == psi1:
return self.psi1
elif dataType == psi0:
return self.psi0
def speciality_index(self, **kwargs):
"""Computes the Baker-Campanelli speciality index (arXiv:gr-qc/0003031). NOTE: This quantity can only
determine algebraic speciality but can not determine the type! The rule of thumb given by Baker and
Campanelli is that for an algebraically special spacetime the speciality index should differ from unity
by no more than a factor of two.
"""
import spinsfast
import spherical_functions as sf
from spherical_functions import LM_index
output_ell_max = kwargs.pop("output_ell_max") if "output_ell_max" in kwargs else self.ell_max
working_ell_max = kwargs.pop("working_ell_max") if "working_ell_max" in kwargs else 2 * self.ell_max
n_theta = n_phi = 2 * working_ell_max + 1
# Transform to grid representation
psi4 = np.empty((self.n_times, n_theta, n_phi), dtype=complex)
psi3 = np.empty((self.n_times, n_theta, n_phi), dtype=complex)
psi2 = np.empty((self.n_times, n_theta, n_phi), dtype=complex)
psi1 = np.empty((self.n_times, n_theta, n_phi), dtype=complex)
psi0 = np.empty((self.n_times, n_theta, n_phi), dtype=complex)
for t_i in range(self.n_times):
psi4[t_i, :, :] = spinsfast.salm2map(
self.psi4.ndarray[t_i, :], self.psi4.spin_weight, lmax=self.ell_max, Ntheta=n_theta, Nphi=n_phi
)
psi3[t_i, :, :] = spinsfast.salm2map(
self.psi3.ndarray[t_i, :], self.psi3.spin_weight, lmax=self.ell_max, Ntheta=n_theta, Nphi=n_phi
)
psi2[t_i, :, :] = spinsfast.salm2map(
self.psi2.ndarray[t_i, :], self.psi2.spin_weight, lmax=self.ell_max, Ntheta=n_theta, Nphi=n_phi
)
psi1[t_i, :, :] = spinsfast.salm2map(
self.psi1.ndarray[t_i, :], self.psi1.spin_weight, lmax=self.ell_max, Ntheta=n_theta, Nphi=n_phi
)
psi0[t_i, :, :] = spinsfast.salm2map(
self.psi0.ndarray[t_i, :], self.psi0.spin_weight, lmax=self.ell_max, Ntheta=n_theta, Nphi=n_phi
)
curvature_invariant_I = psi4 * psi0 - 4 * psi3 * psi1 + 3 * psi2 ** 2
curvature_invariant_J = (
psi4 * (psi2 * psi0 - psi1 ** 2) - psi3 * (psi3 * psi0 - psi1 * psi2) + psi2 * (psi3 * psi1 - psi2 ** 2)
)
speciality_index = 27 * curvature_invariant_J ** 2 / curvature_invariant_I ** 3
# Transform back to mode representation
speciality_index_modes = np.empty((self.n_times, (working_ell_max) ** 2), dtype=complex)
for t_i in range(self.n_times):
speciality_index_modes[t_i, :] = spinsfast.map2salm(speciality_index[t_i, :], 0, lmax=working_ell_max - 1)
# Convert product ndarray to a ModesTimeSeries object
speciality_index_modes = speciality_index_modes[:, : LM_index(output_ell_max, output_ell_max, 0) + 1]
speciality_index_modes = ModesTimeSeries(
sf.SWSH_modes.Modes(
speciality_index_modes, spin_weight=0, ell_min=0, ell_max=output_ell_max, multiplication_truncator=max
),
time=self.t,
)
return speciality_index_modes
from .from_initial_values import from_initial_values
from .transformations import transform
from .constraints import (
bondi_constraints,
bondi_violations,
bondi_violation_norms,
bianchi_0,
bianchi_1,
bianchi_2,
constraint_3,
constraint_4,
constraint_mass_aspect,
)
from .bms_charges import (
mass_aspect,
bondi_rest_mass,
bondi_four_momentum,
bondi_angular_momentum,
bondi_dimensionless_spin,
bondi_boost_charge,
bondi_CoM_charge,
supermomentum,
)
from .frame_rotations import (
to_inertial_frame,
to_corotating_frame,
to_coprecessing_frame,
rotate_physical_system,
rotate_decomposition_basis,
)
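# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Constructing an empty AsymptoticBondiData container; 201 time steps and ell_max=8
# are arbitrary illustration values, and the psi/sigma arrays start out as zeros.
# Requires the scri package to be importable.
#
#     import numpy as np
#     from scri.asymptotic_bondi_data import AsymptoticBondiData
#
#     times = np.linspace(-100.0, 100.0, 201)
#     abd = AsymptoticBondiData(time=times, ell_max=8, multiplication_truncator=max)
#     print(abd.n_times, abd.n_modes, abd.ell_min, abd.ell_max)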
| 38.020548
| 120
| 0.64601
|
514a880c3c23e48fe922ca3a9b6de88e40959b89
| 6,260
|
py
|
Python
|
src/oci/osub_subscription/models/product.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/osub_subscription/models/product.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/osub_subscription/models/product.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Product(object):
"""
Product description
"""
def __init__(self, **kwargs):
"""
Initializes a new Product object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param part_number:
The value to assign to the part_number property of this Product.
:type part_number: str
:param name:
The value to assign to the name property of this Product.
:type name: str
:param unit_of_measure:
The value to assign to the unit_of_measure property of this Product.
:type unit_of_measure: str
:param billing_category:
The value to assign to the billing_category property of this Product.
:type billing_category: str
:param product_category:
The value to assign to the product_category property of this Product.
:type product_category: str
:param ucm_rate_card_part_type:
The value to assign to the ucm_rate_card_part_type property of this Product.
:type ucm_rate_card_part_type: str
"""
self.swagger_types = {
'part_number': 'str',
'name': 'str',
'unit_of_measure': 'str',
'billing_category': 'str',
'product_category': 'str',
'ucm_rate_card_part_type': 'str'
}
self.attribute_map = {
'part_number': 'partNumber',
'name': 'name',
'unit_of_measure': 'unitOfMeasure',
'billing_category': 'billingCategory',
'product_category': 'productCategory',
'ucm_rate_card_part_type': 'ucmRateCardPartType'
}
self._part_number = None
self._name = None
self._unit_of_measure = None
self._billing_category = None
self._product_category = None
self._ucm_rate_card_part_type = None
@property
def part_number(self):
"""
**[Required]** Gets the part_number of this Product.
        Product part number
:return: The part_number of this Product.
:rtype: str
"""
return self._part_number
@part_number.setter
def part_number(self, part_number):
"""
Sets the part_number of this Product.
        Product part number
:param part_number: The part_number of this Product.
:type: str
"""
self._part_number = part_number
@property
def name(self):
"""
**[Required]** Gets the name of this Product.
Product name
:return: The name of this Product.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Product.
Product name
:param name: The name of this Product.
:type: str
"""
self._name = name
@property
def unit_of_measure(self):
"""
**[Required]** Gets the unit_of_measure of this Product.
Unit of measure
:return: The unit_of_measure of this Product.
:rtype: str
"""
return self._unit_of_measure
@unit_of_measure.setter
def unit_of_measure(self, unit_of_measure):
"""
Sets the unit_of_measure of this Product.
Unit of measure
:param unit_of_measure: The unit_of_measure of this Product.
:type: str
"""
self._unit_of_measure = unit_of_measure
@property
def billing_category(self):
"""
Gets the billing_category of this Product.
Metered service billing category
:return: The billing_category of this Product.
:rtype: str
"""
return self._billing_category
@billing_category.setter
def billing_category(self, billing_category):
"""
Sets the billing_category of this Product.
Metered service billing category
:param billing_category: The billing_category of this Product.
:type: str
"""
self._billing_category = billing_category
@property
def product_category(self):
"""
Gets the product_category of this Product.
Product category
:return: The product_category of this Product.
:rtype: str
"""
return self._product_category
@product_category.setter
def product_category(self, product_category):
"""
Sets the product_category of this Product.
Product category
:param product_category: The product_category of this Product.
:type: str
"""
self._product_category = product_category
@property
def ucm_rate_card_part_type(self):
"""
Gets the ucm_rate_card_part_type of this Product.
Rate card part type of Product
:return: The ucm_rate_card_part_type of this Product.
:rtype: str
"""
return self._ucm_rate_card_part_type
@ucm_rate_card_part_type.setter
def ucm_rate_card_part_type(self, ucm_rate_card_part_type):
"""
Sets the ucm_rate_card_part_type of this Product.
Rate card part type of Product
:param ucm_rate_card_part_type: The ucm_rate_card_part_type of this Product.
:type: str
"""
self._ucm_rate_card_part_type = ucm_rate_card_part_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
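# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The @init_model_state_from_kwargs decorator lets Product be populated directly from
# keyword arguments named after the swagger_types keys; the values below are invented.
if __name__ == "__main__":
    demo_product = Product(
        part_number="B00000",
        name="Example Service",
        unit_of_measure="HOURS",
    )
    print(demo_product.part_number, demo_product.name, demo_product.unit_of_measure)
    print(demo_product)  # __repr__ prints the formatted attribute dictionary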
| 27.699115
| 245
| 0.62524
|
e87f3873ed85e1d9f0bd8dbd9917354cfb98c6df
| 514
|
py
|
Python
|
v0_back/users/admin.py
|
japarra27/project1-wo-docker
|
c448aa187186c6a037bb214d5cd20082391c9b76
|
[
"MIT"
] | null | null | null |
v0_back/users/admin.py
|
japarra27/project1-wo-docker
|
c448aa187186c6a037bb214d5cd20082391c9b76
|
[
"MIT"
] | null | null | null |
v0_back/users/admin.py
|
japarra27/project1-wo-docker
|
c448aa187186c6a037bb214d5cd20082391c9b76
|
[
"MIT"
] | 1
|
2020-08-31T18:41:39.000Z
|
2020-08-31T18:41:39.000Z
|
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from v0_back.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 28.555556
| 83
| 0.747082
|
0da55a250b4a6239d692660deef51aa1b2097105
| 227
|
py
|
Python
|
flixed_django/flixed_django/utils.py
|
nilesh1168/flixed-movie-tracker
|
1ca1c9c74731596e386da001d393230fb86045af
|
[
"MIT"
] | null | null | null |
flixed_django/flixed_django/utils.py
|
nilesh1168/flixed-movie-tracker
|
1ca1c9c74731596e386da001d393230fb86045af
|
[
"MIT"
] | null | null | null |
flixed_django/flixed_django/utils.py
|
nilesh1168/flixed-movie-tracker
|
1ca1c9c74731596e386da001d393230fb86045af
|
[
"MIT"
] | null | null | null |
from flixedREST.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
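# --- Hedged configuration sketch (added for illustration; not part of the original file) ---
# django-rest-framework-jwt only uses a custom response payload handler when it is
# registered in settings.py; the dotted path below assumes this module is importable
# as ``flixed_django.utils``.
#
#     JWT_AUTH = {
#         'JWT_RESPONSE_PAYLOAD_HANDLER': 'flixed_django.utils.my_jwt_response_handler',
#     }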
| 28.375
| 71
| 0.696035
|
6e6bb934fa54aa9b6f9748f48f78131f586b3a9f
| 1,079
|
py
|
Python
|
kubernetes/test/test_v1alpha1_certificate_signing_request_condition.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1alpha1_certificate_signing_request_condition.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1alpha1_certificate_signing_request_condition.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_certificate_signing_request_condition import V1alpha1CertificateSigningRequestCondition
class TestV1alpha1CertificateSigningRequestCondition(unittest.TestCase):
""" V1alpha1CertificateSigningRequestCondition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1CertificateSigningRequestCondition(self):
"""
Test V1alpha1CertificateSigningRequestCondition
"""
model = kubernetes.client.models.v1alpha1_certificate_signing_request_condition.V1alpha1CertificateSigningRequestCondition()
if __name__ == '__main__':
unittest.main()
| 25.093023
| 132
| 0.768304
|
38d9f4ff3acb4af90d7f4987a1d2bc920a932a62
| 1,252
|
py
|
Python
|
src/leetcodepython/top100likedquestions/merge_nums.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 90
|
2018-12-25T06:01:30.000Z
|
2022-01-03T14:01:26.000Z
|
src/leetcodepython/top100likedquestions/merge_nums.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 1
|
2020-08-27T09:53:49.000Z
|
2020-08-28T08:57:49.000Z
|
src/leetcodepython/top100likedquestions/merge_nums.py
|
zhangyu345293721/leetcode
|
1aa5bcb984fd250b54dcfe6da4be3c1c67d14162
|
[
"MIT"
] | 27
|
2019-01-02T01:41:32.000Z
|
2022-01-03T14:01:30.000Z
|
# encoding='utf-8'
'''
Merge arrays
author: zhangyu
date: 2020.1.9
Problem:
Merge two sorted arrays into a single array, making sure the result contains no duplicate elements.
E.g.: nums1=[1,2,3,4]
      nums2=[4,5,6,7,8]
After merging: result=[1,2,3,4,5,6,7,8]
'''
from typing import List
class Solution:
def merge(self, nums1: List[int], nums2: List[int]) -> List[int]:
'''
        Merge two sorted arrays.
        Args:
            nums1: the first sorted array
            nums2: the second sorted array
        Returns:
            the merged array, without duplicates
'''
if not nums1:
return nums2
if not nums2:
return nums1
i, j, result = 0, 0, []
while i < len(nums1) and j < len(nums2):
if nums1[i] < nums2[j]:
result.append(nums1[i])
i += 1
elif nums1[i] == nums2[j]:
result.append(nums1[i])
i += 1
j += 1
else:
result.append(nums2[j])
j += 1
if i == len(nums1):
result.extend(nums2[j:len(nums2)])
if j == len(nums2):
result.extend(nums1[i:len(nums1)])
return result
if __name__ == '__main__':
nums1 = [1, 2, 3, 4]
nums2 = [4, 5, 6, 7, 8]
solution = Solution()
res = solution.merge(nums1, nums2)
print(res)
| 22.357143
| 69
| 0.454473
|
8f8cf3639086c3c4a8e167d65fcaf238f51af2c8
| 1,660
|
py
|
Python
|
obswsrc/requests.py
|
avmaint/obs-ws-rc
|
8ff2c36bdd2ac1636feabb356864b9ebb20e9b30
|
[
"MIT"
] | 38
|
2017-08-07T04:30:28.000Z
|
2021-11-03T08:30:47.000Z
|
obswsrc/requests.py
|
avmaint/obs-ws-rc
|
8ff2c36bdd2ac1636feabb356864b9ebb20e9b30
|
[
"MIT"
] | 10
|
2017-09-20T11:21:41.000Z
|
2021-09-27T22:56:22.000Z
|
obswsrc/requests.py
|
avmaint/obs-ws-rc
|
8ff2c36bdd2ac1636feabb356864b9ebb20e9b30
|
[
"MIT"
] | 13
|
2017-10-28T20:41:39.000Z
|
2020-12-28T02:51:03.000Z
|
"""
This module holds dynamically generated classes.
For more info see protocol.py and protocol.json.
"""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Python
from enum import Enum
# obs-ws-rc
from .struct import Struct, StructField, StructMeta
# =============================================================================
# >> BASE CLASSES
# =============================================================================
class ResponseStatus(Enum):
OK = 'OK'
ERROR = 'ERROR'
class BaseResponseMeta(StructMeta):
def __init__(cls, name, bases, namespace):
cls._fields = cls._fields[:] + (
StructField('message_id', "message-id", str),
StructField(
'status',
"status",
lambda status: ResponseStatus(status.upper())
),
StructField('error', "error", str, True),
)
super().__init__(name, bases, namespace)
class BaseResponse(Struct, metaclass=BaseResponseMeta):
pass
class BaseRequest(Struct):
@property
def type_name(self):
raise NotImplementedError
class response_class(BaseResponse):
pass
def get_request_data(self, message_id):
dict_ = self.copy()
dict_['request-type'] = self.type_name
dict_['message-id'] = message_id
return dict_
def dummy_request(**kwargs):
raise NotImplementedError("protocol.json doesn't implement this request")
AuthenticateRequest = dummy_request
GetAuthRequiredRequest = dummy_request
| 26.349206
| 79
| 0.51988
|
ad7b427f793f509bdb8b05cd6e9c647c3ee5f3a3
| 6,966
|
py
|
Python
|
tests/cli/commands/test_webserver_command.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 2
|
2021-07-30T17:35:51.000Z
|
2021-08-03T13:50:57.000Z
|
tests/cli/commands/test_webserver_command.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 8
|
2021-02-08T20:40:47.000Z
|
2022-03-29T22:27:53.000Z
|
tests/cli/commands/test_webserver_command.py
|
mebelousov/airflow
|
d99833c9b5be9eafc0c7851343ee86b6c20aed40
|
[
"Apache-2.0"
] | 1
|
2021-05-12T11:37:59.000Z
|
2021-05-12T11:37:59.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import tempfile
import unittest
from time import sleep
from unittest import mock
import psutil
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import webserver_command
from airflow.cli.commands.webserver_command import get_num_ready_workers_running
from airflow.models import DagBag
from airflow.utils.cli import setup_locations
from tests.test_utils.config import conf_vars
class TestCLIGetNumReadyWorkersRunning(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
def setUp(self):
self.gunicorn_master_proc = mock.Mock(pid=None)
self.children = mock.MagicMock()
self.child = mock.MagicMock()
self.process = mock.MagicMock()
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with mock.patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "--debug"], env=env)
sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None,
return_code,
"webserver terminated with return code {} in debug mode".format(return_code))
proc.terminate()
proc.wait()
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
def setUp(self) -> None:
self._check_processes()
self._clean_pidfiles()
def _check_processes(self):
try:
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "--full", "--count", "airflow webserver"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "--count", "gunicorn"]).wait())
except: # noqa: E722
subprocess.Popen(["ps", "-ax"]).wait()
raise
def tearDown(self) -> None:
self._check_processes()
def _clean_pidfiles(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
sleep(1)
def test_cli_webserver_foreground(self):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
proc.terminate()
proc.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
proc.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
pidfile_webserver = setup_locations("webserver")[0]
pidfile_monitor = setup_locations("webserver-monitor")[0]
# Run webserver as daemon in background. Note that the wait method is not called.
subprocess.Popen(["airflow", "webserver", "--daemon"])
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "--full", "--count", "airflow webserver"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "--count", "gunicorn"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
proc.wait()
# Patch for causing webserver timeout
@mock.patch("airflow.cli.commands.webserver_command.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten timeout so that this test doesn't take too long time
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
webserver_command.webserver(args)
self.assertEqual(e.exception.code, 1)
| 39.355932
| 109
| 0.682314
|
e4c4e8e654d44b30bf71104be01c9eb95d3d8102
| 2,779
|
py
|
Python
|
computer_version/object_detection/main.py
|
afterloe/opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | 5
|
2020-03-13T07:34:30.000Z
|
2021-10-01T03:03:05.000Z
|
computer_version/object_detection/main.py
|
afterloe/Opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | null | null | null |
computer_version/object_detection/main.py
|
afterloe/Opencv-practice
|
83d76132d004ebbc96d99d34a0fd3fc37a044f9f
|
[
"MIT"
] | 1
|
2020-03-01T12:35:02.000Z
|
2020-03-01T12:35:02.000Z
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import argparse
from imutils.video import VideoStream, FPS
import imutils
import numpy as np
import time
import cv2 as cv
import logging
__version__ = "1.0.0"
logging.basicConfig(level=logging.INFO,
format='[%(asctime)8s][%(filename)s][%(levelname)s] - %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
CONSOLE = logging.getLogger("dev")
CONSOLE.setLevel(logging.DEBUG)
CONSOLE.info("实时对象检测 %s", __version__)
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
if "__main__" == __name__:
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True, help="Caffe 模型部署描述文件")
ap.add_argument("-m", "--model", required=True, help="Caffe 模型")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="阈值")
args = vars(ap.parse_args())
CONSOLE.info("加载模型")
net_model = cv.dnn.readNetFromCaffe(args["prototxt"], args["model"])
CONSOLE.info("加载视频")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()
while True:
frame = vs.read()
if None is frame:
CONSOLE.error("无法读取视频流")
break
frame = imutils.resize(frame, width=400)
h, w = frame.shape[: 2]
blob_data = cv.dnn.blobFromImage(cv.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
net_model.setInput(blob_data)
detections = net_model.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if args["confidence"] < confidence:
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3: 7] * np.array([w, h, w, h])
start_x, start_y, end_x, end_y = box.astype("int")
content = "%s: %.2f%%" % (CLASSES[idx], confidence * 100)
cv.rectangle(frame, (start_x, start_y), (end_x, end_y), COLORS[idx], 2)
y = start_y - 15 if 15 < start_y - 15 else start_y + 15
cv.putText(frame, content, (start_x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
cv.imshow("frame", frame)
key = cv.waitKey(1) & 0xff
if ord("q") == key:
CONSOLE.info("退出监控")
break
fps.update()
fps.stop()
CONSOLE.info("视频播放时间: %.2f" % fps.elapsed())
CONSOLE.info("平均FPS: %.2f" % fps.fps())
cv.destroyAllWindows()
vs.stop()
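# --- Hedged usage note (added for illustration; not part of the original script) ---
# The CLASSES list above matches the 20 PASCAL VOC categories used by MobileNet-SSD
# Caffe models. Example invocation (file names are placeholders for whichever Caffe
# prototxt/caffemodel pair is actually available):
#
#     python main.py --prototxt MobileNetSSD_deploy.prototxt \
#                    --model MobileNetSSD_deploy.caffemodel --confidence 0.5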
| 39.140845
| 103
| 0.558114
|
bb9471714a07f5fabab30ca3ddff74d892c06a20
| 6,097
|
py
|
Python
|
chs_s111/ascii_time_series.py
|
osgirl/chs-s111
|
a88a3de20868d0a0884498fffe3c1a1ea106bd12
|
[
"BSD-2-Clause"
] | null | null | null |
chs_s111/ascii_time_series.py
|
osgirl/chs-s111
|
a88a3de20868d0a0884498fffe3c1a1ea106bd12
|
[
"BSD-2-Clause"
] | null | null | null |
chs_s111/ascii_time_series.py
|
osgirl/chs-s111
|
a88a3de20868d0a0884498fffe3c1a1ea106bd12
|
[
"BSD-2-Clause"
] | null | null | null |
#******************************************************************************
#
#******************************************************************************
from datetime import datetime
from datetime import timedelta
import pytz
#******************************************************************************
class AsciiTimeSeries:
#******************************************************************************
def __init__(self, file_name):
self.file_name = file_name
self.ascii_file = None
self.interval = None
self.start_time = None
self.end_time = None
self.number_of_records = 0
self.current_record = 0
self.latitude = 0
self.longitude = 0
#Open the file.
self.ascii_file = open(self.file_name, 'r')
#Skip the header
self.read_header()
#******************************************************************************
def read_header(self):
"""Read the header of the time series file."""
#The header contains 24 rows, so read them all.
for rowIndex in range(0, 24):
#Read a line of data
data = self.ascii_file.readline()
#If this is the 1st row, then lets decode the start date and time.
if rowIndex == 0:
#66-66 1 : Units of depth [m: metres, f: feet]
self.unit = data[65:66]
#68-71 4 : Date (Year) of first data record
year = data[67:71]
#73-74 2 : Date (Month) of first data record
month = data[72:74]
#76-77 2 : Date (Day) of first data record
day = data[75:77]
#If this is the 2nd row, then lets decode the x and y positions.
elif rowIndex == 1:
#14-15 2 : Latitude (Degrees)
latDeg = data[13:15]
#17-23 7 : Latitude (Minutes up to 4 places of decimal)
latMin = data[16:23]
self.latitude = float(latDeg) + (float(latMin) / 60.0)
#24-24 1 : 'N' or 'S'
if data[23:24] == 'S':
self.latitude *= -1.0
#26-28 3 Longitude (Degrees)
lonDeg = data[25:28]
#30-36 7 : Longitude (Minutes up to 4 places of decimal)
lonMin = data[29:36]
self.longitude = float(lonDeg) + (float(lonMin) / 60.0)
#37-37 1 : 'W' or 'E'
if data[36:37] == 'W':
self.longitude *= -1.0
#62-66 5 : Time Zone [# of hours to add to determine UTC, always include + or - and
# always left justify, (leaves space for Nfld. time). i.e. +03.5]
utcOffset = data[61:66]
#68-69 2 : Time (Hour) of first data record
hour = data[67:69]
#70-71 2 : Time (Minute) of first data record
minute = data[69:71]
#73-74 2 : Time (Second) of first data record
seconds = data[72:74]
                #We now have enough information to construct our timestamp.
timeNotInUTC = datetime(year = int(year), month = int(month), day = int(day),
hour = int(hour), minute = int(minute), second = int(seconds), tzinfo = pytz.utc)
self.deltaToUTC = timedelta(hours = float(utcOffset))
#Store the start time as UTC.
self.start_time = timeNotInUTC + self.deltaToUTC
#If this is the 3rd row, then lets decode the number of records in the file.
elif rowIndex == 2:
#col 01-10 10 : Number of Records to follow header
self.number_of_records = int(data[0:10])
#68-69 2 : Sampling interval (Hours)
sampleHours = data[67:69]
#70-71 2 : Sampling interval (Minutes)
sampleMinutes = data[69:71]
#73-74 2 : Sampling interval (Seconds)
sampleSeconds = data[72:74]
self.interval = timedelta(hours = int(sampleHours), minutes = int(sampleMinutes), seconds = int(sampleSeconds))
#With the start time, number of records, and interval... we can figure out the end time.
self.end_time = self.start_time + (self.number_of_records - 1) * self.interval
#******************************************************************************
def done(self):
"""Determine if we have read all records in the time series file.
:returns: true if all records have been read, else false.
"""
if self.current_record < self.number_of_records:
return False
return True
#******************************************************************************
def read_next_row(self):
"""Read the next row of data from the time series file.
:returns: A tuple containing the date, direction, and speed (in m/s).
"""
#If we are done... throw an error.
if self.done():
raise Exception('AsciiTimeSeries is done!')
self.current_record += 1
asciiData = self.ascii_file.readline()
#We expect the following: Date (YYYY/MM/DD), HourMinute (hhmm), Direction (deg T), Speed (m/s)
components = asciiData.split()
if len(components) != 4:
raise Exception('Record does not have the correct number of values.')
        #decode the date and time, and then convert it to UTC.
dateAndTime = datetime.strptime(components[0] + components[1], '%Y/%m/%d%H:%M')
dateAndTime = dateAndTime + self.deltaToUTC
direction = float(components[2])
speed = float(components[3])
#Return a tuple with dateAndTime, direction, and speed
return (dateAndTime, direction, speed)
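# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical read loop over a CHS ASCII current time-series file. The file name below is
# a placeholder; point it at a real file to exercise the loop.
if __name__ == "__main__":
    import os
    sample_file = "sample_current_series.txt"  # hypothetical input file
    if os.path.exists(sample_file):
        series = AsciiTimeSeries(sample_file)
        print("Start (UTC):", series.start_time)
        print("End (UTC):", series.end_time)
        print("Records:", series.number_of_records)
        while not series.done():
            timestamp, direction, speed = series.read_next_row()
            print(timestamp, direction, speed)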
| 38.10625
| 132
| 0.478596
|
efd58823ec21719e7465de218b174308d5f4e384
| 345
|
py
|
Python
|
core/floor/__init__.py
|
BlenderCN-Org/building_tool
|
9c101dcf2a0df884e19ade87d8724eaa5ed7842b
|
[
"MIT"
] | 1
|
2019-05-25T07:34:15.000Z
|
2019-05-25T07:34:15.000Z
|
core/floor/__init__.py
|
BlenderCN-Org/building_tool
|
9c101dcf2a0df884e19ade87d8724eaa5ed7842b
|
[
"MIT"
] | null | null | null |
core/floor/__init__.py
|
BlenderCN-Org/building_tool
|
9c101dcf2a0df884e19ade87d8724eaa5ed7842b
|
[
"MIT"
] | 1
|
2019-07-05T05:41:13.000Z
|
2019-07-05T05:41:13.000Z
|
import bpy
from .floor import Floor
from .floor_ops import BTOOLS_OT_add_floors
from .floor_props import FloorProperty
classes = (FloorProperty, BTOOLS_OT_add_floors)
def register_floor():
for cls in classes:
bpy.utils.register_class(cls)
def unregister_floor():
for cls in classes:
bpy.utils.unregister_class(cls)
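# --- Hedged usage note (added for illustration; not part of the original file) ---
# register_floor()/unregister_floor() are intended to be called from the add-on's
# top-level register()/unregister() hooks, along the lines of:
#
#     from core.floor import register_floor, unregister_floor
#
#     def register():
#         register_floor()
#
#     def unregister():
#         unregister_floor()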
| 19.166667
| 47
| 0.753623
|
98a9a115152570f8df97deafacecd588f122f0d6
| 3,550
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/lactobacillusspwkb10.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/lactobacillusspwkb10.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/lactobacillusspwkb10.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Lactobacillus sp. wkB10.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LactobacillusSpWkb10(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lactobacillus sp. wkB10 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Lactobacillus sp. wkB10 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LactobacillusSpWkb10",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
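# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Retrieval downloads the STRING edge list on first use, so network access and the
# ensmallen package are required; the import path below assumes the usual re-export
# from ensmallen.datasets.string.
#
#     from ensmallen.datasets.string import LactobacillusSpWkb10
#
#     graph = LactobacillusSpWkb10(directed=False)
#     print(graph)  # human-readable report of the retrieved graph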
| 32.87037
| 223
| 0.676056
|
19cf78330b5b673652db2e5967c6a6998208890e
| 5,803
|
py
|
Python
|
scripts/synapse_pos_specificity.py
|
cabrittin/volumetric_analysis
|
82004378abae963ef02858bf4711786dad76f133
|
[
"MIT"
] | null | null | null |
scripts/synapse_pos_specificity.py
|
cabrittin/volumetric_analysis
|
82004378abae963ef02858bf4711786dad76f133
|
[
"MIT"
] | null | null | null |
scripts/synapse_pos_specificity.py
|
cabrittin/volumetric_analysis
|
82004378abae963ef02858bf4711786dad76f133
|
[
"MIT"
] | null | null | null |
"""
synaptic_specificity.py
Distribution of differences between homologous mean synapse positions.
Author: Christopher Brittin
Created: 07 February 2018
"""
import sys
sys.path.append(r'./volumetric_analysis/')
import matplotlib.pyplot as plt
import matplotlib as mpl
import db
from connectome.load import from_db
import connectome.synspecificity as synspec
import figures.stats as fstats
import aux
mpl.rcParams['xtick.labelsize'] = 24
mpl.rcParams['ytick.labelsize'] = 24
ADULT_COL = '#FFC300'
L4_COL = '#3380FF'
AL_COL = '#14FA29'
lr_pairs = './mat/lr_neurons.txt'
lr_dict = './mat/lr_dict.txt'
homologs = './mat/homologs.txt'
left_nodes = './mat/left_nodes.txt'
right_nodes = './mat/right_nodes.txt'
def write_source(fout,_data):
data = []
for n1 in _data:
for stype in _data[n1]:
for n2 in _data[n1][stype]:
if n2 in ['mean','std','size']: continue
data.append([stype,n1,n2,_data[n1][stype][n2]])
aux.write.from_list(fout,data)
def format_subcell(S,D,thresh=0.05):
dgap,mgap = [],[]
dpre,mpre = [],[]
dpost,mpost = [],[]
for n in S:
#print(n)
if S[n][0] <= thresh and n in D:
dgap += D[n][0]
#mgap += D[n][0][1]
if S[n][1] <= thresh and n in D:
dpre += D[n][1]
#mpre += D[n][1][1]
if S[n][2] <= thresh and n in D:
dpost += D[n][2]
#mpost += D[n][2][1]
# data = [dgap,mgap,dpre,mpre,dpost,mpost]
data = [dgap,dpre,dpost]
#for i in xrange(len(data)):
# data[i] = [d for d in data[i] if d <= 1]
return data
def run(fout=None,source_data=None):
N2U = 'N2U'
JSH = 'JSH'
_remove = ['VC01','VD01','VB01','VB02']
neurons = aux.read.into_list2(lr_pairs)
lrd = aux.read.into_lr_dict(lr_dict)
left = aux.read.into_list(left_nodes)
left.remove('CEHDL')
left.remove('CEHVL')
left.remove('HSNL')
left.remove('PVNL')
left.remove('PLNL')
N2U = from_db(N2U,adjacency=True,chemical=True,
electrical=True,remove=_remove,dataType='networkx')
JSH = from_db(JSH,adjacency=True,chemical=True,
electrical=True,remove=_remove,dataType='networkx')
n2ucon = db.connect.default('N2U')
n2ucur = n2ucon.cursor()
jshcon = db.connect.default('JSH')
jshcur = jshcon.cursor()
if source_data:
fsplit = source_data.split('.')
nout = fsplit[0] + '_adult.' + fsplit[1]
jout = fsplit[0] + '_l4.' + fsplit[1]
src = synspec.get_source_data(n2ucur,N2U.A.nodes())
write_source(nout,src)
src = synspec.get_source_data(jshcur,JSH.A.nodes())
write_source(jout,src)
both_nodes = set(N2U.A.nodes()) & set(JSH.A.nodes())
both_nodes.remove('SABD')
both_nodes.remove('FLPL')
both_nodes.remove('FLPR')
if 'VD01' in both_nodes: both_nodes.remove('VD01')
S1 = synspec.get_bilateral_specificity(N2U,lrd,left)
D1 = synspec.get_bilateral_subcell_specificity(n2ucur,neurons,lrd)
B1 = format_subcell(S1,D1)
S2 = synspec.get_bilateral_specificity(JSH,lrd,left)
D2 = synspec.get_bilateral_subcell_specificity(jshcur,neurons,lrd)
B2 = format_subcell(S2,D2)
S3 = synspec.get_developmental_specificity(N2U,JSH,
both_nodes=both_nodes)
D3 = synspec.get_developmental_subcell_specificity(n2ucur,
jshcur,
both_nodes=both_nodes)
B3 = format_subcell(S3,D3)
n2ucon.close()
jshcon.close()
labels = None
pos = [1.5,2,2.5,3.5,4,4.5,5.5,6,6.5]
data = [B1[0],B2[0],B3[0],
B1[1],B2[1],B3[1],
B1[2],B2[2],B3[2]]
print('Stats:')
fstats.print_wilcoxon(data[0],'Adult L/R gap')
fstats.print_wilcoxon(data[1],'L4 L/R gap')
fstats.print_wilcoxon(data[2],'Adult/L4 gap')
fstats.print_wilcoxon(data[3],'Adult L/R pre')
fstats.print_wilcoxon(data[4],'L4 L/R pre')
fstats.print_wilcoxon(data[5],'Adult/L4 pre')
fstats.print_wilcoxon(data[6],'Adult L/R post')
fstats.print_wilcoxon(data[7],'L4 L/R post')
fstats.print_wilcoxon(data[8],'Adult/L4 post')
colors = [ADULT_COL,L4_COL,AL_COL,
ADULT_COL,L4_COL,AL_COL,
ADULT_COL,L4_COL,AL_COL,]
fig,ax = plt.subplots(1,1,figsize=(15,10))
bp = fstats.plot_boxplots(ax,data,labels=labels,positions=pos,
ylim=[-5,5],
ylabel='Mean position difference',
title='Mean synapse position',
showfliers=True,width=0.2,colors=colors)
_len = [len(d) for d in data]
_ticklabels = ['gap j.', 'presyn.', 'postsyn.']
for i in range(3):
n = ','.join(list(map(str,[_len[3*i + _j] for _j in range(3)])))
_ticklabels[i] += "\n($n=" + n +"$)"
ax.set_xticklabels(_ticklabels)
ax.set_xticks([2, 4, 6])
ax.xaxis.set_tick_params(labelsize=32)
ax.set_ylim([-1,1])
ax.axvspan(0,3,facecolor='#C3C3C3')
ax.axvspan(3,5,facecolor='#D8D7D7')
ax.axvspan(5,8,facecolor='#C3C3C3')
ax.axhline(0,color='r',linewidth=3,linestyle='--')
_A, = ax.plot([1,1],ADULT_COL)
_L, = ax.plot([1,1],L4_COL)
_AL, = ax.plot([1,1],AL_COL)
leg =ax.legend((_A, _L,_AL),
('Adult L/R', 'L4 L/R','Adult/L4'),
fontsize=18)
for legobj in leg.legendHandles:
legobj.set_linewidth(4.0)
_A.set_visible(False)
_L.set_visible(False)
_AL.set_visible(False)
plt.tight_layout()
if fout: plt.savefig(fout)
plt.show()
if __name__=='__main__':
run()
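# Usage note (added for illustration): run() assumes local databases named
# 'N2U' and 'JSH' reachable through db.connect.default and the ./mat/*.txt
# lookup files listed above; a typical invocation is simply
#   python synapse_pos_specificity.py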
| 31.367568
| 77
| 0.58487
|
ef10c8d719f83011e9d443407de82ed11e97b7dd
| 799
|
py
|
Python
|
keras_addon/activations.py
|
fedorovarthur/Keras-NALU-Layer
|
1c8b3f63c07b954384d54061fe9f38a2ca4d8998
|
[
"MIT"
] | null | null | null |
keras_addon/activations.py
|
fedorovarthur/Keras-NALU-Layer
|
1c8b3f63c07b954384d54061fe9f38a2ca4d8998
|
[
"MIT"
] | null | null | null |
keras_addon/activations.py
|
fedorovarthur/Keras-NALU-Layer
|
1c8b3f63c07b954384d54061fe9f38a2ca4d8998
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import pi, sqrt
import keras.backend as K
def gelu(x, approximation='tanh'):
assert approximation in ('sigmoid', 'tanh'), \
'Approximation method must be chosen from [tanh, sigmoid]'
if approximation == 'tanh':
return .5 * x * (1 + K.tanh(sqrt(2/pi) * (x + .044715 * x ** 3)))
else:
return x * K.sigmoid(1.702 * x)
def silu(x):
return x * K.sigmoid(x)
def swish(x, beta=1):
return x * K.sigmoid(beta * x)
def lelu(x, mu=0, s=1):
return x * K.sigmoid((x - mu)/s)
def nac(x, w, m):
return K.dot(x, K.tanh(w) * K.sigmoid(m))
def log_nac(x, w, m):
return K.exp(K.dot(K.log(K.abs(x) + K.epsilon()), nac(x, w, m)))
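# Illustrative usage (added; not part of the original module): Keras accepts a
# plain callable as a layer activation, so the functions above can be passed
# directly. Layer sizes below are arbitrary placeholders.
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([
        Dense(64, input_shape=(16,), activation=gelu),
        Dense(1, activation=swish),
    ])
    model.summary()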
| 21.026316
| 73
| 0.612015
|
4741132de6d64bdd5d37261092c9dc02d60be71f
| 397
|
py
|
Python
|
database.py
|
tugberkozkara/songs-i-like-api
|
7581e63cb016cc749d5a5ac85f05bd4eca51d994
|
[
"MIT"
] | null | null | null |
database.py
|
tugberkozkara/songs-i-like-api
|
7581e63cb016cc749d5a5ac85f05bd4eca51d994
|
[
"MIT"
] | null | null | null |
database.py
|
tugberkozkara/songs-i-like-api
|
7581e63cb016cc749d5a5ac85f05bd4eca51d994
|
[
"MIT"
] | null | null | null |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
# Local Postgres
#engine = create_engine("postgresql://postgres:postgres@localhost/songs_db",
# echo=True
#)
# Heroku Postgres
engine = create_engine(os.environ['DATABASE_URL'])
Base = declarative_base()
SessionLocal = sessionmaker(bind=engine)
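# Illustrative usage sketch (added; not part of the original module). Creating
# the tables and opening a session assumes DATABASE_URL is set and that the
# models inherit from Base:
#
#   Base.metadata.create_all(bind=engine)
#   db = SessionLocal()
#   try:
#       ...  # run queries with db
#   finally:
#       db.close()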
| 20.894737
| 76
| 0.798489
|
4c7b27152a6725c24fb8eb85c73ba3da1a2e204c
| 2,265
|
py
|
Python
|
tests/support/mock_server.py
|
nelsond/sirah-matisse-commander
|
78699878b2acd098a18bfe8029aa33c4b1b12fed
|
[
"MIT"
] | 1
|
2021-11-02T15:10:49.000Z
|
2021-11-02T15:10:49.000Z
|
tests/support/mock_server.py
|
nelsond/sirah-matisse-commander
|
78699878b2acd098a18bfe8029aa33c4b1b12fed
|
[
"MIT"
] | 2
|
2021-11-02T15:10:26.000Z
|
2021-11-02T15:37:49.000Z
|
tests/support/mock_server.py
|
nelsond/sirah-matisse-commander
|
78699878b2acd098a18bfe8029aa33c4b1b12fed
|
[
"MIT"
] | null | null | null |
import socket
import threading
import time
class MockServer:
"""
Simple TCP mock server running in a separate thread for testing network
connections to remote.
Arguments:
port (int, optional): Listening port, 30000 by default.
"""
def __init__(self, port: int = 30000):
self._port = port
self._request = None
self._response = None
self._ready = None
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.settimeout(1)
self._thread = None
self._loop = False
def start(self):
"""Start server."""
attempts = 1
while self._loop is False and attempts < 10:
try:
self._socket.bind(('127.0.0.1', self._port))
self._loop = True
except OSError:
time.sleep(0.1 * attempts)
attempts += 1
if self._loop is False:
raise RuntimeError('Could not bind to network address.')
self._ready = threading.Event()
self._thread = threading.Thread(target=self.listen)
self._thread.daemon = True
self._thread.start()
return self._ready
def stop(self):
"""Stop server."""
self._loop = False
if self._thread is not None:
self._thread.join()
def setup(self, request: bytes, response: bytes):
"""
Set expected request and response data.
Arguments:
request (bytes):
Expected request data.
            response (bytes):
Response data once expected request data is received.
"""
self._request = request
self._response = response
def listen(self):
"""Listen for new connections."""
while self._loop is True:
self._socket.listen()
self._ready.set()
try:
conn, _ = self._socket.accept()
request = conn.recv(1024)
if request == self._request:
conn.send(self._response)
conn.close()
except socket.timeout:
pass
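# Minimal usage sketch (added for illustration, not part of the original
# module): start the server, register one request/response pair, exercise it
# with a plain socket client, then shut it down.
if __name__ == '__main__':
    server = MockServer(port=30000)
    server.setup(b'PING\n', b'PONG\n')
    server.start().wait()

    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', 30000))
    client.send(b'PING\n')
    print(client.recv(1024))  # expected: b'PONG\n'
    client.close()

    server.stop()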
| 25.166667
| 75
| 0.550552
|
ba2a5e341af77e37663f0e96c04bb953c0ea0f81
| 7,784
|
py
|
Python
|
spinup/utils/test_policy_action_plain.py
|
ColorlessBoy/spinningup
|
2d6cf818e0f370dcbbc43ebdcde483a129d0dd9c
|
[
"MIT"
] | null | null | null |
spinup/utils/test_policy_action_plain.py
|
ColorlessBoy/spinningup
|
2d6cf818e0f370dcbbc43ebdcde483a129d0dd9c
|
[
"MIT"
] | null | null | null |
spinup/utils/test_policy_action_plain.py
|
ColorlessBoy/spinningup
|
2d6cf818e0f370dcbbc43ebdcde483a129d0dd9c
|
[
"MIT"
] | null | null | null |
import time
import joblib
import os
import os.path as osp
import tensorflow as tf
import torch
from spinup import EpochLogger
from spinup.utils.logx import restore_tf_graph
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
device = torch.device('cuda')
def load_policy_and_env(fpath, itr='last', deterministic=False):
"""
Load a policy from save, whether it's TF or PyTorch, along with RL env.
Not exceptionally future-proof, but it will suffice for basic uses of the
Spinning Up implementations.
Checks to see if there's a tf1_save folder. If yes, assumes the model
is tensorflow and loads it that way. Otherwise, loads as if there's a
PyTorch save.
"""
# determine if tf save or pytorch save
if any(['tf1_save' in x for x in os.listdir(fpath)]):
backend = 'tf1'
else:
backend = 'pytorch'
# handle which epoch to load from
if itr=='last':
# check filenames for epoch (AKA iteration) numbers, find maximum value
if backend == 'tf1':
saves = [int(x[8:]) for x in os.listdir(fpath) if 'tf1_save' in x and len(x)>8]
elif backend == 'pytorch':
pytsave_path = osp.join(fpath, 'pyt_save')
# Each file in this folder has naming convention 'modelXX.pt', where
# 'XX' is either an integer or empty string. Empty string case
# corresponds to len(x)==8, hence that case is excluded.
saves = [int(x.split('.')[0][5:]) for x in os.listdir(pytsave_path) if len(x)>8 and 'model' in x]
itr = '%d'%max(saves) if len(saves) > 0 else ''
else:
assert isinstance(itr, int), \
"Bad value provided for itr (needs to be int or 'last')."
itr = '%d'%itr
# load the get_action function
if backend == 'tf1':
get_action = load_tf_policy(fpath, itr, deterministic)
else:
get_action = load_pytorch_policy(fpath, itr, deterministic)
# try to load environment from save
# (sometimes this will fail because the environment could not be pickled)
try:
state = joblib.load(osp.join(fpath, 'vars'+itr+'.pkl'))
env = state['env']
except:
env = None
return env, get_action
def load_tf_policy(fpath, itr, deterministic=False):
""" Load a tensorflow policy saved with Spinning Up Logger."""
fname = osp.join(fpath, 'tf1_save'+itr)
print('\n\nLoading from %s.\n\n'%fname)
# load the things!
sess = tf.Session()
model = restore_tf_graph(sess, fname)
# get the correct op for executing actions
if deterministic and 'mu' in model.keys():
# 'deterministic' is only a valid option for SAC policies
print('Using deterministic action op.')
action_op = model['mu']
else:
print('Using default action op.')
action_op = model['pi']
# make function for producing an action given a single state
get_action = lambda x : sess.run(action_op, feed_dict={model['x']: x[None,:]})[0]
return get_action
def load_pytorch_policy(fpath, itr, deterministic=False):
""" Load a pytorch policy saved with Spinning Up Logger."""
fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
print('\n\nLoading from %s.\n\n'%fname)
model = torch.load(fname).to(device)
# make function for producing an action given a single state
def get_action(o):
with torch.no_grad():
o = torch.FloatTensor(o.reshape(1, -1)).to(device)
action = model.act(o, deterministic)
if 'gac' not in fpath:
action = action[0]
return action
return get_action
def run_policy(env, get_action, max_ep_len=None, num_episodes=100, render=True, name='default'):
assert env is not None, \
"Environment not found!\n\n It looks like the environment wasn't saved, " + \
"and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
"page on Experiment Outputs for how to handle this situation."
# axis_bound = env.action_space.high[0] + 0.01
axis_bound = 1.0 + 0.01
# fig = plt.figure(figsize=(10, 10))
# ax = Axes3D(fig)
# dots = ax.scatter([], [], [], 'b.', alpha=0.06)
# dots1 = ax.scatter([], [], [], 'r.', alpha=0.02)
# dots2 = ax.scatter([], [], [], 'r.', alpha=0.02)
# dots3 = ax.scatter([], [], [], 'r.', alpha=0.02)
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs = axs.reshape(-1)
dots = [ax.plot([], [], 'bo', alpha=0.005)[0] for ax in axs]
def init():
# ax.set_xlim(-axis_bound, axis_bound)
# ax.set_ylim(-axis_bound, axis_bound)
# ax.set_zlim(-axis_bound, axis_bound)
# ax.set_xlabel('X')
# ax.set_ylabel('Y')
# ax.set_zlabel('Z')
# ax.set_title(name, fontsize='large')
# ax.grid()
axis_name = ['XY', 'XZ', 'YZ']
for ax, s in zip(axs, axis_name):
ax.set_xlim(-axis_bound, axis_bound)
ax.set_ylim(-axis_bound, axis_bound)
ax.set_xlabel(s[0])
ax.set_ylabel(s[1])
ax.set_title(name+'-'+s, fontsize='x-large')
ax.grid()
def gen_dot():
o, r, d, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0
logger = EpochLogger()
while n < num_episodes:
if render:
env.render()
time.sleep(1e-3)
a = get_action(o)
o, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
actions = []
for _ in range(1000):
a = get_action(o)
actions.append(a)
yield np.array(actions)
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
print('Episode %d \t EpRet %.3f \t EpLen %d'%(n, ep_ret, ep_len))
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
n += 1
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.dump_tabular()
def update_dot(actions):
# dots._offsets3d = (actions[:, 0], actions[:, 1], actions[:, 2])
# dots1._offsets3d = (actions[:, 0], actions[:, 1], -axis_bound)
# dots2._offsets3d = (actions[:, 0], axis_bound, actions[:, 2])
# dots3._offsets3d = (-axis_bound, actions[:, 1], actions[:, 2])
dots[0].set_data(actions[:, 0], actions[:, 1])
dots[1].set_data(actions[:, 0], actions[:, 2])
dots[2].set_data(actions[:, 1], actions[:, 2])
return dots
ani = animation.FuncAnimation(fig, update_dot, frames = gen_dot, interval = 500, init_func=init)
ani.save('./{}.gif'.format(name), writer='pillow', fps=2)
# init()
# for idx, actions in enumerate(gen_dot()):
# update_dot(actions)
# fig.savefig('./pics/{}-{}.png'.format(name, str(idx)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('fpath', type=str)
parser.add_argument('--len', '-l', type=int, default=0)
parser.add_argument('--episodes', '-n', type=int, default=100)
parser.add_argument('--norender', '-nr', action='store_true')
parser.add_argument('--itr', '-i', type=int, default=-1)
parser.add_argument('--deterministic', '-d', action='store_true')
parser.add_argument('--name', type=str, default='default')
args = parser.parse_args()
env, get_action = load_policy_and_env(args.fpath,
args.itr if args.itr >=0 else 'last',
args.deterministic)
run_policy(env, get_action, args.len, args.episodes, not(args.norender), args.name)
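# Example invocation (added for illustration; the experiment directory is
# hypothetical and must contain a Spinning Up save with a pickled env):
#   python test_policy_action_plain.py data/sac_experiment -n 5 -d --name sac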
| 35.870968
| 109
| 0.595838
|
d3035c6291155a7d5bff0879bc745075e5b44201
| 17,596
|
py
|
Python
|
UNITER/train_nlvr2.py
|
dinhanhx/hateful_memes_classification
|
1be84b6489512f313b4272cc8644dc354e84f051
|
[
"MIT"
] | 1
|
2021-09-24T03:22:35.000Z
|
2021-09-24T03:22:35.000Z
|
train_nlvr2.py
|
hexiang-hu/UNITER
|
f2582bc2532b58f95a07973f3112b4876ed3de3e
|
[
"MIT"
] | null | null | null |
train_nlvr2.py
|
hexiang-hu/UNITER
|
f2582bc2532b58f95a07973f3112b4876ed3de3e
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm
from data import (DistributedTokenBucketSampler, DetectFeatLmdb, TxtTokLmdb,
Nlvr2PairedDataset, Nlvr2PairedEvalDataset,
Nlvr2TripletDataset, Nlvr2TripletEvalDataset,
nlvr2_paired_collate, nlvr2_paired_eval_collate,
nlvr2_triplet_collate, nlvr2_triplet_eval_collate,
PrefetchLoader)
from model.nlvr2 import (UniterForNlvr2Paired, UniterForNlvr2Triplet,
UniterForNlvr2PairedAttn)
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, BUCKET_SIZE
def create_dataloader(img_path, txt_path, batch_size, is_train,
dset_cls, collate_fn, opts):
img_db = DetectFeatLmdb(img_path, opts.conf_th, opts.max_bb, opts.min_bb,
opts.num_bb, opts.compressed_db)
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len if is_train else -1)
dset = dset_cls(txt_db, img_db, opts.use_img_type)
sampler = DistributedTokenBucketSampler(
hvd.size(), hvd.rank(), dset.lens,
bucket_size=BUCKET_SIZE, batch_size=batch_size, droplast=is_train)
loader = DataLoader(dset, batch_sampler=sampler,
num_workers=opts.n_workers, pin_memory=opts.pin_mem,
collate_fn=collate_fn)
return PrefetchLoader(loader)
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_dir}")
if 'paired' in opts.model:
DatasetCls = Nlvr2PairedDataset
EvalDatasetCls = Nlvr2PairedEvalDataset
collate_fn = nlvr2_paired_collate
eval_collate_fn = nlvr2_paired_eval_collate
if opts.model == 'paired':
ModelCls = UniterForNlvr2Paired
elif opts.model == 'paired-attn':
ModelCls = UniterForNlvr2PairedAttn
else:
raise ValueError('unrecognized model type')
elif opts.model == 'triplet':
DatasetCls = Nlvr2TripletDataset
EvalDatasetCls = Nlvr2TripletEvalDataset
ModelCls = UniterForNlvr2Triplet
collate_fn = nlvr2_triplet_collate
eval_collate_fn = nlvr2_triplet_eval_collate
else:
raise ValueError('unrecognized model type')
# data loaders
train_dataloader = create_dataloader(opts.train_img_db, opts.train_txt_db,
opts.train_batch_size, True,
DatasetCls, collate_fn, opts)
val_dataloader = create_dataloader(opts.val_img_db, opts.val_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
test_dataloader = create_dataloader(opts.test_img_db, opts.test_txt_db,
opts.val_batch_size, False,
EvalDatasetCls, eval_collate_fn, opts)
# Prepare model
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
model = ModelCls.from_pretrained(opts.model_config, state_dict=checkpoint,
img_dim=IMG_DIM)
model.init_type_embedding()
model.to(device)
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
# Prepare optimizer
optimizer = build_optimizer(model, opts)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
os.makedirs(join(opts.output_dir, 'results')) # store val predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataloader.dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
targets = batch['targets']
n_examples += targets.size(0)
loss = model(**batch, compute_loss=True)
loss = loss.mean()
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
# gather gradients from every processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
lr_this_step = get_lr_sched(global_step, opts)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
losses = all_gather_list(running_loss)
running_loss = RunningMeter(
'loss', sum(l.val for l in losses)/len(losses))
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 100 == 0:
# monitor training throughput
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'Step {global_step}: '
f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
if global_step % opts.valid_steps == 0:
for split, loader in [('val', val_dataloader),
('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"Step {global_step}: finished {n_epoch} epochs")
for split, loader in [('val', val_dataloader), ('test', test_dataloader)]:
LOGGER.info(f"Step {global_step}: start running "
f"validation on {split} split...")
log, results = validate(model, loader, split)
with open(f'{opts.output_dir}/results/'
f'{split}_results_{global_step}_'
f'rank{rank}_final.csv', 'w') as f:
for id_, ans in results:
f.write(f'{id_},{ans}\n')
TB_LOGGER.log_scaler_dict(log)
model_saver.save(model, f'{global_step}_final')
@torch.no_grad()
def validate(model, val_loader, split):
model.eval()
val_loss = 0
tot_score = 0
n_ex = 0
st = time()
results = []
for i, batch in enumerate(val_loader):
qids = batch['qids']
targets = batch['targets']
del batch['targets']
del batch['qids']
scores = model(**batch, targets=None, compute_loss=False)
loss = F.cross_entropy(scores, targets, reduction='sum')
val_loss += loss.item()
tot_score += (scores.max(dim=-1, keepdim=False)[1] == targets
).sum().item()
answers = ['True' if i == 1 else 'False'
for i in scores.max(dim=-1, keepdim=False
)[1].cpu().tolist()]
results.extend(zip(qids, answers))
n_ex += len(qids)
val_loss = sum(all_gather_list(val_loss))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_loss /= n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/{split}_loss': val_loss,
f'valid/{split}_acc': val_acc,
f'valid/{split}_ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score: {val_acc*100:.2f}")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_dir",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_dir",
default=None, type=str,
help="The input validation images.")
parser.add_argument("--test_txt_db",
default=None, type=str,
help="The input test corpus. (LMDB)")
parser.add_argument("--test_img_dir",
default=None, type=str,
help="The input test images.")
parser.add_argument('--compressed_db', action='store_true',
help='use compressed LMDB')
parser.add_argument("--model_config",
default=None, type=str,
help="json file for model architecture")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model")
parser.add_argument("--model", default='paired',
choices=['paired', 'triplet', 'paired-attn'],
help="choose from 2 model architecture")
parser.add_argument('--use_img_type', action='store_true',
help="expand the type embedding for 2 image types")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+', type=float,
help="beta for adam optimizer")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for.")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
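# Example invocation (added for illustration; the config path and output
# directory are hypothetical, and multi-GPU launching assumes Horovod's
# horovodrun wrapper):
#   horovodrun -np 4 python train_nlvr2.py \
#       --config config/train-nlvr2-base.json \
#       --output_dir /storage/nlvr2/default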
| 42.708738
| 79
| 0.558479
|
6aea56851a7c9a89cd4b7526325ec47769ed1544
| 2,781
|
py
|
Python
|
sqlalchemy_continuum/operation.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | 1
|
2015-04-25T18:42:22.000Z
|
2015-04-25T18:42:22.000Z
|
sqlalchemy_continuum/operation.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | null | null | null |
sqlalchemy_continuum/operation.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import copy
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import six
import sqlalchemy as sa
from sqlalchemy_utils import identity
class Operation(object):
INSERT = 0
UPDATE = 1
DELETE = 2
def __init__(self, target, type):
self.target = target
self.type = type
self.processed = False
def __eq__(self, other):
return (
self.target == other.target and
self.type == other.type
)
def __ne__(self, other):
return not (self == other)
class Operations(object):
"""
A collection of operations
"""
def __init__(self):
self.objects = OrderedDict()
def format_key(self, target):
# We cannot use target._sa_instance_state.identity here since object's
# identity is not yet updated at this phase
return (target.__class__, identity(target))
def __contains__(self, target):
return self.format_key(target) in self.objects
def __setitem__(self, key, operation):
self.objects[key] = operation
def __getitem__(self, key):
return self.objects[key]
def __delitem__(self, key):
del self.objects[key]
def __bool__(self):
return bool(self.objects)
def __nonzero__(self):
return self.__bool__()
@property
def entities(self):
"""
        Return the set of versioned entity classes affected by the
        operations collected in this object.
"""
return set(key[0] for key, _ in self.iteritems())
def iteritems(self):
return six.iteritems(self.objects)
def items(self):
return self.objects.items()
def add(self, operation):
self[self.format_key(operation.target)] = operation
def add_insert(self, target):
if target in self:
# If the object is deleted and then inserted within the same
# transaction we are actually dealing with an update.
self.add(Operation(target, Operation.UPDATE))
else:
self.add(Operation(target, Operation.INSERT))
def add_update(self, target):
state_copy = copy(sa.inspect(target).committed_state)
relationships = sa.inspect(target.__class__).relationships
# Remove all ONETOMANY and MANYTOMANY relationships
for rel_key, relationship in relationships.items():
if relationship.direction.name in ['ONETOMANY', 'MANYTOMANY']:
if rel_key in state_copy:
del state_copy[rel_key]
if state_copy:
self.add(Operation(target, Operation.UPDATE))
def add_delete(self, target):
self.add(Operation(target, Operation.DELETE))
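# Illustrative sketch (added; not part of the original module): the session
# event hooks in SQLAlchemy-Continuum feed flushed objects into an Operations
# collection roughly like this. The `article` instance is hypothetical.
#
#   ops = Operations()
#   ops.add_insert(article)   # new object -> INSERT
#   ops.add_update(article)   # changed columns -> UPDATE (no-op if unchanged)
#   ops.add_delete(article)   # -> DELETE
#   for (cls, ident), op in ops.items():
#       print(cls, ident, op.type)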
| 27.264706
| 78
| 0.636102
|
e04451c561610a9a456a8bea1c72ed06a4e9f0f0
| 457
|
py
|
Python
|
OpenCV/1.1.py
|
Nivedya-27/Autumn-of-Automation
|
2f645b58d035d6277f7ee0ff77814be812815f6d
|
[
"MIT"
] | null | null | null |
OpenCV/1.1.py
|
Nivedya-27/Autumn-of-Automation
|
2f645b58d035d6277f7ee0ff77814be812815f6d
|
[
"MIT"
] | null | null | null |
OpenCV/1.1.py
|
Nivedya-27/Autumn-of-Automation
|
2f645b58d035d6277f7ee0ff77814be812815f6d
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
import sys
import os
img=cv.imread('test.png')
if img is None:
sys.exit('could not read the image; check directory')
grey=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret,bw=cv.threshold(grey,127,255,cv.THRESH_BINARY)
images=[grey,bw]
titles=['grayscale','black and white']
os.makedirs('bw_gray', exist_ok=True)
for i in range(2):
cv.imwrite(os.path.join('bw_gray/',(titles[i]+'.png')),images[i])
if cv.waitKey(0)==27:
cv.destroyAllWindows()
| 26.882353
| 67
| 0.730853
|